2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
36 #include "amdgpu_display.h"
37 #include "amdgpu_ucode.h"
39 #include "amdgpu_dm.h"
40 #ifdef CONFIG_DRM_AMD_DC_HDCP
41 #include "amdgpu_dm_hdcp.h"
43 #include "amdgpu_pm.h"
45 #include "amd_shared.h"
46 #include "amdgpu_dm_irq.h"
47 #include "dm_helpers.h"
48 #include "amdgpu_dm_mst_types.h"
49 #if defined(CONFIG_DEBUG_FS)
50 #include "amdgpu_dm_debugfs.h"
53 #include "ivsrcid/ivsrcid_vislands30.h"
55 #include <linux/module.h>
56 #include <linux/moduleparam.h>
57 #include <linux/version.h>
58 #include <linux/types.h>
59 #include <linux/pm_runtime.h>
60 #include <linux/pci.h>
61 #include <linux/firmware.h>
62 #include <linux/component.h>
64 #include <drm/drm_atomic.h>
65 #include <drm/drm_atomic_uapi.h>
66 #include <drm/drm_atomic_helper.h>
67 #include <drm/drm_dp_mst_helper.h>
68 #include <drm/drm_fb_helper.h>
69 #include <drm/drm_fourcc.h>
70 #include <drm/drm_edid.h>
71 #include <drm/drm_vblank.h>
72 #include <drm/drm_audio_component.h>
73 #include <drm/drm_hdcp.h>
75 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
76 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
78 #include "dcn/dcn_1_0_offset.h"
79 #include "dcn/dcn_1_0_sh_mask.h"
80 #include "soc15_hw_ip.h"
81 #include "vega10_ip_offset.h"
83 #include "soc15_common.h"
86 #include "modules/inc/mod_freesync.h"
87 #include "modules/power/power_helpers.h"
88 #include "modules/inc/mod_info_packet.h"
90 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
91 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU
);
96 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
97 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
98 * requests into DC requests, and DC responses into DRM responses.
100 * The root control structure is &struct amdgpu_display_manager.
103 /* basic init/fini API */
104 static int amdgpu_dm_init(struct amdgpu_device
*adev
);
105 static void amdgpu_dm_fini(struct amdgpu_device
*adev
);
108 * initializes drm_device display related structures, based on the information
109 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
110 * drm_encoder, drm_mode_config
112 * Returns 0 on success
114 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
);
115 /* removes and deallocates the drm structures, created by the above function */
116 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
);
119 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
);
121 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
122 struct drm_plane
*plane
,
123 unsigned long possible_crtcs
,
124 const struct dc_plane_cap
*plane_cap
);
125 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
126 struct drm_plane
*plane
,
127 uint32_t link_index
);
128 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
129 struct amdgpu_dm_connector
*amdgpu_dm_connector
,
131 struct amdgpu_encoder
*amdgpu_encoder
);
132 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
133 struct amdgpu_encoder
*aencoder
,
134 uint32_t link_index
);
136 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
);
138 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
139 struct drm_atomic_state
*state
,
142 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
);
144 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
145 struct drm_atomic_state
*state
);
147 static void handle_cursor_update(struct drm_plane
*plane
,
148 struct drm_plane_state
*old_plane_state
);
151 * dm_vblank_get_counter
154 * Get counter for number of vertical blanks
157 * struct amdgpu_device *adev - [in] desired amdgpu device
158 * int disp_idx - [in] which CRTC to get the counter from
161 * Counter for vertical blanks
163 static u32
dm_vblank_get_counter(struct amdgpu_device
*adev
, int crtc
)
165 if (crtc
>= adev
->mode_info
.num_crtc
)
168 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
169 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
173 if (acrtc_state
->stream
== NULL
) {
174 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
179 return dc_stream_get_vblank_counter(acrtc_state
->stream
);
183 static int dm_crtc_get_scanoutpos(struct amdgpu_device
*adev
, int crtc
,
184 u32
*vbl
, u32
*position
)
186 uint32_t v_blank_start
, v_blank_end
, h_position
, v_position
;
188 if ((crtc
< 0) || (crtc
>= adev
->mode_info
.num_crtc
))
191 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
192 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
195 if (acrtc_state
->stream
== NULL
) {
196 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
202 * TODO rework base driver to use values directly.
203 * for now parse it back into reg-format
205 dc_stream_get_scanoutpos(acrtc_state
->stream
,
211 *position
= v_position
| (h_position
<< 16);
212 *vbl
= v_blank_start
| (v_blank_end
<< 16);
218 static bool dm_is_idle(void *handle
)
224 static int dm_wait_for_idle(void *handle
)
230 static bool dm_check_soft_reset(void *handle
)
235 static int dm_soft_reset(void *handle
)
241 static struct amdgpu_crtc
*
242 get_crtc_by_otg_inst(struct amdgpu_device
*adev
,
245 struct drm_device
*dev
= adev
->ddev
;
246 struct drm_crtc
*crtc
;
247 struct amdgpu_crtc
*amdgpu_crtc
;
249 if (otg_inst
== -1) {
251 return adev
->mode_info
.crtcs
[0];
254 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
255 amdgpu_crtc
= to_amdgpu_crtc(crtc
);
257 if (amdgpu_crtc
->otg_inst
== otg_inst
)
264 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state
*dm_state
)
266 return dm_state
->freesync_config
.state
== VRR_STATE_ACTIVE_VARIABLE
||
267 dm_state
->freesync_config
.state
== VRR_STATE_ACTIVE_FIXED
;
271 * dm_pflip_high_irq() - Handle pageflip interrupt
272 * @interrupt_params: ignored
274 * Handles the pageflip interrupt by notifying all interested parties
275 * that the pageflip has been completed.
277 static void dm_pflip_high_irq(void *interrupt_params
)
279 struct amdgpu_crtc
*amdgpu_crtc
;
280 struct common_irq_params
*irq_params
= interrupt_params
;
281 struct amdgpu_device
*adev
= irq_params
->adev
;
283 struct drm_pending_vblank_event
*e
;
284 struct dm_crtc_state
*acrtc_state
;
285 uint32_t vpos
, hpos
, v_blank_start
, v_blank_end
;
288 amdgpu_crtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_PFLIP
);
290 /* IRQ could occur when in initial stage */
291 /* TODO work and BO cleanup */
292 if (amdgpu_crtc
== NULL
) {
293 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
297 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
299 if (amdgpu_crtc
->pflip_status
!= AMDGPU_FLIP_SUBMITTED
){
300 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
301 amdgpu_crtc
->pflip_status
,
302 AMDGPU_FLIP_SUBMITTED
,
303 amdgpu_crtc
->crtc_id
,
305 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
309 /* page flip completed. */
310 e
= amdgpu_crtc
->event
;
311 amdgpu_crtc
->event
= NULL
;
316 acrtc_state
= to_dm_crtc_state(amdgpu_crtc
->base
.state
);
317 vrr_active
= amdgpu_dm_vrr_active(acrtc_state
);
319 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
321 !dc_stream_get_scanoutpos(acrtc_state
->stream
, &v_blank_start
,
322 &v_blank_end
, &hpos
, &vpos
) ||
323 (vpos
< v_blank_start
)) {
324 /* Update to correct count and vblank timestamp if racing with
325 * vblank irq. This also updates to the correct vblank timestamp
326 * even in VRR mode, as scanout is past the front-porch atm.
328 drm_crtc_accurate_vblank_count(&amdgpu_crtc
->base
);
330 /* Wake up userspace by sending the pageflip event with proper
331 * count and timestamp of vblank of flip completion.
334 drm_crtc_send_vblank_event(&amdgpu_crtc
->base
, e
);
336 /* Event sent, so done with vblank for this flip */
337 drm_crtc_vblank_put(&amdgpu_crtc
->base
);
340 /* VRR active and inside front-porch: vblank count and
341 * timestamp for pageflip event will only be up to date after
342 * drm_crtc_handle_vblank() has been executed from late vblank
343 * irq handler after start of back-porch (vline 0). We queue the
344 * pageflip event for send-out by drm_crtc_handle_vblank() with
345 * updated timestamp and count, once it runs after us.
347 * We need to open-code this instead of using the helper
348 * drm_crtc_arm_vblank_event(), as that helper would
349 * call drm_crtc_accurate_vblank_count(), which we must
350 * not call in VRR mode while we are in front-porch!
353 /* sequence will be replaced by real count during send-out. */
354 e
->sequence
= drm_crtc_vblank_count(&amdgpu_crtc
->base
);
355 e
->pipe
= amdgpu_crtc
->crtc_id
;
357 list_add_tail(&e
->base
.link
, &adev
->ddev
->vblank_event_list
);
361 /* Keep track of vblank of this flip for flip throttling. We use the
362 * cooked hw counter, as that one incremented at start of this vblank
363 * of pageflip completion, so last_flip_vblank is the forbidden count
364 * for queueing new pageflips if vsync + VRR is enabled.
366 amdgpu_crtc
->last_flip_vblank
= amdgpu_get_vblank_counter_kms(adev
->ddev
,
367 amdgpu_crtc
->crtc_id
);
369 amdgpu_crtc
->pflip_status
= AMDGPU_FLIP_NONE
;
370 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
372 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
373 amdgpu_crtc
->crtc_id
, amdgpu_crtc
,
374 vrr_active
, (int) !e
);
377 static void dm_vupdate_high_irq(void *interrupt_params
)
379 struct common_irq_params
*irq_params
= interrupt_params
;
380 struct amdgpu_device
*adev
= irq_params
->adev
;
381 struct amdgpu_crtc
*acrtc
;
382 struct dm_crtc_state
*acrtc_state
;
385 acrtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_VUPDATE
);
388 acrtc_state
= to_dm_crtc_state(acrtc
->base
.state
);
390 DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc
->crtc_id
,
391 amdgpu_dm_vrr_active(acrtc_state
));
393 /* Core vblank handling is done here after end of front-porch in
394 * vrr mode, as vblank timestamping will give valid results
395 * while now done after front-porch. This will also deliver
396 * page-flip completion events that have been queued to us
397 * if a pageflip happened inside front-porch.
399 if (amdgpu_dm_vrr_active(acrtc_state
)) {
400 drm_crtc_handle_vblank(&acrtc
->base
);
402 /* BTR processing for pre-DCE12 ASICs */
403 if (acrtc_state
->stream
&&
404 adev
->family
< AMDGPU_FAMILY_AI
) {
405 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
406 mod_freesync_handle_v_update(
407 adev
->dm
.freesync_module
,
409 &acrtc_state
->vrr_params
);
411 dc_stream_adjust_vmin_vmax(
414 &acrtc_state
->vrr_params
.adjust
);
415 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
422 * dm_crtc_high_irq() - Handles CRTC interrupt
423 * @interrupt_params: ignored
425 * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
428 static void dm_crtc_high_irq(void *interrupt_params
)
430 struct common_irq_params
*irq_params
= interrupt_params
;
431 struct amdgpu_device
*adev
= irq_params
->adev
;
432 struct amdgpu_crtc
*acrtc
;
433 struct dm_crtc_state
*acrtc_state
;
436 acrtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_VBLANK
);
439 acrtc_state
= to_dm_crtc_state(acrtc
->base
.state
);
441 DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc
->crtc_id
,
442 amdgpu_dm_vrr_active(acrtc_state
));
444 /* Core vblank handling at start of front-porch is only possible
445 * in non-vrr mode, as only there vblank timestamping will give
446 * valid results while done in front-porch. Otherwise defer it
447 * to dm_vupdate_high_irq after end of front-porch.
449 if (!amdgpu_dm_vrr_active(acrtc_state
))
450 drm_crtc_handle_vblank(&acrtc
->base
);
452 /* Following stuff must happen at start of vblank, for crc
453 * computation and below-the-range btr support in vrr mode.
455 amdgpu_dm_crtc_handle_crc_irq(&acrtc
->base
);
457 if (acrtc_state
->stream
&& adev
->family
>= AMDGPU_FAMILY_AI
&&
458 acrtc_state
->vrr_params
.supported
&&
459 acrtc_state
->freesync_config
.state
== VRR_STATE_ACTIVE_VARIABLE
) {
460 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
461 mod_freesync_handle_v_update(
462 adev
->dm
.freesync_module
,
464 &acrtc_state
->vrr_params
);
466 dc_stream_adjust_vmin_vmax(
469 &acrtc_state
->vrr_params
.adjust
);
470 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
475 static int dm_set_clockgating_state(void *handle
,
476 enum amd_clockgating_state state
)
481 static int dm_set_powergating_state(void *handle
,
482 enum amd_powergating_state state
)
487 /* Prototypes of private functions */
488 static int dm_early_init(void* handle
);
490 /* Allocate memory for FBC compressed data */
491 static void amdgpu_dm_fbc_init(struct drm_connector
*connector
)
493 struct drm_device
*dev
= connector
->dev
;
494 struct amdgpu_device
*adev
= dev
->dev_private
;
495 struct dm_comressor_info
*compressor
= &adev
->dm
.compressor
;
496 struct amdgpu_dm_connector
*aconn
= to_amdgpu_dm_connector(connector
);
497 struct drm_display_mode
*mode
;
498 unsigned long max_size
= 0;
500 if (adev
->dm
.dc
->fbc_compressor
== NULL
)
503 if (aconn
->dc_link
->connector_signal
!= SIGNAL_TYPE_EDP
)
506 if (compressor
->bo_ptr
)
510 list_for_each_entry(mode
, &connector
->modes
, head
) {
511 if (max_size
< mode
->htotal
* mode
->vtotal
)
512 max_size
= mode
->htotal
* mode
->vtotal
;
516 int r
= amdgpu_bo_create_kernel(adev
, max_size
* 4, PAGE_SIZE
,
517 AMDGPU_GEM_DOMAIN_GTT
, &compressor
->bo_ptr
,
518 &compressor
->gpu_addr
, &compressor
->cpu_addr
);
521 DRM_ERROR("DM: Failed to initialize FBC\n");
523 adev
->dm
.dc
->ctx
->fbc_gpu_addr
= compressor
->gpu_addr
;
524 DRM_INFO("DM: FBC alloc %lu\n", max_size
*4);
531 static int amdgpu_dm_audio_component_get_eld(struct device
*kdev
, int port
,
532 int pipe
, bool *enabled
,
533 unsigned char *buf
, int max_bytes
)
535 struct drm_device
*dev
= dev_get_drvdata(kdev
);
536 struct amdgpu_device
*adev
= dev
->dev_private
;
537 struct drm_connector
*connector
;
538 struct drm_connector_list_iter conn_iter
;
539 struct amdgpu_dm_connector
*aconnector
;
544 mutex_lock(&adev
->dm
.audio_lock
);
546 drm_connector_list_iter_begin(dev
, &conn_iter
);
547 drm_for_each_connector_iter(connector
, &conn_iter
) {
548 aconnector
= to_amdgpu_dm_connector(connector
);
549 if (aconnector
->audio_inst
!= port
)
553 ret
= drm_eld_size(connector
->eld
);
554 memcpy(buf
, connector
->eld
, min(max_bytes
, ret
));
558 drm_connector_list_iter_end(&conn_iter
);
560 mutex_unlock(&adev
->dm
.audio_lock
);
562 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port
, ret
, *enabled
);
567 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops
= {
568 .get_eld
= amdgpu_dm_audio_component_get_eld
,
571 static int amdgpu_dm_audio_component_bind(struct device
*kdev
,
572 struct device
*hda_kdev
, void *data
)
574 struct drm_device
*dev
= dev_get_drvdata(kdev
);
575 struct amdgpu_device
*adev
= dev
->dev_private
;
576 struct drm_audio_component
*acomp
= data
;
578 acomp
->ops
= &amdgpu_dm_audio_component_ops
;
580 adev
->dm
.audio_component
= acomp
;
585 static void amdgpu_dm_audio_component_unbind(struct device
*kdev
,
586 struct device
*hda_kdev
, void *data
)
588 struct drm_device
*dev
= dev_get_drvdata(kdev
);
589 struct amdgpu_device
*adev
= dev
->dev_private
;
590 struct drm_audio_component
*acomp
= data
;
594 adev
->dm
.audio_component
= NULL
;
597 static const struct component_ops amdgpu_dm_audio_component_bind_ops
= {
598 .bind
= amdgpu_dm_audio_component_bind
,
599 .unbind
= amdgpu_dm_audio_component_unbind
,
602 static int amdgpu_dm_audio_init(struct amdgpu_device
*adev
)
609 adev
->mode_info
.audio
.enabled
= true;
611 adev
->mode_info
.audio
.num_pins
= adev
->dm
.dc
->res_pool
->audio_count
;
613 for (i
= 0; i
< adev
->mode_info
.audio
.num_pins
; i
++) {
614 adev
->mode_info
.audio
.pin
[i
].channels
= -1;
615 adev
->mode_info
.audio
.pin
[i
].rate
= -1;
616 adev
->mode_info
.audio
.pin
[i
].bits_per_sample
= -1;
617 adev
->mode_info
.audio
.pin
[i
].status_bits
= 0;
618 adev
->mode_info
.audio
.pin
[i
].category_code
= 0;
619 adev
->mode_info
.audio
.pin
[i
].connected
= false;
620 adev
->mode_info
.audio
.pin
[i
].id
=
621 adev
->dm
.dc
->res_pool
->audios
[i
]->inst
;
622 adev
->mode_info
.audio
.pin
[i
].offset
= 0;
625 ret
= component_add(adev
->dev
, &amdgpu_dm_audio_component_bind_ops
);
629 adev
->dm
.audio_registered
= true;
634 static void amdgpu_dm_audio_fini(struct amdgpu_device
*adev
)
639 if (!adev
->mode_info
.audio
.enabled
)
642 if (adev
->dm
.audio_registered
) {
643 component_del(adev
->dev
, &amdgpu_dm_audio_component_bind_ops
);
644 adev
->dm
.audio_registered
= false;
647 /* TODO: Disable audio? */
649 adev
->mode_info
.audio
.enabled
= false;
652 void amdgpu_dm_audio_eld_notify(struct amdgpu_device
*adev
, int pin
)
654 struct drm_audio_component
*acomp
= adev
->dm
.audio_component
;
656 if (acomp
&& acomp
->audio_ops
&& acomp
->audio_ops
->pin_eld_notify
) {
657 DRM_DEBUG_KMS("Notify ELD: %d\n", pin
);
659 acomp
->audio_ops
->pin_eld_notify(acomp
->audio_ops
->audio_ptr
,
664 static int amdgpu_dm_init(struct amdgpu_device
*adev
)
666 struct dc_init_data init_data
;
667 #ifdef CONFIG_DRM_AMD_DC_HDCP
668 struct dc_callback_init init_params
;
671 adev
->dm
.ddev
= adev
->ddev
;
672 adev
->dm
.adev
= adev
;
674 /* Zero all the fields */
675 memset(&init_data
, 0, sizeof(init_data
));
676 #ifdef CONFIG_DRM_AMD_DC_HDCP
677 memset(&init_params
, 0, sizeof(init_params
));
680 mutex_init(&adev
->dm
.dc_lock
);
681 mutex_init(&adev
->dm
.audio_lock
);
683 if(amdgpu_dm_irq_init(adev
)) {
684 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
688 init_data
.asic_id
.chip_family
= adev
->family
;
690 init_data
.asic_id
.pci_revision_id
= adev
->rev_id
;
691 init_data
.asic_id
.hw_internal_rev
= adev
->external_rev_id
;
693 init_data
.asic_id
.vram_width
= adev
->gmc
.vram_width
;
694 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
695 init_data
.asic_id
.atombios_base_address
=
696 adev
->mode_info
.atom_context
->bios
;
698 init_data
.driver
= adev
;
700 adev
->dm
.cgs_device
= amdgpu_cgs_create_device(adev
);
702 if (!adev
->dm
.cgs_device
) {
703 DRM_ERROR("amdgpu: failed to create cgs device.\n");
707 init_data
.cgs_device
= adev
->dm
.cgs_device
;
709 init_data
.dce_environment
= DCE_ENV_PRODUCTION_DRV
;
712 * TODO debug why this doesn't work on Raven
714 if (adev
->flags
& AMD_IS_APU
&&
715 adev
->asic_type
>= CHIP_CARRIZO
&&
716 adev
->asic_type
<= CHIP_RAVEN
)
717 init_data
.flags
.gpu_vm_support
= true;
719 if (amdgpu_dc_feature_mask
& DC_FBC_MASK
)
720 init_data
.flags
.fbc_support
= true;
722 if (amdgpu_dc_feature_mask
& DC_MULTI_MON_PP_MCLK_SWITCH_MASK
)
723 init_data
.flags
.multi_mon_pp_mclk_switch
= true;
725 init_data
.flags
.power_down_display_on_boot
= true;
727 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
728 init_data
.soc_bounding_box
= adev
->dm
.soc_bounding_box
;
731 /* Display Core create. */
732 adev
->dm
.dc
= dc_create(&init_data
);
735 DRM_INFO("Display Core initialized with v%s!\n", DC_VER
);
737 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER
);
741 dc_hardware_init(adev
->dm
.dc
);
743 adev
->dm
.freesync_module
= mod_freesync_create(adev
->dm
.dc
);
744 if (!adev
->dm
.freesync_module
) {
746 "amdgpu: failed to initialize freesync_module.\n");
748 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
749 adev
->dm
.freesync_module
);
751 amdgpu_dm_init_color_mod();
753 #ifdef CONFIG_DRM_AMD_DC_HDCP
754 if (adev
->asic_type
>= CHIP_RAVEN
) {
755 adev
->dm
.hdcp_workqueue
= hdcp_create_workqueue(&adev
->psp
, &init_params
.cp_psp
, adev
->dm
.dc
);
757 if (!adev
->dm
.hdcp_workqueue
)
758 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
760 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev
->dm
.hdcp_workqueue
);
762 dc_init_callbacks(adev
->dm
.dc
, &init_params
);
765 if (amdgpu_dm_initialize_drm_device(adev
)) {
767 "amdgpu: failed to initialize sw for display support.\n");
771 /* Update the actual used number of crtc */
772 adev
->mode_info
.num_crtc
= adev
->dm
.display_indexes_num
;
774 /* TODO: Add_display_info? */
776 /* TODO use dynamic cursor width */
777 adev
->ddev
->mode_config
.cursor_width
= adev
->dm
.dc
->caps
.max_cursor_size
;
778 adev
->ddev
->mode_config
.cursor_height
= adev
->dm
.dc
->caps
.max_cursor_size
;
780 if (drm_vblank_init(adev
->ddev
, adev
->dm
.display_indexes_num
)) {
782 "amdgpu: failed to initialize sw for display support.\n");
786 #if defined(CONFIG_DEBUG_FS)
787 if (dtn_debugfs_init(adev
))
788 DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
791 DRM_DEBUG_DRIVER("KMS initialized.\n");
795 amdgpu_dm_fini(adev
);
800 static void amdgpu_dm_fini(struct amdgpu_device
*adev
)
802 amdgpu_dm_audio_fini(adev
);
804 amdgpu_dm_destroy_drm_device(&adev
->dm
);
806 #ifdef CONFIG_DRM_AMD_DC_HDCP
807 if (adev
->dm
.hdcp_workqueue
) {
808 hdcp_destroy(adev
->dm
.hdcp_workqueue
);
809 adev
->dm
.hdcp_workqueue
= NULL
;
813 dc_deinit_callbacks(adev
->dm
.dc
);
816 /* DC Destroy TODO: Replace destroy DAL */
818 dc_destroy(&adev
->dm
.dc
);
820 * TODO: pageflip, vlank interrupt
822 * amdgpu_dm_irq_fini(adev);
825 if (adev
->dm
.cgs_device
) {
826 amdgpu_cgs_destroy_device(adev
->dm
.cgs_device
);
827 adev
->dm
.cgs_device
= NULL
;
829 if (adev
->dm
.freesync_module
) {
830 mod_freesync_destroy(adev
->dm
.freesync_module
);
831 adev
->dm
.freesync_module
= NULL
;
834 mutex_destroy(&adev
->dm
.audio_lock
);
835 mutex_destroy(&adev
->dm
.dc_lock
);
840 static int load_dmcu_fw(struct amdgpu_device
*adev
)
842 const char *fw_name_dmcu
= NULL
;
844 const struct dmcu_firmware_header_v1_0
*hdr
;
846 switch(adev
->asic_type
) {
869 if (ASICREV_IS_PICASSO(adev
->external_rev_id
))
870 fw_name_dmcu
= FIRMWARE_RAVEN_DMCU
;
871 else if (ASICREV_IS_RAVEN2(adev
->external_rev_id
))
872 fw_name_dmcu
= FIRMWARE_RAVEN_DMCU
;
877 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
881 if (adev
->firmware
.load_type
!= AMDGPU_FW_LOAD_PSP
) {
882 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
886 r
= request_firmware_direct(&adev
->dm
.fw_dmcu
, fw_name_dmcu
, adev
->dev
);
888 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
889 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
890 adev
->dm
.fw_dmcu
= NULL
;
894 dev_err(adev
->dev
, "amdgpu_dm: Can't load firmware \"%s\"\n",
899 r
= amdgpu_ucode_validate(adev
->dm
.fw_dmcu
);
901 dev_err(adev
->dev
, "amdgpu_dm: Can't validate firmware \"%s\"\n",
903 release_firmware(adev
->dm
.fw_dmcu
);
904 adev
->dm
.fw_dmcu
= NULL
;
908 hdr
= (const struct dmcu_firmware_header_v1_0
*)adev
->dm
.fw_dmcu
->data
;
909 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_ERAM
].ucode_id
= AMDGPU_UCODE_ID_DMCU_ERAM
;
910 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_ERAM
].fw
= adev
->dm
.fw_dmcu
;
911 adev
->firmware
.fw_size
+=
912 ALIGN(le32_to_cpu(hdr
->header
.ucode_size_bytes
) - le32_to_cpu(hdr
->intv_size_bytes
), PAGE_SIZE
);
914 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_INTV
].ucode_id
= AMDGPU_UCODE_ID_DMCU_INTV
;
915 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_INTV
].fw
= adev
->dm
.fw_dmcu
;
916 adev
->firmware
.fw_size
+=
917 ALIGN(le32_to_cpu(hdr
->intv_size_bytes
), PAGE_SIZE
);
919 adev
->dm
.dmcu_fw_version
= le32_to_cpu(hdr
->header
.ucode_version
);
921 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
/* amd_ip_funcs sw_init hook: only needs to fetch the DMCU firmware. */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return load_dmcu_fw(adev);
}
933 static int dm_sw_fini(void *handle
)
935 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
937 if(adev
->dm
.fw_dmcu
) {
938 release_firmware(adev
->dm
.fw_dmcu
);
939 adev
->dm
.fw_dmcu
= NULL
;
945 static int detect_mst_link_for_all_connectors(struct drm_device
*dev
)
947 struct amdgpu_dm_connector
*aconnector
;
948 struct drm_connector
*connector
;
949 struct drm_connector_list_iter iter
;
952 drm_connector_list_iter_begin(dev
, &iter
);
953 drm_for_each_connector_iter(connector
, &iter
) {
954 aconnector
= to_amdgpu_dm_connector(connector
);
955 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
956 aconnector
->mst_mgr
.aux
) {
957 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
959 aconnector
->base
.base
.id
);
961 ret
= drm_dp_mst_topology_mgr_set_mst(&aconnector
->mst_mgr
, true);
963 DRM_ERROR("DM_MST: Failed to start MST\n");
964 aconnector
->dc_link
->type
=
965 dc_connection_single
;
970 drm_connector_list_iter_end(&iter
);
975 static int dm_late_init(void *handle
)
977 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
979 struct dmcu_iram_parameters params
;
980 unsigned int linear_lut
[16];
982 struct dmcu
*dmcu
= adev
->dm
.dc
->res_pool
->dmcu
;
985 for (i
= 0; i
< 16; i
++)
986 linear_lut
[i
] = 0xFFFF * i
/ 15;
989 params
.backlight_ramping_start
= 0xCCCC;
990 params
.backlight_ramping_reduction
= 0xCCCCCCCC;
991 params
.backlight_lut_array_size
= 16;
992 params
.backlight_lut_array
= linear_lut
;
994 /* Min backlight level after ABM reduction, Don't allow below 1%
995 * 0xFFFF x 0.01 = 0x28F
997 params
.min_abm_backlight
= 0x28F;
999 /* todo will enable for navi10 */
1000 if (adev
->asic_type
<= CHIP_RAVEN
) {
1001 ret
= dmcu_load_iram(dmcu
, params
);
1007 return detect_mst_link_for_all_connectors(adev
->ddev
);
1010 static void s3_handle_mst(struct drm_device
*dev
, bool suspend
)
1012 struct amdgpu_dm_connector
*aconnector
;
1013 struct drm_connector
*connector
;
1014 struct drm_connector_list_iter iter
;
1015 struct drm_dp_mst_topology_mgr
*mgr
;
1017 bool need_hotplug
= false;
1019 drm_connector_list_iter_begin(dev
, &iter
);
1020 drm_for_each_connector_iter(connector
, &iter
) {
1021 aconnector
= to_amdgpu_dm_connector(connector
);
1022 if (aconnector
->dc_link
->type
!= dc_connection_mst_branch
||
1023 aconnector
->mst_port
)
1026 mgr
= &aconnector
->mst_mgr
;
1029 drm_dp_mst_topology_mgr_suspend(mgr
);
1031 ret
= drm_dp_mst_topology_mgr_resume(mgr
);
1033 drm_dp_mst_topology_mgr_set_mst(mgr
, false);
1034 need_hotplug
= true;
1038 drm_connector_list_iter_end(&iter
);
1041 drm_kms_helper_hotplug_event(dev
);
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
1093 static int dm_suspend(void *handle
)
1095 struct amdgpu_device
*adev
= handle
;
1096 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1099 WARN_ON(adev
->dm
.cached_state
);
1100 adev
->dm
.cached_state
= drm_atomic_helper_suspend(adev
->ddev
);
1102 s3_handle_mst(adev
->ddev
, true);
1104 amdgpu_dm_irq_suspend(adev
);
1107 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D3
);
1112 static struct amdgpu_dm_connector
*
1113 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state
*state
,
1114 struct drm_crtc
*crtc
)
1117 struct drm_connector_state
*new_con_state
;
1118 struct drm_connector
*connector
;
1119 struct drm_crtc
*crtc_from_state
;
1121 for_each_new_connector_in_state(state
, connector
, new_con_state
, i
) {
1122 crtc_from_state
= new_con_state
->crtc
;
1124 if (crtc_from_state
== crtc
)
1125 return to_amdgpu_dm_connector(connector
);
1131 static void emulated_link_detect(struct dc_link
*link
)
1133 struct dc_sink_init_data sink_init_data
= { 0 };
1134 struct display_sink_capability sink_caps
= { 0 };
1135 enum dc_edid_status edid_status
;
1136 struct dc_context
*dc_ctx
= link
->ctx
;
1137 struct dc_sink
*sink
= NULL
;
1138 struct dc_sink
*prev_sink
= NULL
;
1140 link
->type
= dc_connection_none
;
1141 prev_sink
= link
->local_sink
;
1143 if (prev_sink
!= NULL
)
1144 dc_sink_retain(prev_sink
);
1146 switch (link
->connector_signal
) {
1147 case SIGNAL_TYPE_HDMI_TYPE_A
: {
1148 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1149 sink_caps
.signal
= SIGNAL_TYPE_HDMI_TYPE_A
;
1153 case SIGNAL_TYPE_DVI_SINGLE_LINK
: {
1154 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1155 sink_caps
.signal
= SIGNAL_TYPE_DVI_SINGLE_LINK
;
1159 case SIGNAL_TYPE_DVI_DUAL_LINK
: {
1160 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1161 sink_caps
.signal
= SIGNAL_TYPE_DVI_DUAL_LINK
;
1165 case SIGNAL_TYPE_LVDS
: {
1166 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1167 sink_caps
.signal
= SIGNAL_TYPE_LVDS
;
1171 case SIGNAL_TYPE_EDP
: {
1172 sink_caps
.transaction_type
=
1173 DDC_TRANSACTION_TYPE_I2C_OVER_AUX
;
1174 sink_caps
.signal
= SIGNAL_TYPE_EDP
;
1178 case SIGNAL_TYPE_DISPLAY_PORT
: {
1179 sink_caps
.transaction_type
=
1180 DDC_TRANSACTION_TYPE_I2C_OVER_AUX
;
1181 sink_caps
.signal
= SIGNAL_TYPE_VIRTUAL
;
1186 DC_ERROR("Invalid connector type! signal:%d\n",
1187 link
->connector_signal
);
1191 sink_init_data
.link
= link
;
1192 sink_init_data
.sink_signal
= sink_caps
.signal
;
1194 sink
= dc_sink_create(&sink_init_data
);
1196 DC_ERROR("Failed to create sink!\n");
1200 /* dc_sink_create returns a new reference */
1201 link
->local_sink
= sink
;
1203 edid_status
= dm_helpers_read_local_edid(
1208 if (edid_status
!= EDID_OK
)
1209 DC_ERROR("Failed to read EDID");
/*
 * dm_resume() - amd_ip_funcs .resume hook for the DM IP block.
 *
 * Rebuilds the private dc_state that DC invalidated on entry to S3,
 * powers the display hardware back up (D0), re-runs sink detection on
 * every non-MST-child connector, and finally replays the atomic state
 * that dm_suspend() cached.
 *
 * NOTE(review): extraction dropped several lines of this function
 * (loop index declaration, the trailing return, some braces); the
 * comments below describe only what is visible here.
 */
1213 static int dm_resume(void *handle
)
1215 struct amdgpu_device
*adev
= handle
;
1216 struct drm_device
*ddev
= adev
->ddev
;
1217 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1218 struct amdgpu_dm_connector
*aconnector
;
1219 struct drm_connector
*connector
;
1220 struct drm_connector_list_iter iter
;
1221 struct drm_crtc
*crtc
;
1222 struct drm_crtc_state
*new_crtc_state
;
1223 struct dm_crtc_state
*dm_new_crtc_state
;
1224 struct drm_plane
*plane
;
1225 struct drm_plane_state
*new_plane_state
;
1226 struct dm_plane_state
*dm_new_plane_state
;
1227 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(dm
->atomic_obj
.state
);
1228 enum dc_connection_type new_connection_type
= dc_connection_none
;
1231 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1232 dc_release_state(dm_state
->context
);
1233 dm_state
->context
= dc_create_state(dm
->dc
);
1234 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1235 dc_resource_state_construct(dm
->dc
, dm_state
->context
);
1237 /* power on hardware */
1238 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D0
);
1240 /* program HPD filter */
1244 * early enable HPD Rx IRQ, should be done before set mode as short
1245 * pulse interrupts are used for MST
1247 amdgpu_dm_irq_resume_early(adev
);
1249 /* On resume we need to rewrite the MSTM control bits to enable MST*/
1250 s3_handle_mst(ddev
, false);
/*
 * Walk every connector and redo link detection; MST child connectors
 * are skipped because the MST framework manages them itself.
 */
1253 drm_connector_list_iter_begin(ddev
, &iter
);
1254 drm_for_each_connector_iter(connector
, &iter
) {
1255 aconnector
= to_amdgpu_dm_connector(connector
);
1258 * this is the case when traversing through already created
1259 * MST connectors, should be skipped
1261 if (aconnector
->mst_port
)
1264 mutex_lock(&aconnector
->hpd_lock
);
1265 if (!dc_link_detect_sink(aconnector
->dc_link
, &new_connection_type
))
1266 DRM_ERROR("KMS: Failed to detect connector\n");
/* Forced connectors with no physical sink fall back to emulated detection. */
1268 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
)
1269 emulated_link_detect(aconnector
->dc_link
);
1271 dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
);
1273 if (aconnector
->fake_enable
&& aconnector
->dc_link
->local_sink
)
1274 aconnector
->fake_enable
= false;
/* Drop the stale pre-suspend sink before re-detecting. */
1276 if (aconnector
->dc_sink
)
1277 dc_sink_release(aconnector
->dc_sink
);
1278 aconnector
->dc_sink
= NULL
;
1279 amdgpu_dm_update_connector_after_detect(aconnector
);
1280 mutex_unlock(&aconnector
->hpd_lock
);
1282 drm_connector_list_iter_end(&iter
);
1284 /* Force mode set in atomic commit */
1285 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
)
1286 new_crtc_state
->active_changed
= true;
1289 * atomic_check is expected to create the dc states. We need to release
1290 * them here, since they were duplicated as part of the suspend
1293 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
) {
1294 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
1295 if (dm_new_crtc_state
->stream
) {
1296 WARN_ON(kref_read(&dm_new_crtc_state
->stream
->refcount
) > 1);
1297 dc_stream_release(dm_new_crtc_state
->stream
);
1298 dm_new_crtc_state
->stream
= NULL
;
1302 for_each_new_plane_in_state(dm
->cached_state
, plane
, new_plane_state
, i
) {
1303 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
1304 if (dm_new_plane_state
->dc_state
) {
1305 WARN_ON(kref_read(&dm_new_plane_state
->dc_state
->refcount
) > 1);
1306 dc_plane_state_release(dm_new_plane_state
->dc_state
);
1307 dm_new_plane_state
->dc_state
= NULL
;
/* Replay the atomic state cached by dm_suspend(); helper consumes it. */
1311 drm_atomic_helper_resume(ddev
, dm
->cached_state
);
1313 dm
->cached_state
= NULL
;
1315 amdgpu_dm_irq_resume_late(adev
);
1323 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1324 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1325 * the base driver's device list to be initialized and torn down accordingly.
1327 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
/*
 * IP-block dispatch table: wires the DM into amdgpu's common IP
 * init/teardown, suspend/resume and power-management flow.
 * NOTE(review): the .name member and closing brace were dropped by
 * extraction and are not visible here.
 */
1330 static const struct amd_ip_funcs amdgpu_dm_funcs
= {
1332 .early_init
= dm_early_init
,
1333 .late_init
= dm_late_init
,
1334 .sw_init
= dm_sw_init
,
1335 .sw_fini
= dm_sw_fini
,
1336 .hw_init
= dm_hw_init
,
1337 .hw_fini
= dm_hw_fini
,
1338 .suspend
= dm_suspend
,
1339 .resume
= dm_resume
,
1340 .is_idle
= dm_is_idle
,
1341 .wait_for_idle
= dm_wait_for_idle
,
1342 .check_soft_reset
= dm_check_soft_reset
,
1343 .soft_reset
= dm_soft_reset
,
1344 .set_clockgating_state
= dm_set_clockgating_state
,
1345 .set_powergating_state
= dm_set_powergating_state
,
/*
 * Version descriptor registered with the amdgpu base driver so the DM
 * is treated as the display (DCE) IP block.
 * NOTE(review): the major/minor/rev fields are not visible in this
 * extraction.
 */
1348 const struct amdgpu_ip_block_version dm_ip_block
=
1350 .type
= AMD_IP_BLOCK_TYPE_DCE
,
1354 .funcs
= &amdgpu_dm_funcs
,
/*
 * DRM mode-config callbacks: framebuffer creation, output polling,
 * and the DM's atomic check/commit entry points.
 */
1364 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs
= {
1365 .fb_create
= amdgpu_display_user_framebuffer_create
,
1366 .output_poll_changed
= drm_fb_helper_output_poll_changed
,
1367 .atomic_check
= amdgpu_dm_atomic_check
,
1368 .atomic_commit
= amdgpu_dm_atomic_commit
,
/* Helper vtable: DM supplies its own atomic commit-tail implementation. */
1371 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs
= {
1372 .atomic_commit_tail
= amdgpu_dm_atomic_commit_tail
/*
 * amdgpu_dm_update_connector_after_detect() - sync the DRM connector
 * with the sink state DC discovered during link detection.
 *
 * Takes the link's local_sink as the new candidate sink, handles the
 * forced/emulated-EDID case, updates the connector's EDID property,
 * freesync caps and CEC state, and carefully balances dc_sink
 * retain/release so refcounts survive userspace reshuffles.
 *
 * NOTE(review): extraction dropped some lines (return statements after
 * the early-out paths, a few braces); comments describe only visible
 * code.
 */
1376 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
)
1378 struct drm_connector
*connector
= &aconnector
->base
;
1379 struct drm_device
*dev
= connector
->dev
;
1380 struct dc_sink
*sink
;
1382 /* MST handled by drm_mst framework */
1383 if (aconnector
->mst_mgr
.mst_state
== true)
/* Take our own reference on the candidate sink while we work with it. */
1387 sink
= aconnector
->dc_link
->local_sink
;
1389 dc_sink_retain(sink
);
1392 * Edid mgmt connector gets first update only in mode_valid hook and then
1393 * the connector sink is set to either fake or physical sink depends on link status.
1394 * Skip if already done during boot.
1396 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
1397 && aconnector
->dc_em_sink
) {
1400 * For S3 resume with headless use eml_sink to fake stream
1401 * because on resume connector->sink is set to NULL
1403 mutex_lock(&dev
->mode_config
.mutex
);
1406 if (aconnector
->dc_sink
) {
1407 amdgpu_dm_update_freesync_caps(connector
, NULL
);
1409 * retain and release below are used to
1410 * bump up refcount for sink because the link doesn't point
1411 * to it anymore after disconnect, so on next crtc to connector
1412 * reshuffle by UMD we will get into unwanted dc_sink release
1414 dc_sink_release(aconnector
->dc_sink
);
1416 aconnector
->dc_sink
= sink
;
1417 dc_sink_retain(aconnector
->dc_sink
);
1418 amdgpu_dm_update_freesync_caps(connector
,
1421 amdgpu_dm_update_freesync_caps(connector
, NULL
);
/* Headless forced connector: fall back to the emulated sink. */
1422 if (!aconnector
->dc_sink
) {
1423 aconnector
->dc_sink
= aconnector
->dc_em_sink
;
1424 dc_sink_retain(aconnector
->dc_sink
);
1428 mutex_unlock(&dev
->mode_config
.mutex
);
1431 dc_sink_release(sink
);
1436 * TODO: temporary guard to look for proper fix
1437 * if this sink is MST sink, we should not do anything
1439 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
) {
1440 dc_sink_release(sink
);
/* Sink unchanged: likely a DP short pulse; nothing to update. */
1444 if (aconnector
->dc_sink
== sink
) {
1446 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1449 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1450 aconnector
->connector_id
);
1452 dc_sink_release(sink
);
1456 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1457 aconnector
->connector_id
, aconnector
->dc_sink
, sink
);
1459 mutex_lock(&dev
->mode_config
.mutex
);
1462 * 1. Update status of the drm connector
1463 * 2. Send an event and let userspace tell us what to do
1467 * TODO: check if we still need the S3 mode update workaround.
1468 * If yes, put it here.
1470 if (aconnector
->dc_sink
)
1471 amdgpu_dm_update_freesync_caps(connector
, NULL
);
/* Plug path: adopt the new sink and publish its EDID (if any). */
1473 aconnector
->dc_sink
= sink
;
1474 dc_sink_retain(aconnector
->dc_sink
);
1475 if (sink
->dc_edid
.length
== 0) {
1476 aconnector
->edid
= NULL
;
1477 drm_dp_cec_unset_edid(&aconnector
->dm_dp_aux
.aux
);
1480 (struct edid
*) sink
->dc_edid
.raw_edid
;
1483 drm_connector_update_edid_property(connector
,
1485 drm_dp_cec_set_edid(&aconnector
->dm_dp_aux
.aux
,
1488 amdgpu_dm_update_freesync_caps(connector
, aconnector
->edid
);
/* Unplug path: clear EDID, freesync, CEC and drop the old sink. */
1491 drm_dp_cec_unset_edid(&aconnector
->dm_dp_aux
.aux
);
1492 amdgpu_dm_update_freesync_caps(connector
, NULL
);
1493 drm_connector_update_edid_property(connector
, NULL
);
1494 aconnector
->num_modes
= 0;
1495 dc_sink_release(aconnector
->dc_sink
);
1496 aconnector
->dc_sink
= NULL
;
1497 aconnector
->edid
= NULL
;
1498 #ifdef CONFIG_DRM_AMD_DC_HDCP
1499 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
1500 if (connector
->state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_ENABLED
)
1501 connector
->state
->content_protection
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
1505 mutex_unlock(&dev
->mode_config
.mutex
);
/* Balance the retain taken at the top of the function. */
1508 dc_sink_release(sink
);
/*
 * handle_hpd_irq() - deferred handler for a long HPD pulse
 * (connect/disconnect) on a single connector.
 *
 * @param: the amdgpu_dm_connector the IRQ was registered for (see
 * register_hpd_handlers()).
 *
 * Re-detects the link, updates the connector, restores the DRM
 * connector state under the modeset locks, and sends a hotplug event
 * to userspace. All of it runs under the per-connector hpd_lock.
 */
1511 static void handle_hpd_irq(void *param
)
1513 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
1514 struct drm_connector
*connector
= &aconnector
->base
;
1515 struct drm_device
*dev
= connector
->dev
;
1516 enum dc_connection_type new_connection_type
= dc_connection_none
;
1517 #ifdef CONFIG_DRM_AMD_DC_HDCP
1518 struct amdgpu_device
*adev
= dev
->dev_private
;
1522 * In case of failure or MST no need to update connector status or notify the OS
1523 * since (for MST case) MST does this in its own context.
1525 mutex_lock(&aconnector
->hpd_lock
);
1527 #ifdef CONFIG_DRM_AMD_DC_HDCP
/* HDCP state is invalid across a hotplug; reset it (Raven and newer). */
1528 if (adev
->asic_type
>= CHIP_RAVEN
)
1529 hdcp_reset_display(adev
->dm
.hdcp_workqueue
, aconnector
->dc_link
->link_index
);
1531 if (aconnector
->fake_enable
)
1532 aconnector
->fake_enable
= false;
1534 if (!dc_link_detect_sink(aconnector
->dc_link
, &new_connection_type
))
1535 DRM_ERROR("KMS: Failed to detect connector\n");
/* Forced connector with nothing attached: use emulated detection. */
1537 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
1538 emulated_link_detect(aconnector
->dc_link
);
1541 drm_modeset_lock_all(dev
);
1542 dm_restore_drm_connector_state(dev
, connector
);
1543 drm_modeset_unlock_all(dev
);
1545 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
1546 drm_kms_helper_hotplug_event(dev
);
1548 } else if (dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
)) {
1549 amdgpu_dm_update_connector_after_detect(aconnector
);
1552 drm_modeset_lock_all(dev
);
1553 dm_restore_drm_connector_state(dev
, connector
);
1554 drm_modeset_unlock_all(dev
);
1556 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
1557 drm_kms_helper_hotplug_event(dev
);
1559 mutex_unlock(&aconnector
->hpd_lock
);
/*
 * dm_handle_hpd_rx_irq() - service DP short-pulse (hpd_rx) interrupts
 * by draining the sink's ESI/IRQ-vector DPCD registers.
 *
 * Reads the downstream-IRQ registers (legacy 0x200 range for DPCD rev
 * < 1.2, ESI 0x2002 range otherwise), lets the MST manager process the
 * ESI bytes, ACKs handled IRQs back to the sink (with retries), and
 * loops until no new IRQ is pending or max_process_count is hit.
 *
 * NOTE(review): several declarations (dret, wret, dpcd_addr, retry,
 * loop counters) and some statement tails are missing from this
 * extraction.
 */
1563 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector
*aconnector
)
1565 uint8_t esi
[DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
] = { 0 };
1567 bool new_irq_handled
= false;
1569 int dpcd_bytes_to_read
;
1571 const int max_process_count
= 30;
1572 int process_count
= 0;
1574 const struct dc_link_status
*link_status
= dc_link_get_status(aconnector
->dc_link
);
/* Pre-DPCD-1.2 sinks expose the IRQ vector at a different address. */
1576 if (link_status
->dpcd_caps
->dpcd_rev
.raw
< 0x12) {
1577 dpcd_bytes_to_read
= DP_LANE0_1_STATUS
- DP_SINK_COUNT
;
1578 /* DPCD 0x200 - 0x201 for downstream IRQ */
1579 dpcd_addr
= DP_SINK_COUNT
;
1581 dpcd_bytes_to_read
= DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
;
1582 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
1583 dpcd_addr
= DP_SINK_COUNT_ESI
;
1586 dret
= drm_dp_dpcd_read(
1587 &aconnector
->dm_dp_aux
.aux
,
1590 dpcd_bytes_to_read
);
/* Keep servicing as long as full reads succeed and IRQs keep arriving. */
1592 while (dret
== dpcd_bytes_to_read
&&
1593 process_count
< max_process_count
) {
1599 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi
[0], esi
[1], esi
[2]);
1600 /* handle HPD short pulse irq */
1601 if (aconnector
->mst_mgr
.mst_state
)
1603 &aconnector
->mst_mgr
,
1607 if (new_irq_handled
) {
1608 /* ACK at DPCD to notify down stream */
1609 const int ack_dpcd_bytes_to_write
=
1610 dpcd_bytes_to_read
- 1;
/* Retry the ACK write a few times; AUX transfers can fail transiently. */
1612 for (retry
= 0; retry
< 3; retry
++) {
1615 wret
= drm_dp_dpcd_write(
1616 &aconnector
->dm_dp_aux
.aux
,
1619 ack_dpcd_bytes_to_write
);
1620 if (wret
== ack_dpcd_bytes_to_write
)
1624 /* check if there is new irq to be handled */
1625 dret
= drm_dp_dpcd_read(
1626 &aconnector
->dm_dp_aux
.aux
,
1629 dpcd_bytes_to_read
);
1631 new_irq_handled
= false;
1637 if (process_count
== max_process_count
)
1638 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
/*
 * handle_hpd_rx_irq() - deferred handler for DP short-pulse (hpd_rx)
 * interrupts on a connector.
 *
 * Lets DC classify the short pulse; on a downstream-port status change
 * for a non-MST-root connector it redoes link detection and notifies
 * userspace. Forwards HDCP CP_IRQs to the HDCP workqueue, drains sink
 * IRQ vectors via dm_handle_hpd_rx_irq(), and kicks CEC. The hpd_lock
 * is only taken for non-MST-branch links (see the TODO below).
 */
1641 static void handle_hpd_rx_irq(void *param
)
1643 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
1644 struct drm_connector
*connector
= &aconnector
->base
;
1645 struct drm_device
*dev
= connector
->dev
;
1646 struct dc_link
*dc_link
= aconnector
->dc_link
;
1647 bool is_mst_root_connector
= aconnector
->mst_mgr
.mst_state
;
1648 enum dc_connection_type new_connection_type
= dc_connection_none
;
1649 #ifdef CONFIG_DRM_AMD_DC_HDCP
1650 union hpd_irq_data hpd_irq_data
;
1651 struct amdgpu_device
*adev
= dev
->dev_private
;
1653 memset(&hpd_irq_data
, 0, sizeof(hpd_irq_data
));
1657 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
1658 * conflict, after implement i2c helper, this mutex should be
1661 if (dc_link
->type
!= dc_connection_mst_branch
)
1662 mutex_lock(&aconnector
->hpd_lock
);
1665 #ifdef CONFIG_DRM_AMD_DC_HDCP
/* HDCP builds also capture the raw IRQ data so CP_IRQ can be checked below. */
1666 if (dc_link_handle_hpd_rx_irq(dc_link
, &hpd_irq_data
, NULL
) &&
1668 if (dc_link_handle_hpd_rx_irq(dc_link
, NULL
, NULL
) &&
1670 !is_mst_root_connector
) {
1671 /* Downstream Port status changed. */
1672 if (!dc_link_detect_sink(dc_link
, &new_connection_type
))
1673 DRM_ERROR("KMS: Failed to detect connector\n");
1675 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
1676 emulated_link_detect(dc_link
);
1678 if (aconnector
->fake_enable
)
1679 aconnector
->fake_enable
= false;
1681 amdgpu_dm_update_connector_after_detect(aconnector
);
1684 drm_modeset_lock_all(dev
);
1685 dm_restore_drm_connector_state(dev
, connector
);
1686 drm_modeset_unlock_all(dev
);
1688 drm_kms_helper_hotplug_event(dev
);
1689 } else if (dc_link_detect(dc_link
, DETECT_REASON_HPDRX
)) {
1691 if (aconnector
->fake_enable
)
1692 aconnector
->fake_enable
= false;
1694 amdgpu_dm_update_connector_after_detect(aconnector
);
1697 drm_modeset_lock_all(dev
);
1698 dm_restore_drm_connector_state(dev
, connector
);
1699 drm_modeset_unlock_all(dev
);
1701 drm_kms_helper_hotplug_event(dev
);
1704 #ifdef CONFIG_DRM_AMD_DC_HDCP
1705 if (hpd_irq_data
.bytes
.device_service_irq
.bits
.CP_IRQ
)
1706 hdcp_handle_cpirq(adev
->dm
.hdcp_workqueue
, aconnector
->base
.index
);
/* Drain sink IRQ vectors on trained links and on MST branch devices. */
1708 if ((dc_link
->cur_link_settings
.lane_count
!= LANE_COUNT_UNKNOWN
) ||
1709 (dc_link
->type
== dc_connection_mst_branch
))
1710 dm_handle_hpd_rx_irq(aconnector
);
1712 if (dc_link
->type
!= dc_connection_mst_branch
) {
1713 drm_dp_cec_irq(&aconnector
->dm_dp_aux
.aux
);
1714 mutex_unlock(&aconnector
->hpd_lock
);
/*
 * register_hpd_handlers() - register handle_hpd_irq()/handle_hpd_rx_irq()
 * for every connector that exposes a valid HPD / HPD-RX IRQ source.
 *
 * Both handlers run in low (deferred) IRQ context. The handler
 * arguments passed to amdgpu_dm_irq_register_interrupt() are not
 * visible in this extraction, only the (void *)aconnector cookie.
 */
1718 static void register_hpd_handlers(struct amdgpu_device
*adev
)
1720 struct drm_device
*dev
= adev
->ddev
;
1721 struct drm_connector
*connector
;
1722 struct amdgpu_dm_connector
*aconnector
;
1723 const struct dc_link
*dc_link
;
1724 struct dc_interrupt_params int_params
= {0};
1726 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1727 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1729 list_for_each_entry(connector
,
1730 &dev
->mode_config
.connector_list
, head
) {
1732 aconnector
= to_amdgpu_dm_connector(connector
);
1733 dc_link
= aconnector
->dc_link
;
/* Long-pulse HPD (connect/disconnect). */
1735 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd
) {
1736 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1737 int_params
.irq_source
= dc_link
->irq_source_hpd
;
1739 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1741 (void *) aconnector
);
1744 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd_rx
) {
1746 /* Also register for DP short pulse (hpd_rx). */
1747 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1748 int_params
.irq_source
= dc_link
->irq_source_hpd_rx
;
1750 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1752 (void *) aconnector
);
1757 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * dce110_register_irq_handlers() - DCE-family IRQ setup.
 *
 * For each VBLANK, VUPDATE and pageflip source: register a set()
 * callback with the base driver via amdgpu_irq_add_id(), then bind the
 * DM's high-IRQ-context handler to the corresponding DC IRQ source
 * with a per-source common_irq_params cookie. Finally adds the HPD IRQ
 * id and registers the per-connector HPD handlers.
 *
 * NOTE(review): declarations of i and r, error-path returns and the
 * final return are missing from this extraction.
 */
1758 static int dce110_register_irq_handlers(struct amdgpu_device
*adev
)
1760 struct dc
*dc
= adev
->dm
.dc
;
1761 struct common_irq_params
*c_irq_params
;
1762 struct dc_interrupt_params int_params
= {0};
1765 unsigned client_id
= AMDGPU_IRQ_CLIENTID_LEGACY
;
/* Vega and newer route DCE interrupts through the SOC15 IH client. */
1767 if (adev
->asic_type
>= CHIP_VEGA10
)
1768 client_id
= SOC15_IH_CLIENTID_DCE
;
1770 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1771 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1774 * Actions of amdgpu_irq_add_id():
1775 * 1. Register a set() function with base driver.
1776 * Base driver will call set() function to enable/disable an
1777 * interrupt in DC hardware.
1778 * 2. Register amdgpu_dm_irq_handler().
1779 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1780 * coming from DC hardware.
1781 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1782 * for acknowledging and handling. */
1784 /* Use VBLANK interrupt */
1785 for (i
= VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
; i
<= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0
; i
++) {
1786 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->crtc_irq
);
1788 DRM_ERROR("Failed to add crtc irq id!\n");
1792 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1793 int_params
.irq_source
=
1794 dc_interrupt_to_irq_source(dc
, i
, 0);
1796 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1798 c_irq_params
->adev
= adev
;
1799 c_irq_params
->irq_src
= int_params
.irq_source
;
1801 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1802 dm_crtc_high_irq
, c_irq_params
);
1805 /* Use VUPDATE interrupt */
1806 for (i
= VISLANDS30_IV_SRCID_D1_V_UPDATE_INT
; i
<= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT
; i
+= 2) {
1807 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->vupdate_irq
);
1809 DRM_ERROR("Failed to add vupdate irq id!\n");
1813 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1814 int_params
.irq_source
=
1815 dc_interrupt_to_irq_source(dc
, i
, 0);
1817 c_irq_params
= &adev
->dm
.vupdate_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VUPDATE1
];
1819 c_irq_params
->adev
= adev
;
1820 c_irq_params
->irq_src
= int_params
.irq_source
;
1822 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1823 dm_vupdate_high_irq
, c_irq_params
);
1826 /* Use GRPH_PFLIP interrupt */
1827 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
1828 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
1829 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
1831 DRM_ERROR("Failed to add page flip irq id!\n");
1835 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1836 int_params
.irq_source
=
1837 dc_interrupt_to_irq_source(dc
, i
, 0);
1839 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1841 c_irq_params
->adev
= adev
;
1842 c_irq_params
->irq_src
= int_params
.irq_source
;
1844 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1845 dm_pflip_high_irq
, c_irq_params
);
/* HPD is a single IRQ id; per-connector handlers registered below. */
1850 r
= amdgpu_irq_add_id(adev
, client_id
,
1851 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
1853 DRM_ERROR("Failed to add hpd irq id!\n");
1857 register_hpd_handlers(adev
);
1862 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1863 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * dcn10_register_irq_handlers() - DCN-family IRQ setup; mirrors
 * dce110_register_irq_handlers() but uses DCN source ids (VSTARTUP in
 * place of VBLANK, VUPDATE_NO_LOCK, HUBP flip) and always the SOC15
 * DCE IH client. Ranges are sized by mode_info.num_crtc.
 *
 * NOTE(review): declarations of i and r, error-path returns and the
 * final return are missing from this extraction.
 */
1864 static int dcn10_register_irq_handlers(struct amdgpu_device
*adev
)
1866 struct dc
*dc
= adev
->dm
.dc
;
1867 struct common_irq_params
*c_irq_params
;
1868 struct dc_interrupt_params int_params
= {0};
1872 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1873 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1876 * Actions of amdgpu_irq_add_id():
1877 * 1. Register a set() function with base driver.
1878 * Base driver will call set() function to enable/disable an
1879 * interrupt in DC hardware.
1880 * 2. Register amdgpu_dm_irq_handler().
1881 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1882 * coming from DC hardware.
1883 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1884 * for acknowledging and handling.
1887 /* Use VSTARTUP interrupt */
1888 for (i
= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
;
1889 i
<= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
+ adev
->mode_info
.num_crtc
- 1;
1891 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->crtc_irq
);
1894 DRM_ERROR("Failed to add crtc irq id!\n");
1898 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1899 int_params
.irq_source
=
1900 dc_interrupt_to_irq_source(dc
, i
, 0);
1902 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1904 c_irq_params
->adev
= adev
;
1905 c_irq_params
->irq_src
= int_params
.irq_source
;
1907 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1908 dm_crtc_high_irq
, c_irq_params
);
1911 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
1912 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
1913 * to trigger at end of each vblank, regardless of state of the lock,
1914 * matching DCE behaviour.
1916 for (i
= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT
;
1917 i
<= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT
+ adev
->mode_info
.num_crtc
- 1;
1919 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->vupdate_irq
);
1922 DRM_ERROR("Failed to add vupdate irq id!\n");
1926 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1927 int_params
.irq_source
=
1928 dc_interrupt_to_irq_source(dc
, i
, 0);
1930 c_irq_params
= &adev
->dm
.vupdate_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VUPDATE1
];
1932 c_irq_params
->adev
= adev
;
1933 c_irq_params
->irq_src
= int_params
.irq_source
;
1935 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1936 dm_vupdate_high_irq
, c_irq_params
);
1939 /* Use GRPH_PFLIP interrupt */
1940 for (i
= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
;
1941 i
<= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
+ adev
->mode_info
.num_crtc
- 1;
1943 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->pageflip_irq
);
1945 DRM_ERROR("Failed to add page flip irq id!\n");
1949 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1950 int_params
.irq_source
=
1951 dc_interrupt_to_irq_source(dc
, i
, 0);
1953 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1955 c_irq_params
->adev
= adev
;
1956 c_irq_params
->irq_src
= int_params
.irq_source
;
1958 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1959 dm_pflip_high_irq
, c_irq_params
);
/* Single HPD IRQ id; per-connector handlers registered below. */
1964 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, DCN_1_0__SRCID__DC_HPD1_INT
,
1967 DRM_ERROR("Failed to add hpd irq id!\n");
1971 register_hpd_handlers(adev
);
1978 * Acquires the lock for the atomic state object and returns
1979 * the new atomic state.
1981 * This should only be called during atomic check.
/*
 * dm_atomic_get_state() - fetch (and lock) the DM's private atomic
 * state object from a drm_atomic_state, storing the result through
 * @dm_state. Returns 0 on success or the PTR_ERR from
 * drm_atomic_get_private_obj_state() on failure.
 */
1983 static int dm_atomic_get_state(struct drm_atomic_state
*state
,
1984 struct dm_atomic_state
**dm_state
)
1986 struct drm_device
*dev
= state
->dev
;
1987 struct amdgpu_device
*adev
= dev
->dev_private
;
1988 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1989 struct drm_private_state
*priv_state
;
1994 priv_state
= drm_atomic_get_private_obj_state(state
, &dm
->atomic_obj
);
1995 if (IS_ERR(priv_state
))
1996 return PTR_ERR(priv_state
);
1998 *dm_state
= to_dm_atomic_state(priv_state
);
/*
 * dm_atomic_get_new_state() - return the NEW DM private state tracked
 * in @state, or (per the visible control flow) fall through when the
 * DM's private object is not part of this atomic state.
 */
2003 struct dm_atomic_state
*
2004 dm_atomic_get_new_state(struct drm_atomic_state
*state
)
2006 struct drm_device
*dev
= state
->dev
;
2007 struct amdgpu_device
*adev
= dev
->dev_private
;
2008 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2009 struct drm_private_obj
*obj
;
2010 struct drm_private_state
*new_obj_state
;
/* Identify our object among the state's private objects by its funcs. */
2013 for_each_new_private_obj_in_state(state
, obj
, new_obj_state
, i
) {
2014 if (obj
->funcs
== dm
->atomic_obj
.funcs
)
2015 return to_dm_atomic_state(new_obj_state
);
/*
 * dm_atomic_get_old_state() - mirror of dm_atomic_get_new_state() for
 * the OLD (pre-commit) DM private state tracked in @state.
 */
2021 struct dm_atomic_state
*
2022 dm_atomic_get_old_state(struct drm_atomic_state
*state
)
2024 struct drm_device
*dev
= state
->dev
;
2025 struct amdgpu_device
*adev
= dev
->dev_private
;
2026 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2027 struct drm_private_obj
*obj
;
2028 struct drm_private_state
*old_obj_state
;
/* Identify our object among the state's private objects by its funcs. */
2031 for_each_old_private_obj_in_state(state
, obj
, old_obj_state
, i
) {
2032 if (obj
->funcs
== dm
->atomic_obj
.funcs
)
2033 return to_dm_atomic_state(old_obj_state
);
/*
 * dm_atomic_duplicate_state() - .atomic_duplicate_state hook for the
 * DM private object: allocate a new dm_atomic_state and deep-copy the
 * current dc context into it via dc_copy_state().
 * NOTE(review): the NULL-alloc early return and the kfree on the
 * !new_state->context error path are not visible in this extraction.
 */
2039 static struct drm_private_state
*
2040 dm_atomic_duplicate_state(struct drm_private_obj
*obj
)
2042 struct dm_atomic_state
*old_state
, *new_state
;
2044 new_state
= kzalloc(sizeof(*new_state
), GFP_KERNEL
);
2048 __drm_atomic_helper_private_obj_duplicate_state(obj
, &new_state
->base
);
2050 old_state
= to_dm_atomic_state(obj
->state
);
2052 if (old_state
&& old_state
->context
)
2053 new_state
->context
= dc_copy_state(old_state
->context
);
2055 if (!new_state
->context
) {
2060 return &new_state
->base
;
/*
 * dm_atomic_destroy_state() - .atomic_destroy_state hook: drop the dc
 * context reference held by the state. The kfree of the state struct
 * itself is not visible in this extraction.
 */
2063 static void dm_atomic_destroy_state(struct drm_private_obj
*obj
,
2064 struct drm_private_state
*state
)
2066 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
2068 if (dm_state
&& dm_state
->context
)
2069 dc_release_state(dm_state
->context
);
/* Vtable binding the two hooks above to the DM's private atomic object. */
2074 static struct drm_private_state_funcs dm_atomic_state_funcs
= {
2075 .atomic_duplicate_state
= dm_atomic_duplicate_state
,
2076 .atomic_destroy_state
= dm_atomic_destroy_state
,
/*
 * amdgpu_dm_mode_config_init() - set up DRM mode-config limits and
 * callbacks, create the DM's private atomic state object seeded from
 * the current dc state, and create modeset properties and audio.
 *
 * NOTE(review): the drm_mode_config_init() call, alloc-failure
 * returns, kfree error paths and the final return are not visible in
 * this extraction.
 */
2079 static int amdgpu_dm_mode_config_init(struct amdgpu_device
*adev
)
2081 struct dm_atomic_state
*state
;
2084 adev
->mode_info
.mode_config_initialized
= true;
2086 adev
->ddev
->mode_config
.funcs
= (void *)&amdgpu_dm_mode_funcs
;
2087 adev
->ddev
->mode_config
.helper_private
= &amdgpu_dm_mode_config_helperfuncs
;
2089 adev
->ddev
->mode_config
.max_width
= 16384;
2090 adev
->ddev
->mode_config
.max_height
= 16384;
2092 adev
->ddev
->mode_config
.preferred_depth
= 24;
2093 adev
->ddev
->mode_config
.prefer_shadow
= 1;
2094 /* indicates support for immediate flip */
2095 adev
->ddev
->mode_config
.async_page_flip
= true;
2097 adev
->ddev
->mode_config
.fb_base
= adev
->gmc
.aper_base
;
/* Seed the private atomic object with a copy of the current dc state. */
2099 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2103 state
->context
= dc_create_state(adev
->dm
.dc
);
2104 if (!state
->context
) {
2109 dc_resource_state_copy_construct_current(adev
->dm
.dc
, state
->context
);
2111 drm_atomic_private_obj_init(adev
->ddev
,
2112 &adev
->dm
.atomic_obj
,
2114 &dm_atomic_state_funcs
);
2116 r
= amdgpu_display_modeset_create_props(adev
);
2120 r
= amdgpu_dm_audio_init(adev
);
2127 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2128 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2130 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2131 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/*
 * amdgpu_dm_update_backlight_caps() - populate dm->backlight_caps.
 *
 * With ACPI: query the platform's min/max input-signal range once
 * (cached via caps_valid) and fall back to the AMDGPU_DM_DEFAULT_*
 * values if ACPI reports nothing valid. Without ACPI: always use the
 * defaults.
 */
2133 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager
*dm
)
2135 #if defined(CONFIG_ACPI)
2136 struct amdgpu_dm_backlight_caps caps
;
/* Already queried once; keep the cached values. */
2138 if (dm
->backlight_caps
.caps_valid
)
2141 amdgpu_acpi_get_backlight_caps(dm
->adev
, &caps
);
2142 if (caps
.caps_valid
) {
2143 dm
->backlight_caps
.min_input_signal
= caps
.min_input_signal
;
2144 dm
->backlight_caps
.max_input_signal
= caps
.max_input_signal
;
2145 dm
->backlight_caps
.caps_valid
= true;
2147 dm
->backlight_caps
.min_input_signal
=
2148 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT
;
2149 dm
->backlight_caps
.max_input_signal
=
2150 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT
;
2153 dm
->backlight_caps
.min_input_signal
= AMDGPU_DM_DEFAULT_MIN_BACKLIGHT
;
2154 dm
->backlight_caps
.max_input_signal
= AMDGPU_DM_DEFAULT_MAX_BACKLIGHT
;
/*
 * amdgpu_dm_backlight_update_status() - backlight_ops .update_status
 * hook: rescale the 0-255 sysfs brightness into the panel's
 * min/max input-signal range (scaled by 0x101 for the DC interface)
 * and program it via dc_link_set_backlight_level().
 * NOTE(review): the rescaling expression's first factor and the
 * return statements are not visible in this extraction.
 */
2158 static int amdgpu_dm_backlight_update_status(struct backlight_device
*bd
)
2160 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
2161 struct amdgpu_dm_backlight_caps caps
;
2162 uint32_t brightness
= bd
->props
.brightness
;
2164 amdgpu_dm_update_backlight_caps(dm
);
2165 caps
= dm
->backlight_caps
;
2167 * The brightness input is in the range 0-255
2168 * It needs to be rescaled to be between the
2169 * requested min and max input signal
2171 * It also needs to be scaled up by 0x101 to
2172 * match the DC interface which has a range of
2178 * (caps
.max_input_signal
- caps
.min_input_signal
)
2179 / AMDGPU_MAX_BL_LEVEL
2180 + caps
.min_input_signal
* 0x101;
2182 if (dc_link_set_backlight_level(dm
->backlight_link
,
/*
 * amdgpu_dm_backlight_get_brightness() - backlight_ops .get_brightness
 * hook: read the level back from DC, falling back to the last value
 * userspace set if DC returns DC_ERROR_UNEXPECTED.
 */
2189 static int amdgpu_dm_backlight_get_brightness(struct backlight_device
*bd
)
2191 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
2192 int ret
= dc_link_get_backlight_level(dm
->backlight_link
);
2194 if (ret
== DC_ERROR_UNEXPECTED
)
2195 return bd
->props
.brightness
;
/* Backlight class driver ops; core handles suspend/resume for us. */
2199 static const struct backlight_ops amdgpu_dm_backlight_ops
= {
2200 .options
= BL_CORE_SUSPENDRESUME
,
2201 .get_brightness
= amdgpu_dm_backlight_get_brightness
,
2202 .update_status
= amdgpu_dm_backlight_update_status
,
/*
 * amdgpu_dm_register_backlight_device() - create the "amdgpu_blN"
 * backlight class device for this DM instance.
 *
 * Registration failure is logged but deliberately not fatal (see
 * register_backlight_device()). The bl_name buffer declaration and
 * some backlight_device_register() arguments are not visible in this
 * extraction.
 */
2206 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager
*dm
)
2209 struct backlight_properties props
= { 0 };
2211 amdgpu_dm_update_backlight_caps(dm
);
2213 props
.max_brightness
= AMDGPU_MAX_BL_LEVEL
;
2214 props
.brightness
= AMDGPU_MAX_BL_LEVEL
;
2215 props
.type
= BACKLIGHT_RAW
;
/* Name is unique per DRM minor: amdgpu_bl<minor index>. */
2217 snprintf(bl_name
, sizeof(bl_name
), "amdgpu_bl%d",
2218 dm
->adev
->ddev
->primary
->index
);
2220 dm
->backlight_dev
= backlight_device_register(bl_name
,
2221 dm
->adev
->ddev
->dev
,
2223 &amdgpu_dm_backlight_ops
,
2226 if (IS_ERR(dm
->backlight_dev
))
2227 DRM_ERROR("DM: Backlight registration failed!\n");
2229 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name
);
/*
 * initialize_plane() - allocate and initialize one DRM plane of
 * @plane_type with capabilities @plane_cap, storing it in
 * mode_info->planes[@plane_id] when @mode_info is non-NULL.
 *
 * NOTE(review): the declaration of ret, the early returns and the
 * kfree on the init-failure path are not visible in this extraction.
 */
2234 static int initialize_plane(struct amdgpu_display_manager
*dm
,
2235 struct amdgpu_mode_info
*mode_info
, int plane_id
,
2236 enum drm_plane_type plane_type
,
2237 const struct dc_plane_cap
*plane_cap
)
2239 struct drm_plane
*plane
;
2240 unsigned long possible_crtcs
;
2243 plane
= kzalloc(sizeof(struct drm_plane
), GFP_KERNEL
);
2245 DRM_ERROR("KMS: Failed to allocate plane\n");
2248 plane
->type
= plane_type
;
2251 * HACK: IGT tests expect that the primary plane for a CRTC
2252 * can only have one possible CRTC. Only expose support for
2253 * any CRTC if they're not going to be used as a primary plane
2254 * for a CRTC - like overlay or underlay planes.
2256 possible_crtcs
= 1 << plane_id
;
2257 if (plane_id
>= dm
->dc
->caps
.max_streams
)
2258 possible_crtcs
= 0xff;
2260 ret
= amdgpu_dm_plane_init(dm
, plane
, possible_crtcs
, plane_cap
);
2263 DRM_ERROR("KMS: Failed to initialize plane\n");
/* Overlay planes pass mode_info == NULL and are not recorded here. */
2269 mode_info
->planes
[plane_id
] = plane
;
/*
 * register_backlight_device() - register a backlight device for the
 * first connected eDP/LVDS link and remember it as the backlight link.
 * Compiled out entirely when no backlight class support is configured.
 */
2275 static void register_backlight_device(struct amdgpu_display_manager
*dm
,
2276 struct dc_link
*link
)
2278 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2279 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2281 if ((link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) &&
2282 link
->type
!= dc_connection_none
) {
2284 * Even if registration failed, we should continue with
2285 * DM initialization because not having a backlight control
2286 * is better than a black screen.
2288 amdgpu_dm_register_backlight_device(dm
);
2290 if (dm
->backlight_dev
)
2291 dm
->backlight_link
= link
;
2298 * In this architecture, the association
2299 * connector -> encoder -> crtc
2300 * is not really required. The crtc and connector will hold the
2301 * display_index as an abstraction to use with DAL component
2303 * Returns 0 on success
2305 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
)
2307 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2309 struct amdgpu_dm_connector
*aconnector
= NULL
;
2310 struct amdgpu_encoder
*aencoder
= NULL
;
2311 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
2313 int32_t primary_planes
;
2314 enum dc_connection_type new_connection_type
= dc_connection_none
;
2315 const struct dc_plane_cap
*plane
;
2317 link_cnt
= dm
->dc
->caps
.max_links
;
2318 if (amdgpu_dm_mode_config_init(dm
->adev
)) {
2319 DRM_ERROR("DM: Failed to initialize mode config\n");
2323 /* There is one primary plane per CRTC */
2324 primary_planes
= dm
->dc
->caps
.max_streams
;
2325 ASSERT(primary_planes
<= AMDGPU_MAX_PLANES
);
2328 * Initialize primary planes, implicit planes for legacy IOCTLS.
2329 * Order is reversed to match iteration order in atomic check.
2331 for (i
= (primary_planes
- 1); i
>= 0; i
--) {
2332 plane
= &dm
->dc
->caps
.planes
[i
];
2334 if (initialize_plane(dm
, mode_info
, i
,
2335 DRM_PLANE_TYPE_PRIMARY
, plane
)) {
2336 DRM_ERROR("KMS: Failed to initialize primary plane\n");
2342 * Initialize overlay planes, index starting after primary planes.
2343 * These planes have a higher DRM index than the primary planes since
2344 * they should be considered as having a higher z-order.
2345 * Order is reversed to match iteration order in atomic check.
2347 * Only support DCN for now, and only expose one so we don't encourage
2348 * userspace to use up all the pipes.
2350 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; ++i
) {
2351 struct dc_plane_cap
*plane
= &dm
->dc
->caps
.planes
[i
];
2353 if (plane
->type
!= DC_PLANE_TYPE_DCN_UNIVERSAL
)
2356 if (!plane
->blends_with_above
|| !plane
->blends_with_below
)
2359 if (!plane
->pixel_format_support
.argb8888
)
2362 if (initialize_plane(dm
, NULL
, primary_planes
+ i
,
2363 DRM_PLANE_TYPE_OVERLAY
, plane
)) {
2364 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2368 /* Only create one overlay plane. */
2372 for (i
= 0; i
< dm
->dc
->caps
.max_streams
; i
++)
2373 if (amdgpu_dm_crtc_init(dm
, mode_info
->planes
[i
], i
)) {
2374 DRM_ERROR("KMS: Failed to initialize crtc\n");
2378 dm
->display_indexes_num
= dm
->dc
->caps
.max_streams
;
2380 /* loops over all connectors on the board */
2381 for (i
= 0; i
< link_cnt
; i
++) {
2382 struct dc_link
*link
= NULL
;
2384 if (i
> AMDGPU_DM_MAX_DISPLAY_INDEX
) {
2386 "KMS: Cannot support more than %d display indexes\n",
2387 AMDGPU_DM_MAX_DISPLAY_INDEX
);
2391 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
2395 aencoder
= kzalloc(sizeof(*aencoder
), GFP_KERNEL
);
2399 if (amdgpu_dm_encoder_init(dm
->ddev
, aencoder
, i
)) {
2400 DRM_ERROR("KMS: Failed to initialize encoder\n");
2404 if (amdgpu_dm_connector_init(dm
, aconnector
, i
, aencoder
)) {
2405 DRM_ERROR("KMS: Failed to initialize connector\n");
2409 link
= dc_get_link_at_index(dm
->dc
, i
);
2411 if (!dc_link_detect_sink(link
, &new_connection_type
))
2412 DRM_ERROR("KMS: Failed to detect connector\n");
2414 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
2415 emulated_link_detect(link
);
2416 amdgpu_dm_update_connector_after_detect(aconnector
);
2418 } else if (dc_link_detect(link
, DETECT_REASON_BOOT
)) {
2419 amdgpu_dm_update_connector_after_detect(aconnector
);
2420 register_backlight_device(dm
, link
);
2426 /* Software is initialized. Now we can register interrupt handlers. */
2427 switch (adev
->asic_type
) {
2437 case CHIP_POLARIS11
:
2438 case CHIP_POLARIS10
:
2439 case CHIP_POLARIS12
:
2444 if (dce110_register_irq_handlers(dm
->adev
)) {
2445 DRM_ERROR("DM: Failed to initialize IRQ\n");
2449 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2451 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2456 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2459 if (dcn10_register_irq_handlers(dm
->adev
)) {
2460 DRM_ERROR("DM: Failed to initialize IRQ\n");
2466 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
2470 if (adev
->asic_type
!= CHIP_CARRIZO
&& adev
->asic_type
!= CHIP_STONEY
)
2471 dm
->dc
->debug
.disable_stutter
= amdgpu_pp_feature_mask
& PP_STUTTER_MODE
? false : true;
2481 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
)
2483 drm_mode_config_cleanup(dm
->ddev
);
2484 drm_atomic_private_obj_fini(&dm
->atomic_obj
);
2488 /******************************************************************************
2489 * amdgpu_display_funcs functions
2490 *****************************************************************************/
2493 * dm_bandwidth_update - program display watermarks
2495 * @adev: amdgpu_device pointer
2497 * Calculate and program the display watermarks and line buffer allocation.
2499 static void dm_bandwidth_update(struct amdgpu_device
*adev
)
2501 /* TODO: implement later */
2504 static const struct amdgpu_display_funcs dm_display_funcs
= {
2505 .bandwidth_update
= dm_bandwidth_update
, /* called unconditionally */
2506 .vblank_get_counter
= dm_vblank_get_counter
,/* called unconditionally */
2507 .backlight_set_level
= NULL
, /* never called for DC */
2508 .backlight_get_level
= NULL
, /* never called for DC */
2509 .hpd_sense
= NULL
,/* called unconditionally */
2510 .hpd_set_polarity
= NULL
, /* called unconditionally */
2511 .hpd_get_gpio_reg
= NULL
, /* VBIOS parsing. DAL does it. */
2512 .page_flip_get_scanoutpos
=
2513 dm_crtc_get_scanoutpos
,/* called unconditionally */
2514 .add_encoder
= NULL
, /* VBIOS parsing. DAL does it. */
2515 .add_connector
= NULL
, /* VBIOS parsing. DAL does it. */
#if defined(CONFIG_DEBUG_KERNEL_DC)

/*
 * Debug-only sysfs hook: writing a nonzero integer resumes the display
 * manager and fires a hotplug event; writing zero suspends it.
 * Returns @count on success, 0 if the input failed to parse.
 *
 * NOTE(review): the suspend/resume body lines were dropped by the
 * extraction and restored from the upstream layout — confirm.
 */
static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif
2547 static int dm_early_init(void *handle
)
2549 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
2551 switch (adev
->asic_type
) {
2554 adev
->mode_info
.num_crtc
= 6;
2555 adev
->mode_info
.num_hpd
= 6;
2556 adev
->mode_info
.num_dig
= 6;
2559 adev
->mode_info
.num_crtc
= 4;
2560 adev
->mode_info
.num_hpd
= 6;
2561 adev
->mode_info
.num_dig
= 7;
2565 adev
->mode_info
.num_crtc
= 2;
2566 adev
->mode_info
.num_hpd
= 6;
2567 adev
->mode_info
.num_dig
= 6;
2571 adev
->mode_info
.num_crtc
= 6;
2572 adev
->mode_info
.num_hpd
= 6;
2573 adev
->mode_info
.num_dig
= 7;
2576 adev
->mode_info
.num_crtc
= 3;
2577 adev
->mode_info
.num_hpd
= 6;
2578 adev
->mode_info
.num_dig
= 9;
2581 adev
->mode_info
.num_crtc
= 2;
2582 adev
->mode_info
.num_hpd
= 6;
2583 adev
->mode_info
.num_dig
= 9;
2585 case CHIP_POLARIS11
:
2586 case CHIP_POLARIS12
:
2587 adev
->mode_info
.num_crtc
= 5;
2588 adev
->mode_info
.num_hpd
= 5;
2589 adev
->mode_info
.num_dig
= 5;
2591 case CHIP_POLARIS10
:
2593 adev
->mode_info
.num_crtc
= 6;
2594 adev
->mode_info
.num_hpd
= 6;
2595 adev
->mode_info
.num_dig
= 6;
2600 adev
->mode_info
.num_crtc
= 6;
2601 adev
->mode_info
.num_hpd
= 6;
2602 adev
->mode_info
.num_dig
= 6;
2604 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2606 adev
->mode_info
.num_crtc
= 4;
2607 adev
->mode_info
.num_hpd
= 4;
2608 adev
->mode_info
.num_dig
= 4;
2611 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2614 adev
->mode_info
.num_crtc
= 6;
2615 adev
->mode_info
.num_hpd
= 6;
2616 adev
->mode_info
.num_dig
= 6;
2619 adev
->mode_info
.num_crtc
= 5;
2620 adev
->mode_info
.num_hpd
= 5;
2621 adev
->mode_info
.num_dig
= 5;
2624 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2626 adev
->mode_info
.num_crtc
= 4;
2627 adev
->mode_info
.num_hpd
= 4;
2628 adev
->mode_info
.num_dig
= 4;
2632 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
2636 amdgpu_dm_set_irq_funcs(adev
);
2638 if (adev
->mode_info
.funcs
== NULL
)
2639 adev
->mode_info
.funcs
= &dm_display_funcs
;
2642 * Note: Do NOT change adev->audio_endpt_rreg and
2643 * adev->audio_endpt_wreg because they are initialised in
2644 * amdgpu_device_init()
2646 #if defined(CONFIG_DEBUG_KERNEL_DC)
2649 &dev_attr_s3_debug
);
2655 static bool modeset_required(struct drm_crtc_state
*crtc_state
,
2656 struct dc_stream_state
*new_stream
,
2657 struct dc_stream_state
*old_stream
)
2659 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
2662 if (!crtc_state
->enable
)
2665 return crtc_state
->active
;
2668 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
2670 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
2673 return !crtc_state
->enable
|| !crtc_state
->active
;
/*
 * drm_encoder_funcs.destroy: clean up and free an encoder allocated with
 * kzalloc() in amdgpu_dm_initialize_drm_device().
 * NOTE(review): the kfree() line was dropped by the extraction and
 * restored from the upstream layout — confirm.
 */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
2682 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
2683 .destroy
= amdgpu_dm_encoder_destroy
,
2687 static int fill_dc_scaling_info(const struct drm_plane_state
*state
,
2688 struct dc_scaling_info
*scaling_info
)
2690 int scale_w
, scale_h
;
2692 memset(scaling_info
, 0, sizeof(*scaling_info
));
2694 /* Source is fixed 16.16 but we ignore mantissa for now... */
2695 scaling_info
->src_rect
.x
= state
->src_x
>> 16;
2696 scaling_info
->src_rect
.y
= state
->src_y
>> 16;
2698 scaling_info
->src_rect
.width
= state
->src_w
>> 16;
2699 if (scaling_info
->src_rect
.width
== 0)
2702 scaling_info
->src_rect
.height
= state
->src_h
>> 16;
2703 if (scaling_info
->src_rect
.height
== 0)
2706 scaling_info
->dst_rect
.x
= state
->crtc_x
;
2707 scaling_info
->dst_rect
.y
= state
->crtc_y
;
2709 if (state
->crtc_w
== 0)
2712 scaling_info
->dst_rect
.width
= state
->crtc_w
;
2714 if (state
->crtc_h
== 0)
2717 scaling_info
->dst_rect
.height
= state
->crtc_h
;
2719 /* DRM doesn't specify clipping on destination output. */
2720 scaling_info
->clip_rect
= scaling_info
->dst_rect
;
2722 /* TODO: Validate scaling per-format with DC plane caps */
2723 scale_w
= scaling_info
->dst_rect
.width
* 1000 /
2724 scaling_info
->src_rect
.width
;
2726 if (scale_w
< 250 || scale_w
> 16000)
2729 scale_h
= scaling_info
->dst_rect
.height
* 1000 /
2730 scaling_info
->src_rect
.height
;
2732 if (scale_h
< 250 || scale_h
> 16000)
2736 * The "scaling_quality" can be ignored for now, quality = 0 has DC
2737 * assume reasonable defaults based on the format.
2743 static int get_fb_info(const struct amdgpu_framebuffer
*amdgpu_fb
,
2744 uint64_t *tiling_flags
)
2746 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->base
.obj
[0]);
2747 int r
= amdgpu_bo_reserve(rbo
, false);
2750 /* Don't show error message when returning -ERESTARTSYS */
2751 if (r
!= -ERESTARTSYS
)
2752 DRM_ERROR("Unable to reserve buffer: %d\n", r
);
2757 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
2759 amdgpu_bo_unreserve(rbo
);
2764 static inline uint64_t get_dcc_address(uint64_t address
, uint64_t tiling_flags
)
2766 uint32_t offset
= AMDGPU_TILING_GET(tiling_flags
, DCC_OFFSET_256B
);
2768 return offset
? (address
+ offset
* 256) : 0;
2772 fill_plane_dcc_attributes(struct amdgpu_device
*adev
,
2773 const struct amdgpu_framebuffer
*afb
,
2774 const enum surface_pixel_format format
,
2775 const enum dc_rotation_angle rotation
,
2776 const struct plane_size
*plane_size
,
2777 const union dc_tiling_info
*tiling_info
,
2778 const uint64_t info
,
2779 struct dc_plane_dcc_param
*dcc
,
2780 struct dc_plane_address
*address
)
2782 struct dc
*dc
= adev
->dm
.dc
;
2783 struct dc_dcc_surface_param input
;
2784 struct dc_surface_dcc_cap output
;
2785 uint32_t offset
= AMDGPU_TILING_GET(info
, DCC_OFFSET_256B
);
2786 uint32_t i64b
= AMDGPU_TILING_GET(info
, DCC_INDEPENDENT_64B
) != 0;
2787 uint64_t dcc_address
;
2789 memset(&input
, 0, sizeof(input
));
2790 memset(&output
, 0, sizeof(output
));
2795 if (format
>= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
)
2798 if (!dc
->cap_funcs
.get_dcc_compression_cap
)
2801 input
.format
= format
;
2802 input
.surface_size
.width
= plane_size
->surface_size
.width
;
2803 input
.surface_size
.height
= plane_size
->surface_size
.height
;
2804 input
.swizzle_mode
= tiling_info
->gfx9
.swizzle
;
2806 if (rotation
== ROTATION_ANGLE_0
|| rotation
== ROTATION_ANGLE_180
)
2807 input
.scan
= SCAN_DIRECTION_HORIZONTAL
;
2808 else if (rotation
== ROTATION_ANGLE_90
|| rotation
== ROTATION_ANGLE_270
)
2809 input
.scan
= SCAN_DIRECTION_VERTICAL
;
2811 if (!dc
->cap_funcs
.get_dcc_compression_cap(dc
, &input
, &output
))
2814 if (!output
.capable
)
2817 if (i64b
== 0 && output
.grph
.rgb
.independent_64b_blks
!= 0)
2822 AMDGPU_TILING_GET(info
, DCC_PITCH_MAX
) + 1;
2823 dcc
->independent_64b_blks
= i64b
;
2825 dcc_address
= get_dcc_address(afb
->address
, info
);
2826 address
->grph
.meta_addr
.low_part
= lower_32_bits(dcc_address
);
2827 address
->grph
.meta_addr
.high_part
= upper_32_bits(dcc_address
);
2833 fill_plane_buffer_attributes(struct amdgpu_device
*adev
,
2834 const struct amdgpu_framebuffer
*afb
,
2835 const enum surface_pixel_format format
,
2836 const enum dc_rotation_angle rotation
,
2837 const uint64_t tiling_flags
,
2838 union dc_tiling_info
*tiling_info
,
2839 struct plane_size
*plane_size
,
2840 struct dc_plane_dcc_param
*dcc
,
2841 struct dc_plane_address
*address
)
2843 const struct drm_framebuffer
*fb
= &afb
->base
;
2846 memset(tiling_info
, 0, sizeof(*tiling_info
));
2847 memset(plane_size
, 0, sizeof(*plane_size
));
2848 memset(dcc
, 0, sizeof(*dcc
));
2849 memset(address
, 0, sizeof(*address
));
2851 if (format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
2852 plane_size
->surface_size
.x
= 0;
2853 plane_size
->surface_size
.y
= 0;
2854 plane_size
->surface_size
.width
= fb
->width
;
2855 plane_size
->surface_size
.height
= fb
->height
;
2856 plane_size
->surface_pitch
=
2857 fb
->pitches
[0] / fb
->format
->cpp
[0];
2859 address
->type
= PLN_ADDR_TYPE_GRAPHICS
;
2860 address
->grph
.addr
.low_part
= lower_32_bits(afb
->address
);
2861 address
->grph
.addr
.high_part
= upper_32_bits(afb
->address
);
2862 } else if (format
< SURFACE_PIXEL_FORMAT_INVALID
) {
2863 uint64_t chroma_addr
= afb
->address
+ fb
->offsets
[1];
2865 plane_size
->surface_size
.x
= 0;
2866 plane_size
->surface_size
.y
= 0;
2867 plane_size
->surface_size
.width
= fb
->width
;
2868 plane_size
->surface_size
.height
= fb
->height
;
2869 plane_size
->surface_pitch
=
2870 fb
->pitches
[0] / fb
->format
->cpp
[0];
2872 plane_size
->chroma_size
.x
= 0;
2873 plane_size
->chroma_size
.y
= 0;
2874 /* TODO: set these based on surface format */
2875 plane_size
->chroma_size
.width
= fb
->width
/ 2;
2876 plane_size
->chroma_size
.height
= fb
->height
/ 2;
2878 plane_size
->chroma_pitch
=
2879 fb
->pitches
[1] / fb
->format
->cpp
[1];
2881 address
->type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
2882 address
->video_progressive
.luma_addr
.low_part
=
2883 lower_32_bits(afb
->address
);
2884 address
->video_progressive
.luma_addr
.high_part
=
2885 upper_32_bits(afb
->address
);
2886 address
->video_progressive
.chroma_addr
.low_part
=
2887 lower_32_bits(chroma_addr
);
2888 address
->video_progressive
.chroma_addr
.high_part
=
2889 upper_32_bits(chroma_addr
);
2892 /* Fill GFX8 params */
2893 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
) {
2894 unsigned int bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
2896 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
2897 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
2898 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
2899 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
2900 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
2902 /* XXX fix me for VI */
2903 tiling_info
->gfx8
.num_banks
= num_banks
;
2904 tiling_info
->gfx8
.array_mode
=
2905 DC_ARRAY_2D_TILED_THIN1
;
2906 tiling_info
->gfx8
.tile_split
= tile_split
;
2907 tiling_info
->gfx8
.bank_width
= bankw
;
2908 tiling_info
->gfx8
.bank_height
= bankh
;
2909 tiling_info
->gfx8
.tile_aspect
= mtaspect
;
2910 tiling_info
->gfx8
.tile_mode
=
2911 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
2912 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
2913 == DC_ARRAY_1D_TILED_THIN1
) {
2914 tiling_info
->gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
2917 tiling_info
->gfx8
.pipe_config
=
2918 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
2920 if (adev
->asic_type
== CHIP_VEGA10
||
2921 adev
->asic_type
== CHIP_VEGA12
||
2922 adev
->asic_type
== CHIP_VEGA20
||
2923 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2924 adev
->asic_type
== CHIP_NAVI10
||
2925 adev
->asic_type
== CHIP_NAVI14
||
2926 adev
->asic_type
== CHIP_NAVI12
||
2928 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2929 adev
->asic_type
== CHIP_RENOIR
||
2931 adev
->asic_type
== CHIP_RAVEN
) {
2932 /* Fill GFX9 params */
2933 tiling_info
->gfx9
.num_pipes
=
2934 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
2935 tiling_info
->gfx9
.num_banks
=
2936 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
2937 tiling_info
->gfx9
.pipe_interleave
=
2938 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
2939 tiling_info
->gfx9
.num_shader_engines
=
2940 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
2941 tiling_info
->gfx9
.max_compressed_frags
=
2942 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
2943 tiling_info
->gfx9
.num_rb_per_se
=
2944 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
2945 tiling_info
->gfx9
.swizzle
=
2946 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
2947 tiling_info
->gfx9
.shaderEnable
= 1;
2949 ret
= fill_plane_dcc_attributes(adev
, afb
, format
, rotation
,
2950 plane_size
, tiling_info
,
2951 tiling_flags
, dcc
, address
);
2960 fill_blending_from_plane_state(const struct drm_plane_state
*plane_state
,
2961 bool *per_pixel_alpha
, bool *global_alpha
,
2962 int *global_alpha_value
)
2964 *per_pixel_alpha
= false;
2965 *global_alpha
= false;
2966 *global_alpha_value
= 0xff;
2968 if (plane_state
->plane
->type
!= DRM_PLANE_TYPE_OVERLAY
)
2971 if (plane_state
->pixel_blend_mode
== DRM_MODE_BLEND_PREMULTI
) {
2972 static const uint32_t alpha_formats
[] = {
2973 DRM_FORMAT_ARGB8888
,
2974 DRM_FORMAT_RGBA8888
,
2975 DRM_FORMAT_ABGR8888
,
2977 uint32_t format
= plane_state
->fb
->format
->format
;
2980 for (i
= 0; i
< ARRAY_SIZE(alpha_formats
); ++i
) {
2981 if (format
== alpha_formats
[i
]) {
2982 *per_pixel_alpha
= true;
2988 if (plane_state
->alpha
< 0xffff) {
2989 *global_alpha
= true;
2990 *global_alpha_value
= plane_state
->alpha
>> 8;
2995 fill_plane_color_attributes(const struct drm_plane_state
*plane_state
,
2996 const enum surface_pixel_format format
,
2997 enum dc_color_space
*color_space
)
3001 *color_space
= COLOR_SPACE_SRGB
;
3003 /* DRM color properties only affect non-RGB formats. */
3004 if (format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
)
3007 full_range
= (plane_state
->color_range
== DRM_COLOR_YCBCR_FULL_RANGE
);
3009 switch (plane_state
->color_encoding
) {
3010 case DRM_COLOR_YCBCR_BT601
:
3012 *color_space
= COLOR_SPACE_YCBCR601
;
3014 *color_space
= COLOR_SPACE_YCBCR601_LIMITED
;
3017 case DRM_COLOR_YCBCR_BT709
:
3019 *color_space
= COLOR_SPACE_YCBCR709
;
3021 *color_space
= COLOR_SPACE_YCBCR709_LIMITED
;
3024 case DRM_COLOR_YCBCR_BT2020
:
3026 *color_space
= COLOR_SPACE_2020_YCBCR
;
3039 fill_dc_plane_info_and_addr(struct amdgpu_device
*adev
,
3040 const struct drm_plane_state
*plane_state
,
3041 const uint64_t tiling_flags
,
3042 struct dc_plane_info
*plane_info
,
3043 struct dc_plane_address
*address
)
3045 const struct drm_framebuffer
*fb
= plane_state
->fb
;
3046 const struct amdgpu_framebuffer
*afb
=
3047 to_amdgpu_framebuffer(plane_state
->fb
);
3048 struct drm_format_name_buf format_name
;
3051 memset(plane_info
, 0, sizeof(*plane_info
));
3053 switch (fb
->format
->format
) {
3055 plane_info
->format
=
3056 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
3058 case DRM_FORMAT_RGB565
:
3059 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
3061 case DRM_FORMAT_XRGB8888
:
3062 case DRM_FORMAT_ARGB8888
:
3063 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
3065 case DRM_FORMAT_XRGB2101010
:
3066 case DRM_FORMAT_ARGB2101010
:
3067 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
3069 case DRM_FORMAT_XBGR2101010
:
3070 case DRM_FORMAT_ABGR2101010
:
3071 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
3073 case DRM_FORMAT_XBGR8888
:
3074 case DRM_FORMAT_ABGR8888
:
3075 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
;
3077 case DRM_FORMAT_NV21
:
3078 plane_info
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
3080 case DRM_FORMAT_NV12
:
3081 plane_info
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
3085 "Unsupported screen format %s\n",
3086 drm_get_format_name(fb
->format
->format
, &format_name
));
3090 switch (plane_state
->rotation
& DRM_MODE_ROTATE_MASK
) {
3091 case DRM_MODE_ROTATE_0
:
3092 plane_info
->rotation
= ROTATION_ANGLE_0
;
3094 case DRM_MODE_ROTATE_90
:
3095 plane_info
->rotation
= ROTATION_ANGLE_90
;
3097 case DRM_MODE_ROTATE_180
:
3098 plane_info
->rotation
= ROTATION_ANGLE_180
;
3100 case DRM_MODE_ROTATE_270
:
3101 plane_info
->rotation
= ROTATION_ANGLE_270
;
3104 plane_info
->rotation
= ROTATION_ANGLE_0
;
3108 plane_info
->visible
= true;
3109 plane_info
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
3111 plane_info
->layer_index
= 0;
3113 ret
= fill_plane_color_attributes(plane_state
, plane_info
->format
,
3114 &plane_info
->color_space
);
3118 ret
= fill_plane_buffer_attributes(adev
, afb
, plane_info
->format
,
3119 plane_info
->rotation
, tiling_flags
,
3120 &plane_info
->tiling_info
,
3121 &plane_info
->plane_size
,
3122 &plane_info
->dcc
, address
);
3126 fill_blending_from_plane_state(
3127 plane_state
, &plane_info
->per_pixel_alpha
,
3128 &plane_info
->global_alpha
, &plane_info
->global_alpha_value
);
3133 static int fill_dc_plane_attributes(struct amdgpu_device
*adev
,
3134 struct dc_plane_state
*dc_plane_state
,
3135 struct drm_plane_state
*plane_state
,
3136 struct drm_crtc_state
*crtc_state
)
3138 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(crtc_state
);
3139 const struct amdgpu_framebuffer
*amdgpu_fb
=
3140 to_amdgpu_framebuffer(plane_state
->fb
);
3141 struct dc_scaling_info scaling_info
;
3142 struct dc_plane_info plane_info
;
3143 uint64_t tiling_flags
;
3146 ret
= fill_dc_scaling_info(plane_state
, &scaling_info
);
3150 dc_plane_state
->src_rect
= scaling_info
.src_rect
;
3151 dc_plane_state
->dst_rect
= scaling_info
.dst_rect
;
3152 dc_plane_state
->clip_rect
= scaling_info
.clip_rect
;
3153 dc_plane_state
->scaling_quality
= scaling_info
.scaling_quality
;
3155 ret
= get_fb_info(amdgpu_fb
, &tiling_flags
);
3159 ret
= fill_dc_plane_info_and_addr(adev
, plane_state
, tiling_flags
,
3161 &dc_plane_state
->address
);
3165 dc_plane_state
->format
= plane_info
.format
;
3166 dc_plane_state
->color_space
= plane_info
.color_space
;
3167 dc_plane_state
->format
= plane_info
.format
;
3168 dc_plane_state
->plane_size
= plane_info
.plane_size
;
3169 dc_plane_state
->rotation
= plane_info
.rotation
;
3170 dc_plane_state
->horizontal_mirror
= plane_info
.horizontal_mirror
;
3171 dc_plane_state
->stereo_format
= plane_info
.stereo_format
;
3172 dc_plane_state
->tiling_info
= plane_info
.tiling_info
;
3173 dc_plane_state
->visible
= plane_info
.visible
;
3174 dc_plane_state
->per_pixel_alpha
= plane_info
.per_pixel_alpha
;
3175 dc_plane_state
->global_alpha
= plane_info
.global_alpha
;
3176 dc_plane_state
->global_alpha_value
= plane_info
.global_alpha_value
;
3177 dc_plane_state
->dcc
= plane_info
.dcc
;
3178 dc_plane_state
->layer_index
= plane_info
.layer_index
; // Always returns 0
3181 * Always set input transfer function, since plane state is refreshed
3184 ret
= amdgpu_dm_update_plane_color_mgmt(dm_crtc_state
, dc_plane_state
);
3191 static void update_stream_scaling_settings(const struct drm_display_mode
*mode
,
3192 const struct dm_connector_state
*dm_state
,
3193 struct dc_stream_state
*stream
)
3195 enum amdgpu_rmx_type rmx_type
;
3197 struct rect src
= { 0 }; /* viewport in composition space*/
3198 struct rect dst
= { 0 }; /* stream addressable area */
3200 /* no mode. nothing to be done */
3204 /* Full screen scaling by default */
3205 src
.width
= mode
->hdisplay
;
3206 src
.height
= mode
->vdisplay
;
3207 dst
.width
= stream
->timing
.h_addressable
;
3208 dst
.height
= stream
->timing
.v_addressable
;
3211 rmx_type
= dm_state
->scaling
;
3212 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
3213 if (src
.width
* dst
.height
<
3214 src
.height
* dst
.width
) {
3215 /* height needs less upscaling/more downscaling */
3216 dst
.width
= src
.width
*
3217 dst
.height
/ src
.height
;
3219 /* width needs less upscaling/more downscaling */
3220 dst
.height
= src
.height
*
3221 dst
.width
/ src
.width
;
3223 } else if (rmx_type
== RMX_CENTER
) {
3227 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
3228 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
3230 if (dm_state
->underscan_enable
) {
3231 dst
.x
+= dm_state
->underscan_hborder
/ 2;
3232 dst
.y
+= dm_state
->underscan_vborder
/ 2;
3233 dst
.width
-= dm_state
->underscan_hborder
;
3234 dst
.height
-= dm_state
->underscan_vborder
;
3241 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
3242 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
3246 static enum dc_color_depth
3247 convert_color_depth_from_display_info(const struct drm_connector
*connector
,
3248 const struct drm_connector_state
*state
)
3250 uint8_t bpc
= (uint8_t)connector
->display_info
.bpc
;
3252 /* Assume 8 bpc by default if no bpc is specified. */
3253 bpc
= bpc
? bpc
: 8;
3256 state
= connector
->state
;
3260 * Cap display bpc based on the user requested value.
3262 * The value for state->max_bpc may not correctly updated
3263 * depending on when the connector gets added to the state
3264 * or if this was called outside of atomic check, so it
3265 * can't be used directly.
3267 bpc
= min(bpc
, state
->max_requested_bpc
);
3269 /* Round down to the nearest even number. */
3270 bpc
= bpc
- (bpc
& 1);
3276 * Temporary Work around, DRM doesn't parse color depth for
3277 * EDID revision before 1.4
3278 * TODO: Fix edid parsing
3280 return COLOR_DEPTH_888
;
3282 return COLOR_DEPTH_666
;
3284 return COLOR_DEPTH_888
;
3286 return COLOR_DEPTH_101010
;
3288 return COLOR_DEPTH_121212
;
3290 return COLOR_DEPTH_141414
;
3292 return COLOR_DEPTH_161616
;
3294 return COLOR_DEPTH_UNDEFINED
;
3298 static enum dc_aspect_ratio
3299 get_aspect_ratio(const struct drm_display_mode
*mode_in
)
3301 /* 1-1 mapping, since both enums follow the HDMI spec. */
3302 return (enum dc_aspect_ratio
) mode_in
->picture_aspect_ratio
;
3305 static enum dc_color_space
3306 get_output_color_space(const struct dc_crtc_timing
*dc_crtc_timing
)
3308 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
3310 switch (dc_crtc_timing
->pixel_encoding
) {
3311 case PIXEL_ENCODING_YCBCR422
:
3312 case PIXEL_ENCODING_YCBCR444
:
3313 case PIXEL_ENCODING_YCBCR420
:
3316 * 27030khz is the separation point between HDTV and SDTV
3317 * according to HDMI spec, we use YCbCr709 and YCbCr601
3320 if (dc_crtc_timing
->pix_clk_100hz
> 270300) {
3321 if (dc_crtc_timing
->flags
.Y_ONLY
)
3323 COLOR_SPACE_YCBCR709_LIMITED
;
3325 color_space
= COLOR_SPACE_YCBCR709
;
3327 if (dc_crtc_timing
->flags
.Y_ONLY
)
3329 COLOR_SPACE_YCBCR601_LIMITED
;
3331 color_space
= COLOR_SPACE_YCBCR601
;
3336 case PIXEL_ENCODING_RGB
:
3337 color_space
= COLOR_SPACE_SRGB
;
3348 static void reduce_mode_colour_depth(struct dc_crtc_timing
*timing_out
)
3350 if (timing_out
->display_color_depth
<= COLOR_DEPTH_888
)
3353 timing_out
->display_color_depth
--;
3356 static void adjust_colour_depth_from_display_info(struct dc_crtc_timing
*timing_out
,
3357 const struct drm_display_info
*info
)
3360 if (timing_out
->display_color_depth
<= COLOR_DEPTH_888
)
3363 normalized_clk
= timing_out
->pix_clk_100hz
/ 10;
3364 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3365 if (timing_out
->pixel_encoding
== PIXEL_ENCODING_YCBCR420
)
3366 normalized_clk
/= 2;
3367 /* Adjusting pix clock following on HDMI spec based on colour depth */
3368 switch (timing_out
->display_color_depth
) {
3369 case COLOR_DEPTH_101010
:
3370 normalized_clk
= (normalized_clk
* 30) / 24;
3372 case COLOR_DEPTH_121212
:
3373 normalized_clk
= (normalized_clk
* 36) / 24;
3375 case COLOR_DEPTH_161616
:
3376 normalized_clk
= (normalized_clk
* 48) / 24;
3381 if (normalized_clk
<= info
->max_tmds_clock
)
3383 reduce_mode_colour_depth(timing_out
);
3385 } while (timing_out
->display_color_depth
> COLOR_DEPTH_888
);
3389 static void fill_stream_properties_from_drm_display_mode(
3390 struct dc_stream_state
*stream
,
3391 const struct drm_display_mode
*mode_in
,
3392 const struct drm_connector
*connector
,
3393 const struct drm_connector_state
*connector_state
,
3394 const struct dc_stream_state
*old_stream
)
3396 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
3397 const struct drm_display_info
*info
= &connector
->display_info
;
3398 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
3399 struct hdmi_vendor_infoframe hv_frame
;
3400 struct hdmi_avi_infoframe avi_frame
;
3402 timing_out
->h_border_left
= 0;
3403 timing_out
->h_border_right
= 0;
3404 timing_out
->v_border_top
= 0;
3405 timing_out
->v_border_bottom
= 0;
3406 /* TODO: un-hardcode */
3407 if (drm_mode_is_420_only(info
, mode_in
)
3408 && stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
3409 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR420
;
3410 else if (drm_mode_is_420_also(info
, mode_in
)
3411 && aconnector
->force_yuv420_output
)
3412 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR420
;
3413 else if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
3414 && stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
3415 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
3417 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
3419 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
3420 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
3421 connector
, connector_state
);
3422 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
3423 timing_out
->hdmi_vic
= 0;
3426 timing_out
->vic
= old_stream
->timing
.vic
;
3427 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= old_stream
->timing
.flags
.HSYNC_POSITIVE_POLARITY
;
3428 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= old_stream
->timing
.flags
.VSYNC_POSITIVE_POLARITY
;
3430 timing_out
->vic
= drm_match_cea_mode(mode_in
);
3431 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
3432 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
3433 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
3434 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
3437 if (stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
) {
3438 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame
, (struct drm_connector
*)connector
, mode_in
);
3439 timing_out
->vic
= avi_frame
.video_code
;
3440 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame
, (struct drm_connector
*)connector
, mode_in
);
3441 timing_out
->hdmi_vic
= hv_frame
.vic
;
3444 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
3445 timing_out
->h_total
= mode_in
->crtc_htotal
;
3446 timing_out
->h_sync_width
=
3447 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
3448 timing_out
->h_front_porch
=
3449 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
3450 timing_out
->v_total
= mode_in
->crtc_vtotal
;
3451 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
3452 timing_out
->v_front_porch
=
3453 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
3454 timing_out
->v_sync_width
=
3455 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
3456 timing_out
->pix_clk_100hz
= mode_in
->crtc_clock
* 10;
3457 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
3459 stream
->output_color_space
= get_output_color_space(timing_out
);
3461 stream
->out_transfer_func
->type
= TF_TYPE_PREDEFINED
;
3462 stream
->out_transfer_func
->tf
= TRANSFER_FUNCTION_SRGB
;
3463 if (stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
3464 adjust_colour_depth_from_display_info(timing_out
, info
);
3467 static void fill_audio_info(struct audio_info
*audio_info
,
3468 const struct drm_connector
*drm_connector
,
3469 const struct dc_sink
*dc_sink
)
3472 int cea_revision
= 0;
3473 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
3475 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
3476 audio_info
->product_id
= edid_caps
->product_id
;
3478 cea_revision
= drm_connector
->display_info
.cea_rev
;
3480 strscpy(audio_info
->display_name
,
3481 edid_caps
->display_name
,
3482 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
);
3484 if (cea_revision
>= 3) {
3485 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
3487 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
3488 audio_info
->modes
[i
].format_code
=
3489 (enum audio_format_code
)
3490 (edid_caps
->audio_modes
[i
].format_code
);
3491 audio_info
->modes
[i
].channel_count
=
3492 edid_caps
->audio_modes
[i
].channel_count
;
3493 audio_info
->modes
[i
].sample_rates
.all
=
3494 edid_caps
->audio_modes
[i
].sample_rate
;
3495 audio_info
->modes
[i
].sample_size
=
3496 edid_caps
->audio_modes
[i
].sample_size
;
3500 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
3502 /* TODO: We only check for the progressive mode, check for interlace mode too */
3503 if (drm_connector
->latency_present
[0]) {
3504 audio_info
->video_latency
= drm_connector
->video_latency
[0];
3505 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
3508 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
3513 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode
*src_mode
,
3514 struct drm_display_mode
*dst_mode
)
3516 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
3517 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
3518 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
3519 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
3520 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
3521 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
3522 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
3523 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
3524 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
3525 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
3526 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
3527 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
3528 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
3529 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
3533 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode
*drm_mode
,
3534 const struct drm_display_mode
*native_mode
,
3537 if (scale_enabled
) {
3538 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
3539 } else if (native_mode
->clock
== drm_mode
->clock
&&
3540 native_mode
->htotal
== drm_mode
->htotal
&&
3541 native_mode
->vtotal
== drm_mode
->vtotal
) {
3542 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
3544 /* no scaling nor amdgpu inserted, no need to patch */
3548 static struct dc_sink
*
3549 create_fake_sink(struct amdgpu_dm_connector
*aconnector
)
3551 struct dc_sink_init_data sink_init_data
= { 0 };
3552 struct dc_sink
*sink
= NULL
;
3553 sink_init_data
.link
= aconnector
->dc_link
;
3554 sink_init_data
.sink_signal
= aconnector
->dc_link
->connector_signal
;
3556 sink
= dc_sink_create(&sink_init_data
);
3558 DRM_ERROR("Failed to create sink!\n");
3561 sink
->sink_signal
= SIGNAL_TYPE_VIRTUAL
;
3566 static void set_multisync_trigger_params(
3567 struct dc_stream_state
*stream
)
3569 if (stream
->triggered_crtc_reset
.enabled
) {
3570 stream
->triggered_crtc_reset
.event
= CRTC_EVENT_VSYNC_RISING
;
3571 stream
->triggered_crtc_reset
.delay
= TRIGGER_DELAY_NEXT_LINE
;
3575 static void set_master_stream(struct dc_stream_state
*stream_set
[],
3578 int j
, highest_rfr
= 0, master_stream
= 0;
3580 for (j
= 0; j
< stream_count
; j
++) {
3581 if (stream_set
[j
] && stream_set
[j
]->triggered_crtc_reset
.enabled
) {
3582 int refresh_rate
= 0;
3584 refresh_rate
= (stream_set
[j
]->timing
.pix_clk_100hz
*100)/
3585 (stream_set
[j
]->timing
.h_total
*stream_set
[j
]->timing
.v_total
);
3586 if (refresh_rate
> highest_rfr
) {
3587 highest_rfr
= refresh_rate
;
3592 for (j
= 0; j
< stream_count
; j
++) {
3594 stream_set
[j
]->triggered_crtc_reset
.event_source
= stream_set
[master_stream
];
3598 static void dm_enable_per_frame_crtc_master_sync(struct dc_state
*context
)
3602 if (context
->stream_count
< 2)
3604 for (i
= 0; i
< context
->stream_count
; i
++) {
3605 if (!context
->streams
[i
])
3608 * TODO: add a function to read AMD VSDB bits and set
3609 * crtc_sync_master.multi_sync_enabled flag
3610 * For now it's set to false
3612 set_multisync_trigger_params(context
->streams
[i
]);
3614 set_master_stream(context
->streams
, context
->stream_count
);
3617 static struct dc_stream_state
*
3618 create_stream_for_sink(struct amdgpu_dm_connector
*aconnector
,
3619 const struct drm_display_mode
*drm_mode
,
3620 const struct dm_connector_state
*dm_state
,
3621 const struct dc_stream_state
*old_stream
)
3623 struct drm_display_mode
*preferred_mode
= NULL
;
3624 struct drm_connector
*drm_connector
;
3625 const struct drm_connector_state
*con_state
=
3626 dm_state
? &dm_state
->base
: NULL
;
3627 struct dc_stream_state
*stream
= NULL
;
3628 struct drm_display_mode mode
= *drm_mode
;
3629 bool native_mode_found
= false;
3630 bool scale
= dm_state
? (dm_state
->scaling
!= RMX_OFF
) : false;
3632 int preferred_refresh
= 0;
3633 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
3634 struct dsc_dec_dpcd_caps dsc_caps
;
3635 uint32_t link_bandwidth_kbps
;
3638 struct dc_sink
*sink
= NULL
;
3639 if (aconnector
== NULL
) {
3640 DRM_ERROR("aconnector is NULL!\n");
3644 drm_connector
= &aconnector
->base
;
3646 if (!aconnector
->dc_sink
) {
3647 sink
= create_fake_sink(aconnector
);
3651 sink
= aconnector
->dc_sink
;
3652 dc_sink_retain(sink
);
3655 stream
= dc_create_stream_for_sink(sink
);
3657 if (stream
== NULL
) {
3658 DRM_ERROR("Failed to create stream for sink!\n");
3662 stream
->dm_stream_context
= aconnector
;
3664 stream
->timing
.flags
.LTE_340MCSC_SCRAMBLE
=
3665 drm_connector
->display_info
.hdmi
.scdc
.scrambling
.low_rates
;
3667 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
3668 /* Search for preferred mode */
3669 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
3670 native_mode_found
= true;
3674 if (!native_mode_found
)
3675 preferred_mode
= list_first_entry_or_null(
3676 &aconnector
->base
.modes
,
3677 struct drm_display_mode
,
3680 mode_refresh
= drm_mode_vrefresh(&mode
);
3682 if (preferred_mode
== NULL
) {
3684 * This may not be an error, the use case is when we have no
3685 * usermode calls to reset and set mode upon hotplug. In this
3686 * case, we call set mode ourselves to restore the previous mode
3687 * and the modelist may not be filled in in time.
3689 DRM_DEBUG_DRIVER("No preferred mode found\n");
3691 decide_crtc_timing_for_drm_display_mode(
3692 &mode
, preferred_mode
,
3693 dm_state
? (dm_state
->scaling
!= RMX_OFF
) : false);
3694 preferred_refresh
= drm_mode_vrefresh(preferred_mode
);
3698 drm_mode_set_crtcinfo(&mode
, 0);
3701 * If scaling is enabled and refresh rate didn't change
3702 * we copy the vic and polarities of the old timings
3704 if (!scale
|| mode_refresh
!= preferred_refresh
)
3705 fill_stream_properties_from_drm_display_mode(stream
,
3706 &mode
, &aconnector
->base
, con_state
, NULL
);
3708 fill_stream_properties_from_drm_display_mode(stream
,
3709 &mode
, &aconnector
->base
, con_state
, old_stream
);
3711 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
3712 stream
->timing
.flags
.DSC
= 0;
3714 if (aconnector
->dc_link
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
3715 dc_dsc_parse_dsc_dpcd(aconnector
->dc_link
->dpcd_caps
.dsc_caps
.dsc_basic_caps
.raw
,
3716 aconnector
->dc_link
->dpcd_caps
.dsc_caps
.dsc_ext_caps
.raw
,
3718 link_bandwidth_kbps
= dc_link_bandwidth_kbps(aconnector
->dc_link
,
3719 dc_link_get_link_cap(aconnector
->dc_link
));
3721 if (dsc_caps
.is_dsc_supported
)
3722 if (dc_dsc_compute_config(aconnector
->dc_link
->ctx
->dc
->res_pool
->dscs
[0],
3724 aconnector
->dc_link
->ctx
->dc
->debug
.dsc_min_slice_height_override
,
3725 link_bandwidth_kbps
,
3727 &stream
->timing
.dsc_cfg
))
3728 stream
->timing
.flags
.DSC
= 1;
3732 update_stream_scaling_settings(&mode
, dm_state
, stream
);
3735 &stream
->audio_info
,
3739 update_stream_signal(stream
, sink
);
3741 if (stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
3742 mod_build_hf_vsif_infopacket(stream
, &stream
->vsp_infopacket
, false, false);
3745 dc_sink_release(sink
);
/* Tear down the DRM CRTC and free its embedding allocation. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
3756 static void dm_crtc_destroy_state(struct drm_crtc
*crtc
,
3757 struct drm_crtc_state
*state
)
3759 struct dm_crtc_state
*cur
= to_dm_crtc_state(state
);
3761 /* TODO Destroy dc_stream objects are stream object is flattened */
3763 dc_stream_release(cur
->stream
);
3766 __drm_atomic_helper_crtc_destroy_state(state
);
3772 static void dm_crtc_reset_state(struct drm_crtc
*crtc
)
3774 struct dm_crtc_state
*state
;
3777 dm_crtc_destroy_state(crtc
, crtc
->state
);
3779 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
3780 if (WARN_ON(!state
))
3783 crtc
->state
= &state
->base
;
3784 crtc
->state
->crtc
= crtc
;
3788 static struct drm_crtc_state
*
3789 dm_crtc_duplicate_state(struct drm_crtc
*crtc
)
3791 struct dm_crtc_state
*state
, *cur
;
3793 cur
= to_dm_crtc_state(crtc
->state
);
3795 if (WARN_ON(!crtc
->state
))
3798 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
3802 __drm_atomic_helper_crtc_duplicate_state(crtc
, &state
->base
);
3805 state
->stream
= cur
->stream
;
3806 dc_stream_retain(state
->stream
);
3809 state
->active_planes
= cur
->active_planes
;
3810 state
->interrupts_enabled
= cur
->interrupts_enabled
;
3811 state
->vrr_params
= cur
->vrr_params
;
3812 state
->vrr_infopacket
= cur
->vrr_infopacket
;
3813 state
->abm_level
= cur
->abm_level
;
3814 state
->vrr_supported
= cur
->vrr_supported
;
3815 state
->freesync_config
= cur
->freesync_config
;
3816 state
->crc_src
= cur
->crc_src
;
3817 state
->cm_has_degamma
= cur
->cm_has_degamma
;
3818 state
->cm_is_degamma_srgb
= cur
->cm_is_degamma_srgb
;
3820 /* TODO Duplicate dc_stream after objects are stream object is flattened */
3822 return &state
->base
;
3825 static inline int dm_set_vupdate_irq(struct drm_crtc
*crtc
, bool enable
)
3827 enum dc_irq_source irq_source
;
3828 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3829 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
3832 irq_source
= IRQ_TYPE_VUPDATE
+ acrtc
->otg_inst
;
3834 rc
= dc_interrupt_set(adev
->dm
.dc
, irq_source
, enable
) ? 0 : -EBUSY
;
3836 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
3837 acrtc
->crtc_id
, enable
? "en" : "dis", rc
);
3841 static inline int dm_set_vblank(struct drm_crtc
*crtc
, bool enable
)
3843 enum dc_irq_source irq_source
;
3844 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3845 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
3846 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
3850 /* vblank irq on -> Only need vupdate irq in vrr mode */
3851 if (amdgpu_dm_vrr_active(acrtc_state
))
3852 rc
= dm_set_vupdate_irq(crtc
, true);
3854 /* vblank irq off -> vupdate irq off */
3855 rc
= dm_set_vupdate_irq(crtc
, false);
3861 irq_source
= IRQ_TYPE_VBLANK
+ acrtc
->otg_inst
;
3862 return dc_interrupt_set(adev
->dm
.dc
, irq_source
, enable
) ? 0 : -EBUSY
;
3865 static int dm_enable_vblank(struct drm_crtc
*crtc
)
3867 return dm_set_vblank(crtc
, true);
3870 static void dm_disable_vblank(struct drm_crtc
*crtc
)
3872 dm_set_vblank(crtc
, false);
3875 /* Implemented only the options currently availible for the driver */
3876 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
3877 .reset
= dm_crtc_reset_state
,
3878 .destroy
= amdgpu_dm_crtc_destroy
,
3879 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
3880 .set_config
= drm_atomic_helper_set_config
,
3881 .page_flip
= drm_atomic_helper_page_flip
,
3882 .atomic_duplicate_state
= dm_crtc_duplicate_state
,
3883 .atomic_destroy_state
= dm_crtc_destroy_state
,
3884 .set_crc_source
= amdgpu_dm_crtc_set_crc_source
,
3885 .verify_crc_source
= amdgpu_dm_crtc_verify_crc_source
,
3886 .get_crc_sources
= amdgpu_dm_crtc_get_crc_sources
,
3887 .enable_vblank
= dm_enable_vblank
,
3888 .disable_vblank
= dm_disable_vblank
,
3891 static enum drm_connector_status
3892 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
3895 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
3899 * 1. This interface is NOT called in context of HPD irq.
3900 * 2. This interface *is called* in context of user-mode ioctl. Which
3901 * makes it a bad place for *any* MST-related activity.
3904 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
&&
3905 !aconnector
->fake_enable
)
3906 connected
= (aconnector
->dc_sink
!= NULL
);
3908 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
3910 return (connected
? connector_status_connected
:
3911 connector_status_disconnected
);
3914 int amdgpu_dm_connector_atomic_set_property(struct drm_connector
*connector
,
3915 struct drm_connector_state
*connector_state
,
3916 struct drm_property
*property
,
3919 struct drm_device
*dev
= connector
->dev
;
3920 struct amdgpu_device
*adev
= dev
->dev_private
;
3921 struct dm_connector_state
*dm_old_state
=
3922 to_dm_connector_state(connector
->state
);
3923 struct dm_connector_state
*dm_new_state
=
3924 to_dm_connector_state(connector_state
);
3928 if (property
== dev
->mode_config
.scaling_mode_property
) {
3929 enum amdgpu_rmx_type rmx_type
;
3932 case DRM_MODE_SCALE_CENTER
:
3933 rmx_type
= RMX_CENTER
;
3935 case DRM_MODE_SCALE_ASPECT
:
3936 rmx_type
= RMX_ASPECT
;
3938 case DRM_MODE_SCALE_FULLSCREEN
:
3939 rmx_type
= RMX_FULL
;
3941 case DRM_MODE_SCALE_NONE
:
3947 if (dm_old_state
->scaling
== rmx_type
)
3950 dm_new_state
->scaling
= rmx_type
;
3952 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
3953 dm_new_state
->underscan_hborder
= val
;
3955 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
3956 dm_new_state
->underscan_vborder
= val
;
3958 } else if (property
== adev
->mode_info
.underscan_property
) {
3959 dm_new_state
->underscan_enable
= val
;
3961 } else if (property
== adev
->mode_info
.abm_level_property
) {
3962 dm_new_state
->abm_level
= val
;
3969 int amdgpu_dm_connector_atomic_get_property(struct drm_connector
*connector
,
3970 const struct drm_connector_state
*state
,
3971 struct drm_property
*property
,
3974 struct drm_device
*dev
= connector
->dev
;
3975 struct amdgpu_device
*adev
= dev
->dev_private
;
3976 struct dm_connector_state
*dm_state
=
3977 to_dm_connector_state(state
);
3980 if (property
== dev
->mode_config
.scaling_mode_property
) {
3981 switch (dm_state
->scaling
) {
3983 *val
= DRM_MODE_SCALE_CENTER
;
3986 *val
= DRM_MODE_SCALE_ASPECT
;
3989 *val
= DRM_MODE_SCALE_FULLSCREEN
;
3993 *val
= DRM_MODE_SCALE_NONE
;
3997 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
3998 *val
= dm_state
->underscan_hborder
;
4000 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
4001 *val
= dm_state
->underscan_vborder
;
4003 } else if (property
== adev
->mode_info
.underscan_property
) {
4004 *val
= dm_state
->underscan_enable
;
4006 } else if (property
== adev
->mode_info
.abm_level_property
) {
4007 *val
= dm_state
->abm_level
;
4014 static void amdgpu_dm_connector_unregister(struct drm_connector
*connector
)
4016 struct amdgpu_dm_connector
*amdgpu_dm_connector
= to_amdgpu_dm_connector(connector
);
4018 drm_dp_aux_unregister(&amdgpu_dm_connector
->dm_dp_aux
.aux
);
4021 static void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
4023 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4024 const struct dc_link
*link
= aconnector
->dc_link
;
4025 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
4026 struct amdgpu_display_manager
*dm
= &adev
->dm
;
4028 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4029 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4031 if ((link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) &&
4032 link
->type
!= dc_connection_none
&&
4033 dm
->backlight_dev
) {
4034 backlight_device_unregister(dm
->backlight_dev
);
4035 dm
->backlight_dev
= NULL
;
4039 if (aconnector
->dc_em_sink
)
4040 dc_sink_release(aconnector
->dc_em_sink
);
4041 aconnector
->dc_em_sink
= NULL
;
4042 if (aconnector
->dc_sink
)
4043 dc_sink_release(aconnector
->dc_sink
);
4044 aconnector
->dc_sink
= NULL
;
4046 drm_dp_cec_unregister_connector(&aconnector
->dm_dp_aux
.aux
);
4047 drm_connector_unregister(connector
);
4048 drm_connector_cleanup(connector
);
4049 if (aconnector
->i2c
) {
4050 i2c_del_adapter(&aconnector
->i2c
->base
);
4051 kfree(aconnector
->i2c
);
4057 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
4059 struct dm_connector_state
*state
=
4060 to_dm_connector_state(connector
->state
);
4062 if (connector
->state
)
4063 __drm_atomic_helper_connector_destroy_state(connector
->state
);
4067 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
4070 state
->scaling
= RMX_OFF
;
4071 state
->underscan_enable
= false;
4072 state
->underscan_hborder
= 0;
4073 state
->underscan_vborder
= 0;
4074 state
->base
.max_requested_bpc
= 8;
4076 if (connector
->connector_type
== DRM_MODE_CONNECTOR_eDP
)
4077 state
->abm_level
= amdgpu_dm_abm_level
;
4079 __drm_atomic_helper_connector_reset(connector
, &state
->base
);
4083 struct drm_connector_state
*
4084 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector
*connector
)
4086 struct dm_connector_state
*state
=
4087 to_dm_connector_state(connector
->state
);
4089 struct dm_connector_state
*new_state
=
4090 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
4095 __drm_atomic_helper_connector_duplicate_state(connector
, &new_state
->base
);
4097 new_state
->freesync_capable
= state
->freesync_capable
;
4098 new_state
->abm_level
= state
->abm_level
;
4099 new_state
->scaling
= state
->scaling
;
4100 new_state
->underscan_enable
= state
->underscan_enable
;
4101 new_state
->underscan_hborder
= state
->underscan_hborder
;
4102 new_state
->underscan_vborder
= state
->underscan_vborder
;
4104 return &new_state
->base
;
4107 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
4108 .reset
= amdgpu_dm_connector_funcs_reset
,
4109 .detect
= amdgpu_dm_connector_detect
,
4110 .fill_modes
= drm_helper_probe_single_connector_modes
,
4111 .destroy
= amdgpu_dm_connector_destroy
,
4112 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
4113 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
4114 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
4115 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
,
4116 .early_unregister
= amdgpu_dm_connector_unregister
/* Helper-funcs get_modes trampoline. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
4124 static void create_eml_sink(struct amdgpu_dm_connector
*aconnector
)
4126 struct dc_sink_init_data init_params
= {
4127 .link
= aconnector
->dc_link
,
4128 .sink_signal
= SIGNAL_TYPE_VIRTUAL
4132 if (!aconnector
->base
.edid_blob_ptr
) {
4133 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4134 aconnector
->base
.name
);
4136 aconnector
->base
.force
= DRM_FORCE_OFF
;
4137 aconnector
->base
.override_edid
= false;
4141 edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
4143 aconnector
->edid
= edid
;
4145 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
4146 aconnector
->dc_link
,
4148 (edid
->extensions
+ 1) * EDID_LENGTH
,
4151 if (aconnector
->base
.force
== DRM_FORCE_ON
) {
4152 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
4153 aconnector
->dc_link
->local_sink
:
4154 aconnector
->dc_em_sink
;
4155 dc_sink_retain(aconnector
->dc_sink
);
4159 static void handle_edid_mgmt(struct amdgpu_dm_connector
*aconnector
)
4161 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
4164 * In case of headless boot with force on for DP managed connector
4165 * Those settings have to be != 0 to get initial modeset
4167 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
4168 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
4169 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
4173 aconnector
->base
.override_edid
= true;
4174 create_eml_sink(aconnector
);
4177 enum drm_mode_status
amdgpu_dm_connector_mode_valid(struct drm_connector
*connector
,
4178 struct drm_display_mode
*mode
)
4180 int result
= MODE_ERROR
;
4181 struct dc_sink
*dc_sink
;
4182 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
4183 /* TODO: Unhardcode stream count */
4184 struct dc_stream_state
*stream
;
4185 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4186 enum dc_status dc_result
= DC_OK
;
4188 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
4189 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
4193 * Only run this the first time mode_valid is called to initilialize
4196 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
4197 !aconnector
->dc_em_sink
)
4198 handle_edid_mgmt(aconnector
);
4200 dc_sink
= to_amdgpu_dm_connector(connector
)->dc_sink
;
4202 if (dc_sink
== NULL
) {
4203 DRM_ERROR("dc_sink is NULL!\n");
4207 stream
= create_stream_for_sink(aconnector
, mode
, NULL
, NULL
);
4208 if (stream
== NULL
) {
4209 DRM_ERROR("Failed to create stream for sink!\n");
4213 dc_result
= dc_validate_stream(adev
->dm
.dc
, stream
);
4215 if (dc_result
== DC_OK
)
4218 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4224 dc_stream_release(stream
);
4227 /* TODO: error handling*/
4231 static int fill_hdr_info_packet(const struct drm_connector_state
*state
,
4232 struct dc_info_packet
*out
)
4234 struct hdmi_drm_infoframe frame
;
4235 unsigned char buf
[30]; /* 26 + 4 */
4239 memset(out
, 0, sizeof(*out
));
4241 if (!state
->hdr_output_metadata
)
4244 ret
= drm_hdmi_infoframe_set_hdr_metadata(&frame
, state
);
4248 len
= hdmi_drm_infoframe_pack_only(&frame
, buf
, sizeof(buf
));
4252 /* Static metadata is a fixed 26 bytes + 4 byte header. */
4256 /* Prepare the infopacket for DC. */
4257 switch (state
->connector
->connector_type
) {
4258 case DRM_MODE_CONNECTOR_HDMIA
:
4259 out
->hb0
= 0x87; /* type */
4260 out
->hb1
= 0x01; /* version */
4261 out
->hb2
= 0x1A; /* length */
4262 out
->sb
[0] = buf
[3]; /* checksum */
4266 case DRM_MODE_CONNECTOR_DisplayPort
:
4267 case DRM_MODE_CONNECTOR_eDP
:
4268 out
->hb0
= 0x00; /* sdp id, zero */
4269 out
->hb1
= 0x87; /* type */
4270 out
->hb2
= 0x1D; /* payload len - 1 */
4271 out
->hb3
= (0x13 << 2); /* sdp version */
4272 out
->sb
[0] = 0x01; /* version */
4273 out
->sb
[1] = 0x1A; /* length */
4281 memcpy(&out
->sb
[i
], &buf
[4], 26);
4284 print_hex_dump(KERN_DEBUG
, "HDR SB:", DUMP_PREFIX_NONE
, 16, 1, out
->sb
,
4285 sizeof(out
->sb
), false);
4291 is_hdr_metadata_different(const struct drm_connector_state
*old_state
,
4292 const struct drm_connector_state
*new_state
)
4294 struct drm_property_blob
*old_blob
= old_state
->hdr_output_metadata
;
4295 struct drm_property_blob
*new_blob
= new_state
->hdr_output_metadata
;
4297 if (old_blob
!= new_blob
) {
4298 if (old_blob
&& new_blob
&&
4299 old_blob
->length
== new_blob
->length
)
4300 return memcmp(old_blob
->data
, new_blob
->data
,
4310 amdgpu_dm_connector_atomic_check(struct drm_connector
*conn
,
4311 struct drm_atomic_state
*state
)
4313 struct drm_connector_state
*new_con_state
=
4314 drm_atomic_get_new_connector_state(state
, conn
);
4315 struct drm_connector_state
*old_con_state
=
4316 drm_atomic_get_old_connector_state(state
, conn
);
4317 struct drm_crtc
*crtc
= new_con_state
->crtc
;
4318 struct drm_crtc_state
*new_crtc_state
;
4324 if (is_hdr_metadata_different(old_con_state
, new_con_state
)) {
4325 struct dc_info_packet hdr_infopacket
;
4327 ret
= fill_hdr_info_packet(new_con_state
, &hdr_infopacket
);
4331 new_crtc_state
= drm_atomic_get_crtc_state(state
, crtc
);
4332 if (IS_ERR(new_crtc_state
))
4333 return PTR_ERR(new_crtc_state
);
4336 * DC considers the stream backends changed if the
4337 * static metadata changes. Forcing the modeset also
4338 * gives a simple way for userspace to switch from
4339 * 8bpc to 10bpc when setting the metadata to enter
4342 * Changing the static metadata after it's been
4343 * set is permissible, however. So only force a
4344 * modeset if we're entering or exiting HDR.
4346 new_crtc_state
->mode_changed
=
4347 !old_con_state
->hdr_output_metadata
||
4348 !new_con_state
->hdr_output_metadata
;
4354 static const struct drm_connector_helper_funcs
4355 amdgpu_dm_connector_helper_funcs
= {
4357 * If hotplugging a second bigger display in FB Con mode, bigger resolution
4358 * modes will be filtered by drm_mode_validate_size(), and those modes
4359 * are missing after user start lightdm. So we need to renew modes list.
4360 * in get_modes call back, not just return the modes count
4362 .get_modes
= get_modes
,
4363 .mode_valid
= amdgpu_dm_connector_mode_valid
,
4364 .atomic_check
= amdgpu_dm_connector_atomic_check
,
/* Intentionally empty: CRTC disable is handled through the atomic commit path. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
4371 static bool does_crtc_have_active_cursor(struct drm_crtc_state
*new_crtc_state
)
4373 struct drm_device
*dev
= new_crtc_state
->crtc
->dev
;
4374 struct drm_plane
*plane
;
4376 drm_for_each_plane_mask(plane
, dev
, new_crtc_state
->plane_mask
) {
4377 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
4384 static int count_crtc_active_planes(struct drm_crtc_state
*new_crtc_state
)
4386 struct drm_atomic_state
*state
= new_crtc_state
->state
;
4387 struct drm_plane
*plane
;
4390 drm_for_each_plane_mask(plane
, state
->dev
, new_crtc_state
->plane_mask
) {
4391 struct drm_plane_state
*new_plane_state
;
4393 /* Cursor planes are "fake". */
4394 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
4397 new_plane_state
= drm_atomic_get_new_plane_state(state
, plane
);
4399 if (!new_plane_state
) {
4401 * The plane is enable on the CRTC and hasn't changed
4402 * state. This means that it previously passed
4403 * validation and is therefore enabled.
4409 /* We need a framebuffer to be considered enabled. */
4410 num_active
+= (new_plane_state
->fb
!= NULL
);
4417 * Sets whether interrupts should be enabled on a specific CRTC.
4418 * We require that the stream be enabled and that there exist active
4419 * DC planes on the stream.
4422 dm_update_crtc_interrupt_state(struct drm_crtc
*crtc
,
4423 struct drm_crtc_state
*new_crtc_state
)
4425 struct dm_crtc_state
*dm_new_crtc_state
=
4426 to_dm_crtc_state(new_crtc_state
);
4428 dm_new_crtc_state
->active_planes
= 0;
4429 dm_new_crtc_state
->interrupts_enabled
= false;
4431 if (!dm_new_crtc_state
->stream
)
4434 dm_new_crtc_state
->active_planes
=
4435 count_crtc_active_planes(new_crtc_state
);
4437 dm_new_crtc_state
->interrupts_enabled
=
4438 dm_new_crtc_state
->active_planes
> 0;
4441 static int dm_crtc_helper_atomic_check(struct drm_crtc
*crtc
,
4442 struct drm_crtc_state
*state
)
4444 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
4445 struct dc
*dc
= adev
->dm
.dc
;
4446 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(state
);
4450 * Update interrupt state for the CRTC. This needs to happen whenever
4451 * the CRTC has changed or whenever any of its planes have changed.
4452 * Atomic check satisfies both of these requirements since the CRTC
4453 * is added to the state by DRM during drm_atomic_helper_check_planes.
4455 dm_update_crtc_interrupt_state(crtc
, state
);
4457 if (unlikely(!dm_crtc_state
->stream
&&
4458 modeset_required(state
, NULL
, dm_crtc_state
->stream
))) {
4463 /* In some use cases, like reset, no stream is attached */
4464 if (!dm_crtc_state
->stream
)
4468 * We want at least one hardware plane enabled to use
4469 * the stream with a cursor enabled.
4471 if (state
->enable
&& state
->active
&&
4472 does_crtc_have_active_cursor(state
) &&
4473 dm_crtc_state
->active_planes
== 0)
4476 if (dc_validate_stream(dc
, dm_crtc_state
->stream
) == DC_OK
)
4482 static bool dm_crtc_helper_mode_fixup(struct drm_crtc
*crtc
,
4483 const struct drm_display_mode
*mode
,
4484 struct drm_display_mode
*adjusted_mode
)
4489 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
4490 .disable
= dm_crtc_helper_disable
,
4491 .atomic_check
= dm_crtc_helper_atomic_check
,
4492 .mode_fixup
= dm_crtc_helper_mode_fixup
/* Intentionally empty: encoder disable is handled through the commit path. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
/* No encoder-level validation required; always succeed. */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	return 0;
}
4507 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
4508 .disable
= dm_encoder_helper_disable
,
4509 .atomic_check
= dm_encoder_helper_atomic_check
4512 static void dm_drm_plane_reset(struct drm_plane
*plane
)
4514 struct dm_plane_state
*amdgpu_state
= NULL
;
4517 plane
->funcs
->atomic_destroy_state(plane
, plane
->state
);
4519 amdgpu_state
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
4520 WARN_ON(amdgpu_state
== NULL
);
4523 __drm_atomic_helper_plane_reset(plane
, &amdgpu_state
->base
);
4526 static struct drm_plane_state
*
4527 dm_drm_plane_duplicate_state(struct drm_plane
*plane
)
4529 struct dm_plane_state
*dm_plane_state
, *old_dm_plane_state
;
4531 old_dm_plane_state
= to_dm_plane_state(plane
->state
);
4532 dm_plane_state
= kzalloc(sizeof(*dm_plane_state
), GFP_KERNEL
);
4533 if (!dm_plane_state
)
4536 __drm_atomic_helper_plane_duplicate_state(plane
, &dm_plane_state
->base
);
4538 if (old_dm_plane_state
->dc_state
) {
4539 dm_plane_state
->dc_state
= old_dm_plane_state
->dc_state
;
4540 dc_plane_state_retain(dm_plane_state
->dc_state
);
4543 return &dm_plane_state
->base
;
4546 void dm_drm_plane_destroy_state(struct drm_plane
*plane
,
4547 struct drm_plane_state
*state
)
4549 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
4551 if (dm_plane_state
->dc_state
)
4552 dc_plane_state_release(dm_plane_state
->dc_state
);
4554 drm_atomic_helper_plane_destroy_state(plane
, state
);
4557 static const struct drm_plane_funcs dm_plane_funcs
= {
4558 .update_plane
= drm_atomic_helper_update_plane
,
4559 .disable_plane
= drm_atomic_helper_disable_plane
,
4560 .destroy
= drm_primary_helper_destroy
,
4561 .reset
= dm_drm_plane_reset
,
4562 .atomic_duplicate_state
= dm_drm_plane_duplicate_state
,
4563 .atomic_destroy_state
= dm_drm_plane_destroy_state
,
4566 static int dm_plane_helper_prepare_fb(struct drm_plane
*plane
,
4567 struct drm_plane_state
*new_state
)
4569 struct amdgpu_framebuffer
*afb
;
4570 struct drm_gem_object
*obj
;
4571 struct amdgpu_device
*adev
;
4572 struct amdgpu_bo
*rbo
;
4573 struct dm_plane_state
*dm_plane_state_new
, *dm_plane_state_old
;
4574 struct list_head list
;
4575 struct ttm_validate_buffer tv
;
4576 struct ww_acquire_ctx ticket
;
4577 uint64_t tiling_flags
;
4581 dm_plane_state_old
= to_dm_plane_state(plane
->state
);
4582 dm_plane_state_new
= to_dm_plane_state(new_state
);
4584 if (!new_state
->fb
) {
4585 DRM_DEBUG_DRIVER("No FB bound\n");
4589 afb
= to_amdgpu_framebuffer(new_state
->fb
);
4590 obj
= new_state
->fb
->obj
[0];
4591 rbo
= gem_to_amdgpu_bo(obj
);
4592 adev
= amdgpu_ttm_adev(rbo
->tbo
.bdev
);
4593 INIT_LIST_HEAD(&list
);
4597 list_add(&tv
.head
, &list
);
4599 r
= ttm_eu_reserve_buffers(&ticket
, &list
, false, NULL
, true);
4601 dev_err(adev
->dev
, "fail to reserve bo (%d)\n", r
);
4605 if (plane
->type
!= DRM_PLANE_TYPE_CURSOR
)
4606 domain
= amdgpu_display_supported_domains(adev
, rbo
->flags
);
4608 domain
= AMDGPU_GEM_DOMAIN_VRAM
;
4610 r
= amdgpu_bo_pin(rbo
, domain
);
4611 if (unlikely(r
!= 0)) {
4612 if (r
!= -ERESTARTSYS
)
4613 DRM_ERROR("Failed to pin framebuffer with error %d\n", r
);
4614 ttm_eu_backoff_reservation(&ticket
, &list
);
4618 r
= amdgpu_ttm_alloc_gart(&rbo
->tbo
);
4619 if (unlikely(r
!= 0)) {
4620 amdgpu_bo_unpin(rbo
);
4621 ttm_eu_backoff_reservation(&ticket
, &list
);
4622 DRM_ERROR("%p bind failed\n", rbo
);
4626 amdgpu_bo_get_tiling_flags(rbo
, &tiling_flags
);
4628 ttm_eu_backoff_reservation(&ticket
, &list
);
4630 afb
->address
= amdgpu_bo_gpu_offset(rbo
);
4634 if (dm_plane_state_new
->dc_state
&&
4635 dm_plane_state_old
->dc_state
!= dm_plane_state_new
->dc_state
) {
4636 struct dc_plane_state
*plane_state
= dm_plane_state_new
->dc_state
;
4638 fill_plane_buffer_attributes(
4639 adev
, afb
, plane_state
->format
, plane_state
->rotation
,
4640 tiling_flags
, &plane_state
->tiling_info
,
4641 &plane_state
->plane_size
, &plane_state
->dcc
,
4642 &plane_state
->address
);
4648 static void dm_plane_helper_cleanup_fb(struct drm_plane
*plane
,
4649 struct drm_plane_state
*old_state
)
4651 struct amdgpu_bo
*rbo
;
4657 rbo
= gem_to_amdgpu_bo(old_state
->fb
->obj
[0]);
4658 r
= amdgpu_bo_reserve(rbo
, false);
4660 DRM_ERROR("failed to reserve rbo before unpin\n");
4664 amdgpu_bo_unpin(rbo
);
4665 amdgpu_bo_unreserve(rbo
);
4666 amdgpu_bo_unref(&rbo
);
4669 static int dm_plane_atomic_check(struct drm_plane
*plane
,
4670 struct drm_plane_state
*state
)
4672 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
4673 struct dc
*dc
= adev
->dm
.dc
;
4674 struct dm_plane_state
*dm_plane_state
;
4675 struct dc_scaling_info scaling_info
;
4678 dm_plane_state
= to_dm_plane_state(state
);
4680 if (!dm_plane_state
->dc_state
)
4683 ret
= fill_dc_scaling_info(state
, &scaling_info
);
4687 if (dc_validate_plane(dc
, dm_plane_state
->dc_state
) == DC_OK
)
4693 static int dm_plane_atomic_async_check(struct drm_plane
*plane
,
4694 struct drm_plane_state
*new_plane_state
)
4696 /* Only support async updates on cursor planes. */
4697 if (plane
->type
!= DRM_PLANE_TYPE_CURSOR
)
4703 static void dm_plane_atomic_async_update(struct drm_plane
*plane
,
4704 struct drm_plane_state
*new_state
)
4706 struct drm_plane_state
*old_state
=
4707 drm_atomic_get_old_plane_state(new_state
->state
, plane
);
4709 swap(plane
->state
->fb
, new_state
->fb
);
4711 plane
->state
->src_x
= new_state
->src_x
;
4712 plane
->state
->src_y
= new_state
->src_y
;
4713 plane
->state
->src_w
= new_state
->src_w
;
4714 plane
->state
->src_h
= new_state
->src_h
;
4715 plane
->state
->crtc_x
= new_state
->crtc_x
;
4716 plane
->state
->crtc_y
= new_state
->crtc_y
;
4717 plane
->state
->crtc_w
= new_state
->crtc_w
;
4718 plane
->state
->crtc_h
= new_state
->crtc_h
;
4720 handle_cursor_update(plane
, old_state
);
4723 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
4724 .prepare_fb
= dm_plane_helper_prepare_fb
,
4725 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
4726 .atomic_check
= dm_plane_atomic_check
,
4727 .atomic_async_check
= dm_plane_atomic_async_check
,
4728 .atomic_async_update
= dm_plane_atomic_async_update
4732 * TODO: these are currently initialized to rgb formats only.
4733 * For future use cases we should either initialize them dynamically based on
4734 * plane capabilities, or initialize this array to all formats, so internal drm
4735 * check will succeed, and let DC implement proper check
4737 static const uint32_t rgb_formats
[] = {
4738 DRM_FORMAT_XRGB8888
,
4739 DRM_FORMAT_ARGB8888
,
4740 DRM_FORMAT_RGBA8888
,
4741 DRM_FORMAT_XRGB2101010
,
4742 DRM_FORMAT_XBGR2101010
,
4743 DRM_FORMAT_ARGB2101010
,
4744 DRM_FORMAT_ABGR2101010
,
4745 DRM_FORMAT_XBGR8888
,
4746 DRM_FORMAT_ABGR8888
,
4750 static const uint32_t overlay_formats
[] = {
4751 DRM_FORMAT_XRGB8888
,
4752 DRM_FORMAT_ARGB8888
,
4753 DRM_FORMAT_RGBA8888
,
4754 DRM_FORMAT_XBGR8888
,
4755 DRM_FORMAT_ABGR8888
,
4759 static const u32 cursor_formats
[] = {
4763 static int get_plane_formats(const struct drm_plane
*plane
,
4764 const struct dc_plane_cap
*plane_cap
,
4765 uint32_t *formats
, int max_formats
)
4767 int i
, num_formats
= 0;
4770 * TODO: Query support for each group of formats directly from
4771 * DC plane caps. This will require adding more formats to the
4775 switch (plane
->type
) {
4776 case DRM_PLANE_TYPE_PRIMARY
:
4777 for (i
= 0; i
< ARRAY_SIZE(rgb_formats
); ++i
) {
4778 if (num_formats
>= max_formats
)
4781 formats
[num_formats
++] = rgb_formats
[i
];
4784 if (plane_cap
&& plane_cap
->pixel_format_support
.nv12
)
4785 formats
[num_formats
++] = DRM_FORMAT_NV12
;
4788 case DRM_PLANE_TYPE_OVERLAY
:
4789 for (i
= 0; i
< ARRAY_SIZE(overlay_formats
); ++i
) {
4790 if (num_formats
>= max_formats
)
4793 formats
[num_formats
++] = overlay_formats
[i
];
4797 case DRM_PLANE_TYPE_CURSOR
:
4798 for (i
= 0; i
< ARRAY_SIZE(cursor_formats
); ++i
) {
4799 if (num_formats
>= max_formats
)
4802 formats
[num_formats
++] = cursor_formats
[i
];
4810 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
4811 struct drm_plane
*plane
,
4812 unsigned long possible_crtcs
,
4813 const struct dc_plane_cap
*plane_cap
)
4815 uint32_t formats
[32];
4819 num_formats
= get_plane_formats(plane
, plane_cap
, formats
,
4820 ARRAY_SIZE(formats
));
4822 res
= drm_universal_plane_init(dm
->adev
->ddev
, plane
, possible_crtcs
,
4823 &dm_plane_funcs
, formats
, num_formats
,
4824 NULL
, plane
->type
, NULL
);
4828 if (plane
->type
== DRM_PLANE_TYPE_OVERLAY
&&
4829 plane_cap
&& plane_cap
->per_pixel_alpha
) {
4830 unsigned int blend_caps
= BIT(DRM_MODE_BLEND_PIXEL_NONE
) |
4831 BIT(DRM_MODE_BLEND_PREMULTI
);
4833 drm_plane_create_alpha_property(plane
);
4834 drm_plane_create_blend_mode_property(plane
, blend_caps
);
4837 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
&&
4838 plane_cap
&& plane_cap
->pixel_format_support
.nv12
) {
4839 /* This only affects YUV formats. */
4840 drm_plane_create_color_properties(
4842 BIT(DRM_COLOR_YCBCR_BT601
) |
4843 BIT(DRM_COLOR_YCBCR_BT709
),
4844 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE
) |
4845 BIT(DRM_COLOR_YCBCR_FULL_RANGE
),
4846 DRM_COLOR_YCBCR_BT709
, DRM_COLOR_YCBCR_LIMITED_RANGE
);
4849 drm_plane_helper_add(plane
, &dm_plane_helper_funcs
);
4851 /* Create (reset) the plane state */
4852 if (plane
->funcs
->reset
)
4853 plane
->funcs
->reset(plane
);
4858 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
4859 struct drm_plane
*plane
,
4860 uint32_t crtc_index
)
4862 struct amdgpu_crtc
*acrtc
= NULL
;
4863 struct drm_plane
*cursor_plane
;
4867 cursor_plane
= kzalloc(sizeof(*cursor_plane
), GFP_KERNEL
);
4871 cursor_plane
->type
= DRM_PLANE_TYPE_CURSOR
;
4872 res
= amdgpu_dm_plane_init(dm
, cursor_plane
, 0, NULL
);
4874 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
4878 res
= drm_crtc_init_with_planes(
4883 &amdgpu_dm_crtc_funcs
, NULL
);
4888 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
4890 /* Create (reset) the plane state */
4891 if (acrtc
->base
.funcs
->reset
)
4892 acrtc
->base
.funcs
->reset(&acrtc
->base
);
4894 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
4895 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
4897 acrtc
->crtc_id
= crtc_index
;
4898 acrtc
->base
.enabled
= false;
4899 acrtc
->otg_inst
= -1;
4901 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
4902 drm_crtc_enable_color_mgmt(&acrtc
->base
, MAX_COLOR_LUT_ENTRIES
,
4903 true, MAX_COLOR_LUT_ENTRIES
);
4904 drm_mode_crtc_set_gamma_size(&acrtc
->base
, MAX_COLOR_LEGACY_LUT_ENTRIES
);
4910 kfree(cursor_plane
);
4915 static int to_drm_connector_type(enum signal_type st
)
4918 case SIGNAL_TYPE_HDMI_TYPE_A
:
4919 return DRM_MODE_CONNECTOR_HDMIA
;
4920 case SIGNAL_TYPE_EDP
:
4921 return DRM_MODE_CONNECTOR_eDP
;
4922 case SIGNAL_TYPE_LVDS
:
4923 return DRM_MODE_CONNECTOR_LVDS
;
4924 case SIGNAL_TYPE_RGB
:
4925 return DRM_MODE_CONNECTOR_VGA
;
4926 case SIGNAL_TYPE_DISPLAY_PORT
:
4927 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
4928 return DRM_MODE_CONNECTOR_DisplayPort
;
4929 case SIGNAL_TYPE_DVI_DUAL_LINK
:
4930 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
4931 return DRM_MODE_CONNECTOR_DVID
;
4932 case SIGNAL_TYPE_VIRTUAL
:
4933 return DRM_MODE_CONNECTOR_VIRTUAL
;
4936 return DRM_MODE_CONNECTOR_Unknown
;
4940 static struct drm_encoder
*amdgpu_dm_connector_to_encoder(struct drm_connector
*connector
)
4942 struct drm_encoder
*encoder
;
4944 /* There is only one encoder per connector */
4945 drm_connector_for_each_possible_encoder(connector
, encoder
)
4951 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
4953 struct drm_encoder
*encoder
;
4954 struct amdgpu_encoder
*amdgpu_encoder
;
4956 encoder
= amdgpu_dm_connector_to_encoder(connector
);
4958 if (encoder
== NULL
)
4961 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
4963 amdgpu_encoder
->native_mode
.clock
= 0;
4965 if (!list_empty(&connector
->probed_modes
)) {
4966 struct drm_display_mode
*preferred_mode
= NULL
;
4968 list_for_each_entry(preferred_mode
,
4969 &connector
->probed_modes
,
4971 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
)
4972 amdgpu_encoder
->native_mode
= *preferred_mode
;
4980 static struct drm_display_mode
*
4981 amdgpu_dm_create_common_mode(struct drm_encoder
*encoder
,
4983 int hdisplay
, int vdisplay
)
4985 struct drm_device
*dev
= encoder
->dev
;
4986 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
4987 struct drm_display_mode
*mode
= NULL
;
4988 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
4990 mode
= drm_mode_duplicate(dev
, native_mode
);
4995 mode
->hdisplay
= hdisplay
;
4996 mode
->vdisplay
= vdisplay
;
4997 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
4998 strscpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
5004 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
5005 struct drm_connector
*connector
)
5007 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
5008 struct drm_display_mode
*mode
= NULL
;
5009 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
5010 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
5011 to_amdgpu_dm_connector(connector
);
5015 char name
[DRM_DISPLAY_MODE_LEN
];
5018 } common_modes
[] = {
5019 { "640x480", 640, 480},
5020 { "800x600", 800, 600},
5021 { "1024x768", 1024, 768},
5022 { "1280x720", 1280, 720},
5023 { "1280x800", 1280, 800},
5024 {"1280x1024", 1280, 1024},
5025 { "1440x900", 1440, 900},
5026 {"1680x1050", 1680, 1050},
5027 {"1600x1200", 1600, 1200},
5028 {"1920x1080", 1920, 1080},
5029 {"1920x1200", 1920, 1200}
5032 n
= ARRAY_SIZE(common_modes
);
5034 for (i
= 0; i
< n
; i
++) {
5035 struct drm_display_mode
*curmode
= NULL
;
5036 bool mode_existed
= false;
5038 if (common_modes
[i
].w
> native_mode
->hdisplay
||
5039 common_modes
[i
].h
> native_mode
->vdisplay
||
5040 (common_modes
[i
].w
== native_mode
->hdisplay
&&
5041 common_modes
[i
].h
== native_mode
->vdisplay
))
5044 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
5045 if (common_modes
[i
].w
== curmode
->hdisplay
&&
5046 common_modes
[i
].h
== curmode
->vdisplay
) {
5047 mode_existed
= true;
5055 mode
= amdgpu_dm_create_common_mode(encoder
,
5056 common_modes
[i
].name
, common_modes
[i
].w
,
5058 drm_mode_probed_add(connector
, mode
);
5059 amdgpu_dm_connector
->num_modes
++;
5063 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector
*connector
,
5066 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
5067 to_amdgpu_dm_connector(connector
);
5070 /* empty probed_modes */
5071 INIT_LIST_HEAD(&connector
->probed_modes
);
5072 amdgpu_dm_connector
->num_modes
=
5073 drm_add_edid_modes(connector
, edid
);
5075 /* sorting the probed modes before calling function
5076 * amdgpu_dm_get_native_mode() since EDID can have
5077 * more than one preferred mode. The modes that are
5078 * later in the probed mode list could be of higher
5079 * and preferred resolution. For example, 3840x2160
5080 * resolution in base EDID preferred timing and 4096x2160
5081 * preferred resolution in DID extension block later.
5083 drm_mode_sort(&connector
->probed_modes
);
5084 amdgpu_dm_get_native_mode(connector
);
5086 amdgpu_dm_connector
->num_modes
= 0;
5090 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
5092 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
5093 to_amdgpu_dm_connector(connector
);
5094 struct drm_encoder
*encoder
;
5095 struct edid
*edid
= amdgpu_dm_connector
->edid
;
5097 encoder
= amdgpu_dm_connector_to_encoder(connector
);
5099 if (!edid
|| !drm_edid_is_valid(edid
)) {
5100 amdgpu_dm_connector
->num_modes
=
5101 drm_add_modes_noedid(connector
, 640, 480);
5103 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
5104 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
5106 amdgpu_dm_fbc_init(connector
);
5108 return amdgpu_dm_connector
->num_modes
;
5111 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager
*dm
,
5112 struct amdgpu_dm_connector
*aconnector
,
5114 struct dc_link
*link
,
5117 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
5120 * Some of the properties below require access to state, like bpc.
5121 * Allocate some default initial connector state with our reset helper.
5123 if (aconnector
->base
.funcs
->reset
)
5124 aconnector
->base
.funcs
->reset(&aconnector
->base
);
5126 aconnector
->connector_id
= link_index
;
5127 aconnector
->dc_link
= link
;
5128 aconnector
->base
.interlace_allowed
= false;
5129 aconnector
->base
.doublescan_allowed
= false;
5130 aconnector
->base
.stereo_allowed
= false;
5131 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
5132 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
5133 aconnector
->audio_inst
= -1;
5134 mutex_init(&aconnector
->hpd_lock
);
5137 * configure support HPD hot plug connector_>polled default value is 0
5138 * which means HPD hot plug not supported
5140 switch (connector_type
) {
5141 case DRM_MODE_CONNECTOR_HDMIA
:
5142 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
5143 aconnector
->base
.ycbcr_420_allowed
=
5144 link
->link_enc
->features
.hdmi_ycbcr420_supported
? true : false;
5146 case DRM_MODE_CONNECTOR_DisplayPort
:
5147 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
5148 aconnector
->base
.ycbcr_420_allowed
=
5149 link
->link_enc
->features
.dp_ycbcr420_supported
? true : false;
5151 case DRM_MODE_CONNECTOR_DVID
:
5152 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
5158 drm_object_attach_property(&aconnector
->base
.base
,
5159 dm
->ddev
->mode_config
.scaling_mode_property
,
5160 DRM_MODE_SCALE_NONE
);
5162 drm_object_attach_property(&aconnector
->base
.base
,
5163 adev
->mode_info
.underscan_property
,
5165 drm_object_attach_property(&aconnector
->base
.base
,
5166 adev
->mode_info
.underscan_hborder_property
,
5168 drm_object_attach_property(&aconnector
->base
.base
,
5169 adev
->mode_info
.underscan_vborder_property
,
5172 drm_connector_attach_max_bpc_property(&aconnector
->base
, 8, 16);
5174 /* This defaults to the max in the range, but we want 8bpc. */
5175 aconnector
->base
.state
->max_bpc
= 8;
5176 aconnector
->base
.state
->max_requested_bpc
= 8;
5178 if (connector_type
== DRM_MODE_CONNECTOR_eDP
&&
5179 dc_is_dmcu_initialized(adev
->dm
.dc
)) {
5180 drm_object_attach_property(&aconnector
->base
.base
,
5181 adev
->mode_info
.abm_level_property
, 0);
5184 if (connector_type
== DRM_MODE_CONNECTOR_HDMIA
||
5185 connector_type
== DRM_MODE_CONNECTOR_DisplayPort
||
5186 connector_type
== DRM_MODE_CONNECTOR_eDP
) {
5187 drm_object_attach_property(
5188 &aconnector
->base
.base
,
5189 dm
->ddev
->mode_config
.hdr_output_metadata_property
, 0);
5191 drm_connector_attach_vrr_capable_property(
5193 #ifdef CONFIG_DRM_AMD_DC_HDCP
5194 if (adev
->asic_type
>= CHIP_RAVEN
)
5195 drm_connector_attach_content_protection_property(&aconnector
->base
, false);
5200 static int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
5201 struct i2c_msg
*msgs
, int num
)
5203 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
5204 struct ddc_service
*ddc_service
= i2c
->ddc_service
;
5205 struct i2c_command cmd
;
5209 cmd
.payloads
= kcalloc(num
, sizeof(struct i2c_payload
), GFP_KERNEL
);
5214 cmd
.number_of_payloads
= num
;
5215 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
5218 for (i
= 0; i
< num
; i
++) {
5219 cmd
.payloads
[i
].write
= !(msgs
[i
].flags
& I2C_M_RD
);
5220 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
5221 cmd
.payloads
[i
].length
= msgs
[i
].len
;
5222 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
5226 ddc_service
->ctx
->dc
,
5227 ddc_service
->ddc_pin
->hw_info
.ddc_channel
,
5231 kfree(cmd
.payloads
);
5235 static u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
5237 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
5240 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
5241 .master_xfer
= amdgpu_dm_i2c_xfer
,
5242 .functionality
= amdgpu_dm_i2c_func
,
5245 static struct amdgpu_i2c_adapter
*
5246 create_i2c(struct ddc_service
*ddc_service
,
5250 struct amdgpu_device
*adev
= ddc_service
->ctx
->driver_context
;
5251 struct amdgpu_i2c_adapter
*i2c
;
5253 i2c
= kzalloc(sizeof(struct amdgpu_i2c_adapter
), GFP_KERNEL
);
5256 i2c
->base
.owner
= THIS_MODULE
;
5257 i2c
->base
.class = I2C_CLASS_DDC
;
5258 i2c
->base
.dev
.parent
= &adev
->pdev
->dev
;
5259 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
5260 snprintf(i2c
->base
.name
, sizeof(i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
5261 i2c_set_adapdata(&i2c
->base
, i2c
);
5262 i2c
->ddc_service
= ddc_service
;
5263 i2c
->ddc_service
->ddc_pin
->hw_info
.ddc_channel
= link_index
;
5270 * Note: this function assumes that dc_link_detect() was called for the
5271 * dc_link which will be represented by this aconnector.
5273 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
5274 struct amdgpu_dm_connector
*aconnector
,
5275 uint32_t link_index
,
5276 struct amdgpu_encoder
*aencoder
)
5280 struct dc
*dc
= dm
->dc
;
5281 struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
5282 struct amdgpu_i2c_adapter
*i2c
;
5284 link
->priv
= aconnector
;
5286 DRM_DEBUG_DRIVER("%s()\n", __func__
);
5288 i2c
= create_i2c(link
->ddc
, link
->link_index
, &res
);
5290 DRM_ERROR("Failed to create i2c adapter data\n");
5294 aconnector
->i2c
= i2c
;
5295 res
= i2c_add_adapter(&i2c
->base
);
5298 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
5302 connector_type
= to_drm_connector_type(link
->connector_signal
);
5304 res
= drm_connector_init(
5307 &amdgpu_dm_connector_funcs
,
5311 DRM_ERROR("connector_init failed\n");
5312 aconnector
->connector_id
= -1;
5316 drm_connector_helper_add(
5318 &amdgpu_dm_connector_helper_funcs
);
5320 amdgpu_dm_connector_init_helper(
5327 drm_connector_attach_encoder(
5328 &aconnector
->base
, &aencoder
->base
);
5330 drm_connector_register(&aconnector
->base
);
5331 #if defined(CONFIG_DEBUG_FS)
5332 connector_debugfs_init(aconnector
);
5333 aconnector
->debugfs_dpcd_address
= 0;
5334 aconnector
->debugfs_dpcd_size
= 0;
5337 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
5338 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
5339 amdgpu_dm_initialize_dp_connector(dm
, aconnector
);
5344 aconnector
->i2c
= NULL
;
5349 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
5351 switch (adev
->mode_info
.num_crtc
) {
5368 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
5369 struct amdgpu_encoder
*aencoder
,
5370 uint32_t link_index
)
5372 struct amdgpu_device
*adev
= dev
->dev_private
;
5374 int res
= drm_encoder_init(dev
,
5376 &amdgpu_dm_encoder_funcs
,
5377 DRM_MODE_ENCODER_TMDS
,
5380 aencoder
->base
.possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
5383 aencoder
->encoder_id
= link_index
;
5385 aencoder
->encoder_id
= -1;
5387 drm_encoder_helper_add(&aencoder
->base
, &amdgpu_dm_encoder_helper_funcs
);
5392 static void manage_dm_interrupts(struct amdgpu_device
*adev
,
5393 struct amdgpu_crtc
*acrtc
,
5397 * this is not correct translation but will work as soon as VBLANK
5398 * constant is the same as PFLIP
5401 amdgpu_display_crtc_idx_to_irq_type(
5406 drm_crtc_vblank_on(&acrtc
->base
);
5409 &adev
->pageflip_irq
,
5415 &adev
->pageflip_irq
,
5417 drm_crtc_vblank_off(&acrtc
->base
);
5422 is_scaling_state_different(const struct dm_connector_state
*dm_state
,
5423 const struct dm_connector_state
*old_dm_state
)
5425 if (dm_state
->scaling
!= old_dm_state
->scaling
)
5427 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
5428 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
5430 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
5431 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
5433 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
||
5434 dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
5439 #ifdef CONFIG_DRM_AMD_DC_HDCP
5440 static bool is_content_protection_different(struct drm_connector_state
*state
,
5441 const struct drm_connector_state
*old_state
,
5442 const struct drm_connector
*connector
, struct hdcp_workqueue
*hdcp_w
)
5444 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
5446 /* CP is being re enabled, ignore this */
5447 if (old_state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_ENABLED
&&
5448 state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_DESIRED
) {
5449 state
->content_protection
= DRM_MODE_CONTENT_PROTECTION_ENABLED
;
5453 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
5454 if (old_state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_UNDESIRED
&&
5455 state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_ENABLED
)
5456 state
->content_protection
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
5458 /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
5459 * hot-plug, headless s3, dpms
5461 if (state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_DESIRED
&& connector
->dpms
== DRM_MODE_DPMS_ON
&&
5462 aconnector
->dc_sink
!= NULL
)
5465 if (old_state
->content_protection
== state
->content_protection
)
5468 if (state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_UNDESIRED
)
5474 static void update_content_protection(struct drm_connector_state
*state
, const struct drm_connector
*connector
,
5475 struct hdcp_workqueue
*hdcp_w
)
5477 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
5479 if (state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_DESIRED
)
5480 hdcp_add_display(hdcp_w
, aconnector
->dc_link
->link_index
, aconnector
);
5481 else if (state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_UNDESIRED
)
5482 hdcp_remove_display(hdcp_w
, aconnector
->dc_link
->link_index
, aconnector
->base
.index
);
5486 static void remove_stream(struct amdgpu_device
*adev
,
5487 struct amdgpu_crtc
*acrtc
,
5488 struct dc_stream_state
*stream
)
5490 /* this is the update mode case */
5492 acrtc
->otg_inst
= -1;
5493 acrtc
->enabled
= false;
5496 static int get_cursor_position(struct drm_plane
*plane
, struct drm_crtc
*crtc
,
5497 struct dc_cursor_position
*position
)
5499 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
5501 int xorigin
= 0, yorigin
= 0;
5503 position
->enable
= false;
5507 if (!crtc
|| !plane
->state
->fb
)
5510 if ((plane
->state
->crtc_w
> amdgpu_crtc
->max_cursor_width
) ||
5511 (plane
->state
->crtc_h
> amdgpu_crtc
->max_cursor_height
)) {
5512 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
5514 plane
->state
->crtc_w
,
5515 plane
->state
->crtc_h
);
5519 x
= plane
->state
->crtc_x
;
5520 y
= plane
->state
->crtc_y
;
5522 if (x
<= -amdgpu_crtc
->max_cursor_width
||
5523 y
<= -amdgpu_crtc
->max_cursor_height
)
5526 if (crtc
->primary
->state
) {
5527 /* avivo cursor are offset into the total surface */
5528 x
+= crtc
->primary
->state
->src_x
>> 16;
5529 y
+= crtc
->primary
->state
->src_y
>> 16;
5533 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
5537 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
5540 position
->enable
= true;
5543 position
->x_hotspot
= xorigin
;
5544 position
->y_hotspot
= yorigin
;
5549 static void handle_cursor_update(struct drm_plane
*plane
,
5550 struct drm_plane_state
*old_plane_state
)
5552 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
5553 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(plane
->state
->fb
);
5554 struct drm_crtc
*crtc
= afb
? plane
->state
->crtc
: old_plane_state
->crtc
;
5555 struct dm_crtc_state
*crtc_state
= crtc
? to_dm_crtc_state(crtc
->state
) : NULL
;
5556 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
5557 uint64_t address
= afb
? afb
->address
: 0;
5558 struct dc_cursor_position position
;
5559 struct dc_cursor_attributes attributes
;
5562 if (!plane
->state
->fb
&& !old_plane_state
->fb
)
5565 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
5567 amdgpu_crtc
->crtc_id
,
5568 plane
->state
->crtc_w
,
5569 plane
->state
->crtc_h
);
5571 ret
= get_cursor_position(plane
, crtc
, &position
);
5575 if (!position
.enable
) {
5576 /* turn off cursor */
5577 if (crtc_state
&& crtc_state
->stream
) {
5578 mutex_lock(&adev
->dm
.dc_lock
);
5579 dc_stream_set_cursor_position(crtc_state
->stream
,
5581 mutex_unlock(&adev
->dm
.dc_lock
);
5586 amdgpu_crtc
->cursor_width
= plane
->state
->crtc_w
;
5587 amdgpu_crtc
->cursor_height
= plane
->state
->crtc_h
;
5589 memset(&attributes
, 0, sizeof(attributes
));
5590 attributes
.address
.high_part
= upper_32_bits(address
);
5591 attributes
.address
.low_part
= lower_32_bits(address
);
5592 attributes
.width
= plane
->state
->crtc_w
;
5593 attributes
.height
= plane
->state
->crtc_h
;
5594 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
5595 attributes
.rotation_angle
= 0;
5596 attributes
.attribute_flags
.value
= 0;
5598 attributes
.pitch
= attributes
.width
;
5600 if (crtc_state
->stream
) {
5601 mutex_lock(&adev
->dm
.dc_lock
);
5602 if (!dc_stream_set_cursor_attributes(crtc_state
->stream
,
5604 DRM_ERROR("DC failed to set cursor attributes\n");
5606 if (!dc_stream_set_cursor_position(crtc_state
->stream
,
5608 DRM_ERROR("DC failed to set cursor position\n");
5609 mutex_unlock(&adev
->dm
.dc_lock
);
5613 static void prepare_flip_isr(struct amdgpu_crtc
*acrtc
)
5616 assert_spin_locked(&acrtc
->base
.dev
->event_lock
);
5617 WARN_ON(acrtc
->event
);
5619 acrtc
->event
= acrtc
->base
.state
->event
;
5621 /* Set the flip status */
5622 acrtc
->pflip_status
= AMDGPU_FLIP_SUBMITTED
;
5624 /* Mark this event as consumed */
5625 acrtc
->base
.state
->event
= NULL
;
5627 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
5631 static void update_freesync_state_on_stream(
5632 struct amdgpu_display_manager
*dm
,
5633 struct dm_crtc_state
*new_crtc_state
,
5634 struct dc_stream_state
*new_stream
,
5635 struct dc_plane_state
*surface
,
5636 u32 flip_timestamp_in_us
)
5638 struct mod_vrr_params vrr_params
;
5639 struct dc_info_packet vrr_infopacket
= {0};
5640 struct amdgpu_device
*adev
= dm
->adev
;
5641 unsigned long flags
;
5647 * TODO: Determine why min/max totals and vrefresh can be 0 here.
5648 * For now it's sufficient to just guard against these conditions.
5651 if (!new_stream
->timing
.h_total
|| !new_stream
->timing
.v_total
)
5654 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
5655 vrr_params
= new_crtc_state
->vrr_params
;
5658 mod_freesync_handle_preflip(
5659 dm
->freesync_module
,
5662 flip_timestamp_in_us
,
5665 if (adev
->family
< AMDGPU_FAMILY_AI
&&
5666 amdgpu_dm_vrr_active(new_crtc_state
)) {
5667 mod_freesync_handle_v_update(dm
->freesync_module
,
5668 new_stream
, &vrr_params
);
5670 /* Need to call this before the frame ends. */
5671 dc_stream_adjust_vmin_vmax(dm
->dc
,
5672 new_crtc_state
->stream
,
5673 &vrr_params
.adjust
);
5677 mod_freesync_build_vrr_infopacket(
5678 dm
->freesync_module
,
5682 TRANSFER_FUNC_UNKNOWN
,
5685 new_crtc_state
->freesync_timing_changed
|=
5686 (memcmp(&new_crtc_state
->vrr_params
.adjust
,
5688 sizeof(vrr_params
.adjust
)) != 0);
5690 new_crtc_state
->freesync_vrr_info_changed
|=
5691 (memcmp(&new_crtc_state
->vrr_infopacket
,
5693 sizeof(vrr_infopacket
)) != 0);
5695 new_crtc_state
->vrr_params
= vrr_params
;
5696 new_crtc_state
->vrr_infopacket
= vrr_infopacket
;
5698 new_stream
->adjust
= new_crtc_state
->vrr_params
.adjust
;
5699 new_stream
->vrr_infopacket
= vrr_infopacket
;
5701 if (new_crtc_state
->freesync_vrr_info_changed
)
5702 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
5703 new_crtc_state
->base
.crtc
->base
.id
,
5704 (int)new_crtc_state
->base
.vrr_enabled
,
5705 (int)vrr_params
.state
);
5707 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
5710 static void pre_update_freesync_state_on_stream(
5711 struct amdgpu_display_manager
*dm
,
5712 struct dm_crtc_state
*new_crtc_state
)
5714 struct dc_stream_state
*new_stream
= new_crtc_state
->stream
;
5715 struct mod_vrr_params vrr_params
;
5716 struct mod_freesync_config config
= new_crtc_state
->freesync_config
;
5717 struct amdgpu_device
*adev
= dm
->adev
;
5718 unsigned long flags
;
5724 * TODO: Determine why min/max totals and vrefresh can be 0 here.
5725 * For now it's sufficient to just guard against these conditions.
5727 if (!new_stream
->timing
.h_total
|| !new_stream
->timing
.v_total
)
5730 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
5731 vrr_params
= new_crtc_state
->vrr_params
;
5733 if (new_crtc_state
->vrr_supported
&&
5734 config
.min_refresh_in_uhz
&&
5735 config
.max_refresh_in_uhz
) {
5736 config
.state
= new_crtc_state
->base
.vrr_enabled
?
5737 VRR_STATE_ACTIVE_VARIABLE
:
5740 config
.state
= VRR_STATE_UNSUPPORTED
;
5743 mod_freesync_build_vrr_params(dm
->freesync_module
,
5745 &config
, &vrr_params
);
5747 new_crtc_state
->freesync_timing_changed
|=
5748 (memcmp(&new_crtc_state
->vrr_params
.adjust
,
5750 sizeof(vrr_params
.adjust
)) != 0);
5752 new_crtc_state
->vrr_params
= vrr_params
;
5753 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
5756 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state
*old_state
,
5757 struct dm_crtc_state
*new_state
)
5759 bool old_vrr_active
= amdgpu_dm_vrr_active(old_state
);
5760 bool new_vrr_active
= amdgpu_dm_vrr_active(new_state
);
5762 if (!old_vrr_active
&& new_vrr_active
) {
5763 /* Transition VRR inactive -> active:
5764 * While VRR is active, we must not disable vblank irq, as a
5765 * reenable after disable would compute bogus vblank/pflip
5766 * timestamps if it likely happened inside display front-porch.
5768 * We also need vupdate irq for the actual core vblank handling
5771 dm_set_vupdate_irq(new_state
->base
.crtc
, true);
5772 drm_crtc_vblank_get(new_state
->base
.crtc
);
5773 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
5774 __func__
, new_state
->base
.crtc
->base
.id
);
5775 } else if (old_vrr_active
&& !new_vrr_active
) {
5776 /* Transition VRR active -> inactive:
5777 * Allow vblank irq disable again for fixed refresh rate.
5779 dm_set_vupdate_irq(new_state
->base
.crtc
, false);
5780 drm_crtc_vblank_put(new_state
->base
.crtc
);
5781 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
5782 __func__
, new_state
->base
.crtc
->base
.id
);
5786 static void amdgpu_dm_commit_cursors(struct drm_atomic_state
*state
)
5788 struct drm_plane
*plane
;
5789 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
5793 * TODO: Make this per-stream so we don't issue redundant updates for
5794 * commits with multiple streams.
5796 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
,
5798 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
5799 handle_cursor_update(plane
, old_plane_state
);
5802 static void amdgpu_dm_commit_planes(struct drm_atomic_state
*state
,
5803 struct dc_state
*dc_state
,
5804 struct drm_device
*dev
,
5805 struct amdgpu_display_manager
*dm
,
5806 struct drm_crtc
*pcrtc
,
5807 bool wait_for_vblank
)
5810 uint64_t timestamp_ns
;
5811 struct drm_plane
*plane
;
5812 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
5813 struct amdgpu_crtc
*acrtc_attach
= to_amdgpu_crtc(pcrtc
);
5814 struct drm_crtc_state
*new_pcrtc_state
=
5815 drm_atomic_get_new_crtc_state(state
, pcrtc
);
5816 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(new_pcrtc_state
);
5817 struct dm_crtc_state
*dm_old_crtc_state
=
5818 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state
, pcrtc
));
5819 int planes_count
= 0, vpos
, hpos
;
5821 unsigned long flags
;
5822 struct amdgpu_bo
*abo
;
5823 uint64_t tiling_flags
;
5824 uint32_t target_vblank
, last_flip_vblank
;
5825 bool vrr_active
= amdgpu_dm_vrr_active(acrtc_state
);
5826 bool pflip_present
= false;
5828 struct dc_surface_update surface_updates
[MAX_SURFACES
];
5829 struct dc_plane_info plane_infos
[MAX_SURFACES
];
5830 struct dc_scaling_info scaling_infos
[MAX_SURFACES
];
5831 struct dc_flip_addrs flip_addrs
[MAX_SURFACES
];
5832 struct dc_stream_update stream_update
;
5835 bundle
= kzalloc(sizeof(*bundle
), GFP_KERNEL
);
5838 dm_error("Failed to allocate update bundle\n");
5843 * Disable the cursor first if we're disabling all the planes.
5844 * It'll remain on the screen after the planes are re-enabled
5847 if (acrtc_state
->active_planes
== 0)
5848 amdgpu_dm_commit_cursors(state
);
5850 /* update planes when needed */
5851 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
5852 struct drm_crtc
*crtc
= new_plane_state
->crtc
;
5853 struct drm_crtc_state
*new_crtc_state
;
5854 struct drm_framebuffer
*fb
= new_plane_state
->fb
;
5855 bool plane_needs_flip
;
5856 struct dc_plane_state
*dc_plane
;
5857 struct dm_plane_state
*dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
5859 /* Cursor plane is handled after stream updates */
5860 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
5863 if (!fb
|| !crtc
|| pcrtc
!= crtc
)
5866 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, crtc
);
5867 if (!new_crtc_state
->active
)
5870 dc_plane
= dm_new_plane_state
->dc_state
;
5872 bundle
->surface_updates
[planes_count
].surface
= dc_plane
;
5873 if (new_pcrtc_state
->color_mgmt_changed
) {
5874 bundle
->surface_updates
[planes_count
].gamma
= dc_plane
->gamma_correction
;
5875 bundle
->surface_updates
[planes_count
].in_transfer_func
= dc_plane
->in_transfer_func
;
5878 fill_dc_scaling_info(new_plane_state
,
5879 &bundle
->scaling_infos
[planes_count
]);
5881 bundle
->surface_updates
[planes_count
].scaling_info
=
5882 &bundle
->scaling_infos
[planes_count
];
5884 plane_needs_flip
= old_plane_state
->fb
&& new_plane_state
->fb
;
5886 pflip_present
= pflip_present
|| plane_needs_flip
;
5888 if (!plane_needs_flip
) {
5893 abo
= gem_to_amdgpu_bo(fb
->obj
[0]);
5896 * Wait for all fences on this FB. Do limited wait to avoid
5897 * deadlock during GPU reset when this fence will not signal
5898 * but we hold reservation lock for the BO.
5900 r
= dma_resv_wait_timeout_rcu(abo
->tbo
.base
.resv
, true,
5902 msecs_to_jiffies(5000));
5903 if (unlikely(r
<= 0))
5904 DRM_ERROR("Waiting for fences timed out!");
5907 * TODO This might fail and hence better not used, wait
5908 * explicitly on fences instead
5909 * and in general should be called for
5910 * blocking commit to as per framework helpers
5912 r
= amdgpu_bo_reserve(abo
, true);
5913 if (unlikely(r
!= 0))
5914 DRM_ERROR("failed to reserve buffer before flip\n");
5916 amdgpu_bo_get_tiling_flags(abo
, &tiling_flags
);
5918 amdgpu_bo_unreserve(abo
);
5920 fill_dc_plane_info_and_addr(
5921 dm
->adev
, new_plane_state
, tiling_flags
,
5922 &bundle
->plane_infos
[planes_count
],
5923 &bundle
->flip_addrs
[planes_count
].address
);
5925 bundle
->surface_updates
[planes_count
].plane_info
=
5926 &bundle
->plane_infos
[planes_count
];
5929 * Only allow immediate flips for fast updates that don't
5930 * change FB pitch, DCC state, rotation or mirroing.
5932 bundle
->flip_addrs
[planes_count
].flip_immediate
=
5933 crtc
->state
->async_flip
&&
5934 acrtc_state
->update_type
== UPDATE_TYPE_FAST
;
5936 timestamp_ns
= ktime_get_ns();
5937 bundle
->flip_addrs
[planes_count
].flip_timestamp_in_us
= div_u64(timestamp_ns
, 1000);
5938 bundle
->surface_updates
[planes_count
].flip_addr
= &bundle
->flip_addrs
[planes_count
];
5939 bundle
->surface_updates
[planes_count
].surface
= dc_plane
;
5941 if (!bundle
->surface_updates
[planes_count
].surface
) {
5942 DRM_ERROR("No surface for CRTC: id=%d\n",
5943 acrtc_attach
->crtc_id
);
5947 if (plane
== pcrtc
->primary
)
5948 update_freesync_state_on_stream(
5951 acrtc_state
->stream
,
5953 bundle
->flip_addrs
[planes_count
].flip_timestamp_in_us
);
5955 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
5957 bundle
->flip_addrs
[planes_count
].address
.grph
.addr
.high_part
,
5958 bundle
->flip_addrs
[planes_count
].address
.grph
.addr
.low_part
);
5964 if (pflip_present
) {
5966 /* Use old throttling in non-vrr fixed refresh rate mode
5967 * to keep flip scheduling based on target vblank counts
5968 * working in a backwards compatible way, e.g., for
5969 * clients using the GLX_OML_sync_control extension or
5970 * DRI3/Present extension with defined target_msc.
5972 last_flip_vblank
= amdgpu_get_vblank_counter_kms(dm
->ddev
, acrtc_attach
->crtc_id
);
5975 /* For variable refresh rate mode only:
5976 * Get vblank of last completed flip to avoid > 1 vrr
5977 * flips per video frame by use of throttling, but allow
5978 * flip programming anywhere in the possibly large
5979 * variable vrr vblank interval for fine-grained flip
5980 * timing control and more opportunity to avoid stutter
5981 * on late submission of flips.
5983 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
5984 last_flip_vblank
= acrtc_attach
->last_flip_vblank
;
5985 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
5988 target_vblank
= last_flip_vblank
+ wait_for_vblank
;
5991 * Wait until we're out of the vertical blank period before the one
5992 * targeted by the flip
5994 while ((acrtc_attach
->enabled
&&
5995 (amdgpu_display_get_crtc_scanoutpos(dm
->ddev
, acrtc_attach
->crtc_id
,
5996 0, &vpos
, &hpos
, NULL
,
5997 NULL
, &pcrtc
->hwmode
)
5998 & (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
)) ==
5999 (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
) &&
6000 (int)(target_vblank
-
6001 amdgpu_get_vblank_counter_kms(dm
->ddev
, acrtc_attach
->crtc_id
)) > 0)) {
6002 usleep_range(1000, 1100);
6005 if (acrtc_attach
->base
.state
->event
) {
6006 drm_crtc_vblank_get(pcrtc
);
6008 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
6010 WARN_ON(acrtc_attach
->pflip_status
!= AMDGPU_FLIP_NONE
);
6011 prepare_flip_isr(acrtc_attach
);
6013 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
6016 if (acrtc_state
->stream
) {
6017 if (acrtc_state
->freesync_vrr_info_changed
)
6018 bundle
->stream_update
.vrr_infopacket
=
6019 &acrtc_state
->stream
->vrr_infopacket
;
6023 /* Update the planes if changed or disable if we don't have any. */
6024 if ((planes_count
|| acrtc_state
->active_planes
== 0) &&
6025 acrtc_state
->stream
) {
6026 bundle
->stream_update
.stream
= acrtc_state
->stream
;
6027 if (new_pcrtc_state
->mode_changed
) {
6028 bundle
->stream_update
.src
= acrtc_state
->stream
->src
;
6029 bundle
->stream_update
.dst
= acrtc_state
->stream
->dst
;
6032 if (new_pcrtc_state
->color_mgmt_changed
) {
6034 * TODO: This isn't fully correct since we've actually
6035 * already modified the stream in place.
6037 bundle
->stream_update
.gamut_remap
=
6038 &acrtc_state
->stream
->gamut_remap_matrix
;
6039 bundle
->stream_update
.output_csc_transform
=
6040 &acrtc_state
->stream
->csc_color_matrix
;
6041 bundle
->stream_update
.out_transfer_func
=
6042 acrtc_state
->stream
->out_transfer_func
;
6045 acrtc_state
->stream
->abm_level
= acrtc_state
->abm_level
;
6046 if (acrtc_state
->abm_level
!= dm_old_crtc_state
->abm_level
)
6047 bundle
->stream_update
.abm_level
= &acrtc_state
->abm_level
;
6050 * If FreeSync state on the stream has changed then we need to
6051 * re-adjust the min/max bounds now that DC doesn't handle this
6052 * as part of commit.
6054 if (amdgpu_dm_vrr_active(dm_old_crtc_state
) !=
6055 amdgpu_dm_vrr_active(acrtc_state
)) {
6056 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
6057 dc_stream_adjust_vmin_vmax(
6058 dm
->dc
, acrtc_state
->stream
,
6059 &acrtc_state
->vrr_params
.adjust
);
6060 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
6063 mutex_lock(&dm
->dc_lock
);
6064 dc_commit_updates_for_stream(dm
->dc
,
6065 bundle
->surface_updates
,
6067 acrtc_state
->stream
,
6068 &bundle
->stream_update
,
6070 mutex_unlock(&dm
->dc_lock
);
6074 * Update cursor state *after* programming all the planes.
6075 * This avoids redundant programming in the case where we're going
6076 * to be disabling a single plane - those pipes are being disabled.
6078 if (acrtc_state
->active_planes
)
6079 amdgpu_dm_commit_cursors(state
);
6085 static void amdgpu_dm_commit_audio(struct drm_device
*dev
,
6086 struct drm_atomic_state
*state
)
6088 struct amdgpu_device
*adev
= dev
->dev_private
;
6089 struct amdgpu_dm_connector
*aconnector
;
6090 struct drm_connector
*connector
;
6091 struct drm_connector_state
*old_con_state
, *new_con_state
;
6092 struct drm_crtc_state
*new_crtc_state
;
6093 struct dm_crtc_state
*new_dm_crtc_state
;
6094 const struct dc_stream_status
*status
;
6097 /* Notify device removals. */
6098 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
6099 if (old_con_state
->crtc
!= new_con_state
->crtc
) {
6100 /* CRTC changes require notification. */
6104 if (!new_con_state
->crtc
)
6107 new_crtc_state
= drm_atomic_get_new_crtc_state(
6108 state
, new_con_state
->crtc
);
6110 if (!new_crtc_state
)
6113 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
6117 aconnector
= to_amdgpu_dm_connector(connector
);
6119 mutex_lock(&adev
->dm
.audio_lock
);
6120 inst
= aconnector
->audio_inst
;
6121 aconnector
->audio_inst
= -1;
6122 mutex_unlock(&adev
->dm
.audio_lock
);
6124 amdgpu_dm_audio_eld_notify(adev
, inst
);
6127 /* Notify audio device additions. */
6128 for_each_new_connector_in_state(state
, connector
, new_con_state
, i
) {
6129 if (!new_con_state
->crtc
)
6132 new_crtc_state
= drm_atomic_get_new_crtc_state(
6133 state
, new_con_state
->crtc
);
6135 if (!new_crtc_state
)
6138 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
6141 new_dm_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6142 if (!new_dm_crtc_state
->stream
)
6145 status
= dc_stream_get_status(new_dm_crtc_state
->stream
);
6149 aconnector
= to_amdgpu_dm_connector(connector
);
6151 mutex_lock(&adev
->dm
.audio_lock
);
6152 inst
= status
->audio_inst
;
6153 aconnector
->audio_inst
= inst
;
6154 mutex_unlock(&adev
->dm
.audio_lock
);
6156 amdgpu_dm_audio_eld_notify(adev
, inst
);
6161 * Enable interrupts on CRTCs that are newly active, undergone
6162 * a modeset, or have active planes again.
6164 * Done in two passes, based on the for_modeset flag:
6165 * Pass 1: For CRTCs going through modeset
6166 * Pass 2: For CRTCs going from 0 to n active planes
6168 * Interrupts can only be enabled after the planes are programmed,
6169 * so this requires a two-pass approach since we don't want to
6170 * just defer the interrupts until after commit planes every time.
6172 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device
*dev
,
6173 struct drm_atomic_state
*state
,
6176 struct amdgpu_device
*adev
= dev
->dev_private
;
6177 struct drm_crtc
*crtc
;
6178 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
6180 #ifdef CONFIG_DEBUG_FS
6181 enum amdgpu_dm_pipe_crc_source source
;
6184 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
,
6185 new_crtc_state
, i
) {
6186 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
6187 struct dm_crtc_state
*dm_new_crtc_state
=
6188 to_dm_crtc_state(new_crtc_state
);
6189 struct dm_crtc_state
*dm_old_crtc_state
=
6190 to_dm_crtc_state(old_crtc_state
);
6191 bool modeset
= drm_atomic_crtc_needs_modeset(new_crtc_state
);
6194 run_pass
= (for_modeset
&& modeset
) ||
6195 (!for_modeset
&& !modeset
&&
6196 !dm_old_crtc_state
->interrupts_enabled
);
6201 if (!dm_new_crtc_state
->interrupts_enabled
)
6204 manage_dm_interrupts(adev
, acrtc
, true);
6206 #ifdef CONFIG_DEBUG_FS
6207 /* The stream has changed so CRC capture needs to re-enabled. */
6208 source
= dm_new_crtc_state
->crc_src
;
6209 if (amdgpu_dm_is_valid_crc_source(source
)) {
6210 amdgpu_dm_crtc_configure_crc_source(
6211 crtc
, dm_new_crtc_state
,
6212 dm_new_crtc_state
->crc_src
);
6219 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6220 * @crtc_state: the DRM CRTC state
6221 * @stream_state: the DC stream state.
6223 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
6224 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6226 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state
*crtc_state
,
6227 struct dc_stream_state
*stream_state
)
6229 stream_state
->mode_changed
= drm_atomic_crtc_needs_modeset(crtc_state
);
6232 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
6233 struct drm_atomic_state
*state
,
6236 struct drm_crtc
*crtc
;
6237 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
6238 struct amdgpu_device
*adev
= dev
->dev_private
;
6242 * We evade vblank and pflip interrupts on CRTCs that are undergoing
6243 * a modeset, being disabled, or have no active planes.
6245 * It's done in atomic commit rather than commit tail for now since
6246 * some of these interrupt handlers access the current CRTC state and
6247 * potentially the stream pointer itself.
6249 * Since the atomic state is swapped within atomic commit and not within
6250 * commit tail this would leave to new state (that hasn't been committed yet)
6251 * being accesssed from within the handlers.
6253 * TODO: Fix this so we can do this in commit tail and not have to block
6256 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
6257 struct dm_crtc_state
*dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
6258 struct dm_crtc_state
*dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6259 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
6261 if (dm_old_crtc_state
->interrupts_enabled
&&
6262 (!dm_new_crtc_state
->interrupts_enabled
||
6263 drm_atomic_crtc_needs_modeset(new_crtc_state
)))
6264 manage_dm_interrupts(adev
, acrtc
, false);
6267 * Add check here for SoC's that support hardware cursor plane, to
6268 * unset legacy_cursor_update
6271 return drm_atomic_helper_commit(dev
, state
, nonblock
);
6273 /*TODO Handle EINTR, reenable IRQ*/
6277 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
6278 * @state: The atomic state to commit
6280 * This will tell DC to commit the constructed DC state from atomic_check,
6281 * programming the hardware. Any failures here implies a hardware failure, since
6282 * atomic check should have filtered anything non-kosher.
6284 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
)
6286 struct drm_device
*dev
= state
->dev
;
6287 struct amdgpu_device
*adev
= dev
->dev_private
;
6288 struct amdgpu_display_manager
*dm
= &adev
->dm
;
6289 struct dm_atomic_state
*dm_state
;
6290 struct dc_state
*dc_state
= NULL
, *dc_state_temp
= NULL
;
6292 struct drm_crtc
*crtc
;
6293 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
6294 unsigned long flags
;
6295 bool wait_for_vblank
= true;
6296 struct drm_connector
*connector
;
6297 struct drm_connector_state
*old_con_state
, *new_con_state
;
6298 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
6299 int crtc_disable_count
= 0;
6301 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
6303 dm_state
= dm_atomic_get_new_state(state
);
6304 if (dm_state
&& dm_state
->context
) {
6305 dc_state
= dm_state
->context
;
6307 /* No state changes, retain current state. */
6308 dc_state_temp
= dc_create_state(dm
->dc
);
6309 ASSERT(dc_state_temp
);
6310 dc_state
= dc_state_temp
;
6311 dc_resource_state_copy_construct_current(dm
->dc
, dc_state
);
6314 /* update changed items */
6315 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
6316 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
6318 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6319 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
6322 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
6323 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
6324 "connectors_changed:%d\n",
6326 new_crtc_state
->enable
,
6327 new_crtc_state
->active
,
6328 new_crtc_state
->planes_changed
,
6329 new_crtc_state
->mode_changed
,
6330 new_crtc_state
->active_changed
,
6331 new_crtc_state
->connectors_changed
);
6333 /* Copy all transient state flags into dc state */
6334 if (dm_new_crtc_state
->stream
) {
6335 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state
->base
,
6336 dm_new_crtc_state
->stream
);
6339 /* handles headless hotplug case, updating new_state and
6340 * aconnector as needed
6343 if (modeset_required(new_crtc_state
, dm_new_crtc_state
->stream
, dm_old_crtc_state
->stream
)) {
6345 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc
->crtc_id
, acrtc
);
6347 if (!dm_new_crtc_state
->stream
) {
6349 * this could happen because of issues with
6350 * userspace notifications delivery.
6351 * In this case userspace tries to set mode on
6352 * display which is disconnected in fact.
6353 * dc_sink is NULL in this case on aconnector.
6354 * We expect reset mode will come soon.
6356 * This can also happen when unplug is done
6357 * during resume sequence ended
6359 * In this case, we want to pretend we still
6360 * have a sink to keep the pipe running so that
6361 * hw state is consistent with the sw state
6363 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
6364 __func__
, acrtc
->base
.base
.id
);
6368 if (dm_old_crtc_state
->stream
)
6369 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
6371 pm_runtime_get_noresume(dev
->dev
);
6373 acrtc
->enabled
= true;
6374 acrtc
->hw_mode
= new_crtc_state
->mode
;
6375 crtc
->hwmode
= new_crtc_state
->mode
;
6376 } else if (modereset_required(new_crtc_state
)) {
6377 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc
->crtc_id
, acrtc
);
6379 /* i.e. reset mode */
6380 if (dm_old_crtc_state
->stream
)
6381 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
6383 } /* for_each_crtc_in_state() */
6386 dm_enable_per_frame_crtc_master_sync(dc_state
);
6387 mutex_lock(&dm
->dc_lock
);
6388 WARN_ON(!dc_commit_state(dm
->dc
, dc_state
));
6389 mutex_unlock(&dm
->dc_lock
);
6392 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
6393 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
6395 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6397 if (dm_new_crtc_state
->stream
!= NULL
) {
6398 const struct dc_stream_status
*status
=
6399 dc_stream_get_status(dm_new_crtc_state
->stream
);
6402 status
= dc_stream_get_status_from_state(dc_state
,
6403 dm_new_crtc_state
->stream
);
6406 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state
->stream
, acrtc
);
6408 acrtc
->otg_inst
= status
->primary_otg_inst
;
6411 #ifdef CONFIG_DRM_AMD_DC_HDCP
6412 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
6413 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
6414 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
6415 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
6417 new_crtc_state
= NULL
;
6420 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, &acrtc
->base
);
6422 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6424 if (dm_new_crtc_state
&& dm_new_crtc_state
->stream
== NULL
&&
6425 connector
->state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_ENABLED
) {
6426 hdcp_reset_display(adev
->dm
.hdcp_workqueue
, aconnector
->dc_link
->link_index
);
6427 new_con_state
->content_protection
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
6431 if (is_content_protection_different(new_con_state
, old_con_state
, connector
, adev
->dm
.hdcp_workqueue
))
6432 update_content_protection(new_con_state
, connector
, adev
->dm
.hdcp_workqueue
);
6436 /* Handle connector state changes */
6437 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
6438 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
6439 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
6440 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
6441 struct dc_surface_update dummy_updates
[MAX_SURFACES
];
6442 struct dc_stream_update stream_update
;
6443 struct dc_info_packet hdr_packet
;
6444 struct dc_stream_status
*status
= NULL
;
6445 bool abm_changed
, hdr_changed
, scaling_changed
;
6447 memset(&dummy_updates
, 0, sizeof(dummy_updates
));
6448 memset(&stream_update
, 0, sizeof(stream_update
));
6451 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, &acrtc
->base
);
6452 old_crtc_state
= drm_atomic_get_old_crtc_state(state
, &acrtc
->base
);
6455 /* Skip any modesets/resets */
6456 if (!acrtc
|| drm_atomic_crtc_needs_modeset(new_crtc_state
))
6459 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6460 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
6462 scaling_changed
= is_scaling_state_different(dm_new_con_state
,
6465 abm_changed
= dm_new_crtc_state
->abm_level
!=
6466 dm_old_crtc_state
->abm_level
;
6469 is_hdr_metadata_different(old_con_state
, new_con_state
);
6471 if (!scaling_changed
&& !abm_changed
&& !hdr_changed
)
6474 stream_update
.stream
= dm_new_crtc_state
->stream
;
6475 if (scaling_changed
) {
6476 update_stream_scaling_settings(&dm_new_con_state
->base
.crtc
->mode
,
6477 dm_new_con_state
, dm_new_crtc_state
->stream
);
6479 stream_update
.src
= dm_new_crtc_state
->stream
->src
;
6480 stream_update
.dst
= dm_new_crtc_state
->stream
->dst
;
6484 dm_new_crtc_state
->stream
->abm_level
= dm_new_crtc_state
->abm_level
;
6486 stream_update
.abm_level
= &dm_new_crtc_state
->abm_level
;
6490 fill_hdr_info_packet(new_con_state
, &hdr_packet
);
6491 stream_update
.hdr_static_metadata
= &hdr_packet
;
6494 status
= dc_stream_get_status(dm_new_crtc_state
->stream
);
6496 WARN_ON(!status
->plane_count
);
6499 * TODO: DC refuses to perform stream updates without a dc_surface_update.
6500 * Here we create an empty update on each plane.
6501 * To fix this, DC should permit updating only stream properties.
6503 for (j
= 0; j
< status
->plane_count
; j
++)
6504 dummy_updates
[j
].surface
= status
->plane_states
[0];
6507 mutex_lock(&dm
->dc_lock
);
6508 dc_commit_updates_for_stream(dm
->dc
,
6510 status
->plane_count
,
6511 dm_new_crtc_state
->stream
,
6514 mutex_unlock(&dm
->dc_lock
);
6517 /* Count number of newly disabled CRTCs for dropping PM refs later. */
6518 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
,
6519 new_crtc_state
, i
) {
6520 if (old_crtc_state
->active
&& !new_crtc_state
->active
)
6521 crtc_disable_count
++;
6523 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6524 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
6526 /* Update freesync active state. */
6527 pre_update_freesync_state_on_stream(dm
, dm_new_crtc_state
);
6529 /* Handle vrr on->off / off->on transitions */
6530 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state
,
6534 /* Enable interrupts for CRTCs going through a modeset. */
6535 amdgpu_dm_enable_crtc_interrupts(dev
, state
, true);
6537 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, j
)
6538 if (new_crtc_state
->async_flip
)
6539 wait_for_vblank
= false;
6541 /* update planes when needed per crtc*/
6542 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, j
) {
6543 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6545 if (dm_new_crtc_state
->stream
)
6546 amdgpu_dm_commit_planes(state
, dc_state
, dev
,
6547 dm
, crtc
, wait_for_vblank
);
6550 /* Enable interrupts for CRTCs going from 0 to n active planes. */
6551 amdgpu_dm_enable_crtc_interrupts(dev
, state
, false);
6553 /* Update audio instances for each connector. */
6554 amdgpu_dm_commit_audio(dev
, state
);
6557 * send vblank event on all events not handled in flip and
6558 * mark consumed event for drm_atomic_helper_commit_hw_done
6560 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
6561 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
6563 if (new_crtc_state
->event
)
6564 drm_send_event_locked(dev
, &new_crtc_state
->event
->base
);
6566 new_crtc_state
->event
= NULL
;
6568 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
6570 /* Signal HW programming completion */
6571 drm_atomic_helper_commit_hw_done(state
);
6573 if (wait_for_vblank
)
6574 drm_atomic_helper_wait_for_flip_done(dev
, state
);
6576 drm_atomic_helper_cleanup_planes(dev
, state
);
6579 * Finally, drop a runtime PM reference for each newly disabled CRTC,
6580 * so we can put the GPU into runtime suspend if we're not driving any
6583 for (i
= 0; i
< crtc_disable_count
; i
++)
6584 pm_runtime_put_autosuspend(dev
->dev
);
6585 pm_runtime_mark_last_busy(dev
->dev
);
6588 dc_release_state(dc_state_temp
);
6592 static int dm_force_atomic_commit(struct drm_connector
*connector
)
6595 struct drm_device
*ddev
= connector
->dev
;
6596 struct drm_atomic_state
*state
= drm_atomic_state_alloc(ddev
);
6597 struct amdgpu_crtc
*disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
6598 struct drm_plane
*plane
= disconnected_acrtc
->base
.primary
;
6599 struct drm_connector_state
*conn_state
;
6600 struct drm_crtc_state
*crtc_state
;
6601 struct drm_plane_state
*plane_state
;
6606 state
->acquire_ctx
= ddev
->mode_config
.acquire_ctx
;
6608 /* Construct an atomic state to restore previous display setting */
6611 * Attach connectors to drm_atomic_state
6613 conn_state
= drm_atomic_get_connector_state(state
, connector
);
6615 ret
= PTR_ERR_OR_ZERO(conn_state
);
6619 /* Attach crtc to drm_atomic_state*/
6620 crtc_state
= drm_atomic_get_crtc_state(state
, &disconnected_acrtc
->base
);
6622 ret
= PTR_ERR_OR_ZERO(crtc_state
);
6626 /* force a restore */
6627 crtc_state
->mode_changed
= true;
6629 /* Attach plane to drm_atomic_state */
6630 plane_state
= drm_atomic_get_plane_state(state
, plane
);
6632 ret
= PTR_ERR_OR_ZERO(plane_state
);
6637 /* Call commit internally with the state we just constructed */
6638 ret
= drm_atomic_commit(state
);
6643 DRM_ERROR("Restoring old state failed with %i\n", ret
);
6644 drm_atomic_state_put(state
);
6650 * This function handles all cases when set mode does not come upon hotplug.
6651 * This includes when a display is unplugged then plugged back into the
6652 * same port and when running without usermode desktop manager supprot
6654 void dm_restore_drm_connector_state(struct drm_device
*dev
,
6655 struct drm_connector
*connector
)
6657 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
6658 struct amdgpu_crtc
*disconnected_acrtc
;
6659 struct dm_crtc_state
*acrtc_state
;
6661 if (!aconnector
->dc_sink
|| !connector
->state
|| !connector
->encoder
)
6664 disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
6665 if (!disconnected_acrtc
)
6668 acrtc_state
= to_dm_crtc_state(disconnected_acrtc
->base
.state
);
6669 if (!acrtc_state
->stream
)
6673 * If the previous sink is not released and different from the current,
6674 * we deduce we are in a state where we can not rely on usermode call
6675 * to turn on the display, so we do it here
6677 if (acrtc_state
->stream
->sink
!= aconnector
->dc_sink
)
6678 dm_force_atomic_commit(&aconnector
->base
);
6682 * Grabs all modesetting locks to serialize against any blocking commits,
6683 * Waits for completion of all non blocking commits.
6685 static int do_aquire_global_lock(struct drm_device
*dev
,
6686 struct drm_atomic_state
*state
)
6688 struct drm_crtc
*crtc
;
6689 struct drm_crtc_commit
*commit
;
6693 * Adding all modeset locks to aquire_ctx will
6694 * ensure that when the framework release it the
6695 * extra locks we are locking here will get released to
6697 ret
= drm_modeset_lock_all_ctx(dev
, state
->acquire_ctx
);
6701 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
6702 spin_lock(&crtc
->commit_lock
);
6703 commit
= list_first_entry_or_null(&crtc
->commit_list
,
6704 struct drm_crtc_commit
, commit_entry
);
6706 drm_crtc_commit_get(commit
);
6707 spin_unlock(&crtc
->commit_lock
);
6713 * Make sure all pending HW programming completed and
6716 ret
= wait_for_completion_interruptible_timeout(&commit
->hw_done
, 10*HZ
);
6719 ret
= wait_for_completion_interruptible_timeout(
6720 &commit
->flip_done
, 10*HZ
);
6723 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
6724 "timed out\n", crtc
->base
.id
, crtc
->name
);
6726 drm_crtc_commit_put(commit
);
6729 return ret
< 0 ? ret
: 0;
6732 static void get_freesync_config_for_crtc(
6733 struct dm_crtc_state
*new_crtc_state
,
6734 struct dm_connector_state
*new_con_state
)
6736 struct mod_freesync_config config
= {0};
6737 struct amdgpu_dm_connector
*aconnector
=
6738 to_amdgpu_dm_connector(new_con_state
->base
.connector
);
6739 struct drm_display_mode
*mode
= &new_crtc_state
->base
.mode
;
6740 int vrefresh
= drm_mode_vrefresh(mode
);
6742 new_crtc_state
->vrr_supported
= new_con_state
->freesync_capable
&&
6743 vrefresh
>= aconnector
->min_vfreq
&&
6744 vrefresh
<= aconnector
->max_vfreq
;
6746 if (new_crtc_state
->vrr_supported
) {
6747 new_crtc_state
->stream
->ignore_msa_timing_param
= true;
6748 config
.state
= new_crtc_state
->base
.vrr_enabled
?
6749 VRR_STATE_ACTIVE_VARIABLE
:
6751 config
.min_refresh_in_uhz
=
6752 aconnector
->min_vfreq
* 1000000;
6753 config
.max_refresh_in_uhz
=
6754 aconnector
->max_vfreq
* 1000000;
6755 config
.vsif_supported
= true;
6759 new_crtc_state
->freesync_config
= config
;
6762 static void reset_freesync_config_for_crtc(
6763 struct dm_crtc_state
*new_crtc_state
)
6765 new_crtc_state
->vrr_supported
= false;
6767 memset(&new_crtc_state
->vrr_params
, 0,
6768 sizeof(new_crtc_state
->vrr_params
));
6769 memset(&new_crtc_state
->vrr_infopacket
, 0,
6770 sizeof(new_crtc_state
->vrr_infopacket
));
6773 static int dm_update_crtc_state(struct amdgpu_display_manager
*dm
,
6774 struct drm_atomic_state
*state
,
6775 struct drm_crtc
*crtc
,
6776 struct drm_crtc_state
*old_crtc_state
,
6777 struct drm_crtc_state
*new_crtc_state
,
6779 bool *lock_and_validation_needed
)
6781 struct dm_atomic_state
*dm_state
= NULL
;
6782 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
6783 struct dc_stream_state
*new_stream
;
6787 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
6788 * update changed items
6790 struct amdgpu_crtc
*acrtc
= NULL
;
6791 struct amdgpu_dm_connector
*aconnector
= NULL
;
6792 struct drm_connector_state
*drm_new_conn_state
= NULL
, *drm_old_conn_state
= NULL
;
6793 struct dm_connector_state
*dm_new_conn_state
= NULL
, *dm_old_conn_state
= NULL
;
6797 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
6798 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6799 acrtc
= to_amdgpu_crtc(crtc
);
6800 aconnector
= amdgpu_dm_find_first_crtc_matching_connector(state
, crtc
);
6802 /* TODO This hack should go away */
6803 if (aconnector
&& enable
) {
6804 /* Make sure fake sink is created in plug-in scenario */
6805 drm_new_conn_state
= drm_atomic_get_new_connector_state(state
,
6807 drm_old_conn_state
= drm_atomic_get_old_connector_state(state
,
6810 if (IS_ERR(drm_new_conn_state
)) {
6811 ret
= PTR_ERR_OR_ZERO(drm_new_conn_state
);
6815 dm_new_conn_state
= to_dm_connector_state(drm_new_conn_state
);
6816 dm_old_conn_state
= to_dm_connector_state(drm_old_conn_state
);
6818 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
6821 new_stream
= create_stream_for_sink(aconnector
,
6822 &new_crtc_state
->mode
,
6824 dm_old_crtc_state
->stream
);
6827 * we can have no stream on ACTION_SET if a display
6828 * was disconnected during S3, in this case it is not an
6829 * error, the OS will be updated after detection, and
6830 * will do the right thing on next atomic commit
6834 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
6835 __func__
, acrtc
->base
.base
.id
);
6840 dm_new_crtc_state
->abm_level
= dm_new_conn_state
->abm_level
;
6842 ret
= fill_hdr_info_packet(drm_new_conn_state
,
6843 &new_stream
->hdr_static_metadata
);
6848 * If we already removed the old stream from the context
6849 * (and set the new stream to NULL) then we can't reuse
6850 * the old stream even if the stream and scaling are unchanged.
6851 * We'll hit the BUG_ON and black screen.
6853 * TODO: Refactor this function to allow this check to work
6854 * in all conditions.
6856 if (dm_new_crtc_state
->stream
&&
6857 dc_is_stream_unchanged(new_stream
, dm_old_crtc_state
->stream
) &&
6858 dc_is_stream_scaling_unchanged(new_stream
, dm_old_crtc_state
->stream
)) {
6859 new_crtc_state
->mode_changed
= false;
6860 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
6861 new_crtc_state
->mode_changed
);
6865 /* mode_changed flag may get updated above, need to check again */
6866 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
6870 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
6871 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
6872 "connectors_changed:%d\n",
6874 new_crtc_state
->enable
,
6875 new_crtc_state
->active
,
6876 new_crtc_state
->planes_changed
,
6877 new_crtc_state
->mode_changed
,
6878 new_crtc_state
->active_changed
,
6879 new_crtc_state
->connectors_changed
);
6881 /* Remove stream for any changed/disabled CRTC */
6884 if (!dm_old_crtc_state
->stream
)
6887 ret
= dm_atomic_get_state(state
, &dm_state
);
6891 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
6894 /* i.e. reset mode */
6895 if (dc_remove_stream_from_ctx(
6898 dm_old_crtc_state
->stream
) != DC_OK
) {
6903 dc_stream_release(dm_old_crtc_state
->stream
);
6904 dm_new_crtc_state
->stream
= NULL
;
6906 reset_freesync_config_for_crtc(dm_new_crtc_state
);
6908 *lock_and_validation_needed
= true;
6910 } else {/* Add stream for any updated/enabled CRTC */
6912 * Quick fix to prevent NULL pointer on new_stream when
6913 * added MST connectors not found in existing crtc_state in the chained mode
6914 * TODO: need to dig out the root cause of that
6916 if (!aconnector
|| (!aconnector
->dc_sink
&& aconnector
->mst_port
))
6919 if (modereset_required(new_crtc_state
))
6922 if (modeset_required(new_crtc_state
, new_stream
,
6923 dm_old_crtc_state
->stream
)) {
6925 WARN_ON(dm_new_crtc_state
->stream
);
6927 ret
= dm_atomic_get_state(state
, &dm_state
);
6931 dm_new_crtc_state
->stream
= new_stream
;
6933 dc_stream_retain(new_stream
);
6935 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
6938 if (dc_add_stream_to_ctx(
6941 dm_new_crtc_state
->stream
) != DC_OK
) {
6946 *lock_and_validation_needed
= true;
6951 /* Release extra reference */
6953 dc_stream_release(new_stream
);
6956 * We want to do dc stream updates that do not require a
6957 * full modeset below.
6959 if (!(enable
&& aconnector
&& new_crtc_state
->enable
&&
6960 new_crtc_state
->active
))
6963 * Given above conditions, the dc state cannot be NULL because:
6964 * 1. We're in the process of enabling CRTCs (just been added
6965 * to the dc context, or already is on the context)
6966 * 2. Has a valid connector attached, and
6967 * 3. Is currently active and enabled.
6968 * => The dc stream state currently exists.
6970 BUG_ON(dm_new_crtc_state
->stream
== NULL
);
6972 /* Scaling or underscan settings */
6973 if (is_scaling_state_different(dm_old_conn_state
, dm_new_conn_state
))
6974 update_stream_scaling_settings(
6975 &new_crtc_state
->mode
, dm_new_conn_state
, dm_new_crtc_state
->stream
);
6978 dm_new_crtc_state
->abm_level
= dm_new_conn_state
->abm_level
;
6981 * Color management settings. We also update color properties
6982 * when a modeset is needed, to ensure it gets reprogrammed.
6984 if (dm_new_crtc_state
->base
.color_mgmt_changed
||
6985 drm_atomic_crtc_needs_modeset(new_crtc_state
)) {
6986 ret
= amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state
);
6991 /* Update Freesync settings. */
6992 get_freesync_config_for_crtc(dm_new_crtc_state
,
6999 dc_stream_release(new_stream
);
7003 static bool should_reset_plane(struct drm_atomic_state
*state
,
7004 struct drm_plane
*plane
,
7005 struct drm_plane_state
*old_plane_state
,
7006 struct drm_plane_state
*new_plane_state
)
7008 struct drm_plane
*other
;
7009 struct drm_plane_state
*old_other_state
, *new_other_state
;
7010 struct drm_crtc_state
*new_crtc_state
;
7014 * TODO: Remove this hack once the checks below are sufficient
7015 * enough to determine when we need to reset all the planes on
7018 if (state
->allow_modeset
)
7021 /* Exit early if we know that we're adding or removing the plane. */
7022 if (old_plane_state
->crtc
!= new_plane_state
->crtc
)
7025 /* old crtc == new_crtc == NULL, plane not in context. */
7026 if (!new_plane_state
->crtc
)
7030 drm_atomic_get_new_crtc_state(state
, new_plane_state
->crtc
);
7032 if (!new_crtc_state
)
7035 /* CRTC Degamma changes currently require us to recreate planes. */
7036 if (new_crtc_state
->color_mgmt_changed
)
7039 if (drm_atomic_crtc_needs_modeset(new_crtc_state
))
7043 * If there are any new primary or overlay planes being added or
7044 * removed then the z-order can potentially change. To ensure
7045 * correct z-order and pipe acquisition the current DC architecture
7046 * requires us to remove and recreate all existing planes.
7048 * TODO: Come up with a more elegant solution for this.
7050 for_each_oldnew_plane_in_state(state
, other
, old_other_state
, new_other_state
, i
) {
7051 if (other
->type
== DRM_PLANE_TYPE_CURSOR
)
7054 if (old_other_state
->crtc
!= new_plane_state
->crtc
&&
7055 new_other_state
->crtc
!= new_plane_state
->crtc
)
7058 if (old_other_state
->crtc
!= new_other_state
->crtc
)
7061 /* TODO: Remove this once we can handle fast format changes. */
7062 if (old_other_state
->fb
&& new_other_state
->fb
&&
7063 old_other_state
->fb
->format
!= new_other_state
->fb
->format
)
7070 static int dm_update_plane_state(struct dc
*dc
,
7071 struct drm_atomic_state
*state
,
7072 struct drm_plane
*plane
,
7073 struct drm_plane_state
*old_plane_state
,
7074 struct drm_plane_state
*new_plane_state
,
7076 bool *lock_and_validation_needed
)
7079 struct dm_atomic_state
*dm_state
= NULL
;
7080 struct drm_crtc
*new_plane_crtc
, *old_plane_crtc
;
7081 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
7082 struct dm_crtc_state
*dm_new_crtc_state
, *dm_old_crtc_state
;
7083 struct dm_plane_state
*dm_new_plane_state
, *dm_old_plane_state
;
7088 new_plane_crtc
= new_plane_state
->crtc
;
7089 old_plane_crtc
= old_plane_state
->crtc
;
7090 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
7091 dm_old_plane_state
= to_dm_plane_state(old_plane_state
);
7093 /*TODO Implement atomic check for cursor plane */
7094 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
7097 needs_reset
= should_reset_plane(state
, plane
, old_plane_state
,
7100 /* Remove any changed/removed planes */
7105 if (!old_plane_crtc
)
7108 old_crtc_state
= drm_atomic_get_old_crtc_state(
7109 state
, old_plane_crtc
);
7110 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
7112 if (!dm_old_crtc_state
->stream
)
7115 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7116 plane
->base
.id
, old_plane_crtc
->base
.id
);
7118 ret
= dm_atomic_get_state(state
, &dm_state
);
7122 if (!dc_remove_plane_from_context(
7124 dm_old_crtc_state
->stream
,
7125 dm_old_plane_state
->dc_state
,
7126 dm_state
->context
)) {
7133 dc_plane_state_release(dm_old_plane_state
->dc_state
);
7134 dm_new_plane_state
->dc_state
= NULL
;
7136 *lock_and_validation_needed
= true;
7138 } else { /* Add new planes */
7139 struct dc_plane_state
*dc_new_plane_state
;
7141 if (drm_atomic_plane_disabling(plane
->state
, new_plane_state
))
7144 if (!new_plane_crtc
)
7147 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, new_plane_crtc
);
7148 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
7150 if (!dm_new_crtc_state
->stream
)
7156 WARN_ON(dm_new_plane_state
->dc_state
);
7158 dc_new_plane_state
= dc_create_plane_state(dc
);
7159 if (!dc_new_plane_state
)
7162 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7163 plane
->base
.id
, new_plane_crtc
->base
.id
);
7165 ret
= fill_dc_plane_attributes(
7166 new_plane_crtc
->dev
->dev_private
,
7171 dc_plane_state_release(dc_new_plane_state
);
7175 ret
= dm_atomic_get_state(state
, &dm_state
);
7177 dc_plane_state_release(dc_new_plane_state
);
7182 * Any atomic check errors that occur after this will
7183 * not need a release. The plane state will be attached
7184 * to the stream, and therefore part of the atomic
7185 * state. It'll be released when the atomic state is
7188 if (!dc_add_plane_to_context(
7190 dm_new_crtc_state
->stream
,
7192 dm_state
->context
)) {
7194 dc_plane_state_release(dc_new_plane_state
);
7198 dm_new_plane_state
->dc_state
= dc_new_plane_state
;
7200 /* Tell DC to do a full surface update every time there
7201 * is a plane change. Inefficient, but works for now.
7203 dm_new_plane_state
->dc_state
->update_flags
.bits
.full_update
= 1;
7205 *lock_and_validation_needed
= true;
7213 dm_determine_update_type_for_commit(struct amdgpu_display_manager
*dm
,
7214 struct drm_atomic_state
*state
,
7215 enum surface_update_type
*out_type
)
7217 struct dc
*dc
= dm
->dc
;
7218 struct dm_atomic_state
*dm_state
= NULL
, *old_dm_state
= NULL
;
7219 int i
, j
, num_plane
, ret
= 0;
7220 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
7221 struct dm_plane_state
*new_dm_plane_state
, *old_dm_plane_state
;
7222 struct drm_crtc
*new_plane_crtc
, *old_plane_crtc
;
7223 struct drm_plane
*plane
;
7225 struct drm_crtc
*crtc
;
7226 struct drm_crtc_state
*new_crtc_state
, *old_crtc_state
;
7227 struct dm_crtc_state
*new_dm_crtc_state
, *old_dm_crtc_state
;
7228 struct dc_stream_status
*status
= NULL
;
7230 struct dc_surface_update
*updates
;
7231 enum surface_update_type update_type
= UPDATE_TYPE_FAST
;
7233 updates
= kcalloc(MAX_SURFACES
, sizeof(*updates
), GFP_KERNEL
);
7236 DRM_ERROR("Failed to allocate plane updates\n");
7237 /* Set type to FULL to avoid crashing in DC*/
7238 update_type
= UPDATE_TYPE_FULL
;
7242 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
7243 struct dc_scaling_info scaling_info
;
7244 struct dc_stream_update stream_update
;
7246 memset(&stream_update
, 0, sizeof(stream_update
));
7248 new_dm_crtc_state
= to_dm_crtc_state(new_crtc_state
);
7249 old_dm_crtc_state
= to_dm_crtc_state(old_crtc_state
);
7252 if (new_dm_crtc_state
->stream
!= old_dm_crtc_state
->stream
) {
7253 update_type
= UPDATE_TYPE_FULL
;
7257 if (!new_dm_crtc_state
->stream
)
7260 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, j
) {
7261 const struct amdgpu_framebuffer
*amdgpu_fb
=
7262 to_amdgpu_framebuffer(new_plane_state
->fb
);
7263 struct dc_plane_info plane_info
;
7264 struct dc_flip_addrs flip_addr
;
7265 uint64_t tiling_flags
;
7267 new_plane_crtc
= new_plane_state
->crtc
;
7268 old_plane_crtc
= old_plane_state
->crtc
;
7269 new_dm_plane_state
= to_dm_plane_state(new_plane_state
);
7270 old_dm_plane_state
= to_dm_plane_state(old_plane_state
);
7272 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
7275 if (new_dm_plane_state
->dc_state
!= old_dm_plane_state
->dc_state
) {
7276 update_type
= UPDATE_TYPE_FULL
;
7280 if (crtc
!= new_plane_crtc
)
7283 updates
[num_plane
].surface
= new_dm_plane_state
->dc_state
;
7285 if (new_crtc_state
->mode_changed
) {
7286 stream_update
.dst
= new_dm_crtc_state
->stream
->dst
;
7287 stream_update
.src
= new_dm_crtc_state
->stream
->src
;
7290 if (new_crtc_state
->color_mgmt_changed
) {
7291 updates
[num_plane
].gamma
=
7292 new_dm_plane_state
->dc_state
->gamma_correction
;
7293 updates
[num_plane
].in_transfer_func
=
7294 new_dm_plane_state
->dc_state
->in_transfer_func
;
7295 stream_update
.gamut_remap
=
7296 &new_dm_crtc_state
->stream
->gamut_remap_matrix
;
7297 stream_update
.output_csc_transform
=
7298 &new_dm_crtc_state
->stream
->csc_color_matrix
;
7299 stream_update
.out_transfer_func
=
7300 new_dm_crtc_state
->stream
->out_transfer_func
;
7303 ret
= fill_dc_scaling_info(new_plane_state
,
7308 updates
[num_plane
].scaling_info
= &scaling_info
;
7311 ret
= get_fb_info(amdgpu_fb
, &tiling_flags
);
7315 memset(&flip_addr
, 0, sizeof(flip_addr
));
7317 ret
= fill_dc_plane_info_and_addr(
7318 dm
->adev
, new_plane_state
, tiling_flags
,
7320 &flip_addr
.address
);
7324 updates
[num_plane
].plane_info
= &plane_info
;
7325 updates
[num_plane
].flip_addr
= &flip_addr
;
7334 ret
= dm_atomic_get_state(state
, &dm_state
);
7338 old_dm_state
= dm_atomic_get_old_state(state
);
7339 if (!old_dm_state
) {
7344 status
= dc_stream_get_status_from_state(old_dm_state
->context
,
7345 new_dm_crtc_state
->stream
);
7346 stream_update
.stream
= new_dm_crtc_state
->stream
;
7348 * TODO: DC modifies the surface during this call so we need
7349 * to lock here - find a way to do this without locking.
7351 mutex_lock(&dm
->dc_lock
);
7352 update_type
= dc_check_update_surfaces_for_stream(dc
, updates
, num_plane
,
7353 &stream_update
, status
);
7354 mutex_unlock(&dm
->dc_lock
);
7356 if (update_type
> UPDATE_TYPE_MED
) {
7357 update_type
= UPDATE_TYPE_FULL
;
7365 *out_type
= update_type
;
7370 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
7371 * @dev: The DRM device
7372 * @state: The atomic state to commit
7374 * Validate that the given atomic state is programmable by DC into hardware.
7375 * This involves constructing a &struct dc_state reflecting the new hardware
7376 * state we wish to commit, then querying DC to see if it is programmable. It's
7377 * important not to modify the existing DC state. Otherwise, atomic_check
7378 * may unexpectedly commit hardware changes.
7380 * When validating the DC state, it's important that the right locks are
7381 * acquired. For full updates case which removes/adds/updates streams on one
7382 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
7383 * that any such full update commit will wait for completion of any outstanding
7384 * flip using DRMs synchronization events. See
7385 * dm_determine_update_type_for_commit()
7387 * Note that DM adds the affected connectors for all CRTCs in state, when that
7388 * might not seem necessary. This is because DC stream creation requires the
7389 * DC sink, which is tied to the DRM connector state. Cleaning this up should
7390 * be possible but non-trivial - a possible TODO item.
7392 * Return: -Error code if validation failed.
7394 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
7395 struct drm_atomic_state
*state
)
7397 struct amdgpu_device
*adev
= dev
->dev_private
;
7398 struct dm_atomic_state
*dm_state
= NULL
;
7399 struct dc
*dc
= adev
->dm
.dc
;
7400 struct drm_connector
*connector
;
7401 struct drm_connector_state
*old_con_state
, *new_con_state
;
7402 struct drm_crtc
*crtc
;
7403 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
7404 struct drm_plane
*plane
;
7405 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
7406 enum surface_update_type update_type
= UPDATE_TYPE_FAST
;
7407 enum surface_update_type overall_update_type
= UPDATE_TYPE_FAST
;
7412 * This bool will be set for true for any modeset/reset
7413 * or plane update which implies non fast surface update.
7415 bool lock_and_validation_needed
= false;
7417 ret
= drm_atomic_helper_check_modeset(dev
, state
);
7421 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
7422 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
) &&
7423 !new_crtc_state
->color_mgmt_changed
&&
7424 old_crtc_state
->vrr_enabled
== new_crtc_state
->vrr_enabled
)
7427 if (!new_crtc_state
->enable
)
7430 ret
= drm_atomic_add_affected_connectors(state
, crtc
);
7434 ret
= drm_atomic_add_affected_planes(state
, crtc
);
7440 * Add all primary and overlay planes on the CRTC to the state
7441 * whenever a plane is enabled to maintain correct z-ordering
7442 * and to enable fast surface updates.
7444 drm_for_each_crtc(crtc
, dev
) {
7445 bool modified
= false;
7447 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
7448 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
7451 if (new_plane_state
->crtc
== crtc
||
7452 old_plane_state
->crtc
== crtc
) {
7461 drm_for_each_plane_mask(plane
, state
->dev
, crtc
->state
->plane_mask
) {
7462 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
7466 drm_atomic_get_plane_state(state
, plane
);
7468 if (IS_ERR(new_plane_state
)) {
7469 ret
= PTR_ERR(new_plane_state
);
7475 /* Remove exiting planes if they are modified */
7476 for_each_oldnew_plane_in_state_reverse(state
, plane
, old_plane_state
, new_plane_state
, i
) {
7477 ret
= dm_update_plane_state(dc
, state
, plane
,
7481 &lock_and_validation_needed
);
7486 /* Disable all crtcs which require disable */
7487 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
7488 ret
= dm_update_crtc_state(&adev
->dm
, state
, crtc
,
7492 &lock_and_validation_needed
);
7497 /* Enable all crtcs which require enable */
7498 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
7499 ret
= dm_update_crtc_state(&adev
->dm
, state
, crtc
,
7503 &lock_and_validation_needed
);
7508 /* Add new/modified planes */
7509 for_each_oldnew_plane_in_state_reverse(state
, plane
, old_plane_state
, new_plane_state
, i
) {
7510 ret
= dm_update_plane_state(dc
, state
, plane
,
7514 &lock_and_validation_needed
);
7519 /* Run this here since we want to validate the streams we created */
7520 ret
= drm_atomic_helper_check_planes(dev
, state
);
7524 if (state
->legacy_cursor_update
) {
7526 * This is a fast cursor update coming from the plane update
7527 * helper, check if it can be done asynchronously for better
7530 state
->async_update
=
7531 !drm_atomic_helper_async_check(dev
, state
);
7534 * Skip the remaining global validation if this is an async
7535 * update. Cursor updates can be done without affecting
7536 * state or bandwidth calcs and this avoids the performance
7537 * penalty of locking the private state object and
7538 * allocating a new dc_state.
7540 if (state
->async_update
)
7544 /* Check scaling and underscan changes*/
7545 /* TODO Removed scaling changes validation due to inability to commit
7546 * new stream into context w\o causing full reset. Need to
7547 * decide how to handle.
7549 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
7550 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
7551 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
7552 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
7554 /* Skip any modesets/resets */
7555 if (!acrtc
|| drm_atomic_crtc_needs_modeset(
7556 drm_atomic_get_new_crtc_state(state
, &acrtc
->base
)))
7559 /* Skip any thing not scale or underscan changes */
7560 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
))
7563 overall_update_type
= UPDATE_TYPE_FULL
;
7564 lock_and_validation_needed
= true;
7567 ret
= dm_determine_update_type_for_commit(&adev
->dm
, state
, &update_type
);
7571 if (overall_update_type
< update_type
)
7572 overall_update_type
= update_type
;
7575 * lock_and_validation_needed was an old way to determine if we need to set
7576 * the global lock. Leaving it in to check if we broke any corner cases
7577 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
7578 * lock_and_validation_needed false = UPDATE_TYPE_FAST
7580 if (lock_and_validation_needed
&& overall_update_type
<= UPDATE_TYPE_FAST
)
7581 WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
7583 if (overall_update_type
> UPDATE_TYPE_FAST
) {
7584 ret
= dm_atomic_get_state(state
, &dm_state
);
7588 ret
= do_aquire_global_lock(dev
, state
);
7592 if (dc_validate_global_state(dc
, dm_state
->context
, false) != DC_OK
) {
7598 * The commit is a fast update. Fast updates shouldn't change
7599 * the DC context, affect global validation, and can have their
7600 * commit work done in parallel with other commits not touching
7601 * the same resource. If we have a new DC context as part of
7602 * the DM atomic state from validation we need to free it and
7603 * retain the existing one instead.
7605 struct dm_atomic_state
*new_dm_state
, *old_dm_state
;
7607 new_dm_state
= dm_atomic_get_new_state(state
);
7608 old_dm_state
= dm_atomic_get_old_state(state
);
7610 if (new_dm_state
&& old_dm_state
) {
7611 if (new_dm_state
->context
)
7612 dc_release_state(new_dm_state
->context
);
7614 new_dm_state
->context
= old_dm_state
->context
;
7616 if (old_dm_state
->context
)
7617 dc_retain_state(old_dm_state
->context
);
7621 /* Store the overall update type for use later in atomic check. */
7622 for_each_new_crtc_in_state (state
, crtc
, new_crtc_state
, i
) {
7623 struct dm_crtc_state
*dm_new_crtc_state
=
7624 to_dm_crtc_state(new_crtc_state
);
7626 dm_new_crtc_state
->update_type
= (int)overall_update_type
;
7629 /* Must be success */
7634 if (ret
== -EDEADLK
)
7635 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
7636 else if (ret
== -EINTR
|| ret
== -EAGAIN
|| ret
== -ERESTARTSYS
)
7637 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
7639 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret
);
7644 static bool is_dp_capable_without_timing_msa(struct dc
*dc
,
7645 struct amdgpu_dm_connector
*amdgpu_dm_connector
)
7648 bool capable
= false;
7650 if (amdgpu_dm_connector
->dc_link
&&
7651 dm_helpers_dp_read_dpcd(
7653 amdgpu_dm_connector
->dc_link
,
7654 DP_DOWN_STREAM_PORT_COUNT
,
7656 sizeof(dpcd_data
))) {
7657 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
7662 void amdgpu_dm_update_freesync_caps(struct drm_connector
*connector
,
7666 bool edid_check_required
;
7667 struct detailed_timing
*timing
;
7668 struct detailed_non_pixel
*data
;
7669 struct detailed_data_monitor_range
*range
;
7670 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
7671 to_amdgpu_dm_connector(connector
);
7672 struct dm_connector_state
*dm_con_state
= NULL
;
7674 struct drm_device
*dev
= connector
->dev
;
7675 struct amdgpu_device
*adev
= dev
->dev_private
;
7676 bool freesync_capable
= false;
7678 if (!connector
->state
) {
7679 DRM_ERROR("%s - Connector has no state", __func__
);
7684 dm_con_state
= to_dm_connector_state(connector
->state
);
7686 amdgpu_dm_connector
->min_vfreq
= 0;
7687 amdgpu_dm_connector
->max_vfreq
= 0;
7688 amdgpu_dm_connector
->pixel_clock_mhz
= 0;
7693 dm_con_state
= to_dm_connector_state(connector
->state
);
7695 edid_check_required
= false;
7696 if (!amdgpu_dm_connector
->dc_sink
) {
7697 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
7700 if (!adev
->dm
.freesync_module
)
7703 * if edid non zero restrict freesync only for dp and edp
7706 if (amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
7707 || amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
7708 edid_check_required
= is_dp_capable_without_timing_msa(
7710 amdgpu_dm_connector
);
7713 if (edid_check_required
== true && (edid
->version
> 1 ||
7714 (edid
->version
== 1 && edid
->revision
> 1))) {
7715 for (i
= 0; i
< 4; i
++) {
7717 timing
= &edid
->detailed_timings
[i
];
7718 data
= &timing
->data
.other_data
;
7719 range
= &data
->data
.range
;
7721 * Check if monitor has continuous frequency mode
7723 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
7726 * Check for flag range limits only. If flag == 1 then
7727 * no additional timing information provided.
7728 * Default GTF, GTF Secondary curve and CVT are not
7731 if (range
->flags
!= 1)
7734 amdgpu_dm_connector
->min_vfreq
= range
->min_vfreq
;
7735 amdgpu_dm_connector
->max_vfreq
= range
->max_vfreq
;
7736 amdgpu_dm_connector
->pixel_clock_mhz
=
7737 range
->pixel_clock_mhz
* 10;
7741 if (amdgpu_dm_connector
->max_vfreq
-
7742 amdgpu_dm_connector
->min_vfreq
> 10) {
7744 freesync_capable
= true;
7750 dm_con_state
->freesync_capable
= freesync_capable
;
7752 if (connector
->vrr_capable_property
)
7753 drm_connector_set_vrr_capable_property(connector
,