2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include "dm_services_types.h"
28 #include "dc/inc/core_types.h"
32 #include "amdgpu_display.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
40 #include "dm_services_types.h"
41 #include "amdgpu_dm_mst_types.h"
43 #include "ivsrcid/ivsrcid_vislands30.h"
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/version.h>
48 #include <linux/types.h>
49 #include <linux/pm_runtime.h>
52 #include <drm/drm_atomic.h>
53 #include <drm/drm_atomic_helper.h>
54 #include <drm/drm_dp_mst_helper.h>
55 #include <drm/drm_fb_helper.h>
56 #include <drm/drm_edid.h>
58 #include "modules/inc/mod_freesync.h"
60 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
61 #include "ivsrcid/irqsrcs_dcn_1_0.h"
63 #include "dcn/dcn_1_0_offset.h"
64 #include "dcn/dcn_1_0_sh_mask.h"
65 #include "soc15_hw_ip.h"
66 #include "vega10_ip_offset.h"
68 #include "soc15_common.h"
71 #include "modules/inc/mod_freesync.h"
73 #include "i2caux_interface.h"
75 /* basic init/fini API */
76 static int amdgpu_dm_init(struct amdgpu_device
*adev
);
77 static void amdgpu_dm_fini(struct amdgpu_device
*adev
);
79 /* initializes drm_device display related structures, based on the information
80 * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
81 * drm_encoder, drm_mode_config
83 * Returns 0 on success
85 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
);
86 /* removes and deallocates the drm structures, created by the above function */
87 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
);
90 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
);
92 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
93 struct amdgpu_plane
*aplane
,
94 unsigned long possible_crtcs
);
95 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
96 struct drm_plane
*plane
,
98 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
99 struct amdgpu_dm_connector
*amdgpu_dm_connector
,
101 struct amdgpu_encoder
*amdgpu_encoder
);
102 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
103 struct amdgpu_encoder
*aencoder
,
104 uint32_t link_index
);
106 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
);
108 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
109 struct drm_atomic_state
*state
,
112 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
);
114 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
115 struct drm_atomic_state
*state
);
120 static const enum drm_plane_type dm_plane_type_default
[AMDGPU_MAX_PLANES
] = {
121 DRM_PLANE_TYPE_PRIMARY
,
122 DRM_PLANE_TYPE_PRIMARY
,
123 DRM_PLANE_TYPE_PRIMARY
,
124 DRM_PLANE_TYPE_PRIMARY
,
125 DRM_PLANE_TYPE_PRIMARY
,
126 DRM_PLANE_TYPE_PRIMARY
,
129 static const enum drm_plane_type dm_plane_type_carizzo
[AMDGPU_MAX_PLANES
] = {
130 DRM_PLANE_TYPE_PRIMARY
,
131 DRM_PLANE_TYPE_PRIMARY
,
132 DRM_PLANE_TYPE_PRIMARY
,
133 DRM_PLANE_TYPE_OVERLAY
,/* YUV Capable Underlay */
136 static const enum drm_plane_type dm_plane_type_stoney
[AMDGPU_MAX_PLANES
] = {
137 DRM_PLANE_TYPE_PRIMARY
,
138 DRM_PLANE_TYPE_PRIMARY
,
139 DRM_PLANE_TYPE_OVERLAY
, /* YUV Capable Underlay */
143 * dm_vblank_get_counter
146 * Get counter for number of vertical blanks
149 * struct amdgpu_device *adev - [in] desired amdgpu device
150 * int disp_idx - [in] which CRTC to get the counter from
153 * Counter for vertical blanks
155 static u32
dm_vblank_get_counter(struct amdgpu_device
*adev
, int crtc
)
157 if (crtc
>= adev
->mode_info
.num_crtc
)
160 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
161 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
165 if (acrtc_state
->stream
== NULL
) {
166 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
171 return dc_stream_get_vblank_counter(acrtc_state
->stream
);
175 static int dm_crtc_get_scanoutpos(struct amdgpu_device
*adev
, int crtc
,
176 u32
*vbl
, u32
*position
)
178 uint32_t v_blank_start
, v_blank_end
, h_position
, v_position
;
180 if ((crtc
< 0) || (crtc
>= adev
->mode_info
.num_crtc
))
183 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
184 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
187 if (acrtc_state
->stream
== NULL
) {
188 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
194 * TODO rework base driver to use values directly.
195 * for now parse it back into reg-format
197 dc_stream_get_scanoutpos(acrtc_state
->stream
,
203 *position
= v_position
| (h_position
<< 16);
204 *vbl
= v_blank_start
| (v_blank_end
<< 16);
210 static bool dm_is_idle(void *handle
)
/* amd_ip_funcs hook — nothing to wait for in DM.
 * NOTE(review): body elided in extraction; reconstructed as upstream stub. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
222 static bool dm_check_soft_reset(void *handle
)
/* amd_ip_funcs hook — soft reset is a no-op for DM.
 * NOTE(review): body elided in extraction; reconstructed as upstream stub. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
233 static struct amdgpu_crtc
*
234 get_crtc_by_otg_inst(struct amdgpu_device
*adev
,
237 struct drm_device
*dev
= adev
->ddev
;
238 struct drm_crtc
*crtc
;
239 struct amdgpu_crtc
*amdgpu_crtc
;
242 * following if is check inherited from both functions where this one is
243 * used now. Need to be checked why it could happen.
245 if (otg_inst
== -1) {
247 return adev
->mode_info
.crtcs
[0];
250 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
251 amdgpu_crtc
= to_amdgpu_crtc(crtc
);
253 if (amdgpu_crtc
->otg_inst
== otg_inst
)
260 static void dm_pflip_high_irq(void *interrupt_params
)
262 struct amdgpu_crtc
*amdgpu_crtc
;
263 struct common_irq_params
*irq_params
= interrupt_params
;
264 struct amdgpu_device
*adev
= irq_params
->adev
;
267 amdgpu_crtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_PFLIP
);
269 /* IRQ could occur when in initial stage */
270 /*TODO work and BO cleanup */
271 if (amdgpu_crtc
== NULL
) {
272 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
276 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
278 if (amdgpu_crtc
->pflip_status
!= AMDGPU_FLIP_SUBMITTED
){
279 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
280 amdgpu_crtc
->pflip_status
,
281 AMDGPU_FLIP_SUBMITTED
,
282 amdgpu_crtc
->crtc_id
,
284 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
289 /* wakeup usersapce */
290 if (amdgpu_crtc
->event
) {
291 /* Update to correct count/ts if racing with vblank irq */
292 drm_crtc_accurate_vblank_count(&amdgpu_crtc
->base
);
294 drm_crtc_send_vblank_event(&amdgpu_crtc
->base
, amdgpu_crtc
->event
);
296 /* page flip completed. clean up */
297 amdgpu_crtc
->event
= NULL
;
302 amdgpu_crtc
->pflip_status
= AMDGPU_FLIP_NONE
;
303 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
305 DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
306 __func__
, amdgpu_crtc
->crtc_id
, amdgpu_crtc
);
308 drm_crtc_vblank_put(&amdgpu_crtc
->base
);
311 static void dm_crtc_high_irq(void *interrupt_params
)
313 struct common_irq_params
*irq_params
= interrupt_params
;
314 struct amdgpu_device
*adev
= irq_params
->adev
;
315 uint8_t crtc_index
= 0;
316 struct amdgpu_crtc
*acrtc
;
318 acrtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_VBLANK
);
321 crtc_index
= acrtc
->crtc_id
;
323 drm_handle_vblank(adev
->ddev
, crtc_index
);
324 amdgpu_dm_crtc_handle_crc_irq(&acrtc
->base
);
327 static int dm_set_clockgating_state(void *handle
,
328 enum amd_clockgating_state state
)
333 static int dm_set_powergating_state(void *handle
,
334 enum amd_powergating_state state
)
339 /* Prototypes of private functions */
340 static int dm_early_init(void* handle
);
342 static void hotplug_notify_work_func(struct work_struct
*work
)
344 struct amdgpu_display_manager
*dm
= container_of(work
, struct amdgpu_display_manager
, mst_hotplug_work
);
345 struct drm_device
*dev
= dm
->ddev
;
347 drm_kms_helper_hotplug_event(dev
);
350 /* Allocate memory for FBC compressed data */
351 static void amdgpu_dm_fbc_init(struct drm_connector
*connector
)
353 struct drm_device
*dev
= connector
->dev
;
354 struct amdgpu_device
*adev
= dev
->dev_private
;
355 struct dm_comressor_info
*compressor
= &adev
->dm
.compressor
;
356 struct amdgpu_dm_connector
*aconn
= to_amdgpu_dm_connector(connector
);
357 struct drm_display_mode
*mode
;
358 unsigned long max_size
= 0;
360 if (adev
->dm
.dc
->fbc_compressor
== NULL
)
363 if (aconn
->dc_link
->connector_signal
!= SIGNAL_TYPE_EDP
)
366 if (compressor
->bo_ptr
)
370 list_for_each_entry(mode
, &connector
->modes
, head
) {
371 if (max_size
< mode
->htotal
* mode
->vtotal
)
372 max_size
= mode
->htotal
* mode
->vtotal
;
376 int r
= amdgpu_bo_create_kernel(adev
, max_size
* 4, PAGE_SIZE
,
377 AMDGPU_GEM_DOMAIN_GTT
, &compressor
->bo_ptr
,
378 &compressor
->gpu_addr
, &compressor
->cpu_addr
);
381 DRM_ERROR("DM: Failed to initialize FBC\n");
383 adev
->dm
.dc
->ctx
->fbc_gpu_addr
= compressor
->gpu_addr
;
384 DRM_INFO("DM: FBC alloc %lu\n", max_size
*4);
394 * Returns 0 on success
396 static int amdgpu_dm_init(struct amdgpu_device
*adev
)
398 struct dc_init_data init_data
;
399 adev
->dm
.ddev
= adev
->ddev
;
400 adev
->dm
.adev
= adev
;
402 /* Zero all the fields */
403 memset(&init_data
, 0, sizeof(init_data
));
405 if(amdgpu_dm_irq_init(adev
)) {
406 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
410 init_data
.asic_id
.chip_family
= adev
->family
;
412 init_data
.asic_id
.pci_revision_id
= adev
->rev_id
;
413 init_data
.asic_id
.hw_internal_rev
= adev
->external_rev_id
;
415 init_data
.asic_id
.vram_width
= adev
->gmc
.vram_width
;
416 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
417 init_data
.asic_id
.atombios_base_address
=
418 adev
->mode_info
.atom_context
->bios
;
420 init_data
.driver
= adev
;
422 adev
->dm
.cgs_device
= amdgpu_cgs_create_device(adev
);
424 if (!adev
->dm
.cgs_device
) {
425 DRM_ERROR("amdgpu: failed to create cgs device.\n");
429 init_data
.cgs_device
= adev
->dm
.cgs_device
;
433 init_data
.dce_environment
= DCE_ENV_PRODUCTION_DRV
;
436 * TODO debug why this doesn't work on Raven
438 if (adev
->flags
& AMD_IS_APU
&&
439 adev
->asic_type
>= CHIP_CARRIZO
&&
440 adev
->asic_type
< CHIP_RAVEN
)
441 init_data
.flags
.gpu_vm_support
= true;
443 /* Display Core create. */
444 adev
->dm
.dc
= dc_create(&init_data
);
447 DRM_INFO("Display Core initialized with v%s!\n", DC_VER
);
449 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER
);
453 INIT_WORK(&adev
->dm
.mst_hotplug_work
, hotplug_notify_work_func
);
455 adev
->dm
.freesync_module
= mod_freesync_create(adev
->dm
.dc
);
456 if (!adev
->dm
.freesync_module
) {
458 "amdgpu: failed to initialize freesync_module.\n");
460 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
461 adev
->dm
.freesync_module
);
463 amdgpu_dm_init_color_mod();
465 if (amdgpu_dm_initialize_drm_device(adev
)) {
467 "amdgpu: failed to initialize sw for display support.\n");
471 /* Update the actual used number of crtc */
472 adev
->mode_info
.num_crtc
= adev
->dm
.display_indexes_num
;
474 /* TODO: Add_display_info? */
476 /* TODO use dynamic cursor width */
477 adev
->ddev
->mode_config
.cursor_width
= adev
->dm
.dc
->caps
.max_cursor_size
;
478 adev
->ddev
->mode_config
.cursor_height
= adev
->dm
.dc
->caps
.max_cursor_size
;
480 if (drm_vblank_init(adev
->ddev
, adev
->dm
.display_indexes_num
)) {
482 "amdgpu: failed to initialize sw for display support.\n");
486 DRM_DEBUG_DRIVER("KMS initialized.\n");
490 amdgpu_dm_fini(adev
);
495 static void amdgpu_dm_fini(struct amdgpu_device
*adev
)
497 amdgpu_dm_destroy_drm_device(&adev
->dm
);
499 * TODO: pageflip, vlank interrupt
501 * amdgpu_dm_irq_fini(adev);
504 if (adev
->dm
.cgs_device
) {
505 amdgpu_cgs_destroy_device(adev
->dm
.cgs_device
);
506 adev
->dm
.cgs_device
= NULL
;
508 if (adev
->dm
.freesync_module
) {
509 mod_freesync_destroy(adev
->dm
.freesync_module
);
510 adev
->dm
.freesync_module
= NULL
;
512 /* DC Destroy TODO: Replace destroy DAL */
514 dc_destroy(&adev
->dm
.dc
);
/* amd_ip_funcs hook — DM does its software init in dm_hw_init(); no-op.
 * NOTE(review): body elided in extraction; reconstructed as upstream stub. */
static int dm_sw_init(void *handle)
{
	return 0;
}
/* amd_ip_funcs hook — DM tears down in dm_hw_fini(); no-op.
 * NOTE(review): body elided in extraction; reconstructed as upstream stub. */
static int dm_sw_fini(void *handle)
{
	return 0;
}
528 static int detect_mst_link_for_all_connectors(struct drm_device
*dev
)
530 struct amdgpu_dm_connector
*aconnector
;
531 struct drm_connector
*connector
;
534 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
536 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
537 aconnector
= to_amdgpu_dm_connector(connector
);
538 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
539 aconnector
->mst_mgr
.aux
) {
540 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
541 aconnector
, aconnector
->base
.base
.id
);
543 ret
= drm_dp_mst_topology_mgr_set_mst(&aconnector
->mst_mgr
, true);
545 DRM_ERROR("DM_MST: Failed to start MST\n");
546 ((struct dc_link
*)aconnector
->dc_link
)->type
= dc_connection_single
;
552 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
556 static int dm_late_init(void *handle
)
558 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
560 return detect_mst_link_for_all_connectors(adev
->ddev
);
563 static void s3_handle_mst(struct drm_device
*dev
, bool suspend
)
565 struct amdgpu_dm_connector
*aconnector
;
566 struct drm_connector
*connector
;
568 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
570 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
571 aconnector
= to_amdgpu_dm_connector(connector
);
572 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
573 !aconnector
->mst_port
) {
576 drm_dp_mst_topology_mgr_suspend(&aconnector
->mst_mgr
);
578 drm_dp_mst_topology_mgr_resume(&aconnector
->mst_mgr
);
582 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
/*
 * dm_hw_init - amd_ip_funcs hw-init hook.
 *
 * Creates the DAL display manager and initializes hotplug-detect support.
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/*
 * dm_hw_fini - amd_ip_funcs hw-fini hook.
 *
 * Tears down HPD, DM interrupt handling and the display manager itself.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
606 static int dm_suspend(void *handle
)
608 struct amdgpu_device
*adev
= handle
;
609 struct amdgpu_display_manager
*dm
= &adev
->dm
;
612 s3_handle_mst(adev
->ddev
, true);
614 amdgpu_dm_irq_suspend(adev
);
616 WARN_ON(adev
->dm
.cached_state
);
617 adev
->dm
.cached_state
= drm_atomic_helper_suspend(adev
->ddev
);
619 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D3
);
624 static struct amdgpu_dm_connector
*
625 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state
*state
,
626 struct drm_crtc
*crtc
)
629 struct drm_connector_state
*new_con_state
;
630 struct drm_connector
*connector
;
631 struct drm_crtc
*crtc_from_state
;
633 for_each_new_connector_in_state(state
, connector
, new_con_state
, i
) {
634 crtc_from_state
= new_con_state
->crtc
;
636 if (crtc_from_state
== crtc
)
637 return to_amdgpu_dm_connector(connector
);
643 static int dm_resume(void *handle
)
645 struct amdgpu_device
*adev
= handle
;
646 struct drm_device
*ddev
= adev
->ddev
;
647 struct amdgpu_display_manager
*dm
= &adev
->dm
;
648 struct amdgpu_dm_connector
*aconnector
;
649 struct drm_connector
*connector
;
650 struct drm_crtc
*crtc
;
651 struct drm_crtc_state
*new_crtc_state
;
652 struct dm_crtc_state
*dm_new_crtc_state
;
653 struct drm_plane
*plane
;
654 struct drm_plane_state
*new_plane_state
;
655 struct dm_plane_state
*dm_new_plane_state
;
659 /* power on hardware */
660 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D0
);
662 /* program HPD filter */
665 /* On resume we need to rewrite the MSTM control bits to enamble MST*/
666 s3_handle_mst(ddev
, false);
669 * early enable HPD Rx IRQ, should be done before set mode as short
670 * pulse interrupts are used for MST
672 amdgpu_dm_irq_resume_early(adev
);
675 list_for_each_entry(connector
, &ddev
->mode_config
.connector_list
, head
) {
676 aconnector
= to_amdgpu_dm_connector(connector
);
679 * this is the case when traversing through already created
680 * MST connectors, should be skipped
682 if (aconnector
->mst_port
)
685 mutex_lock(&aconnector
->hpd_lock
);
686 dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
);
688 if (aconnector
->fake_enable
&& aconnector
->dc_link
->local_sink
)
689 aconnector
->fake_enable
= false;
691 aconnector
->dc_sink
= NULL
;
692 amdgpu_dm_update_connector_after_detect(aconnector
);
693 mutex_unlock(&aconnector
->hpd_lock
);
696 /* Force mode set in atomic comit */
697 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
)
698 new_crtc_state
->active_changed
= true;
701 * atomic_check is expected to create the dc states. We need to release
702 * them here, since they were duplicated as part of the suspend
705 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
) {
706 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
707 if (dm_new_crtc_state
->stream
) {
708 WARN_ON(kref_read(&dm_new_crtc_state
->stream
->refcount
) > 1);
709 dc_stream_release(dm_new_crtc_state
->stream
);
710 dm_new_crtc_state
->stream
= NULL
;
714 for_each_new_plane_in_state(dm
->cached_state
, plane
, new_plane_state
, i
) {
715 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
716 if (dm_new_plane_state
->dc_state
) {
717 WARN_ON(kref_read(&dm_new_plane_state
->dc_state
->refcount
) > 1);
718 dc_plane_state_release(dm_new_plane_state
->dc_state
);
719 dm_new_plane_state
->dc_state
= NULL
;
723 ret
= drm_atomic_helper_resume(ddev
, dm
->cached_state
);
725 dm
->cached_state
= NULL
;
727 amdgpu_dm_irq_resume_late(adev
);
732 static const struct amd_ip_funcs amdgpu_dm_funcs
= {
734 .early_init
= dm_early_init
,
735 .late_init
= dm_late_init
,
736 .sw_init
= dm_sw_init
,
737 .sw_fini
= dm_sw_fini
,
738 .hw_init
= dm_hw_init
,
739 .hw_fini
= dm_hw_fini
,
740 .suspend
= dm_suspend
,
742 .is_idle
= dm_is_idle
,
743 .wait_for_idle
= dm_wait_for_idle
,
744 .check_soft_reset
= dm_check_soft_reset
,
745 .soft_reset
= dm_soft_reset
,
746 .set_clockgating_state
= dm_set_clockgating_state
,
747 .set_powergating_state
= dm_set_powergating_state
,
750 const struct amdgpu_ip_block_version dm_ip_block
=
752 .type
= AMD_IP_BLOCK_TYPE_DCE
,
756 .funcs
= &amdgpu_dm_funcs
,
760 static struct drm_atomic_state
*
761 dm_atomic_state_alloc(struct drm_device
*dev
)
763 struct dm_atomic_state
*state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
768 if (drm_atomic_state_init(dev
, &state
->base
) < 0)
779 dm_atomic_state_clear(struct drm_atomic_state
*state
)
781 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
783 if (dm_state
->context
) {
784 dc_release_state(dm_state
->context
);
785 dm_state
->context
= NULL
;
788 drm_atomic_state_default_clear(state
);
/*
 * dm_atomic_state_alloc_free - release the base state and free the
 * dm_atomic_state wrapper allocated in dm_atomic_state_alloc().
 */
static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	drm_atomic_state_default_release(state);
	kfree(dm_state);
}
799 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs
= {
800 .fb_create
= amdgpu_display_user_framebuffer_create
,
801 .output_poll_changed
= drm_fb_helper_output_poll_changed
,
802 .atomic_check
= amdgpu_dm_atomic_check
,
803 .atomic_commit
= amdgpu_dm_atomic_commit
,
804 .atomic_state_alloc
= dm_atomic_state_alloc
,
805 .atomic_state_clear
= dm_atomic_state_clear
,
806 .atomic_state_free
= dm_atomic_state_alloc_free
809 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs
= {
810 .atomic_commit_tail
= amdgpu_dm_atomic_commit_tail
814 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
)
816 struct drm_connector
*connector
= &aconnector
->base
;
817 struct drm_device
*dev
= connector
->dev
;
818 struct dc_sink
*sink
;
820 /* MST handled by drm_mst framework */
821 if (aconnector
->mst_mgr
.mst_state
== true)
825 sink
= aconnector
->dc_link
->local_sink
;
827 /* Edid mgmt connector gets first update only in mode_valid hook and then
828 * the connector sink is set to either fake or physical sink depends on link status.
829 * don't do it here if u are during boot
831 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
832 && aconnector
->dc_em_sink
) {
834 /* For S3 resume with headless use eml_sink to fake stream
835 * because on resume connecotr->sink is set ti NULL
837 mutex_lock(&dev
->mode_config
.mutex
);
840 if (aconnector
->dc_sink
) {
841 amdgpu_dm_remove_sink_from_freesync_module(
843 /* retain and release bellow are used for
844 * bump up refcount for sink because the link don't point
845 * to it anymore after disconnect so on next crtc to connector
846 * reshuffle by UMD we will get into unwanted dc_sink release
848 if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
849 dc_sink_release(aconnector
->dc_sink
);
851 aconnector
->dc_sink
= sink
;
852 amdgpu_dm_add_sink_to_freesync_module(
853 connector
, aconnector
->edid
);
855 amdgpu_dm_remove_sink_from_freesync_module(connector
);
856 if (!aconnector
->dc_sink
)
857 aconnector
->dc_sink
= aconnector
->dc_em_sink
;
858 else if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
859 dc_sink_retain(aconnector
->dc_sink
);
862 mutex_unlock(&dev
->mode_config
.mutex
);
867 * TODO: temporary guard to look for proper fix
868 * if this sink is MST sink, we should not do anything
870 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
)
873 if (aconnector
->dc_sink
== sink
) {
874 /* We got a DP short pulse (Link Loss, DP CTS, etc...).
876 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
877 aconnector
->connector_id
);
881 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
882 aconnector
->connector_id
, aconnector
->dc_sink
, sink
);
884 mutex_lock(&dev
->mode_config
.mutex
);
886 /* 1. Update status of the drm connector
887 * 2. Send an event and let userspace tell us what to do */
889 /* TODO: check if we still need the S3 mode update workaround.
890 * If yes, put it here. */
891 if (aconnector
->dc_sink
)
892 amdgpu_dm_remove_sink_from_freesync_module(
895 aconnector
->dc_sink
= sink
;
896 if (sink
->dc_edid
.length
== 0) {
897 aconnector
->edid
= NULL
;
900 (struct edid
*) sink
->dc_edid
.raw_edid
;
903 drm_mode_connector_update_edid_property(connector
,
906 amdgpu_dm_add_sink_to_freesync_module(connector
, aconnector
->edid
);
909 amdgpu_dm_remove_sink_from_freesync_module(connector
);
910 drm_mode_connector_update_edid_property(connector
, NULL
);
911 aconnector
->num_modes
= 0;
912 aconnector
->dc_sink
= NULL
;
913 aconnector
->edid
= NULL
;
916 mutex_unlock(&dev
->mode_config
.mutex
);
919 static void handle_hpd_irq(void *param
)
921 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
922 struct drm_connector
*connector
= &aconnector
->base
;
923 struct drm_device
*dev
= connector
->dev
;
925 /* In case of failure or MST no need to update connector status or notify the OS
926 * since (for MST case) MST does this in it's own context.
928 mutex_lock(&aconnector
->hpd_lock
);
930 if (aconnector
->fake_enable
)
931 aconnector
->fake_enable
= false;
933 if (dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
)) {
934 amdgpu_dm_update_connector_after_detect(aconnector
);
937 drm_modeset_lock_all(dev
);
938 dm_restore_drm_connector_state(dev
, connector
);
939 drm_modeset_unlock_all(dev
);
941 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
942 drm_kms_helper_hotplug_event(dev
);
944 mutex_unlock(&aconnector
->hpd_lock
);
948 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector
*aconnector
)
950 uint8_t esi
[DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
] = { 0 };
952 bool new_irq_handled
= false;
954 int dpcd_bytes_to_read
;
956 const int max_process_count
= 30;
957 int process_count
= 0;
959 const struct dc_link_status
*link_status
= dc_link_get_status(aconnector
->dc_link
);
961 if (link_status
->dpcd_caps
->dpcd_rev
.raw
< 0x12) {
962 dpcd_bytes_to_read
= DP_LANE0_1_STATUS
- DP_SINK_COUNT
;
963 /* DPCD 0x200 - 0x201 for downstream IRQ */
964 dpcd_addr
= DP_SINK_COUNT
;
966 dpcd_bytes_to_read
= DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
;
967 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
968 dpcd_addr
= DP_SINK_COUNT_ESI
;
971 dret
= drm_dp_dpcd_read(
972 &aconnector
->dm_dp_aux
.aux
,
977 while (dret
== dpcd_bytes_to_read
&&
978 process_count
< max_process_count
) {
984 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi
[0], esi
[1], esi
[2]);
985 /* handle HPD short pulse irq */
986 if (aconnector
->mst_mgr
.mst_state
)
988 &aconnector
->mst_mgr
,
992 if (new_irq_handled
) {
993 /* ACK at DPCD to notify down stream */
994 const int ack_dpcd_bytes_to_write
=
995 dpcd_bytes_to_read
- 1;
997 for (retry
= 0; retry
< 3; retry
++) {
1000 wret
= drm_dp_dpcd_write(
1001 &aconnector
->dm_dp_aux
.aux
,
1004 ack_dpcd_bytes_to_write
);
1005 if (wret
== ack_dpcd_bytes_to_write
)
1009 /* check if there is new irq to be handle */
1010 dret
= drm_dp_dpcd_read(
1011 &aconnector
->dm_dp_aux
.aux
,
1014 dpcd_bytes_to_read
);
1016 new_irq_handled
= false;
1022 if (process_count
== max_process_count
)
1023 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
1026 static void handle_hpd_rx_irq(void *param
)
1028 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
1029 struct drm_connector
*connector
= &aconnector
->base
;
1030 struct drm_device
*dev
= connector
->dev
;
1031 struct dc_link
*dc_link
= aconnector
->dc_link
;
1032 bool is_mst_root_connector
= aconnector
->mst_mgr
.mst_state
;
1034 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
1035 * conflict, after implement i2c helper, this mutex should be
1038 if (dc_link
->type
!= dc_connection_mst_branch
)
1039 mutex_lock(&aconnector
->hpd_lock
);
1041 if (dc_link_handle_hpd_rx_irq(dc_link
, NULL
) &&
1042 !is_mst_root_connector
) {
1043 /* Downstream Port status changed. */
1044 if (dc_link_detect(dc_link
, DETECT_REASON_HPDRX
)) {
1046 if (aconnector
->fake_enable
)
1047 aconnector
->fake_enable
= false;
1049 amdgpu_dm_update_connector_after_detect(aconnector
);
1052 drm_modeset_lock_all(dev
);
1053 dm_restore_drm_connector_state(dev
, connector
);
1054 drm_modeset_unlock_all(dev
);
1056 drm_kms_helper_hotplug_event(dev
);
1059 if ((dc_link
->cur_link_settings
.lane_count
!= LANE_COUNT_UNKNOWN
) ||
1060 (dc_link
->type
== dc_connection_mst_branch
))
1061 dm_handle_hpd_rx_irq(aconnector
);
1063 if (dc_link
->type
!= dc_connection_mst_branch
)
1064 mutex_unlock(&aconnector
->hpd_lock
);
1067 static void register_hpd_handlers(struct amdgpu_device
*adev
)
1069 struct drm_device
*dev
= adev
->ddev
;
1070 struct drm_connector
*connector
;
1071 struct amdgpu_dm_connector
*aconnector
;
1072 const struct dc_link
*dc_link
;
1073 struct dc_interrupt_params int_params
= {0};
1075 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1076 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1078 list_for_each_entry(connector
,
1079 &dev
->mode_config
.connector_list
, head
) {
1081 aconnector
= to_amdgpu_dm_connector(connector
);
1082 dc_link
= aconnector
->dc_link
;
1084 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd
) {
1085 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1086 int_params
.irq_source
= dc_link
->irq_source_hpd
;
1088 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1090 (void *) aconnector
);
1093 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd_rx
) {
1095 /* Also register for DP short pulse (hpd_rx). */
1096 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1097 int_params
.irq_source
= dc_link
->irq_source_hpd_rx
;
1099 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1101 (void *) aconnector
);
1106 /* Register IRQ sources and initialize IRQ callbacks */
1107 static int dce110_register_irq_handlers(struct amdgpu_device
*adev
)
1109 struct dc
*dc
= adev
->dm
.dc
;
1110 struct common_irq_params
*c_irq_params
;
1111 struct dc_interrupt_params int_params
= {0};
1114 unsigned client_id
= AMDGPU_IH_CLIENTID_LEGACY
;
1116 if (adev
->asic_type
== CHIP_VEGA10
||
1117 adev
->asic_type
== CHIP_VEGA12
||
1118 adev
->asic_type
== CHIP_VEGA20
||
1119 adev
->asic_type
== CHIP_RAVEN
)
1120 client_id
= SOC15_IH_CLIENTID_DCE
;
1122 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1123 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1125 /* Actions of amdgpu_irq_add_id():
1126 * 1. Register a set() function with base driver.
1127 * Base driver will call set() function to enable/disable an
1128 * interrupt in DC hardware.
1129 * 2. Register amdgpu_dm_irq_handler().
1130 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1131 * coming from DC hardware.
1132 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1133 * for acknowledging and handling. */
1135 /* Use VBLANK interrupt */
1136 for (i
= VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
; i
<= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0
; i
++) {
1137 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->crtc_irq
);
1139 DRM_ERROR("Failed to add crtc irq id!\n");
1143 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1144 int_params
.irq_source
=
1145 dc_interrupt_to_irq_source(dc
, i
, 0);
1147 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1149 c_irq_params
->adev
= adev
;
1150 c_irq_params
->irq_src
= int_params
.irq_source
;
1152 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1153 dm_crtc_high_irq
, c_irq_params
);
1156 /* Use GRPH_PFLIP interrupt */
1157 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
1158 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
1159 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
1161 DRM_ERROR("Failed to add page flip irq id!\n");
1165 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1166 int_params
.irq_source
=
1167 dc_interrupt_to_irq_source(dc
, i
, 0);
1169 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1171 c_irq_params
->adev
= adev
;
1172 c_irq_params
->irq_src
= int_params
.irq_source
;
1174 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1175 dm_pflip_high_irq
, c_irq_params
);
1180 r
= amdgpu_irq_add_id(adev
, client_id
,
1181 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
1183 DRM_ERROR("Failed to add hpd irq id!\n");
1187 register_hpd_handlers(adev
);
1192 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1193 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * NOTE(review): this chunk is an extraction artifact -- statements are split
 * across lines and prefixed with the original file's line numbers, and some
 * lines (error checks, returns, braces) are missing. Comments below describe
 * only the logic that is visible here.
 */
/*
 * dcn10_register_irq_handlers() - register DCN 1.0 interrupt sources
 * (VSTARTUP per CRTC, HUBP page-flip per CRTC, HPD) with the base driver
 * via amdgpu_irq_add_id() and hook the DM high-IRQ handlers to them.
 */
1194 static int dcn10_register_irq_handlers(struct amdgpu_device
*adev
)
1196 struct dc
*dc
= adev
->dm
.dc
;
1197 struct common_irq_params
*c_irq_params
;
1198 struct dc_interrupt_params int_params
= {0};
1202 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1203 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1205 /* Actions of amdgpu_irq_add_id():
1206 * 1. Register a set() function with base driver.
1207 * Base driver will call set() function to enable/disable an
1208 * interrupt in DC hardware.
1209 * 2. Register amdgpu_dm_irq_handler().
1210 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1211 * coming from DC hardware.
1212 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1213 * for acknowledging and handling.
1216 /* Use VSTARTUP interrupt */
/* One VSTARTUP source per CRTC; per-CRTC params live in dm.vblank_params,
 * indexed by (irq_source - DC_IRQ_SOURCE_VBLANK1). */
1217 for (i
= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
;
1218 i
<= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
+ adev
->mode_info
.num_crtc
- 1;
1220 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->crtc_irq
);
1223 DRM_ERROR("Failed to add crtc irq id!\n");
1227 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1228 int_params
.irq_source
=
1229 dc_interrupt_to_irq_source(dc
, i
, 0);
1231 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1233 c_irq_params
->adev
= adev
;
1234 c_irq_params
->irq_src
= int_params
.irq_source
;
1236 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1237 dm_crtc_high_irq
, c_irq_params
);
1240 /* Use GRPH_PFLIP interrupt */
/* One HUBP flip source per CRTC; per-CRTC params live in dm.pflip_params,
 * indexed by (irq_source - DC_IRQ_SOURCE_PFLIP_FIRST). */
1241 for (i
= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
;
1242 i
<= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
+ adev
->mode_info
.num_crtc
- 1;
1244 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->pageflip_irq
);
1246 DRM_ERROR("Failed to add page flip irq id!\n");
1250 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1251 int_params
.irq_source
=
1252 dc_interrupt_to_irq_source(dc
, i
, 0);
1254 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1256 c_irq_params
->adev
= adev
;
1257 c_irq_params
->irq_src
= int_params
.irq_source
;
1259 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1260 dm_pflip_high_irq
, c_irq_params
);
/* Single HPD (hot-plug detect) source; handlers attached separately below. */
1265 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, DCN_1_0__SRCID__DC_HPD1_INT
,
1268 DRM_ERROR("Failed to add hpd irq id!\n");
1272 register_hpd_handlers(adev
);
/*
 * amdgpu_dm_mode_config_init() - set up the DRM mode_config for this device:
 * install the DM mode funcs/helpers, advertise 16384x16384 max resolution,
 * 24bpp preferred depth, async (immediate) page flips, and the aperture base
 * as fb_base, then create the amdgpu modeset properties.
 */
1278 static int amdgpu_dm_mode_config_init(struct amdgpu_device
*adev
)
1282 adev
->mode_info
.mode_config_initialized
= true;
1284 adev
->ddev
->mode_config
.funcs
= (void *)&amdgpu_dm_mode_funcs
;
1285 adev
->ddev
->mode_config
.helper_private
= &amdgpu_dm_mode_config_helperfuncs
;
1287 adev
->ddev
->mode_config
.max_width
= 16384;
1288 adev
->ddev
->mode_config
.max_height
= 16384;
1290 adev
->ddev
->mode_config
.preferred_depth
= 24;
1291 adev
->ddev
->mode_config
.prefer_shadow
= 1;
1292 /* indicate support of immediate flip */
1293 adev
->ddev
->mode_config
.async_page_flip
= true;
1295 adev
->ddev
->mode_config
.fb_base
= adev
->gmc
.aper_base
;
1297 r
= amdgpu_display_modeset_create_props(adev
);
1304 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1305 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/*
 * Backlight class callback: push the requested brightness from the
 * backlight device down to the DC link that owns the panel backlight.
 */
1307 static int amdgpu_dm_backlight_update_status(struct backlight_device
*bd
)
1309 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
1311 if (dc_link_set_backlight_level(dm
->backlight_link
,
1312 bd
->props
.brightness
, 0, 0))
/* Backlight class callback: report the last requested brightness
 * (cached in props; the hardware is not queried). */
1318 static int amdgpu_dm_backlight_get_brightness(struct backlight_device
*bd
)
1320 return bd
->props
.brightness
;
/* Ops table wiring the two callbacks above into the backlight class. */
1323 static const struct backlight_ops amdgpu_dm_backlight_ops
= {
1324 .get_brightness
= amdgpu_dm_backlight_get_brightness
,
1325 .update_status
= amdgpu_dm_backlight_update_status
,
/*
 * Register a "amdgpu_bl%d" backlight class device (RAW type, max level
 * AMDGPU_MAX_BL_LEVEL) for this display manager. Failure is only logged:
 * dm->backlight_dev holds either the device or an ERR_PTR.
 */
1329 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager
*dm
)
1332 struct backlight_properties props
= { 0 };
1334 props
.max_brightness
= AMDGPU_MAX_BL_LEVEL
;
1335 props
.type
= BACKLIGHT_RAW
;
1337 snprintf(bl_name
, sizeof(bl_name
), "amdgpu_bl%d",
1338 dm
->adev
->ddev
->primary
->index
);
1340 dm
->backlight_dev
= backlight_device_register(bl_name
,
1341 dm
->adev
->ddev
->dev
,
1343 &amdgpu_dm_backlight_ops
,
1346 if (IS_ERR(dm
->backlight_dev
))
1347 DRM_ERROR("DM: Backlight registration failed!\n");
1349 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name
);
/*
 * initialize_plane() - allocate one amdgpu_plane, store it in
 * mode_info->planes[plane_id], set its DRM plane type from the per-ASIC
 * plane_type table, choose its possible-CRTC mask and hand it to
 * amdgpu_dm_plane_init().
 */
1354 static int initialize_plane(struct amdgpu_display_manager
*dm
,
1355 struct amdgpu_mode_info
*mode_info
,
1358 struct amdgpu_plane
*plane
;
1359 unsigned long possible_crtcs
;
1362 plane
= kzalloc(sizeof(struct amdgpu_plane
), GFP_KERNEL
);
1363 mode_info
->planes
[plane_id
] = plane
;
1366 DRM_ERROR("KMS: Failed to allocate plane\n");
1369 plane
->base
.type
= mode_info
->plane_type
[plane_id
];
1372 * HACK: IGT tests expect that each plane can only have
1373 * one possible CRTC. For now, set one CRTC for each
1374 * plane that is not an underlay, but still allow multiple
1375 * CRTCs for underlay planes.
/* plane_id >= max_streams means underlay plane: allow any of 8 CRTCs. */
1377 possible_crtcs
= 1 << plane_id
;
1378 if (plane_id
>= dm
->dc
->caps
.max_streams
)
1379 possible_crtcs
= 0xff;
1381 ret
= amdgpu_dm_plane_init(dm
, mode_info
->planes
[plane_id
], possible_crtcs
);
1384 DRM_ERROR("KMS: Failed to initialize plane\n");
/*
 * register_backlight_device() - if the given connected link drives an
 * internal panel (eDP or LVDS), register the backlight class device and
 * remember which link owns the backlight.
 */
1392 static void register_backlight_device(struct amdgpu_display_manager
*dm
,
1393 struct dc_link
*link
)
1395 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1396 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1398 if ((link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) &&
1399 link
->type
!= dc_connection_none
) {
1400 /* Even if registration failed, we should continue with
1401 * DM initialization because not having a backlight control
1402 * is better than a black screen.
1404 amdgpu_dm_register_backlight_device(dm
);
1406 if (dm
->backlight_dev
)
1407 dm
->backlight_link
= link
;
1413 /* In this architecture, the association
1414 * connector -> encoder -> crtc
1415 * is not really required. The crtc and connector will hold the
1416 * display_index as an abstraction to use with DAL component
1418 * Returns 0 on success
/*
 * amdgpu_dm_initialize_drm_device() - create the whole KMS topology for the
 * device: mode_config, overlay + primary planes, one CRTC per DC stream,
 * then an encoder/connector pair per DC link (with boot-time link detect and
 * backlight registration), and finally the per-ASIC IRQ handler registration.
 * The trailing loop frees all allocated planes on the error path.
 */
1420 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
)
1422 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1424 struct amdgpu_dm_connector
*aconnector
= NULL
;
1425 struct amdgpu_encoder
*aencoder
= NULL
;
1426 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
1428 int32_t total_overlay_planes
, total_primary_planes
;
1430 link_cnt
= dm
->dc
->caps
.max_links
;
1431 if (amdgpu_dm_mode_config_init(dm
->adev
)) {
1432 DRM_ERROR("DM: Failed to initialize mode config\n");
1436 /* Identify the number of planes to be initialized */
1437 total_overlay_planes
= dm
->dc
->caps
.max_slave_planes
;
1438 total_primary_planes
= dm
->dc
->caps
.max_planes
- dm
->dc
->caps
.max_slave_planes
;
1440 /* First initialize overlay planes, index starting after primary planes */
1441 for (i
= (total_overlay_planes
- 1); i
>= 0; i
--) {
1442 if (initialize_plane(dm
, mode_info
, (total_primary_planes
+ i
))) {
1443 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
1448 /* Initialize primary planes */
1449 for (i
= (total_primary_planes
- 1); i
>= 0; i
--) {
1450 if (initialize_plane(dm
, mode_info
, i
)) {
1451 DRM_ERROR("KMS: Failed to initialize primary plane\n");
/* One CRTC per DC stream, each bound to the matching primary plane. */
1456 for (i
= 0; i
< dm
->dc
->caps
.max_streams
; i
++)
1457 if (amdgpu_dm_crtc_init(dm
, &mode_info
->planes
[i
]->base
, i
)) {
1458 DRM_ERROR("KMS: Failed to initialize crtc\n");
1462 dm
->display_indexes_num
= dm
->dc
->caps
.max_streams
;
1464 /* loops over all connectors on the board */
1465 for (i
= 0; i
< link_cnt
; i
++) {
1466 struct dc_link
*link
= NULL
;
1468 if (i
> AMDGPU_DM_MAX_DISPLAY_INDEX
) {
1470 "KMS: Cannot support more than %d display indexes\n",
1471 AMDGPU_DM_MAX_DISPLAY_INDEX
);
1475 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
1479 aencoder
= kzalloc(sizeof(*aencoder
), GFP_KERNEL
);
1483 if (amdgpu_dm_encoder_init(dm
->ddev
, aencoder
, i
)) {
1484 DRM_ERROR("KMS: Failed to initialize encoder\n");
1488 if (amdgpu_dm_connector_init(dm
, aconnector
, i
, aencoder
)) {
1489 DRM_ERROR("KMS: Failed to initialize connector\n");
/* Detect at boot so connected panels get a backlight device early. */
1493 link
= dc_get_link_at_index(dm
->dc
, i
);
1495 if (dc_link_detect(link
, DETECT_REASON_BOOT
)) {
1496 amdgpu_dm_update_connector_after_detect(aconnector
);
1497 register_backlight_device(dm
, link
);
1503 /* Software is initialized. Now we can register interrupt handlers. */
1504 switch (adev
->asic_type
) {
1514 case CHIP_POLARIS11
:
1515 case CHIP_POLARIS10
:
1516 case CHIP_POLARIS12
:
1521 if (dce110_register_irq_handlers(dm
->adev
)) {
1522 DRM_ERROR("DM: Failed to initialize IRQ\n");
1526 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1528 if (dcn10_register_irq_handlers(dm
->adev
)) {
1529 DRM_ERROR("DM: Failed to initialize IRQ\n");
1533 * Temporary disable until pplib/smu interaction is implemented
1535 dm
->dc
->debug
.disable_stutter
= true;
1539 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
/* Error path: release every plane allocated above. */
1547 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; i
++)
1548 kfree(mode_info
->planes
[i
]);
/* Tear down the KMS topology created by amdgpu_dm_initialize_drm_device(). */
1552 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
)
1554 drm_mode_config_cleanup(dm
->ddev
);
1558 /******************************************************************************
1559 * amdgpu_display_funcs functions
1560 *****************************************************************************/
1563 * dm_bandwidth_update - program display watermarks
1565 * @adev: amdgpu_device pointer
1567 * Calculate and program the display watermarks and line buffer allocation.
/* Intentionally a stub for now; DC handles bandwidth internally. */
1569 static void dm_bandwidth_update(struct amdgpu_device
*adev
)
1571 /* TODO: implement later */
/* Legacy display-funcs backlight setter; stub, DM uses the backlight
 * class device instead (see amdgpu_dm_backlight_update_status). */
1574 static void dm_set_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
,
1577 /* TODO: translate amdgpu_encoder to display_index and call DAL */
/* Legacy display-funcs backlight getter; stub counterpart of the setter. */
1580 static u8
dm_get_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
)
1582 /* TODO: translate amdgpu_encoder to display_index and call DAL */
/*
 * amdgpu_notify_freesync() - ioctl-style entry that pushes updated freesync
 * parameters to the freesync module for every currently active DC stream.
 */
1586 static int amdgpu_notify_freesync(struct drm_device
*dev
, void *data
,
1587 struct drm_file
*filp
)
1589 struct mod_freesync_params freesync_params
;
1590 uint8_t num_streams
;
1593 struct amdgpu_device
*adev
= dev
->dev_private
;
1596 /* Get freesync enable flag from DRM */
1598 num_streams
= dc_get_current_stream_count(adev
->dm
.dc
);
1600 for (i
= 0; i
< num_streams
; i
++) {
1601 struct dc_stream_state
*stream
;
1602 stream
= dc_get_stream_at_index(adev
->dm
.dc
, i
);
1604 mod_freesync_update_state(adev
->dm
.freesync_module
,
1605 &stream
, 1, &freesync_params
);
/* amdgpu display-funcs vtable for DM. Entries left NULL are covered by
 * VBIOS parsing inside DAL/DC instead of the base driver. */
1611 static const struct amdgpu_display_funcs dm_display_funcs
= {
1612 .bandwidth_update
= dm_bandwidth_update
, /* called unconditionally */
1613 .vblank_get_counter
= dm_vblank_get_counter
,/* called unconditionally */
1614 .backlight_set_level
=
1615 dm_set_backlight_level
,/* called unconditionally */
1616 .backlight_get_level
=
1617 dm_get_backlight_level
,/* called unconditionally */
1618 .hpd_sense
= NULL
,/* called unconditionally */
1619 .hpd_set_polarity
= NULL
, /* called unconditionally */
1620 .hpd_get_gpio_reg
= NULL
, /* VBIOS parsing. DAL does it. */
1621 .page_flip_get_scanoutpos
=
1622 dm_crtc_get_scanoutpos
,/* called unconditionally */
1623 .add_encoder
= NULL
, /* VBIOS parsing. DAL does it. */
1624 .add_connector
= NULL
, /* VBIOS parsing. DAL does it. */
1625 .notify_freesync
= amdgpu_notify_freesync
,
1629 #if defined(CONFIG_DEBUG_KERNEL_DC)
/*
 * Debug-only sysfs hook (s3_debug, write-only): parses an integer S3 state
 * from userspace and triggers a hotplug event so a suspend/resume-like
 * display re-probe can be exercised without a real S3 cycle.
 */
1631 static ssize_t
s3_debug_store(struct device
*device
,
1632 struct device_attribute
*attr
,
1638 struct pci_dev
*pdev
= to_pci_dev(device
);
1639 struct drm_device
*drm_dev
= pci_get_drvdata(pdev
);
1640 struct amdgpu_device
*adev
= drm_dev
->dev_private
;
1642 ret
= kstrtoint(buf
, 0, &s3_state
);
1647 drm_kms_helper_hotplug_event(adev
->ddev
);
/* sysfs convention: consume the whole buffer on success, 0 on parse error. */
1652 return ret
== 0 ? count
: 0;
1655 DEVICE_ATTR_WO(s3_debug
);
/*
 * dm_early_init() - IP-block early init: set per-ASIC display topology
 * (num_crtc / num_hpd / num_dig and the plane-type table), install the DM
 * IRQ funcs and, if nothing else claimed them, the DM display funcs.
 * NOTE(review): the case labels for several ASICs are missing from this
 * extraction; each run of assignments below belongs to one asic_type case.
 */
1659 static int dm_early_init(void *handle
)
1661 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1663 switch (adev
->asic_type
) {
1666 adev
->mode_info
.num_crtc
= 6;
1667 adev
->mode_info
.num_hpd
= 6;
1668 adev
->mode_info
.num_dig
= 6;
1669 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1672 adev
->mode_info
.num_crtc
= 4;
1673 adev
->mode_info
.num_hpd
= 6;
1674 adev
->mode_info
.num_dig
= 7;
1675 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1679 adev
->mode_info
.num_crtc
= 2;
1680 adev
->mode_info
.num_hpd
= 6;
1681 adev
->mode_info
.num_dig
= 6;
1682 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1686 adev
->mode_info
.num_crtc
= 6;
1687 adev
->mode_info
.num_hpd
= 6;
1688 adev
->mode_info
.num_dig
= 7;
1689 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1692 adev
->mode_info
.num_crtc
= 3;
1693 adev
->mode_info
.num_hpd
= 6;
1694 adev
->mode_info
.num_dig
= 9;
1695 adev
->mode_info
.plane_type
= dm_plane_type_carizzo
;
1698 adev
->mode_info
.num_crtc
= 2;
1699 adev
->mode_info
.num_hpd
= 6;
1700 adev
->mode_info
.num_dig
= 9;
1701 adev
->mode_info
.plane_type
= dm_plane_type_stoney
;
1703 case CHIP_POLARIS11
:
1704 case CHIP_POLARIS12
:
1705 adev
->mode_info
.num_crtc
= 5;
1706 adev
->mode_info
.num_hpd
= 5;
1707 adev
->mode_info
.num_dig
= 5;
1708 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1710 case CHIP_POLARIS10
:
1712 adev
->mode_info
.num_crtc
= 6;
1713 adev
->mode_info
.num_hpd
= 6;
1714 adev
->mode_info
.num_dig
= 6;
1715 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1720 adev
->mode_info
.num_crtc
= 6;
1721 adev
->mode_info
.num_hpd
= 6;
1722 adev
->mode_info
.num_dig
= 6;
1723 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1725 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1727 adev
->mode_info
.num_crtc
= 4;
1728 adev
->mode_info
.num_hpd
= 4;
1729 adev
->mode_info
.num_dig
= 4;
1730 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1734 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
1738 amdgpu_dm_set_irq_funcs(adev
);
1740 if (adev
->mode_info
.funcs
== NULL
)
1741 adev
->mode_info
.funcs
= &dm_display_funcs
;
1743 /* Note: Do NOT change adev->audio_endpt_rreg and
1744 * adev->audio_endpt_wreg because they are initialised in
1745 * amdgpu_device_init() */
1746 #if defined(CONFIG_DEBUG_KERNEL_DC)
1749 &dev_attr_s3_debug
);
/*
 * modeset_required() - a full modeset is needed only when the atomic state
 * requests one AND the CRTC ends up enabled and active.
 */
1755 static bool modeset_required(struct drm_crtc_state
*crtc_state
,
1756 struct dc_stream_state
*new_stream
,
1757 struct dc_stream_state
*old_stream
)
1759 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1762 if (!crtc_state
->enable
)
1765 return crtc_state
->active
;
/*
 * modereset_required() - inverse of modeset_required(): a modeset is
 * requested but the CRTC is being disabled or deactivated.
 */
1768 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
1770 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1773 return !crtc_state
->enable
|| !crtc_state
->active
;
/* DRM encoder destroy callback plus the funcs table that installs it. */
1776 static void amdgpu_dm_encoder_destroy(struct drm_encoder
*encoder
)
1778 drm_encoder_cleanup(encoder
);
1782 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
1783 .destroy
= amdgpu_dm_encoder_destroy
,
/*
 * fill_rects_from_plane_state() - translate DRM plane src/crtc rectangles
 * and rotation into the DC plane_state src/dst/clip rects. DRM src
 * coordinates are 16.16 fixed point, hence the >> 16; zero-sized source or
 * destination is rejected.
 */
1786 static bool fill_rects_from_plane_state(const struct drm_plane_state
*state
,
1787 struct dc_plane_state
*plane_state
)
1789 plane_state
->src_rect
.x
= state
->src_x
>> 16;
1790 plane_state
->src_rect
.y
= state
->src_y
>> 16;
1791 /* we ignore the mantissa for now and do not deal with floating pixels :( */
1792 plane_state
->src_rect
.width
= state
->src_w
>> 16;
1794 if (plane_state
->src_rect
.width
== 0)
1797 plane_state
->src_rect
.height
= state
->src_h
>> 16;
1798 if (plane_state
->src_rect
.height
== 0)
1801 plane_state
->dst_rect
.x
= state
->crtc_x
;
1802 plane_state
->dst_rect
.y
= state
->crtc_y
;
1804 if (state
->crtc_w
== 0)
1807 plane_state
->dst_rect
.width
= state
->crtc_w
;
1809 if (state
->crtc_h
== 0)
1812 plane_state
->dst_rect
.height
= state
->crtc_h
;
/* Clip to exactly the destination rect. */
1814 plane_state
->clip_rect
= plane_state
->dst_rect
;
1816 switch (state
->rotation
& DRM_MODE_ROTATE_MASK
) {
1817 case DRM_MODE_ROTATE_0
:
1818 plane_state
->rotation
= ROTATION_ANGLE_0
;
1820 case DRM_MODE_ROTATE_90
:
1821 plane_state
->rotation
= ROTATION_ANGLE_90
;
1823 case DRM_MODE_ROTATE_180
:
1824 plane_state
->rotation
= ROTATION_ANGLE_180
;
1826 case DRM_MODE_ROTATE_270
:
1827 plane_state
->rotation
= ROTATION_ANGLE_270
;
1830 plane_state
->rotation
= ROTATION_ANGLE_0
;
/*
 * get_fb_info() - reserve the framebuffer's backing BO just long enough to
 * read its tiling flags. -ERESTARTSYS from the reserve is silent because it
 * only means the wait was interrupted and will be retried.
 */
1836 static int get_fb_info(const struct amdgpu_framebuffer
*amdgpu_fb
,
1837 uint64_t *tiling_flags
)
1839 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->base
.obj
[0]);
1840 int r
= amdgpu_bo_reserve(rbo
, false);
1843 // Don't show error msg. when return -ERESTARTSYS
1844 if (r
!= -ERESTARTSYS
)
1845 DRM_ERROR("Unable to reserve buffer: %d\n", r
);
1850 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
1852 amdgpu_bo_unreserve(rbo
);
/*
 * fill_plane_attributes_from_fb() - derive the DC plane_state pixel format,
 * plane sizes/pitches, tiling info (GFX8 or GFX9 depending on ASIC) and
 * default scaling quality from a DRM framebuffer.
 */
1857 static int fill_plane_attributes_from_fb(struct amdgpu_device
*adev
,
1858 struct dc_plane_state
*plane_state
,
1859 const struct amdgpu_framebuffer
*amdgpu_fb
)
1861 uint64_t tiling_flags
;
1862 unsigned int awidth
;
1863 const struct drm_framebuffer
*fb
= &amdgpu_fb
->base
;
1865 struct drm_format_name_buf format_name
;
/* Map DRM fourcc to DC surface pixel format; unknown formats are errors. */
1874 switch (fb
->format
->format
) {
1876 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
1878 case DRM_FORMAT_RGB565
:
1879 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
1881 case DRM_FORMAT_XRGB8888
:
1882 case DRM_FORMAT_ARGB8888
:
1883 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
1885 case DRM_FORMAT_XRGB2101010
:
1886 case DRM_FORMAT_ARGB2101010
:
1887 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
1889 case DRM_FORMAT_XBGR2101010
:
1890 case DRM_FORMAT_ABGR2101010
:
1891 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
1893 case DRM_FORMAT_NV21
:
1894 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
1896 case DRM_FORMAT_NV12
:
1897 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
1900 DRM_ERROR("Unsupported screen format %s\n",
1901 drm_get_format_name(fb
->format
->format
, &format_name
));
/* Graphics (RGB) surfaces use the grph size/pitch union member... */
1905 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
1906 plane_state
->address
.type
= PLN_ADDR_TYPE_GRAPHICS
;
1907 plane_state
->plane_size
.grph
.surface_size
.x
= 0;
1908 plane_state
->plane_size
.grph
.surface_size
.y
= 0;
1909 plane_state
->plane_size
.grph
.surface_size
.width
= fb
->width
;
1910 plane_state
->plane_size
.grph
.surface_size
.height
= fb
->height
;
1911 plane_state
->plane_size
.grph
.surface_pitch
=
1912 fb
->pitches
[0] / fb
->format
->cpp
[0];
1913 /* TODO: unhardcode */
1914 plane_state
->color_space
= COLOR_SPACE_SRGB
;
/* ...video (NV12/NV21) surfaces use luma + half-pitch chroma planes,
 * with the width aligned up to 64. */
1917 awidth
= ALIGN(fb
->width
, 64);
1918 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
1919 plane_state
->plane_size
.video
.luma_size
.x
= 0;
1920 plane_state
->plane_size
.video
.luma_size
.y
= 0;
1921 plane_state
->plane_size
.video
.luma_size
.width
= awidth
;
1922 plane_state
->plane_size
.video
.luma_size
.height
= fb
->height
;
1923 /* TODO: unhardcode */
1924 plane_state
->plane_size
.video
.luma_pitch
= awidth
;
1926 plane_state
->plane_size
.video
.chroma_size
.x
= 0;
1927 plane_state
->plane_size
.video
.chroma_size
.y
= 0;
1928 plane_state
->plane_size
.video
.chroma_size
.width
= awidth
;
1929 plane_state
->plane_size
.video
.chroma_size
.height
= fb
->height
;
1930 plane_state
->plane_size
.video
.chroma_pitch
= awidth
/ 2;
1932 /* TODO: unhardcode */
1933 plane_state
->color_space
= COLOR_SPACE_YCBCR709
;
1936 memset(&plane_state
->tiling_info
, 0, sizeof(plane_state
->tiling_info
));
1938 /* Fill GFX8 params */
1939 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
) {
1940 unsigned int bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
1942 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
1943 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
1944 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
1945 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
1946 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
1948 /* XXX fix me for VI */
1949 plane_state
->tiling_info
.gfx8
.num_banks
= num_banks
;
1950 plane_state
->tiling_info
.gfx8
.array_mode
=
1951 DC_ARRAY_2D_TILED_THIN1
;
1952 plane_state
->tiling_info
.gfx8
.tile_split
= tile_split
;
1953 plane_state
->tiling_info
.gfx8
.bank_width
= bankw
;
1954 plane_state
->tiling_info
.gfx8
.bank_height
= bankh
;
1955 plane_state
->tiling_info
.gfx8
.tile_aspect
= mtaspect
;
1956 plane_state
->tiling_info
.gfx8
.tile_mode
=
1957 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
1958 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
1959 == DC_ARRAY_1D_TILED_THIN1
) {
1960 plane_state
->tiling_info
.gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
1963 plane_state
->tiling_info
.gfx8
.pipe_config
=
1964 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
/* Vega/Raven: fill GFX9 tiling from gb_addr_config instead. */
1966 if (adev
->asic_type
== CHIP_VEGA10
||
1967 adev
->asic_type
== CHIP_VEGA12
||
1968 adev
->asic_type
== CHIP_VEGA20
||
1969 adev
->asic_type
== CHIP_RAVEN
) {
1970 /* Fill GFX9 params */
1971 plane_state
->tiling_info
.gfx9
.num_pipes
=
1972 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
1973 plane_state
->tiling_info
.gfx9
.num_banks
=
1974 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
1975 plane_state
->tiling_info
.gfx9
.pipe_interleave
=
1976 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
1977 plane_state
->tiling_info
.gfx9
.num_shader_engines
=
1978 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
1979 plane_state
->tiling_info
.gfx9
.max_compressed_frags
=
1980 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
1981 plane_state
->tiling_info
.gfx9
.num_rb_per_se
=
1982 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
1983 plane_state
->tiling_info
.gfx9
.swizzle
=
1984 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
1985 plane_state
->tiling_info
.gfx9
.shaderEnable
= 1;
1988 plane_state
->visible
= true;
1989 plane_state
->scaling_quality
.h_taps_c
= 0;
1990 plane_state
->scaling_quality
.v_taps_c
= 0;
1992 /* is this needed? is plane_state zeroed at allocation? */
1993 plane_state
->scaling_quality
.h_taps
= 0;
1994 plane_state
->scaling_quality
.v_taps
= 0;
1995 plane_state
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
/*
 * fill_plane_attributes() - populate a DC plane_state from a DRM plane
 * state: rects/rotation first, then framebuffer-derived attributes, then the
 * input (degamma) transfer function from the CRTC state.
 */
2001 static int fill_plane_attributes(struct amdgpu_device
*adev
,
2002 struct dc_plane_state
*dc_plane_state
,
2003 struct drm_plane_state
*plane_state
,
2004 struct drm_crtc_state
*crtc_state
)
2006 const struct amdgpu_framebuffer
*amdgpu_fb
=
2007 to_amdgpu_framebuffer(plane_state
->fb
);
2008 const struct drm_crtc
*crtc
= plane_state
->crtc
;
2011 if (!fill_rects_from_plane_state(plane_state
, dc_plane_state
))
2014 ret
= fill_plane_attributes_from_fb(
2015 crtc
->dev
->dev_private
,
2023 * Always set input transfer function, since plane state is refreshed
/* On degamma failure, drop the stale in_transfer_func reference. */
2026 ret
= amdgpu_dm_set_degamma_lut(crtc_state
, dc_plane_state
);
2028 dc_transfer_func_release(dc_plane_state
->in_transfer_func
);
2029 dc_plane_state
->in_transfer_func
= NULL
;
2035 /*****************************************************************************/
/*
 * update_stream_scaling_settings() - compute the stream's src (viewport) and
 * dst (addressable area) rects from the requested mode, the connector's
 * RMX scaling mode, and the underscan borders.
 */
2037 static void update_stream_scaling_settings(const struct drm_display_mode
*mode
,
2038 const struct dm_connector_state
*dm_state
,
2039 struct dc_stream_state
*stream
)
2041 enum amdgpu_rmx_type rmx_type
;
2043 struct rect src
= { 0 }; /* viewport in composition space*/
2044 struct rect dst
= { 0 }; /* stream addressable area */
2046 /* no mode. nothing to be done */
2050 /* Full screen scaling by default */
2051 src
.width
= mode
->hdisplay
;
2052 src
.height
= mode
->vdisplay
;
2053 dst
.width
= stream
->timing
.h_addressable
;
2054 dst
.height
= stream
->timing
.v_addressable
;
2057 rmx_type
= dm_state
->scaling
;
/* RMX_ASPECT/RMX_OFF: shrink one dst axis to preserve the src aspect
 * ratio (cross-multiplication avoids division rounding). */
2058 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
2059 if (src
.width
* dst
.height
<
2060 src
.height
* dst
.width
) {
2061 /* height needs less upscaling/more downscaling */
2062 dst
.width
= src
.width
*
2063 dst
.height
/ src
.height
;
2065 /* width needs less upscaling/more downscaling */
2066 dst
.height
= src
.height
*
2067 dst
.width
/ src
.width
;
2069 } else if (rmx_type
== RMX_CENTER
) {
/* Center the destination rect inside the addressable area. */
2073 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
2074 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
2076 if (dm_state
->underscan_enable
) {
2077 dst
.x
+= dm_state
->underscan_hborder
/ 2;
2078 dst
.y
+= dm_state
->underscan_vborder
/ 2;
2079 dst
.width
-= dm_state
->underscan_hborder
;
2080 dst
.height
-= dm_state
->underscan_vborder
;
2087 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
2088 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
/*
 * Map the connector's EDID-reported bpc to a DC color depth; falls back to
 * 8-bit (COLOR_DEPTH_888) when EDID predates bpc reporting.
 * NOTE(review): the switch/case lines selecting between these returns are
 * missing from this extraction; only the return values are visible.
 */
2092 static enum dc_color_depth
2093 convert_color_depth_from_display_info(const struct drm_connector
*connector
)
2095 uint32_t bpc
= connector
->display_info
.bpc
;
2099 /* Temporary Work around, DRM don't parse color depth for
2100 * EDID revision before 1.4
2101 * TODO: Fix edid parsing
2103 return COLOR_DEPTH_888
;
2105 return COLOR_DEPTH_666
;
2107 return COLOR_DEPTH_888
;
2109 return COLOR_DEPTH_101010
;
2111 return COLOR_DEPTH_121212
;
2113 return COLOR_DEPTH_141414
;
2115 return COLOR_DEPTH_161616
;
2117 return COLOR_DEPTH_UNDEFINED
;
/*
 * get_aspect_ratio() - classify the mode as 16:9 when hdisplay*9 is within
 * +/-10 of vdisplay*16 (cross-multiplication with a small tolerance),
 * otherwise 4:3.
 */
2121 static enum dc_aspect_ratio
2122 get_aspect_ratio(const struct drm_display_mode
*mode_in
)
2124 int32_t width
= mode_in
->crtc_hdisplay
* 9;
2125 int32_t height
= mode_in
->crtc_vdisplay
* 16;
2127 if ((width
- height
) < 10 && (width
- height
) > -10)
2128 return ASPECT_RATIO_16_9
;
2130 return ASPECT_RATIO_4_3
;
/*
 * get_output_color_space() - choose the output color space for a timing:
 * YCbCr encodings pick BT.709 vs BT.601 by pixel clock (HDTV vs SDTV split
 * at 27.03 MHz per the HDMI spec), with limited-range variants for Y-only;
 * RGB stays sRGB.
 */
2133 static enum dc_color_space
2134 get_output_color_space(const struct dc_crtc_timing
*dc_crtc_timing
)
2136 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
2138 switch (dc_crtc_timing
->pixel_encoding
) {
2139 case PIXEL_ENCODING_YCBCR422
:
2140 case PIXEL_ENCODING_YCBCR444
:
2141 case PIXEL_ENCODING_YCBCR420
:
2144 * 27030khz is the separation point between HDTV and SDTV
2145 * according to HDMI spec, we use YCbCr709 and YCbCr601
2148 if (dc_crtc_timing
->pix_clk_khz
> 27030) {
2149 if (dc_crtc_timing
->flags
.Y_ONLY
)
2151 COLOR_SPACE_YCBCR709_LIMITED
;
2153 color_space
= COLOR_SPACE_YCBCR709
;
2155 if (dc_crtc_timing
->flags
.Y_ONLY
)
2157 COLOR_SPACE_YCBCR601_LIMITED
;
2159 color_space
= COLOR_SPACE_YCBCR601
;
2164 case PIXEL_ENCODING_RGB
:
2165 color_space
= COLOR_SPACE_SRGB
;
2176 /*****************************************************************************/
/*
 * fill_stream_properties_from_drm_display_mode() - translate a DRM display
 * mode plus connector capabilities into the DC crtc timing of the stream:
 * pixel encoding (YCbCr444 only for HDMI sinks that advertise it), color
 * depth, CEA VIC, all h/v timing fields, sync polarities, aspect ratio,
 * output color space, and a default sRGB output transfer function.
 */
2179 fill_stream_properties_from_drm_display_mode(struct dc_stream_state
*stream
,
2180 const struct drm_display_mode
*mode_in
,
2181 const struct drm_connector
*connector
)
2183 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
2185 memset(timing_out
, 0, sizeof(struct dc_crtc_timing
));
2187 timing_out
->h_border_left
= 0;
2188 timing_out
->h_border_right
= 0;
2189 timing_out
->v_border_top
= 0;
2190 timing_out
->v_border_bottom
= 0;
2191 /* TODO: un-hardcode */
2193 if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
2194 && stream
->sink
->sink_signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
2195 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
2197 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
2199 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
2200 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
2202 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
2203 timing_out
->hdmi_vic
= 0;
2204 timing_out
->vic
= drm_match_cea_mode(mode_in
);
/* Derive DC timing fields from the crtc_* (hardware) mode values. */
2206 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
2207 timing_out
->h_total
= mode_in
->crtc_htotal
;
2208 timing_out
->h_sync_width
=
2209 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
2210 timing_out
->h_front_porch
=
2211 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
2212 timing_out
->v_total
= mode_in
->crtc_vtotal
;
2213 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
2214 timing_out
->v_front_porch
=
2215 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
2216 timing_out
->v_sync_width
=
2217 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
2218 timing_out
->pix_clk_khz
= mode_in
->crtc_clock
;
2219 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
2220 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
2221 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
2222 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
2223 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
2225 stream
->output_color_space
= get_output_color_space(timing_out
);
2227 stream
->out_transfer_func
->type
= TF_TYPE_PREDEFINED
;
2228 stream
->out_transfer_func
->tf
= TRANSFER_FUNCTION_SRGB
;
/*
 * fill_audio_info() - copy audio capabilities from the sink's parsed EDID
 * caps (and the connector's CEA latency fields) into the DC audio_info:
 * IDs, display name, per-mode format/channels/rates/sample size (CEA rev 3+
 * only), speaker allocation, and progressive-mode latencies.
 */
2231 static void fill_audio_info(struct audio_info
*audio_info
,
2232 const struct drm_connector
*drm_connector
,
2233 const struct dc_sink
*dc_sink
)
2236 int cea_revision
= 0;
2237 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
2239 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
2240 audio_info
->product_id
= edid_caps
->product_id
;
2242 cea_revision
= drm_connector
->display_info
.cea_rev
;
2244 strncpy(audio_info
->display_name
,
2245 edid_caps
->display_name
,
2246 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
- 1);
2248 if (cea_revision
>= 3) {
2249 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
2251 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
2252 audio_info
->modes
[i
].format_code
=
2253 (enum audio_format_code
)
2254 (edid_caps
->audio_modes
[i
].format_code
);
2255 audio_info
->modes
[i
].channel_count
=
2256 edid_caps
->audio_modes
[i
].channel_count
;
2257 audio_info
->modes
[i
].sample_rates
.all
=
2258 edid_caps
->audio_modes
[i
].sample_rate
;
2259 audio_info
->modes
[i
].sample_size
=
2260 edid_caps
->audio_modes
[i
].sample_size
;
2264 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
2266 /* TODO: We only check for the progressive mode, check for interlace mode too */
2267 if (drm_connector
->latency_present
[0]) {
2268 audio_info
->video_latency
= drm_connector
->video_latency
[0];
2269 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
2272 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
/* Copy every crtc_* (hardware) timing field from src_mode to dst_mode,
 * leaving the user-visible mode fields of dst_mode untouched. */
2277 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode
*src_mode
,
2278 struct drm_display_mode
*dst_mode
)
2280 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
2281 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
2282 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
2283 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
2284 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
2285 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
2286 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
2287 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
2288 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
2289 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
2290 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
2291 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
2292 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
2293 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
/*
 * When scaling is enabled -- or the requested mode already matches the
 * native mode's clock and totals -- drive the panel with the native CRTC
 * timing; otherwise leave the requested timing untouched.
 */
2297 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode
*drm_mode
,
2298 const struct drm_display_mode
*native_mode
,
2301 if (scale_enabled
) {
2302 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2303 } else if (native_mode
->clock
== drm_mode
->clock
&&
2304 native_mode
->htotal
== drm_mode
->htotal
&&
2305 native_mode
->vtotal
== drm_mode
->vtotal
) {
2306 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2308 /* no scaling nor amdgpu inserted, no need to patch */
/*
 * create_fake_sink() - build a virtual dc_sink on the connector's link so a
 * stream can be created while no real sink is attached (e.g. headless or
 * forced modes). Returns the new sink or logs an error on failure.
 */
2312 static struct dc_sink
*
2313 create_fake_sink(struct amdgpu_dm_connector
*aconnector
)
2315 struct dc_sink_init_data sink_init_data
= { 0 };
2316 struct dc_sink
*sink
= NULL
;
2317 sink_init_data
.link
= aconnector
->dc_link
;
2318 sink_init_data
.sink_signal
= aconnector
->dc_link
->connector_signal
;
2320 sink
= dc_sink_create(&sink_init_data
);
2322 DRM_ERROR("Failed to create sink!\n");
2325 sink
->sink_signal
= SIGNAL_TYPE_VIRTUAL
;
/* For streams that take part in multi-display sync, arm the CRTC-reset
 * trigger on VSYNC rising edge with a next-line delay. */
2330 static void set_multisync_trigger_params(
2331 struct dc_stream_state
*stream
)
2333 if (stream
->triggered_crtc_reset
.enabled
) {
2334 stream
->triggered_crtc_reset
.event
= CRTC_EVENT_VSYNC_RISING
;
2335 stream
->triggered_crtc_reset
.delay
= TRIGGER_DELAY_NEXT_LINE
;
/*
 * set_master_stream() - among the sync-enabled streams, pick the one with
 * the highest refresh rate (pix_clk*1000 / (h_total*v_total)) as master,
 * then point every stream's CRTC-reset event source at it.
 */
2339 static void set_master_stream(struct dc_stream_state
*stream_set
[],
2342 int j
, highest_rfr
= 0, master_stream
= 0;
2344 for (j
= 0; j
< stream_count
; j
++) {
2345 if (stream_set
[j
] && stream_set
[j
]->triggered_crtc_reset
.enabled
) {
2346 int refresh_rate
= 0;
2348 refresh_rate
= (stream_set
[j
]->timing
.pix_clk_khz
*1000)/
2349 (stream_set
[j
]->timing
.h_total
*stream_set
[j
]->timing
.v_total
);
2350 if (refresh_rate
> highest_rfr
) {
2351 highest_rfr
= refresh_rate
;
2356 for (j
= 0; j
< stream_count
; j
++) {
2358 stream_set
[j
]->triggered_crtc_reset
.event_source
= stream_set
[master_stream
];
2362 static void dm_enable_per_frame_crtc_master_sync(struct dc_state
*context
)
2366 if (context
->stream_count
< 2)
2368 for (i
= 0; i
< context
->stream_count
; i
++) {
2369 if (!context
->streams
[i
])
2371 /* TODO: add a function to read AMD VSDB bits and will set
2372 * crtc_sync_master.multi_sync_enabled flag
2373 * For now its set to false
2375 set_multisync_trigger_params(context
->streams
[i
]);
2377 set_master_stream(context
->streams
, context
->stream_count
);
2380 static struct dc_stream_state
*
2381 create_stream_for_sink(struct amdgpu_dm_connector
*aconnector
,
2382 const struct drm_display_mode
*drm_mode
,
2383 const struct dm_connector_state
*dm_state
)
2385 struct drm_display_mode
*preferred_mode
= NULL
;
2386 struct drm_connector
*drm_connector
;
2387 struct dc_stream_state
*stream
= NULL
;
2388 struct drm_display_mode mode
= *drm_mode
;
2389 bool native_mode_found
= false;
2390 struct dc_sink
*sink
= NULL
;
2391 if (aconnector
== NULL
) {
2392 DRM_ERROR("aconnector is NULL!\n");
2396 drm_connector
= &aconnector
->base
;
2398 if (!aconnector
->dc_sink
) {
2400 * Create dc_sink when necessary to MST
2401 * Don't apply fake_sink to MST
2403 if (aconnector
->mst_port
) {
2404 dm_dp_mst_dc_sink_create(drm_connector
);
2408 sink
= create_fake_sink(aconnector
);
2412 sink
= aconnector
->dc_sink
;
2415 stream
= dc_create_stream_for_sink(sink
);
2417 if (stream
== NULL
) {
2418 DRM_ERROR("Failed to create stream for sink!\n");
2422 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
2423 /* Search for preferred mode */
2424 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
2425 native_mode_found
= true;
2429 if (!native_mode_found
)
2430 preferred_mode
= list_first_entry_or_null(
2431 &aconnector
->base
.modes
,
2432 struct drm_display_mode
,
2435 if (preferred_mode
== NULL
) {
2436 /* This may not be an error, the use case is when we we have no
2437 * usermode calls to reset and set mode upon hotplug. In this
2438 * case, we call set mode ourselves to restore the previous mode
2439 * and the modelist may not be filled in in time.
2441 DRM_DEBUG_DRIVER("No preferred mode found\n");
2443 decide_crtc_timing_for_drm_display_mode(
2444 &mode
, preferred_mode
,
2445 dm_state
? (dm_state
->scaling
!= RMX_OFF
) : false);
2449 drm_mode_set_crtcinfo(&mode
, 0);
2451 fill_stream_properties_from_drm_display_mode(stream
,
2452 &mode
, &aconnector
->base
);
2453 update_stream_scaling_settings(&mode
, dm_state
, stream
);
2456 &stream
->audio_info
,
2460 update_stream_signal(stream
);
2462 if (dm_state
&& dm_state
->freesync_capable
)
2463 stream
->ignore_msa_timing_param
= true;
2465 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_VIRTUAL
)
2466 dc_sink_release(sink
);
2471 static void amdgpu_dm_crtc_destroy(struct drm_crtc
*crtc
)
2473 drm_crtc_cleanup(crtc
);
2477 static void dm_crtc_destroy_state(struct drm_crtc
*crtc
,
2478 struct drm_crtc_state
*state
)
2480 struct dm_crtc_state
*cur
= to_dm_crtc_state(state
);
2482 /* TODO Destroy dc_stream objects are stream object is flattened */
2484 dc_stream_release(cur
->stream
);
2487 __drm_atomic_helper_crtc_destroy_state(state
);
2493 static void dm_crtc_reset_state(struct drm_crtc
*crtc
)
2495 struct dm_crtc_state
*state
;
2498 dm_crtc_destroy_state(crtc
, crtc
->state
);
2500 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2501 if (WARN_ON(!state
))
2504 crtc
->state
= &state
->base
;
2505 crtc
->state
->crtc
= crtc
;
2509 static struct drm_crtc_state
*
2510 dm_crtc_duplicate_state(struct drm_crtc
*crtc
)
2512 struct dm_crtc_state
*state
, *cur
;
2514 cur
= to_dm_crtc_state(crtc
->state
);
2516 if (WARN_ON(!crtc
->state
))
2519 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2523 __drm_atomic_helper_crtc_duplicate_state(crtc
, &state
->base
);
2526 state
->stream
= cur
->stream
;
2527 dc_stream_retain(state
->stream
);
2530 /* TODO Duplicate dc_stream after objects are stream object is flattened */
2532 return &state
->base
;
2536 static inline int dm_set_vblank(struct drm_crtc
*crtc
, bool enable
)
2538 enum dc_irq_source irq_source
;
2539 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2540 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
2542 irq_source
= IRQ_TYPE_VBLANK
+ acrtc
->otg_inst
;
2543 return dc_interrupt_set(adev
->dm
.dc
, irq_source
, enable
) ? 0 : -EBUSY
;
2546 static int dm_enable_vblank(struct drm_crtc
*crtc
)
2548 return dm_set_vblank(crtc
, true);
2551 static void dm_disable_vblank(struct drm_crtc
*crtc
)
2553 dm_set_vblank(crtc
, false);
2556 /* Implemented only the options currently availible for the driver */
2557 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
2558 .reset
= dm_crtc_reset_state
,
2559 .destroy
= amdgpu_dm_crtc_destroy
,
2560 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
2561 .set_config
= drm_atomic_helper_set_config
,
2562 .page_flip
= drm_atomic_helper_page_flip
,
2563 .atomic_duplicate_state
= dm_crtc_duplicate_state
,
2564 .atomic_destroy_state
= dm_crtc_destroy_state
,
2565 .set_crc_source
= amdgpu_dm_crtc_set_crc_source
,
2566 .enable_vblank
= dm_enable_vblank
,
2567 .disable_vblank
= dm_disable_vblank
,
2570 static enum drm_connector_status
2571 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
2574 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2577 * 1. This interface is NOT called in context of HPD irq.
2578 * 2. This interface *is called* in context of user-mode ioctl. Which
2579 * makes it a bad place for *any* MST-related activit. */
2581 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
&&
2582 !aconnector
->fake_enable
)
2583 connected
= (aconnector
->dc_sink
!= NULL
);
2585 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
2587 return (connected
? connector_status_connected
:
2588 connector_status_disconnected
);
2591 int amdgpu_dm_connector_atomic_set_property(struct drm_connector
*connector
,
2592 struct drm_connector_state
*connector_state
,
2593 struct drm_property
*property
,
2596 struct drm_device
*dev
= connector
->dev
;
2597 struct amdgpu_device
*adev
= dev
->dev_private
;
2598 struct dm_connector_state
*dm_old_state
=
2599 to_dm_connector_state(connector
->state
);
2600 struct dm_connector_state
*dm_new_state
=
2601 to_dm_connector_state(connector_state
);
2605 if (property
== dev
->mode_config
.scaling_mode_property
) {
2606 enum amdgpu_rmx_type rmx_type
;
2609 case DRM_MODE_SCALE_CENTER
:
2610 rmx_type
= RMX_CENTER
;
2612 case DRM_MODE_SCALE_ASPECT
:
2613 rmx_type
= RMX_ASPECT
;
2615 case DRM_MODE_SCALE_FULLSCREEN
:
2616 rmx_type
= RMX_FULL
;
2618 case DRM_MODE_SCALE_NONE
:
2624 if (dm_old_state
->scaling
== rmx_type
)
2627 dm_new_state
->scaling
= rmx_type
;
2629 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2630 dm_new_state
->underscan_hborder
= val
;
2632 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2633 dm_new_state
->underscan_vborder
= val
;
2635 } else if (property
== adev
->mode_info
.underscan_property
) {
2636 dm_new_state
->underscan_enable
= val
;
2643 int amdgpu_dm_connector_atomic_get_property(struct drm_connector
*connector
,
2644 const struct drm_connector_state
*state
,
2645 struct drm_property
*property
,
2648 struct drm_device
*dev
= connector
->dev
;
2649 struct amdgpu_device
*adev
= dev
->dev_private
;
2650 struct dm_connector_state
*dm_state
=
2651 to_dm_connector_state(state
);
2654 if (property
== dev
->mode_config
.scaling_mode_property
) {
2655 switch (dm_state
->scaling
) {
2657 *val
= DRM_MODE_SCALE_CENTER
;
2660 *val
= DRM_MODE_SCALE_ASPECT
;
2663 *val
= DRM_MODE_SCALE_FULLSCREEN
;
2667 *val
= DRM_MODE_SCALE_NONE
;
2671 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2672 *val
= dm_state
->underscan_hborder
;
2674 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2675 *val
= dm_state
->underscan_vborder
;
2677 } else if (property
== adev
->mode_info
.underscan_property
) {
2678 *val
= dm_state
->underscan_enable
;
2684 static void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
2686 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2687 const struct dc_link
*link
= aconnector
->dc_link
;
2688 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
2689 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2691 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2692 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2694 if ((link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) &&
2695 link
->type
!= dc_connection_none
&&
2696 dm
->backlight_dev
) {
2697 backlight_device_unregister(dm
->backlight_dev
);
2698 dm
->backlight_dev
= NULL
;
2701 drm_connector_unregister(connector
);
2702 drm_connector_cleanup(connector
);
2706 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
2708 struct dm_connector_state
*state
=
2709 to_dm_connector_state(connector
->state
);
2711 if (connector
->state
)
2712 __drm_atomic_helper_connector_destroy_state(connector
->state
);
2716 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2719 state
->scaling
= RMX_OFF
;
2720 state
->underscan_enable
= false;
2721 state
->underscan_hborder
= 0;
2722 state
->underscan_vborder
= 0;
2724 __drm_atomic_helper_connector_reset(connector
, &state
->base
);
2728 struct drm_connector_state
*
2729 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector
*connector
)
2731 struct dm_connector_state
*state
=
2732 to_dm_connector_state(connector
->state
);
2734 struct dm_connector_state
*new_state
=
2735 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
2738 __drm_atomic_helper_connector_duplicate_state(connector
,
2740 return &new_state
->base
;
2746 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
2747 .reset
= amdgpu_dm_connector_funcs_reset
,
2748 .detect
= amdgpu_dm_connector_detect
,
2749 .fill_modes
= drm_helper_probe_single_connector_modes
,
2750 .destroy
= amdgpu_dm_connector_destroy
,
2751 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
2752 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
2753 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
2754 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
2757 static struct drm_encoder
*best_encoder(struct drm_connector
*connector
)
2759 int enc_id
= connector
->encoder_ids
[0];
2760 struct drm_mode_object
*obj
;
2761 struct drm_encoder
*encoder
;
2763 DRM_DEBUG_DRIVER("Finding the best encoder\n");
2765 /* pick the encoder ids */
2767 obj
= drm_mode_object_find(connector
->dev
, NULL
, enc_id
, DRM_MODE_OBJECT_ENCODER
);
2769 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2772 encoder
= obj_to_encoder(obj
);
2775 DRM_ERROR("No encoder id\n");
2779 static int get_modes(struct drm_connector
*connector
)
2781 return amdgpu_dm_connector_get_modes(connector
);
2784 static void create_eml_sink(struct amdgpu_dm_connector
*aconnector
)
2786 struct dc_sink_init_data init_params
= {
2787 .link
= aconnector
->dc_link
,
2788 .sink_signal
= SIGNAL_TYPE_VIRTUAL
2792 if (!aconnector
->base
.edid_blob_ptr
) {
2793 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
2794 aconnector
->base
.name
);
2796 aconnector
->base
.force
= DRM_FORCE_OFF
;
2797 aconnector
->base
.override_edid
= false;
2801 edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
2803 aconnector
->edid
= edid
;
2805 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
2806 aconnector
->dc_link
,
2808 (edid
->extensions
+ 1) * EDID_LENGTH
,
2811 if (aconnector
->base
.force
== DRM_FORCE_ON
)
2812 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
2813 aconnector
->dc_link
->local_sink
:
2814 aconnector
->dc_em_sink
;
2817 static void handle_edid_mgmt(struct amdgpu_dm_connector
*aconnector
)
2819 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
2821 /* In case of headless boot with force on for DP managed connector
2822 * Those settings have to be != 0 to get initial modeset
2824 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
2825 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
2826 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
2830 aconnector
->base
.override_edid
= true;
2831 create_eml_sink(aconnector
);
2834 enum drm_mode_status
amdgpu_dm_connector_mode_valid(struct drm_connector
*connector
,
2835 struct drm_display_mode
*mode
)
2837 int result
= MODE_ERROR
;
2838 struct dc_sink
*dc_sink
;
2839 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
2840 /* TODO: Unhardcode stream count */
2841 struct dc_stream_state
*stream
;
2842 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2843 enum dc_status dc_result
= DC_OK
;
2845 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
2846 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
2849 /* Only run this the first time mode_valid is called to initilialize
2852 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
2853 !aconnector
->dc_em_sink
)
2854 handle_edid_mgmt(aconnector
);
2856 dc_sink
= to_amdgpu_dm_connector(connector
)->dc_sink
;
2858 if (dc_sink
== NULL
) {
2859 DRM_ERROR("dc_sink is NULL!\n");
2863 stream
= create_stream_for_sink(aconnector
, mode
, NULL
);
2864 if (stream
== NULL
) {
2865 DRM_ERROR("Failed to create stream for sink!\n");
2869 dc_result
= dc_validate_stream(adev
->dm
.dc
, stream
);
2871 if (dc_result
== DC_OK
)
2874 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
2880 dc_stream_release(stream
);
2883 /* TODO: error handling*/
2887 static const struct drm_connector_helper_funcs
2888 amdgpu_dm_connector_helper_funcs
= {
2890 * If hotplug a second bigger display in FB Con mode, bigger resolution
2891 * modes will be filtered by drm_mode_validate_size(), and those modes
2892 * is missing after user start lightdm. So we need to renew modes list.
2893 * in get_modes call back, not just return the modes count
2895 .get_modes
= get_modes
,
2896 .mode_valid
= amdgpu_dm_connector_mode_valid
,
2897 .best_encoder
= best_encoder
2900 static void dm_crtc_helper_disable(struct drm_crtc
*crtc
)
2904 static int dm_crtc_helper_atomic_check(struct drm_crtc
*crtc
,
2905 struct drm_crtc_state
*state
)
2907 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
2908 struct dc
*dc
= adev
->dm
.dc
;
2909 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(state
);
2912 if (unlikely(!dm_crtc_state
->stream
&&
2913 modeset_required(state
, NULL
, dm_crtc_state
->stream
))) {
2918 /* In some use cases, like reset, no stream is attached */
2919 if (!dm_crtc_state
->stream
)
2922 if (dc_validate_stream(dc
, dm_crtc_state
->stream
) == DC_OK
)
2928 static bool dm_crtc_helper_mode_fixup(struct drm_crtc
*crtc
,
2929 const struct drm_display_mode
*mode
,
2930 struct drm_display_mode
*adjusted_mode
)
2935 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
2936 .disable
= dm_crtc_helper_disable
,
2937 .atomic_check
= dm_crtc_helper_atomic_check
,
2938 .mode_fixup
= dm_crtc_helper_mode_fixup
2941 static void dm_encoder_helper_disable(struct drm_encoder
*encoder
)
2946 static int dm_encoder_helper_atomic_check(struct drm_encoder
*encoder
,
2947 struct drm_crtc_state
*crtc_state
,
2948 struct drm_connector_state
*conn_state
)
2953 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
2954 .disable
= dm_encoder_helper_disable
,
2955 .atomic_check
= dm_encoder_helper_atomic_check
2958 static void dm_drm_plane_reset(struct drm_plane
*plane
)
2960 struct dm_plane_state
*amdgpu_state
= NULL
;
2963 plane
->funcs
->atomic_destroy_state(plane
, plane
->state
);
2965 amdgpu_state
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
2966 WARN_ON(amdgpu_state
== NULL
);
2969 plane
->state
= &amdgpu_state
->base
;
2970 plane
->state
->plane
= plane
;
2971 plane
->state
->rotation
= DRM_MODE_ROTATE_0
;
2975 static struct drm_plane_state
*
2976 dm_drm_plane_duplicate_state(struct drm_plane
*plane
)
2978 struct dm_plane_state
*dm_plane_state
, *old_dm_plane_state
;
2980 old_dm_plane_state
= to_dm_plane_state(plane
->state
);
2981 dm_plane_state
= kzalloc(sizeof(*dm_plane_state
), GFP_KERNEL
);
2982 if (!dm_plane_state
)
2985 __drm_atomic_helper_plane_duplicate_state(plane
, &dm_plane_state
->base
);
2987 if (old_dm_plane_state
->dc_state
) {
2988 dm_plane_state
->dc_state
= old_dm_plane_state
->dc_state
;
2989 dc_plane_state_retain(dm_plane_state
->dc_state
);
2992 return &dm_plane_state
->base
;
2995 void dm_drm_plane_destroy_state(struct drm_plane
*plane
,
2996 struct drm_plane_state
*state
)
2998 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
3000 if (dm_plane_state
->dc_state
)
3001 dc_plane_state_release(dm_plane_state
->dc_state
);
3003 drm_atomic_helper_plane_destroy_state(plane
, state
);
3006 static const struct drm_plane_funcs dm_plane_funcs
= {
3007 .update_plane
= drm_atomic_helper_update_plane
,
3008 .disable_plane
= drm_atomic_helper_disable_plane
,
3009 .destroy
= drm_plane_cleanup
,
3010 .reset
= dm_drm_plane_reset
,
3011 .atomic_duplicate_state
= dm_drm_plane_duplicate_state
,
3012 .atomic_destroy_state
= dm_drm_plane_destroy_state
,
3015 static int dm_plane_helper_prepare_fb(struct drm_plane
*plane
,
3016 struct drm_plane_state
*new_state
)
3018 struct amdgpu_framebuffer
*afb
;
3019 struct drm_gem_object
*obj
;
3020 struct amdgpu_device
*adev
;
3021 struct amdgpu_bo
*rbo
;
3022 uint64_t chroma_addr
= 0;
3023 struct dm_plane_state
*dm_plane_state_new
, *dm_plane_state_old
;
3024 unsigned int awidth
;
3028 dm_plane_state_old
= to_dm_plane_state(plane
->state
);
3029 dm_plane_state_new
= to_dm_plane_state(new_state
);
3031 if (!new_state
->fb
) {
3032 DRM_DEBUG_DRIVER("No FB bound\n");
3036 afb
= to_amdgpu_framebuffer(new_state
->fb
);
3037 obj
= new_state
->fb
->obj
[0];
3038 rbo
= gem_to_amdgpu_bo(obj
);
3039 adev
= amdgpu_ttm_adev(rbo
->tbo
.bdev
);
3040 r
= amdgpu_bo_reserve(rbo
, false);
3041 if (unlikely(r
!= 0))
3044 if (plane
->type
!= DRM_PLANE_TYPE_CURSOR
)
3045 domain
= amdgpu_display_supported_domains(adev
);
3047 domain
= AMDGPU_GEM_DOMAIN_VRAM
;
3049 r
= amdgpu_bo_pin(rbo
, domain
, &afb
->address
);
3050 amdgpu_bo_unreserve(rbo
);
3052 if (unlikely(r
!= 0)) {
3053 if (r
!= -ERESTARTSYS
)
3054 DRM_ERROR("Failed to pin framebuffer with error %d\n", r
);
3060 if (dm_plane_state_new
->dc_state
&&
3061 dm_plane_state_old
->dc_state
!= dm_plane_state_new
->dc_state
) {
3062 struct dc_plane_state
*plane_state
= dm_plane_state_new
->dc_state
;
3064 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
3065 plane_state
->address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3066 plane_state
->address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3068 awidth
= ALIGN(new_state
->fb
->width
, 64);
3069 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
3070 plane_state
->address
.video_progressive
.luma_addr
.low_part
3071 = lower_32_bits(afb
->address
);
3072 plane_state
->address
.video_progressive
.luma_addr
.high_part
3073 = upper_32_bits(afb
->address
);
3074 chroma_addr
= afb
->address
+ (u64
)awidth
* new_state
->fb
->height
;
3075 plane_state
->address
.video_progressive
.chroma_addr
.low_part
3076 = lower_32_bits(chroma_addr
);
3077 plane_state
->address
.video_progressive
.chroma_addr
.high_part
3078 = upper_32_bits(chroma_addr
);
3085 static void dm_plane_helper_cleanup_fb(struct drm_plane
*plane
,
3086 struct drm_plane_state
*old_state
)
3088 struct amdgpu_bo
*rbo
;
3094 rbo
= gem_to_amdgpu_bo(old_state
->fb
->obj
[0]);
3095 r
= amdgpu_bo_reserve(rbo
, false);
3097 DRM_ERROR("failed to reserve rbo before unpin\n");
3101 amdgpu_bo_unpin(rbo
);
3102 amdgpu_bo_unreserve(rbo
);
3103 amdgpu_bo_unref(&rbo
);
3106 static int dm_plane_atomic_check(struct drm_plane
*plane
,
3107 struct drm_plane_state
*state
)
3109 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
3110 struct dc
*dc
= adev
->dm
.dc
;
3111 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
3113 if (!dm_plane_state
->dc_state
)
3116 if (!fill_rects_from_plane_state(state
, dm_plane_state
->dc_state
))
3119 if (dc_validate_plane(dc
, dm_plane_state
->dc_state
) == DC_OK
)
3125 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
3126 .prepare_fb
= dm_plane_helper_prepare_fb
,
3127 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
3128 .atomic_check
= dm_plane_atomic_check
,
3132 * TODO: these are currently initialized to rgb formats only.
3133 * For future use cases we should either initialize them dynamically based on
3134 * plane capabilities, or initialize this array to all formats, so internal drm
3135 * check will succeed, and let DC to implement proper check
3137 static const uint32_t rgb_formats
[] = {
3139 DRM_FORMAT_XRGB8888
,
3140 DRM_FORMAT_ARGB8888
,
3141 DRM_FORMAT_RGBA8888
,
3142 DRM_FORMAT_XRGB2101010
,
3143 DRM_FORMAT_XBGR2101010
,
3144 DRM_FORMAT_ARGB2101010
,
3145 DRM_FORMAT_ABGR2101010
,
3148 static const uint32_t yuv_formats
[] = {
3153 static const u32 cursor_formats
[] = {
3157 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
3158 struct amdgpu_plane
*aplane
,
3159 unsigned long possible_crtcs
)
3163 switch (aplane
->base
.type
) {
3164 case DRM_PLANE_TYPE_PRIMARY
:
3165 res
= drm_universal_plane_init(
3171 ARRAY_SIZE(rgb_formats
),
3172 NULL
, aplane
->base
.type
, NULL
);
3174 case DRM_PLANE_TYPE_OVERLAY
:
3175 res
= drm_universal_plane_init(
3181 ARRAY_SIZE(yuv_formats
),
3182 NULL
, aplane
->base
.type
, NULL
);
3184 case DRM_PLANE_TYPE_CURSOR
:
3185 res
= drm_universal_plane_init(
3191 ARRAY_SIZE(cursor_formats
),
3192 NULL
, aplane
->base
.type
, NULL
);
3196 drm_plane_helper_add(&aplane
->base
, &dm_plane_helper_funcs
);
3198 /* Create (reset) the plane state */
3199 if (aplane
->base
.funcs
->reset
)
3200 aplane
->base
.funcs
->reset(&aplane
->base
);
3206 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
3207 struct drm_plane
*plane
,
3208 uint32_t crtc_index
)
3210 struct amdgpu_crtc
*acrtc
= NULL
;
3211 struct amdgpu_plane
*cursor_plane
;
3215 cursor_plane
= kzalloc(sizeof(*cursor_plane
), GFP_KERNEL
);
3219 cursor_plane
->base
.type
= DRM_PLANE_TYPE_CURSOR
;
3220 res
= amdgpu_dm_plane_init(dm
, cursor_plane
, 0);
3222 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
3226 res
= drm_crtc_init_with_planes(
3230 &cursor_plane
->base
,
3231 &amdgpu_dm_crtc_funcs
, NULL
);
3236 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
3238 /* Create (reset) the plane state */
3239 if (acrtc
->base
.funcs
->reset
)
3240 acrtc
->base
.funcs
->reset(&acrtc
->base
);
3242 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3243 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3245 acrtc
->crtc_id
= crtc_index
;
3246 acrtc
->base
.enabled
= false;
3248 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
3249 drm_crtc_enable_color_mgmt(&acrtc
->base
, MAX_COLOR_LUT_ENTRIES
,
3250 true, MAX_COLOR_LUT_ENTRIES
);
3251 drm_mode_crtc_set_gamma_size(&acrtc
->base
, MAX_COLOR_LEGACY_LUT_ENTRIES
);
3257 kfree(cursor_plane
);
3262 static int to_drm_connector_type(enum signal_type st
)
3265 case SIGNAL_TYPE_HDMI_TYPE_A
:
3266 return DRM_MODE_CONNECTOR_HDMIA
;
3267 case SIGNAL_TYPE_EDP
:
3268 return DRM_MODE_CONNECTOR_eDP
;
3269 case SIGNAL_TYPE_RGB
:
3270 return DRM_MODE_CONNECTOR_VGA
;
3271 case SIGNAL_TYPE_DISPLAY_PORT
:
3272 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
3273 return DRM_MODE_CONNECTOR_DisplayPort
;
3274 case SIGNAL_TYPE_DVI_DUAL_LINK
:
3275 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
3276 return DRM_MODE_CONNECTOR_DVID
;
3277 case SIGNAL_TYPE_VIRTUAL
:
3278 return DRM_MODE_CONNECTOR_VIRTUAL
;
3281 return DRM_MODE_CONNECTOR_Unknown
;
3285 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
3287 const struct drm_connector_helper_funcs
*helper
=
3288 connector
->helper_private
;
3289 struct drm_encoder
*encoder
;
3290 struct amdgpu_encoder
*amdgpu_encoder
;
3292 encoder
= helper
->best_encoder(connector
);
3294 if (encoder
== NULL
)
3297 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3299 amdgpu_encoder
->native_mode
.clock
= 0;
3301 if (!list_empty(&connector
->probed_modes
)) {
3302 struct drm_display_mode
*preferred_mode
= NULL
;
3304 list_for_each_entry(preferred_mode
,
3305 &connector
->probed_modes
,
3307 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
)
3308 amdgpu_encoder
->native_mode
= *preferred_mode
;
3316 static struct drm_display_mode
*
3317 amdgpu_dm_create_common_mode(struct drm_encoder
*encoder
,
3319 int hdisplay
, int vdisplay
)
3321 struct drm_device
*dev
= encoder
->dev
;
3322 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3323 struct drm_display_mode
*mode
= NULL
;
3324 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3326 mode
= drm_mode_duplicate(dev
, native_mode
);
3331 mode
->hdisplay
= hdisplay
;
3332 mode
->vdisplay
= vdisplay
;
3333 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
3334 strncpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
3340 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
3341 struct drm_connector
*connector
)
3343 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3344 struct drm_display_mode
*mode
= NULL
;
3345 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3346 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3347 to_amdgpu_dm_connector(connector
);
3351 char name
[DRM_DISPLAY_MODE_LEN
];
3354 } common_modes
[] = {
3355 { "640x480", 640, 480},
3356 { "800x600", 800, 600},
3357 { "1024x768", 1024, 768},
3358 { "1280x720", 1280, 720},
3359 { "1280x800", 1280, 800},
3360 {"1280x1024", 1280, 1024},
3361 { "1440x900", 1440, 900},
3362 {"1680x1050", 1680, 1050},
3363 {"1600x1200", 1600, 1200},
3364 {"1920x1080", 1920, 1080},
3365 {"1920x1200", 1920, 1200}
3368 n
= ARRAY_SIZE(common_modes
);
3370 for (i
= 0; i
< n
; i
++) {
3371 struct drm_display_mode
*curmode
= NULL
;
3372 bool mode_existed
= false;
3374 if (common_modes
[i
].w
> native_mode
->hdisplay
||
3375 common_modes
[i
].h
> native_mode
->vdisplay
||
3376 (common_modes
[i
].w
== native_mode
->hdisplay
&&
3377 common_modes
[i
].h
== native_mode
->vdisplay
))
3380 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
3381 if (common_modes
[i
].w
== curmode
->hdisplay
&&
3382 common_modes
[i
].h
== curmode
->vdisplay
) {
3383 mode_existed
= true;
3391 mode
= amdgpu_dm_create_common_mode(encoder
,
3392 common_modes
[i
].name
, common_modes
[i
].w
,
3394 drm_mode_probed_add(connector
, mode
);
3395 amdgpu_dm_connector
->num_modes
++;
3399 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector
*connector
,
3402 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3403 to_amdgpu_dm_connector(connector
);
3406 /* empty probed_modes */
3407 INIT_LIST_HEAD(&connector
->probed_modes
);
3408 amdgpu_dm_connector
->num_modes
=
3409 drm_add_edid_modes(connector
, edid
);
3411 amdgpu_dm_get_native_mode(connector
);
3413 amdgpu_dm_connector
->num_modes
= 0;
3417 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
3419 const struct drm_connector_helper_funcs
*helper
=
3420 connector
->helper_private
;
3421 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3422 to_amdgpu_dm_connector(connector
);
3423 struct drm_encoder
*encoder
;
3424 struct edid
*edid
= amdgpu_dm_connector
->edid
;
3426 encoder
= helper
->best_encoder(connector
);
3428 if (!edid
|| !drm_edid_is_valid(edid
))
3429 return drm_add_modes_noedid(connector
, 640, 480);
3431 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
3432 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
3434 amdgpu_dm_fbc_init(connector
);
3436 return amdgpu_dm_connector
->num_modes
;
3439 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager
*dm
,
3440 struct amdgpu_dm_connector
*aconnector
,
3442 struct dc_link
*link
,
3445 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
3447 aconnector
->connector_id
= link_index
;
3448 aconnector
->dc_link
= link
;
3449 aconnector
->base
.interlace_allowed
= false;
3450 aconnector
->base
.doublescan_allowed
= false;
3451 aconnector
->base
.stereo_allowed
= false;
3452 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
3453 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
3455 mutex_init(&aconnector
->hpd_lock
);
3457 /* configure support HPD hot plug connector_>polled default value is 0
3458 * which means HPD hot plug not supported
3460 switch (connector_type
) {
3461 case DRM_MODE_CONNECTOR_HDMIA
:
3462 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3464 case DRM_MODE_CONNECTOR_DisplayPort
:
3465 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3467 case DRM_MODE_CONNECTOR_DVID
:
3468 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3474 drm_object_attach_property(&aconnector
->base
.base
,
3475 dm
->ddev
->mode_config
.scaling_mode_property
,
3476 DRM_MODE_SCALE_NONE
);
3478 drm_object_attach_property(&aconnector
->base
.base
,
3479 adev
->mode_info
.underscan_property
,
3481 drm_object_attach_property(&aconnector
->base
.base
,
3482 adev
->mode_info
.underscan_hborder_property
,
3484 drm_object_attach_property(&aconnector
->base
.base
,
3485 adev
->mode_info
.underscan_vborder_property
,
3490 static int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
3491 struct i2c_msg
*msgs
, int num
)
3493 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
3494 struct ddc_service
*ddc_service
= i2c
->ddc_service
;
3495 struct i2c_command cmd
;
3499 cmd
.payloads
= kcalloc(num
, sizeof(struct i2c_payload
), GFP_KERNEL
);
3504 cmd
.number_of_payloads
= num
;
3505 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
3508 for (i
= 0; i
< num
; i
++) {
3509 cmd
.payloads
[i
].write
= !(msgs
[i
].flags
& I2C_M_RD
);
3510 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
3511 cmd
.payloads
[i
].length
= msgs
[i
].len
;
3512 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
3515 if (dal_i2caux_submit_i2c_command(
3516 ddc_service
->ctx
->i2caux
,
3517 ddc_service
->ddc_pin
,
3521 kfree(cmd
.payloads
);
3525 static u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
3527 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
3530 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
3531 .master_xfer
= amdgpu_dm_i2c_xfer
,
3532 .functionality
= amdgpu_dm_i2c_func
,
3535 static struct amdgpu_i2c_adapter
*
3536 create_i2c(struct ddc_service
*ddc_service
,
3540 struct amdgpu_device
*adev
= ddc_service
->ctx
->driver_context
;
3541 struct amdgpu_i2c_adapter
*i2c
;
3543 i2c
= kzalloc(sizeof(struct amdgpu_i2c_adapter
), GFP_KERNEL
);
3546 i2c
->base
.owner
= THIS_MODULE
;
3547 i2c
->base
.class = I2C_CLASS_DDC
;
3548 i2c
->base
.dev
.parent
= &adev
->pdev
->dev
;
3549 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
3550 snprintf(i2c
->base
.name
, sizeof(i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
3551 i2c_set_adapdata(&i2c
->base
, i2c
);
3552 i2c
->ddc_service
= ddc_service
;
3558 /* Note: this function assumes that dc_link_detect() was called for the
3559 * dc_link which will be represented by this aconnector.
3561 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
3562 struct amdgpu_dm_connector
*aconnector
,
3563 uint32_t link_index
,
3564 struct amdgpu_encoder
*aencoder
)
3568 struct dc
*dc
= dm
->dc
;
3569 struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
3570 struct amdgpu_i2c_adapter
*i2c
;
3572 link
->priv
= aconnector
;
3574 DRM_DEBUG_DRIVER("%s()\n", __func__
);
3576 i2c
= create_i2c(link
->ddc
, link
->link_index
, &res
);
3578 DRM_ERROR("Failed to create i2c adapter data\n");
3582 aconnector
->i2c
= i2c
;
3583 res
= i2c_add_adapter(&i2c
->base
);
3586 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
3590 connector_type
= to_drm_connector_type(link
->connector_signal
);
3592 res
= drm_connector_init(
3595 &amdgpu_dm_connector_funcs
,
3599 DRM_ERROR("connector_init failed\n");
3600 aconnector
->connector_id
= -1;
3604 drm_connector_helper_add(
3606 &amdgpu_dm_connector_helper_funcs
);
3608 if (aconnector
->base
.funcs
->reset
)
3609 aconnector
->base
.funcs
->reset(&aconnector
->base
);
3611 amdgpu_dm_connector_init_helper(
3618 drm_mode_connector_attach_encoder(
3619 &aconnector
->base
, &aencoder
->base
);
3621 drm_connector_register(&aconnector
->base
);
3623 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
3624 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
3625 amdgpu_dm_initialize_dp_connector(dm
, aconnector
);
3630 aconnector
->i2c
= NULL
;
3635 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
3637 switch (adev
->mode_info
.num_crtc
) {
3654 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
3655 struct amdgpu_encoder
*aencoder
,
3656 uint32_t link_index
)
3658 struct amdgpu_device
*adev
= dev
->dev_private
;
3660 int res
= drm_encoder_init(dev
,
3662 &amdgpu_dm_encoder_funcs
,
3663 DRM_MODE_ENCODER_TMDS
,
3666 aencoder
->base
.possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
3669 aencoder
->encoder_id
= link_index
;
3671 aencoder
->encoder_id
= -1;
3673 drm_encoder_helper_add(&aencoder
->base
, &amdgpu_dm_encoder_helper_funcs
);
3678 static void manage_dm_interrupts(struct amdgpu_device
*adev
,
3679 struct amdgpu_crtc
*acrtc
,
3683 * this is not correct translation but will work as soon as VBLANK
3684 * constant is the same as PFLIP
3687 amdgpu_display_crtc_idx_to_irq_type(
3692 drm_crtc_vblank_on(&acrtc
->base
);
3695 &adev
->pageflip_irq
,
3701 &adev
->pageflip_irq
,
3703 drm_crtc_vblank_off(&acrtc
->base
);
3708 is_scaling_state_different(const struct dm_connector_state
*dm_state
,
3709 const struct dm_connector_state
*old_dm_state
)
3711 if (dm_state
->scaling
!= old_dm_state
->scaling
)
3713 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
3714 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
3716 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
3717 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
3719 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
||
3720 dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
3725 static void remove_stream(struct amdgpu_device
*adev
,
3726 struct amdgpu_crtc
*acrtc
,
3727 struct dc_stream_state
*stream
)
3729 /* this is the update mode case */
3730 if (adev
->dm
.freesync_module
)
3731 mod_freesync_remove_stream(adev
->dm
.freesync_module
, stream
);
3733 acrtc
->otg_inst
= -1;
3734 acrtc
->enabled
= false;
3737 static int get_cursor_position(struct drm_plane
*plane
, struct drm_crtc
*crtc
,
3738 struct dc_cursor_position
*position
)
3740 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
3742 int xorigin
= 0, yorigin
= 0;
3744 if (!crtc
|| !plane
->state
->fb
) {
3745 position
->enable
= false;
3751 if ((plane
->state
->crtc_w
> amdgpu_crtc
->max_cursor_width
) ||
3752 (plane
->state
->crtc_h
> amdgpu_crtc
->max_cursor_height
)) {
3753 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3755 plane
->state
->crtc_w
,
3756 plane
->state
->crtc_h
);
3760 x
= plane
->state
->crtc_x
;
3761 y
= plane
->state
->crtc_y
;
3762 /* avivo cursor are offset into the total surface */
3763 x
+= crtc
->primary
->state
->src_x
>> 16;
3764 y
+= crtc
->primary
->state
->src_y
>> 16;
3766 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
3770 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
3773 position
->enable
= true;
3776 position
->x_hotspot
= xorigin
;
3777 position
->y_hotspot
= yorigin
;
3782 static void handle_cursor_update(struct drm_plane
*plane
,
3783 struct drm_plane_state
*old_plane_state
)
3785 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(plane
->state
->fb
);
3786 struct drm_crtc
*crtc
= afb
? plane
->state
->crtc
: old_plane_state
->crtc
;
3787 struct dm_crtc_state
*crtc_state
= crtc
? to_dm_crtc_state(crtc
->state
) : NULL
;
3788 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
3789 uint64_t address
= afb
? afb
->address
: 0;
3790 struct dc_cursor_position position
;
3791 struct dc_cursor_attributes attributes
;
3794 if (!plane
->state
->fb
&& !old_plane_state
->fb
)
3797 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
3799 amdgpu_crtc
->crtc_id
,
3800 plane
->state
->crtc_w
,
3801 plane
->state
->crtc_h
);
3803 ret
= get_cursor_position(plane
, crtc
, &position
);
3807 if (!position
.enable
) {
3808 /* turn off cursor */
3809 if (crtc_state
&& crtc_state
->stream
)
3810 dc_stream_set_cursor_position(crtc_state
->stream
,
3815 amdgpu_crtc
->cursor_width
= plane
->state
->crtc_w
;
3816 amdgpu_crtc
->cursor_height
= plane
->state
->crtc_h
;
3818 attributes
.address
.high_part
= upper_32_bits(address
);
3819 attributes
.address
.low_part
= lower_32_bits(address
);
3820 attributes
.width
= plane
->state
->crtc_w
;
3821 attributes
.height
= plane
->state
->crtc_h
;
3822 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
3823 attributes
.rotation_angle
= 0;
3824 attributes
.attribute_flags
.value
= 0;
3826 attributes
.pitch
= attributes
.width
;
3828 if (crtc_state
->stream
) {
3829 if (!dc_stream_set_cursor_attributes(crtc_state
->stream
,
3831 DRM_ERROR("DC failed to set cursor attributes\n");
3833 if (!dc_stream_set_cursor_position(crtc_state
->stream
,
3835 DRM_ERROR("DC failed to set cursor position\n");
3839 static void prepare_flip_isr(struct amdgpu_crtc
*acrtc
)
3842 assert_spin_locked(&acrtc
->base
.dev
->event_lock
);
3843 WARN_ON(acrtc
->event
);
3845 acrtc
->event
= acrtc
->base
.state
->event
;
3847 /* Set the flip status */
3848 acrtc
->pflip_status
= AMDGPU_FLIP_SUBMITTED
;
3850 /* Mark this event as consumed */
3851 acrtc
->base
.state
->event
= NULL
;
3853 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
3860 * Waits on all BO's fences and for proper vblank count
3862 static void amdgpu_dm_do_flip(struct drm_crtc
*crtc
,
3863 struct drm_framebuffer
*fb
,
3865 struct dc_state
*state
)
3867 unsigned long flags
;
3868 uint32_t target_vblank
;
3870 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3871 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(fb
);
3872 struct amdgpu_bo
*abo
= gem_to_amdgpu_bo(fb
->obj
[0]);
3873 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
3874 bool async_flip
= (crtc
->state
->pageflip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
) != 0;
3875 struct dc_flip_addrs addr
= { {0} };
3876 /* TODO eliminate or rename surface_update */
3877 struct dc_surface_update surface_updates
[1] = { {0} };
3878 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
3881 /* Prepare wait for target vblank early - before the fence-waits */
3882 target_vblank
= target
- (uint32_t)drm_crtc_vblank_count(crtc
) +
3883 amdgpu_get_vblank_counter_kms(crtc
->dev
, acrtc
->crtc_id
);
3885 /* TODO This might fail and hence better not used, wait
3886 * explicitly on fences instead
3887 * and in general should be called for
3888 * blocking commit to as per framework helpers
3890 r
= amdgpu_bo_reserve(abo
, true);
3891 if (unlikely(r
!= 0)) {
3892 DRM_ERROR("failed to reserve buffer before flip\n");
3896 /* Wait for all fences on this FB */
3897 WARN_ON(reservation_object_wait_timeout_rcu(abo
->tbo
.resv
, true, false,
3898 MAX_SCHEDULE_TIMEOUT
) < 0);
3900 amdgpu_bo_unreserve(abo
);
3902 /* Wait until we're out of the vertical blank period before the one
3903 * targeted by the flip
3905 while ((acrtc
->enabled
&&
3906 (amdgpu_display_get_crtc_scanoutpos(adev
->ddev
, acrtc
->crtc_id
,
3907 0, &vpos
, &hpos
, NULL
,
3908 NULL
, &crtc
->hwmode
)
3909 & (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
)) ==
3910 (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
) &&
3911 (int)(target_vblank
-
3912 amdgpu_get_vblank_counter_kms(adev
->ddev
, acrtc
->crtc_id
)) > 0)) {
3913 usleep_range(1000, 1100);
3917 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
3918 /* update crtc fb */
3919 crtc
->primary
->fb
= fb
;
3921 WARN_ON(acrtc
->pflip_status
!= AMDGPU_FLIP_NONE
);
3922 WARN_ON(!acrtc_state
->stream
);
3924 addr
.address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3925 addr
.address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3926 addr
.flip_immediate
= async_flip
;
3929 if (acrtc
->base
.state
->event
)
3930 prepare_flip_isr(acrtc
);
3932 surface_updates
->surface
= dc_stream_get_status(acrtc_state
->stream
)->plane_states
[0];
3933 surface_updates
->flip_addr
= &addr
;
3936 dc_commit_updates_for_stream(adev
->dm
.dc
,
3939 acrtc_state
->stream
,
3941 &surface_updates
->surface
,
3944 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
3946 addr
.address
.grph
.addr
.high_part
,
3947 addr
.address
.grph
.addr
.low_part
);
3950 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3954 * TODO this whole function needs to go
3956 * dc_surface_update is needlessly complex. See if we can just replace this
3957 * with a dc_plane_state and follow the atomic model a bit more closely here.
3959 static bool commit_planes_to_stream(
3961 struct dc_plane_state
**plane_states
,
3962 uint8_t new_plane_count
,
3963 struct dm_crtc_state
*dm_new_crtc_state
,
3964 struct dm_crtc_state
*dm_old_crtc_state
,
3965 struct dc_state
*state
)
3967 /* no need to dynamically allocate this. it's pretty small */
3968 struct dc_surface_update updates
[MAX_SURFACES
];
3969 struct dc_flip_addrs
*flip_addr
;
3970 struct dc_plane_info
*plane_info
;
3971 struct dc_scaling_info
*scaling_info
;
3973 struct dc_stream_state
*dc_stream
= dm_new_crtc_state
->stream
;
3974 struct dc_stream_update
*stream_update
=
3975 kzalloc(sizeof(struct dc_stream_update
), GFP_KERNEL
);
3977 if (!stream_update
) {
3978 BREAK_TO_DEBUGGER();
3982 flip_addr
= kcalloc(MAX_SURFACES
, sizeof(struct dc_flip_addrs
),
3984 plane_info
= kcalloc(MAX_SURFACES
, sizeof(struct dc_plane_info
),
3986 scaling_info
= kcalloc(MAX_SURFACES
, sizeof(struct dc_scaling_info
),
3989 if (!flip_addr
|| !plane_info
|| !scaling_info
) {
3992 kfree(scaling_info
);
3993 kfree(stream_update
);
3997 memset(updates
, 0, sizeof(updates
));
3999 stream_update
->src
= dc_stream
->src
;
4000 stream_update
->dst
= dc_stream
->dst
;
4001 stream_update
->out_transfer_func
= dc_stream
->out_transfer_func
;
4003 for (i
= 0; i
< new_plane_count
; i
++) {
4004 updates
[i
].surface
= plane_states
[i
];
4006 (struct dc_gamma
*)plane_states
[i
]->gamma_correction
;
4007 updates
[i
].in_transfer_func
= plane_states
[i
]->in_transfer_func
;
4008 flip_addr
[i
].address
= plane_states
[i
]->address
;
4009 flip_addr
[i
].flip_immediate
= plane_states
[i
]->flip_immediate
;
4010 plane_info
[i
].color_space
= plane_states
[i
]->color_space
;
4011 plane_info
[i
].format
= plane_states
[i
]->format
;
4012 plane_info
[i
].plane_size
= plane_states
[i
]->plane_size
;
4013 plane_info
[i
].rotation
= plane_states
[i
]->rotation
;
4014 plane_info
[i
].horizontal_mirror
= plane_states
[i
]->horizontal_mirror
;
4015 plane_info
[i
].stereo_format
= plane_states
[i
]->stereo_format
;
4016 plane_info
[i
].tiling_info
= plane_states
[i
]->tiling_info
;
4017 plane_info
[i
].visible
= plane_states
[i
]->visible
;
4018 plane_info
[i
].per_pixel_alpha
= plane_states
[i
]->per_pixel_alpha
;
4019 plane_info
[i
].dcc
= plane_states
[i
]->dcc
;
4020 scaling_info
[i
].scaling_quality
= plane_states
[i
]->scaling_quality
;
4021 scaling_info
[i
].src_rect
= plane_states
[i
]->src_rect
;
4022 scaling_info
[i
].dst_rect
= plane_states
[i
]->dst_rect
;
4023 scaling_info
[i
].clip_rect
= plane_states
[i
]->clip_rect
;
4025 updates
[i
].flip_addr
= &flip_addr
[i
];
4026 updates
[i
].plane_info
= &plane_info
[i
];
4027 updates
[i
].scaling_info
= &scaling_info
[i
];
4030 dc_commit_updates_for_stream(
4034 dc_stream
, stream_update
, plane_states
, state
);
4038 kfree(scaling_info
);
4039 kfree(stream_update
);
4043 static void amdgpu_dm_commit_planes(struct drm_atomic_state
*state
,
4044 struct drm_device
*dev
,
4045 struct amdgpu_display_manager
*dm
,
4046 struct drm_crtc
*pcrtc
,
4047 bool *wait_for_vblank
)
4050 struct drm_plane
*plane
;
4051 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
4052 struct dc_stream_state
*dc_stream_attach
;
4053 struct dc_plane_state
*plane_states_constructed
[MAX_SURFACES
];
4054 struct amdgpu_crtc
*acrtc_attach
= to_amdgpu_crtc(pcrtc
);
4055 struct drm_crtc_state
*new_pcrtc_state
=
4056 drm_atomic_get_new_crtc_state(state
, pcrtc
);
4057 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(new_pcrtc_state
);
4058 struct dm_crtc_state
*dm_old_crtc_state
=
4059 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state
, pcrtc
));
4060 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4061 int planes_count
= 0;
4062 unsigned long flags
;
4064 /* update planes when needed */
4065 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
4066 struct drm_crtc
*crtc
= new_plane_state
->crtc
;
4067 struct drm_crtc_state
*new_crtc_state
;
4068 struct drm_framebuffer
*fb
= new_plane_state
->fb
;
4070 struct dm_plane_state
*dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
4072 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
4073 handle_cursor_update(plane
, old_plane_state
);
4077 if (!fb
|| !crtc
|| pcrtc
!= crtc
)
4080 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, crtc
);
4081 if (!new_crtc_state
->active
)
4084 pflip_needed
= !state
->allow_modeset
;
4086 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
4087 if (acrtc_attach
->pflip_status
!= AMDGPU_FLIP_NONE
) {
4088 DRM_ERROR("%s: acrtc %d, already busy\n",
4090 acrtc_attach
->crtc_id
);
4091 /* In commit tail framework this cannot happen */
4094 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
4096 if (!pflip_needed
|| plane
->type
== DRM_PLANE_TYPE_OVERLAY
) {
4097 WARN_ON(!dm_new_plane_state
->dc_state
);
4099 plane_states_constructed
[planes_count
] = dm_new_plane_state
->dc_state
;
4101 dc_stream_attach
= acrtc_state
->stream
;
4104 } else if (new_crtc_state
->planes_changed
) {
4105 /* Assume even ONE crtc with immediate flip means
4106 * entire can't wait for VBLANK
4107 * TODO Check if it's correct
4110 new_pcrtc_state
->pageflip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
?
4113 /* TODO: Needs rework for multiplane flip */
4114 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
)
4115 drm_crtc_vblank_get(crtc
);
4120 (uint32_t)drm_crtc_vblank_count(crtc
) + *wait_for_vblank
,
4127 unsigned long flags
;
4129 if (new_pcrtc_state
->event
) {
4131 drm_crtc_vblank_get(pcrtc
);
4133 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
4134 prepare_flip_isr(acrtc_attach
);
4135 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
4139 if (false == commit_planes_to_stream(dm
->dc
,
4140 plane_states_constructed
,
4145 dm_error("%s: Failed to attach plane!\n", __func__
);
4147 /*TODO BUG Here should go disable planes on CRTC. */
4152 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
4153 * @crtc_state: the DRM CRTC state
4154 * @stream_state: the DC stream state.
4156 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
4157 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
4159 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state
*crtc_state
,
4160 struct dc_stream_state
*stream_state
)
4162 stream_state
->mode_changed
= crtc_state
->mode_changed
;
4165 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
4166 struct drm_atomic_state
*state
,
4169 struct drm_crtc
*crtc
;
4170 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4171 struct amdgpu_device
*adev
= dev
->dev_private
;
4175 * We evade vblanks and pflips on crtc that
4176 * should be changed. We do it here to flush & disable
4177 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
4178 * it will update crtc->dm_crtc_state->stream pointer which is used in
4181 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4182 struct dm_crtc_state
*dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4183 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4185 if (drm_atomic_crtc_needs_modeset(new_crtc_state
) && dm_old_crtc_state
->stream
)
4186 manage_dm_interrupts(adev
, acrtc
, false);
4188 /* Add check here for SoC's that support hardware cursor plane, to
4189 * unset legacy_cursor_update */
4191 return drm_atomic_helper_commit(dev
, state
, nonblock
);
4193 /*TODO Handle EINTR, reenable IRQ*/
4196 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
)
4198 struct drm_device
*dev
= state
->dev
;
4199 struct amdgpu_device
*adev
= dev
->dev_private
;
4200 struct amdgpu_display_manager
*dm
= &adev
->dm
;
4201 struct dm_atomic_state
*dm_state
;
4203 struct drm_crtc
*crtc
;
4204 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4205 unsigned long flags
;
4206 bool wait_for_vblank
= true;
4207 struct drm_connector
*connector
;
4208 struct drm_connector_state
*old_con_state
, *new_con_state
;
4209 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
4211 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
4213 dm_state
= to_dm_atomic_state(state
);
4215 /* update changed items */
4216 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4217 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4219 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4220 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4223 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4224 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4225 "connectors_changed:%d\n",
4227 new_crtc_state
->enable
,
4228 new_crtc_state
->active
,
4229 new_crtc_state
->planes_changed
,
4230 new_crtc_state
->mode_changed
,
4231 new_crtc_state
->active_changed
,
4232 new_crtc_state
->connectors_changed
);
4234 /* Copy all transient state flags into dc state */
4235 if (dm_new_crtc_state
->stream
) {
4236 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state
->base
,
4237 dm_new_crtc_state
->stream
);
4240 /* handles headless hotplug case, updating new_state and
4241 * aconnector as needed
4244 if (modeset_required(new_crtc_state
, dm_new_crtc_state
->stream
, dm_old_crtc_state
->stream
)) {
4246 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc
->crtc_id
, acrtc
);
4248 if (!dm_new_crtc_state
->stream
) {
4250 * this could happen because of issues with
4251 * userspace notifications delivery.
4252 * In this case userspace tries to set mode on
4253 * display which is disconnect in fact.
4254 * dc_sink in NULL in this case on aconnector.
4255 * We expect reset mode will come soon.
4257 * This can also happen when unplug is done
4258 * during resume sequence ended
4260 * In this case, we want to pretend we still
4261 * have a sink to keep the pipe running so that
4262 * hw state is consistent with the sw state
4264 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4265 __func__
, acrtc
->base
.base
.id
);
4269 if (dm_old_crtc_state
->stream
)
4270 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
4272 pm_runtime_get_noresume(dev
->dev
);
4274 acrtc
->enabled
= true;
4275 acrtc
->hw_mode
= new_crtc_state
->mode
;
4276 crtc
->hwmode
= new_crtc_state
->mode
;
4277 } else if (modereset_required(new_crtc_state
)) {
4278 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc
->crtc_id
, acrtc
);
4280 /* i.e. reset mode */
4281 if (dm_old_crtc_state
->stream
)
4282 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
4284 } /* for_each_crtc_in_state() */
4287 * Add streams after required streams from new and replaced streams
4288 * are removed from freesync module
4290 if (adev
->dm
.freesync_module
) {
4291 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
,
4292 new_crtc_state
, i
) {
4293 struct amdgpu_dm_connector
*aconnector
= NULL
;
4294 struct dm_connector_state
*dm_new_con_state
= NULL
;
4295 struct amdgpu_crtc
*acrtc
= NULL
;
4296 bool modeset_needed
;
4298 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4299 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4300 modeset_needed
= modeset_required(
4302 dm_new_crtc_state
->stream
,
4303 dm_old_crtc_state
->stream
);
4304 /* We add stream to freesync if:
4305 * 1. Said stream is not null, and
4306 * 2. A modeset is requested. This means that the
4307 * stream was removed previously, and needs to be
4310 if (dm_new_crtc_state
->stream
== NULL
||
4314 acrtc
= to_amdgpu_crtc(crtc
);
4317 amdgpu_dm_find_first_crtc_matching_connector(
4320 DRM_DEBUG_DRIVER("Atomic commit: Failed to "
4321 "find connector for acrtc "
4322 "id:%d skipping freesync "
4328 mod_freesync_add_stream(adev
->dm
.freesync_module
,
4329 dm_new_crtc_state
->stream
,
4331 new_con_state
= drm_atomic_get_new_connector_state(
4332 state
, &aconnector
->base
);
4333 dm_new_con_state
= to_dm_connector_state(new_con_state
);
4335 mod_freesync_set_user_enable(adev
->dm
.freesync_module
,
4336 &dm_new_crtc_state
->stream
,
4338 &dm_new_con_state
->user_enable
);
4342 if (dm_state
->context
) {
4343 dm_enable_per_frame_crtc_master_sync(dm_state
->context
);
4344 WARN_ON(!dc_commit_state(dm
->dc
, dm_state
->context
));
4347 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
4348 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4350 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4352 if (dm_new_crtc_state
->stream
!= NULL
) {
4353 const struct dc_stream_status
*status
=
4354 dc_stream_get_status(dm_new_crtc_state
->stream
);
4357 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state
->stream
, acrtc
);
4359 acrtc
->otg_inst
= status
->primary_otg_inst
;
4363 /* Handle scaling and underscan changes*/
4364 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
4365 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
4366 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
4367 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
4368 struct dc_stream_status
*status
= NULL
;
4371 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, &acrtc
->base
);
4372 old_crtc_state
= drm_atomic_get_old_crtc_state(state
, &acrtc
->base
);
4375 /* Skip any modesets/resets */
4376 if (!acrtc
|| drm_atomic_crtc_needs_modeset(new_crtc_state
))
4379 /* Skip any thing not scale or underscan changes */
4380 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
))
4383 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4385 update_stream_scaling_settings(&dm_new_con_state
->base
.crtc
->mode
,
4386 dm_new_con_state
, (struct dc_stream_state
*)dm_new_crtc_state
->stream
);
4388 if (!dm_new_crtc_state
->stream
)
4391 status
= dc_stream_get_status(dm_new_crtc_state
->stream
);
4393 WARN_ON(!status
->plane_count
);
4395 /*TODO How it works with MPO ?*/
4396 if (!commit_planes_to_stream(
4398 status
->plane_states
,
4399 status
->plane_count
,
4401 to_dm_crtc_state(old_crtc_state
),
4403 dm_error("%s: Failed to update stream scaling!\n", __func__
);
4406 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
,
4407 new_crtc_state
, i
) {
4409 * loop to enable interrupts on newly arrived crtc
4411 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4412 bool modeset_needed
;
4414 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4415 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4416 modeset_needed
= modeset_required(
4418 dm_new_crtc_state
->stream
,
4419 dm_old_crtc_state
->stream
);
4421 if (dm_new_crtc_state
->stream
== NULL
|| !modeset_needed
)
4424 if (adev
->dm
.freesync_module
)
4425 mod_freesync_notify_mode_change(
4426 adev
->dm
.freesync_module
,
4427 &dm_new_crtc_state
->stream
, 1);
4429 manage_dm_interrupts(adev
, acrtc
, true);
4432 /* update planes when needed per crtc*/
4433 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, j
) {
4434 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4436 if (dm_new_crtc_state
->stream
)
4437 amdgpu_dm_commit_planes(state
, dev
, dm
, crtc
, &wait_for_vblank
);
4442 * send vblank event on all events not handled in flip and
4443 * mark consumed event for drm_atomic_helper_commit_hw_done
4445 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
4446 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
4448 if (new_crtc_state
->event
)
4449 drm_send_event_locked(dev
, &new_crtc_state
->event
->base
);
4451 new_crtc_state
->event
= NULL
;
4453 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
4455 /* Signal HW programming completion */
4456 drm_atomic_helper_commit_hw_done(state
);
4458 if (wait_for_vblank
)
4459 drm_atomic_helper_wait_for_flip_done(dev
, state
);
4461 drm_atomic_helper_cleanup_planes(dev
, state
);
4463 /* Finally, drop a runtime PM reference for each newly disabled CRTC,
4464 * so we can put the GPU into runtime suspend if we're not driving any
4467 pm_runtime_mark_last_busy(dev
->dev
);
4468 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4469 if (old_crtc_state
->active
&& !new_crtc_state
->active
)
4470 pm_runtime_put_autosuspend(dev
->dev
);
4475 static int dm_force_atomic_commit(struct drm_connector
*connector
)
4478 struct drm_device
*ddev
= connector
->dev
;
4479 struct drm_atomic_state
*state
= drm_atomic_state_alloc(ddev
);
4480 struct amdgpu_crtc
*disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4481 struct drm_plane
*plane
= disconnected_acrtc
->base
.primary
;
4482 struct drm_connector_state
*conn_state
;
4483 struct drm_crtc_state
*crtc_state
;
4484 struct drm_plane_state
*plane_state
;
4489 state
->acquire_ctx
= ddev
->mode_config
.acquire_ctx
;
4491 /* Construct an atomic state to restore previous display setting */
4494 * Attach connectors to drm_atomic_state
4496 conn_state
= drm_atomic_get_connector_state(state
, connector
);
4498 ret
= PTR_ERR_OR_ZERO(conn_state
);
4502 /* Attach crtc to drm_atomic_state*/
4503 crtc_state
= drm_atomic_get_crtc_state(state
, &disconnected_acrtc
->base
);
4505 ret
= PTR_ERR_OR_ZERO(crtc_state
);
4509 /* force a restore */
4510 crtc_state
->mode_changed
= true;
4512 /* Attach plane to drm_atomic_state */
4513 plane_state
= drm_atomic_get_plane_state(state
, plane
);
4515 ret
= PTR_ERR_OR_ZERO(plane_state
);
4520 /* Call commit internally with the state we just constructed */
4521 ret
= drm_atomic_commit(state
);
4526 DRM_ERROR("Restoring old state failed with %i\n", ret
);
4527 drm_atomic_state_put(state
);
4533 * This functions handle all cases when set mode does not come upon hotplug.
4534 * This include when the same display is unplugged then plugged back into the
4535 * same port and when we are running without usermode desktop manager supprot
4537 void dm_restore_drm_connector_state(struct drm_device
*dev
,
4538 struct drm_connector
*connector
)
4540 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4541 struct amdgpu_crtc
*disconnected_acrtc
;
4542 struct dm_crtc_state
*acrtc_state
;
4544 if (!aconnector
->dc_sink
|| !connector
->state
|| !connector
->encoder
)
4547 disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4548 if (!disconnected_acrtc
)
4551 acrtc_state
= to_dm_crtc_state(disconnected_acrtc
->base
.state
);
4552 if (!acrtc_state
->stream
)
4556 * If the previous sink is not released and different from the current,
4557 * we deduce we are in a state where we can not rely on usermode call
4558 * to turn on the display, so we do it here
4560 if (acrtc_state
->stream
->sink
!= aconnector
->dc_sink
)
4561 dm_force_atomic_commit(&aconnector
->base
);
4565 * Grabs all modesetting locks to serialize against any blocking commits,
4566 * Waits for completion of all non blocking commits.
4568 static int do_aquire_global_lock(struct drm_device
*dev
,
4569 struct drm_atomic_state
*state
)
4571 struct drm_crtc
*crtc
;
4572 struct drm_crtc_commit
*commit
;
4575 /* Adding all modeset locks to aquire_ctx will
4576 * ensure that when the framework release it the
4577 * extra locks we are locking here will get released to
4579 ret
= drm_modeset_lock_all_ctx(dev
, state
->acquire_ctx
);
4583 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
4584 spin_lock(&crtc
->commit_lock
);
4585 commit
= list_first_entry_or_null(&crtc
->commit_list
,
4586 struct drm_crtc_commit
, commit_entry
);
4588 drm_crtc_commit_get(commit
);
4589 spin_unlock(&crtc
->commit_lock
);
4594 /* Make sure all pending HW programming completed and
4597 ret
= wait_for_completion_interruptible_timeout(&commit
->hw_done
, 10*HZ
);
4600 ret
= wait_for_completion_interruptible_timeout(
4601 &commit
->flip_done
, 10*HZ
);
4604 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
4605 "timed out\n", crtc
->base
.id
, crtc
->name
);
4607 drm_crtc_commit_put(commit
);
4610 return ret
< 0 ? ret
: 0;
4613 static int dm_update_crtcs_state(struct dc
*dc
,
4614 struct drm_atomic_state
*state
,
4616 bool *lock_and_validation_needed
)
4618 struct drm_crtc
*crtc
;
4619 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4621 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
4622 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4623 struct dc_stream_state
*new_stream
;
4626 /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
4627 /* update changed items */
4628 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4629 struct amdgpu_crtc
*acrtc
= NULL
;
4630 struct amdgpu_dm_connector
*aconnector
= NULL
;
4631 struct drm_connector_state
*new_con_state
= NULL
;
4632 struct dm_connector_state
*dm_conn_state
= NULL
;
4633 struct drm_plane_state
*new_plane_state
= NULL
;
4637 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4638 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4639 acrtc
= to_amdgpu_crtc(crtc
);
4641 new_plane_state
= drm_atomic_get_new_plane_state(state
, new_crtc_state
->crtc
->primary
);
4643 if (new_crtc_state
->enable
&& new_plane_state
&& !new_plane_state
->fb
) {
4648 aconnector
= amdgpu_dm_find_first_crtc_matching_connector(state
, crtc
);
4650 /* TODO This hack should go away */
4651 if (aconnector
&& enable
) {
4652 // Make sure fake sink is created in plug-in scenario
4653 new_con_state
= drm_atomic_get_connector_state(state
,
4656 if (IS_ERR(new_con_state
)) {
4657 ret
= PTR_ERR_OR_ZERO(new_con_state
);
4661 dm_conn_state
= to_dm_connector_state(new_con_state
);
4663 new_stream
= create_stream_for_sink(aconnector
,
4664 &new_crtc_state
->mode
,
4668 * we can have no stream on ACTION_SET if a display
4669 * was disconnected during S3, in this case it not and
4670 * error, the OS will be updated after detection, and
4671 * do the right thing on next atomic commit
4675 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4676 __func__
, acrtc
->base
.base
.id
);
4680 if (dc_is_stream_unchanged(new_stream
, dm_old_crtc_state
->stream
) &&
4681 dc_is_stream_scaling_unchanged(new_stream
, dm_old_crtc_state
->stream
)) {
4682 new_crtc_state
->mode_changed
= false;
4683 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
4684 new_crtc_state
->mode_changed
);
4688 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
4692 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4693 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4694 "connectors_changed:%d\n",
4696 new_crtc_state
->enable
,
4697 new_crtc_state
->active
,
4698 new_crtc_state
->planes_changed
,
4699 new_crtc_state
->mode_changed
,
4700 new_crtc_state
->active_changed
,
4701 new_crtc_state
->connectors_changed
);
4703 /* Remove stream for any changed/disabled CRTC */
4706 if (!dm_old_crtc_state
->stream
)
4709 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
4712 /* i.e. reset mode */
4713 if (dc_remove_stream_from_ctx(
4716 dm_old_crtc_state
->stream
) != DC_OK
) {
4721 dc_stream_release(dm_old_crtc_state
->stream
);
4722 dm_new_crtc_state
->stream
= NULL
;
4724 *lock_and_validation_needed
= true;
4726 } else {/* Add stream for any updated/enabled CRTC */
4728 * Quick fix to prevent NULL pointer on new_stream when
4729 * added MST connectors not found in existing crtc_state in the chained mode
4730 * TODO: need to dig out the root cause of that
4732 if (!aconnector
|| (!aconnector
->dc_sink
&& aconnector
->mst_port
))
4735 if (modereset_required(new_crtc_state
))
4738 if (modeset_required(new_crtc_state
, new_stream
,
4739 dm_old_crtc_state
->stream
)) {
4741 WARN_ON(dm_new_crtc_state
->stream
);
4743 dm_new_crtc_state
->stream
= new_stream
;
4745 dc_stream_retain(new_stream
);
4747 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
4750 if (dc_add_stream_to_ctx(
4753 dm_new_crtc_state
->stream
) != DC_OK
) {
4758 *lock_and_validation_needed
= true;
4763 /* Release extra reference */
4765 dc_stream_release(new_stream
);
4768 * We want to do dc stream updates that do not require a
4769 * full modeset below.
4771 if (!(enable
&& aconnector
&& new_crtc_state
->enable
&&
4772 new_crtc_state
->active
))
4775 * Given above conditions, the dc state cannot be NULL because:
4776 * 1. We're in the process of enabling CRTCs (just been added
4777 * to the dc context, or already is on the context)
4778 * 2. Has a valid connector attached, and
4779 * 3. Is currently active and enabled.
4780 * => The dc stream state currently exists.
4782 BUG_ON(dm_new_crtc_state
->stream
== NULL
);
4784 /* Color managment settings */
4785 if (dm_new_crtc_state
->base
.color_mgmt_changed
) {
4786 ret
= amdgpu_dm_set_regamma_lut(dm_new_crtc_state
);
4789 amdgpu_dm_set_ctm(dm_new_crtc_state
);
4797 dc_stream_release(new_stream
);
4801 static int dm_update_planes_state(struct dc
*dc
,
4802 struct drm_atomic_state
*state
,
4804 bool *lock_and_validation_needed
)
4806 struct drm_crtc
*new_plane_crtc
, *old_plane_crtc
;
4807 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4808 struct drm_plane
*plane
;
4809 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
4810 struct dm_crtc_state
*dm_new_crtc_state
, *dm_old_crtc_state
;
4811 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4812 struct dm_plane_state
*dm_new_plane_state
, *dm_old_plane_state
;
4814 /* TODO return page_flip_needed() function */
4815 bool pflip_needed
= !state
->allow_modeset
;
4819 /* Add new planes, in reverse order as DC expectation */
4820 for_each_oldnew_plane_in_state_reverse(state
, plane
, old_plane_state
, new_plane_state
, i
) {
4821 new_plane_crtc
= new_plane_state
->crtc
;
4822 old_plane_crtc
= old_plane_state
->crtc
;
4823 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
4824 dm_old_plane_state
= to_dm_plane_state(old_plane_state
);
4826 /*TODO Implement atomic check for cursor plane */
4827 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
4830 /* Remove any changed/removed planes */
4833 plane
->type
!= DRM_PLANE_TYPE_OVERLAY
)
4836 if (!old_plane_crtc
)
4839 old_crtc_state
= drm_atomic_get_old_crtc_state(
4840 state
, old_plane_crtc
);
4841 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4843 if (!dm_old_crtc_state
->stream
)
4846 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
4847 plane
->base
.id
, old_plane_crtc
->base
.id
);
4849 if (!dc_remove_plane_from_context(
4851 dm_old_crtc_state
->stream
,
4852 dm_old_plane_state
->dc_state
,
4853 dm_state
->context
)) {
4860 dc_plane_state_release(dm_old_plane_state
->dc_state
);
4861 dm_new_plane_state
->dc_state
= NULL
;
4863 *lock_and_validation_needed
= true;
4865 } else { /* Add new planes */
4866 struct dc_plane_state
*dc_new_plane_state
;
4868 if (drm_atomic_plane_disabling(plane
->state
, new_plane_state
))
4871 if (!new_plane_crtc
)
4874 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, new_plane_crtc
);
4875 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4877 if (!dm_new_crtc_state
->stream
)
4881 plane
->type
!= DRM_PLANE_TYPE_OVERLAY
)
4884 WARN_ON(dm_new_plane_state
->dc_state
);
4886 dc_new_plane_state
= dc_create_plane_state(dc
);
4887 if (!dc_new_plane_state
)
4890 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4891 plane
->base
.id
, new_plane_crtc
->base
.id
);
4893 ret
= fill_plane_attributes(
4894 new_plane_crtc
->dev
->dev_private
,
4899 dc_plane_state_release(dc_new_plane_state
);
4904 * Any atomic check errors that occur after this will
4905 * not need a release. The plane state will be attached
4906 * to the stream, and therefore part of the atomic
4907 * state. It'll be released when the atomic state is
4910 if (!dc_add_plane_to_context(
4912 dm_new_crtc_state
->stream
,
4914 dm_state
->context
)) {
4916 dc_plane_state_release(dc_new_plane_state
);
4920 dm_new_plane_state
->dc_state
= dc_new_plane_state
;
4922 /* Tell DC to do a full surface update every time there
4923 * is a plane change. Inefficient, but works for now.
4925 dm_new_plane_state
->dc_state
->update_flags
.bits
.full_update
= 1;
4927 *lock_and_validation_needed
= true;
4935 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
4936 struct drm_atomic_state
*state
)
4938 struct amdgpu_device
*adev
= dev
->dev_private
;
4939 struct dc
*dc
= adev
->dm
.dc
;
4940 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4941 struct drm_connector
*connector
;
4942 struct drm_connector_state
*old_con_state
, *new_con_state
;
4943 struct drm_crtc
*crtc
;
4944 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4948 * This bool will be set for true for any modeset/reset
4949 * or plane update which implies non fast surface update.
4951 bool lock_and_validation_needed
= false;
4953 ret
= drm_atomic_helper_check_modeset(dev
, state
);
4957 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4958 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
) &&
4959 !new_crtc_state
->color_mgmt_changed
)
4962 if (!new_crtc_state
->enable
)
4965 ret
= drm_atomic_add_affected_connectors(state
, crtc
);
4969 ret
= drm_atomic_add_affected_planes(state
, crtc
);
4974 dm_state
->context
= dc_create_state();
4975 ASSERT(dm_state
->context
);
4976 dc_resource_state_copy_construct_current(dc
, dm_state
->context
);
4978 /* Remove exiting planes if they are modified */
4979 ret
= dm_update_planes_state(dc
, state
, false, &lock_and_validation_needed
);
4984 /* Disable all crtcs which require disable */
4985 ret
= dm_update_crtcs_state(dc
, state
, false, &lock_and_validation_needed
);
4990 /* Enable all crtcs which require enable */
4991 ret
= dm_update_crtcs_state(dc
, state
, true, &lock_and_validation_needed
);
4996 /* Add new/modified planes */
4997 ret
= dm_update_planes_state(dc
, state
, true, &lock_and_validation_needed
);
5002 /* Run this here since we want to validate the streams we created */
5003 ret
= drm_atomic_helper_check_planes(dev
, state
);
5007 /* Check scaling and underscan changes*/
5008 /*TODO Removed scaling changes validation due to inability to commit
5009 * new stream into context w\o causing full reset. Need to
5010 * decide how to handle.
5012 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
5013 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
5014 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
5015 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
5017 /* Skip any modesets/resets */
5018 if (!acrtc
|| drm_atomic_crtc_needs_modeset(
5019 drm_atomic_get_new_crtc_state(state
, &acrtc
->base
)))
5022 /* Skip any thing not scale or underscan changes */
5023 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
))
5026 lock_and_validation_needed
= true;
5030 * For full updates case when
5031 * removing/adding/updating streams on once CRTC while flipping
5033 * acquiring global lock will guarantee that any such full
5035 * will wait for completion of any outstanding flip using DRMs
5036 * synchronization events.
5039 if (lock_and_validation_needed
) {
5041 ret
= do_aquire_global_lock(dev
, state
);
5045 if (dc_validate_global_state(dc
, dm_state
->context
) != DC_OK
) {
5051 /* Must be success */
5056 if (ret
== -EDEADLK
)
5057 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
5058 else if (ret
== -EINTR
|| ret
== -EAGAIN
|| ret
== -ERESTARTSYS
)
5059 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
5061 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret
);
5066 static bool is_dp_capable_without_timing_msa(struct dc
*dc
,
5067 struct amdgpu_dm_connector
*amdgpu_dm_connector
)
5070 bool capable
= false;
5072 if (amdgpu_dm_connector
->dc_link
&&
5073 dm_helpers_dp_read_dpcd(
5075 amdgpu_dm_connector
->dc_link
,
5076 DP_DOWN_STREAM_PORT_COUNT
,
5078 sizeof(dpcd_data
))) {
5079 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
5084 void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector
*connector
,
5088 bool edid_check_required
;
5089 struct detailed_timing
*timing
;
5090 struct detailed_non_pixel
*data
;
5091 struct detailed_data_monitor_range
*range
;
5092 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
5093 to_amdgpu_dm_connector(connector
);
5094 struct dm_connector_state
*dm_con_state
;
5096 struct drm_device
*dev
= connector
->dev
;
5097 struct amdgpu_device
*adev
= dev
->dev_private
;
5099 if (!connector
->state
) {
5100 DRM_ERROR("%s - Connector has no state", __func__
);
5104 dm_con_state
= to_dm_connector_state(connector
->state
);
5106 edid_check_required
= false;
5107 if (!amdgpu_dm_connector
->dc_sink
) {
5108 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
5111 if (!adev
->dm
.freesync_module
)
5114 * if edid non zero restrict freesync only for dp and edp
5117 if (amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
5118 || amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
5119 edid_check_required
= is_dp_capable_without_timing_msa(
5121 amdgpu_dm_connector
);
5124 dm_con_state
->freesync_capable
= false;
5125 if (edid_check_required
== true && (edid
->version
> 1 ||
5126 (edid
->version
== 1 && edid
->revision
> 1))) {
5127 for (i
= 0; i
< 4; i
++) {
5129 timing
= &edid
->detailed_timings
[i
];
5130 data
= &timing
->data
.other_data
;
5131 range
= &data
->data
.range
;
5133 * Check if monitor has continuous frequency mode
5135 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
5138 * Check for flag range limits only. If flag == 1 then
5139 * no additional timing information provided.
5140 * Default GTF, GTF Secondary curve and CVT are not
5143 if (range
->flags
!= 1)
5146 amdgpu_dm_connector
->min_vfreq
= range
->min_vfreq
;
5147 amdgpu_dm_connector
->max_vfreq
= range
->max_vfreq
;
5148 amdgpu_dm_connector
->pixel_clock_mhz
=
5149 range
->pixel_clock_mhz
* 10;
5153 if (amdgpu_dm_connector
->max_vfreq
-
5154 amdgpu_dm_connector
->min_vfreq
> 10) {
5155 amdgpu_dm_connector
->caps
.supported
= true;
5156 amdgpu_dm_connector
->caps
.min_refresh_in_micro_hz
=
5157 amdgpu_dm_connector
->min_vfreq
* 1000000;
5158 amdgpu_dm_connector
->caps
.max_refresh_in_micro_hz
=
5159 amdgpu_dm_connector
->max_vfreq
* 1000000;
5160 dm_con_state
->freesync_capable
= true;
5165 * TODO figure out how to notify user-mode or DRM of freesync caps
5166 * once we figure out how to deal with freesync in an upstreamable
5172 void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector
*connector
)
5175 * TODO fill in once we figure out how to deal with freesync in
5176 * an upstreamable fashion