2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include "dm_services_types.h"
28 #include "dc/inc/core_types.h"
32 #include "amdgpu_display.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
40 #include "dm_services_types.h"
41 #include "amdgpu_dm_mst_types.h"
43 #include "ivsrcid/ivsrcid_vislands30.h"
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/version.h>
48 #include <linux/types.h>
49 #include <linux/pm_runtime.h>
52 #include <drm/drm_atomic.h>
53 #include <drm/drm_atomic_helper.h>
54 #include <drm/drm_dp_mst_helper.h>
55 #include <drm/drm_fb_helper.h>
56 #include <drm/drm_edid.h>
58 #include "modules/inc/mod_freesync.h"
60 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
61 #include "ivsrcid/irqsrcs_dcn_1_0.h"
63 #include "dcn/dcn_1_0_offset.h"
64 #include "dcn/dcn_1_0_sh_mask.h"
65 #include "soc15_hw_ip.h"
66 #include "vega10_ip_offset.h"
68 #include "soc15_common.h"
71 #include "modules/inc/mod_freesync.h"
73 #include "i2caux_interface.h"
75 /* basic init/fini API */
76 static int amdgpu_dm_init(struct amdgpu_device
*adev
);
77 static void amdgpu_dm_fini(struct amdgpu_device
*adev
);
79 /* initializes drm_device display related structures, based on the information
80 * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
81 * drm_encoder, drm_mode_config
83 * Returns 0 on success
85 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
);
86 /* removes and deallocates the drm structures, created by the above function */
87 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
);
90 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
);
92 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
93 struct amdgpu_plane
*aplane
,
94 unsigned long possible_crtcs
);
95 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
96 struct drm_plane
*plane
,
98 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
99 struct amdgpu_dm_connector
*amdgpu_dm_connector
,
101 struct amdgpu_encoder
*amdgpu_encoder
);
102 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
103 struct amdgpu_encoder
*aencoder
,
104 uint32_t link_index
);
106 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
);
108 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
109 struct drm_atomic_state
*state
,
112 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
);
114 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
115 struct drm_atomic_state
*state
);
120 static const enum drm_plane_type dm_plane_type_default
[AMDGPU_MAX_PLANES
] = {
121 DRM_PLANE_TYPE_PRIMARY
,
122 DRM_PLANE_TYPE_PRIMARY
,
123 DRM_PLANE_TYPE_PRIMARY
,
124 DRM_PLANE_TYPE_PRIMARY
,
125 DRM_PLANE_TYPE_PRIMARY
,
126 DRM_PLANE_TYPE_PRIMARY
,
129 static const enum drm_plane_type dm_plane_type_carizzo
[AMDGPU_MAX_PLANES
] = {
130 DRM_PLANE_TYPE_PRIMARY
,
131 DRM_PLANE_TYPE_PRIMARY
,
132 DRM_PLANE_TYPE_PRIMARY
,
133 DRM_PLANE_TYPE_OVERLAY
,/* YUV Capable Underlay */
136 static const enum drm_plane_type dm_plane_type_stoney
[AMDGPU_MAX_PLANES
] = {
137 DRM_PLANE_TYPE_PRIMARY
,
138 DRM_PLANE_TYPE_PRIMARY
,
139 DRM_PLANE_TYPE_OVERLAY
, /* YUV Capable Underlay */
143 * dm_vblank_get_counter
146 * Get counter for number of vertical blanks
149 * struct amdgpu_device *adev - [in] desired amdgpu device
150 * int disp_idx - [in] which CRTC to get the counter from
153 * Counter for vertical blanks
155 static u32
dm_vblank_get_counter(struct amdgpu_device
*adev
, int crtc
)
157 if (crtc
>= adev
->mode_info
.num_crtc
)
160 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
161 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
165 if (acrtc_state
->stream
== NULL
) {
166 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
171 return dc_stream_get_vblank_counter(acrtc_state
->stream
);
175 static int dm_crtc_get_scanoutpos(struct amdgpu_device
*adev
, int crtc
,
176 u32
*vbl
, u32
*position
)
178 uint32_t v_blank_start
, v_blank_end
, h_position
, v_position
;
180 if ((crtc
< 0) || (crtc
>= adev
->mode_info
.num_crtc
))
183 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
184 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
187 if (acrtc_state
->stream
== NULL
) {
188 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
194 * TODO rework base driver to use values directly.
195 * for now parse it back into reg-format
197 dc_stream_get_scanoutpos(acrtc_state
->stream
,
203 *position
= v_position
| (h_position
<< 16);
204 *vbl
= v_blank_start
| (v_blank_end
<< 16);
210 static bool dm_is_idle(void *handle
)
/* amd_ip_funcs.wait_for_idle stub — nothing to wait on yet. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
222 static bool dm_check_soft_reset(void *handle
)
/* amd_ip_funcs.soft_reset stub. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
233 static struct amdgpu_crtc
*
234 get_crtc_by_otg_inst(struct amdgpu_device
*adev
,
237 struct drm_device
*dev
= adev
->ddev
;
238 struct drm_crtc
*crtc
;
239 struct amdgpu_crtc
*amdgpu_crtc
;
242 * following if is check inherited from both functions where this one is
243 * used now. Need to be checked why it could happen.
245 if (otg_inst
== -1) {
247 return adev
->mode_info
.crtcs
[0];
250 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
251 amdgpu_crtc
= to_amdgpu_crtc(crtc
);
253 if (amdgpu_crtc
->otg_inst
== otg_inst
)
260 static void dm_pflip_high_irq(void *interrupt_params
)
262 struct amdgpu_crtc
*amdgpu_crtc
;
263 struct common_irq_params
*irq_params
= interrupt_params
;
264 struct amdgpu_device
*adev
= irq_params
->adev
;
267 amdgpu_crtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_PFLIP
);
269 /* IRQ could occur when in initial stage */
270 /*TODO work and BO cleanup */
271 if (amdgpu_crtc
== NULL
) {
272 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
276 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
278 if (amdgpu_crtc
->pflip_status
!= AMDGPU_FLIP_SUBMITTED
){
279 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
280 amdgpu_crtc
->pflip_status
,
281 AMDGPU_FLIP_SUBMITTED
,
282 amdgpu_crtc
->crtc_id
,
284 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
289 /* wakeup usersapce */
290 if (amdgpu_crtc
->event
) {
291 /* Update to correct count/ts if racing with vblank irq */
292 drm_crtc_accurate_vblank_count(&amdgpu_crtc
->base
);
294 drm_crtc_send_vblank_event(&amdgpu_crtc
->base
, amdgpu_crtc
->event
);
296 /* page flip completed. clean up */
297 amdgpu_crtc
->event
= NULL
;
302 amdgpu_crtc
->pflip_status
= AMDGPU_FLIP_NONE
;
303 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
305 DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
306 __func__
, amdgpu_crtc
->crtc_id
, amdgpu_crtc
);
308 drm_crtc_vblank_put(&amdgpu_crtc
->base
);
311 static void dm_crtc_high_irq(void *interrupt_params
)
313 struct common_irq_params
*irq_params
= interrupt_params
;
314 struct amdgpu_device
*adev
= irq_params
->adev
;
315 uint8_t crtc_index
= 0;
316 struct amdgpu_crtc
*acrtc
;
318 acrtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_VBLANK
);
321 crtc_index
= acrtc
->crtc_id
;
323 drm_handle_vblank(adev
->ddev
, crtc_index
);
324 amdgpu_dm_crtc_handle_crc_irq(&acrtc
->base
);
327 static int dm_set_clockgating_state(void *handle
,
328 enum amd_clockgating_state state
)
333 static int dm_set_powergating_state(void *handle
,
334 enum amd_powergating_state state
)
339 /* Prototypes of private functions */
340 static int dm_early_init(void* handle
);
342 static void hotplug_notify_work_func(struct work_struct
*work
)
344 struct amdgpu_display_manager
*dm
= container_of(work
, struct amdgpu_display_manager
, mst_hotplug_work
);
345 struct drm_device
*dev
= dm
->ddev
;
347 drm_kms_helper_hotplug_event(dev
);
350 #if defined(CONFIG_DRM_AMD_DC_FBC)
351 /* Allocate memory for FBC compressed data */
352 static void amdgpu_dm_fbc_init(struct drm_connector
*connector
)
354 struct drm_device
*dev
= connector
->dev
;
355 struct amdgpu_device
*adev
= dev
->dev_private
;
356 struct dm_comressor_info
*compressor
= &adev
->dm
.compressor
;
357 struct amdgpu_dm_connector
*aconn
= to_amdgpu_dm_connector(connector
);
358 struct drm_display_mode
*mode
;
359 unsigned long max_size
= 0;
361 if (adev
->dm
.dc
->fbc_compressor
== NULL
)
364 if (aconn
->dc_link
->connector_signal
!= SIGNAL_TYPE_EDP
)
367 if (compressor
->bo_ptr
)
371 list_for_each_entry(mode
, &connector
->modes
, head
) {
372 if (max_size
< mode
->htotal
* mode
->vtotal
)
373 max_size
= mode
->htotal
* mode
->vtotal
;
377 int r
= amdgpu_bo_create_kernel(adev
, max_size
* 4, PAGE_SIZE
,
378 AMDGPU_GEM_DOMAIN_GTT
, &compressor
->bo_ptr
,
379 &compressor
->gpu_addr
, &compressor
->cpu_addr
);
382 DRM_ERROR("DM: Failed to initialize FBC\n");
384 adev
->dm
.dc
->ctx
->fbc_gpu_addr
= compressor
->gpu_addr
;
385 DRM_INFO("DM: FBC alloc %lu\n", max_size
*4);
396 * Returns 0 on success
398 static int amdgpu_dm_init(struct amdgpu_device
*adev
)
400 struct dc_init_data init_data
;
401 adev
->dm
.ddev
= adev
->ddev
;
402 adev
->dm
.adev
= adev
;
404 /* Zero all the fields */
405 memset(&init_data
, 0, sizeof(init_data
));
407 if(amdgpu_dm_irq_init(adev
)) {
408 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
412 init_data
.asic_id
.chip_family
= adev
->family
;
414 init_data
.asic_id
.pci_revision_id
= adev
->rev_id
;
415 init_data
.asic_id
.hw_internal_rev
= adev
->external_rev_id
;
417 init_data
.asic_id
.vram_width
= adev
->gmc
.vram_width
;
418 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
419 init_data
.asic_id
.atombios_base_address
=
420 adev
->mode_info
.atom_context
->bios
;
422 init_data
.driver
= adev
;
424 adev
->dm
.cgs_device
= amdgpu_cgs_create_device(adev
);
426 if (!adev
->dm
.cgs_device
) {
427 DRM_ERROR("amdgpu: failed to create cgs device.\n");
431 init_data
.cgs_device
= adev
->dm
.cgs_device
;
435 init_data
.dce_environment
= DCE_ENV_PRODUCTION_DRV
;
438 * TODO debug why this doesn't work on Raven
440 if (adev
->flags
& AMD_IS_APU
&&
441 adev
->asic_type
>= CHIP_CARRIZO
&&
442 adev
->asic_type
< CHIP_RAVEN
)
443 init_data
.flags
.gpu_vm_support
= true;
445 /* Display Core create. */
446 adev
->dm
.dc
= dc_create(&init_data
);
449 DRM_INFO("Display Core initialized with v%s!\n", DC_VER
);
451 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER
);
455 INIT_WORK(&adev
->dm
.mst_hotplug_work
, hotplug_notify_work_func
);
457 adev
->dm
.freesync_module
= mod_freesync_create(adev
->dm
.dc
);
458 if (!adev
->dm
.freesync_module
) {
460 "amdgpu: failed to initialize freesync_module.\n");
462 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
463 adev
->dm
.freesync_module
);
465 amdgpu_dm_init_color_mod();
467 if (amdgpu_dm_initialize_drm_device(adev
)) {
469 "amdgpu: failed to initialize sw for display support.\n");
473 /* Update the actual used number of crtc */
474 adev
->mode_info
.num_crtc
= adev
->dm
.display_indexes_num
;
476 /* TODO: Add_display_info? */
478 /* TODO use dynamic cursor width */
479 adev
->ddev
->mode_config
.cursor_width
= adev
->dm
.dc
->caps
.max_cursor_size
;
480 adev
->ddev
->mode_config
.cursor_height
= adev
->dm
.dc
->caps
.max_cursor_size
;
482 if (drm_vblank_init(adev
->ddev
, adev
->dm
.display_indexes_num
)) {
484 "amdgpu: failed to initialize sw for display support.\n");
488 DRM_DEBUG_DRIVER("KMS initialized.\n");
492 amdgpu_dm_fini(adev
);
497 static void amdgpu_dm_fini(struct amdgpu_device
*adev
)
499 amdgpu_dm_destroy_drm_device(&adev
->dm
);
501 * TODO: pageflip, vlank interrupt
503 * amdgpu_dm_irq_fini(adev);
506 if (adev
->dm
.cgs_device
) {
507 amdgpu_cgs_destroy_device(adev
->dm
.cgs_device
);
508 adev
->dm
.cgs_device
= NULL
;
510 if (adev
->dm
.freesync_module
) {
511 mod_freesync_destroy(adev
->dm
.freesync_module
);
512 adev
->dm
.freesync_module
= NULL
;
514 /* DC Destroy TODO: Replace destroy DAL */
516 dc_destroy(&adev
->dm
.dc
);
/* amd_ip_funcs.sw_init stub — DM software state is set up in dm_hw_init(). */
static int dm_sw_init(void *handle)
{
	return 0;
}
/* amd_ip_funcs.sw_fini stub — teardown happens in dm_hw_fini(). */
static int dm_sw_fini(void *handle)
{
	return 0;
}
530 static int detect_mst_link_for_all_connectors(struct drm_device
*dev
)
532 struct amdgpu_dm_connector
*aconnector
;
533 struct drm_connector
*connector
;
536 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
538 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
539 aconnector
= to_amdgpu_dm_connector(connector
);
540 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
541 aconnector
->mst_mgr
.aux
) {
542 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
543 aconnector
, aconnector
->base
.base
.id
);
545 ret
= drm_dp_mst_topology_mgr_set_mst(&aconnector
->mst_mgr
, true);
547 DRM_ERROR("DM_MST: Failed to start MST\n");
548 ((struct dc_link
*)aconnector
->dc_link
)->type
= dc_connection_single
;
554 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
558 static int dm_late_init(void *handle
)
560 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
562 return detect_mst_link_for_all_connectors(adev
->ddev
);
565 static void s3_handle_mst(struct drm_device
*dev
, bool suspend
)
567 struct amdgpu_dm_connector
*aconnector
;
568 struct drm_connector
*connector
;
570 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
572 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
573 aconnector
= to_amdgpu_dm_connector(connector
);
574 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
575 !aconnector
->mst_port
) {
578 drm_dp_mst_topology_mgr_suspend(&aconnector
->mst_mgr
);
580 drm_dp_mst_topology_mgr_resume(&aconnector
->mst_mgr
);
584 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
/* amd_ip_funcs.hw_init: create the display manager and enable HPD. */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/* amd_ip_funcs.hw_fini: disable HPD and IRQs, then tear down the DM. */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
608 static int dm_suspend(void *handle
)
610 struct amdgpu_device
*adev
= handle
;
611 struct amdgpu_display_manager
*dm
= &adev
->dm
;
614 s3_handle_mst(adev
->ddev
, true);
616 amdgpu_dm_irq_suspend(adev
);
618 WARN_ON(adev
->dm
.cached_state
);
619 adev
->dm
.cached_state
= drm_atomic_helper_suspend(adev
->ddev
);
621 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D3
);
626 static struct amdgpu_dm_connector
*
627 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state
*state
,
628 struct drm_crtc
*crtc
)
631 struct drm_connector_state
*new_con_state
;
632 struct drm_connector
*connector
;
633 struct drm_crtc
*crtc_from_state
;
635 for_each_new_connector_in_state(state
, connector
, new_con_state
, i
) {
636 crtc_from_state
= new_con_state
->crtc
;
638 if (crtc_from_state
== crtc
)
639 return to_amdgpu_dm_connector(connector
);
645 static int dm_resume(void *handle
)
647 struct amdgpu_device
*adev
= handle
;
648 struct drm_device
*ddev
= adev
->ddev
;
649 struct amdgpu_display_manager
*dm
= &adev
->dm
;
650 struct amdgpu_dm_connector
*aconnector
;
651 struct drm_connector
*connector
;
652 struct drm_crtc
*crtc
;
653 struct drm_crtc_state
*new_crtc_state
;
654 struct dm_crtc_state
*dm_new_crtc_state
;
655 struct drm_plane
*plane
;
656 struct drm_plane_state
*new_plane_state
;
657 struct dm_plane_state
*dm_new_plane_state
;
661 /* power on hardware */
662 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D0
);
664 /* program HPD filter */
667 /* On resume we need to rewrite the MSTM control bits to enamble MST*/
668 s3_handle_mst(ddev
, false);
671 * early enable HPD Rx IRQ, should be done before set mode as short
672 * pulse interrupts are used for MST
674 amdgpu_dm_irq_resume_early(adev
);
677 list_for_each_entry(connector
, &ddev
->mode_config
.connector_list
, head
) {
678 aconnector
= to_amdgpu_dm_connector(connector
);
681 * this is the case when traversing through already created
682 * MST connectors, should be skipped
684 if (aconnector
->mst_port
)
687 mutex_lock(&aconnector
->hpd_lock
);
688 dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
);
690 if (aconnector
->fake_enable
&& aconnector
->dc_link
->local_sink
)
691 aconnector
->fake_enable
= false;
693 aconnector
->dc_sink
= NULL
;
694 amdgpu_dm_update_connector_after_detect(aconnector
);
695 mutex_unlock(&aconnector
->hpd_lock
);
698 /* Force mode set in atomic comit */
699 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
)
700 new_crtc_state
->active_changed
= true;
703 * atomic_check is expected to create the dc states. We need to release
704 * them here, since they were duplicated as part of the suspend
707 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
) {
708 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
709 if (dm_new_crtc_state
->stream
) {
710 WARN_ON(kref_read(&dm_new_crtc_state
->stream
->refcount
) > 1);
711 dc_stream_release(dm_new_crtc_state
->stream
);
712 dm_new_crtc_state
->stream
= NULL
;
716 for_each_new_plane_in_state(dm
->cached_state
, plane
, new_plane_state
, i
) {
717 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
718 if (dm_new_plane_state
->dc_state
) {
719 WARN_ON(kref_read(&dm_new_plane_state
->dc_state
->refcount
) > 1);
720 dc_plane_state_release(dm_new_plane_state
->dc_state
);
721 dm_new_plane_state
->dc_state
= NULL
;
725 ret
= drm_atomic_helper_resume(ddev
, dm
->cached_state
);
727 dm
->cached_state
= NULL
;
729 amdgpu_dm_irq_resume_late(adev
);
734 static const struct amd_ip_funcs amdgpu_dm_funcs
= {
736 .early_init
= dm_early_init
,
737 .late_init
= dm_late_init
,
738 .sw_init
= dm_sw_init
,
739 .sw_fini
= dm_sw_fini
,
740 .hw_init
= dm_hw_init
,
741 .hw_fini
= dm_hw_fini
,
742 .suspend
= dm_suspend
,
744 .is_idle
= dm_is_idle
,
745 .wait_for_idle
= dm_wait_for_idle
,
746 .check_soft_reset
= dm_check_soft_reset
,
747 .soft_reset
= dm_soft_reset
,
748 .set_clockgating_state
= dm_set_clockgating_state
,
749 .set_powergating_state
= dm_set_powergating_state
,
752 const struct amdgpu_ip_block_version dm_ip_block
=
754 .type
= AMD_IP_BLOCK_TYPE_DCE
,
758 .funcs
= &amdgpu_dm_funcs
,
762 static struct drm_atomic_state
*
763 dm_atomic_state_alloc(struct drm_device
*dev
)
765 struct dm_atomic_state
*state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
770 if (drm_atomic_state_init(dev
, &state
->base
) < 0)
781 dm_atomic_state_clear(struct drm_atomic_state
*state
)
783 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
785 if (dm_state
->context
) {
786 dc_release_state(dm_state
->context
);
787 dm_state
->context
= NULL
;
790 drm_atomic_state_default_clear(state
);
/* drm_mode_config_funcs.atomic_state_free: release base resources, then free
 * the wrapper allocated in dm_atomic_state_alloc(). */
static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	drm_atomic_state_default_release(state);
	kfree(dm_state);
}
801 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs
= {
802 .fb_create
= amdgpu_display_user_framebuffer_create
,
803 .output_poll_changed
= drm_fb_helper_output_poll_changed
,
804 .atomic_check
= amdgpu_dm_atomic_check
,
805 .atomic_commit
= amdgpu_dm_atomic_commit
,
806 .atomic_state_alloc
= dm_atomic_state_alloc
,
807 .atomic_state_clear
= dm_atomic_state_clear
,
808 .atomic_state_free
= dm_atomic_state_alloc_free
811 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs
= {
812 .atomic_commit_tail
= amdgpu_dm_atomic_commit_tail
816 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
)
818 struct drm_connector
*connector
= &aconnector
->base
;
819 struct drm_device
*dev
= connector
->dev
;
820 struct dc_sink
*sink
;
822 /* MST handled by drm_mst framework */
823 if (aconnector
->mst_mgr
.mst_state
== true)
827 sink
= aconnector
->dc_link
->local_sink
;
829 /* Edid mgmt connector gets first update only in mode_valid hook and then
830 * the connector sink is set to either fake or physical sink depends on link status.
831 * don't do it here if u are during boot
833 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
834 && aconnector
->dc_em_sink
) {
836 /* For S3 resume with headless use eml_sink to fake stream
837 * because on resume connecotr->sink is set ti NULL
839 mutex_lock(&dev
->mode_config
.mutex
);
842 if (aconnector
->dc_sink
) {
843 amdgpu_dm_remove_sink_from_freesync_module(
845 /* retain and release bellow are used for
846 * bump up refcount for sink because the link don't point
847 * to it anymore after disconnect so on next crtc to connector
848 * reshuffle by UMD we will get into unwanted dc_sink release
850 if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
851 dc_sink_release(aconnector
->dc_sink
);
853 aconnector
->dc_sink
= sink
;
854 amdgpu_dm_add_sink_to_freesync_module(
855 connector
, aconnector
->edid
);
857 amdgpu_dm_remove_sink_from_freesync_module(connector
);
858 if (!aconnector
->dc_sink
)
859 aconnector
->dc_sink
= aconnector
->dc_em_sink
;
860 else if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
861 dc_sink_retain(aconnector
->dc_sink
);
864 mutex_unlock(&dev
->mode_config
.mutex
);
869 * TODO: temporary guard to look for proper fix
870 * if this sink is MST sink, we should not do anything
872 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
)
875 if (aconnector
->dc_sink
== sink
) {
876 /* We got a DP short pulse (Link Loss, DP CTS, etc...).
878 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
879 aconnector
->connector_id
);
883 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
884 aconnector
->connector_id
, aconnector
->dc_sink
, sink
);
886 mutex_lock(&dev
->mode_config
.mutex
);
888 /* 1. Update status of the drm connector
889 * 2. Send an event and let userspace tell us what to do */
891 /* TODO: check if we still need the S3 mode update workaround.
892 * If yes, put it here. */
893 if (aconnector
->dc_sink
)
894 amdgpu_dm_remove_sink_from_freesync_module(
897 aconnector
->dc_sink
= sink
;
898 if (sink
->dc_edid
.length
== 0) {
899 aconnector
->edid
= NULL
;
902 (struct edid
*) sink
->dc_edid
.raw_edid
;
905 drm_mode_connector_update_edid_property(connector
,
908 amdgpu_dm_add_sink_to_freesync_module(connector
, aconnector
->edid
);
911 amdgpu_dm_remove_sink_from_freesync_module(connector
);
912 drm_mode_connector_update_edid_property(connector
, NULL
);
913 aconnector
->num_modes
= 0;
914 aconnector
->dc_sink
= NULL
;
915 aconnector
->edid
= NULL
;
918 mutex_unlock(&dev
->mode_config
.mutex
);
921 static void handle_hpd_irq(void *param
)
923 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
924 struct drm_connector
*connector
= &aconnector
->base
;
925 struct drm_device
*dev
= connector
->dev
;
927 /* In case of failure or MST no need to update connector status or notify the OS
928 * since (for MST case) MST does this in it's own context.
930 mutex_lock(&aconnector
->hpd_lock
);
932 if (aconnector
->fake_enable
)
933 aconnector
->fake_enable
= false;
935 if (dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
)) {
936 amdgpu_dm_update_connector_after_detect(aconnector
);
939 drm_modeset_lock_all(dev
);
940 dm_restore_drm_connector_state(dev
, connector
);
941 drm_modeset_unlock_all(dev
);
943 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
944 drm_kms_helper_hotplug_event(dev
);
946 mutex_unlock(&aconnector
->hpd_lock
);
950 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector
*aconnector
)
952 uint8_t esi
[DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
] = { 0 };
954 bool new_irq_handled
= false;
956 int dpcd_bytes_to_read
;
958 const int max_process_count
= 30;
959 int process_count
= 0;
961 const struct dc_link_status
*link_status
= dc_link_get_status(aconnector
->dc_link
);
963 if (link_status
->dpcd_caps
->dpcd_rev
.raw
< 0x12) {
964 dpcd_bytes_to_read
= DP_LANE0_1_STATUS
- DP_SINK_COUNT
;
965 /* DPCD 0x200 - 0x201 for downstream IRQ */
966 dpcd_addr
= DP_SINK_COUNT
;
968 dpcd_bytes_to_read
= DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
;
969 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
970 dpcd_addr
= DP_SINK_COUNT_ESI
;
973 dret
= drm_dp_dpcd_read(
974 &aconnector
->dm_dp_aux
.aux
,
979 while (dret
== dpcd_bytes_to_read
&&
980 process_count
< max_process_count
) {
986 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi
[0], esi
[1], esi
[2]);
987 /* handle HPD short pulse irq */
988 if (aconnector
->mst_mgr
.mst_state
)
990 &aconnector
->mst_mgr
,
994 if (new_irq_handled
) {
995 /* ACK at DPCD to notify down stream */
996 const int ack_dpcd_bytes_to_write
=
997 dpcd_bytes_to_read
- 1;
999 for (retry
= 0; retry
< 3; retry
++) {
1002 wret
= drm_dp_dpcd_write(
1003 &aconnector
->dm_dp_aux
.aux
,
1006 ack_dpcd_bytes_to_write
);
1007 if (wret
== ack_dpcd_bytes_to_write
)
1011 /* check if there is new irq to be handle */
1012 dret
= drm_dp_dpcd_read(
1013 &aconnector
->dm_dp_aux
.aux
,
1016 dpcd_bytes_to_read
);
1018 new_irq_handled
= false;
1024 if (process_count
== max_process_count
)
1025 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
1028 static void handle_hpd_rx_irq(void *param
)
1030 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
1031 struct drm_connector
*connector
= &aconnector
->base
;
1032 struct drm_device
*dev
= connector
->dev
;
1033 struct dc_link
*dc_link
= aconnector
->dc_link
;
1034 bool is_mst_root_connector
= aconnector
->mst_mgr
.mst_state
;
1036 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
1037 * conflict, after implement i2c helper, this mutex should be
1040 if (dc_link
->type
!= dc_connection_mst_branch
)
1041 mutex_lock(&aconnector
->hpd_lock
);
1043 if (dc_link_handle_hpd_rx_irq(dc_link
, NULL
) &&
1044 !is_mst_root_connector
) {
1045 /* Downstream Port status changed. */
1046 if (dc_link_detect(dc_link
, DETECT_REASON_HPDRX
)) {
1048 if (aconnector
->fake_enable
)
1049 aconnector
->fake_enable
= false;
1051 amdgpu_dm_update_connector_after_detect(aconnector
);
1054 drm_modeset_lock_all(dev
);
1055 dm_restore_drm_connector_state(dev
, connector
);
1056 drm_modeset_unlock_all(dev
);
1058 drm_kms_helper_hotplug_event(dev
);
1061 if ((dc_link
->cur_link_settings
.lane_count
!= LANE_COUNT_UNKNOWN
) ||
1062 (dc_link
->type
== dc_connection_mst_branch
))
1063 dm_handle_hpd_rx_irq(aconnector
);
1065 if (dc_link
->type
!= dc_connection_mst_branch
)
1066 mutex_unlock(&aconnector
->hpd_lock
);
1069 static void register_hpd_handlers(struct amdgpu_device
*adev
)
1071 struct drm_device
*dev
= adev
->ddev
;
1072 struct drm_connector
*connector
;
1073 struct amdgpu_dm_connector
*aconnector
;
1074 const struct dc_link
*dc_link
;
1075 struct dc_interrupt_params int_params
= {0};
1077 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1078 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1080 list_for_each_entry(connector
,
1081 &dev
->mode_config
.connector_list
, head
) {
1083 aconnector
= to_amdgpu_dm_connector(connector
);
1084 dc_link
= aconnector
->dc_link
;
1086 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd
) {
1087 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1088 int_params
.irq_source
= dc_link
->irq_source_hpd
;
1090 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1092 (void *) aconnector
);
1095 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd_rx
) {
1097 /* Also register for DP short pulse (hpd_rx). */
1098 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1099 int_params
.irq_source
= dc_link
->irq_source_hpd_rx
;
1101 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1103 (void *) aconnector
);
1108 /* Register IRQ sources and initialize IRQ callbacks */
1109 static int dce110_register_irq_handlers(struct amdgpu_device
*adev
)
1111 struct dc
*dc
= adev
->dm
.dc
;
1112 struct common_irq_params
*c_irq_params
;
1113 struct dc_interrupt_params int_params
= {0};
1116 unsigned client_id
= AMDGPU_IH_CLIENTID_LEGACY
;
1118 if (adev
->asic_type
== CHIP_VEGA10
||
1119 adev
->asic_type
== CHIP_VEGA12
||
1120 adev
->asic_type
== CHIP_VEGA20
||
1121 adev
->asic_type
== CHIP_RAVEN
)
1122 client_id
= SOC15_IH_CLIENTID_DCE
;
1124 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1125 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1127 /* Actions of amdgpu_irq_add_id():
1128 * 1. Register a set() function with base driver.
1129 * Base driver will call set() function to enable/disable an
1130 * interrupt in DC hardware.
1131 * 2. Register amdgpu_dm_irq_handler().
1132 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1133 * coming from DC hardware.
1134 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1135 * for acknowledging and handling. */
1137 /* Use VBLANK interrupt */
1138 for (i
= VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
; i
<= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0
; i
++) {
1139 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->crtc_irq
);
1141 DRM_ERROR("Failed to add crtc irq id!\n");
1145 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1146 int_params
.irq_source
=
1147 dc_interrupt_to_irq_source(dc
, i
, 0);
1149 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1151 c_irq_params
->adev
= adev
;
1152 c_irq_params
->irq_src
= int_params
.irq_source
;
1154 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1155 dm_crtc_high_irq
, c_irq_params
);
1158 /* Use GRPH_PFLIP interrupt */
1159 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
1160 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
1161 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
1163 DRM_ERROR("Failed to add page flip irq id!\n");
1167 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1168 int_params
.irq_source
=
1169 dc_interrupt_to_irq_source(dc
, i
, 0);
1171 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1173 c_irq_params
->adev
= adev
;
1174 c_irq_params
->irq_src
= int_params
.irq_source
;
1176 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1177 dm_pflip_high_irq
, c_irq_params
);
1182 r
= amdgpu_irq_add_id(adev
, client_id
,
1183 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
1185 DRM_ERROR("Failed to add hpd irq id!\n");
1189 register_hpd_handlers(adev
);
1194 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1195 /* Register IRQ sources and initialize IRQ callbacks */
1196 static int dcn10_register_irq_handlers(struct amdgpu_device
*adev
)
1198 struct dc
*dc
= adev
->dm
.dc
;
1199 struct common_irq_params
*c_irq_params
;
1200 struct dc_interrupt_params int_params
= {0};
1204 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1205 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1207 /* Actions of amdgpu_irq_add_id():
1208 * 1. Register a set() function with base driver.
1209 * Base driver will call set() function to enable/disable an
1210 * interrupt in DC hardware.
1211 * 2. Register amdgpu_dm_irq_handler().
1212 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1213 * coming from DC hardware.
1214 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1215 * for acknowledging and handling.
1218 /* Use VSTARTUP interrupt */
1219 for (i
= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
;
1220 i
<= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
+ adev
->mode_info
.num_crtc
- 1;
1222 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->crtc_irq
);
1225 DRM_ERROR("Failed to add crtc irq id!\n");
1229 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1230 int_params
.irq_source
=
1231 dc_interrupt_to_irq_source(dc
, i
, 0);
1233 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1235 c_irq_params
->adev
= adev
;
1236 c_irq_params
->irq_src
= int_params
.irq_source
;
1238 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1239 dm_crtc_high_irq
, c_irq_params
);
1242 /* Use GRPH_PFLIP interrupt */
1243 for (i
= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
;
1244 i
<= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
+ adev
->mode_info
.num_crtc
- 1;
1246 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->pageflip_irq
);
1248 DRM_ERROR("Failed to add page flip irq id!\n");
1252 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1253 int_params
.irq_source
=
1254 dc_interrupt_to_irq_source(dc
, i
, 0);
1256 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1258 c_irq_params
->adev
= adev
;
1259 c_irq_params
->irq_src
= int_params
.irq_source
;
1261 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1262 dm_pflip_high_irq
, c_irq_params
);
1267 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, DCN_1_0__SRCID__DC_HPD1_INT
,
1270 DRM_ERROR("Failed to add hpd irq id!\n");
1274 register_hpd_handlers(adev
);
1280 static int amdgpu_dm_mode_config_init(struct amdgpu_device
*adev
)
1284 adev
->mode_info
.mode_config_initialized
= true;
1286 adev
->ddev
->mode_config
.funcs
= (void *)&amdgpu_dm_mode_funcs
;
1287 adev
->ddev
->mode_config
.helper_private
= &amdgpu_dm_mode_config_helperfuncs
;
1289 adev
->ddev
->mode_config
.max_width
= 16384;
1290 adev
->ddev
->mode_config
.max_height
= 16384;
1292 adev
->ddev
->mode_config
.preferred_depth
= 24;
1293 adev
->ddev
->mode_config
.prefer_shadow
= 1;
1294 /* indicate support of immediate flip */
1295 adev
->ddev
->mode_config
.async_page_flip
= true;
1297 adev
->ddev
->mode_config
.fb_base
= adev
->gmc
.aper_base
;
1299 r
= amdgpu_display_modeset_create_props(adev
);
1306 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1307 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1309 static int amdgpu_dm_backlight_update_status(struct backlight_device
*bd
)
1311 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
1313 if (dc_link_set_backlight_level(dm
->backlight_link
,
1314 bd
->props
.brightness
, 0, 0))
1320 static int amdgpu_dm_backlight_get_brightness(struct backlight_device
*bd
)
1322 return bd
->props
.brightness
;
1325 static const struct backlight_ops amdgpu_dm_backlight_ops
= {
1326 .get_brightness
= amdgpu_dm_backlight_get_brightness
,
1327 .update_status
= amdgpu_dm_backlight_update_status
,
1331 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager
*dm
)
1334 struct backlight_properties props
= { 0 };
1336 props
.max_brightness
= AMDGPU_MAX_BL_LEVEL
;
1337 props
.type
= BACKLIGHT_RAW
;
1339 snprintf(bl_name
, sizeof(bl_name
), "amdgpu_bl%d",
1340 dm
->adev
->ddev
->primary
->index
);
1342 dm
->backlight_dev
= backlight_device_register(bl_name
,
1343 dm
->adev
->ddev
->dev
,
1345 &amdgpu_dm_backlight_ops
,
1348 if (IS_ERR(dm
->backlight_dev
))
1349 DRM_ERROR("DM: Backlight registration failed!\n");
1351 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name
);
1356 static int initialize_plane(struct amdgpu_display_manager
*dm
,
1357 struct amdgpu_mode_info
*mode_info
,
1360 struct amdgpu_plane
*plane
;
1361 unsigned long possible_crtcs
;
1364 plane
= kzalloc(sizeof(struct amdgpu_plane
), GFP_KERNEL
);
1365 mode_info
->planes
[plane_id
] = plane
;
1368 DRM_ERROR("KMS: Failed to allocate plane\n");
1371 plane
->base
.type
= mode_info
->plane_type
[plane_id
];
1374 * HACK: IGT tests expect that each plane can only have one
1375 * one possible CRTC. For now, set one CRTC for each
1376 * plane that is not an underlay, but still allow multiple
1377 * CRTCs for underlay planes.
1379 possible_crtcs
= 1 << plane_id
;
1380 if (plane_id
>= dm
->dc
->caps
.max_streams
)
1381 possible_crtcs
= 0xff;
1383 ret
= amdgpu_dm_plane_init(dm
, mode_info
->planes
[plane_id
], possible_crtcs
);
1386 DRM_ERROR("KMS: Failed to initialize plane\n");
/* Attach a backlight device to @link when it drives an eDP/LVDS panel
 * that is actually connected. No-op when backlight support is not built.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/* Event if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better then a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}
1415 /* In this architecture, the association
1416 * connector -> encoder -> crtc
1417 * id not really requried. The crtc and connector will hold the
1418 * display_index as an abstraction to use with DAL component
1420 * Returns 0 on success
1422 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
)
1424 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1426 struct amdgpu_dm_connector
*aconnector
= NULL
;
1427 struct amdgpu_encoder
*aencoder
= NULL
;
1428 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
1430 int32_t total_overlay_planes
, total_primary_planes
;
1432 link_cnt
= dm
->dc
->caps
.max_links
;
1433 if (amdgpu_dm_mode_config_init(dm
->adev
)) {
1434 DRM_ERROR("DM: Failed to initialize mode config\n");
1438 /* Identify the number of planes to be initialized */
1439 total_overlay_planes
= dm
->dc
->caps
.max_slave_planes
;
1440 total_primary_planes
= dm
->dc
->caps
.max_planes
- dm
->dc
->caps
.max_slave_planes
;
1442 /* First initialize overlay planes, index starting after primary planes */
1443 for (i
= (total_overlay_planes
- 1); i
>= 0; i
--) {
1444 if (initialize_plane(dm
, mode_info
, (total_primary_planes
+ i
))) {
1445 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
1450 /* Initialize primary planes */
1451 for (i
= (total_primary_planes
- 1); i
>= 0; i
--) {
1452 if (initialize_plane(dm
, mode_info
, i
)) {
1453 DRM_ERROR("KMS: Failed to initialize primary plane\n");
1458 for (i
= 0; i
< dm
->dc
->caps
.max_streams
; i
++)
1459 if (amdgpu_dm_crtc_init(dm
, &mode_info
->planes
[i
]->base
, i
)) {
1460 DRM_ERROR("KMS: Failed to initialize crtc\n");
1464 dm
->display_indexes_num
= dm
->dc
->caps
.max_streams
;
1466 /* loops over all connectors on the board */
1467 for (i
= 0; i
< link_cnt
; i
++) {
1468 struct dc_link
*link
= NULL
;
1470 if (i
> AMDGPU_DM_MAX_DISPLAY_INDEX
) {
1472 "KMS: Cannot support more than %d display indexes\n",
1473 AMDGPU_DM_MAX_DISPLAY_INDEX
);
1477 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
1481 aencoder
= kzalloc(sizeof(*aencoder
), GFP_KERNEL
);
1485 if (amdgpu_dm_encoder_init(dm
->ddev
, aencoder
, i
)) {
1486 DRM_ERROR("KMS: Failed to initialize encoder\n");
1490 if (amdgpu_dm_connector_init(dm
, aconnector
, i
, aencoder
)) {
1491 DRM_ERROR("KMS: Failed to initialize connector\n");
1495 link
= dc_get_link_at_index(dm
->dc
, i
);
1497 if (dc_link_detect(link
, DETECT_REASON_BOOT
)) {
1498 amdgpu_dm_update_connector_after_detect(aconnector
);
1499 register_backlight_device(dm
, link
);
1505 /* Software is initialized. Now we can register interrupt handlers. */
1506 switch (adev
->asic_type
) {
1516 case CHIP_POLARIS11
:
1517 case CHIP_POLARIS10
:
1518 case CHIP_POLARIS12
:
1523 if (dce110_register_irq_handlers(dm
->adev
)) {
1524 DRM_ERROR("DM: Failed to initialize IRQ\n");
1528 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1530 if (dcn10_register_irq_handlers(dm
->adev
)) {
1531 DRM_ERROR("DM: Failed to initialize IRQ\n");
1535 * Temporary disable until pplib/smu interaction is implemented
1537 dm
->dc
->debug
.disable_stutter
= true;
1541 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
1549 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; i
++)
1550 kfree(mode_info
->planes
[i
]);
1554 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
)
1556 drm_mode_config_cleanup(dm
->ddev
);
1560 /******************************************************************************
1561 * amdgpu_display_funcs functions
1562 *****************************************************************************/
1565 * dm_bandwidth_update - program display watermarks
1567 * @adev: amdgpu_device pointer
1569 * Calculate and program the display watermarks and line buffer allocation.
/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
1576 static void dm_set_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
,
1579 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1582 static u8
dm_get_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
)
1584 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1588 static int amdgpu_notify_freesync(struct drm_device
*dev
, void *data
,
1589 struct drm_file
*filp
)
1591 struct mod_freesync_params freesync_params
;
1592 uint8_t num_streams
;
1595 struct amdgpu_device
*adev
= dev
->dev_private
;
1598 /* Get freesync enable flag from DRM */
1600 num_streams
= dc_get_current_stream_count(adev
->dm
.dc
);
1602 for (i
= 0; i
< num_streams
; i
++) {
1603 struct dc_stream_state
*stream
;
1604 stream
= dc_get_stream_at_index(adev
->dm
.dc
, i
);
1606 mod_freesync_update_state(adev
->dm
.freesync_module
,
1607 &stream
, 1, &freesync_params
);
1613 static const struct amdgpu_display_funcs dm_display_funcs
= {
1614 .bandwidth_update
= dm_bandwidth_update
, /* called unconditionally */
1615 .vblank_get_counter
= dm_vblank_get_counter
,/* called unconditionally */
1616 .backlight_set_level
=
1617 dm_set_backlight_level
,/* called unconditionally */
1618 .backlight_get_level
=
1619 dm_get_backlight_level
,/* called unconditionally */
1620 .hpd_sense
= NULL
,/* called unconditionally */
1621 .hpd_set_polarity
= NULL
, /* called unconditionally */
1622 .hpd_get_gpio_reg
= NULL
, /* VBIOS parsing. DAL does it. */
1623 .page_flip_get_scanoutpos
=
1624 dm_crtc_get_scanoutpos
,/* called unconditionally */
1625 .add_encoder
= NULL
, /* VBIOS parsing. DAL does it. */
1626 .add_connector
= NULL
, /* VBIOS parsing. DAL does it. */
1627 .notify_freesync
= amdgpu_notify_freesync
,
#if defined(CONFIG_DEBUG_KERNEL_DC)

/* Debug sysfs store: parse an integer and drive a fake S3 suspend/resume
 * cycle of the DM, firing a hotplug event on resume.
 * Returns @count on successful parse, 0 otherwise.
 *
 * NOTE(review): the suspend/resume calls were reconstructed — only the
 * kstrtoint, hotplug event and return expression are visible in the
 * extracted source; verify against upstream.
 */
static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif
1661 static int dm_early_init(void *handle
)
1663 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1665 switch (adev
->asic_type
) {
1668 adev
->mode_info
.num_crtc
= 6;
1669 adev
->mode_info
.num_hpd
= 6;
1670 adev
->mode_info
.num_dig
= 6;
1671 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1674 adev
->mode_info
.num_crtc
= 4;
1675 adev
->mode_info
.num_hpd
= 6;
1676 adev
->mode_info
.num_dig
= 7;
1677 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1681 adev
->mode_info
.num_crtc
= 2;
1682 adev
->mode_info
.num_hpd
= 6;
1683 adev
->mode_info
.num_dig
= 6;
1684 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1688 adev
->mode_info
.num_crtc
= 6;
1689 adev
->mode_info
.num_hpd
= 6;
1690 adev
->mode_info
.num_dig
= 7;
1691 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1694 adev
->mode_info
.num_crtc
= 3;
1695 adev
->mode_info
.num_hpd
= 6;
1696 adev
->mode_info
.num_dig
= 9;
1697 adev
->mode_info
.plane_type
= dm_plane_type_carizzo
;
1700 adev
->mode_info
.num_crtc
= 2;
1701 adev
->mode_info
.num_hpd
= 6;
1702 adev
->mode_info
.num_dig
= 9;
1703 adev
->mode_info
.plane_type
= dm_plane_type_stoney
;
1705 case CHIP_POLARIS11
:
1706 case CHIP_POLARIS12
:
1707 adev
->mode_info
.num_crtc
= 5;
1708 adev
->mode_info
.num_hpd
= 5;
1709 adev
->mode_info
.num_dig
= 5;
1710 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1712 case CHIP_POLARIS10
:
1714 adev
->mode_info
.num_crtc
= 6;
1715 adev
->mode_info
.num_hpd
= 6;
1716 adev
->mode_info
.num_dig
= 6;
1717 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1722 adev
->mode_info
.num_crtc
= 6;
1723 adev
->mode_info
.num_hpd
= 6;
1724 adev
->mode_info
.num_dig
= 6;
1725 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1727 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1729 adev
->mode_info
.num_crtc
= 4;
1730 adev
->mode_info
.num_hpd
= 4;
1731 adev
->mode_info
.num_dig
= 4;
1732 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1736 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
1740 amdgpu_dm_set_irq_funcs(adev
);
1742 if (adev
->mode_info
.funcs
== NULL
)
1743 adev
->mode_info
.funcs
= &dm_display_funcs
;
1745 /* Note: Do NOT change adev->audio_endpt_rreg and
1746 * adev->audio_endpt_wreg because they are initialised in
1747 * amdgpu_device_init() */
1748 #if defined(CONFIG_DEBUG_KERNEL_DC)
1751 &dev_attr_s3_debug
);
1757 static bool modeset_required(struct drm_crtc_state
*crtc_state
,
1758 struct dc_stream_state
*new_stream
,
1759 struct dc_stream_state
*old_stream
)
1761 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1764 if (!crtc_state
->enable
)
1767 return crtc_state
->active
;
1770 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
1772 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1775 return !crtc_state
->enable
|| !crtc_state
->active
;
/* drm_encoder_funcs.destroy: clean up the encoder and release its memory.
 * NOTE(review): the kfree() is reconstructed (encoders are kzalloc'd in
 * amdgpu_dm_initialize_drm_device) — confirm against upstream.
 */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
1784 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
1785 .destroy
= amdgpu_dm_encoder_destroy
,
1788 static bool fill_rects_from_plane_state(const struct drm_plane_state
*state
,
1789 struct dc_plane_state
*plane_state
)
1791 plane_state
->src_rect
.x
= state
->src_x
>> 16;
1792 plane_state
->src_rect
.y
= state
->src_y
>> 16;
1793 /*we ignore for now mantissa and do not to deal with floating pixels :(*/
1794 plane_state
->src_rect
.width
= state
->src_w
>> 16;
1796 if (plane_state
->src_rect
.width
== 0)
1799 plane_state
->src_rect
.height
= state
->src_h
>> 16;
1800 if (plane_state
->src_rect
.height
== 0)
1803 plane_state
->dst_rect
.x
= state
->crtc_x
;
1804 plane_state
->dst_rect
.y
= state
->crtc_y
;
1806 if (state
->crtc_w
== 0)
1809 plane_state
->dst_rect
.width
= state
->crtc_w
;
1811 if (state
->crtc_h
== 0)
1814 plane_state
->dst_rect
.height
= state
->crtc_h
;
1816 plane_state
->clip_rect
= plane_state
->dst_rect
;
1818 switch (state
->rotation
& DRM_MODE_ROTATE_MASK
) {
1819 case DRM_MODE_ROTATE_0
:
1820 plane_state
->rotation
= ROTATION_ANGLE_0
;
1822 case DRM_MODE_ROTATE_90
:
1823 plane_state
->rotation
= ROTATION_ANGLE_90
;
1825 case DRM_MODE_ROTATE_180
:
1826 plane_state
->rotation
= ROTATION_ANGLE_180
;
1828 case DRM_MODE_ROTATE_270
:
1829 plane_state
->rotation
= ROTATION_ANGLE_270
;
1832 plane_state
->rotation
= ROTATION_ANGLE_0
;
1838 static int get_fb_info(const struct amdgpu_framebuffer
*amdgpu_fb
,
1839 uint64_t *tiling_flags
)
1841 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->base
.obj
[0]);
1842 int r
= amdgpu_bo_reserve(rbo
, false);
1845 // Don't show error msg. when return -ERESTARTSYS
1846 if (r
!= -ERESTARTSYS
)
1847 DRM_ERROR("Unable to reserve buffer: %d\n", r
);
1852 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
1854 amdgpu_bo_unreserve(rbo
);
1859 static int fill_plane_attributes_from_fb(struct amdgpu_device
*adev
,
1860 struct dc_plane_state
*plane_state
,
1861 const struct amdgpu_framebuffer
*amdgpu_fb
)
1863 uint64_t tiling_flags
;
1864 unsigned int awidth
;
1865 const struct drm_framebuffer
*fb
= &amdgpu_fb
->base
;
1867 struct drm_format_name_buf format_name
;
1876 switch (fb
->format
->format
) {
1878 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
1880 case DRM_FORMAT_RGB565
:
1881 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
1883 case DRM_FORMAT_XRGB8888
:
1884 case DRM_FORMAT_ARGB8888
:
1885 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
1887 case DRM_FORMAT_XRGB2101010
:
1888 case DRM_FORMAT_ARGB2101010
:
1889 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
1891 case DRM_FORMAT_XBGR2101010
:
1892 case DRM_FORMAT_ABGR2101010
:
1893 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
1895 case DRM_FORMAT_NV21
:
1896 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
1898 case DRM_FORMAT_NV12
:
1899 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
1902 DRM_ERROR("Unsupported screen format %s\n",
1903 drm_get_format_name(fb
->format
->format
, &format_name
));
1907 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
1908 plane_state
->address
.type
= PLN_ADDR_TYPE_GRAPHICS
;
1909 plane_state
->plane_size
.grph
.surface_size
.x
= 0;
1910 plane_state
->plane_size
.grph
.surface_size
.y
= 0;
1911 plane_state
->plane_size
.grph
.surface_size
.width
= fb
->width
;
1912 plane_state
->plane_size
.grph
.surface_size
.height
= fb
->height
;
1913 plane_state
->plane_size
.grph
.surface_pitch
=
1914 fb
->pitches
[0] / fb
->format
->cpp
[0];
1915 /* TODO: unhardcode */
1916 plane_state
->color_space
= COLOR_SPACE_SRGB
;
1919 awidth
= ALIGN(fb
->width
, 64);
1920 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
1921 plane_state
->plane_size
.video
.luma_size
.x
= 0;
1922 plane_state
->plane_size
.video
.luma_size
.y
= 0;
1923 plane_state
->plane_size
.video
.luma_size
.width
= awidth
;
1924 plane_state
->plane_size
.video
.luma_size
.height
= fb
->height
;
1925 /* TODO: unhardcode */
1926 plane_state
->plane_size
.video
.luma_pitch
= awidth
;
1928 plane_state
->plane_size
.video
.chroma_size
.x
= 0;
1929 plane_state
->plane_size
.video
.chroma_size
.y
= 0;
1930 plane_state
->plane_size
.video
.chroma_size
.width
= awidth
;
1931 plane_state
->plane_size
.video
.chroma_size
.height
= fb
->height
;
1932 plane_state
->plane_size
.video
.chroma_pitch
= awidth
/ 2;
1934 /* TODO: unhardcode */
1935 plane_state
->color_space
= COLOR_SPACE_YCBCR709
;
1938 memset(&plane_state
->tiling_info
, 0, sizeof(plane_state
->tiling_info
));
1940 /* Fill GFX8 params */
1941 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
) {
1942 unsigned int bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
1944 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
1945 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
1946 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
1947 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
1948 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
1950 /* XXX fix me for VI */
1951 plane_state
->tiling_info
.gfx8
.num_banks
= num_banks
;
1952 plane_state
->tiling_info
.gfx8
.array_mode
=
1953 DC_ARRAY_2D_TILED_THIN1
;
1954 plane_state
->tiling_info
.gfx8
.tile_split
= tile_split
;
1955 plane_state
->tiling_info
.gfx8
.bank_width
= bankw
;
1956 plane_state
->tiling_info
.gfx8
.bank_height
= bankh
;
1957 plane_state
->tiling_info
.gfx8
.tile_aspect
= mtaspect
;
1958 plane_state
->tiling_info
.gfx8
.tile_mode
=
1959 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
1960 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
1961 == DC_ARRAY_1D_TILED_THIN1
) {
1962 plane_state
->tiling_info
.gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
1965 plane_state
->tiling_info
.gfx8
.pipe_config
=
1966 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
1968 if (adev
->asic_type
== CHIP_VEGA10
||
1969 adev
->asic_type
== CHIP_VEGA12
||
1970 adev
->asic_type
== CHIP_VEGA20
||
1971 adev
->asic_type
== CHIP_RAVEN
) {
1972 /* Fill GFX9 params */
1973 plane_state
->tiling_info
.gfx9
.num_pipes
=
1974 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
1975 plane_state
->tiling_info
.gfx9
.num_banks
=
1976 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
1977 plane_state
->tiling_info
.gfx9
.pipe_interleave
=
1978 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
1979 plane_state
->tiling_info
.gfx9
.num_shader_engines
=
1980 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
1981 plane_state
->tiling_info
.gfx9
.max_compressed_frags
=
1982 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
1983 plane_state
->tiling_info
.gfx9
.num_rb_per_se
=
1984 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
1985 plane_state
->tiling_info
.gfx9
.swizzle
=
1986 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
1987 plane_state
->tiling_info
.gfx9
.shaderEnable
= 1;
1990 plane_state
->visible
= true;
1991 plane_state
->scaling_quality
.h_taps_c
= 0;
1992 plane_state
->scaling_quality
.v_taps_c
= 0;
1994 /* is this needed? is plane_state zeroed at allocation? */
1995 plane_state
->scaling_quality
.h_taps
= 0;
1996 plane_state
->scaling_quality
.v_taps
= 0;
1997 plane_state
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
2003 static int fill_plane_attributes(struct amdgpu_device
*adev
,
2004 struct dc_plane_state
*dc_plane_state
,
2005 struct drm_plane_state
*plane_state
,
2006 struct drm_crtc_state
*crtc_state
)
2008 const struct amdgpu_framebuffer
*amdgpu_fb
=
2009 to_amdgpu_framebuffer(plane_state
->fb
);
2010 const struct drm_crtc
*crtc
= plane_state
->crtc
;
2013 if (!fill_rects_from_plane_state(plane_state
, dc_plane_state
))
2016 ret
= fill_plane_attributes_from_fb(
2017 crtc
->dev
->dev_private
,
2025 * Always set input transfer function, since plane state is refreshed
2028 ret
= amdgpu_dm_set_degamma_lut(crtc_state
, dc_plane_state
);
2030 dc_transfer_func_release(dc_plane_state
->in_transfer_func
);
2031 dc_plane_state
->in_transfer_func
= NULL
;
2037 /*****************************************************************************/
2039 static void update_stream_scaling_settings(const struct drm_display_mode
*mode
,
2040 const struct dm_connector_state
*dm_state
,
2041 struct dc_stream_state
*stream
)
2043 enum amdgpu_rmx_type rmx_type
;
2045 struct rect src
= { 0 }; /* viewport in composition space*/
2046 struct rect dst
= { 0 }; /* stream addressable area */
2048 /* no mode. nothing to be done */
2052 /* Full screen scaling by default */
2053 src
.width
= mode
->hdisplay
;
2054 src
.height
= mode
->vdisplay
;
2055 dst
.width
= stream
->timing
.h_addressable
;
2056 dst
.height
= stream
->timing
.v_addressable
;
2059 rmx_type
= dm_state
->scaling
;
2060 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
2061 if (src
.width
* dst
.height
<
2062 src
.height
* dst
.width
) {
2063 /* height needs less upscaling/more downscaling */
2064 dst
.width
= src
.width
*
2065 dst
.height
/ src
.height
;
2067 /* width needs less upscaling/more downscaling */
2068 dst
.height
= src
.height
*
2069 dst
.width
/ src
.width
;
2071 } else if (rmx_type
== RMX_CENTER
) {
2075 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
2076 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
2078 if (dm_state
->underscan_enable
) {
2079 dst
.x
+= dm_state
->underscan_hborder
/ 2;
2080 dst
.y
+= dm_state
->underscan_vborder
/ 2;
2081 dst
.width
-= dm_state
->underscan_hborder
;
2082 dst
.height
-= dm_state
->underscan_vborder
;
2089 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
2090 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
2094 static enum dc_color_depth
2095 convert_color_depth_from_display_info(const struct drm_connector
*connector
)
2097 uint32_t bpc
= connector
->display_info
.bpc
;
2101 /* Temporary Work around, DRM don't parse color depth for
2102 * EDID revision before 1.4
2103 * TODO: Fix edid parsing
2105 return COLOR_DEPTH_888
;
2107 return COLOR_DEPTH_666
;
2109 return COLOR_DEPTH_888
;
2111 return COLOR_DEPTH_101010
;
2113 return COLOR_DEPTH_121212
;
2115 return COLOR_DEPTH_141414
;
2117 return COLOR_DEPTH_161616
;
2119 return COLOR_DEPTH_UNDEFINED
;
2123 static enum dc_aspect_ratio
2124 get_aspect_ratio(const struct drm_display_mode
*mode_in
)
2126 int32_t width
= mode_in
->crtc_hdisplay
* 9;
2127 int32_t height
= mode_in
->crtc_vdisplay
* 16;
2129 if ((width
- height
) < 10 && (width
- height
) > -10)
2130 return ASPECT_RATIO_16_9
;
2132 return ASPECT_RATIO_4_3
;
2135 static enum dc_color_space
2136 get_output_color_space(const struct dc_crtc_timing
*dc_crtc_timing
)
2138 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
2140 switch (dc_crtc_timing
->pixel_encoding
) {
2141 case PIXEL_ENCODING_YCBCR422
:
2142 case PIXEL_ENCODING_YCBCR444
:
2143 case PIXEL_ENCODING_YCBCR420
:
2146 * 27030khz is the separation point between HDTV and SDTV
2147 * according to HDMI spec, we use YCbCr709 and YCbCr601
2150 if (dc_crtc_timing
->pix_clk_khz
> 27030) {
2151 if (dc_crtc_timing
->flags
.Y_ONLY
)
2153 COLOR_SPACE_YCBCR709_LIMITED
;
2155 color_space
= COLOR_SPACE_YCBCR709
;
2157 if (dc_crtc_timing
->flags
.Y_ONLY
)
2159 COLOR_SPACE_YCBCR601_LIMITED
;
2161 color_space
= COLOR_SPACE_YCBCR601
;
2166 case PIXEL_ENCODING_RGB
:
2167 color_space
= COLOR_SPACE_SRGB
;
2178 /*****************************************************************************/
2181 fill_stream_properties_from_drm_display_mode(struct dc_stream_state
*stream
,
2182 const struct drm_display_mode
*mode_in
,
2183 const struct drm_connector
*connector
)
2185 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
2187 memset(timing_out
, 0, sizeof(struct dc_crtc_timing
));
2189 timing_out
->h_border_left
= 0;
2190 timing_out
->h_border_right
= 0;
2191 timing_out
->v_border_top
= 0;
2192 timing_out
->v_border_bottom
= 0;
2193 /* TODO: un-hardcode */
2195 if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
2196 && stream
->sink
->sink_signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
2197 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
2199 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
2201 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
2202 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
2204 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
2205 timing_out
->hdmi_vic
= 0;
2206 timing_out
->vic
= drm_match_cea_mode(mode_in
);
2208 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
2209 timing_out
->h_total
= mode_in
->crtc_htotal
;
2210 timing_out
->h_sync_width
=
2211 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
2212 timing_out
->h_front_porch
=
2213 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
2214 timing_out
->v_total
= mode_in
->crtc_vtotal
;
2215 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
2216 timing_out
->v_front_porch
=
2217 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
2218 timing_out
->v_sync_width
=
2219 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
2220 timing_out
->pix_clk_khz
= mode_in
->crtc_clock
;
2221 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
2222 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
2223 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
2224 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
2225 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
2227 stream
->output_color_space
= get_output_color_space(timing_out
);
2229 stream
->out_transfer_func
->type
= TF_TYPE_PREDEFINED
;
2230 stream
->out_transfer_func
->tf
= TRANSFER_FUNCTION_SRGB
;
2233 static void fill_audio_info(struct audio_info
*audio_info
,
2234 const struct drm_connector
*drm_connector
,
2235 const struct dc_sink
*dc_sink
)
2238 int cea_revision
= 0;
2239 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
2241 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
2242 audio_info
->product_id
= edid_caps
->product_id
;
2244 cea_revision
= drm_connector
->display_info
.cea_rev
;
2246 strncpy(audio_info
->display_name
,
2247 edid_caps
->display_name
,
2248 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
- 1);
2250 if (cea_revision
>= 3) {
2251 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
2253 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
2254 audio_info
->modes
[i
].format_code
=
2255 (enum audio_format_code
)
2256 (edid_caps
->audio_modes
[i
].format_code
);
2257 audio_info
->modes
[i
].channel_count
=
2258 edid_caps
->audio_modes
[i
].channel_count
;
2259 audio_info
->modes
[i
].sample_rates
.all
=
2260 edid_caps
->audio_modes
[i
].sample_rate
;
2261 audio_info
->modes
[i
].sample_size
=
2262 edid_caps
->audio_modes
[i
].sample_size
;
2266 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
2268 /* TODO: We only check for the progressive mode, check for interlace mode too */
2269 if (drm_connector
->latency_present
[0]) {
2270 audio_info
->video_latency
= drm_connector
->video_latency
[0];
2271 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
2274 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2279 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode
*src_mode
,
2280 struct drm_display_mode
*dst_mode
)
2282 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
2283 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
2284 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
2285 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
2286 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
2287 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
2288 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
2289 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
2290 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
2291 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
2292 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
2293 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
2294 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
2295 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
2299 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode
*drm_mode
,
2300 const struct drm_display_mode
*native_mode
,
2303 if (scale_enabled
) {
2304 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2305 } else if (native_mode
->clock
== drm_mode
->clock
&&
2306 native_mode
->htotal
== drm_mode
->htotal
&&
2307 native_mode
->vtotal
== drm_mode
->vtotal
) {
2308 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2310 /* no scaling nor amdgpu inserted, no need to patch */
2314 static struct dc_sink
*
2315 create_fake_sink(struct amdgpu_dm_connector
*aconnector
)
2317 struct dc_sink_init_data sink_init_data
= { 0 };
2318 struct dc_sink
*sink
= NULL
;
2319 sink_init_data
.link
= aconnector
->dc_link
;
2320 sink_init_data
.sink_signal
= aconnector
->dc_link
->connector_signal
;
2322 sink
= dc_sink_create(&sink_init_data
);
2324 DRM_ERROR("Failed to create sink!\n");
2327 sink
->sink_signal
= SIGNAL_TYPE_VIRTUAL
;
2332 static void set_multisync_trigger_params(
2333 struct dc_stream_state
*stream
)
2335 if (stream
->triggered_crtc_reset
.enabled
) {
2336 stream
->triggered_crtc_reset
.event
= CRTC_EVENT_VSYNC_RISING
;
2337 stream
->triggered_crtc_reset
.delay
= TRIGGER_DELAY_NEXT_LINE
;
2341 static void set_master_stream(struct dc_stream_state
*stream_set
[],
2344 int j
, highest_rfr
= 0, master_stream
= 0;
2346 for (j
= 0; j
< stream_count
; j
++) {
2347 if (stream_set
[j
] && stream_set
[j
]->triggered_crtc_reset
.enabled
) {
2348 int refresh_rate
= 0;
2350 refresh_rate
= (stream_set
[j
]->timing
.pix_clk_khz
*1000)/
2351 (stream_set
[j
]->timing
.h_total
*stream_set
[j
]->timing
.v_total
);
2352 if (refresh_rate
> highest_rfr
) {
2353 highest_rfr
= refresh_rate
;
2358 for (j
= 0; j
< stream_count
; j
++) {
2360 stream_set
[j
]->triggered_crtc_reset
.event_source
= stream_set
[master_stream
];
2364 static void dm_enable_per_frame_crtc_master_sync(struct dc_state
*context
)
2368 if (context
->stream_count
< 2)
2370 for (i
= 0; i
< context
->stream_count
; i
++) {
2371 if (!context
->streams
[i
])
2373 /* TODO: add a function to read AMD VSDB bits and will set
2374 * crtc_sync_master.multi_sync_enabled flag
2375 * For now its set to false
2377 set_multisync_trigger_params(context
->streams
[i
]);
2379 set_master_stream(context
->streams
, context
->stream_count
);
2382 static struct dc_stream_state
*
2383 create_stream_for_sink(struct amdgpu_dm_connector
*aconnector
,
2384 const struct drm_display_mode
*drm_mode
,
2385 const struct dm_connector_state
*dm_state
)
2387 struct drm_display_mode
*preferred_mode
= NULL
;
2388 struct drm_connector
*drm_connector
;
2389 struct dc_stream_state
*stream
= NULL
;
2390 struct drm_display_mode mode
= *drm_mode
;
2391 bool native_mode_found
= false;
2392 struct dc_sink
*sink
= NULL
;
2393 if (aconnector
== NULL
) {
2394 DRM_ERROR("aconnector is NULL!\n");
2398 drm_connector
= &aconnector
->base
;
2400 if (!aconnector
->dc_sink
) {
2402 * Create dc_sink when necessary to MST
2403 * Don't apply fake_sink to MST
2405 if (aconnector
->mst_port
) {
2406 dm_dp_mst_dc_sink_create(drm_connector
);
2410 sink
= create_fake_sink(aconnector
);
2414 sink
= aconnector
->dc_sink
;
2417 stream
= dc_create_stream_for_sink(sink
);
2419 if (stream
== NULL
) {
2420 DRM_ERROR("Failed to create stream for sink!\n");
2424 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
2425 /* Search for preferred mode */
2426 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
2427 native_mode_found
= true;
2431 if (!native_mode_found
)
2432 preferred_mode
= list_first_entry_or_null(
2433 &aconnector
->base
.modes
,
2434 struct drm_display_mode
,
2437 if (preferred_mode
== NULL
) {
2438 /* This may not be an error, the use case is when we we have no
2439 * usermode calls to reset and set mode upon hotplug. In this
2440 * case, we call set mode ourselves to restore the previous mode
2441 * and the modelist may not be filled in in time.
2443 DRM_DEBUG_DRIVER("No preferred mode found\n");
2445 decide_crtc_timing_for_drm_display_mode(
2446 &mode
, preferred_mode
,
2447 dm_state
? (dm_state
->scaling
!= RMX_OFF
) : false);
2451 drm_mode_set_crtcinfo(&mode
, 0);
2453 fill_stream_properties_from_drm_display_mode(stream
,
2454 &mode
, &aconnector
->base
);
2455 update_stream_scaling_settings(&mode
, dm_state
, stream
);
2458 &stream
->audio_info
,
2462 update_stream_signal(stream
);
2464 if (dm_state
&& dm_state
->freesync_capable
)
2465 stream
->ignore_msa_timing_param
= true;
2467 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_VIRTUAL
)
2468 dc_sink_release(sink
);
/* drm_crtc_funcs.destroy: tear down the CRTC and free its container. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
2479 static void dm_crtc_destroy_state(struct drm_crtc
*crtc
,
2480 struct drm_crtc_state
*state
)
2482 struct dm_crtc_state
*cur
= to_dm_crtc_state(state
);
2484 /* TODO Destroy dc_stream objects are stream object is flattened */
2486 dc_stream_release(cur
->stream
);
2489 __drm_atomic_helper_crtc_destroy_state(state
);
2495 static void dm_crtc_reset_state(struct drm_crtc
*crtc
)
2497 struct dm_crtc_state
*state
;
2500 dm_crtc_destroy_state(crtc
, crtc
->state
);
2502 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2503 if (WARN_ON(!state
))
2506 crtc
->state
= &state
->base
;
2507 crtc
->state
->crtc
= crtc
;
2511 static struct drm_crtc_state
*
2512 dm_crtc_duplicate_state(struct drm_crtc
*crtc
)
2514 struct dm_crtc_state
*state
, *cur
;
2516 cur
= to_dm_crtc_state(crtc
->state
);
2518 if (WARN_ON(!crtc
->state
))
2521 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2525 __drm_atomic_helper_crtc_duplicate_state(crtc
, &state
->base
);
2528 state
->stream
= cur
->stream
;
2529 dc_stream_retain(state
->stream
);
2532 /* TODO Duplicate dc_stream after objects are stream object is flattened */
2534 return &state
->base
;
2538 static inline int dm_set_vblank(struct drm_crtc
*crtc
, bool enable
)
2540 enum dc_irq_source irq_source
;
2541 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2542 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
2544 irq_source
= IRQ_TYPE_VBLANK
+ acrtc
->otg_inst
;
2545 return dc_interrupt_set(adev
->dm
.dc
, irq_source
, enable
) ? 0 : -EBUSY
;
2548 static int dm_enable_vblank(struct drm_crtc
*crtc
)
2550 return dm_set_vblank(crtc
, true);
2553 static void dm_disable_vblank(struct drm_crtc
*crtc
)
2555 dm_set_vblank(crtc
, false);
2558 /* Implemented only the options currently availible for the driver */
2559 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
2560 .reset
= dm_crtc_reset_state
,
2561 .destroy
= amdgpu_dm_crtc_destroy
,
2562 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
2563 .set_config
= drm_atomic_helper_set_config
,
2564 .page_flip
= drm_atomic_helper_page_flip
,
2565 .atomic_duplicate_state
= dm_crtc_duplicate_state
,
2566 .atomic_destroy_state
= dm_crtc_destroy_state
,
2567 .set_crc_source
= amdgpu_dm_crtc_set_crc_source
,
2568 .enable_vblank
= dm_enable_vblank
,
2569 .disable_vblank
= dm_disable_vblank
,
2572 static enum drm_connector_status
2573 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
2576 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2579 * 1. This interface is NOT called in context of HPD irq.
2580 * 2. This interface *is called* in context of user-mode ioctl. Which
2581 * makes it a bad place for *any* MST-related activit. */
2583 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
&&
2584 !aconnector
->fake_enable
)
2585 connected
= (aconnector
->dc_sink
!= NULL
);
2587 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
2589 return (connected
? connector_status_connected
:
2590 connector_status_disconnected
);
2593 int amdgpu_dm_connector_atomic_set_property(struct drm_connector
*connector
,
2594 struct drm_connector_state
*connector_state
,
2595 struct drm_property
*property
,
2598 struct drm_device
*dev
= connector
->dev
;
2599 struct amdgpu_device
*adev
= dev
->dev_private
;
2600 struct dm_connector_state
*dm_old_state
=
2601 to_dm_connector_state(connector
->state
);
2602 struct dm_connector_state
*dm_new_state
=
2603 to_dm_connector_state(connector_state
);
2607 if (property
== dev
->mode_config
.scaling_mode_property
) {
2608 enum amdgpu_rmx_type rmx_type
;
2611 case DRM_MODE_SCALE_CENTER
:
2612 rmx_type
= RMX_CENTER
;
2614 case DRM_MODE_SCALE_ASPECT
:
2615 rmx_type
= RMX_ASPECT
;
2617 case DRM_MODE_SCALE_FULLSCREEN
:
2618 rmx_type
= RMX_FULL
;
2620 case DRM_MODE_SCALE_NONE
:
2626 if (dm_old_state
->scaling
== rmx_type
)
2629 dm_new_state
->scaling
= rmx_type
;
2631 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2632 dm_new_state
->underscan_hborder
= val
;
2634 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2635 dm_new_state
->underscan_vborder
= val
;
2637 } else if (property
== adev
->mode_info
.underscan_property
) {
2638 dm_new_state
->underscan_enable
= val
;
2645 int amdgpu_dm_connector_atomic_get_property(struct drm_connector
*connector
,
2646 const struct drm_connector_state
*state
,
2647 struct drm_property
*property
,
2650 struct drm_device
*dev
= connector
->dev
;
2651 struct amdgpu_device
*adev
= dev
->dev_private
;
2652 struct dm_connector_state
*dm_state
=
2653 to_dm_connector_state(state
);
2656 if (property
== dev
->mode_config
.scaling_mode_property
) {
2657 switch (dm_state
->scaling
) {
2659 *val
= DRM_MODE_SCALE_CENTER
;
2662 *val
= DRM_MODE_SCALE_ASPECT
;
2665 *val
= DRM_MODE_SCALE_FULLSCREEN
;
2669 *val
= DRM_MODE_SCALE_NONE
;
2673 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2674 *val
= dm_state
->underscan_hborder
;
2676 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2677 *val
= dm_state
->underscan_vborder
;
2679 } else if (property
== adev
->mode_info
.underscan_property
) {
2680 *val
= dm_state
->underscan_enable
;
2686 static void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
2688 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2689 const struct dc_link
*link
= aconnector
->dc_link
;
2690 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
2691 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2693 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2694 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2696 if ((link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) &&
2697 link
->type
!= dc_connection_none
&&
2698 dm
->backlight_dev
) {
2699 backlight_device_unregister(dm
->backlight_dev
);
2700 dm
->backlight_dev
= NULL
;
2703 drm_connector_unregister(connector
);
2704 drm_connector_cleanup(connector
);
2708 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
2710 struct dm_connector_state
*state
=
2711 to_dm_connector_state(connector
->state
);
2713 if (connector
->state
)
2714 __drm_atomic_helper_connector_destroy_state(connector
->state
);
2718 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2721 state
->scaling
= RMX_OFF
;
2722 state
->underscan_enable
= false;
2723 state
->underscan_hborder
= 0;
2724 state
->underscan_vborder
= 0;
2726 __drm_atomic_helper_connector_reset(connector
, &state
->base
);
2730 struct drm_connector_state
*
2731 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector
*connector
)
2733 struct dm_connector_state
*state
=
2734 to_dm_connector_state(connector
->state
);
2736 struct dm_connector_state
*new_state
=
2737 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
2740 __drm_atomic_helper_connector_duplicate_state(connector
,
2742 return &new_state
->base
;
2748 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
2749 .reset
= amdgpu_dm_connector_funcs_reset
,
2750 .detect
= amdgpu_dm_connector_detect
,
2751 .fill_modes
= drm_helper_probe_single_connector_modes
,
2752 .destroy
= amdgpu_dm_connector_destroy
,
2753 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
2754 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
2755 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
2756 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
2759 static struct drm_encoder
*best_encoder(struct drm_connector
*connector
)
2761 int enc_id
= connector
->encoder_ids
[0];
2762 struct drm_mode_object
*obj
;
2763 struct drm_encoder
*encoder
;
2765 DRM_DEBUG_DRIVER("Finding the best encoder\n");
2767 /* pick the encoder ids */
2769 obj
= drm_mode_object_find(connector
->dev
, NULL
, enc_id
, DRM_MODE_OBJECT_ENCODER
);
2771 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2774 encoder
= obj_to_encoder(obj
);
2777 DRM_ERROR("No encoder id\n");
/* drm_connector_helper_funcs.get_modes thin wrapper. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
2786 static void create_eml_sink(struct amdgpu_dm_connector
*aconnector
)
2788 struct dc_sink_init_data init_params
= {
2789 .link
= aconnector
->dc_link
,
2790 .sink_signal
= SIGNAL_TYPE_VIRTUAL
2794 if (!aconnector
->base
.edid_blob_ptr
) {
2795 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
2796 aconnector
->base
.name
);
2798 aconnector
->base
.force
= DRM_FORCE_OFF
;
2799 aconnector
->base
.override_edid
= false;
2803 edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
2805 aconnector
->edid
= edid
;
2807 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
2808 aconnector
->dc_link
,
2810 (edid
->extensions
+ 1) * EDID_LENGTH
,
2813 if (aconnector
->base
.force
== DRM_FORCE_ON
)
2814 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
2815 aconnector
->dc_link
->local_sink
:
2816 aconnector
->dc_em_sink
;
2819 static void handle_edid_mgmt(struct amdgpu_dm_connector
*aconnector
)
2821 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
2823 /* In case of headless boot with force on for DP managed connector
2824 * Those settings have to be != 0 to get initial modeset
2826 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
2827 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
2828 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
2832 aconnector
->base
.override_edid
= true;
2833 create_eml_sink(aconnector
);
2836 enum drm_mode_status
amdgpu_dm_connector_mode_valid(struct drm_connector
*connector
,
2837 struct drm_display_mode
*mode
)
2839 int result
= MODE_ERROR
;
2840 struct dc_sink
*dc_sink
;
2841 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
2842 /* TODO: Unhardcode stream count */
2843 struct dc_stream_state
*stream
;
2844 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2845 enum dc_status dc_result
= DC_OK
;
2847 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
2848 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
2851 /* Only run this the first time mode_valid is called to initilialize
2854 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
2855 !aconnector
->dc_em_sink
)
2856 handle_edid_mgmt(aconnector
);
2858 dc_sink
= to_amdgpu_dm_connector(connector
)->dc_sink
;
2860 if (dc_sink
== NULL
) {
2861 DRM_ERROR("dc_sink is NULL!\n");
2865 stream
= create_stream_for_sink(aconnector
, mode
, NULL
);
2866 if (stream
== NULL
) {
2867 DRM_ERROR("Failed to create stream for sink!\n");
2871 dc_result
= dc_validate_stream(adev
->dm
.dc
, stream
);
2873 if (dc_result
== DC_OK
)
2876 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
2882 dc_stream_release(stream
);
2885 /* TODO: error handling*/
2889 static const struct drm_connector_helper_funcs
2890 amdgpu_dm_connector_helper_funcs
= {
2892 * If hotplug a second bigger display in FB Con mode, bigger resolution
2893 * modes will be filtered by drm_mode_validate_size(), and those modes
2894 * is missing after user start lightdm. So we need to renew modes list.
2895 * in get_modes call back, not just return the modes count
2897 .get_modes
= get_modes
,
2898 .mode_valid
= amdgpu_dm_connector_mode_valid
,
2899 .best_encoder
= best_encoder
/* drm_crtc_helper_funcs.disable: intentionally empty — disabling is handled
 * through the atomic commit path.
 */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
2906 static int dm_crtc_helper_atomic_check(struct drm_crtc
*crtc
,
2907 struct drm_crtc_state
*state
)
2909 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
2910 struct dc
*dc
= adev
->dm
.dc
;
2911 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(state
);
2914 if (unlikely(!dm_crtc_state
->stream
&&
2915 modeset_required(state
, NULL
, dm_crtc_state
->stream
))) {
2920 /* In some use cases, like reset, no stream is attached */
2921 if (!dm_crtc_state
->stream
)
2924 if (dc_validate_stream(dc
, dm_crtc_state
->stream
) == DC_OK
)
2930 static bool dm_crtc_helper_mode_fixup(struct drm_crtc
*crtc
,
2931 const struct drm_display_mode
*mode
,
2932 struct drm_display_mode
*adjusted_mode
)
2937 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
2938 .disable
= dm_crtc_helper_disable
,
2939 .atomic_check
= dm_crtc_helper_atomic_check
,
2940 .mode_fixup
= dm_crtc_helper_mode_fixup
/* drm_encoder_helper_funcs.disable: intentionally a no-op. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
/* drm_encoder_helper_funcs.atomic_check: nothing to validate here. */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	return 0;
}
2955 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
2956 .disable
= dm_encoder_helper_disable
,
2957 .atomic_check
= dm_encoder_helper_atomic_check
2960 static void dm_drm_plane_reset(struct drm_plane
*plane
)
2962 struct dm_plane_state
*amdgpu_state
= NULL
;
2965 plane
->funcs
->atomic_destroy_state(plane
, plane
->state
);
2967 amdgpu_state
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
2968 WARN_ON(amdgpu_state
== NULL
);
2971 plane
->state
= &amdgpu_state
->base
;
2972 plane
->state
->plane
= plane
;
2973 plane
->state
->rotation
= DRM_MODE_ROTATE_0
;
2977 static struct drm_plane_state
*
2978 dm_drm_plane_duplicate_state(struct drm_plane
*plane
)
2980 struct dm_plane_state
*dm_plane_state
, *old_dm_plane_state
;
2982 old_dm_plane_state
= to_dm_plane_state(plane
->state
);
2983 dm_plane_state
= kzalloc(sizeof(*dm_plane_state
), GFP_KERNEL
);
2984 if (!dm_plane_state
)
2987 __drm_atomic_helper_plane_duplicate_state(plane
, &dm_plane_state
->base
);
2989 if (old_dm_plane_state
->dc_state
) {
2990 dm_plane_state
->dc_state
= old_dm_plane_state
->dc_state
;
2991 dc_plane_state_retain(dm_plane_state
->dc_state
);
2994 return &dm_plane_state
->base
;
2997 void dm_drm_plane_destroy_state(struct drm_plane
*plane
,
2998 struct drm_plane_state
*state
)
3000 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
3002 if (dm_plane_state
->dc_state
)
3003 dc_plane_state_release(dm_plane_state
->dc_state
);
3005 drm_atomic_helper_plane_destroy_state(plane
, state
);
3008 static const struct drm_plane_funcs dm_plane_funcs
= {
3009 .update_plane
= drm_atomic_helper_update_plane
,
3010 .disable_plane
= drm_atomic_helper_disable_plane
,
3011 .destroy
= drm_plane_cleanup
,
3012 .reset
= dm_drm_plane_reset
,
3013 .atomic_duplicate_state
= dm_drm_plane_duplicate_state
,
3014 .atomic_destroy_state
= dm_drm_plane_destroy_state
,
3017 static int dm_plane_helper_prepare_fb(struct drm_plane
*plane
,
3018 struct drm_plane_state
*new_state
)
3020 struct amdgpu_framebuffer
*afb
;
3021 struct drm_gem_object
*obj
;
3022 struct amdgpu_device
*adev
;
3023 struct amdgpu_bo
*rbo
;
3024 uint64_t chroma_addr
= 0;
3025 struct dm_plane_state
*dm_plane_state_new
, *dm_plane_state_old
;
3026 unsigned int awidth
;
3030 dm_plane_state_old
= to_dm_plane_state(plane
->state
);
3031 dm_plane_state_new
= to_dm_plane_state(new_state
);
3033 if (!new_state
->fb
) {
3034 DRM_DEBUG_DRIVER("No FB bound\n");
3038 afb
= to_amdgpu_framebuffer(new_state
->fb
);
3039 obj
= new_state
->fb
->obj
[0];
3040 rbo
= gem_to_amdgpu_bo(obj
);
3041 adev
= amdgpu_ttm_adev(rbo
->tbo
.bdev
);
3042 r
= amdgpu_bo_reserve(rbo
, false);
3043 if (unlikely(r
!= 0))
3046 if (plane
->type
!= DRM_PLANE_TYPE_CURSOR
)
3047 domain
= amdgpu_display_supported_domains(adev
);
3049 domain
= AMDGPU_GEM_DOMAIN_VRAM
;
3051 r
= amdgpu_bo_pin(rbo
, domain
, &afb
->address
);
3052 amdgpu_bo_unreserve(rbo
);
3054 if (unlikely(r
!= 0)) {
3055 if (r
!= -ERESTARTSYS
)
3056 DRM_ERROR("Failed to pin framebuffer with error %d\n", r
);
3062 if (dm_plane_state_new
->dc_state
&&
3063 dm_plane_state_old
->dc_state
!= dm_plane_state_new
->dc_state
) {
3064 struct dc_plane_state
*plane_state
= dm_plane_state_new
->dc_state
;
3066 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
3067 plane_state
->address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3068 plane_state
->address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3070 awidth
= ALIGN(new_state
->fb
->width
, 64);
3071 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
3072 plane_state
->address
.video_progressive
.luma_addr
.low_part
3073 = lower_32_bits(afb
->address
);
3074 plane_state
->address
.video_progressive
.luma_addr
.high_part
3075 = upper_32_bits(afb
->address
);
3076 chroma_addr
= afb
->address
+ (u64
)awidth
* new_state
->fb
->height
;
3077 plane_state
->address
.video_progressive
.chroma_addr
.low_part
3078 = lower_32_bits(chroma_addr
);
3079 plane_state
->address
.video_progressive
.chroma_addr
.high_part
3080 = upper_32_bits(chroma_addr
);
3087 static void dm_plane_helper_cleanup_fb(struct drm_plane
*plane
,
3088 struct drm_plane_state
*old_state
)
3090 struct amdgpu_bo
*rbo
;
3096 rbo
= gem_to_amdgpu_bo(old_state
->fb
->obj
[0]);
3097 r
= amdgpu_bo_reserve(rbo
, false);
3099 DRM_ERROR("failed to reserve rbo before unpin\n");
3103 amdgpu_bo_unpin(rbo
);
3104 amdgpu_bo_unreserve(rbo
);
3105 amdgpu_bo_unref(&rbo
);
3108 static int dm_plane_atomic_check(struct drm_plane
*plane
,
3109 struct drm_plane_state
*state
)
3111 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
3112 struct dc
*dc
= adev
->dm
.dc
;
3113 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
3115 if (!dm_plane_state
->dc_state
)
3118 if (!fill_rects_from_plane_state(state
, dm_plane_state
->dc_state
))
3121 if (dc_validate_plane(dc
, dm_plane_state
->dc_state
) == DC_OK
)
3127 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
3128 .prepare_fb
= dm_plane_helper_prepare_fb
,
3129 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
3130 .atomic_check
= dm_plane_atomic_check
,
3134 * TODO: these are currently initialized to rgb formats only.
3135 * For future use cases we should either initialize them dynamically based on
3136 * plane capabilities, or initialize this array to all formats, so internal drm
3137 * check will succeed, and let DC to implement proper check
3139 static const uint32_t rgb_formats
[] = {
3141 DRM_FORMAT_XRGB8888
,
3142 DRM_FORMAT_ARGB8888
,
3143 DRM_FORMAT_RGBA8888
,
3144 DRM_FORMAT_XRGB2101010
,
3145 DRM_FORMAT_XBGR2101010
,
3146 DRM_FORMAT_ARGB2101010
,
3147 DRM_FORMAT_ABGR2101010
,
3150 static const uint32_t yuv_formats
[] = {
3155 static const u32 cursor_formats
[] = {
3159 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
3160 struct amdgpu_plane
*aplane
,
3161 unsigned long possible_crtcs
)
3165 switch (aplane
->base
.type
) {
3166 case DRM_PLANE_TYPE_PRIMARY
:
3167 res
= drm_universal_plane_init(
3173 ARRAY_SIZE(rgb_formats
),
3174 NULL
, aplane
->base
.type
, NULL
);
3176 case DRM_PLANE_TYPE_OVERLAY
:
3177 res
= drm_universal_plane_init(
3183 ARRAY_SIZE(yuv_formats
),
3184 NULL
, aplane
->base
.type
, NULL
);
3186 case DRM_PLANE_TYPE_CURSOR
:
3187 res
= drm_universal_plane_init(
3193 ARRAY_SIZE(cursor_formats
),
3194 NULL
, aplane
->base
.type
, NULL
);
3198 drm_plane_helper_add(&aplane
->base
, &dm_plane_helper_funcs
);
3200 /* Create (reset) the plane state */
3201 if (aplane
->base
.funcs
->reset
)
3202 aplane
->base
.funcs
->reset(&aplane
->base
);
3208 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
3209 struct drm_plane
*plane
,
3210 uint32_t crtc_index
)
3212 struct amdgpu_crtc
*acrtc
= NULL
;
3213 struct amdgpu_plane
*cursor_plane
;
3217 cursor_plane
= kzalloc(sizeof(*cursor_plane
), GFP_KERNEL
);
3221 cursor_plane
->base
.type
= DRM_PLANE_TYPE_CURSOR
;
3222 res
= amdgpu_dm_plane_init(dm
, cursor_plane
, 0);
3224 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
3228 res
= drm_crtc_init_with_planes(
3232 &cursor_plane
->base
,
3233 &amdgpu_dm_crtc_funcs
, NULL
);
3238 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
3240 /* Create (reset) the plane state */
3241 if (acrtc
->base
.funcs
->reset
)
3242 acrtc
->base
.funcs
->reset(&acrtc
->base
);
3244 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3245 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3247 acrtc
->crtc_id
= crtc_index
;
3248 acrtc
->base
.enabled
= false;
3250 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
3251 drm_crtc_enable_color_mgmt(&acrtc
->base
, MAX_COLOR_LUT_ENTRIES
,
3252 true, MAX_COLOR_LUT_ENTRIES
);
3253 drm_mode_crtc_set_gamma_size(&acrtc
->base
, MAX_COLOR_LEGACY_LUT_ENTRIES
);
3259 kfree(cursor_plane
);
3264 static int to_drm_connector_type(enum signal_type st
)
3267 case SIGNAL_TYPE_HDMI_TYPE_A
:
3268 return DRM_MODE_CONNECTOR_HDMIA
;
3269 case SIGNAL_TYPE_EDP
:
3270 return DRM_MODE_CONNECTOR_eDP
;
3271 case SIGNAL_TYPE_RGB
:
3272 return DRM_MODE_CONNECTOR_VGA
;
3273 case SIGNAL_TYPE_DISPLAY_PORT
:
3274 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
3275 return DRM_MODE_CONNECTOR_DisplayPort
;
3276 case SIGNAL_TYPE_DVI_DUAL_LINK
:
3277 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
3278 return DRM_MODE_CONNECTOR_DVID
;
3279 case SIGNAL_TYPE_VIRTUAL
:
3280 return DRM_MODE_CONNECTOR_VIRTUAL
;
3283 return DRM_MODE_CONNECTOR_Unknown
;
3287 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
3289 const struct drm_connector_helper_funcs
*helper
=
3290 connector
->helper_private
;
3291 struct drm_encoder
*encoder
;
3292 struct amdgpu_encoder
*amdgpu_encoder
;
3294 encoder
= helper
->best_encoder(connector
);
3296 if (encoder
== NULL
)
3299 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3301 amdgpu_encoder
->native_mode
.clock
= 0;
3303 if (!list_empty(&connector
->probed_modes
)) {
3304 struct drm_display_mode
*preferred_mode
= NULL
;
3306 list_for_each_entry(preferred_mode
,
3307 &connector
->probed_modes
,
3309 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
)
3310 amdgpu_encoder
->native_mode
= *preferred_mode
;
3318 static struct drm_display_mode
*
3319 amdgpu_dm_create_common_mode(struct drm_encoder
*encoder
,
3321 int hdisplay
, int vdisplay
)
3323 struct drm_device
*dev
= encoder
->dev
;
3324 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3325 struct drm_display_mode
*mode
= NULL
;
3326 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3328 mode
= drm_mode_duplicate(dev
, native_mode
);
3333 mode
->hdisplay
= hdisplay
;
3334 mode
->vdisplay
= vdisplay
;
3335 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
3336 strncpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
3342 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
3343 struct drm_connector
*connector
)
3345 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3346 struct drm_display_mode
*mode
= NULL
;
3347 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3348 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3349 to_amdgpu_dm_connector(connector
);
3353 char name
[DRM_DISPLAY_MODE_LEN
];
3356 } common_modes
[] = {
3357 { "640x480", 640, 480},
3358 { "800x600", 800, 600},
3359 { "1024x768", 1024, 768},
3360 { "1280x720", 1280, 720},
3361 { "1280x800", 1280, 800},
3362 {"1280x1024", 1280, 1024},
3363 { "1440x900", 1440, 900},
3364 {"1680x1050", 1680, 1050},
3365 {"1600x1200", 1600, 1200},
3366 {"1920x1080", 1920, 1080},
3367 {"1920x1200", 1920, 1200}
3370 n
= ARRAY_SIZE(common_modes
);
3372 for (i
= 0; i
< n
; i
++) {
3373 struct drm_display_mode
*curmode
= NULL
;
3374 bool mode_existed
= false;
3376 if (common_modes
[i
].w
> native_mode
->hdisplay
||
3377 common_modes
[i
].h
> native_mode
->vdisplay
||
3378 (common_modes
[i
].w
== native_mode
->hdisplay
&&
3379 common_modes
[i
].h
== native_mode
->vdisplay
))
3382 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
3383 if (common_modes
[i
].w
== curmode
->hdisplay
&&
3384 common_modes
[i
].h
== curmode
->vdisplay
) {
3385 mode_existed
= true;
3393 mode
= amdgpu_dm_create_common_mode(encoder
,
3394 common_modes
[i
].name
, common_modes
[i
].w
,
3396 drm_mode_probed_add(connector
, mode
);
3397 amdgpu_dm_connector
->num_modes
++;
3401 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector
*connector
,
3404 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3405 to_amdgpu_dm_connector(connector
);
3408 /* empty probed_modes */
3409 INIT_LIST_HEAD(&connector
->probed_modes
);
3410 amdgpu_dm_connector
->num_modes
=
3411 drm_add_edid_modes(connector
, edid
);
3413 amdgpu_dm_get_native_mode(connector
);
3415 amdgpu_dm_connector
->num_modes
= 0;
3419 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
3421 const struct drm_connector_helper_funcs
*helper
=
3422 connector
->helper_private
;
3423 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3424 to_amdgpu_dm_connector(connector
);
3425 struct drm_encoder
*encoder
;
3426 struct edid
*edid
= amdgpu_dm_connector
->edid
;
3428 encoder
= helper
->best_encoder(connector
);
3429 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
3430 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
3432 #if defined(CONFIG_DRM_AMD_DC_FBC)
3433 amdgpu_dm_fbc_init(connector
);
3435 return amdgpu_dm_connector
->num_modes
;
3438 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager
*dm
,
3439 struct amdgpu_dm_connector
*aconnector
,
3441 struct dc_link
*link
,
3444 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
3446 aconnector
->connector_id
= link_index
;
3447 aconnector
->dc_link
= link
;
3448 aconnector
->base
.interlace_allowed
= false;
3449 aconnector
->base
.doublescan_allowed
= false;
3450 aconnector
->base
.stereo_allowed
= false;
3451 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
3452 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
3454 mutex_init(&aconnector
->hpd_lock
);
3456 /* configure support HPD hot plug connector_>polled default value is 0
3457 * which means HPD hot plug not supported
3459 switch (connector_type
) {
3460 case DRM_MODE_CONNECTOR_HDMIA
:
3461 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3463 case DRM_MODE_CONNECTOR_DisplayPort
:
3464 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3466 case DRM_MODE_CONNECTOR_DVID
:
3467 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3473 drm_object_attach_property(&aconnector
->base
.base
,
3474 dm
->ddev
->mode_config
.scaling_mode_property
,
3475 DRM_MODE_SCALE_NONE
);
3477 drm_object_attach_property(&aconnector
->base
.base
,
3478 adev
->mode_info
.underscan_property
,
3480 drm_object_attach_property(&aconnector
->base
.base
,
3481 adev
->mode_info
.underscan_hborder_property
,
3483 drm_object_attach_property(&aconnector
->base
.base
,
3484 adev
->mode_info
.underscan_vborder_property
,
3489 static int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
3490 struct i2c_msg
*msgs
, int num
)
3492 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
3493 struct ddc_service
*ddc_service
= i2c
->ddc_service
;
3494 struct i2c_command cmd
;
3498 cmd
.payloads
= kcalloc(num
, sizeof(struct i2c_payload
), GFP_KERNEL
);
3503 cmd
.number_of_payloads
= num
;
3504 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
3507 for (i
= 0; i
< num
; i
++) {
3508 cmd
.payloads
[i
].write
= !(msgs
[i
].flags
& I2C_M_RD
);
3509 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
3510 cmd
.payloads
[i
].length
= msgs
[i
].len
;
3511 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
3514 if (dal_i2caux_submit_i2c_command(
3515 ddc_service
->ctx
->i2caux
,
3516 ddc_service
->ddc_pin
,
3520 kfree(cmd
.payloads
);
3524 static u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
3526 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
3529 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
3530 .master_xfer
= amdgpu_dm_i2c_xfer
,
3531 .functionality
= amdgpu_dm_i2c_func
,
3534 static struct amdgpu_i2c_adapter
*
3535 create_i2c(struct ddc_service
*ddc_service
,
3539 struct amdgpu_device
*adev
= ddc_service
->ctx
->driver_context
;
3540 struct amdgpu_i2c_adapter
*i2c
;
3542 i2c
= kzalloc(sizeof(struct amdgpu_i2c_adapter
), GFP_KERNEL
);
3545 i2c
->base
.owner
= THIS_MODULE
;
3546 i2c
->base
.class = I2C_CLASS_DDC
;
3547 i2c
->base
.dev
.parent
= &adev
->pdev
->dev
;
3548 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
3549 snprintf(i2c
->base
.name
, sizeof(i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
3550 i2c_set_adapdata(&i2c
->base
, i2c
);
3551 i2c
->ddc_service
= ddc_service
;
3557 /* Note: this function assumes that dc_link_detect() was called for the
3558 * dc_link which will be represented by this aconnector.
3560 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
3561 struct amdgpu_dm_connector
*aconnector
,
3562 uint32_t link_index
,
3563 struct amdgpu_encoder
*aencoder
)
3567 struct dc
*dc
= dm
->dc
;
3568 struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
3569 struct amdgpu_i2c_adapter
*i2c
;
3571 link
->priv
= aconnector
;
3573 DRM_DEBUG_DRIVER("%s()\n", __func__
);
3575 i2c
= create_i2c(link
->ddc
, link
->link_index
, &res
);
3577 DRM_ERROR("Failed to create i2c adapter data\n");
3581 aconnector
->i2c
= i2c
;
3582 res
= i2c_add_adapter(&i2c
->base
);
3585 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
3589 connector_type
= to_drm_connector_type(link
->connector_signal
);
3591 res
= drm_connector_init(
3594 &amdgpu_dm_connector_funcs
,
3598 DRM_ERROR("connector_init failed\n");
3599 aconnector
->connector_id
= -1;
3603 drm_connector_helper_add(
3605 &amdgpu_dm_connector_helper_funcs
);
3607 if (aconnector
->base
.funcs
->reset
)
3608 aconnector
->base
.funcs
->reset(&aconnector
->base
);
3610 amdgpu_dm_connector_init_helper(
3617 drm_mode_connector_attach_encoder(
3618 &aconnector
->base
, &aencoder
->base
);
3620 drm_connector_register(&aconnector
->base
);
3622 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
3623 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
3624 amdgpu_dm_initialize_dp_connector(dm
, aconnector
);
3629 aconnector
->i2c
= NULL
;
3634 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
3636 switch (adev
->mode_info
.num_crtc
) {
3653 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
3654 struct amdgpu_encoder
*aencoder
,
3655 uint32_t link_index
)
3657 struct amdgpu_device
*adev
= dev
->dev_private
;
3659 int res
= drm_encoder_init(dev
,
3661 &amdgpu_dm_encoder_funcs
,
3662 DRM_MODE_ENCODER_TMDS
,
3665 aencoder
->base
.possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
3668 aencoder
->encoder_id
= link_index
;
3670 aencoder
->encoder_id
= -1;
3672 drm_encoder_helper_add(&aencoder
->base
, &amdgpu_dm_encoder_helper_funcs
);
3677 static void manage_dm_interrupts(struct amdgpu_device
*adev
,
3678 struct amdgpu_crtc
*acrtc
,
3682 * this is not correct translation but will work as soon as VBLANK
3683 * constant is the same as PFLIP
3686 amdgpu_display_crtc_idx_to_irq_type(
3691 drm_crtc_vblank_on(&acrtc
->base
);
3694 &adev
->pageflip_irq
,
3700 &adev
->pageflip_irq
,
3702 drm_crtc_vblank_off(&acrtc
->base
);
3707 is_scaling_state_different(const struct dm_connector_state
*dm_state
,
3708 const struct dm_connector_state
*old_dm_state
)
3710 if (dm_state
->scaling
!= old_dm_state
->scaling
)
3712 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
3713 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
3715 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
3716 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
3718 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
||
3719 dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
3724 static void remove_stream(struct amdgpu_device
*adev
,
3725 struct amdgpu_crtc
*acrtc
,
3726 struct dc_stream_state
*stream
)
3728 /* this is the update mode case */
3729 if (adev
->dm
.freesync_module
)
3730 mod_freesync_remove_stream(adev
->dm
.freesync_module
, stream
);
3732 acrtc
->otg_inst
= -1;
3733 acrtc
->enabled
= false;
3736 static int get_cursor_position(struct drm_plane
*plane
, struct drm_crtc
*crtc
,
3737 struct dc_cursor_position
*position
)
3739 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
3741 int xorigin
= 0, yorigin
= 0;
3743 if (!crtc
|| !plane
->state
->fb
) {
3744 position
->enable
= false;
3750 if ((plane
->state
->crtc_w
> amdgpu_crtc
->max_cursor_width
) ||
3751 (plane
->state
->crtc_h
> amdgpu_crtc
->max_cursor_height
)) {
3752 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3754 plane
->state
->crtc_w
,
3755 plane
->state
->crtc_h
);
3759 x
= plane
->state
->crtc_x
;
3760 y
= plane
->state
->crtc_y
;
3761 /* avivo cursor are offset into the total surface */
3762 x
+= crtc
->primary
->state
->src_x
>> 16;
3763 y
+= crtc
->primary
->state
->src_y
>> 16;
3765 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
3769 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
3772 position
->enable
= true;
3775 position
->x_hotspot
= xorigin
;
3776 position
->y_hotspot
= yorigin
;
3781 static void handle_cursor_update(struct drm_plane
*plane
,
3782 struct drm_plane_state
*old_plane_state
)
3784 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(plane
->state
->fb
);
3785 struct drm_crtc
*crtc
= afb
? plane
->state
->crtc
: old_plane_state
->crtc
;
3786 struct dm_crtc_state
*crtc_state
= crtc
? to_dm_crtc_state(crtc
->state
) : NULL
;
3787 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
3788 uint64_t address
= afb
? afb
->address
: 0;
3789 struct dc_cursor_position position
;
3790 struct dc_cursor_attributes attributes
;
3793 if (!plane
->state
->fb
&& !old_plane_state
->fb
)
3796 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
3798 amdgpu_crtc
->crtc_id
,
3799 plane
->state
->crtc_w
,
3800 plane
->state
->crtc_h
);
3802 ret
= get_cursor_position(plane
, crtc
, &position
);
3806 if (!position
.enable
) {
3807 /* turn off cursor */
3808 if (crtc_state
&& crtc_state
->stream
)
3809 dc_stream_set_cursor_position(crtc_state
->stream
,
3814 amdgpu_crtc
->cursor_width
= plane
->state
->crtc_w
;
3815 amdgpu_crtc
->cursor_height
= plane
->state
->crtc_h
;
3817 attributes
.address
.high_part
= upper_32_bits(address
);
3818 attributes
.address
.low_part
= lower_32_bits(address
);
3819 attributes
.width
= plane
->state
->crtc_w
;
3820 attributes
.height
= plane
->state
->crtc_h
;
3821 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
3822 attributes
.rotation_angle
= 0;
3823 attributes
.attribute_flags
.value
= 0;
3825 attributes
.pitch
= attributes
.width
;
3827 if (crtc_state
->stream
) {
3828 if (!dc_stream_set_cursor_attributes(crtc_state
->stream
,
3830 DRM_ERROR("DC failed to set cursor attributes\n");
3832 if (!dc_stream_set_cursor_position(crtc_state
->stream
,
3834 DRM_ERROR("DC failed to set cursor position\n");
3838 static void prepare_flip_isr(struct amdgpu_crtc
*acrtc
)
3841 assert_spin_locked(&acrtc
->base
.dev
->event_lock
);
3842 WARN_ON(acrtc
->event
);
3844 acrtc
->event
= acrtc
->base
.state
->event
;
3846 /* Set the flip status */
3847 acrtc
->pflip_status
= AMDGPU_FLIP_SUBMITTED
;
3849 /* Mark this event as consumed */
3850 acrtc
->base
.state
->event
= NULL
;
3852 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
3859 * Waits on all BO's fences and for proper vblank count
3861 static void amdgpu_dm_do_flip(struct drm_crtc
*crtc
,
3862 struct drm_framebuffer
*fb
,
3864 struct dc_state
*state
)
3866 unsigned long flags
;
3867 uint32_t target_vblank
;
3869 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3870 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(fb
);
3871 struct amdgpu_bo
*abo
= gem_to_amdgpu_bo(fb
->obj
[0]);
3872 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
3873 bool async_flip
= (crtc
->state
->pageflip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
) != 0;
3874 struct dc_flip_addrs addr
= { {0} };
3875 /* TODO eliminate or rename surface_update */
3876 struct dc_surface_update surface_updates
[1] = { {0} };
3877 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
3880 /* Prepare wait for target vblank early - before the fence-waits */
3881 target_vblank
= target
- (uint32_t)drm_crtc_vblank_count(crtc
) +
3882 amdgpu_get_vblank_counter_kms(crtc
->dev
, acrtc
->crtc_id
);
3884 /* TODO This might fail and hence better not used, wait
3885 * explicitly on fences instead
3886 * and in general should be called for
3887 * blocking commit to as per framework helpers
3889 r
= amdgpu_bo_reserve(abo
, true);
3890 if (unlikely(r
!= 0)) {
3891 DRM_ERROR("failed to reserve buffer before flip\n");
3895 /* Wait for all fences on this FB */
3896 WARN_ON(reservation_object_wait_timeout_rcu(abo
->tbo
.resv
, true, false,
3897 MAX_SCHEDULE_TIMEOUT
) < 0);
3899 amdgpu_bo_unreserve(abo
);
3901 /* Wait until we're out of the vertical blank period before the one
3902 * targeted by the flip
3904 while ((acrtc
->enabled
&&
3905 (amdgpu_display_get_crtc_scanoutpos(adev
->ddev
, acrtc
->crtc_id
,
3906 0, &vpos
, &hpos
, NULL
,
3907 NULL
, &crtc
->hwmode
)
3908 & (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
)) ==
3909 (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
) &&
3910 (int)(target_vblank
-
3911 amdgpu_get_vblank_counter_kms(adev
->ddev
, acrtc
->crtc_id
)) > 0)) {
3912 usleep_range(1000, 1100);
3916 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
3917 /* update crtc fb */
3918 crtc
->primary
->fb
= fb
;
3920 WARN_ON(acrtc
->pflip_status
!= AMDGPU_FLIP_NONE
);
3921 WARN_ON(!acrtc_state
->stream
);
3923 addr
.address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3924 addr
.address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3925 addr
.flip_immediate
= async_flip
;
3928 if (acrtc
->base
.state
->event
)
3929 prepare_flip_isr(acrtc
);
3931 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3933 surface_updates
->surface
= dc_stream_get_status(acrtc_state
->stream
)->plane_states
[0];
3934 surface_updates
->flip_addr
= &addr
;
3936 dc_commit_updates_for_stream(adev
->dm
.dc
,
3939 acrtc_state
->stream
,
3941 &surface_updates
->surface
,
3944 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
3946 addr
.address
.grph
.addr
.high_part
,
3947 addr
.address
.grph
.addr
.low_part
);
3951 * TODO this whole function needs to go
3953 * dc_surface_update is needlessly complex. See if we can just replace this
3954 * with a dc_plane_state and follow the atomic model a bit more closely here.
3956 static bool commit_planes_to_stream(
3958 struct dc_plane_state
**plane_states
,
3959 uint8_t new_plane_count
,
3960 struct dm_crtc_state
*dm_new_crtc_state
,
3961 struct dm_crtc_state
*dm_old_crtc_state
,
3962 struct dc_state
*state
)
3964 /* no need to dynamically allocate this. it's pretty small */
3965 struct dc_surface_update updates
[MAX_SURFACES
];
3966 struct dc_flip_addrs
*flip_addr
;
3967 struct dc_plane_info
*plane_info
;
3968 struct dc_scaling_info
*scaling_info
;
3970 struct dc_stream_state
*dc_stream
= dm_new_crtc_state
->stream
;
3971 struct dc_stream_update
*stream_update
=
3972 kzalloc(sizeof(struct dc_stream_update
), GFP_KERNEL
);
3974 if (!stream_update
) {
3975 BREAK_TO_DEBUGGER();
3979 flip_addr
= kcalloc(MAX_SURFACES
, sizeof(struct dc_flip_addrs
),
3981 plane_info
= kcalloc(MAX_SURFACES
, sizeof(struct dc_plane_info
),
3983 scaling_info
= kcalloc(MAX_SURFACES
, sizeof(struct dc_scaling_info
),
3986 if (!flip_addr
|| !plane_info
|| !scaling_info
) {
3989 kfree(scaling_info
);
3990 kfree(stream_update
);
3994 memset(updates
, 0, sizeof(updates
));
3996 stream_update
->src
= dc_stream
->src
;
3997 stream_update
->dst
= dc_stream
->dst
;
3998 stream_update
->out_transfer_func
= dc_stream
->out_transfer_func
;
4000 for (i
= 0; i
< new_plane_count
; i
++) {
4001 updates
[i
].surface
= plane_states
[i
];
4003 (struct dc_gamma
*)plane_states
[i
]->gamma_correction
;
4004 updates
[i
].in_transfer_func
= plane_states
[i
]->in_transfer_func
;
4005 flip_addr
[i
].address
= plane_states
[i
]->address
;
4006 flip_addr
[i
].flip_immediate
= plane_states
[i
]->flip_immediate
;
4007 plane_info
[i
].color_space
= plane_states
[i
]->color_space
;
4008 plane_info
[i
].format
= plane_states
[i
]->format
;
4009 plane_info
[i
].plane_size
= plane_states
[i
]->plane_size
;
4010 plane_info
[i
].rotation
= plane_states
[i
]->rotation
;
4011 plane_info
[i
].horizontal_mirror
= plane_states
[i
]->horizontal_mirror
;
4012 plane_info
[i
].stereo_format
= plane_states
[i
]->stereo_format
;
4013 plane_info
[i
].tiling_info
= plane_states
[i
]->tiling_info
;
4014 plane_info
[i
].visible
= plane_states
[i
]->visible
;
4015 plane_info
[i
].per_pixel_alpha
= plane_states
[i
]->per_pixel_alpha
;
4016 plane_info
[i
].dcc
= plane_states
[i
]->dcc
;
4017 scaling_info
[i
].scaling_quality
= plane_states
[i
]->scaling_quality
;
4018 scaling_info
[i
].src_rect
= plane_states
[i
]->src_rect
;
4019 scaling_info
[i
].dst_rect
= plane_states
[i
]->dst_rect
;
4020 scaling_info
[i
].clip_rect
= plane_states
[i
]->clip_rect
;
4022 updates
[i
].flip_addr
= &flip_addr
[i
];
4023 updates
[i
].plane_info
= &plane_info
[i
];
4024 updates
[i
].scaling_info
= &scaling_info
[i
];
4027 dc_commit_updates_for_stream(
4031 dc_stream
, stream_update
, plane_states
, state
);
4035 kfree(scaling_info
);
4036 kfree(stream_update
);
4040 static void amdgpu_dm_commit_planes(struct drm_atomic_state
*state
,
4041 struct drm_device
*dev
,
4042 struct amdgpu_display_manager
*dm
,
4043 struct drm_crtc
*pcrtc
,
4044 bool *wait_for_vblank
)
4047 struct drm_plane
*plane
;
4048 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
4049 struct dc_stream_state
*dc_stream_attach
;
4050 struct dc_plane_state
*plane_states_constructed
[MAX_SURFACES
];
4051 struct amdgpu_crtc
*acrtc_attach
= to_amdgpu_crtc(pcrtc
);
4052 struct drm_crtc_state
*new_pcrtc_state
=
4053 drm_atomic_get_new_crtc_state(state
, pcrtc
);
4054 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(new_pcrtc_state
);
4055 struct dm_crtc_state
*dm_old_crtc_state
=
4056 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state
, pcrtc
));
4057 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4058 int planes_count
= 0;
4059 unsigned long flags
;
4061 /* update planes when needed */
4062 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
4063 struct drm_crtc
*crtc
= new_plane_state
->crtc
;
4064 struct drm_crtc_state
*new_crtc_state
;
4065 struct drm_framebuffer
*fb
= new_plane_state
->fb
;
4067 struct dm_plane_state
*dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
4069 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
4070 handle_cursor_update(plane
, old_plane_state
);
4074 if (!fb
|| !crtc
|| pcrtc
!= crtc
)
4077 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, crtc
);
4078 if (!new_crtc_state
->active
)
4081 pflip_needed
= !state
->allow_modeset
;
4083 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
4084 if (acrtc_attach
->pflip_status
!= AMDGPU_FLIP_NONE
) {
4085 DRM_ERROR("%s: acrtc %d, already busy\n",
4087 acrtc_attach
->crtc_id
);
4088 /* In commit tail framework this cannot happen */
4091 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
4093 if (!pflip_needed
|| plane
->type
== DRM_PLANE_TYPE_OVERLAY
) {
4094 WARN_ON(!dm_new_plane_state
->dc_state
);
4096 plane_states_constructed
[planes_count
] = dm_new_plane_state
->dc_state
;
4098 dc_stream_attach
= acrtc_state
->stream
;
4101 } else if (new_crtc_state
->planes_changed
) {
4102 /* Assume even ONE crtc with immediate flip means
4103 * entire can't wait for VBLANK
4104 * TODO Check if it's correct
4107 new_pcrtc_state
->pageflip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
?
4110 /* TODO: Needs rework for multiplane flip */
4111 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
)
4112 drm_crtc_vblank_get(crtc
);
4117 (uint32_t)drm_crtc_vblank_count(crtc
) + *wait_for_vblank
,
4124 unsigned long flags
;
4126 if (new_pcrtc_state
->event
) {
4128 drm_crtc_vblank_get(pcrtc
);
4130 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
4131 prepare_flip_isr(acrtc_attach
);
4132 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
4136 if (false == commit_planes_to_stream(dm
->dc
,
4137 plane_states_constructed
,
4142 dm_error("%s: Failed to attach plane!\n", __func__
);
4144 /*TODO BUG Here should go disable planes on CRTC. */
4149 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
4150 * @crtc_state: the DRM CRTC state
4151 * @stream_state: the DC stream state.
4153 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
4154 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
4156 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state
*crtc_state
,
4157 struct dc_stream_state
*stream_state
)
4159 stream_state
->mode_changed
= crtc_state
->mode_changed
;
4162 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
4163 struct drm_atomic_state
*state
,
4166 struct drm_crtc
*crtc
;
4167 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4168 struct amdgpu_device
*adev
= dev
->dev_private
;
4172 * We evade vblanks and pflips on crtc that
4173 * should be changed. We do it here to flush & disable
4174 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
4175 * it will update crtc->dm_crtc_state->stream pointer which is used in
4178 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4179 struct dm_crtc_state
*dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4180 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4182 if (drm_atomic_crtc_needs_modeset(new_crtc_state
) && dm_old_crtc_state
->stream
)
4183 manage_dm_interrupts(adev
, acrtc
, false);
4185 /* Add check here for SoC's that support hardware cursor plane, to
4186 * unset legacy_cursor_update */
4188 return drm_atomic_helper_commit(dev
, state
, nonblock
);
4190 /*TODO Handle EINTR, reenable IRQ*/
4193 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
)
4195 struct drm_device
*dev
= state
->dev
;
4196 struct amdgpu_device
*adev
= dev
->dev_private
;
4197 struct amdgpu_display_manager
*dm
= &adev
->dm
;
4198 struct dm_atomic_state
*dm_state
;
4200 struct drm_crtc
*crtc
;
4201 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4202 unsigned long flags
;
4203 bool wait_for_vblank
= true;
4204 struct drm_connector
*connector
;
4205 struct drm_connector_state
*old_con_state
, *new_con_state
;
4206 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
4207 int crtc_disable_count
= 0;
4209 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
4211 dm_state
= to_dm_atomic_state(state
);
4213 /* update changed items */
4214 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4215 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4217 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4218 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4221 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4222 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4223 "connectors_changed:%d\n",
4225 new_crtc_state
->enable
,
4226 new_crtc_state
->active
,
4227 new_crtc_state
->planes_changed
,
4228 new_crtc_state
->mode_changed
,
4229 new_crtc_state
->active_changed
,
4230 new_crtc_state
->connectors_changed
);
4232 /* Copy all transient state flags into dc state */
4233 if (dm_new_crtc_state
->stream
) {
4234 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state
->base
,
4235 dm_new_crtc_state
->stream
);
4238 /* handles headless hotplug case, updating new_state and
4239 * aconnector as needed
4242 if (modeset_required(new_crtc_state
, dm_new_crtc_state
->stream
, dm_old_crtc_state
->stream
)) {
4244 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc
->crtc_id
, acrtc
);
4246 if (!dm_new_crtc_state
->stream
) {
4248 * this could happen because of issues with
4249 * userspace notifications delivery.
4250 * In this case userspace tries to set mode on
4251 * display which is disconnect in fact.
4252 * dc_sink in NULL in this case on aconnector.
4253 * We expect reset mode will come soon.
4255 * This can also happen when unplug is done
4256 * during resume sequence ended
4258 * In this case, we want to pretend we still
4259 * have a sink to keep the pipe running so that
4260 * hw state is consistent with the sw state
4262 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4263 __func__
, acrtc
->base
.base
.id
);
4267 if (dm_old_crtc_state
->stream
)
4268 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
4270 pm_runtime_get_noresume(dev
->dev
);
4272 acrtc
->enabled
= true;
4273 acrtc
->hw_mode
= new_crtc_state
->mode
;
4274 crtc
->hwmode
= new_crtc_state
->mode
;
4275 } else if (modereset_required(new_crtc_state
)) {
4276 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc
->crtc_id
, acrtc
);
4278 /* i.e. reset mode */
4279 if (dm_old_crtc_state
->stream
)
4280 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
4282 } /* for_each_crtc_in_state() */
4285 * Add streams after required streams from new and replaced streams
4286 * are removed from freesync module
4288 if (adev
->dm
.freesync_module
) {
4289 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
,
4290 new_crtc_state
, i
) {
4291 struct amdgpu_dm_connector
*aconnector
= NULL
;
4292 struct dm_connector_state
*dm_new_con_state
= NULL
;
4293 struct amdgpu_crtc
*acrtc
= NULL
;
4294 bool modeset_needed
;
4296 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4297 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4298 modeset_needed
= modeset_required(
4300 dm_new_crtc_state
->stream
,
4301 dm_old_crtc_state
->stream
);
4302 /* We add stream to freesync if:
4303 * 1. Said stream is not null, and
4304 * 2. A modeset is requested. This means that the
4305 * stream was removed previously, and needs to be
4308 if (dm_new_crtc_state
->stream
== NULL
||
4312 acrtc
= to_amdgpu_crtc(crtc
);
4315 amdgpu_dm_find_first_crtc_matching_connector(
4318 DRM_DEBUG_DRIVER("Atomic commit: Failed to "
4319 "find connector for acrtc "
4320 "id:%d skipping freesync "
4326 mod_freesync_add_stream(adev
->dm
.freesync_module
,
4327 dm_new_crtc_state
->stream
,
4329 new_con_state
= drm_atomic_get_new_connector_state(
4330 state
, &aconnector
->base
);
4331 dm_new_con_state
= to_dm_connector_state(new_con_state
);
4333 mod_freesync_set_user_enable(adev
->dm
.freesync_module
,
4334 &dm_new_crtc_state
->stream
,
4336 &dm_new_con_state
->user_enable
);
4340 if (dm_state
->context
) {
4341 dm_enable_per_frame_crtc_master_sync(dm_state
->context
);
4342 WARN_ON(!dc_commit_state(dm
->dc
, dm_state
->context
));
4345 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
4346 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4348 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4350 if (dm_new_crtc_state
->stream
!= NULL
) {
4351 const struct dc_stream_status
*status
=
4352 dc_stream_get_status(dm_new_crtc_state
->stream
);
4355 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state
->stream
, acrtc
);
4357 acrtc
->otg_inst
= status
->primary_otg_inst
;
4361 /* Handle scaling and underscan changes*/
4362 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
4363 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
4364 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
4365 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
4366 struct dc_stream_status
*status
= NULL
;
4369 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, &acrtc
->base
);
4370 old_crtc_state
= drm_atomic_get_old_crtc_state(state
, &acrtc
->base
);
4373 /* Skip any modesets/resets */
4374 if (!acrtc
|| drm_atomic_crtc_needs_modeset(new_crtc_state
))
4377 /* Skip any thing not scale or underscan changes */
4378 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
))
4381 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4383 update_stream_scaling_settings(&dm_new_con_state
->base
.crtc
->mode
,
4384 dm_new_con_state
, (struct dc_stream_state
*)dm_new_crtc_state
->stream
);
4386 if (!dm_new_crtc_state
->stream
)
4389 status
= dc_stream_get_status(dm_new_crtc_state
->stream
);
4391 WARN_ON(!status
->plane_count
);
4393 /*TODO How it works with MPO ?*/
4394 if (!commit_planes_to_stream(
4396 status
->plane_states
,
4397 status
->plane_count
,
4399 to_dm_crtc_state(old_crtc_state
),
4401 dm_error("%s: Failed to update stream scaling!\n", __func__
);
4404 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
,
4405 new_crtc_state
, i
) {
4407 * loop to enable interrupts on newly arrived crtc
4409 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4410 bool modeset_needed
;
4412 if (old_crtc_state
->active
&& !new_crtc_state
->active
)
4413 crtc_disable_count
++;
4415 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4416 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4417 modeset_needed
= modeset_required(
4419 dm_new_crtc_state
->stream
,
4420 dm_old_crtc_state
->stream
);
4422 if (dm_new_crtc_state
->stream
== NULL
|| !modeset_needed
)
4425 if (adev
->dm
.freesync_module
)
4426 mod_freesync_notify_mode_change(
4427 adev
->dm
.freesync_module
,
4428 &dm_new_crtc_state
->stream
, 1);
4430 manage_dm_interrupts(adev
, acrtc
, true);
4433 /* update planes when needed per crtc*/
4434 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, j
) {
4435 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4437 if (dm_new_crtc_state
->stream
)
4438 amdgpu_dm_commit_planes(state
, dev
, dm
, crtc
, &wait_for_vblank
);
4443 * send vblank event on all events not handled in flip and
4444 * mark consumed event for drm_atomic_helper_commit_hw_done
4446 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
4447 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
4449 if (new_crtc_state
->event
)
4450 drm_send_event_locked(dev
, &new_crtc_state
->event
->base
);
4452 new_crtc_state
->event
= NULL
;
4454 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
4456 /* Signal HW programming completion */
4457 drm_atomic_helper_commit_hw_done(state
);
4459 if (wait_for_vblank
)
4460 drm_atomic_helper_wait_for_flip_done(dev
, state
);
4462 drm_atomic_helper_cleanup_planes(dev
, state
);
4464 /* Finally, drop a runtime PM reference for each newly disabled CRTC,
4465 * so we can put the GPU into runtime suspend if we're not driving any
4468 for (i
= 0; i
< crtc_disable_count
; i
++)
4469 pm_runtime_put_autosuspend(dev
->dev
);
4470 pm_runtime_mark_last_busy(dev
->dev
);
4474 static int dm_force_atomic_commit(struct drm_connector
*connector
)
4477 struct drm_device
*ddev
= connector
->dev
;
4478 struct drm_atomic_state
*state
= drm_atomic_state_alloc(ddev
);
4479 struct amdgpu_crtc
*disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4480 struct drm_plane
*plane
= disconnected_acrtc
->base
.primary
;
4481 struct drm_connector_state
*conn_state
;
4482 struct drm_crtc_state
*crtc_state
;
4483 struct drm_plane_state
*plane_state
;
4488 state
->acquire_ctx
= ddev
->mode_config
.acquire_ctx
;
4490 /* Construct an atomic state to restore previous display setting */
4493 * Attach connectors to drm_atomic_state
4495 conn_state
= drm_atomic_get_connector_state(state
, connector
);
4497 ret
= PTR_ERR_OR_ZERO(conn_state
);
4501 /* Attach crtc to drm_atomic_state*/
4502 crtc_state
= drm_atomic_get_crtc_state(state
, &disconnected_acrtc
->base
);
4504 ret
= PTR_ERR_OR_ZERO(crtc_state
);
4508 /* force a restore */
4509 crtc_state
->mode_changed
= true;
4511 /* Attach plane to drm_atomic_state */
4512 plane_state
= drm_atomic_get_plane_state(state
, plane
);
4514 ret
= PTR_ERR_OR_ZERO(plane_state
);
4519 /* Call commit internally with the state we just constructed */
4520 ret
= drm_atomic_commit(state
);
4525 DRM_ERROR("Restoring old state failed with %i\n", ret
);
4526 drm_atomic_state_put(state
);
4532 * This functions handle all cases when set mode does not come upon hotplug.
4533 * This include when the same display is unplugged then plugged back into the
4534 * same port and when we are running without usermode desktop manager supprot
4536 void dm_restore_drm_connector_state(struct drm_device
*dev
,
4537 struct drm_connector
*connector
)
4539 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4540 struct amdgpu_crtc
*disconnected_acrtc
;
4541 struct dm_crtc_state
*acrtc_state
;
4543 if (!aconnector
->dc_sink
|| !connector
->state
|| !connector
->encoder
)
4546 disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4547 if (!disconnected_acrtc
)
4550 acrtc_state
= to_dm_crtc_state(disconnected_acrtc
->base
.state
);
4551 if (!acrtc_state
->stream
)
4555 * If the previous sink is not released and different from the current,
4556 * we deduce we are in a state where we can not rely on usermode call
4557 * to turn on the display, so we do it here
4559 if (acrtc_state
->stream
->sink
!= aconnector
->dc_sink
)
4560 dm_force_atomic_commit(&aconnector
->base
);
4564 * Grabs all modesetting locks to serialize against any blocking commits,
4565 * Waits for completion of all non blocking commits.
4567 static int do_aquire_global_lock(struct drm_device
*dev
,
4568 struct drm_atomic_state
*state
)
4570 struct drm_crtc
*crtc
;
4571 struct drm_crtc_commit
*commit
;
4574 /* Adding all modeset locks to aquire_ctx will
4575 * ensure that when the framework release it the
4576 * extra locks we are locking here will get released to
4578 ret
= drm_modeset_lock_all_ctx(dev
, state
->acquire_ctx
);
4582 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
4583 spin_lock(&crtc
->commit_lock
);
4584 commit
= list_first_entry_or_null(&crtc
->commit_list
,
4585 struct drm_crtc_commit
, commit_entry
);
4587 drm_crtc_commit_get(commit
);
4588 spin_unlock(&crtc
->commit_lock
);
4593 /* Make sure all pending HW programming completed and
4596 ret
= wait_for_completion_interruptible_timeout(&commit
->hw_done
, 10*HZ
);
4599 ret
= wait_for_completion_interruptible_timeout(
4600 &commit
->flip_done
, 10*HZ
);
4603 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
4604 "timed out\n", crtc
->base
.id
, crtc
->name
);
4606 drm_crtc_commit_put(commit
);
4609 return ret
< 0 ? ret
: 0;
4612 static int dm_update_crtcs_state(struct dc
*dc
,
4613 struct drm_atomic_state
*state
,
4615 bool *lock_and_validation_needed
)
4617 struct drm_crtc
*crtc
;
4618 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4620 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
4621 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4622 struct dc_stream_state
*new_stream
;
4625 /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
4626 /* update changed items */
4627 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4628 struct amdgpu_crtc
*acrtc
= NULL
;
4629 struct amdgpu_dm_connector
*aconnector
= NULL
;
4630 struct drm_connector_state
*drm_new_conn_state
= NULL
, *drm_old_conn_state
= NULL
;
4631 struct dm_connector_state
*dm_new_conn_state
= NULL
, *dm_old_conn_state
= NULL
;
4632 struct drm_plane_state
*new_plane_state
= NULL
;
4636 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4637 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4638 acrtc
= to_amdgpu_crtc(crtc
);
4640 new_plane_state
= drm_atomic_get_new_plane_state(state
, new_crtc_state
->crtc
->primary
);
4642 if (new_crtc_state
->enable
&& new_plane_state
&& !new_plane_state
->fb
) {
4647 aconnector
= amdgpu_dm_find_first_crtc_matching_connector(state
, crtc
);
4649 /* TODO This hack should go away */
4650 if (aconnector
&& enable
) {
4651 // Make sure fake sink is created in plug-in scenario
4652 drm_new_conn_state
= drm_atomic_get_new_connector_state(state
,
4654 drm_old_conn_state
= drm_atomic_get_old_connector_state(state
,
4657 if (IS_ERR(drm_new_conn_state
)) {
4658 ret
= PTR_ERR_OR_ZERO(drm_new_conn_state
);
4662 dm_new_conn_state
= to_dm_connector_state(drm_new_conn_state
);
4663 dm_old_conn_state
= to_dm_connector_state(drm_old_conn_state
);
4665 new_stream
= create_stream_for_sink(aconnector
,
4666 &new_crtc_state
->mode
,
4670 * we can have no stream on ACTION_SET if a display
4671 * was disconnected during S3, in this case it not and
4672 * error, the OS will be updated after detection, and
4673 * do the right thing on next atomic commit
4677 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4678 __func__
, acrtc
->base
.base
.id
);
4682 if (dc_is_stream_unchanged(new_stream
, dm_old_crtc_state
->stream
) &&
4683 dc_is_stream_scaling_unchanged(new_stream
, dm_old_crtc_state
->stream
)) {
4684 new_crtc_state
->mode_changed
= false;
4685 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
4686 new_crtc_state
->mode_changed
);
4690 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
4694 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4695 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4696 "connectors_changed:%d\n",
4698 new_crtc_state
->enable
,
4699 new_crtc_state
->active
,
4700 new_crtc_state
->planes_changed
,
4701 new_crtc_state
->mode_changed
,
4702 new_crtc_state
->active_changed
,
4703 new_crtc_state
->connectors_changed
);
4705 /* Remove stream for any changed/disabled CRTC */
4708 if (!dm_old_crtc_state
->stream
)
4711 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
4714 /* i.e. reset mode */
4715 if (dc_remove_stream_from_ctx(
4718 dm_old_crtc_state
->stream
) != DC_OK
) {
4723 dc_stream_release(dm_old_crtc_state
->stream
);
4724 dm_new_crtc_state
->stream
= NULL
;
4726 *lock_and_validation_needed
= true;
4728 } else {/* Add stream for any updated/enabled CRTC */
4730 * Quick fix to prevent NULL pointer on new_stream when
4731 * added MST connectors not found in existing crtc_state in the chained mode
4732 * TODO: need to dig out the root cause of that
4734 if (!aconnector
|| (!aconnector
->dc_sink
&& aconnector
->mst_port
))
4737 if (modereset_required(new_crtc_state
))
4740 if (modeset_required(new_crtc_state
, new_stream
,
4741 dm_old_crtc_state
->stream
)) {
4743 WARN_ON(dm_new_crtc_state
->stream
);
4745 dm_new_crtc_state
->stream
= new_stream
;
4747 dc_stream_retain(new_stream
);
4749 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
4752 if (dc_add_stream_to_ctx(
4755 dm_new_crtc_state
->stream
) != DC_OK
) {
4760 *lock_and_validation_needed
= true;
4765 /* Release extra reference */
4767 dc_stream_release(new_stream
);
4770 * We want to do dc stream updates that do not require a
4771 * full modeset below.
4773 if (!(enable
&& aconnector
&& new_crtc_state
->enable
&&
4774 new_crtc_state
->active
))
4777 * Given above conditions, the dc state cannot be NULL because:
4778 * 1. We're in the process of enabling CRTCs (just been added
4779 * to the dc context, or already is on the context)
4780 * 2. Has a valid connector attached, and
4781 * 3. Is currently active and enabled.
4782 * => The dc stream state currently exists.
4784 BUG_ON(dm_new_crtc_state
->stream
== NULL
);
4786 /* Scaling or underscan settings */
4787 if (is_scaling_state_different(dm_old_conn_state
, dm_new_conn_state
))
4788 update_stream_scaling_settings(
4789 &new_crtc_state
->mode
, dm_new_conn_state
, dm_new_crtc_state
->stream
);
4792 * Color management settings. We also update color properties
4793 * when a modeset is needed, to ensure it gets reprogrammed.
4795 if (dm_new_crtc_state
->base
.color_mgmt_changed
||
4796 drm_atomic_crtc_needs_modeset(new_crtc_state
)) {
4797 ret
= amdgpu_dm_set_regamma_lut(dm_new_crtc_state
);
4800 amdgpu_dm_set_ctm(dm_new_crtc_state
);
4808 dc_stream_release(new_stream
);
4812 static int dm_update_planes_state(struct dc
*dc
,
4813 struct drm_atomic_state
*state
,
4815 bool *lock_and_validation_needed
)
4817 struct drm_crtc
*new_plane_crtc
, *old_plane_crtc
;
4818 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4819 struct drm_plane
*plane
;
4820 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
4821 struct dm_crtc_state
*dm_new_crtc_state
, *dm_old_crtc_state
;
4822 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4823 struct dm_plane_state
*dm_new_plane_state
, *dm_old_plane_state
;
4825 /* TODO return page_flip_needed() function */
4826 bool pflip_needed
= !state
->allow_modeset
;
4830 /* Add new planes, in reverse order as DC expectation */
4831 for_each_oldnew_plane_in_state_reverse(state
, plane
, old_plane_state
, new_plane_state
, i
) {
4832 new_plane_crtc
= new_plane_state
->crtc
;
4833 old_plane_crtc
= old_plane_state
->crtc
;
4834 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
4835 dm_old_plane_state
= to_dm_plane_state(old_plane_state
);
4837 /*TODO Implement atomic check for cursor plane */
4838 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
4841 /* Remove any changed/removed planes */
4844 plane
->type
!= DRM_PLANE_TYPE_OVERLAY
)
4847 if (!old_plane_crtc
)
4850 old_crtc_state
= drm_atomic_get_old_crtc_state(
4851 state
, old_plane_crtc
);
4852 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4854 if (!dm_old_crtc_state
->stream
)
4857 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
4858 plane
->base
.id
, old_plane_crtc
->base
.id
);
4860 if (!dc_remove_plane_from_context(
4862 dm_old_crtc_state
->stream
,
4863 dm_old_plane_state
->dc_state
,
4864 dm_state
->context
)) {
4871 dc_plane_state_release(dm_old_plane_state
->dc_state
);
4872 dm_new_plane_state
->dc_state
= NULL
;
4874 *lock_and_validation_needed
= true;
4876 } else { /* Add new planes */
4877 struct dc_plane_state
*dc_new_plane_state
;
4879 if (drm_atomic_plane_disabling(plane
->state
, new_plane_state
))
4882 if (!new_plane_crtc
)
4885 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, new_plane_crtc
);
4886 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4888 if (!dm_new_crtc_state
->stream
)
4892 plane
->type
!= DRM_PLANE_TYPE_OVERLAY
)
4895 WARN_ON(dm_new_plane_state
->dc_state
);
4897 dc_new_plane_state
= dc_create_plane_state(dc
);
4898 if (!dc_new_plane_state
)
4901 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4902 plane
->base
.id
, new_plane_crtc
->base
.id
);
4904 ret
= fill_plane_attributes(
4905 new_plane_crtc
->dev
->dev_private
,
4910 dc_plane_state_release(dc_new_plane_state
);
4915 * Any atomic check errors that occur after this will
4916 * not need a release. The plane state will be attached
4917 * to the stream, and therefore part of the atomic
4918 * state. It'll be released when the atomic state is
4921 if (!dc_add_plane_to_context(
4923 dm_new_crtc_state
->stream
,
4925 dm_state
->context
)) {
4927 dc_plane_state_release(dc_new_plane_state
);
4931 dm_new_plane_state
->dc_state
= dc_new_plane_state
;
4933 /* Tell DC to do a full surface update every time there
4934 * is a plane change. Inefficient, but works for now.
4936 dm_new_plane_state
->dc_state
->update_flags
.bits
.full_update
= 1;
4938 *lock_and_validation_needed
= true;
4946 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
4947 struct drm_atomic_state
*state
)
4949 struct amdgpu_device
*adev
= dev
->dev_private
;
4950 struct dc
*dc
= adev
->dm
.dc
;
4951 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4952 struct drm_connector
*connector
;
4953 struct drm_connector_state
*old_con_state
, *new_con_state
;
4954 struct drm_crtc
*crtc
;
4955 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4959 * This bool will be set for true for any modeset/reset
4960 * or plane update which implies non fast surface update.
4962 bool lock_and_validation_needed
= false;
4964 ret
= drm_atomic_helper_check_modeset(dev
, state
);
4968 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4969 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
) &&
4970 !new_crtc_state
->color_mgmt_changed
)
4973 if (!new_crtc_state
->enable
)
4976 ret
= drm_atomic_add_affected_connectors(state
, crtc
);
4980 ret
= drm_atomic_add_affected_planes(state
, crtc
);
4985 dm_state
->context
= dc_create_state();
4986 ASSERT(dm_state
->context
);
4987 dc_resource_state_copy_construct_current(dc
, dm_state
->context
);
4989 /* Remove exiting planes if they are modified */
4990 ret
= dm_update_planes_state(dc
, state
, false, &lock_and_validation_needed
);
4995 /* Disable all crtcs which require disable */
4996 ret
= dm_update_crtcs_state(dc
, state
, false, &lock_and_validation_needed
);
5001 /* Enable all crtcs which require enable */
5002 ret
= dm_update_crtcs_state(dc
, state
, true, &lock_and_validation_needed
);
5007 /* Add new/modified planes */
5008 ret
= dm_update_planes_state(dc
, state
, true, &lock_and_validation_needed
);
5013 /* Run this here since we want to validate the streams we created */
5014 ret
= drm_atomic_helper_check_planes(dev
, state
);
5018 /* Check scaling and underscan changes*/
5019 /*TODO Removed scaling changes validation due to inability to commit
5020 * new stream into context w\o causing full reset. Need to
5021 * decide how to handle.
5023 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
5024 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
5025 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
5026 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
5028 /* Skip any modesets/resets */
5029 if (!acrtc
|| drm_atomic_crtc_needs_modeset(
5030 drm_atomic_get_new_crtc_state(state
, &acrtc
->base
)))
5033 /* Skip any thing not scale or underscan changes */
5034 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
))
5037 lock_and_validation_needed
= true;
5041 * For full updates case when
5042 * removing/adding/updating streams on once CRTC while flipping
5044 * acquiring global lock will guarantee that any such full
5046 * will wait for completion of any outstanding flip using DRMs
5047 * synchronization events.
5050 if (lock_and_validation_needed
) {
5052 ret
= do_aquire_global_lock(dev
, state
);
5056 if (dc_validate_global_state(dc
, dm_state
->context
) != DC_OK
) {
5062 /* Must be success */
5067 if (ret
== -EDEADLK
)
5068 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
5069 else if (ret
== -EINTR
|| ret
== -EAGAIN
|| ret
== -ERESTARTSYS
)
5070 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
5072 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret
);
5077 static bool is_dp_capable_without_timing_msa(struct dc
*dc
,
5078 struct amdgpu_dm_connector
*amdgpu_dm_connector
)
5081 bool capable
= false;
5083 if (amdgpu_dm_connector
->dc_link
&&
5084 dm_helpers_dp_read_dpcd(
5086 amdgpu_dm_connector
->dc_link
,
5087 DP_DOWN_STREAM_PORT_COUNT
,
5089 sizeof(dpcd_data
))) {
5090 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
5095 void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector
*connector
,
5099 bool edid_check_required
;
5100 struct detailed_timing
*timing
;
5101 struct detailed_non_pixel
*data
;
5102 struct detailed_data_monitor_range
*range
;
5103 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
5104 to_amdgpu_dm_connector(connector
);
5105 struct dm_connector_state
*dm_con_state
;
5107 struct drm_device
*dev
= connector
->dev
;
5108 struct amdgpu_device
*adev
= dev
->dev_private
;
5110 if (!connector
->state
) {
5111 DRM_ERROR("%s - Connector has no state", __func__
);
5115 dm_con_state
= to_dm_connector_state(connector
->state
);
5117 edid_check_required
= false;
5118 if (!amdgpu_dm_connector
->dc_sink
) {
5119 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
5122 if (!adev
->dm
.freesync_module
)
5125 * if edid non zero restrict freesync only for dp and edp
5128 if (amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
5129 || amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
5130 edid_check_required
= is_dp_capable_without_timing_msa(
5132 amdgpu_dm_connector
);
5135 dm_con_state
->freesync_capable
= false;
5136 if (edid_check_required
== true && (edid
->version
> 1 ||
5137 (edid
->version
== 1 && edid
->revision
> 1))) {
5138 for (i
= 0; i
< 4; i
++) {
5140 timing
= &edid
->detailed_timings
[i
];
5141 data
= &timing
->data
.other_data
;
5142 range
= &data
->data
.range
;
5144 * Check if monitor has continuous frequency mode
5146 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
5149 * Check for flag range limits only. If flag == 1 then
5150 * no additional timing information provided.
5151 * Default GTF, GTF Secondary curve and CVT are not
5154 if (range
->flags
!= 1)
5157 amdgpu_dm_connector
->min_vfreq
= range
->min_vfreq
;
5158 amdgpu_dm_connector
->max_vfreq
= range
->max_vfreq
;
5159 amdgpu_dm_connector
->pixel_clock_mhz
=
5160 range
->pixel_clock_mhz
* 10;
5164 if (amdgpu_dm_connector
->max_vfreq
-
5165 amdgpu_dm_connector
->min_vfreq
> 10) {
5166 amdgpu_dm_connector
->caps
.supported
= true;
5167 amdgpu_dm_connector
->caps
.min_refresh_in_micro_hz
=
5168 amdgpu_dm_connector
->min_vfreq
* 1000000;
5169 amdgpu_dm_connector
->caps
.max_refresh_in_micro_hz
=
5170 amdgpu_dm_connector
->max_vfreq
* 1000000;
5171 dm_con_state
->freesync_capable
= true;
5176 * TODO figure out how to notify user-mode or DRM of freesync caps
5177 * once we figure out how to deal with freesync in an upstreamable
5183 void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector
*connector
)
5186 * TODO fill in once we figure out how to deal with freesync in
5187 * an upstreamable fashion