2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include "dm_services_types.h"
28 #include "dc/inc/core_types.h"
32 #include "amdgpu_display.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
40 #include "dm_services_types.h"
41 #include "amdgpu_dm_mst_types.h"
43 #include "ivsrcid/ivsrcid_vislands30.h"
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/version.h>
48 #include <linux/types.h>
51 #include <drm/drm_atomic.h>
52 #include <drm/drm_atomic_helper.h>
53 #include <drm/drm_dp_mst_helper.h>
54 #include <drm/drm_fb_helper.h>
55 #include <drm/drm_edid.h>
57 #include "modules/inc/mod_freesync.h"
59 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
60 #include "ivsrcid/irqsrcs_dcn_1_0.h"
62 #include "raven1/DCN/dcn_1_0_offset.h"
63 #include "raven1/DCN/dcn_1_0_sh_mask.h"
64 #include "vega10/soc15ip.h"
66 #include "soc15_common.h"
69 #include "modules/inc/mod_freesync.h"
71 #include "i2caux_interface.h"
73 /* basic init/fini API */
74 static int amdgpu_dm_init(struct amdgpu_device
*adev
);
75 static void amdgpu_dm_fini(struct amdgpu_device
*adev
);
77 /* initializes drm_device display related structures, based on the information
78 * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
79 * drm_encoder, drm_mode_config
81 * Returns 0 on success
83 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
);
84 /* removes and deallocates the drm structures, created by the above function */
85 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
);
88 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
);
90 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
91 struct amdgpu_plane
*aplane
,
92 unsigned long possible_crtcs
);
93 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
94 struct drm_plane
*plane
,
96 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
97 struct amdgpu_dm_connector
*amdgpu_dm_connector
,
99 struct amdgpu_encoder
*amdgpu_encoder
);
100 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
101 struct amdgpu_encoder
*aencoder
,
102 uint32_t link_index
);
104 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
);
106 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
107 struct drm_atomic_state
*state
,
110 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
);
112 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
113 struct drm_atomic_state
*state
);
118 static const enum drm_plane_type dm_plane_type_default
[AMDGPU_MAX_PLANES
] = {
119 DRM_PLANE_TYPE_PRIMARY
,
120 DRM_PLANE_TYPE_PRIMARY
,
121 DRM_PLANE_TYPE_PRIMARY
,
122 DRM_PLANE_TYPE_PRIMARY
,
123 DRM_PLANE_TYPE_PRIMARY
,
124 DRM_PLANE_TYPE_PRIMARY
,
127 static const enum drm_plane_type dm_plane_type_carizzo
[AMDGPU_MAX_PLANES
] = {
128 DRM_PLANE_TYPE_PRIMARY
,
129 DRM_PLANE_TYPE_PRIMARY
,
130 DRM_PLANE_TYPE_PRIMARY
,
131 DRM_PLANE_TYPE_OVERLAY
,/* YUV Capable Underlay */
134 static const enum drm_plane_type dm_plane_type_stoney
[AMDGPU_MAX_PLANES
] = {
135 DRM_PLANE_TYPE_PRIMARY
,
136 DRM_PLANE_TYPE_PRIMARY
,
137 DRM_PLANE_TYPE_OVERLAY
, /* YUV Capable Underlay */
141 * dm_vblank_get_counter
144 * Get counter for number of vertical blanks
147 * struct amdgpu_device *adev - [in] desired amdgpu device
148 * int disp_idx - [in] which CRTC to get the counter from
151 * Counter for vertical blanks
153 static u32
dm_vblank_get_counter(struct amdgpu_device
*adev
, int crtc
)
155 if (crtc
>= adev
->mode_info
.num_crtc
)
158 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
159 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
163 if (acrtc_state
->stream
== NULL
) {
164 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
169 return dc_stream_get_vblank_counter(acrtc_state
->stream
);
173 static int dm_crtc_get_scanoutpos(struct amdgpu_device
*adev
, int crtc
,
174 u32
*vbl
, u32
*position
)
176 uint32_t v_blank_start
, v_blank_end
, h_position
, v_position
;
178 if ((crtc
< 0) || (crtc
>= adev
->mode_info
.num_crtc
))
181 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
182 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
185 if (acrtc_state
->stream
== NULL
) {
186 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
192 * TODO rework base driver to use values directly.
193 * for now parse it back into reg-format
195 dc_stream_get_scanoutpos(acrtc_state
->stream
,
201 *position
= v_position
| (h_position
<< 16);
202 *vbl
= v_blank_start
| (v_blank_end
<< 16);
208 static bool dm_is_idle(void *handle
)
/* amd_ip_funcs.wait_for_idle stub.
 * NOTE(review): body was dropped in the extraction; reconstructed as a
 * no-op returning success — verify against the original file. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
220 static bool dm_check_soft_reset(void *handle
)
/* amd_ip_funcs.soft_reset stub.
 * NOTE(review): body was dropped in the extraction; reconstructed as a
 * no-op returning success — verify against the original file. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
231 static struct amdgpu_crtc
*
232 get_crtc_by_otg_inst(struct amdgpu_device
*adev
,
235 struct drm_device
*dev
= adev
->ddev
;
236 struct drm_crtc
*crtc
;
237 struct amdgpu_crtc
*amdgpu_crtc
;
240 * following if is check inherited from both functions where this one is
241 * used now. Need to be checked why it could happen.
243 if (otg_inst
== -1) {
245 return adev
->mode_info
.crtcs
[0];
248 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
249 amdgpu_crtc
= to_amdgpu_crtc(crtc
);
251 if (amdgpu_crtc
->otg_inst
== otg_inst
)
258 static void dm_pflip_high_irq(void *interrupt_params
)
260 struct amdgpu_crtc
*amdgpu_crtc
;
261 struct common_irq_params
*irq_params
= interrupt_params
;
262 struct amdgpu_device
*adev
= irq_params
->adev
;
265 amdgpu_crtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_PFLIP
);
267 /* IRQ could occur when in initial stage */
268 /*TODO work and BO cleanup */
269 if (amdgpu_crtc
== NULL
) {
270 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
274 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
276 if (amdgpu_crtc
->pflip_status
!= AMDGPU_FLIP_SUBMITTED
){
277 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
278 amdgpu_crtc
->pflip_status
,
279 AMDGPU_FLIP_SUBMITTED
,
280 amdgpu_crtc
->crtc_id
,
282 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
287 /* wakeup usersapce */
288 if (amdgpu_crtc
->event
) {
289 /* Update to correct count/ts if racing with vblank irq */
290 drm_crtc_accurate_vblank_count(&amdgpu_crtc
->base
);
292 drm_crtc_send_vblank_event(&amdgpu_crtc
->base
, amdgpu_crtc
->event
);
294 /* page flip completed. clean up */
295 amdgpu_crtc
->event
= NULL
;
300 amdgpu_crtc
->pflip_status
= AMDGPU_FLIP_NONE
;
301 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
303 DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
304 __func__
, amdgpu_crtc
->crtc_id
, amdgpu_crtc
);
306 drm_crtc_vblank_put(&amdgpu_crtc
->base
);
309 static void dm_crtc_high_irq(void *interrupt_params
)
311 struct common_irq_params
*irq_params
= interrupt_params
;
312 struct amdgpu_device
*adev
= irq_params
->adev
;
313 uint8_t crtc_index
= 0;
314 struct amdgpu_crtc
*acrtc
;
316 acrtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_VBLANK
);
319 crtc_index
= acrtc
->crtc_id
;
321 drm_handle_vblank(adev
->ddev
, crtc_index
);
324 static int dm_set_clockgating_state(void *handle
,
325 enum amd_clockgating_state state
)
330 static int dm_set_powergating_state(void *handle
,
331 enum amd_powergating_state state
)
336 /* Prototypes of private functions */
337 static int dm_early_init(void* handle
);
339 static void hotplug_notify_work_func(struct work_struct
*work
)
341 struct amdgpu_display_manager
*dm
= container_of(work
, struct amdgpu_display_manager
, mst_hotplug_work
);
342 struct drm_device
*dev
= dm
->ddev
;
344 drm_kms_helper_hotplug_event(dev
);
347 #if defined(CONFIG_DRM_AMD_DC_FBC)
348 #include "dal_asic_id.h"
349 /* Allocate memory for FBC compressed data */
350 /* TODO: Dynamic allocation */
351 #define AMDGPU_FBC_SIZE (3840 * 2160 * 4)
353 static void amdgpu_dm_initialize_fbc(struct amdgpu_device
*adev
)
356 struct dm_comressor_info
*compressor
= &adev
->dm
.compressor
;
358 if (!compressor
->bo_ptr
) {
359 r
= amdgpu_bo_create_kernel(adev
, AMDGPU_FBC_SIZE
, PAGE_SIZE
,
360 AMDGPU_GEM_DOMAIN_VRAM
, &compressor
->bo_ptr
,
361 &compressor
->gpu_addr
, &compressor
->cpu_addr
);
364 DRM_ERROR("DM: Failed to initialize fbc\n");
373 * Returns 0 on success
375 static int amdgpu_dm_init(struct amdgpu_device
*adev
)
377 struct dc_init_data init_data
;
378 adev
->dm
.ddev
= adev
->ddev
;
379 adev
->dm
.adev
= adev
;
381 /* Zero all the fields */
382 memset(&init_data
, 0, sizeof(init_data
));
384 /* initialize DAL's lock (for SYNC context use) */
385 spin_lock_init(&adev
->dm
.dal_lock
);
387 /* initialize DAL's mutex */
388 mutex_init(&adev
->dm
.dal_mutex
);
390 if(amdgpu_dm_irq_init(adev
)) {
391 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
395 init_data
.asic_id
.chip_family
= adev
->family
;
397 init_data
.asic_id
.pci_revision_id
= adev
->rev_id
;
398 init_data
.asic_id
.hw_internal_rev
= adev
->external_rev_id
;
400 init_data
.asic_id
.vram_width
= adev
->mc
.vram_width
;
401 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
402 init_data
.asic_id
.atombios_base_address
=
403 adev
->mode_info
.atom_context
->bios
;
405 init_data
.driver
= adev
;
407 adev
->dm
.cgs_device
= amdgpu_cgs_create_device(adev
);
409 if (!adev
->dm
.cgs_device
) {
410 DRM_ERROR("amdgpu: failed to create cgs device.\n");
414 init_data
.cgs_device
= adev
->dm
.cgs_device
;
418 init_data
.dce_environment
= DCE_ENV_PRODUCTION_DRV
;
421 init_data
.log_mask
= DC_DEFAULT_LOG_MASK
;
423 init_data
.log_mask
= DC_MIN_LOG_MASK
;
425 #if defined(CONFIG_DRM_AMD_DC_FBC)
426 if (adev
->family
== FAMILY_CZ
)
427 amdgpu_dm_initialize_fbc(adev
);
428 init_data
.fbc_gpu_addr
= adev
->dm
.compressor
.gpu_addr
;
430 /* Display Core create. */
431 adev
->dm
.dc
= dc_create(&init_data
);
434 DRM_INFO("Display Core initialized!\n");
436 DRM_INFO("Display Core failed to initialize!\n");
440 INIT_WORK(&adev
->dm
.mst_hotplug_work
, hotplug_notify_work_func
);
442 adev
->dm
.freesync_module
= mod_freesync_create(adev
->dm
.dc
);
443 if (!adev
->dm
.freesync_module
) {
445 "amdgpu: failed to initialize freesync_module.\n");
447 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
448 adev
->dm
.freesync_module
);
450 if (amdgpu_dm_initialize_drm_device(adev
)) {
452 "amdgpu: failed to initialize sw for display support.\n");
456 /* Update the actual used number of crtc */
457 adev
->mode_info
.num_crtc
= adev
->dm
.display_indexes_num
;
459 /* TODO: Add_display_info? */
461 /* TODO use dynamic cursor width */
462 adev
->ddev
->mode_config
.cursor_width
= adev
->dm
.dc
->caps
.max_cursor_size
;
463 adev
->ddev
->mode_config
.cursor_height
= adev
->dm
.dc
->caps
.max_cursor_size
;
465 if (drm_vblank_init(adev
->ddev
, adev
->dm
.display_indexes_num
)) {
467 "amdgpu: failed to initialize sw for display support.\n");
471 DRM_DEBUG_DRIVER("KMS initialized.\n");
475 amdgpu_dm_fini(adev
);
480 static void amdgpu_dm_fini(struct amdgpu_device
*adev
)
482 amdgpu_dm_destroy_drm_device(&adev
->dm
);
484 * TODO: pageflip, vlank interrupt
486 * amdgpu_dm_irq_fini(adev);
489 if (adev
->dm
.cgs_device
) {
490 amdgpu_cgs_destroy_device(adev
->dm
.cgs_device
);
491 adev
->dm
.cgs_device
= NULL
;
493 if (adev
->dm
.freesync_module
) {
494 mod_freesync_destroy(adev
->dm
.freesync_module
);
495 adev
->dm
.freesync_module
= NULL
;
497 /* DC Destroy TODO: Replace destroy DAL */
499 dc_destroy(&adev
->dm
.dc
);
/* amd_ip_funcs.sw_init stub.
 * NOTE(review): body was dropped in the extraction; reconstructed as a
 * no-op returning success — verify against the original file. */
static int dm_sw_init(void *handle)
{
	return 0;
}
/* amd_ip_funcs.sw_fini stub.
 * NOTE(review): body was dropped in the extraction; reconstructed as a
 * no-op returning success — verify against the original file. */
static int dm_sw_fini(void *handle)
{
	return 0;
}
513 static int detect_mst_link_for_all_connectors(struct drm_device
*dev
)
515 struct amdgpu_dm_connector
*aconnector
;
516 struct drm_connector
*connector
;
519 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
521 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
522 aconnector
= to_amdgpu_dm_connector(connector
);
523 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
524 aconnector
->mst_mgr
.aux
) {
525 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
526 aconnector
, aconnector
->base
.base
.id
);
528 ret
= drm_dp_mst_topology_mgr_set_mst(&aconnector
->mst_mgr
, true);
530 DRM_ERROR("DM_MST: Failed to start MST\n");
531 ((struct dc_link
*)aconnector
->dc_link
)->type
= dc_connection_single
;
537 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
541 static int dm_late_init(void *handle
)
543 struct drm_device
*dev
= ((struct amdgpu_device
*)handle
)->ddev
;
545 return detect_mst_link_for_all_connectors(dev
);
548 static void s3_handle_mst(struct drm_device
*dev
, bool suspend
)
550 struct amdgpu_dm_connector
*aconnector
;
551 struct drm_connector
*connector
;
553 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
555 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
556 aconnector
= to_amdgpu_dm_connector(connector
);
557 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
558 !aconnector
->mst_port
) {
561 drm_dp_mst_topology_mgr_suspend(&aconnector
->mst_mgr
);
563 drm_dp_mst_topology_mgr_resume(&aconnector
->mst_mgr
);
567 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
/* amd_ip_funcs.hw_init: create the display manager and init HPD. */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/* amd_ip_funcs.hw_fini: tear down HPD, IRQ handling and the display manager. */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
591 static int dm_suspend(void *handle
)
593 struct amdgpu_device
*adev
= handle
;
594 struct amdgpu_display_manager
*dm
= &adev
->dm
;
597 s3_handle_mst(adev
->ddev
, true);
599 amdgpu_dm_irq_suspend(adev
);
601 WARN_ON(adev
->dm
.cached_state
);
602 adev
->dm
.cached_state
= drm_atomic_helper_suspend(adev
->ddev
);
604 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D3
);
609 static struct amdgpu_dm_connector
*
610 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state
*state
,
611 struct drm_crtc
*crtc
)
614 struct drm_connector_state
*new_con_state
;
615 struct drm_connector
*connector
;
616 struct drm_crtc
*crtc_from_state
;
618 for_each_new_connector_in_state(state
, connector
, new_con_state
, i
) {
619 crtc_from_state
= new_con_state
->crtc
;
621 if (crtc_from_state
== crtc
)
622 return to_amdgpu_dm_connector(connector
);
628 static int dm_resume(void *handle
)
630 struct amdgpu_device
*adev
= handle
;
631 struct amdgpu_display_manager
*dm
= &adev
->dm
;
633 /* power on hardware */
634 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D0
);
639 int amdgpu_dm_display_resume(struct amdgpu_device
*adev
)
641 struct drm_device
*ddev
= adev
->ddev
;
642 struct amdgpu_display_manager
*dm
= &adev
->dm
;
643 struct amdgpu_dm_connector
*aconnector
;
644 struct drm_connector
*connector
;
645 struct drm_crtc
*crtc
;
646 struct drm_crtc_state
*new_crtc_state
;
647 struct dm_crtc_state
*dm_new_crtc_state
;
648 struct drm_plane
*plane
;
649 struct drm_plane_state
*new_plane_state
;
650 struct dm_plane_state
*dm_new_plane_state
;
655 /* program HPD filter */
658 /* On resume we need to rewrite the MSTM control bits to enamble MST*/
659 s3_handle_mst(ddev
, false);
662 * early enable HPD Rx IRQ, should be done before set mode as short
663 * pulse interrupts are used for MST
665 amdgpu_dm_irq_resume_early(adev
);
668 list_for_each_entry(connector
,
669 &ddev
->mode_config
.connector_list
, head
) {
670 aconnector
= to_amdgpu_dm_connector(connector
);
673 * this is the case when traversing through already created
674 * MST connectors, should be skipped
676 if (aconnector
->mst_port
)
679 mutex_lock(&aconnector
->hpd_lock
);
680 dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
);
682 if (aconnector
->fake_enable
&& aconnector
->dc_link
->local_sink
)
683 aconnector
->fake_enable
= false;
685 aconnector
->dc_sink
= NULL
;
686 amdgpu_dm_update_connector_after_detect(aconnector
);
687 mutex_unlock(&aconnector
->hpd_lock
);
690 /* Force mode set in atomic comit */
691 for_each_new_crtc_in_state(adev
->dm
.cached_state
, crtc
, new_crtc_state
, i
)
692 new_crtc_state
->active_changed
= true;
695 * atomic_check is expected to create the dc states. We need to release
696 * them here, since they were duplicated as part of the suspend
699 for_each_new_crtc_in_state(adev
->dm
.cached_state
, crtc
, new_crtc_state
, i
) {
700 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
701 if (dm_new_crtc_state
->stream
) {
702 WARN_ON(kref_read(&dm_new_crtc_state
->stream
->refcount
) > 1);
703 dc_stream_release(dm_new_crtc_state
->stream
);
704 dm_new_crtc_state
->stream
= NULL
;
708 for_each_new_plane_in_state(adev
->dm
.cached_state
, plane
, new_plane_state
, i
) {
709 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
710 if (dm_new_plane_state
->dc_state
) {
711 WARN_ON(kref_read(&dm_new_plane_state
->dc_state
->refcount
) > 1);
712 dc_plane_state_release(dm_new_plane_state
->dc_state
);
713 dm_new_plane_state
->dc_state
= NULL
;
717 ret
= drm_atomic_helper_resume(ddev
, adev
->dm
.cached_state
);
719 adev
->dm
.cached_state
= NULL
;
721 amdgpu_dm_irq_resume_late(adev
);
726 static const struct amd_ip_funcs amdgpu_dm_funcs
= {
728 .early_init
= dm_early_init
,
729 .late_init
= dm_late_init
,
730 .sw_init
= dm_sw_init
,
731 .sw_fini
= dm_sw_fini
,
732 .hw_init
= dm_hw_init
,
733 .hw_fini
= dm_hw_fini
,
734 .suspend
= dm_suspend
,
736 .is_idle
= dm_is_idle
,
737 .wait_for_idle
= dm_wait_for_idle
,
738 .check_soft_reset
= dm_check_soft_reset
,
739 .soft_reset
= dm_soft_reset
,
740 .set_clockgating_state
= dm_set_clockgating_state
,
741 .set_powergating_state
= dm_set_powergating_state
,
744 const struct amdgpu_ip_block_version dm_ip_block
=
746 .type
= AMD_IP_BLOCK_TYPE_DCE
,
750 .funcs
= &amdgpu_dm_funcs
,
754 static struct drm_atomic_state
*
755 dm_atomic_state_alloc(struct drm_device
*dev
)
757 struct dm_atomic_state
*state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
762 if (drm_atomic_state_init(dev
, &state
->base
) < 0)
773 dm_atomic_state_clear(struct drm_atomic_state
*state
)
775 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
777 if (dm_state
->context
) {
778 dc_release_state(dm_state
->context
);
779 dm_state
->context
= NULL
;
782 drm_atomic_state_default_clear(state
);
/* Free callback: release base-state resources, then the container. */
static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	drm_atomic_state_default_release(state);
	kfree(dm_state);
}
793 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs
= {
794 .fb_create
= amdgpu_user_framebuffer_create
,
795 .output_poll_changed
= amdgpu_output_poll_changed
,
796 .atomic_check
= amdgpu_dm_atomic_check
,
797 .atomic_commit
= amdgpu_dm_atomic_commit
,
798 .atomic_state_alloc
= dm_atomic_state_alloc
,
799 .atomic_state_clear
= dm_atomic_state_clear
,
800 .atomic_state_free
= dm_atomic_state_alloc_free
803 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs
= {
804 .atomic_commit_tail
= amdgpu_dm_atomic_commit_tail
808 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
)
810 struct drm_connector
*connector
= &aconnector
->base
;
811 struct drm_device
*dev
= connector
->dev
;
812 struct dc_sink
*sink
;
814 /* MST handled by drm_mst framework */
815 if (aconnector
->mst_mgr
.mst_state
== true)
819 sink
= aconnector
->dc_link
->local_sink
;
821 /* Edid mgmt connector gets first update only in mode_valid hook and then
822 * the connector sink is set to either fake or physical sink depends on link status.
823 * don't do it here if u are during boot
825 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
826 && aconnector
->dc_em_sink
) {
828 /* For S3 resume with headless use eml_sink to fake stream
829 * because on resume connecotr->sink is set ti NULL
831 mutex_lock(&dev
->mode_config
.mutex
);
834 if (aconnector
->dc_sink
) {
835 amdgpu_dm_remove_sink_from_freesync_module(
837 /* retain and release bellow are used for
838 * bump up refcount for sink because the link don't point
839 * to it anymore after disconnect so on next crtc to connector
840 * reshuffle by UMD we will get into unwanted dc_sink release
842 if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
843 dc_sink_release(aconnector
->dc_sink
);
845 aconnector
->dc_sink
= sink
;
846 amdgpu_dm_add_sink_to_freesync_module(
847 connector
, aconnector
->edid
);
849 amdgpu_dm_remove_sink_from_freesync_module(connector
);
850 if (!aconnector
->dc_sink
)
851 aconnector
->dc_sink
= aconnector
->dc_em_sink
;
852 else if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
853 dc_sink_retain(aconnector
->dc_sink
);
856 mutex_unlock(&dev
->mode_config
.mutex
);
861 * TODO: temporary guard to look for proper fix
862 * if this sink is MST sink, we should not do anything
864 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
)
867 if (aconnector
->dc_sink
== sink
) {
868 /* We got a DP short pulse (Link Loss, DP CTS, etc...).
870 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
871 aconnector
->connector_id
);
875 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
876 aconnector
->connector_id
, aconnector
->dc_sink
, sink
);
878 mutex_lock(&dev
->mode_config
.mutex
);
880 /* 1. Update status of the drm connector
881 * 2. Send an event and let userspace tell us what to do */
883 /* TODO: check if we still need the S3 mode update workaround.
884 * If yes, put it here. */
885 if (aconnector
->dc_sink
)
886 amdgpu_dm_remove_sink_from_freesync_module(
889 aconnector
->dc_sink
= sink
;
890 if (sink
->dc_edid
.length
== 0) {
891 aconnector
->edid
= NULL
;
894 (struct edid
*) sink
->dc_edid
.raw_edid
;
897 drm_mode_connector_update_edid_property(connector
,
900 amdgpu_dm_add_sink_to_freesync_module(connector
, aconnector
->edid
);
903 amdgpu_dm_remove_sink_from_freesync_module(connector
);
904 drm_mode_connector_update_edid_property(connector
, NULL
);
905 aconnector
->num_modes
= 0;
906 aconnector
->dc_sink
= NULL
;
907 aconnector
->edid
= NULL
;
910 mutex_unlock(&dev
->mode_config
.mutex
);
913 static void handle_hpd_irq(void *param
)
915 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
916 struct drm_connector
*connector
= &aconnector
->base
;
917 struct drm_device
*dev
= connector
->dev
;
919 /* In case of failure or MST no need to update connector status or notify the OS
920 * since (for MST case) MST does this in it's own context.
922 mutex_lock(&aconnector
->hpd_lock
);
924 if (aconnector
->fake_enable
)
925 aconnector
->fake_enable
= false;
927 if (dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
)) {
928 amdgpu_dm_update_connector_after_detect(aconnector
);
931 drm_modeset_lock_all(dev
);
932 dm_restore_drm_connector_state(dev
, connector
);
933 drm_modeset_unlock_all(dev
);
935 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
936 drm_kms_helper_hotplug_event(dev
);
938 mutex_unlock(&aconnector
->hpd_lock
);
942 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector
*aconnector
)
944 uint8_t esi
[DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
] = { 0 };
946 bool new_irq_handled
= false;
948 int dpcd_bytes_to_read
;
950 const int max_process_count
= 30;
951 int process_count
= 0;
953 const struct dc_link_status
*link_status
= dc_link_get_status(aconnector
->dc_link
);
955 if (link_status
->dpcd_caps
->dpcd_rev
.raw
< 0x12) {
956 dpcd_bytes_to_read
= DP_LANE0_1_STATUS
- DP_SINK_COUNT
;
957 /* DPCD 0x200 - 0x201 for downstream IRQ */
958 dpcd_addr
= DP_SINK_COUNT
;
960 dpcd_bytes_to_read
= DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
;
961 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
962 dpcd_addr
= DP_SINK_COUNT_ESI
;
965 dret
= drm_dp_dpcd_read(
966 &aconnector
->dm_dp_aux
.aux
,
971 while (dret
== dpcd_bytes_to_read
&&
972 process_count
< max_process_count
) {
978 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi
[0], esi
[1], esi
[2]);
979 /* handle HPD short pulse irq */
980 if (aconnector
->mst_mgr
.mst_state
)
982 &aconnector
->mst_mgr
,
986 if (new_irq_handled
) {
987 /* ACK at DPCD to notify down stream */
988 const int ack_dpcd_bytes_to_write
=
989 dpcd_bytes_to_read
- 1;
991 for (retry
= 0; retry
< 3; retry
++) {
994 wret
= drm_dp_dpcd_write(
995 &aconnector
->dm_dp_aux
.aux
,
998 ack_dpcd_bytes_to_write
);
999 if (wret
== ack_dpcd_bytes_to_write
)
1003 /* check if there is new irq to be handle */
1004 dret
= drm_dp_dpcd_read(
1005 &aconnector
->dm_dp_aux
.aux
,
1008 dpcd_bytes_to_read
);
1010 new_irq_handled
= false;
1016 if (process_count
== max_process_count
)
1017 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
1020 static void handle_hpd_rx_irq(void *param
)
1022 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
1023 struct drm_connector
*connector
= &aconnector
->base
;
1024 struct drm_device
*dev
= connector
->dev
;
1025 struct dc_link
*dc_link
= aconnector
->dc_link
;
1026 bool is_mst_root_connector
= aconnector
->mst_mgr
.mst_state
;
1028 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
1029 * conflict, after implement i2c helper, this mutex should be
1032 if (dc_link
->type
!= dc_connection_mst_branch
)
1033 mutex_lock(&aconnector
->hpd_lock
);
1035 if (dc_link_handle_hpd_rx_irq(dc_link
, NULL
) &&
1036 !is_mst_root_connector
) {
1037 /* Downstream Port status changed. */
1038 if (dc_link_detect(dc_link
, DETECT_REASON_HPDRX
)) {
1039 amdgpu_dm_update_connector_after_detect(aconnector
);
1042 drm_modeset_lock_all(dev
);
1043 dm_restore_drm_connector_state(dev
, connector
);
1044 drm_modeset_unlock_all(dev
);
1046 drm_kms_helper_hotplug_event(dev
);
1049 if ((dc_link
->cur_link_settings
.lane_count
!= LANE_COUNT_UNKNOWN
) ||
1050 (dc_link
->type
== dc_connection_mst_branch
))
1051 dm_handle_hpd_rx_irq(aconnector
);
1053 if (dc_link
->type
!= dc_connection_mst_branch
)
1054 mutex_unlock(&aconnector
->hpd_lock
);
1057 static void register_hpd_handlers(struct amdgpu_device
*adev
)
1059 struct drm_device
*dev
= adev
->ddev
;
1060 struct drm_connector
*connector
;
1061 struct amdgpu_dm_connector
*aconnector
;
1062 const struct dc_link
*dc_link
;
1063 struct dc_interrupt_params int_params
= {0};
1065 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1066 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1068 list_for_each_entry(connector
,
1069 &dev
->mode_config
.connector_list
, head
) {
1071 aconnector
= to_amdgpu_dm_connector(connector
);
1072 dc_link
= aconnector
->dc_link
;
1074 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd
) {
1075 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1076 int_params
.irq_source
= dc_link
->irq_source_hpd
;
1078 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1080 (void *) aconnector
);
1083 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd_rx
) {
1085 /* Also register for DP short pulse (hpd_rx). */
1086 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1087 int_params
.irq_source
= dc_link
->irq_source_hpd_rx
;
1089 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1091 (void *) aconnector
);
1096 /* Register IRQ sources and initialize IRQ callbacks */
1097 static int dce110_register_irq_handlers(struct amdgpu_device
*adev
)
1099 struct dc
*dc
= adev
->dm
.dc
;
1100 struct common_irq_params
*c_irq_params
;
1101 struct dc_interrupt_params int_params
= {0};
1104 unsigned client_id
= AMDGPU_IH_CLIENTID_LEGACY
;
1106 if (adev
->asic_type
== CHIP_VEGA10
||
1107 adev
->asic_type
== CHIP_RAVEN
)
1108 client_id
= AMDGPU_IH_CLIENTID_DCE
;
1110 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1111 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1113 /* Actions of amdgpu_irq_add_id():
1114 * 1. Register a set() function with base driver.
1115 * Base driver will call set() function to enable/disable an
1116 * interrupt in DC hardware.
1117 * 2. Register amdgpu_dm_irq_handler().
1118 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1119 * coming from DC hardware.
1120 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1121 * for acknowledging and handling. */
1123 /* Use VBLANK interrupt */
1124 for (i
= VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
; i
<= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0
; i
++) {
1125 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->crtc_irq
);
1127 DRM_ERROR("Failed to add crtc irq id!\n");
1131 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1132 int_params
.irq_source
=
1133 dc_interrupt_to_irq_source(dc
, i
, 0);
1135 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1137 c_irq_params
->adev
= adev
;
1138 c_irq_params
->irq_src
= int_params
.irq_source
;
1140 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1141 dm_crtc_high_irq
, c_irq_params
);
1144 /* Use GRPH_PFLIP interrupt */
1145 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
1146 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
1147 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
1149 DRM_ERROR("Failed to add page flip irq id!\n");
1153 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1154 int_params
.irq_source
=
1155 dc_interrupt_to_irq_source(dc
, i
, 0);
1157 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1159 c_irq_params
->adev
= adev
;
1160 c_irq_params
->irq_src
= int_params
.irq_source
;
1162 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1163 dm_pflip_high_irq
, c_irq_params
);
1168 r
= amdgpu_irq_add_id(adev
, client_id
,
1169 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
1171 DRM_ERROR("Failed to add hpd irq id!\n");
1175 register_hpd_handlers(adev
);
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks for DCN 1.0 ASICs. */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
1266 static int amdgpu_dm_mode_config_init(struct amdgpu_device
*adev
)
1270 adev
->mode_info
.mode_config_initialized
= true;
1272 adev
->ddev
->mode_config
.funcs
= (void *)&amdgpu_dm_mode_funcs
;
1273 adev
->ddev
->mode_config
.helper_private
= &amdgpu_dm_mode_config_helperfuncs
;
1275 adev
->ddev
->mode_config
.max_width
= 16384;
1276 adev
->ddev
->mode_config
.max_height
= 16384;
1278 adev
->ddev
->mode_config
.preferred_depth
= 24;
1279 adev
->ddev
->mode_config
.prefer_shadow
= 1;
1280 /* indicate support of immediate flip */
1281 adev
->ddev
->mode_config
.async_page_flip
= true;
1283 adev
->ddev
->mode_config
.fb_base
= adev
->mc
.aper_base
;
1285 r
= amdgpu_modeset_create_props(adev
);
1292 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1293 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1295 static int amdgpu_dm_backlight_update_status(struct backlight_device
*bd
)
1297 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
1299 if (dc_link_set_backlight_level(dm
->backlight_link
,
1300 bd
->props
.brightness
, 0, 0))
1306 static int amdgpu_dm_backlight_get_brightness(struct backlight_device
*bd
)
1308 return bd
->props
.brightness
;
1311 static const struct backlight_ops amdgpu_dm_backlight_ops
= {
1312 .get_brightness
= amdgpu_dm_backlight_get_brightness
,
1313 .update_status
= amdgpu_dm_backlight_update_status
,
1317 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager
*dm
)
1320 struct backlight_properties props
= { 0 };
1322 props
.max_brightness
= AMDGPU_MAX_BL_LEVEL
;
1323 props
.type
= BACKLIGHT_RAW
;
1325 snprintf(bl_name
, sizeof(bl_name
), "amdgpu_bl%d",
1326 dm
->adev
->ddev
->primary
->index
);
1328 dm
->backlight_dev
= backlight_device_register(bl_name
,
1329 dm
->adev
->ddev
->dev
,
1331 &amdgpu_dm_backlight_ops
,
1334 if (IS_ERR(dm
->backlight_dev
))
1335 DRM_ERROR("DM: Backlight registration failed!\n");
1337 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name
);
1342 /* In this architecture, the association
1343 * connector -> encoder -> crtc
1344 * id not really requried. The crtc and connector will hold the
1345 * display_index as an abstraction to use with DAL component
1347 * Returns 0 on success
1349 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
)
1351 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1353 struct amdgpu_dm_connector
*aconnector
= NULL
;
1354 struct amdgpu_encoder
*aencoder
= NULL
;
1355 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
1357 unsigned long possible_crtcs
;
1359 link_cnt
= dm
->dc
->caps
.max_links
;
1360 if (amdgpu_dm_mode_config_init(dm
->adev
)) {
1361 DRM_ERROR("DM: Failed to initialize mode config\n");
1365 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; i
++) {
1366 struct amdgpu_plane
*plane
;
1368 plane
= kzalloc(sizeof(struct amdgpu_plane
), GFP_KERNEL
);
1369 mode_info
->planes
[i
] = plane
;
1372 DRM_ERROR("KMS: Failed to allocate plane\n");
1375 plane
->base
.type
= mode_info
->plane_type
[i
];
1378 * HACK: IGT tests expect that each plane can only have one
1379 * one possible CRTC. For now, set one CRTC for each
1380 * plane that is not an underlay, but still allow multiple
1381 * CRTCs for underlay planes.
1383 possible_crtcs
= 1 << i
;
1384 if (i
>= dm
->dc
->caps
.max_streams
)
1385 possible_crtcs
= 0xff;
1387 if (amdgpu_dm_plane_init(dm
, mode_info
->planes
[i
], possible_crtcs
)) {
1388 DRM_ERROR("KMS: Failed to initialize plane\n");
1393 for (i
= 0; i
< dm
->dc
->caps
.max_streams
; i
++)
1394 if (amdgpu_dm_crtc_init(dm
, &mode_info
->planes
[i
]->base
, i
)) {
1395 DRM_ERROR("KMS: Failed to initialize crtc\n");
1399 dm
->display_indexes_num
= dm
->dc
->caps
.max_streams
;
1401 /* loops over all connectors on the board */
1402 for (i
= 0; i
< link_cnt
; i
++) {
1404 if (i
> AMDGPU_DM_MAX_DISPLAY_INDEX
) {
1406 "KMS: Cannot support more than %d display indexes\n",
1407 AMDGPU_DM_MAX_DISPLAY_INDEX
);
1411 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
1415 aencoder
= kzalloc(sizeof(*aencoder
), GFP_KERNEL
);
1419 if (amdgpu_dm_encoder_init(dm
->ddev
, aencoder
, i
)) {
1420 DRM_ERROR("KMS: Failed to initialize encoder\n");
1424 if (amdgpu_dm_connector_init(dm
, aconnector
, i
, aencoder
)) {
1425 DRM_ERROR("KMS: Failed to initialize connector\n");
1429 if (dc_link_detect(dc_get_link_at_index(dm
->dc
, i
),
1430 DETECT_REASON_BOOT
))
1431 amdgpu_dm_update_connector_after_detect(aconnector
);
1434 /* Software is initialized. Now we can register interrupt handlers. */
1435 switch (adev
->asic_type
) {
1445 case CHIP_POLARIS11
:
1446 case CHIP_POLARIS10
:
1447 case CHIP_POLARIS12
:
1449 if (dce110_register_irq_handlers(dm
->adev
)) {
1450 DRM_ERROR("DM: Failed to initialize IRQ\n");
1454 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1456 if (dcn10_register_irq_handlers(dm
->adev
)) {
1457 DRM_ERROR("DM: Failed to initialize IRQ\n");
1461 * Temporary disable until pplib/smu interaction is implemented
1463 dm
->dc
->debug
.disable_stutter
= true;
1467 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev
->asic_type
);
1475 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; i
++)
1476 kfree(mode_info
->planes
[i
]);
1480 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
)
1482 drm_mode_config_cleanup(dm
->ddev
);
1486 /******************************************************************************
1487 * amdgpu_display_funcs functions
1488 *****************************************************************************/
/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
1502 static void dm_set_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
,
1505 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1508 static u8
dm_get_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
)
1510 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1514 static int amdgpu_notify_freesync(struct drm_device
*dev
, void *data
,
1515 struct drm_file
*filp
)
1517 struct mod_freesync_params freesync_params
;
1518 uint8_t num_streams
;
1521 struct amdgpu_device
*adev
= dev
->dev_private
;
1524 /* Get freesync enable flag from DRM */
1526 num_streams
= dc_get_current_stream_count(adev
->dm
.dc
);
1528 for (i
= 0; i
< num_streams
; i
++) {
1529 struct dc_stream_state
*stream
;
1530 stream
= dc_get_stream_at_index(adev
->dm
.dc
, i
);
1532 mod_freesync_update_state(adev
->dm
.freesync_module
,
1533 &stream
, 1, &freesync_params
);
1539 static const struct amdgpu_display_funcs dm_display_funcs
= {
1540 .bandwidth_update
= dm_bandwidth_update
, /* called unconditionally */
1541 .vblank_get_counter
= dm_vblank_get_counter
,/* called unconditionally */
1542 .vblank_wait
= NULL
,
1543 .backlight_set_level
=
1544 dm_set_backlight_level
,/* called unconditionally */
1545 .backlight_get_level
=
1546 dm_get_backlight_level
,/* called unconditionally */
1547 .hpd_sense
= NULL
,/* called unconditionally */
1548 .hpd_set_polarity
= NULL
, /* called unconditionally */
1549 .hpd_get_gpio_reg
= NULL
, /* VBIOS parsing. DAL does it. */
1550 .page_flip_get_scanoutpos
=
1551 dm_crtc_get_scanoutpos
,/* called unconditionally */
1552 .add_encoder
= NULL
, /* VBIOS parsing. DAL does it. */
1553 .add_connector
= NULL
, /* VBIOS parsing. DAL does it. */
1554 .notify_freesync
= amdgpu_notify_freesync
,
#if defined(CONFIG_DEBUG_KERNEL_DC)

/* Debug-only sysfs hook: write 1/0 to force a display resume/suspend cycle.
 * NOTE(review): the suspend/resume branch structure was lost in the damaged
 * source; reconstructed from the upstream driver — verify the dm_resume()/
 * dm_suspend() calls against the original.
 */
static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			amdgpu_dm_display_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif
1589 static int dm_early_init(void *handle
)
1591 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1593 adev
->ddev
->driver
->driver_features
|= DRIVER_ATOMIC
;
1594 amdgpu_dm_set_irq_funcs(adev
);
1596 switch (adev
->asic_type
) {
1599 adev
->mode_info
.num_crtc
= 6;
1600 adev
->mode_info
.num_hpd
= 6;
1601 adev
->mode_info
.num_dig
= 6;
1602 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1605 adev
->mode_info
.num_crtc
= 4;
1606 adev
->mode_info
.num_hpd
= 6;
1607 adev
->mode_info
.num_dig
= 7;
1608 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1612 adev
->mode_info
.num_crtc
= 2;
1613 adev
->mode_info
.num_hpd
= 6;
1614 adev
->mode_info
.num_dig
= 6;
1615 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1619 adev
->mode_info
.num_crtc
= 6;
1620 adev
->mode_info
.num_hpd
= 6;
1621 adev
->mode_info
.num_dig
= 7;
1622 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1625 adev
->mode_info
.num_crtc
= 3;
1626 adev
->mode_info
.num_hpd
= 6;
1627 adev
->mode_info
.num_dig
= 9;
1628 adev
->mode_info
.plane_type
= dm_plane_type_carizzo
;
1631 adev
->mode_info
.num_crtc
= 2;
1632 adev
->mode_info
.num_hpd
= 6;
1633 adev
->mode_info
.num_dig
= 9;
1634 adev
->mode_info
.plane_type
= dm_plane_type_stoney
;
1636 case CHIP_POLARIS11
:
1637 case CHIP_POLARIS12
:
1638 adev
->mode_info
.num_crtc
= 5;
1639 adev
->mode_info
.num_hpd
= 5;
1640 adev
->mode_info
.num_dig
= 5;
1641 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1643 case CHIP_POLARIS10
:
1644 adev
->mode_info
.num_crtc
= 6;
1645 adev
->mode_info
.num_hpd
= 6;
1646 adev
->mode_info
.num_dig
= 6;
1647 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1650 adev
->mode_info
.num_crtc
= 6;
1651 adev
->mode_info
.num_hpd
= 6;
1652 adev
->mode_info
.num_dig
= 6;
1653 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1655 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1657 adev
->mode_info
.num_crtc
= 4;
1658 adev
->mode_info
.num_hpd
= 4;
1659 adev
->mode_info
.num_dig
= 4;
1660 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1664 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev
->asic_type
);
1668 if (adev
->mode_info
.funcs
== NULL
)
1669 adev
->mode_info
.funcs
= &dm_display_funcs
;
1671 /* Note: Do NOT change adev->audio_endpt_rreg and
1672 * adev->audio_endpt_wreg because they are initialised in
1673 * amdgpu_device_init() */
1674 #if defined(CONFIG_DEBUG_KERNEL_DC)
1677 &dev_attr_s3_debug
);
1683 struct dm_connector_state
{
1684 struct drm_connector_state base
;
1686 enum amdgpu_rmx_type scaling
;
1687 uint8_t underscan_vborder
;
1688 uint8_t underscan_hborder
;
1689 bool underscan_enable
;
1692 #define to_dm_connector_state(x)\
1693 container_of((x), struct dm_connector_state, base)
1695 static bool modeset_required(struct drm_crtc_state
*crtc_state
,
1696 struct dc_stream_state
*new_stream
,
1697 struct dc_stream_state
*old_stream
)
1699 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1702 if (!crtc_state
->enable
)
1705 return crtc_state
->active
;
1708 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
1710 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1713 return !crtc_state
->enable
|| !crtc_state
->active
;
/* drm_encoder_funcs.destroy: clean up and free the encoder. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
1722 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
1723 .destroy
= amdgpu_dm_encoder_destroy
,
1726 static bool fill_rects_from_plane_state(const struct drm_plane_state
*state
,
1727 struct dc_plane_state
*plane_state
)
1729 plane_state
->src_rect
.x
= state
->src_x
>> 16;
1730 plane_state
->src_rect
.y
= state
->src_y
>> 16;
1731 /*we ignore for now mantissa and do not to deal with floating pixels :(*/
1732 plane_state
->src_rect
.width
= state
->src_w
>> 16;
1734 if (plane_state
->src_rect
.width
== 0)
1737 plane_state
->src_rect
.height
= state
->src_h
>> 16;
1738 if (plane_state
->src_rect
.height
== 0)
1741 plane_state
->dst_rect
.x
= state
->crtc_x
;
1742 plane_state
->dst_rect
.y
= state
->crtc_y
;
1744 if (state
->crtc_w
== 0)
1747 plane_state
->dst_rect
.width
= state
->crtc_w
;
1749 if (state
->crtc_h
== 0)
1752 plane_state
->dst_rect
.height
= state
->crtc_h
;
1754 plane_state
->clip_rect
= plane_state
->dst_rect
;
1756 switch (state
->rotation
& DRM_MODE_ROTATE_MASK
) {
1757 case DRM_MODE_ROTATE_0
:
1758 plane_state
->rotation
= ROTATION_ANGLE_0
;
1760 case DRM_MODE_ROTATE_90
:
1761 plane_state
->rotation
= ROTATION_ANGLE_90
;
1763 case DRM_MODE_ROTATE_180
:
1764 plane_state
->rotation
= ROTATION_ANGLE_180
;
1766 case DRM_MODE_ROTATE_270
:
1767 plane_state
->rotation
= ROTATION_ANGLE_270
;
1770 plane_state
->rotation
= ROTATION_ANGLE_0
;
1776 static int get_fb_info(const struct amdgpu_framebuffer
*amdgpu_fb
,
1777 uint64_t *tiling_flags
,
1778 uint64_t *fb_location
)
1780 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->obj
);
1781 int r
= amdgpu_bo_reserve(rbo
, false);
1784 // Don't show error msg. when return -ERESTARTSYS
1785 if (r
!= -ERESTARTSYS
)
1786 DRM_ERROR("Unable to reserve buffer: %d\n", r
);
1791 *fb_location
= amdgpu_bo_gpu_offset(rbo
);
1794 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
1796 amdgpu_bo_unreserve(rbo
);
1801 static int fill_plane_attributes_from_fb(struct amdgpu_device
*adev
,
1802 struct dc_plane_state
*plane_state
,
1803 const struct amdgpu_framebuffer
*amdgpu_fb
,
1806 uint64_t tiling_flags
;
1807 uint64_t fb_location
= 0;
1808 uint64_t chroma_addr
= 0;
1809 unsigned int awidth
;
1810 const struct drm_framebuffer
*fb
= &amdgpu_fb
->base
;
1812 struct drm_format_name_buf format_name
;
1817 addReq
== true ? &fb_location
:NULL
);
1822 switch (fb
->format
->format
) {
1824 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
1826 case DRM_FORMAT_RGB565
:
1827 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
1829 case DRM_FORMAT_XRGB8888
:
1830 case DRM_FORMAT_ARGB8888
:
1831 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
1833 case DRM_FORMAT_XRGB2101010
:
1834 case DRM_FORMAT_ARGB2101010
:
1835 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
1837 case DRM_FORMAT_XBGR2101010
:
1838 case DRM_FORMAT_ABGR2101010
:
1839 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
1841 case DRM_FORMAT_NV21
:
1842 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
1844 case DRM_FORMAT_NV12
:
1845 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
1848 DRM_ERROR("Unsupported screen format %s\n",
1849 drm_get_format_name(fb
->format
->format
, &format_name
));
1853 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
1854 plane_state
->address
.type
= PLN_ADDR_TYPE_GRAPHICS
;
1855 plane_state
->address
.grph
.addr
.low_part
= lower_32_bits(fb_location
);
1856 plane_state
->address
.grph
.addr
.high_part
= upper_32_bits(fb_location
);
1857 plane_state
->plane_size
.grph
.surface_size
.x
= 0;
1858 plane_state
->plane_size
.grph
.surface_size
.y
= 0;
1859 plane_state
->plane_size
.grph
.surface_size
.width
= fb
->width
;
1860 plane_state
->plane_size
.grph
.surface_size
.height
= fb
->height
;
1861 plane_state
->plane_size
.grph
.surface_pitch
=
1862 fb
->pitches
[0] / fb
->format
->cpp
[0];
1863 /* TODO: unhardcode */
1864 plane_state
->color_space
= COLOR_SPACE_SRGB
;
1867 awidth
= ALIGN(fb
->width
, 64);
1868 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
1869 plane_state
->address
.video_progressive
.luma_addr
.low_part
1870 = lower_32_bits(fb_location
);
1871 plane_state
->address
.video_progressive
.luma_addr
.high_part
1872 = upper_32_bits(fb_location
);
1873 chroma_addr
= fb_location
+ (u64
)(awidth
* fb
->height
);
1874 plane_state
->address
.video_progressive
.chroma_addr
.low_part
1875 = lower_32_bits(chroma_addr
);
1876 plane_state
->address
.video_progressive
.chroma_addr
.high_part
1877 = upper_32_bits(chroma_addr
);
1878 plane_state
->plane_size
.video
.luma_size
.x
= 0;
1879 plane_state
->plane_size
.video
.luma_size
.y
= 0;
1880 plane_state
->plane_size
.video
.luma_size
.width
= awidth
;
1881 plane_state
->plane_size
.video
.luma_size
.height
= fb
->height
;
1882 /* TODO: unhardcode */
1883 plane_state
->plane_size
.video
.luma_pitch
= awidth
;
1885 plane_state
->plane_size
.video
.chroma_size
.x
= 0;
1886 plane_state
->plane_size
.video
.chroma_size
.y
= 0;
1887 plane_state
->plane_size
.video
.chroma_size
.width
= awidth
;
1888 plane_state
->plane_size
.video
.chroma_size
.height
= fb
->height
;
1889 plane_state
->plane_size
.video
.chroma_pitch
= awidth
/ 2;
1891 /* TODO: unhardcode */
1892 plane_state
->color_space
= COLOR_SPACE_YCBCR709
;
1895 memset(&plane_state
->tiling_info
, 0, sizeof(plane_state
->tiling_info
));
1897 /* Fill GFX8 params */
1898 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
) {
1899 unsigned int bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
1901 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
1902 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
1903 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
1904 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
1905 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
1907 /* XXX fix me for VI */
1908 plane_state
->tiling_info
.gfx8
.num_banks
= num_banks
;
1909 plane_state
->tiling_info
.gfx8
.array_mode
=
1910 DC_ARRAY_2D_TILED_THIN1
;
1911 plane_state
->tiling_info
.gfx8
.tile_split
= tile_split
;
1912 plane_state
->tiling_info
.gfx8
.bank_width
= bankw
;
1913 plane_state
->tiling_info
.gfx8
.bank_height
= bankh
;
1914 plane_state
->tiling_info
.gfx8
.tile_aspect
= mtaspect
;
1915 plane_state
->tiling_info
.gfx8
.tile_mode
=
1916 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
1917 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
1918 == DC_ARRAY_1D_TILED_THIN1
) {
1919 plane_state
->tiling_info
.gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
1922 plane_state
->tiling_info
.gfx8
.pipe_config
=
1923 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
1925 if (adev
->asic_type
== CHIP_VEGA10
||
1926 adev
->asic_type
== CHIP_RAVEN
) {
1927 /* Fill GFX9 params */
1928 plane_state
->tiling_info
.gfx9
.num_pipes
=
1929 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
1930 plane_state
->tiling_info
.gfx9
.num_banks
=
1931 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
1932 plane_state
->tiling_info
.gfx9
.pipe_interleave
=
1933 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
1934 plane_state
->tiling_info
.gfx9
.num_shader_engines
=
1935 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
1936 plane_state
->tiling_info
.gfx9
.max_compressed_frags
=
1937 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
1938 plane_state
->tiling_info
.gfx9
.num_rb_per_se
=
1939 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
1940 plane_state
->tiling_info
.gfx9
.swizzle
=
1941 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
1942 plane_state
->tiling_info
.gfx9
.shaderEnable
= 1;
1945 plane_state
->visible
= true;
1946 plane_state
->scaling_quality
.h_taps_c
= 0;
1947 plane_state
->scaling_quality
.v_taps_c
= 0;
1949 /* is this needed? is plane_state zeroed at allocation? */
1950 plane_state
->scaling_quality
.h_taps
= 0;
1951 plane_state
->scaling_quality
.v_taps
= 0;
1952 plane_state
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
1958 static void fill_gamma_from_crtc_state(const struct drm_crtc_state
*crtc_state
,
1959 struct dc_plane_state
*plane_state
)
1962 struct dc_gamma
*gamma
;
1963 struct drm_color_lut
*lut
=
1964 (struct drm_color_lut
*) crtc_state
->gamma_lut
->data
;
1966 gamma
= dc_create_gamma();
1968 if (gamma
== NULL
) {
1973 gamma
->type
= GAMMA_RGB_256
;
1974 gamma
->num_entries
= GAMMA_RGB_256_ENTRIES
;
1975 for (i
= 0; i
< GAMMA_RGB_256_ENTRIES
; i
++) {
1976 gamma
->entries
.red
[i
] = dal_fixed31_32_from_int(lut
[i
].red
);
1977 gamma
->entries
.green
[i
] = dal_fixed31_32_from_int(lut
[i
].green
);
1978 gamma
->entries
.blue
[i
] = dal_fixed31_32_from_int(lut
[i
].blue
);
1981 plane_state
->gamma_correction
= gamma
;
1984 static int fill_plane_attributes(struct amdgpu_device
*adev
,
1985 struct dc_plane_state
*dc_plane_state
,
1986 struct drm_plane_state
*plane_state
,
1987 struct drm_crtc_state
*crtc_state
,
1990 const struct amdgpu_framebuffer
*amdgpu_fb
=
1991 to_amdgpu_framebuffer(plane_state
->fb
);
1992 const struct drm_crtc
*crtc
= plane_state
->crtc
;
1993 struct dc_transfer_func
*input_tf
;
1996 if (!fill_rects_from_plane_state(plane_state
, dc_plane_state
))
1999 ret
= fill_plane_attributes_from_fb(
2000 crtc
->dev
->dev_private
,
2008 input_tf
= dc_create_transfer_func();
2010 if (input_tf
== NULL
)
2013 input_tf
->type
= TF_TYPE_PREDEFINED
;
2014 input_tf
->tf
= TRANSFER_FUNCTION_SRGB
;
2016 dc_plane_state
->in_transfer_func
= input_tf
;
2018 /* In case of gamma set, update gamma value */
2019 if (crtc_state
->gamma_lut
)
2020 fill_gamma_from_crtc_state(crtc_state
, dc_plane_state
);
2025 /*****************************************************************************/
2027 static void update_stream_scaling_settings(const struct drm_display_mode
*mode
,
2028 const struct dm_connector_state
*dm_state
,
2029 struct dc_stream_state
*stream
)
2031 enum amdgpu_rmx_type rmx_type
;
2033 struct rect src
= { 0 }; /* viewport in composition space*/
2034 struct rect dst
= { 0 }; /* stream addressable area */
2036 /* no mode. nothing to be done */
2040 /* Full screen scaling by default */
2041 src
.width
= mode
->hdisplay
;
2042 src
.height
= mode
->vdisplay
;
2043 dst
.width
= stream
->timing
.h_addressable
;
2044 dst
.height
= stream
->timing
.v_addressable
;
2046 rmx_type
= dm_state
->scaling
;
2047 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
2048 if (src
.width
* dst
.height
<
2049 src
.height
* dst
.width
) {
2050 /* height needs less upscaling/more downscaling */
2051 dst
.width
= src
.width
*
2052 dst
.height
/ src
.height
;
2054 /* width needs less upscaling/more downscaling */
2055 dst
.height
= src
.height
*
2056 dst
.width
/ src
.width
;
2058 } else if (rmx_type
== RMX_CENTER
) {
2062 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
2063 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
2065 if (dm_state
->underscan_enable
) {
2066 dst
.x
+= dm_state
->underscan_hborder
/ 2;
2067 dst
.y
+= dm_state
->underscan_vborder
/ 2;
2068 dst
.width
-= dm_state
->underscan_hborder
;
2069 dst
.height
-= dm_state
->underscan_vborder
;
2075 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
2076 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
2080 static enum dc_color_depth
2081 convert_color_depth_from_display_info(const struct drm_connector
*connector
)
2083 uint32_t bpc
= connector
->display_info
.bpc
;
2085 /* Limited color depth to 8bit
2086 * TODO: Still need to handle deep color
2093 /* Temporary Work around, DRM don't parse color depth for
2094 * EDID revision before 1.4
2095 * TODO: Fix edid parsing
2097 return COLOR_DEPTH_888
;
2099 return COLOR_DEPTH_666
;
2101 return COLOR_DEPTH_888
;
2103 return COLOR_DEPTH_101010
;
2105 return COLOR_DEPTH_121212
;
2107 return COLOR_DEPTH_141414
;
2109 return COLOR_DEPTH_161616
;
2111 return COLOR_DEPTH_UNDEFINED
;
2115 static enum dc_aspect_ratio
2116 get_aspect_ratio(const struct drm_display_mode
*mode_in
)
2118 int32_t width
= mode_in
->crtc_hdisplay
* 9;
2119 int32_t height
= mode_in
->crtc_vdisplay
* 16;
2121 if ((width
- height
) < 10 && (width
- height
) > -10)
2122 return ASPECT_RATIO_16_9
;
2124 return ASPECT_RATIO_4_3
;
2127 static enum dc_color_space
2128 get_output_color_space(const struct dc_crtc_timing
*dc_crtc_timing
)
2130 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
2132 switch (dc_crtc_timing
->pixel_encoding
) {
2133 case PIXEL_ENCODING_YCBCR422
:
2134 case PIXEL_ENCODING_YCBCR444
:
2135 case PIXEL_ENCODING_YCBCR420
:
2138 * 27030khz is the separation point between HDTV and SDTV
2139 * according to HDMI spec, we use YCbCr709 and YCbCr601
2142 if (dc_crtc_timing
->pix_clk_khz
> 27030) {
2143 if (dc_crtc_timing
->flags
.Y_ONLY
)
2145 COLOR_SPACE_YCBCR709_LIMITED
;
2147 color_space
= COLOR_SPACE_YCBCR709
;
2149 if (dc_crtc_timing
->flags
.Y_ONLY
)
2151 COLOR_SPACE_YCBCR601_LIMITED
;
2153 color_space
= COLOR_SPACE_YCBCR601
;
2158 case PIXEL_ENCODING_RGB
:
2159 color_space
= COLOR_SPACE_SRGB
;
2170 /*****************************************************************************/
2173 fill_stream_properties_from_drm_display_mode(struct dc_stream_state
*stream
,
2174 const struct drm_display_mode
*mode_in
,
2175 const struct drm_connector
*connector
)
2177 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
2179 memset(timing_out
, 0, sizeof(struct dc_crtc_timing
));
2181 timing_out
->h_border_left
= 0;
2182 timing_out
->h_border_right
= 0;
2183 timing_out
->v_border_top
= 0;
2184 timing_out
->v_border_bottom
= 0;
2185 /* TODO: un-hardcode */
2187 if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
2188 && stream
->sink
->sink_signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
2189 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
2191 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
2193 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
2194 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
2196 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
2197 timing_out
->hdmi_vic
= 0;
2198 timing_out
->vic
= drm_match_cea_mode(mode_in
);
2200 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
2201 timing_out
->h_total
= mode_in
->crtc_htotal
;
2202 timing_out
->h_sync_width
=
2203 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
2204 timing_out
->h_front_porch
=
2205 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
2206 timing_out
->v_total
= mode_in
->crtc_vtotal
;
2207 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
2208 timing_out
->v_front_porch
=
2209 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
2210 timing_out
->v_sync_width
=
2211 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
2212 timing_out
->pix_clk_khz
= mode_in
->crtc_clock
;
2213 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
2214 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
2215 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
2216 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
2217 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
2219 stream
->output_color_space
= get_output_color_space(timing_out
);
2222 struct dc_transfer_func
*tf
= dc_create_transfer_func();
2224 tf
->type
= TF_TYPE_PREDEFINED
;
2225 tf
->tf
= TRANSFER_FUNCTION_SRGB
;
2226 stream
->out_transfer_func
= tf
;
2230 static void fill_audio_info(struct audio_info
*audio_info
,
2231 const struct drm_connector
*drm_connector
,
2232 const struct dc_sink
*dc_sink
)
2235 int cea_revision
= 0;
2236 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
2238 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
2239 audio_info
->product_id
= edid_caps
->product_id
;
2241 cea_revision
= drm_connector
->display_info
.cea_rev
;
2243 strncpy(audio_info
->display_name
,
2244 edid_caps
->display_name
,
2245 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
- 1);
2247 if (cea_revision
>= 3) {
2248 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
2250 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
2251 audio_info
->modes
[i
].format_code
=
2252 (enum audio_format_code
)
2253 (edid_caps
->audio_modes
[i
].format_code
);
2254 audio_info
->modes
[i
].channel_count
=
2255 edid_caps
->audio_modes
[i
].channel_count
;
2256 audio_info
->modes
[i
].sample_rates
.all
=
2257 edid_caps
->audio_modes
[i
].sample_rate
;
2258 audio_info
->modes
[i
].sample_size
=
2259 edid_caps
->audio_modes
[i
].sample_size
;
2263 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
2265 /* TODO: We only check for the progressive mode, check for interlace mode too */
2266 if (drm_connector
->latency_present
[0]) {
2267 audio_info
->video_latency
= drm_connector
->video_latency
[0];
2268 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
2271 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2276 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode
*src_mode
,
2277 struct drm_display_mode
*dst_mode
)
2279 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
2280 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
2281 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
2282 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
2283 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
2284 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
2285 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
2286 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
2287 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
2288 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
2289 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
2290 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
2291 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
2292 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
2296 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode
*drm_mode
,
2297 const struct drm_display_mode
*native_mode
,
2300 if (scale_enabled
) {
2301 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2302 } else if (native_mode
->clock
== drm_mode
->clock
&&
2303 native_mode
->htotal
== drm_mode
->htotal
&&
2304 native_mode
->vtotal
== drm_mode
->vtotal
) {
2305 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2307 /* no scaling nor amdgpu inserted, no need to patch */
2311 static int create_fake_sink(struct amdgpu_dm_connector
*aconnector
)
2313 struct dc_sink
*sink
= NULL
;
2314 struct dc_sink_init_data sink_init_data
= { 0 };
2316 sink_init_data
.link
= aconnector
->dc_link
;
2317 sink_init_data
.sink_signal
= aconnector
->dc_link
->connector_signal
;
2319 sink
= dc_sink_create(&sink_init_data
);
2321 DRM_ERROR("Failed to create sink!\n");
2325 sink
->sink_signal
= SIGNAL_TYPE_VIRTUAL
;
2326 aconnector
->fake_enable
= true;
2328 aconnector
->dc_sink
= sink
;
2329 aconnector
->dc_link
->local_sink
= sink
;
2334 static struct dc_stream_state
*
2335 create_stream_for_sink(struct amdgpu_dm_connector
*aconnector
,
2336 const struct drm_display_mode
*drm_mode
,
2337 const struct dm_connector_state
*dm_state
)
2339 struct drm_display_mode
*preferred_mode
= NULL
;
2340 struct drm_connector
*drm_connector
;
2341 struct dc_stream_state
*stream
= NULL
;
2342 struct drm_display_mode mode
= *drm_mode
;
2343 bool native_mode_found
= false;
2345 if (aconnector
== NULL
) {
2346 DRM_ERROR("aconnector is NULL!\n");
2347 goto drm_connector_null
;
2350 if (dm_state
== NULL
) {
2351 DRM_ERROR("dm_state is NULL!\n");
2355 drm_connector
= &aconnector
->base
;
2357 if (!aconnector
->dc_sink
) {
2359 * Create dc_sink when necessary to MST
2360 * Don't apply fake_sink to MST
2362 if (aconnector
->mst_port
) {
2363 dm_dp_mst_dc_sink_create(drm_connector
);
2364 goto mst_dc_sink_create_done
;
2367 if (create_fake_sink(aconnector
))
2368 goto stream_create_fail
;
2371 stream
= dc_create_stream_for_sink(aconnector
->dc_sink
);
2373 if (stream
== NULL
) {
2374 DRM_ERROR("Failed to create stream for sink!\n");
2375 goto stream_create_fail
;
2378 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
2379 /* Search for preferred mode */
2380 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
2381 native_mode_found
= true;
2385 if (!native_mode_found
)
2386 preferred_mode
= list_first_entry_or_null(
2387 &aconnector
->base
.modes
,
2388 struct drm_display_mode
,
2391 if (preferred_mode
== NULL
) {
2392 /* This may not be an error, the use case is when we we have no
2393 * usermode calls to reset and set mode upon hotplug. In this
2394 * case, we call set mode ourselves to restore the previous mode
2395 * and the modelist may not be filled in in time.
2397 DRM_DEBUG_DRIVER("No preferred mode found\n");
2399 decide_crtc_timing_for_drm_display_mode(
2400 &mode
, preferred_mode
,
2401 dm_state
->scaling
!= RMX_OFF
);
2404 fill_stream_properties_from_drm_display_mode(stream
,
2405 &mode
, &aconnector
->base
);
2406 update_stream_scaling_settings(&mode
, dm_state
, stream
);
2409 &stream
->audio_info
,
2411 aconnector
->dc_sink
);
2416 mst_dc_sink_create_done
:
/* Release the DRM-side CRTC resources and free the amdgpu_crtc wrapper. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
2426 static void dm_crtc_destroy_state(struct drm_crtc
*crtc
,
2427 struct drm_crtc_state
*state
)
2429 struct dm_crtc_state
*cur
= to_dm_crtc_state(state
);
2431 /* TODO Destroy dc_stream objects are stream object is flattened */
2433 dc_stream_release(cur
->stream
);
2436 __drm_atomic_helper_crtc_destroy_state(state
);
2442 static void dm_crtc_reset_state(struct drm_crtc
*crtc
)
2444 struct dm_crtc_state
*state
;
2447 dm_crtc_destroy_state(crtc
, crtc
->state
);
2449 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2450 if (WARN_ON(!state
))
2453 crtc
->state
= &state
->base
;
2454 crtc
->state
->crtc
= crtc
;
2458 static struct drm_crtc_state
*
2459 dm_crtc_duplicate_state(struct drm_crtc
*crtc
)
2461 struct dm_crtc_state
*state
, *cur
;
2463 cur
= to_dm_crtc_state(crtc
->state
);
2465 if (WARN_ON(!crtc
->state
))
2468 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2472 __drm_atomic_helper_crtc_duplicate_state(crtc
, &state
->base
);
2475 state
->stream
= cur
->stream
;
2476 dc_stream_retain(state
->stream
);
2479 /* TODO Duplicate dc_stream after objects are stream object is flattened */
2481 return &state
->base
;
2484 /* Implemented only the options currently availible for the driver */
2485 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
2486 .reset
= dm_crtc_reset_state
,
2487 .destroy
= amdgpu_dm_crtc_destroy
,
2488 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
2489 .set_config
= drm_atomic_helper_set_config
,
2490 .page_flip
= drm_atomic_helper_page_flip
,
2491 .atomic_duplicate_state
= dm_crtc_duplicate_state
,
2492 .atomic_destroy_state
= dm_crtc_destroy_state
,
2495 static enum drm_connector_status
2496 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
2499 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2502 * 1. This interface is NOT called in context of HPD irq.
2503 * 2. This interface *is called* in context of user-mode ioctl. Which
2504 * makes it a bad place for *any* MST-related activit. */
2506 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
&&
2507 !aconnector
->fake_enable
)
2508 connected
= (aconnector
->dc_sink
!= NULL
);
2510 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
2512 return (connected
? connector_status_connected
:
2513 connector_status_disconnected
);
2516 int amdgpu_dm_connector_atomic_set_property(struct drm_connector
*connector
,
2517 struct drm_connector_state
*connector_state
,
2518 struct drm_property
*property
,
2521 struct drm_device
*dev
= connector
->dev
;
2522 struct amdgpu_device
*adev
= dev
->dev_private
;
2523 struct dm_connector_state
*dm_old_state
=
2524 to_dm_connector_state(connector
->state
);
2525 struct dm_connector_state
*dm_new_state
=
2526 to_dm_connector_state(connector_state
);
2530 if (property
== dev
->mode_config
.scaling_mode_property
) {
2531 enum amdgpu_rmx_type rmx_type
;
2534 case DRM_MODE_SCALE_CENTER
:
2535 rmx_type
= RMX_CENTER
;
2537 case DRM_MODE_SCALE_ASPECT
:
2538 rmx_type
= RMX_ASPECT
;
2540 case DRM_MODE_SCALE_FULLSCREEN
:
2541 rmx_type
= RMX_FULL
;
2543 case DRM_MODE_SCALE_NONE
:
2549 if (dm_old_state
->scaling
== rmx_type
)
2552 dm_new_state
->scaling
= rmx_type
;
2554 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2555 dm_new_state
->underscan_hborder
= val
;
2557 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2558 dm_new_state
->underscan_vborder
= val
;
2560 } else if (property
== adev
->mode_info
.underscan_property
) {
2561 dm_new_state
->underscan_enable
= val
;
2568 int amdgpu_dm_connector_atomic_get_property(struct drm_connector
*connector
,
2569 const struct drm_connector_state
*state
,
2570 struct drm_property
*property
,
2573 struct drm_device
*dev
= connector
->dev
;
2574 struct amdgpu_device
*adev
= dev
->dev_private
;
2575 struct dm_connector_state
*dm_state
=
2576 to_dm_connector_state(state
);
2579 if (property
== dev
->mode_config
.scaling_mode_property
) {
2580 switch (dm_state
->scaling
) {
2582 *val
= DRM_MODE_SCALE_CENTER
;
2585 *val
= DRM_MODE_SCALE_ASPECT
;
2588 *val
= DRM_MODE_SCALE_FULLSCREEN
;
2592 *val
= DRM_MODE_SCALE_NONE
;
2596 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2597 *val
= dm_state
->underscan_hborder
;
2599 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2600 *val
= dm_state
->underscan_vborder
;
2602 } else if (property
== adev
->mode_info
.underscan_property
) {
2603 *val
= dm_state
->underscan_enable
;
2609 static void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
2611 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2612 const struct dc_link
*link
= aconnector
->dc_link
;
2613 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
2614 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2615 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2616 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2618 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
2619 amdgpu_dm_register_backlight_device(dm
);
2621 if (dm
->backlight_dev
) {
2622 backlight_device_unregister(dm
->backlight_dev
);
2623 dm
->backlight_dev
= NULL
;
2628 drm_connector_unregister(connector
);
2629 drm_connector_cleanup(connector
);
2633 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
2635 struct dm_connector_state
*state
=
2636 to_dm_connector_state(connector
->state
);
2640 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2643 state
->scaling
= RMX_OFF
;
2644 state
->underscan_enable
= false;
2645 state
->underscan_hborder
= 0;
2646 state
->underscan_vborder
= 0;
2648 connector
->state
= &state
->base
;
2649 connector
->state
->connector
= connector
;
2653 struct drm_connector_state
*
2654 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector
*connector
)
2656 struct dm_connector_state
*state
=
2657 to_dm_connector_state(connector
->state
);
2659 struct dm_connector_state
*new_state
=
2660 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
2663 __drm_atomic_helper_connector_duplicate_state(connector
,
2665 return &new_state
->base
;
2671 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
2672 .reset
= amdgpu_dm_connector_funcs_reset
,
2673 .detect
= amdgpu_dm_connector_detect
,
2674 .fill_modes
= drm_helper_probe_single_connector_modes
,
2675 .destroy
= amdgpu_dm_connector_destroy
,
2676 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
2677 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
2678 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
2679 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
2682 static struct drm_encoder
*best_encoder(struct drm_connector
*connector
)
2684 int enc_id
= connector
->encoder_ids
[0];
2685 struct drm_mode_object
*obj
;
2686 struct drm_encoder
*encoder
;
2688 DRM_DEBUG_DRIVER("Finding the best encoder\n");
2690 /* pick the encoder ids */
2692 obj
= drm_mode_object_find(connector
->dev
, NULL
, enc_id
, DRM_MODE_OBJECT_ENCODER
);
2694 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2697 encoder
= obj_to_encoder(obj
);
2700 DRM_ERROR("No encoder id\n");
/* Thin helper-funcs wrapper around amdgpu_dm_connector_get_modes(). */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
2709 static void create_eml_sink(struct amdgpu_dm_connector
*aconnector
)
2711 struct dc_sink_init_data init_params
= {
2712 .link
= aconnector
->dc_link
,
2713 .sink_signal
= SIGNAL_TYPE_VIRTUAL
2717 if (!aconnector
->base
.edid_blob_ptr
||
2718 !aconnector
->base
.edid_blob_ptr
->data
) {
2719 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
2720 aconnector
->base
.name
);
2722 aconnector
->base
.force
= DRM_FORCE_OFF
;
2723 aconnector
->base
.override_edid
= false;
2727 edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
2729 aconnector
->edid
= edid
;
2731 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
2732 aconnector
->dc_link
,
2734 (edid
->extensions
+ 1) * EDID_LENGTH
,
2737 if (aconnector
->base
.force
== DRM_FORCE_ON
)
2738 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
2739 aconnector
->dc_link
->local_sink
:
2740 aconnector
->dc_em_sink
;
2743 static void handle_edid_mgmt(struct amdgpu_dm_connector
*aconnector
)
2745 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
2747 /* In case of headless boot with force on for DP managed connector
2748 * Those settings have to be != 0 to get initial modeset
2750 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
2751 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
2752 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
2756 aconnector
->base
.override_edid
= true;
2757 create_eml_sink(aconnector
);
2760 int amdgpu_dm_connector_mode_valid(struct drm_connector
*connector
,
2761 struct drm_display_mode
*mode
)
2763 int result
= MODE_ERROR
;
2764 struct dc_sink
*dc_sink
;
2765 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
2766 /* TODO: Unhardcode stream count */
2767 struct dc_stream_state
*stream
;
2768 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2770 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
2771 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
2774 /* Only run this the first time mode_valid is called to initilialize
2777 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
2778 !aconnector
->dc_em_sink
)
2779 handle_edid_mgmt(aconnector
);
2781 dc_sink
= to_amdgpu_dm_connector(connector
)->dc_sink
;
2783 if (dc_sink
== NULL
) {
2784 DRM_ERROR("dc_sink is NULL!\n");
2788 stream
= dc_create_stream_for_sink(dc_sink
);
2789 if (stream
== NULL
) {
2790 DRM_ERROR("Failed to create stream for sink!\n");
2794 drm_mode_set_crtcinfo(mode
, 0);
2795 fill_stream_properties_from_drm_display_mode(stream
, mode
, connector
);
2797 stream
->src
.width
= mode
->hdisplay
;
2798 stream
->src
.height
= mode
->vdisplay
;
2799 stream
->dst
= stream
->src
;
2801 if (dc_validate_stream(adev
->dm
.dc
, stream
) == DC_OK
)
2804 dc_stream_release(stream
);
2807 /* TODO: error handling*/
2811 static const struct drm_connector_helper_funcs
2812 amdgpu_dm_connector_helper_funcs
= {
2814 * If hotplug a second bigger display in FB Con mode, bigger resolution
2815 * modes will be filtered by drm_mode_validate_size(), and those modes
2816 * is missing after user start lightdm. So we need to renew modes list.
2817 * in get_modes call back, not just return the modes count
2819 .get_modes
= get_modes
,
2820 .mode_valid
= amdgpu_dm_connector_mode_valid
,
2821 .best_encoder
= best_encoder
/* CRTC disable helper: intentionally a no-op for DC-managed CRTCs. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
2828 static int dm_crtc_helper_atomic_check(struct drm_crtc
*crtc
,
2829 struct drm_crtc_state
*state
)
2831 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
2832 struct dc
*dc
= adev
->dm
.dc
;
2833 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(state
);
2836 if (unlikely(!dm_crtc_state
->stream
&&
2837 modeset_required(state
, NULL
, dm_crtc_state
->stream
))) {
2842 /* In some use cases, like reset, no stream is attached */
2843 if (!dm_crtc_state
->stream
)
2846 if (dc_validate_stream(dc
, dm_crtc_state
->stream
) == DC_OK
)
2852 static bool dm_crtc_helper_mode_fixup(struct drm_crtc
*crtc
,
2853 const struct drm_display_mode
*mode
,
2854 struct drm_display_mode
*adjusted_mode
)
2859 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
2860 .disable
= dm_crtc_helper_disable
,
2861 .atomic_check
= dm_crtc_helper_atomic_check
,
2862 .mode_fixup
= dm_crtc_helper_mode_fixup
/* Encoder disable helper: intentionally a no-op for DC-managed encoders. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
/* Encoder atomic_check: nothing to validate at the encoder level. */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	return 0;
}
2877 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
2878 .disable
= dm_encoder_helper_disable
,
2879 .atomic_check
= dm_encoder_helper_atomic_check
2882 static void dm_drm_plane_reset(struct drm_plane
*plane
)
2884 struct dm_plane_state
*amdgpu_state
= NULL
;
2887 plane
->funcs
->atomic_destroy_state(plane
, plane
->state
);
2889 amdgpu_state
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
2890 WARN_ON(amdgpu_state
== NULL
);
2893 plane
->state
= &amdgpu_state
->base
;
2894 plane
->state
->plane
= plane
;
2895 plane
->state
->rotation
= DRM_MODE_ROTATE_0
;
2899 static struct drm_plane_state
*
2900 dm_drm_plane_duplicate_state(struct drm_plane
*plane
)
2902 struct dm_plane_state
*dm_plane_state
, *old_dm_plane_state
;
2904 old_dm_plane_state
= to_dm_plane_state(plane
->state
);
2905 dm_plane_state
= kzalloc(sizeof(*dm_plane_state
), GFP_KERNEL
);
2906 if (!dm_plane_state
)
2909 __drm_atomic_helper_plane_duplicate_state(plane
, &dm_plane_state
->base
);
2911 if (old_dm_plane_state
->dc_state
) {
2912 dm_plane_state
->dc_state
= old_dm_plane_state
->dc_state
;
2913 dc_plane_state_retain(dm_plane_state
->dc_state
);
2916 return &dm_plane_state
->base
;
2919 void dm_drm_plane_destroy_state(struct drm_plane
*plane
,
2920 struct drm_plane_state
*state
)
2922 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
2924 if (dm_plane_state
->dc_state
)
2925 dc_plane_state_release(dm_plane_state
->dc_state
);
2927 drm_atomic_helper_plane_destroy_state(plane
, state
);
2930 static const struct drm_plane_funcs dm_plane_funcs
= {
2931 .update_plane
= drm_atomic_helper_update_plane
,
2932 .disable_plane
= drm_atomic_helper_disable_plane
,
2933 .destroy
= drm_plane_cleanup
,
2934 .reset
= dm_drm_plane_reset
,
2935 .atomic_duplicate_state
= dm_drm_plane_duplicate_state
,
2936 .atomic_destroy_state
= dm_drm_plane_destroy_state
,
2939 static int dm_plane_helper_prepare_fb(struct drm_plane
*plane
,
2940 struct drm_plane_state
*new_state
)
2942 struct amdgpu_framebuffer
*afb
;
2943 struct drm_gem_object
*obj
;
2944 struct amdgpu_bo
*rbo
;
2945 uint64_t chroma_addr
= 0;
2947 struct dm_plane_state
*dm_plane_state_new
, *dm_plane_state_old
;
2948 unsigned int awidth
;
2950 dm_plane_state_old
= to_dm_plane_state(plane
->state
);
2951 dm_plane_state_new
= to_dm_plane_state(new_state
);
2953 if (!new_state
->fb
) {
2954 DRM_DEBUG_DRIVER("No FB bound\n");
2958 afb
= to_amdgpu_framebuffer(new_state
->fb
);
2961 rbo
= gem_to_amdgpu_bo(obj
);
2962 r
= amdgpu_bo_reserve(rbo
, false);
2963 if (unlikely(r
!= 0))
2966 r
= amdgpu_bo_pin(rbo
, AMDGPU_GEM_DOMAIN_VRAM
, &afb
->address
);
2969 amdgpu_bo_unreserve(rbo
);
2971 if (unlikely(r
!= 0)) {
2972 if (r
!= -ERESTARTSYS
)
2973 DRM_ERROR("Failed to pin framebuffer with error %d\n", r
);
2979 if (dm_plane_state_new
->dc_state
&&
2980 dm_plane_state_old
->dc_state
!= dm_plane_state_new
->dc_state
) {
2981 struct dc_plane_state
*plane_state
= dm_plane_state_new
->dc_state
;
2983 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
2984 plane_state
->address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
2985 plane_state
->address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
2987 awidth
= ALIGN(new_state
->fb
->width
, 64);
2988 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
2989 plane_state
->address
.video_progressive
.luma_addr
.low_part
2990 = lower_32_bits(afb
->address
);
2991 plane_state
->address
.video_progressive
.luma_addr
.high_part
2992 = upper_32_bits(afb
->address
);
2993 chroma_addr
= afb
->address
+ (u64
)(awidth
* new_state
->fb
->height
);
2994 plane_state
->address
.video_progressive
.chroma_addr
.low_part
2995 = lower_32_bits(chroma_addr
);
2996 plane_state
->address
.video_progressive
.chroma_addr
.high_part
2997 = upper_32_bits(chroma_addr
);
3001 /* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
3002 * prepare and cleanup in drm_atomic_helper_prepare_planes
3003 * and drm_atomic_helper_cleanup_planes because fb doens't in s3.
3004 * IN 4.10 kernel this code should be removed and amdgpu_device_suspend
3005 * code touching fram buffers should be avoided for DC.
3007 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
3008 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(new_state
->crtc
);
3010 acrtc
->cursor_bo
= obj
;
3015 static void dm_plane_helper_cleanup_fb(struct drm_plane
*plane
,
3016 struct drm_plane_state
*old_state
)
3018 struct amdgpu_bo
*rbo
;
3019 struct amdgpu_framebuffer
*afb
;
3025 afb
= to_amdgpu_framebuffer(old_state
->fb
);
3026 rbo
= gem_to_amdgpu_bo(afb
->obj
);
3027 r
= amdgpu_bo_reserve(rbo
, false);
3029 DRM_ERROR("failed to reserve rbo before unpin\n");
3033 amdgpu_bo_unpin(rbo
);
3034 amdgpu_bo_unreserve(rbo
);
3035 amdgpu_bo_unref(&rbo
);
3038 static int dm_plane_atomic_check(struct drm_plane
*plane
,
3039 struct drm_plane_state
*state
)
3041 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
3042 struct dc
*dc
= adev
->dm
.dc
;
3043 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
3045 if (!dm_plane_state
->dc_state
)
3048 if (dc_validate_plane(dc
, dm_plane_state
->dc_state
) == DC_OK
)
3054 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
3055 .prepare_fb
= dm_plane_helper_prepare_fb
,
3056 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
3057 .atomic_check
= dm_plane_atomic_check
,
3061 * TODO: these are currently initialized to rgb formats only.
3062 * For future use cases we should either initialize them dynamically based on
3063 * plane capabilities, or initialize this array to all formats, so internal drm
3064 * check will succeed, and let DC to implement proper check
3066 static const uint32_t rgb_formats
[] = {
3068 DRM_FORMAT_XRGB8888
,
3069 DRM_FORMAT_ARGB8888
,
3070 DRM_FORMAT_RGBA8888
,
3071 DRM_FORMAT_XRGB2101010
,
3072 DRM_FORMAT_XBGR2101010
,
3073 DRM_FORMAT_ARGB2101010
,
3074 DRM_FORMAT_ABGR2101010
,
3077 static const uint32_t yuv_formats
[] = {
3082 static const u32 cursor_formats
[] = {
3086 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
3087 struct amdgpu_plane
*aplane
,
3088 unsigned long possible_crtcs
)
3092 switch (aplane
->base
.type
) {
3093 case DRM_PLANE_TYPE_PRIMARY
:
3094 res
= drm_universal_plane_init(
3100 ARRAY_SIZE(rgb_formats
),
3101 NULL
, aplane
->base
.type
, NULL
);
3103 case DRM_PLANE_TYPE_OVERLAY
:
3104 res
= drm_universal_plane_init(
3110 ARRAY_SIZE(yuv_formats
),
3111 NULL
, aplane
->base
.type
, NULL
);
3113 case DRM_PLANE_TYPE_CURSOR
:
3114 res
= drm_universal_plane_init(
3120 ARRAY_SIZE(cursor_formats
),
3121 NULL
, aplane
->base
.type
, NULL
);
3125 drm_plane_helper_add(&aplane
->base
, &dm_plane_helper_funcs
);
3127 /* Create (reset) the plane state */
3128 if (aplane
->base
.funcs
->reset
)
3129 aplane
->base
.funcs
->reset(&aplane
->base
);
3135 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
3136 struct drm_plane
*plane
,
3137 uint32_t crtc_index
)
3139 struct amdgpu_crtc
*acrtc
= NULL
;
3140 struct amdgpu_plane
*cursor_plane
;
3144 cursor_plane
= kzalloc(sizeof(*cursor_plane
), GFP_KERNEL
);
3148 cursor_plane
->base
.type
= DRM_PLANE_TYPE_CURSOR
;
3149 res
= amdgpu_dm_plane_init(dm
, cursor_plane
, 0);
3151 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
3155 res
= drm_crtc_init_with_planes(
3159 &cursor_plane
->base
,
3160 &amdgpu_dm_crtc_funcs
, NULL
);
3165 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
3167 /* Create (reset) the plane state */
3168 if (acrtc
->base
.funcs
->reset
)
3169 acrtc
->base
.funcs
->reset(&acrtc
->base
);
3171 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3172 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3174 acrtc
->crtc_id
= crtc_index
;
3175 acrtc
->base
.enabled
= false;
3177 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
3178 drm_mode_crtc_set_gamma_size(&acrtc
->base
, 256);
3184 kfree(cursor_plane
);
3189 static int to_drm_connector_type(enum signal_type st
)
3192 case SIGNAL_TYPE_HDMI_TYPE_A
:
3193 return DRM_MODE_CONNECTOR_HDMIA
;
3194 case SIGNAL_TYPE_EDP
:
3195 return DRM_MODE_CONNECTOR_eDP
;
3196 case SIGNAL_TYPE_RGB
:
3197 return DRM_MODE_CONNECTOR_VGA
;
3198 case SIGNAL_TYPE_DISPLAY_PORT
:
3199 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
3200 return DRM_MODE_CONNECTOR_DisplayPort
;
3201 case SIGNAL_TYPE_DVI_DUAL_LINK
:
3202 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
3203 return DRM_MODE_CONNECTOR_DVID
;
3204 case SIGNAL_TYPE_VIRTUAL
:
3205 return DRM_MODE_CONNECTOR_VIRTUAL
;
3208 return DRM_MODE_CONNECTOR_Unknown
;
3212 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
3214 const struct drm_connector_helper_funcs
*helper
=
3215 connector
->helper_private
;
3216 struct drm_encoder
*encoder
;
3217 struct amdgpu_encoder
*amdgpu_encoder
;
3219 encoder
= helper
->best_encoder(connector
);
3221 if (encoder
== NULL
)
3224 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3226 amdgpu_encoder
->native_mode
.clock
= 0;
3228 if (!list_empty(&connector
->probed_modes
)) {
3229 struct drm_display_mode
*preferred_mode
= NULL
;
3231 list_for_each_entry(preferred_mode
,
3232 &connector
->probed_modes
,
3234 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
)
3235 amdgpu_encoder
->native_mode
= *preferred_mode
;
3243 static struct drm_display_mode
*
3244 amdgpu_dm_create_common_mode(struct drm_encoder
*encoder
,
3246 int hdisplay
, int vdisplay
)
3248 struct drm_device
*dev
= encoder
->dev
;
3249 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3250 struct drm_display_mode
*mode
= NULL
;
3251 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3253 mode
= drm_mode_duplicate(dev
, native_mode
);
3258 mode
->hdisplay
= hdisplay
;
3259 mode
->vdisplay
= vdisplay
;
3260 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
3261 strncpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
3267 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
3268 struct drm_connector
*connector
)
3270 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3271 struct drm_display_mode
*mode
= NULL
;
3272 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3273 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3274 to_amdgpu_dm_connector(connector
);
3278 char name
[DRM_DISPLAY_MODE_LEN
];
3281 } common_modes
[] = {
3282 { "640x480", 640, 480},
3283 { "800x600", 800, 600},
3284 { "1024x768", 1024, 768},
3285 { "1280x720", 1280, 720},
3286 { "1280x800", 1280, 800},
3287 {"1280x1024", 1280, 1024},
3288 { "1440x900", 1440, 900},
3289 {"1680x1050", 1680, 1050},
3290 {"1600x1200", 1600, 1200},
3291 {"1920x1080", 1920, 1080},
3292 {"1920x1200", 1920, 1200}
3295 n
= ARRAY_SIZE(common_modes
);
3297 for (i
= 0; i
< n
; i
++) {
3298 struct drm_display_mode
*curmode
= NULL
;
3299 bool mode_existed
= false;
3301 if (common_modes
[i
].w
> native_mode
->hdisplay
||
3302 common_modes
[i
].h
> native_mode
->vdisplay
||
3303 (common_modes
[i
].w
== native_mode
->hdisplay
&&
3304 common_modes
[i
].h
== native_mode
->vdisplay
))
3307 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
3308 if (common_modes
[i
].w
== curmode
->hdisplay
&&
3309 common_modes
[i
].h
== curmode
->vdisplay
) {
3310 mode_existed
= true;
3318 mode
= amdgpu_dm_create_common_mode(encoder
,
3319 common_modes
[i
].name
, common_modes
[i
].w
,
3321 drm_mode_probed_add(connector
, mode
);
3322 amdgpu_dm_connector
->num_modes
++;
3326 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector
*connector
,
3329 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3330 to_amdgpu_dm_connector(connector
);
3333 /* empty probed_modes */
3334 INIT_LIST_HEAD(&connector
->probed_modes
);
3335 amdgpu_dm_connector
->num_modes
=
3336 drm_add_edid_modes(connector
, edid
);
3338 drm_edid_to_eld(connector
, edid
);
3340 amdgpu_dm_get_native_mode(connector
);
3342 amdgpu_dm_connector
->num_modes
= 0;
3346 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
3348 const struct drm_connector_helper_funcs
*helper
=
3349 connector
->helper_private
;
3350 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3351 to_amdgpu_dm_connector(connector
);
3352 struct drm_encoder
*encoder
;
3353 struct edid
*edid
= amdgpu_dm_connector
->edid
;
3355 encoder
= helper
->best_encoder(connector
);
3357 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
3358 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
3359 return amdgpu_dm_connector
->num_modes
;
3362 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager
*dm
,
3363 struct amdgpu_dm_connector
*aconnector
,
3365 struct dc_link
*link
,
3368 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
3370 aconnector
->connector_id
= link_index
;
3371 aconnector
->dc_link
= link
;
3372 aconnector
->base
.interlace_allowed
= false;
3373 aconnector
->base
.doublescan_allowed
= false;
3374 aconnector
->base
.stereo_allowed
= false;
3375 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
3376 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
3378 mutex_init(&aconnector
->hpd_lock
);
3380 /* configure support HPD hot plug connector_>polled default value is 0
3381 * which means HPD hot plug not supported
3383 switch (connector_type
) {
3384 case DRM_MODE_CONNECTOR_HDMIA
:
3385 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3387 case DRM_MODE_CONNECTOR_DisplayPort
:
3388 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3390 case DRM_MODE_CONNECTOR_DVID
:
3391 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3397 drm_object_attach_property(&aconnector
->base
.base
,
3398 dm
->ddev
->mode_config
.scaling_mode_property
,
3399 DRM_MODE_SCALE_NONE
);
3401 drm_object_attach_property(&aconnector
->base
.base
,
3402 adev
->mode_info
.underscan_property
,
3404 drm_object_attach_property(&aconnector
->base
.base
,
3405 adev
->mode_info
.underscan_hborder_property
,
3407 drm_object_attach_property(&aconnector
->base
.base
,
3408 adev
->mode_info
.underscan_vborder_property
,
3413 static int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
3414 struct i2c_msg
*msgs
, int num
)
3416 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
3417 struct ddc_service
*ddc_service
= i2c
->ddc_service
;
3418 struct i2c_command cmd
;
3422 cmd
.payloads
= kcalloc(num
, sizeof(struct i2c_payload
), GFP_KERNEL
);
3427 cmd
.number_of_payloads
= num
;
3428 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
3431 for (i
= 0; i
< num
; i
++) {
3432 cmd
.payloads
[i
].write
= !(msgs
[i
].flags
& I2C_M_RD
);
3433 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
3434 cmd
.payloads
[i
].length
= msgs
[i
].len
;
3435 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
3438 if (dal_i2caux_submit_i2c_command(
3439 ddc_service
->ctx
->i2caux
,
3440 ddc_service
->ddc_pin
,
3444 kfree(cmd
.payloads
);
3448 static u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
3450 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
3453 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
3454 .master_xfer
= amdgpu_dm_i2c_xfer
,
3455 .functionality
= amdgpu_dm_i2c_func
,
3458 static struct amdgpu_i2c_adapter
*
3459 create_i2c(struct ddc_service
*ddc_service
,
3463 struct amdgpu_device
*adev
= ddc_service
->ctx
->driver_context
;
3464 struct amdgpu_i2c_adapter
*i2c
;
3466 i2c
= kzalloc(sizeof(struct amdgpu_i2c_adapter
), GFP_KERNEL
);
3469 i2c
->base
.owner
= THIS_MODULE
;
3470 i2c
->base
.class = I2C_CLASS_DDC
;
3471 i2c
->base
.dev
.parent
= &adev
->pdev
->dev
;
3472 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
3473 snprintf(i2c
->base
.name
, sizeof(i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
3474 i2c_set_adapdata(&i2c
->base
, i2c
);
3475 i2c
->ddc_service
= ddc_service
;
3480 /* Note: this function assumes that dc_link_detect() was called for the
3481 * dc_link which will be represented by this aconnector.
3483 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
3484 struct amdgpu_dm_connector
*aconnector
,
3485 uint32_t link_index
,
3486 struct amdgpu_encoder
*aencoder
)
3490 struct dc
*dc
= dm
->dc
;
3491 struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
3492 struct amdgpu_i2c_adapter
*i2c
;
3494 link
->priv
= aconnector
;
3496 DRM_DEBUG_DRIVER("%s()\n", __func__
);
3498 i2c
= create_i2c(link
->ddc
, link
->link_index
, &res
);
3500 DRM_ERROR("Failed to create i2c adapter data\n");
3504 aconnector
->i2c
= i2c
;
3505 res
= i2c_add_adapter(&i2c
->base
);
3508 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
3512 connector_type
= to_drm_connector_type(link
->connector_signal
);
3514 res
= drm_connector_init(
3517 &amdgpu_dm_connector_funcs
,
3521 DRM_ERROR("connector_init failed\n");
3522 aconnector
->connector_id
= -1;
3526 drm_connector_helper_add(
3528 &amdgpu_dm_connector_helper_funcs
);
3530 if (aconnector
->base
.funcs
->reset
)
3531 aconnector
->base
.funcs
->reset(&aconnector
->base
);
3533 amdgpu_dm_connector_init_helper(
3540 drm_mode_connector_attach_encoder(
3541 &aconnector
->base
, &aencoder
->base
);
3543 drm_connector_register(&aconnector
->base
);
3545 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
3546 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
3547 amdgpu_dm_initialize_dp_connector(dm
, aconnector
);
3549 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3550 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3552 /* NOTE: this currently will create backlight device even if a panel
3553 * is not connected to the eDP/LVDS connector.
3555 * This is less than ideal but we don't have sink information at this
3556 * stage since detection happens after. We can't do detection earlier
3557 * since MST detection needs connectors to be created first.
3559 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
3560 /* Event if registration failed, we should continue with
3561 * DM initialization because not having a backlight control
3562 * is better then a black screen.
3564 amdgpu_dm_register_backlight_device(dm
);
3566 if (dm
->backlight_dev
)
3567 dm
->backlight_link
= link
;
3574 aconnector
->i2c
= NULL
;
3579 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
3581 switch (adev
->mode_info
.num_crtc
) {
/*
 * Initialize the DRM encoder for the link at @link_index and register the
 * DM encoder helper callbacks.
 *
 * NOTE(review): fragmentary dump — braces, some drm_encoder_init()
 * arguments and the success/failure branching are missing. The two
 * encoder_id assignments below presumably sit in the success and error
 * branches of the init result — confirm against upstream.
 */
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
	struct amdgpu_device *adev = dev->dev_private;

	int res = drm_encoder_init(dev,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,

	/* encoder may drive any CRTC the ASIC exposes */
	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	aencoder->encoder_id = link_index;
	/* gap: error path — id is invalidated on failure */
	aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
/*
 * Enable or disable display interrupts (pageflip IRQ and DRM vblank
 * machinery) for one CRTC.
 *
 * NOTE(review): fragmentary dump — the third parameter (presumably a
 * bool enable flag), the amdgpu_irq_get/put calls around pageflip_irq,
 * and all braces are missing; verify against upstream.
 */
static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
	/*
	 * this is not correct translation but will work as soon as VBLANK
	 * constant is the same as PFLIP
	 */
	amdgpu_crtc_idx_to_irq_type(

	drm_crtc_vblank_on(&acrtc->base);

	/* gap: presumably amdgpu_irq_get(...) on the enable path */
	&adev->pageflip_irq,
	/* gap: presumably amdgpu_irq_put(...) on the disable path */
	&adev->pageflip_irq,

	drm_crtc_vblank_off(&acrtc->base);
/*
 * Compare two DM connector states and report whether the scaling mode or
 * the underscan configuration differs between them — i.e. whether a
 * stream-scaling update is required.
 *
 * NOTE(review): fragmentary dump — the return statements and closing
 * braces are missing from this extraction; each branch presumably
 * returns true and the fall-through returns false. Verify upstream.
 */
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
	/* scaling mode itself changed */
	if (dm_state->scaling != old_dm_state->scaling)

	/* underscan turned off: differs only if old borders were non-zero */
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
	/* underscan turned on: differs only if new borders are non-zero */
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
	/* underscan stayed on/off but border sizes changed */
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
/*
 * Detach a DC stream from a CRTC: unregister it from the freesync module
 * (when present) and mark the CRTC's OTG instance invalid / disabled.
 *
 * NOTE(review): fragmentary dump — function braces are missing in this
 * extraction.
 */
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
	/* this is the update mode case */
	if (adev->dm.freesync_module)
		mod_freesync_remove_stream(adev->dm.freesync_module, stream);

	acrtc->otg_inst = -1;	/* no output timing generator bound anymore */
	acrtc->enabled = false;
/*
 * Translate the DRM cursor plane state into a DC cursor position,
 * accounting for the primary plane's source offset and clamping the
 * hotspot when the cursor hangs off the top/left edge.
 *
 * NOTE(review): fragmentary dump — returns, closing braces, the first
 * DRM_ERROR argument, and the declarations of x/y are missing; verify
 * against upstream.
 */
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
	/*
	 * NOTE(review): the duplicated "amdgpu_crtc =" below looks like an
	 * extraction artifact — upstream presumably has a single assignment.
	 */
	struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc);
	int xorigin = 0, yorigin = 0;

	/* no CRTC or no framebuffer: cursor is disabled */
	if (!crtc || !plane->state->fb) {
		position->enable = false;

	/* reject cursors larger than the hardware maximum */
	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  plane->state->crtc_w,
			  plane->state->crtc_h);

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;
	/* avivo cursor are offset into the total surface */
	x += crtc->primary->state->src_x >> 16;	/* src_x/y are 16.16 fixed point */
	y += crtc->primary->state->src_y >> 16;

	/* clamp hotspot when the cursor extends past the left edge */
	xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
	/* clamp hotspot when the cursor extends past the top edge */
	yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);

	position->enable = true;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;
/*
 * Program the hardware cursor for a plane update: compute the DC cursor
 * position, and either disable the cursor (position not enabled) or push
 * size/address/format attributes and the new position to DC.
 *
 * NOTE(review): fragmentary dump — returns, some call arguments
 * (e.g. the trailing args of dc_stream_set_cursor_position /
 * _attributes), the `ret` declaration and several braces are missing.
 */
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	/* fall back to the old state's CRTC when the new fb is gone (cursor off) */
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;

	/* nothing to do if neither old nor new state has a framebuffer */
	if (!plane->state->fb && !old_plane_state->fb)

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream)
			dc_stream_set_cursor_position(crtc_state->stream,

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	/* cursor surface pitch equals its width */
	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
			DRM_ERROR("DC failed to set cursor attributes\n");
		if (!dc_stream_set_cursor_position(crtc_state->stream,
			DRM_ERROR("DC failed to set cursor position\n");
/*
 * Arm the pageflip ISR for a CRTC: stash the pending pageflip event from
 * the CRTC state, mark the flip SUBMITTED, and consume the event so the
 * helper framework does not also deliver it. Caller must hold
 * dev->event_lock (asserted below).
 *
 * NOTE(review): fragmentary dump — braces and the trailing
 * DRM_DEBUG_DRIVER argument(s) are missing.
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
	assert_spin_locked(&acrtc->base.dev->event_lock);
	/* a previous flip's event must have been delivered already */
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
/*
 * Waits on all BO's fences and for proper vblank count
 *
 * Execute a page flip: reserve and fence-wait the target BO, busy-wait
 * until outside the vblank preceding the target, then hand the new
 * surface address to DC under the event lock.
 *
 * NOTE(review): fragmentary dump — the `target` parameter, the `r`,
 * `vpos`, `hpos` declarations, some call arguments and several braces
 * are missing from this extraction; verify against upstream.
 */
static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
			      struct drm_framebuffer *fb,
			      struct dc_state *state)
	unsigned long flags;
	uint32_t target_vblank;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
	struct dc_flip_addrs addr = { {0} };
	/* TODO eliminate or rename surface_update */
	struct dc_surface_update surface_updates[1] = { {0} };
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);

	/* Prepare wait for target vblank early - before the fence-waits */
	target_vblank = target - drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);

	/* TODO This might fail and hence better not used, wait
	 * explicitly on fences instead
	 * and in general should be called for
	 * blocking commit to as per framework helpers
	 */
	r = amdgpu_bo_reserve(abo, true);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve buffer before flip\n");

	/* Wait for all fences on this FB */
	WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
						    MAX_SCHEDULE_TIMEOUT) < 0);

	amdgpu_bo_unreserve(abo);

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	while ((acrtc->enabled &&
		(amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
					    &vpos, &hpos, NULL, NULL,
		 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
		(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
		(int)(target_vblank -
		 amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
		usleep_range(1000, 1100);

	/* Flip the surface and arm the flip ISR under the event lock */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* update crtc fb */
	crtc->primary->fb = fb;

	WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
	WARN_ON(!acrtc_state->stream);

	addr.address.grph.addr.low_part = lower_32_bits(afb->address);
	addr.address.grph.addr.high_part = upper_32_bits(afb->address);
	addr.flip_immediate = async_flip;	/* bypass vblank sync when async */

	if (acrtc->base.state->event)
		prepare_flip_isr(acrtc);

	surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
	surface_updates->flip_addr = &addr;

	dc_commit_updates_for_stream(adev->dm.dc,
				     acrtc_state->stream,
				     &surface_updates->surface,

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);

	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
/*
 * Commit all plane updates belonging to @pcrtc: cursor planes go through
 * handle_cursor_update(); other planes are either collected for a full
 * dc_commit_planes_to_stream() (modeset path) or page-flipped via
 * amdgpu_dm_do_flip() (fast path). Arms the flip ISR when a userspace
 * event is pending on the CRTC.
 *
 * NOTE(review): fragmentary dump — `i` and `pflip_needed` declarations,
 * `continue` statements after the guard checks, the amdgpu_dm_do_flip()
 * call and several arguments/braces are missing; verify upstream.
 */
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool *wait_for_vblank)
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dc_stream_state *dc_stream_attach;
	struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
		drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	int planes_count = 0;
	unsigned long flags;

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* cursor has its own dedicated update path */
		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
			handle_cursor_update(plane, old_plane_state);

		/* only planes attached to this CRTC, with a framebuffer */
		if (!fb || !crtc || pcrtc != crtc)

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)

		/* fast flip path only when no modeset is allowed */
		pflip_needed = !state->allow_modeset;

		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
			DRM_ERROR("%s: acrtc %d, already busy\n",
				  acrtc_attach->crtc_id);
			/* In commit tail framework this cannot happen */
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		if (!pflip_needed) {
			/* modeset path: batch plane states for one DC commit */
			WARN_ON(!dm_new_plane_state->dc_state);

			plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;

			dc_stream_attach = acrtc_state->stream;

		} else if (new_crtc_state->planes_changed) {
			/* Assume even ONE crtc with immediate flip means
			 * entire can't wait for VBLANK
			 * TODO Check if it's correct
			 */
			new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?

			/* TODO: Needs rework for multiplane flip */
			if (plane->type == DRM_PLANE_TYPE_PRIMARY)
				drm_crtc_vblank_get(crtc);

			/* gap: presumably part of the amdgpu_dm_do_flip() call */
			drm_crtc_vblank_count(crtc) + *wait_for_vblank,

	/* batched-commit tail: deliver the pending event and commit to DC */
	unsigned long flags;

	if (new_pcrtc_state->event) {

		drm_crtc_vblank_get(pcrtc);

		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
		prepare_flip_isr(acrtc_attach);
		spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);

	if (false == dc_commit_planes_to_stream(dm->dc,
						plane_states_constructed,
		dm_error("%s: Failed to attach plane!\n", __func__);
	/*TODO BUG Here should go disable planes on CRTC. */
/*
 * DM's drm_mode_config_funcs.atomic_commit hook: disable DM interrupts
 * on every CRTC undergoing a modeset before handing the state to the
 * generic atomic helper commit.
 *
 * NOTE(review): fragmentary dump — the `nonblock` parameter, `i`
 * declaration and braces are missing from this extraction.
 */
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct amdgpu_device *adev = dev->dev_private;

	/*
	 * We evade vblanks and pflips on crtc that
	 * should be changed. We do it here to flush & disable
	 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
	 * it will update crtc->dm_crtc_state->stream pointer which is used in
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		/* quiesce interrupts only for CRTCs that had a live stream */
		if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
			manage_dm_interrupts(adev, acrtc, false);

	/* Add check here for SoC's that support hardware cursor plane, to
	 * unset legacy_cursor_update
	 */
	return drm_atomic_helper_commit(dev, state, nonblock);

	/*TODO Handle EINTR, reenable IRQ*/
/*
 * Atomic commit tail for DM: program the DC hardware state after the DRM
 * core has swapped states. Sequence: sync legacy modeset state, remove
 * streams for reset/changed CRTCs, register new streams with freesync,
 * commit the DC context, record OTG instances, apply scaling/underscan
 * updates, re-enable interrupts on new CRTCs, commit planes per CRTC,
 * deliver leftover vblank events, then signal hw_done / wait for flips /
 * clean up planes.
 *
 * NOTE(review): fragmentary dump — `i`/`j` declarations, several
 * `continue`s, DRM_DEBUG_DRIVER call heads, and braces are missing;
 * verify against upstream before relying on this text.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	uint32_t new_crtcs_count = 0;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
	struct dc_stream_state *new_stream = NULL;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = to_dm_atomic_state(state);

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* gap: debug-print head missing from this extraction */
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */
		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * this could happen because of issues with
				 * userspace notifications delivery.
				 * In this case userspace tries to set mode on
				 * display which is disconnect in fact.
				 * dc_sink in NULL in this case on aconnector.
				 * We expect reset mode will come soon.
				 *
				 * This can also happen when unplug is done
				 * during resume sequence ended
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			/*
			 * this loop saves set mode crtcs
			 * we needed to enable vblanks once all
			 * resources acquired in dc after dc_commit_streams
			 */
			/*TODO move all this into dm_crtc_state, get rid of
			 * new_crtcs array and use old and new atomic states
			 */
			new_crtcs[new_crtcs_count] = acrtc;

			new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);

			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
	} /* for_each_crtc_in_state() */

	/*
	 * Add streams after required streams from new and replaced streams
	 * are removed from freesync module
	 */
	if (adev->dm.freesync_module) {
		for (i = 0; i < new_crtcs_count; i++) {
			struct amdgpu_dm_connector *aconnector = NULL;

			new_crtc_state = drm_atomic_get_new_crtc_state(state,
								       &new_crtcs[i]->base);
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

			new_stream = dm_new_crtc_state->stream;
			aconnector = amdgpu_dm_find_first_crtc_matching_connector(
				&new_crtcs[i]->base);
			/* gap: "if (!aconnector)" guard presumably missing here */
			DRM_DEBUG_DRIVER("Atomic commit: Failed to find connector for acrtc id:%d "
					 "skipping freesync init\n",
					 new_crtcs[i]->crtc_id);

			mod_freesync_add_stream(adev->dm.freesync_module,
						new_stream, &aconnector->caps);

	/* program the new global DC state into hardware */
	if (dm_state->context)
		WARN_ON(!dc_commit_state(dm->dc, dm_state->context));

	/* record which OTG serves each active stream */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
				dc_stream_get_status(dm_new_crtc_state->stream);

			/* gap: "if (!status)" guard presumably missing here */
			DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);

			acrtc->otg_inst = status->primary_otg_inst;

	/* Handle scaling and underscan changes*/
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_stream_status *status = NULL;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))

		/* Skip any thing not scale or underscan changes */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					       dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);

		if (!dm_new_crtc_state->stream)

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status->plane_count);

		/*TODO How it works with MPO ?*/
		if (!dc_commit_planes_to_stream(
			status->plane_states,
			status->plane_count,
			dm_new_crtc_state->stream,
			dm_error("%s: Failed to update stream scaling!\n", __func__);

	for (i = 0; i < new_crtcs_count; i++) {
		/*
		 * loop to enable interrupts on newly arrived crtc
		 */
		struct amdgpu_crtc *acrtc = new_crtcs[i];

		new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (adev->dm.freesync_module)
			mod_freesync_notify_mode_change(
				adev->dm.freesync_module, &dm_new_crtc_state->stream, 1);

		manage_dm_interrupts(adev, acrtc, true);

	/* update planes when needed per crtc*/
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);

	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);
/*
 * Build and commit a minimal atomic state that re-applies the current
 * mode on @connector's CRTC (mode_changed forced to true), used to
 * restore the display without a userspace-initiated modeset.
 *
 * NOTE(review): fragmentary dump — `ret` declaration, error-path gotos
 * after each PTR_ERR_OR_ZERO check, the final return and braces are
 * missing from this extraction.
 */
static int dm_force_atomic_commit(struct drm_connector *connector)
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	/* reuse the acquire context the caller already holds */
	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);

	/* Attach crtc to drm_atomic_state*/
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	/* gap: "if (!ret) return 0;" presumably precedes the error print */
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);
/*
 * This function handles all cases when a set-mode does not come upon hotplug:
 * when the same display is unplugged then plugged back into the
 * same port, and when we are running without usermode desktop manager support.
 *
 * If the CRTC's active stream still references a different (stale) sink
 * than the connector's current one, force an atomic commit to restore
 * the display ourselves.
 *
 * NOTE(review): fragmentary dump — the early `return`s after each guard
 * and braces are missing from this extraction.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	/* nothing to restore without a sink, state and encoder */
	if (!aconnector->dc_sink || !connector->state || !connector->encoder)

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we can not rely on usermode call
	 * to turn on the display, so we do it here
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * Waits for completion of all non blocking commits.
 *
 * For each CRTC, take a reference on the most recent queued commit (under
 * commit_lock) and wait — interruptibly, 10s timeout — for its hw_done
 * and flip_done completions before releasing the reference.
 *
 * NOTE(review): fragmentary dump — `ret`/`i` declarations, the early
 * return on lock failure, the `if (!commit) continue;` guard and
 * timeout/brace structure are missing; verify against upstream.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;

	/* Adding all modeset locks to aquire_ctx will
	 * ensure that when the framework release it the
	 * extra locks we are locking here will get released to
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		/* Make sure all pending HW programming completed and
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		ret = wait_for_completion_interruptible_timeout(
			&commit->flip_done, 10*HZ);

		/* gap: "if (ret == 0)" timeout check presumably precedes this */
		DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
			  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);

	/* negative ret is an error; positive remaining time means success */
	return ret < 0 ? ret : 0;
/*
 * Atomic-check helper: walk all CRTCs and either remove streams for
 * changed/disabled CRTCs (disable pass) or create and add new streams
 * for enabled CRTCs (enable pass, selected by the missing `enable`
 * parameter). Sets *lock_and_validation_needed when the DC context was
 * modified so the caller must take the global lock and validate.
 *
 * NOTE(review): fragmentary dump — the `enable` parameter, `ret`/`i`
 * declarations, many `goto fail`/`continue` statements, call arguments
 * and braces are missing from this extraction; verify upstream.
 */
static int dm_update_crtcs_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 bool *lock_and_validation_needed)
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	struct dc_stream_state *new_stream;

	/*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = NULL;
		struct amdgpu_dm_connector *aconnector = NULL;
		struct drm_connector_state *new_con_state = NULL;
		struct dm_connector_state *dm_conn_state = NULL;
		struct drm_plane_state *new_plane_state = NULL;

		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		acrtc = to_amdgpu_crtc(crtc);

		new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);

		/* enabled CRTC must have a primary framebuffer */
		if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {

		aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

		/* TODO This hack should go away */
		if (aconnector && enable) {
			// Make sure fake sink is created in plug-in scenario
			new_con_state = drm_atomic_get_connector_state(state,

			if (IS_ERR(new_con_state)) {
				ret = PTR_ERR_OR_ZERO(new_con_state);

			dm_conn_state = to_dm_connector_state(new_con_state);

			new_stream = create_stream_for_sink(aconnector,
							    &new_crtc_state->mode,

			/*
			 * we can have no stream on ACTION_SET if a display
			 * was disconnected during S3, in this case it not and
			 * error, the OS will be updated after detection, and
			 * do the right thing on next atomic commit
			 */
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					 __func__, acrtc->base.base.id);

		/* stream unchanged: downgrade the modeset to a fast update */
		if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {

			new_crtc_state->mode_changed = false;

			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))

		/* gap: debug-print head missing from this extraction */
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

		/* Remove stream for any changed/disabled CRTC */
		/* gap: "if (!enable) {" presumably opens this branch */
		if (!dm_old_crtc_state->stream)

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
			dm_old_crtc_state->stream) != DC_OK) {

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		*lock_and_validation_needed = true;

		} else {/* Add stream for any updated/enabled CRTC */
			/*
			 * Quick fix to prevent NULL pointer on new_stream when
			 * added MST connectors not found in existing crtc_state in the chained mode
			 * TODO: need to dig out the root cause of that
			 */
			if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))

			if (modereset_required(new_crtc_state))

			if (modeset_required(new_crtc_state, new_stream,
					     dm_old_crtc_state->stream)) {

				WARN_ON(dm_new_crtc_state->stream);

				dm_new_crtc_state->stream = new_stream;
				dc_stream_retain(new_stream);

				DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",

				if (dc_add_stream_to_ctx(
					dm_new_crtc_state->stream) != DC_OK) {

				*lock_and_validation_needed = true;

	/* Release extra reference */
	/* gap: loop-footer release vs. failure-path release — both appear below */
	dc_stream_release(new_stream);

	dc_stream_release(new_stream);
/*
 * Atomic-check helper: walk all non-cursor planes and either remove
 * their DC plane states from the context (disable pass) or create, fill
 * and add new DC plane states (enable pass, selected by the missing
 * `enable` parameter). Sets *lock_and_validation_needed when the DC
 * context was modified.
 *
 * NOTE(review): fragmentary dump — the `enable` parameter, `i`/`ret`
 * declarations, many `continue`s/returns, some call arguments and braces
 * are missing from this extraction; verify upstream.
 */
static int dm_update_planes_state(struct dc *dc,
				  struct drm_atomic_state *state,
				  bool *lock_and_validation_needed)
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;

	/* TODO return page_flip_needed() function */
	bool pflip_needed = !state->allow_modeset;

	/* Add new planes */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		new_plane_crtc = new_plane_state->crtc;
		old_plane_crtc = old_plane_state->crtc;
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		dm_old_plane_state = to_dm_plane_state(old_plane_state);

		/*TODO Implement atomic check for cursor plane */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)

		/* Remove any changed/removed planes */
		/* gap: "if (!enable) {" presumably opens this branch */
		if (!old_plane_crtc)

		old_crtc_state = drm_atomic_get_old_crtc_state(
			state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		if (!dc_remove_plane_from_context(
			dm_old_crtc_state->stream,
			dm_old_plane_state->dc_state,
			dm_state->context)) {

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

		} else { /* Add new planes */

			if (drm_atomic_plane_disabling(plane->state, new_plane_state))

			if (!new_plane_crtc)

			new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

			if (!dm_new_crtc_state->stream)

			/* must not already carry a DC plane state */
			WARN_ON(dm_new_plane_state->dc_state);

			dm_new_plane_state->dc_state = dc_create_plane_state(dc);

			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
					 plane->base.id, new_plane_crtc->base.id);

			if (!dm_new_plane_state->dc_state) {

			ret = fill_plane_attributes(
				new_plane_crtc->dev->dev_private,
				dm_new_plane_state->dc_state,

			if (!dc_add_plane_to_context(
				dm_new_crtc_state->stream,
				dm_new_plane_state->dc_state,
				dm_state->context)) {

			*lock_and_validation_needed = true;
/*
 * DM's atomic_check hook: run the DRM helper modeset check, expand the
 * state with affected connectors/planes, build a trial DC context from
 * the current one, apply plane/CRTC removals then additions via the
 * dm_update_* helpers, validate planes, detect scaling/underscan
 * changes, and — when anything non-fast happened — take the global lock
 * and validate the full DC state.
 *
 * NOTE(review): fragmentary dump — `ret`/`i` declarations, the
 * `goto fail` after each step, the success return and the fail-label
 * epilogue structure are missing from this extraction; verify upstream.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
	struct amdgpu_device *adev = dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;

	/*
	 * This bool will be set for true for any modeset/reset
	 * or plane update which implies non fast surface update.
	 */
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);

	/*
	 * legacy_cursor_update should be made false for SoC's having
	 * a dedicated hardware plane for cursor in amdgpu_dm_atomic_commit(),
	 * otherwise for software cursor plane,
	 * we should not add it to list of affected planes.
	 */
	if (state->legacy_cursor_update) {
		for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
			if (new_crtc_state->color_mgmt_changed) {
				ret = drm_atomic_add_affected_planes(state, crtc);

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed)

		if (!new_crtc_state->enable)

		ret = drm_atomic_add_affected_connectors(state, crtc);

		ret = drm_atomic_add_affected_planes(state, crtc);

	/* trial context: start from the currently-committed DC state */
	dm_state->context = dc_create_state();
	ASSERT(dm_state->context);
	dc_resource_state_copy_construct_current(dc, dm_state->context);

	/* Remove exiting planes if they are modified */
	ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);

	/* Disable all crtcs which require disable */
	ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);

	/* Enable all crtcs which require enable */
	ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);

	/* Add new/modified planes */
	ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);

	/* Check scaling and underscan changes*/
	/*TODO Removed scaling changes validation due to inability to commit
	 * new stream into context w\o causing full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
			drm_atomic_get_new_crtc_state(state, &acrtc->base)))

		/* Skip any thing not scale or underscan changes */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))

		lock_and_validation_needed = true;

	/*
	 * For full updates case when
	 * removing/adding/updating streams on once CRTC while flipping
	 * acquiring global lock will guarantee that any such full
	 * will wait for completion of any outstanding flip using DRMs
	 * synchronization events.
	 */
	if (lock_and_validation_needed) {

		ret = do_aquire_global_lock(dev, state);

		if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {

	/* Must be success */

	/* gap: "fail:" label presumably precedes this error triage */
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
4839 static bool is_dp_capable_without_timing_msa(struct dc
*dc
,
4840 struct amdgpu_dm_connector
*amdgpu_dm_connector
)
4843 bool capable
= false;
4845 if (amdgpu_dm_connector
->dc_link
&&
4846 dm_helpers_dp_read_dpcd(
4848 amdgpu_dm_connector
->dc_link
,
4849 DP_DOWN_STREAM_PORT_COUNT
,
4851 sizeof(dpcd_data
))) {
4852 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
4857 void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector
*connector
,
4861 uint64_t val_capable
;
4862 bool edid_check_required
;
4863 struct detailed_timing
*timing
;
4864 struct detailed_non_pixel
*data
;
4865 struct detailed_data_monitor_range
*range
;
4866 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
4867 to_amdgpu_dm_connector(connector
);
4869 struct drm_device
*dev
= connector
->dev
;
4870 struct amdgpu_device
*adev
= dev
->dev_private
;
4872 edid_check_required
= false;
4873 if (!amdgpu_dm_connector
->dc_sink
) {
4874 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
4877 if (!adev
->dm
.freesync_module
)
4880 * if edid non zero restrict freesync only for dp and edp
4883 if (amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
4884 || amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
4885 edid_check_required
= is_dp_capable_without_timing_msa(
4887 amdgpu_dm_connector
);
4891 if (edid_check_required
== true && (edid
->version
> 1 ||
4892 (edid
->version
== 1 && edid
->revision
> 1))) {
4893 for (i
= 0; i
< 4; i
++) {
4895 timing
= &edid
->detailed_timings
[i
];
4896 data
= &timing
->data
.other_data
;
4897 range
= &data
->data
.range
;
4899 * Check if monitor has continuous frequency mode
4901 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
4904 * Check for flag range limits only. If flag == 1 then
4905 * no additional timing information provided.
4906 * Default GTF, GTF Secondary curve and CVT are not
4909 if (range
->flags
!= 1)
4912 amdgpu_dm_connector
->min_vfreq
= range
->min_vfreq
;
4913 amdgpu_dm_connector
->max_vfreq
= range
->max_vfreq
;
4914 amdgpu_dm_connector
->pixel_clock_mhz
=
4915 range
->pixel_clock_mhz
* 10;
4919 if (amdgpu_dm_connector
->max_vfreq
-
4920 amdgpu_dm_connector
->min_vfreq
> 10) {
4921 amdgpu_dm_connector
->caps
.supported
= true;
4922 amdgpu_dm_connector
->caps
.min_refresh_in_micro_hz
=
4923 amdgpu_dm_connector
->min_vfreq
* 1000000;
4924 amdgpu_dm_connector
->caps
.max_refresh_in_micro_hz
=
4925 amdgpu_dm_connector
->max_vfreq
* 1000000;
4931 * TODO figure out how to notify user-mode or DRM of freesync caps
4932 * once we figure out how to deal with freesync in an upstreamable
4938 void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector
*connector
)
4941 * TODO fill in once we figure out how to deal with freesync in
4942 * an upstreamable fashion