2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include "dm_services_types.h"
28 #include "dc/inc/core_types.h"
32 #include "amdgpu_display.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
40 #include "dm_services_types.h"
41 #include "amdgpu_dm_mst_types.h"
43 #include "ivsrcid/ivsrcid_vislands30.h"
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/version.h>
48 #include <linux/types.h>
51 #include <drm/drm_atomic.h>
52 #include <drm/drm_atomic_helper.h>
53 #include <drm/drm_dp_mst_helper.h>
54 #include <drm/drm_fb_helper.h>
55 #include <drm/drm_edid.h>
57 #include "modules/inc/mod_freesync.h"
59 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
60 #include "ivsrcid/irqsrcs_dcn_1_0.h"
62 #include "raven1/DCN/dcn_1_0_offset.h"
63 #include "raven1/DCN/dcn_1_0_sh_mask.h"
64 #include "vega10/soc15ip.h"
66 #include "soc15_common.h"
69 #include "modules/inc/mod_freesync.h"
71 #include "i2caux_interface.h"
73 /* basic init/fini API */
74 static int amdgpu_dm_init(struct amdgpu_device
*adev
);
75 static void amdgpu_dm_fini(struct amdgpu_device
*adev
);
77 /* initializes drm_device display related structures, based on the information
78 * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
79 * drm_encoder, drm_mode_config
81 * Returns 0 on success
83 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
);
84 /* removes and deallocates the drm structures, created by the above function */
85 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
);
88 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
);
90 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
91 struct amdgpu_plane
*aplane
,
92 unsigned long possible_crtcs
);
93 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
94 struct drm_plane
*plane
,
96 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
97 struct amdgpu_dm_connector
*amdgpu_dm_connector
,
99 struct amdgpu_encoder
*amdgpu_encoder
);
100 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
101 struct amdgpu_encoder
*aencoder
,
102 uint32_t link_index
);
104 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
);
106 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
107 struct drm_atomic_state
*state
,
110 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
);
112 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
113 struct drm_atomic_state
*state
);
118 static const enum drm_plane_type dm_plane_type_default
[AMDGPU_MAX_PLANES
] = {
119 DRM_PLANE_TYPE_PRIMARY
,
120 DRM_PLANE_TYPE_PRIMARY
,
121 DRM_PLANE_TYPE_PRIMARY
,
122 DRM_PLANE_TYPE_PRIMARY
,
123 DRM_PLANE_TYPE_PRIMARY
,
124 DRM_PLANE_TYPE_PRIMARY
,
127 static const enum drm_plane_type dm_plane_type_carizzo
[AMDGPU_MAX_PLANES
] = {
128 DRM_PLANE_TYPE_PRIMARY
,
129 DRM_PLANE_TYPE_PRIMARY
,
130 DRM_PLANE_TYPE_PRIMARY
,
131 DRM_PLANE_TYPE_OVERLAY
,/* YUV Capable Underlay */
134 static const enum drm_plane_type dm_plane_type_stoney
[AMDGPU_MAX_PLANES
] = {
135 DRM_PLANE_TYPE_PRIMARY
,
136 DRM_PLANE_TYPE_PRIMARY
,
137 DRM_PLANE_TYPE_OVERLAY
, /* YUV Capable Underlay */
141 * dm_vblank_get_counter
144 * Get counter for number of vertical blanks
147 * struct amdgpu_device *adev - [in] desired amdgpu device
148 * int disp_idx - [in] which CRTC to get the counter from
151 * Counter for vertical blanks
153 static u32
dm_vblank_get_counter(struct amdgpu_device
*adev
, int crtc
)
155 if (crtc
>= adev
->mode_info
.num_crtc
)
158 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
159 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
163 if (acrtc_state
->stream
== NULL
) {
164 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
169 return dc_stream_get_vblank_counter(acrtc_state
->stream
);
173 static int dm_crtc_get_scanoutpos(struct amdgpu_device
*adev
, int crtc
,
174 u32
*vbl
, u32
*position
)
176 uint32_t v_blank_start
, v_blank_end
, h_position
, v_position
;
178 if ((crtc
< 0) || (crtc
>= adev
->mode_info
.num_crtc
))
181 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
182 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
185 if (acrtc_state
->stream
== NULL
) {
186 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
192 * TODO rework base driver to use values directly.
193 * for now parse it back into reg-format
195 dc_stream_get_scanoutpos(acrtc_state
->stream
,
201 *position
= v_position
| (h_position
<< 16);
202 *vbl
= v_blank_start
| (v_blank_end
<< 16);
/* IP-block idle query; DM has no meaningful idle state, always reports idle. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
/* IP-block wait-for-idle hook; nothing to wait on for DM, always succeeds. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
/* IP-block soft-reset query; DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
/* IP-block soft-reset hook; no-op for DM. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
231 static struct amdgpu_crtc
*
232 get_crtc_by_otg_inst(struct amdgpu_device
*adev
,
235 struct drm_device
*dev
= adev
->ddev
;
236 struct drm_crtc
*crtc
;
237 struct amdgpu_crtc
*amdgpu_crtc
;
240 * following if is check inherited from both functions where this one is
241 * used now. Need to be checked why it could happen.
243 if (otg_inst
== -1) {
245 return adev
->mode_info
.crtcs
[0];
248 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
249 amdgpu_crtc
= to_amdgpu_crtc(crtc
);
251 if (amdgpu_crtc
->otg_inst
== otg_inst
)
258 static void dm_pflip_high_irq(void *interrupt_params
)
260 struct amdgpu_crtc
*amdgpu_crtc
;
261 struct common_irq_params
*irq_params
= interrupt_params
;
262 struct amdgpu_device
*adev
= irq_params
->adev
;
265 amdgpu_crtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_PFLIP
);
267 /* IRQ could occur when in initial stage */
268 /*TODO work and BO cleanup */
269 if (amdgpu_crtc
== NULL
) {
270 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
274 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
276 if (amdgpu_crtc
->pflip_status
!= AMDGPU_FLIP_SUBMITTED
){
277 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
278 amdgpu_crtc
->pflip_status
,
279 AMDGPU_FLIP_SUBMITTED
,
280 amdgpu_crtc
->crtc_id
,
282 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
287 /* wakeup usersapce */
288 if (amdgpu_crtc
->event
) {
289 /* Update to correct count/ts if racing with vblank irq */
290 drm_crtc_accurate_vblank_count(&amdgpu_crtc
->base
);
292 drm_crtc_send_vblank_event(&amdgpu_crtc
->base
, amdgpu_crtc
->event
);
294 /* page flip completed. clean up */
295 amdgpu_crtc
->event
= NULL
;
300 amdgpu_crtc
->pflip_status
= AMDGPU_FLIP_NONE
;
301 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
303 DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
304 __func__
, amdgpu_crtc
->crtc_id
, amdgpu_crtc
);
306 drm_crtc_vblank_put(&amdgpu_crtc
->base
);
309 static void dm_crtc_high_irq(void *interrupt_params
)
311 struct common_irq_params
*irq_params
= interrupt_params
;
312 struct amdgpu_device
*adev
= irq_params
->adev
;
313 uint8_t crtc_index
= 0;
314 struct amdgpu_crtc
*acrtc
;
316 acrtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_VBLANK
);
319 crtc_index
= acrtc
->crtc_id
;
321 drm_handle_vblank(adev
->ddev
, crtc_index
);
324 static int dm_set_clockgating_state(void *handle
,
325 enum amd_clockgating_state state
)
330 static int dm_set_powergating_state(void *handle
,
331 enum amd_powergating_state state
)
336 /* Prototypes of private functions */
337 static int dm_early_init(void* handle
);
339 static void hotplug_notify_work_func(struct work_struct
*work
)
341 struct amdgpu_display_manager
*dm
= container_of(work
, struct amdgpu_display_manager
, mst_hotplug_work
);
342 struct drm_device
*dev
= dm
->ddev
;
344 drm_kms_helper_hotplug_event(dev
);
348 #include "dal_asic_id.h"
349 /* Allocate memory for FBC compressed data */
350 /* TODO: Dynamic allocation */
351 #define AMDGPU_FBC_SIZE (3840 * 2160 * 4)
353 static void amdgpu_dm_initialize_fbc(struct amdgpu_device
*adev
)
356 struct dm_comressor_info
*compressor
= &adev
->dm
.compressor
;
358 if (!compressor
->bo_ptr
) {
359 r
= amdgpu_bo_create_kernel(adev
, AMDGPU_FBC_SIZE
, PAGE_SIZE
,
360 AMDGPU_GEM_DOMAIN_VRAM
, &compressor
->bo_ptr
,
361 &compressor
->gpu_addr
, &compressor
->cpu_addr
);
364 DRM_ERROR("DM: Failed to initialize fbc\n");
373 * Returns 0 on success
375 static int amdgpu_dm_init(struct amdgpu_device
*adev
)
377 struct dc_init_data init_data
;
378 adev
->dm
.ddev
= adev
->ddev
;
379 adev
->dm
.adev
= adev
;
381 /* Zero all the fields */
382 memset(&init_data
, 0, sizeof(init_data
));
384 /* initialize DAL's lock (for SYNC context use) */
385 spin_lock_init(&adev
->dm
.dal_lock
);
387 /* initialize DAL's mutex */
388 mutex_init(&adev
->dm
.dal_mutex
);
390 if(amdgpu_dm_irq_init(adev
)) {
391 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
395 init_data
.asic_id
.chip_family
= adev
->family
;
397 init_data
.asic_id
.pci_revision_id
= adev
->rev_id
;
398 init_data
.asic_id
.hw_internal_rev
= adev
->external_rev_id
;
400 init_data
.asic_id
.vram_width
= adev
->mc
.vram_width
;
401 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
402 init_data
.asic_id
.atombios_base_address
=
403 adev
->mode_info
.atom_context
->bios
;
405 init_data
.driver
= adev
;
407 adev
->dm
.cgs_device
= amdgpu_cgs_create_device(adev
);
409 if (!adev
->dm
.cgs_device
) {
410 DRM_ERROR("amdgpu: failed to create cgs device.\n");
414 init_data
.cgs_device
= adev
->dm
.cgs_device
;
418 init_data
.dce_environment
= DCE_ENV_PRODUCTION_DRV
;
421 init_data
.log_mask
= DC_DEFAULT_LOG_MASK
;
423 init_data
.log_mask
= DC_MIN_LOG_MASK
;
426 if (adev
->family
== FAMILY_CZ
)
427 amdgpu_dm_initialize_fbc(adev
);
428 init_data
.fbc_gpu_addr
= adev
->dm
.compressor
.gpu_addr
;
430 /* Display Core create. */
431 adev
->dm
.dc
= dc_create(&init_data
);
434 DRM_INFO("Display Core initialized!\n");
436 DRM_INFO("Display Core failed to initialize!\n");
438 INIT_WORK(&adev
->dm
.mst_hotplug_work
, hotplug_notify_work_func
);
440 adev
->dm
.freesync_module
= mod_freesync_create(adev
->dm
.dc
);
441 if (!adev
->dm
.freesync_module
) {
443 "amdgpu: failed to initialize freesync_module.\n");
445 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
446 adev
->dm
.freesync_module
);
448 if (amdgpu_dm_initialize_drm_device(adev
)) {
450 "amdgpu: failed to initialize sw for display support.\n");
454 /* Update the actual used number of crtc */
455 adev
->mode_info
.num_crtc
= adev
->dm
.display_indexes_num
;
457 /* TODO: Add_display_info? */
459 /* TODO use dynamic cursor width */
460 adev
->ddev
->mode_config
.cursor_width
= adev
->dm
.dc
->caps
.max_cursor_size
;
461 adev
->ddev
->mode_config
.cursor_height
= adev
->dm
.dc
->caps
.max_cursor_size
;
463 if (drm_vblank_init(adev
->ddev
, adev
->dm
.display_indexes_num
)) {
465 "amdgpu: failed to initialize sw for display support.\n");
469 DRM_DEBUG_DRIVER("KMS initialized.\n");
473 amdgpu_dm_fini(adev
);
478 static void amdgpu_dm_fini(struct amdgpu_device
*adev
)
480 amdgpu_dm_destroy_drm_device(&adev
->dm
);
482 * TODO: pageflip, vlank interrupt
484 * amdgpu_dm_irq_fini(adev);
487 if (adev
->dm
.cgs_device
) {
488 amdgpu_cgs_destroy_device(adev
->dm
.cgs_device
);
489 adev
->dm
.cgs_device
= NULL
;
491 if (adev
->dm
.freesync_module
) {
492 mod_freesync_destroy(adev
->dm
.freesync_module
);
493 adev
->dm
.freesync_module
= NULL
;
495 /* DC Destroy TODO: Replace destroy DAL */
497 dc_destroy(&adev
->dm
.dc
);
/* IP-block software init hook; DM does its setup in dm_hw_init(). */
static int dm_sw_init(void *handle)
{
	return 0;
}
/* IP-block software fini hook; DM tears down in dm_hw_fini(). */
static int dm_sw_fini(void *handle)
{
	return 0;
}
511 static int detect_mst_link_for_all_connectors(struct drm_device
*dev
)
513 struct amdgpu_dm_connector
*aconnector
;
514 struct drm_connector
*connector
;
517 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
519 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
520 aconnector
= to_amdgpu_dm_connector(connector
);
521 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
) {
522 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
523 aconnector
, aconnector
->base
.base
.id
);
525 ret
= drm_dp_mst_topology_mgr_set_mst(&aconnector
->mst_mgr
, true);
527 DRM_ERROR("DM_MST: Failed to start MST\n");
528 ((struct dc_link
*)aconnector
->dc_link
)->type
= dc_connection_single
;
534 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
538 static int dm_late_init(void *handle
)
540 struct drm_device
*dev
= ((struct amdgpu_device
*)handle
)->ddev
;
542 return detect_mst_link_for_all_connectors(dev
);
545 static void s3_handle_mst(struct drm_device
*dev
, bool suspend
)
547 struct amdgpu_dm_connector
*aconnector
;
548 struct drm_connector
*connector
;
550 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
552 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
553 aconnector
= to_amdgpu_dm_connector(connector
);
554 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
555 !aconnector
->mst_port
) {
558 drm_dp_mst_topology_mgr_suspend(&aconnector
->mst_mgr
);
560 drm_dp_mst_topology_mgr_resume(&aconnector
->mst_mgr
);
564 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
/* IP-block hardware init: create the display manager and enable HPD. */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/* IP-block hardware fini: disable HPD and IRQs, then destroy the DM. */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);

	return 0;
}
588 static int dm_suspend(void *handle
)
590 struct amdgpu_device
*adev
= handle
;
591 struct amdgpu_display_manager
*dm
= &adev
->dm
;
594 s3_handle_mst(adev
->ddev
, true);
596 amdgpu_dm_irq_suspend(adev
);
598 WARN_ON(adev
->dm
.cached_state
);
599 adev
->dm
.cached_state
= drm_atomic_helper_suspend(adev
->ddev
);
601 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D3
);
606 static struct amdgpu_dm_connector
*
607 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state
*state
,
608 struct drm_crtc
*crtc
)
611 struct drm_connector_state
*new_con_state
;
612 struct drm_connector
*connector
;
613 struct drm_crtc
*crtc_from_state
;
615 for_each_new_connector_in_state(state
, connector
, new_con_state
, i
) {
616 crtc_from_state
= new_con_state
->crtc
;
618 if (crtc_from_state
== crtc
)
619 return to_amdgpu_dm_connector(connector
);
625 static int dm_resume(void *handle
)
627 struct amdgpu_device
*adev
= handle
;
628 struct amdgpu_display_manager
*dm
= &adev
->dm
;
630 /* power on hardware */
631 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D0
);
636 int amdgpu_dm_display_resume(struct amdgpu_device
*adev
)
638 struct drm_device
*ddev
= adev
->ddev
;
639 struct amdgpu_display_manager
*dm
= &adev
->dm
;
640 struct amdgpu_dm_connector
*aconnector
;
641 struct drm_connector
*connector
;
642 struct drm_crtc
*crtc
;
643 struct drm_crtc_state
*new_crtc_state
;
647 /* program HPD filter */
650 /* On resume we need to rewrite the MSTM control bits to enamble MST*/
651 s3_handle_mst(ddev
, false);
654 * early enable HPD Rx IRQ, should be done before set mode as short
655 * pulse interrupts are used for MST
657 amdgpu_dm_irq_resume_early(adev
);
660 list_for_each_entry(connector
,
661 &ddev
->mode_config
.connector_list
, head
) {
662 aconnector
= to_amdgpu_dm_connector(connector
);
665 * this is the case when traversing through already created
666 * MST connectors, should be skipped
668 if (aconnector
->mst_port
)
671 mutex_lock(&aconnector
->hpd_lock
);
672 dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
);
673 aconnector
->dc_sink
= NULL
;
674 amdgpu_dm_update_connector_after_detect(aconnector
);
675 mutex_unlock(&aconnector
->hpd_lock
);
678 /* Force mode set in atomic comit */
679 for_each_new_crtc_in_state(adev
->dm
.cached_state
, crtc
, new_crtc_state
, i
)
680 new_crtc_state
->active_changed
= true;
682 ret
= drm_atomic_helper_resume(ddev
, adev
->dm
.cached_state
);
684 drm_atomic_state_put(adev
->dm
.cached_state
);
685 adev
->dm
.cached_state
= NULL
;
687 amdgpu_dm_irq_resume_late(adev
);
692 static const struct amd_ip_funcs amdgpu_dm_funcs
= {
694 .early_init
= dm_early_init
,
695 .late_init
= dm_late_init
,
696 .sw_init
= dm_sw_init
,
697 .sw_fini
= dm_sw_fini
,
698 .hw_init
= dm_hw_init
,
699 .hw_fini
= dm_hw_fini
,
700 .suspend
= dm_suspend
,
702 .is_idle
= dm_is_idle
,
703 .wait_for_idle
= dm_wait_for_idle
,
704 .check_soft_reset
= dm_check_soft_reset
,
705 .soft_reset
= dm_soft_reset
,
706 .set_clockgating_state
= dm_set_clockgating_state
,
707 .set_powergating_state
= dm_set_powergating_state
,
710 const struct amdgpu_ip_block_version dm_ip_block
=
712 .type
= AMD_IP_BLOCK_TYPE_DCE
,
716 .funcs
= &amdgpu_dm_funcs
,
720 static struct drm_atomic_state
*
721 dm_atomic_state_alloc(struct drm_device
*dev
)
723 struct dm_atomic_state
*state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
728 if (drm_atomic_state_init(dev
, &state
->base
) < 0)
739 dm_atomic_state_clear(struct drm_atomic_state
*state
)
741 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
743 if (dm_state
->context
) {
744 dc_release_state(dm_state
->context
);
745 dm_state
->context
= NULL
;
748 drm_atomic_state_default_clear(state
);
/* Free a DM atomic state allocated by dm_atomic_state_alloc(). */
static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	drm_atomic_state_default_release(state);
	kfree(dm_state);
}
759 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs
= {
760 .fb_create
= amdgpu_user_framebuffer_create
,
761 .output_poll_changed
= amdgpu_output_poll_changed
,
762 .atomic_check
= amdgpu_dm_atomic_check
,
763 .atomic_commit
= amdgpu_dm_atomic_commit
,
764 .atomic_state_alloc
= dm_atomic_state_alloc
,
765 .atomic_state_clear
= dm_atomic_state_clear
,
766 .atomic_state_free
= dm_atomic_state_alloc_free
769 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs
= {
770 .atomic_commit_tail
= amdgpu_dm_atomic_commit_tail
774 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
)
776 struct drm_connector
*connector
= &aconnector
->base
;
777 struct drm_device
*dev
= connector
->dev
;
778 struct dc_sink
*sink
;
780 /* MST handled by drm_mst framework */
781 if (aconnector
->mst_mgr
.mst_state
== true)
785 sink
= aconnector
->dc_link
->local_sink
;
787 /* Edid mgmt connector gets first update only in mode_valid hook and then
788 * the connector sink is set to either fake or physical sink depends on link status.
789 * don't do it here if u are during boot
791 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
792 && aconnector
->dc_em_sink
) {
794 /* For S3 resume with headless use eml_sink to fake stream
795 * because on resume connecotr->sink is set ti NULL
797 mutex_lock(&dev
->mode_config
.mutex
);
800 if (aconnector
->dc_sink
) {
801 amdgpu_dm_remove_sink_from_freesync_module(
803 /* retain and release bellow are used for
804 * bump up refcount for sink because the link don't point
805 * to it anymore after disconnect so on next crtc to connector
806 * reshuffle by UMD we will get into unwanted dc_sink release
808 if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
809 dc_sink_release(aconnector
->dc_sink
);
811 aconnector
->dc_sink
= sink
;
812 amdgpu_dm_add_sink_to_freesync_module(
813 connector
, aconnector
->edid
);
815 amdgpu_dm_remove_sink_from_freesync_module(connector
);
816 if (!aconnector
->dc_sink
)
817 aconnector
->dc_sink
= aconnector
->dc_em_sink
;
818 else if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
819 dc_sink_retain(aconnector
->dc_sink
);
822 mutex_unlock(&dev
->mode_config
.mutex
);
827 * TODO: temporary guard to look for proper fix
828 * if this sink is MST sink, we should not do anything
830 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
)
833 if (aconnector
->dc_sink
== sink
) {
834 /* We got a DP short pulse (Link Loss, DP CTS, etc...).
836 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
837 aconnector
->connector_id
);
841 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
842 aconnector
->connector_id
, aconnector
->dc_sink
, sink
);
844 mutex_lock(&dev
->mode_config
.mutex
);
846 /* 1. Update status of the drm connector
847 * 2. Send an event and let userspace tell us what to do */
849 /* TODO: check if we still need the S3 mode update workaround.
850 * If yes, put it here. */
851 if (aconnector
->dc_sink
)
852 amdgpu_dm_remove_sink_from_freesync_module(
855 aconnector
->dc_sink
= sink
;
856 if (sink
->dc_edid
.length
== 0) {
857 aconnector
->edid
= NULL
;
860 (struct edid
*) sink
->dc_edid
.raw_edid
;
863 drm_mode_connector_update_edid_property(connector
,
866 amdgpu_dm_add_sink_to_freesync_module(connector
, aconnector
->edid
);
869 amdgpu_dm_remove_sink_from_freesync_module(connector
);
870 drm_mode_connector_update_edid_property(connector
, NULL
);
871 aconnector
->num_modes
= 0;
872 aconnector
->dc_sink
= NULL
;
875 mutex_unlock(&dev
->mode_config
.mutex
);
878 static void handle_hpd_irq(void *param
)
880 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
881 struct drm_connector
*connector
= &aconnector
->base
;
882 struct drm_device
*dev
= connector
->dev
;
884 /* In case of failure or MST no need to update connector status or notify the OS
885 * since (for MST case) MST does this in it's own context.
887 mutex_lock(&aconnector
->hpd_lock
);
889 if (aconnector
->fake_enable
)
890 aconnector
->fake_enable
= false;
892 if (dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
)) {
893 amdgpu_dm_update_connector_after_detect(aconnector
);
896 drm_modeset_lock_all(dev
);
897 dm_restore_drm_connector_state(dev
, connector
);
898 drm_modeset_unlock_all(dev
);
900 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
901 drm_kms_helper_hotplug_event(dev
);
903 mutex_unlock(&aconnector
->hpd_lock
);
907 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector
*aconnector
)
909 uint8_t esi
[DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
] = { 0 };
911 bool new_irq_handled
= false;
913 int dpcd_bytes_to_read
;
915 const int max_process_count
= 30;
916 int process_count
= 0;
918 const struct dc_link_status
*link_status
= dc_link_get_status(aconnector
->dc_link
);
920 if (link_status
->dpcd_caps
->dpcd_rev
.raw
< 0x12) {
921 dpcd_bytes_to_read
= DP_LANE0_1_STATUS
- DP_SINK_COUNT
;
922 /* DPCD 0x200 - 0x201 for downstream IRQ */
923 dpcd_addr
= DP_SINK_COUNT
;
925 dpcd_bytes_to_read
= DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
;
926 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
927 dpcd_addr
= DP_SINK_COUNT_ESI
;
930 dret
= drm_dp_dpcd_read(
931 &aconnector
->dm_dp_aux
.aux
,
936 while (dret
== dpcd_bytes_to_read
&&
937 process_count
< max_process_count
) {
943 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi
[0], esi
[1], esi
[2]);
944 /* handle HPD short pulse irq */
945 if (aconnector
->mst_mgr
.mst_state
)
947 &aconnector
->mst_mgr
,
951 if (new_irq_handled
) {
952 /* ACK at DPCD to notify down stream */
953 const int ack_dpcd_bytes_to_write
=
954 dpcd_bytes_to_read
- 1;
956 for (retry
= 0; retry
< 3; retry
++) {
959 wret
= drm_dp_dpcd_write(
960 &aconnector
->dm_dp_aux
.aux
,
963 ack_dpcd_bytes_to_write
);
964 if (wret
== ack_dpcd_bytes_to_write
)
968 /* check if there is new irq to be handle */
969 dret
= drm_dp_dpcd_read(
970 &aconnector
->dm_dp_aux
.aux
,
975 new_irq_handled
= false;
981 if (process_count
== max_process_count
)
982 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
985 static void handle_hpd_rx_irq(void *param
)
987 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
988 struct drm_connector
*connector
= &aconnector
->base
;
989 struct drm_device
*dev
= connector
->dev
;
990 struct dc_link
*dc_link
= aconnector
->dc_link
;
991 bool is_mst_root_connector
= aconnector
->mst_mgr
.mst_state
;
993 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
994 * conflict, after implement i2c helper, this mutex should be
997 if (dc_link
->type
!= dc_connection_mst_branch
)
998 mutex_lock(&aconnector
->hpd_lock
);
1000 if (dc_link_handle_hpd_rx_irq(dc_link
, NULL
) &&
1001 !is_mst_root_connector
) {
1002 /* Downstream Port status changed. */
1003 if (dc_link_detect(dc_link
, DETECT_REASON_HPDRX
)) {
1004 amdgpu_dm_update_connector_after_detect(aconnector
);
1007 drm_modeset_lock_all(dev
);
1008 dm_restore_drm_connector_state(dev
, connector
);
1009 drm_modeset_unlock_all(dev
);
1011 drm_kms_helper_hotplug_event(dev
);
1014 if ((dc_link
->cur_link_settings
.lane_count
!= LANE_COUNT_UNKNOWN
) ||
1015 (dc_link
->type
== dc_connection_mst_branch
))
1016 dm_handle_hpd_rx_irq(aconnector
);
1018 if (dc_link
->type
!= dc_connection_mst_branch
)
1019 mutex_unlock(&aconnector
->hpd_lock
);
1022 static void register_hpd_handlers(struct amdgpu_device
*adev
)
1024 struct drm_device
*dev
= adev
->ddev
;
1025 struct drm_connector
*connector
;
1026 struct amdgpu_dm_connector
*aconnector
;
1027 const struct dc_link
*dc_link
;
1028 struct dc_interrupt_params int_params
= {0};
1030 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1031 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1033 list_for_each_entry(connector
,
1034 &dev
->mode_config
.connector_list
, head
) {
1036 aconnector
= to_amdgpu_dm_connector(connector
);
1037 dc_link
= aconnector
->dc_link
;
1039 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd
) {
1040 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1041 int_params
.irq_source
= dc_link
->irq_source_hpd
;
1043 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1045 (void *) aconnector
);
1048 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd_rx
) {
1050 /* Also register for DP short pulse (hpd_rx). */
1051 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1052 int_params
.irq_source
= dc_link
->irq_source_hpd_rx
;
1054 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1056 (void *) aconnector
);
1061 /* Register IRQ sources and initialize IRQ callbacks */
1062 static int dce110_register_irq_handlers(struct amdgpu_device
*adev
)
1064 struct dc
*dc
= adev
->dm
.dc
;
1065 struct common_irq_params
*c_irq_params
;
1066 struct dc_interrupt_params int_params
= {0};
1069 unsigned client_id
= AMDGPU_IH_CLIENTID_LEGACY
;
1071 if (adev
->asic_type
== CHIP_VEGA10
||
1072 adev
->asic_type
== CHIP_RAVEN
)
1073 client_id
= AMDGPU_IH_CLIENTID_DCE
;
1075 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1076 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1078 /* Actions of amdgpu_irq_add_id():
1079 * 1. Register a set() function with base driver.
1080 * Base driver will call set() function to enable/disable an
1081 * interrupt in DC hardware.
1082 * 2. Register amdgpu_dm_irq_handler().
1083 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1084 * coming from DC hardware.
1085 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1086 * for acknowledging and handling. */
1088 /* Use VBLANK interrupt */
1089 for (i
= VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
; i
<= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0
; i
++) {
1090 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->crtc_irq
);
1092 DRM_ERROR("Failed to add crtc irq id!\n");
1096 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1097 int_params
.irq_source
=
1098 dc_interrupt_to_irq_source(dc
, i
, 0);
1100 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1102 c_irq_params
->adev
= adev
;
1103 c_irq_params
->irq_src
= int_params
.irq_source
;
1105 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1106 dm_crtc_high_irq
, c_irq_params
);
1109 /* Use GRPH_PFLIP interrupt */
1110 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
1111 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
1112 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
1114 DRM_ERROR("Failed to add page flip irq id!\n");
1118 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1119 int_params
.irq_source
=
1120 dc_interrupt_to_irq_source(dc
, i
, 0);
1122 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1124 c_irq_params
->adev
= adev
;
1125 c_irq_params
->irq_src
= int_params
.irq_source
;
1127 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1128 dm_pflip_high_irq
, c_irq_params
);
1133 r
= amdgpu_irq_add_id(adev
, client_id
,
1134 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
1136 DRM_ERROR("Failed to add hpd irq id!\n");
1140 register_hpd_handlers(adev
);
1145 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1146 /* Register IRQ sources and initialize IRQ callbacks */
1147 static int dcn10_register_irq_handlers(struct amdgpu_device
*adev
)
1149 struct dc
*dc
= adev
->dm
.dc
;
1150 struct common_irq_params
*c_irq_params
;
1151 struct dc_interrupt_params int_params
= {0};
1155 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1156 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1158 /* Actions of amdgpu_irq_add_id():
1159 * 1. Register a set() function with base driver.
1160 * Base driver will call set() function to enable/disable an
1161 * interrupt in DC hardware.
1162 * 2. Register amdgpu_dm_irq_handler().
1163 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1164 * coming from DC hardware.
1165 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1166 * for acknowledging and handling.
1169 /* Use VSTARTUP interrupt */
1170 for (i
= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
;
1171 i
<= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
+ adev
->mode_info
.num_crtc
- 1;
1173 r
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_DCE
, i
, &adev
->crtc_irq
);
1176 DRM_ERROR("Failed to add crtc irq id!\n");
1180 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1181 int_params
.irq_source
=
1182 dc_interrupt_to_irq_source(dc
, i
, 0);
1184 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1186 c_irq_params
->adev
= adev
;
1187 c_irq_params
->irq_src
= int_params
.irq_source
;
1189 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1190 dm_crtc_high_irq
, c_irq_params
);
1193 /* Use GRPH_PFLIP interrupt */
1194 for (i
= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
;
1195 i
<= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
+ adev
->mode_info
.num_crtc
- 1;
1197 r
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_DCE
, i
, &adev
->pageflip_irq
);
1199 DRM_ERROR("Failed to add page flip irq id!\n");
1203 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1204 int_params
.irq_source
=
1205 dc_interrupt_to_irq_source(dc
, i
, 0);
1207 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1209 c_irq_params
->adev
= adev
;
1210 c_irq_params
->irq_src
= int_params
.irq_source
;
1212 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1213 dm_pflip_high_irq
, c_irq_params
);
1218 r
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_DCE
, DCN_1_0__SRCID__DC_HPD1_INT
,
1221 DRM_ERROR("Failed to add hpd irq id!\n");
1225 register_hpd_handlers(adev
);
1231 static int amdgpu_dm_mode_config_init(struct amdgpu_device
*adev
)
1235 adev
->mode_info
.mode_config_initialized
= true;
1237 adev
->ddev
->mode_config
.funcs
= (void *)&amdgpu_dm_mode_funcs
;
1238 adev
->ddev
->mode_config
.helper_private
= &amdgpu_dm_mode_config_helperfuncs
;
1240 adev
->ddev
->mode_config
.max_width
= 16384;
1241 adev
->ddev
->mode_config
.max_height
= 16384;
1243 adev
->ddev
->mode_config
.preferred_depth
= 24;
1244 adev
->ddev
->mode_config
.prefer_shadow
= 1;
1245 /* indicate support of immediate flip */
1246 adev
->ddev
->mode_config
.async_page_flip
= true;
1248 adev
->ddev
->mode_config
.fb_base
= adev
->mc
.aper_base
;
1250 r
= amdgpu_modeset_create_props(adev
);
1257 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1258 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1260 static int amdgpu_dm_backlight_update_status(struct backlight_device
*bd
)
1262 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
1264 if (dc_link_set_backlight_level(dm
->backlight_link
,
1265 bd
->props
.brightness
, 0, 0))
1271 static int amdgpu_dm_backlight_get_brightness(struct backlight_device
*bd
)
1273 return bd
->props
.brightness
;
1276 static const struct backlight_ops amdgpu_dm_backlight_ops
= {
1277 .get_brightness
= amdgpu_dm_backlight_get_brightness
,
1278 .update_status
= amdgpu_dm_backlight_update_status
,
1282 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager
*dm
)
1285 struct backlight_properties props
= { 0 };
1287 props
.max_brightness
= AMDGPU_MAX_BL_LEVEL
;
1288 props
.type
= BACKLIGHT_RAW
;
1290 snprintf(bl_name
, sizeof(bl_name
), "amdgpu_bl%d",
1291 dm
->adev
->ddev
->primary
->index
);
1293 dm
->backlight_dev
= backlight_device_register(bl_name
,
1294 dm
->adev
->ddev
->dev
,
1296 &amdgpu_dm_backlight_ops
,
1299 if (NULL
== dm
->backlight_dev
)
1300 DRM_ERROR("DM: Backlight registration failed!\n");
1302 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name
);
1307 /* In this architecture, the association
1308 * connector -> encoder -> crtc
1309 * is not really required. The crtc and connector will hold the
1310 * display_index as an abstraction to use with DAL component
1312 * Returns 0 on success
1314 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
)
1316 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1318 struct amdgpu_dm_connector
*aconnector
= NULL
;
1319 struct amdgpu_encoder
*aencoder
= NULL
;
1320 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
1322 unsigned long possible_crtcs
;
1324 link_cnt
= dm
->dc
->caps
.max_links
;
1325 if (amdgpu_dm_mode_config_init(dm
->adev
)) {
1326 DRM_ERROR("DM: Failed to initialize mode config\n");
1330 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; i
++) {
1331 mode_info
->planes
[i
] = kzalloc(sizeof(struct amdgpu_plane
),
1333 if (!mode_info
->planes
[i
]) {
1334 DRM_ERROR("KMS: Failed to allocate plane\n");
1335 goto fail_free_planes
;
1337 mode_info
->planes
[i
]->base
.type
= mode_info
->plane_type
[i
];
1340 * HACK: IGT tests expect that each plane can only have one
1341 * one possible CRTC. For now, set one CRTC for each
1342 * plane that is not an underlay, but still allow multiple
1343 * CRTCs for underlay planes.
1345 possible_crtcs
= 1 << i
;
1346 if (i
>= dm
->dc
->caps
.max_streams
)
1347 possible_crtcs
= 0xff;
1349 if (amdgpu_dm_plane_init(dm
, mode_info
->planes
[i
], possible_crtcs
)) {
1350 DRM_ERROR("KMS: Failed to initialize plane\n");
1351 goto fail_free_planes
;
1355 for (i
= 0; i
< dm
->dc
->caps
.max_streams
; i
++)
1356 if (amdgpu_dm_crtc_init(dm
, &mode_info
->planes
[i
]->base
, i
)) {
1357 DRM_ERROR("KMS: Failed to initialize crtc\n");
1358 goto fail_free_planes
;
1361 dm
->display_indexes_num
= dm
->dc
->caps
.max_streams
;
1363 /* loops over all connectors on the board */
1364 for (i
= 0; i
< link_cnt
; i
++) {
1366 if (i
> AMDGPU_DM_MAX_DISPLAY_INDEX
) {
1368 "KMS: Cannot support more than %d display indexes\n",
1369 AMDGPU_DM_MAX_DISPLAY_INDEX
);
1373 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
1375 goto fail_free_planes
;
1377 aencoder
= kzalloc(sizeof(*aencoder
), GFP_KERNEL
);
1379 goto fail_free_connector
;
1381 if (amdgpu_dm_encoder_init(dm
->ddev
, aencoder
, i
)) {
1382 DRM_ERROR("KMS: Failed to initialize encoder\n");
1383 goto fail_free_encoder
;
1386 if (amdgpu_dm_connector_init(dm
, aconnector
, i
, aencoder
)) {
1387 DRM_ERROR("KMS: Failed to initialize connector\n");
1388 goto fail_free_encoder
;
1391 if (dc_link_detect(dc_get_link_at_index(dm
->dc
, i
),
1392 DETECT_REASON_BOOT
))
1393 amdgpu_dm_update_connector_after_detect(aconnector
);
1396 /* Software is initialized. Now we can register interrupt handlers. */
1397 switch (adev
->asic_type
) {
1407 case CHIP_POLARIS11
:
1408 case CHIP_POLARIS10
:
1409 case CHIP_POLARIS12
:
1411 if (dce110_register_irq_handlers(dm
->adev
)) {
1412 DRM_ERROR("DM: Failed to initialize IRQ\n");
1413 goto fail_free_encoder
;
1416 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1418 if (dcn10_register_irq_handlers(dm
->adev
)) {
1419 DRM_ERROR("DM: Failed to initialize IRQ\n");
1420 goto fail_free_encoder
;
1423 * Temporary disable until pplib/smu interaction is implemented
1425 dm
->dc
->debug
.disable_stutter
= true;
1429 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev
->asic_type
);
1430 goto fail_free_encoder
;
1433 drm_mode_config_reset(dm
->ddev
);
1438 fail_free_connector
:
1441 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; i
++)
1442 kfree(mode_info
->planes
[i
]);
1446 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
)
1448 drm_mode_config_cleanup(dm
->ddev
);
1452 /******************************************************************************
1453 * amdgpu_display_funcs functions
1454 *****************************************************************************/
/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
        /* TODO: implement later */
}
1468 static void dm_set_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
,
1471 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1474 static u8
dm_get_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
)
1476 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1480 static int amdgpu_notify_freesync(struct drm_device
*dev
, void *data
,
1481 struct drm_file
*filp
)
1483 struct mod_freesync_params freesync_params
;
1484 uint8_t num_streams
;
1487 struct amdgpu_device
*adev
= dev
->dev_private
;
1490 /* Get freesync enable flag from DRM */
1492 num_streams
= dc_get_current_stream_count(adev
->dm
.dc
);
1494 for (i
= 0; i
< num_streams
; i
++) {
1495 struct dc_stream_state
*stream
;
1496 stream
= dc_get_stream_at_index(adev
->dm
.dc
, i
);
1498 mod_freesync_update_state(adev
->dm
.freesync_module
,
1499 &stream
, 1, &freesync_params
);
1505 static const struct amdgpu_display_funcs dm_display_funcs
= {
1506 .bandwidth_update
= dm_bandwidth_update
, /* called unconditionally */
1507 .vblank_get_counter
= dm_vblank_get_counter
,/* called unconditionally */
1508 .vblank_wait
= NULL
,
1509 .backlight_set_level
=
1510 dm_set_backlight_level
,/* called unconditionally */
1511 .backlight_get_level
=
1512 dm_get_backlight_level
,/* called unconditionally */
1513 .hpd_sense
= NULL
,/* called unconditionally */
1514 .hpd_set_polarity
= NULL
, /* called unconditionally */
1515 .hpd_get_gpio_reg
= NULL
, /* VBIOS parsing. DAL does it. */
1516 .page_flip_get_scanoutpos
=
1517 dm_crtc_get_scanoutpos
,/* called unconditionally */
1518 .add_encoder
= NULL
, /* VBIOS parsing. DAL does it. */
1519 .add_connector
= NULL
, /* VBIOS parsing. DAL does it. */
1520 .notify_freesync
= amdgpu_notify_freesync
,
1524 #if defined(CONFIG_DEBUG_KERNEL_DC)
1526 static ssize_t
s3_debug_store(struct device
*device
,
1527 struct device_attribute
*attr
,
1533 struct pci_dev
*pdev
= to_pci_dev(device
);
1534 struct drm_device
*drm_dev
= pci_get_drvdata(pdev
);
1535 struct amdgpu_device
*adev
= drm_dev
->dev_private
;
1537 ret
= kstrtoint(buf
, 0, &s3_state
);
1542 amdgpu_dm_display_resume(adev
);
1543 drm_kms_helper_hotplug_event(adev
->ddev
);
1548 return ret
== 0 ? count
: 0;
1551 DEVICE_ATTR_WO(s3_debug
);
1555 static int dm_early_init(void *handle
)
1557 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1559 adev
->ddev
->driver
->driver_features
|= DRIVER_ATOMIC
;
1560 amdgpu_dm_set_irq_funcs(adev
);
1562 switch (adev
->asic_type
) {
1565 adev
->mode_info
.num_crtc
= 6;
1566 adev
->mode_info
.num_hpd
= 6;
1567 adev
->mode_info
.num_dig
= 6;
1568 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1571 adev
->mode_info
.num_crtc
= 4;
1572 adev
->mode_info
.num_hpd
= 6;
1573 adev
->mode_info
.num_dig
= 7;
1574 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1578 adev
->mode_info
.num_crtc
= 2;
1579 adev
->mode_info
.num_hpd
= 6;
1580 adev
->mode_info
.num_dig
= 6;
1581 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1585 adev
->mode_info
.num_crtc
= 6;
1586 adev
->mode_info
.num_hpd
= 6;
1587 adev
->mode_info
.num_dig
= 7;
1588 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1591 adev
->mode_info
.num_crtc
= 3;
1592 adev
->mode_info
.num_hpd
= 6;
1593 adev
->mode_info
.num_dig
= 9;
1594 adev
->mode_info
.plane_type
= dm_plane_type_carizzo
;
1597 adev
->mode_info
.num_crtc
= 2;
1598 adev
->mode_info
.num_hpd
= 6;
1599 adev
->mode_info
.num_dig
= 9;
1600 adev
->mode_info
.plane_type
= dm_plane_type_stoney
;
1602 case CHIP_POLARIS11
:
1603 case CHIP_POLARIS12
:
1604 adev
->mode_info
.num_crtc
= 5;
1605 adev
->mode_info
.num_hpd
= 5;
1606 adev
->mode_info
.num_dig
= 5;
1607 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1609 case CHIP_POLARIS10
:
1610 adev
->mode_info
.num_crtc
= 6;
1611 adev
->mode_info
.num_hpd
= 6;
1612 adev
->mode_info
.num_dig
= 6;
1613 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1616 adev
->mode_info
.num_crtc
= 6;
1617 adev
->mode_info
.num_hpd
= 6;
1618 adev
->mode_info
.num_dig
= 6;
1619 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1621 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1623 adev
->mode_info
.num_crtc
= 4;
1624 adev
->mode_info
.num_hpd
= 4;
1625 adev
->mode_info
.num_dig
= 4;
1626 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1630 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev
->asic_type
);
1634 if (adev
->mode_info
.funcs
== NULL
)
1635 adev
->mode_info
.funcs
= &dm_display_funcs
;
1637 /* Note: Do NOT change adev->audio_endpt_rreg and
1638 * adev->audio_endpt_wreg because they are initialised in
1639 * amdgpu_device_init() */
1640 #if defined(CONFIG_DEBUG_KERNEL_DC)
1643 &dev_attr_s3_debug
);
1649 struct dm_connector_state
{
1650 struct drm_connector_state base
;
1652 enum amdgpu_rmx_type scaling
;
1653 uint8_t underscan_vborder
;
1654 uint8_t underscan_hborder
;
1655 bool underscan_enable
;
/* Upcast from the embedded drm_connector_state to the wrapping dm_connector_state. */
1658 #define to_dm_connector_state(x)\
1659 container_of((x), struct dm_connector_state, base)
1661 static bool modeset_required(struct drm_crtc_state
*crtc_state
,
1662 struct dc_stream_state
*new_stream
,
1663 struct dc_stream_state
*old_stream
)
1665 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1668 if (!crtc_state
->enable
)
1671 return crtc_state
->active
;
1674 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
1676 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1679 return !crtc_state
->enable
|| !crtc_state
->active
;
/*
 * drm_encoder_funcs.destroy callback.
 * NOTE(review): upstream also frees the encoder here; the extracted span cut
 * off after drm_encoder_cleanup() - confirm the kfree against the full file.
 */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
        kfree(encoder);
}
1688 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
1689 .destroy
= amdgpu_dm_encoder_destroy
,
1692 static bool fill_rects_from_plane_state(const struct drm_plane_state
*state
,
1693 struct dc_plane_state
*plane_state
)
1695 plane_state
->src_rect
.x
= state
->src_x
>> 16;
1696 plane_state
->src_rect
.y
= state
->src_y
>> 16;
1697 /*we ignore for now mantissa and do not to deal with floating pixels :(*/
1698 plane_state
->src_rect
.width
= state
->src_w
>> 16;
1700 if (plane_state
->src_rect
.width
== 0)
1703 plane_state
->src_rect
.height
= state
->src_h
>> 16;
1704 if (plane_state
->src_rect
.height
== 0)
1707 plane_state
->dst_rect
.x
= state
->crtc_x
;
1708 plane_state
->dst_rect
.y
= state
->crtc_y
;
1710 if (state
->crtc_w
== 0)
1713 plane_state
->dst_rect
.width
= state
->crtc_w
;
1715 if (state
->crtc_h
== 0)
1718 plane_state
->dst_rect
.height
= state
->crtc_h
;
1720 plane_state
->clip_rect
= plane_state
->dst_rect
;
1722 switch (state
->rotation
& DRM_MODE_ROTATE_MASK
) {
1723 case DRM_MODE_ROTATE_0
:
1724 plane_state
->rotation
= ROTATION_ANGLE_0
;
1726 case DRM_MODE_ROTATE_90
:
1727 plane_state
->rotation
= ROTATION_ANGLE_90
;
1729 case DRM_MODE_ROTATE_180
:
1730 plane_state
->rotation
= ROTATION_ANGLE_180
;
1732 case DRM_MODE_ROTATE_270
:
1733 plane_state
->rotation
= ROTATION_ANGLE_270
;
1736 plane_state
->rotation
= ROTATION_ANGLE_0
;
1742 static int get_fb_info(const struct amdgpu_framebuffer
*amdgpu_fb
,
1743 uint64_t *tiling_flags
,
1744 uint64_t *fb_location
)
1746 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->obj
);
1747 int r
= amdgpu_bo_reserve(rbo
, false);
1750 // Don't show error msg. when return -ERESTARTSYS
1751 if (r
!= -ERESTARTSYS
)
1752 DRM_ERROR("Unable to reserve buffer: %d\n", r
);
1757 *fb_location
= amdgpu_bo_gpu_offset(rbo
);
1760 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
1762 amdgpu_bo_unreserve(rbo
);
1767 static int fill_plane_attributes_from_fb(struct amdgpu_device
*adev
,
1768 struct dc_plane_state
*plane_state
,
1769 const struct amdgpu_framebuffer
*amdgpu_fb
,
1772 uint64_t tiling_flags
;
1773 uint64_t fb_location
= 0;
1774 uint64_t chroma_addr
= 0;
1775 unsigned int awidth
;
1776 const struct drm_framebuffer
*fb
= &amdgpu_fb
->base
;
1778 struct drm_format_name_buf format_name
;
1783 addReq
== true ? &fb_location
:NULL
);
1788 switch (fb
->format
->format
) {
1790 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
1792 case DRM_FORMAT_RGB565
:
1793 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
1795 case DRM_FORMAT_XRGB8888
:
1796 case DRM_FORMAT_ARGB8888
:
1797 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
1799 case DRM_FORMAT_XRGB2101010
:
1800 case DRM_FORMAT_ARGB2101010
:
1801 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
1803 case DRM_FORMAT_XBGR2101010
:
1804 case DRM_FORMAT_ABGR2101010
:
1805 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
1807 case DRM_FORMAT_NV21
:
1808 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
1810 case DRM_FORMAT_NV12
:
1811 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
1814 DRM_ERROR("Unsupported screen format %s\n",
1815 drm_get_format_name(fb
->format
->format
, &format_name
));
1819 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
1820 plane_state
->address
.type
= PLN_ADDR_TYPE_GRAPHICS
;
1821 plane_state
->address
.grph
.addr
.low_part
= lower_32_bits(fb_location
);
1822 plane_state
->address
.grph
.addr
.high_part
= upper_32_bits(fb_location
);
1823 plane_state
->plane_size
.grph
.surface_size
.x
= 0;
1824 plane_state
->plane_size
.grph
.surface_size
.y
= 0;
1825 plane_state
->plane_size
.grph
.surface_size
.width
= fb
->width
;
1826 plane_state
->plane_size
.grph
.surface_size
.height
= fb
->height
;
1827 plane_state
->plane_size
.grph
.surface_pitch
=
1828 fb
->pitches
[0] / fb
->format
->cpp
[0];
1829 /* TODO: unhardcode */
1830 plane_state
->color_space
= COLOR_SPACE_SRGB
;
1833 awidth
= ALIGN(fb
->width
, 64);
1834 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
1835 plane_state
->address
.video_progressive
.luma_addr
.low_part
1836 = lower_32_bits(fb_location
);
1837 plane_state
->address
.video_progressive
.luma_addr
.high_part
1838 = upper_32_bits(fb_location
);
1839 chroma_addr
= fb_location
+ (u64
)(awidth
* fb
->height
);
1840 plane_state
->address
.video_progressive
.chroma_addr
.low_part
1841 = lower_32_bits(chroma_addr
);
1842 plane_state
->address
.video_progressive
.chroma_addr
.high_part
1843 = upper_32_bits(chroma_addr
);
1844 plane_state
->plane_size
.video
.luma_size
.x
= 0;
1845 plane_state
->plane_size
.video
.luma_size
.y
= 0;
1846 plane_state
->plane_size
.video
.luma_size
.width
= awidth
;
1847 plane_state
->plane_size
.video
.luma_size
.height
= fb
->height
;
1848 /* TODO: unhardcode */
1849 plane_state
->plane_size
.video
.luma_pitch
= awidth
;
1851 plane_state
->plane_size
.video
.chroma_size
.x
= 0;
1852 plane_state
->plane_size
.video
.chroma_size
.y
= 0;
1853 plane_state
->plane_size
.video
.chroma_size
.width
= awidth
;
1854 plane_state
->plane_size
.video
.chroma_size
.height
= fb
->height
;
1855 plane_state
->plane_size
.video
.chroma_pitch
= awidth
/ 2;
1857 /* TODO: unhardcode */
1858 plane_state
->color_space
= COLOR_SPACE_YCBCR709
;
1861 memset(&plane_state
->tiling_info
, 0, sizeof(plane_state
->tiling_info
));
1863 /* Fill GFX8 params */
1864 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
) {
1865 unsigned int bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
1867 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
1868 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
1869 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
1870 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
1871 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
1873 /* XXX fix me for VI */
1874 plane_state
->tiling_info
.gfx8
.num_banks
= num_banks
;
1875 plane_state
->tiling_info
.gfx8
.array_mode
=
1876 DC_ARRAY_2D_TILED_THIN1
;
1877 plane_state
->tiling_info
.gfx8
.tile_split
= tile_split
;
1878 plane_state
->tiling_info
.gfx8
.bank_width
= bankw
;
1879 plane_state
->tiling_info
.gfx8
.bank_height
= bankh
;
1880 plane_state
->tiling_info
.gfx8
.tile_aspect
= mtaspect
;
1881 plane_state
->tiling_info
.gfx8
.tile_mode
=
1882 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
1883 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
1884 == DC_ARRAY_1D_TILED_THIN1
) {
1885 plane_state
->tiling_info
.gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
1888 plane_state
->tiling_info
.gfx8
.pipe_config
=
1889 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
1891 if (adev
->asic_type
== CHIP_VEGA10
||
1892 adev
->asic_type
== CHIP_RAVEN
) {
1893 /* Fill GFX9 params */
1894 plane_state
->tiling_info
.gfx9
.num_pipes
=
1895 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
1896 plane_state
->tiling_info
.gfx9
.num_banks
=
1897 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
1898 plane_state
->tiling_info
.gfx9
.pipe_interleave
=
1899 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
1900 plane_state
->tiling_info
.gfx9
.num_shader_engines
=
1901 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
1902 plane_state
->tiling_info
.gfx9
.max_compressed_frags
=
1903 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
1904 plane_state
->tiling_info
.gfx9
.num_rb_per_se
=
1905 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
1906 plane_state
->tiling_info
.gfx9
.swizzle
=
1907 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
1908 plane_state
->tiling_info
.gfx9
.shaderEnable
= 1;
1911 plane_state
->visible
= true;
1912 plane_state
->scaling_quality
.h_taps_c
= 0;
1913 plane_state
->scaling_quality
.v_taps_c
= 0;
1915 /* is this needed? is plane_state zeroed at allocation? */
1916 plane_state
->scaling_quality
.h_taps
= 0;
1917 plane_state
->scaling_quality
.v_taps
= 0;
1918 plane_state
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
1924 static void fill_gamma_from_crtc_state(const struct drm_crtc_state
*crtc_state
,
1925 struct dc_plane_state
*plane_state
)
1928 struct dc_gamma
*gamma
;
1929 struct drm_color_lut
*lut
=
1930 (struct drm_color_lut
*) crtc_state
->gamma_lut
->data
;
1932 gamma
= dc_create_gamma();
1934 if (gamma
== NULL
) {
1939 gamma
->type
= GAMMA_RGB_256
;
1940 gamma
->num_entries
= GAMMA_RGB_256_ENTRIES
;
1941 for (i
= 0; i
< GAMMA_RGB_256_ENTRIES
; i
++) {
1942 gamma
->entries
.red
[i
] = dal_fixed31_32_from_int(lut
[i
].red
);
1943 gamma
->entries
.green
[i
] = dal_fixed31_32_from_int(lut
[i
].green
);
1944 gamma
->entries
.blue
[i
] = dal_fixed31_32_from_int(lut
[i
].blue
);
1947 plane_state
->gamma_correction
= gamma
;
1950 static int fill_plane_attributes(struct amdgpu_device
*adev
,
1951 struct dc_plane_state
*dc_plane_state
,
1952 struct drm_plane_state
*plane_state
,
1953 struct drm_crtc_state
*crtc_state
,
1956 const struct amdgpu_framebuffer
*amdgpu_fb
=
1957 to_amdgpu_framebuffer(plane_state
->fb
);
1958 const struct drm_crtc
*crtc
= plane_state
->crtc
;
1959 struct dc_transfer_func
*input_tf
;
1962 if (!fill_rects_from_plane_state(plane_state
, dc_plane_state
))
1965 ret
= fill_plane_attributes_from_fb(
1966 crtc
->dev
->dev_private
,
1974 input_tf
= dc_create_transfer_func();
1976 if (input_tf
== NULL
)
1979 input_tf
->type
= TF_TYPE_PREDEFINED
;
1980 input_tf
->tf
= TRANSFER_FUNCTION_SRGB
;
1982 dc_plane_state
->in_transfer_func
= input_tf
;
1984 /* In case of gamma set, update gamma value */
1985 if (crtc_state
->gamma_lut
)
1986 fill_gamma_from_crtc_state(crtc_state
, dc_plane_state
);
1991 /*****************************************************************************/
1993 static void update_stream_scaling_settings(const struct drm_display_mode
*mode
,
1994 const struct dm_connector_state
*dm_state
,
1995 struct dc_stream_state
*stream
)
1997 enum amdgpu_rmx_type rmx_type
;
1999 struct rect src
= { 0 }; /* viewport in composition space*/
2000 struct rect dst
= { 0 }; /* stream addressable area */
2002 /* no mode. nothing to be done */
2006 /* Full screen scaling by default */
2007 src
.width
= mode
->hdisplay
;
2008 src
.height
= mode
->vdisplay
;
2009 dst
.width
= stream
->timing
.h_addressable
;
2010 dst
.height
= stream
->timing
.v_addressable
;
2012 rmx_type
= dm_state
->scaling
;
2013 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
2014 if (src
.width
* dst
.height
<
2015 src
.height
* dst
.width
) {
2016 /* height needs less upscaling/more downscaling */
2017 dst
.width
= src
.width
*
2018 dst
.height
/ src
.height
;
2020 /* width needs less upscaling/more downscaling */
2021 dst
.height
= src
.height
*
2022 dst
.width
/ src
.width
;
2024 } else if (rmx_type
== RMX_CENTER
) {
2028 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
2029 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
2031 if (dm_state
->underscan_enable
) {
2032 dst
.x
+= dm_state
->underscan_hborder
/ 2;
2033 dst
.y
+= dm_state
->underscan_vborder
/ 2;
2034 dst
.width
-= dm_state
->underscan_hborder
;
2035 dst
.height
-= dm_state
->underscan_vborder
;
2041 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
2042 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
2046 static enum dc_color_depth
2047 convert_color_depth_from_display_info(const struct drm_connector
*connector
)
2049 uint32_t bpc
= connector
->display_info
.bpc
;
2051 /* Limited color depth to 8bit
2052 * TODO: Still need to handle deep color
2059 /* Temporary Work around, DRM don't parse color depth for
2060 * EDID revision before 1.4
2061 * TODO: Fix edid parsing
2063 return COLOR_DEPTH_888
;
2065 return COLOR_DEPTH_666
;
2067 return COLOR_DEPTH_888
;
2069 return COLOR_DEPTH_101010
;
2071 return COLOR_DEPTH_121212
;
2073 return COLOR_DEPTH_141414
;
2075 return COLOR_DEPTH_161616
;
2077 return COLOR_DEPTH_UNDEFINED
;
2081 static enum dc_aspect_ratio
2082 get_aspect_ratio(const struct drm_display_mode
*mode_in
)
2084 int32_t width
= mode_in
->crtc_hdisplay
* 9;
2085 int32_t height
= mode_in
->crtc_vdisplay
* 16;
2087 if ((width
- height
) < 10 && (width
- height
) > -10)
2088 return ASPECT_RATIO_16_9
;
2090 return ASPECT_RATIO_4_3
;
2093 static enum dc_color_space
2094 get_output_color_space(const struct dc_crtc_timing
*dc_crtc_timing
)
2096 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
2098 switch (dc_crtc_timing
->pixel_encoding
) {
2099 case PIXEL_ENCODING_YCBCR422
:
2100 case PIXEL_ENCODING_YCBCR444
:
2101 case PIXEL_ENCODING_YCBCR420
:
2104 * 27030khz is the separation point between HDTV and SDTV
2105 * according to HDMI spec, we use YCbCr709 and YCbCr601
2108 if (dc_crtc_timing
->pix_clk_khz
> 27030) {
2109 if (dc_crtc_timing
->flags
.Y_ONLY
)
2111 COLOR_SPACE_YCBCR709_LIMITED
;
2113 color_space
= COLOR_SPACE_YCBCR709
;
2115 if (dc_crtc_timing
->flags
.Y_ONLY
)
2117 COLOR_SPACE_YCBCR601_LIMITED
;
2119 color_space
= COLOR_SPACE_YCBCR601
;
2124 case PIXEL_ENCODING_RGB
:
2125 color_space
= COLOR_SPACE_SRGB
;
2136 /*****************************************************************************/
2139 fill_stream_properties_from_drm_display_mode(struct dc_stream_state
*stream
,
2140 const struct drm_display_mode
*mode_in
,
2141 const struct drm_connector
*connector
)
2143 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
2145 memset(timing_out
, 0, sizeof(struct dc_crtc_timing
));
2147 timing_out
->h_border_left
= 0;
2148 timing_out
->h_border_right
= 0;
2149 timing_out
->v_border_top
= 0;
2150 timing_out
->v_border_bottom
= 0;
2151 /* TODO: un-hardcode */
2153 if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
2154 && stream
->sink
->sink_signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
2155 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
2157 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
2159 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
2160 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
2162 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
2163 timing_out
->hdmi_vic
= 0;
2164 timing_out
->vic
= drm_match_cea_mode(mode_in
);
2166 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
2167 timing_out
->h_total
= mode_in
->crtc_htotal
;
2168 timing_out
->h_sync_width
=
2169 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
2170 timing_out
->h_front_porch
=
2171 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
2172 timing_out
->v_total
= mode_in
->crtc_vtotal
;
2173 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
2174 timing_out
->v_front_porch
=
2175 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
2176 timing_out
->v_sync_width
=
2177 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
2178 timing_out
->pix_clk_khz
= mode_in
->crtc_clock
;
2179 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
2180 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
2181 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
2182 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
2183 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
2185 stream
->output_color_space
= get_output_color_space(timing_out
);
2188 struct dc_transfer_func
*tf
= dc_create_transfer_func();
2190 tf
->type
= TF_TYPE_PREDEFINED
;
2191 tf
->tf
= TRANSFER_FUNCTION_SRGB
;
2192 stream
->out_transfer_func
= tf
;
2196 static void fill_audio_info(struct audio_info
*audio_info
,
2197 const struct drm_connector
*drm_connector
,
2198 const struct dc_sink
*dc_sink
)
2201 int cea_revision
= 0;
2202 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
2204 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
2205 audio_info
->product_id
= edid_caps
->product_id
;
2207 cea_revision
= drm_connector
->display_info
.cea_rev
;
2209 strncpy(audio_info
->display_name
,
2210 edid_caps
->display_name
,
2211 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
- 1);
2213 if (cea_revision
>= 3) {
2214 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
2216 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
2217 audio_info
->modes
[i
].format_code
=
2218 (enum audio_format_code
)
2219 (edid_caps
->audio_modes
[i
].format_code
);
2220 audio_info
->modes
[i
].channel_count
=
2221 edid_caps
->audio_modes
[i
].channel_count
;
2222 audio_info
->modes
[i
].sample_rates
.all
=
2223 edid_caps
->audio_modes
[i
].sample_rate
;
2224 audio_info
->modes
[i
].sample_size
=
2225 edid_caps
->audio_modes
[i
].sample_size
;
2229 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
2231 /* TODO: We only check for the progressive mode, check for interlace mode too */
2232 if (drm_connector
->latency_present
[0]) {
2233 audio_info
->video_latency
= drm_connector
->video_latency
[0];
2234 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
2237 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2242 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode
*src_mode
,
2243 struct drm_display_mode
*dst_mode
)
2245 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
2246 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
2247 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
2248 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
2249 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
2250 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
2251 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
2252 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
2253 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
2254 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
2255 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
2256 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
2257 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
2258 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
2262 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode
*drm_mode
,
2263 const struct drm_display_mode
*native_mode
,
2266 if (scale_enabled
) {
2267 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2268 } else if (native_mode
->clock
== drm_mode
->clock
&&
2269 native_mode
->htotal
== drm_mode
->htotal
&&
2270 native_mode
->vtotal
== drm_mode
->vtotal
) {
2271 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2273 /* no scaling nor amdgpu inserted, no need to patch */
2277 static void create_fake_sink(struct amdgpu_dm_connector
*aconnector
)
2279 struct dc_sink
*sink
= NULL
;
2280 struct dc_sink_init_data sink_init_data
= { 0 };
2282 sink_init_data
.link
= aconnector
->dc_link
;
2283 sink_init_data
.sink_signal
= aconnector
->dc_link
->connector_signal
;
2285 sink
= dc_sink_create(&sink_init_data
);
2287 DRM_ERROR("Failed to create sink!\n");
2289 sink
->sink_signal
= SIGNAL_TYPE_VIRTUAL
;
2290 aconnector
->fake_enable
= true;
2292 aconnector
->dc_sink
= sink
;
2293 aconnector
->dc_link
->local_sink
= sink
;
2296 static struct dc_stream_state
*
2297 create_stream_for_sink(struct amdgpu_dm_connector
*aconnector
,
2298 const struct drm_display_mode
*drm_mode
,
2299 const struct dm_connector_state
*dm_state
)
2301 struct drm_display_mode
*preferred_mode
= NULL
;
2302 const struct drm_connector
*drm_connector
;
2303 struct dc_stream_state
*stream
= NULL
;
2304 struct drm_display_mode mode
= *drm_mode
;
2305 bool native_mode_found
= false;
2307 if (aconnector
== NULL
) {
2308 DRM_ERROR("aconnector is NULL!\n");
2309 goto drm_connector_null
;
2312 if (dm_state
== NULL
) {
2313 DRM_ERROR("dm_state is NULL!\n");
2317 drm_connector
= &aconnector
->base
;
2319 if (!aconnector
->dc_sink
) {
2321 * Exclude MST from creating fake_sink
2322 * TODO: need to enable MST into fake_sink feature
2324 if (aconnector
->mst_port
)
2325 goto stream_create_fail
;
2327 create_fake_sink(aconnector
);
2330 stream
= dc_create_stream_for_sink(aconnector
->dc_sink
);
2332 if (stream
== NULL
) {
2333 DRM_ERROR("Failed to create stream for sink!\n");
2334 goto stream_create_fail
;
2337 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
2338 /* Search for preferred mode */
2339 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
2340 native_mode_found
= true;
2344 if (!native_mode_found
)
2345 preferred_mode
= list_first_entry_or_null(
2346 &aconnector
->base
.modes
,
2347 struct drm_display_mode
,
2350 if (preferred_mode
== NULL
) {
2351 /* This may not be an error, the use case is when we we have no
2352 * usermode calls to reset and set mode upon hotplug. In this
2353 * case, we call set mode ourselves to restore the previous mode
2354 * and the modelist may not be filled in in time.
2356 DRM_DEBUG_DRIVER("No preferred mode found\n");
2358 decide_crtc_timing_for_drm_display_mode(
2359 &mode
, preferred_mode
,
2360 dm_state
->scaling
!= RMX_OFF
);
2363 fill_stream_properties_from_drm_display_mode(stream
,
2364 &mode
, &aconnector
->base
);
2365 update_stream_scaling_settings(&mode
, dm_state
, stream
);
2368 &stream
->audio_info
,
2370 aconnector
->dc_sink
);
/* drm_crtc_funcs.destroy: tear down the CRTC and release its memory. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
2384 static void dm_crtc_destroy_state(struct drm_crtc
*crtc
,
2385 struct drm_crtc_state
*state
)
2387 struct dm_crtc_state
*cur
= to_dm_crtc_state(state
);
2389 /* TODO Destroy dc_stream objects are stream object is flattened */
2391 dc_stream_release(cur
->stream
);
2394 __drm_atomic_helper_crtc_destroy_state(state
);
2400 static void dm_crtc_reset_state(struct drm_crtc
*crtc
)
2402 struct dm_crtc_state
*state
;
2405 dm_crtc_destroy_state(crtc
, crtc
->state
);
2407 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2408 if (WARN_ON(!state
))
2411 crtc
->state
= &state
->base
;
2412 crtc
->state
->crtc
= crtc
;
2416 static struct drm_crtc_state
*
2417 dm_crtc_duplicate_state(struct drm_crtc
*crtc
)
2419 struct dm_crtc_state
*state
, *cur
;
2421 cur
= to_dm_crtc_state(crtc
->state
);
2423 if (WARN_ON(!crtc
->state
))
2426 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2428 __drm_atomic_helper_crtc_duplicate_state(crtc
, &state
->base
);
2431 state
->stream
= cur
->stream
;
2432 dc_stream_retain(state
->stream
);
2435 /* TODO Duplicate dc_stream after objects are stream object is flattened */
2437 return &state
->base
;
2440 /* Implemented only the options currently availible for the driver */
2441 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
2442 .reset
= dm_crtc_reset_state
,
2443 .destroy
= amdgpu_dm_crtc_destroy
,
2444 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
2445 .set_config
= drm_atomic_helper_set_config
,
2446 .page_flip
= drm_atomic_helper_page_flip
,
2447 .atomic_duplicate_state
= dm_crtc_duplicate_state
,
2448 .atomic_destroy_state
= dm_crtc_destroy_state
,
2451 static enum drm_connector_status
2452 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
2455 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2458 * 1. This interface is NOT called in context of HPD irq.
2459 * 2. This interface *is called* in context of user-mode ioctl. Which
2460 * makes it a bad place for *any* MST-related activit. */
2462 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
&&
2463 !aconnector
->fake_enable
)
2464 connected
= (aconnector
->dc_sink
!= NULL
);
2466 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
2468 return (connected
? connector_status_connected
:
2469 connector_status_disconnected
);
2472 int amdgpu_dm_connector_atomic_set_property(struct drm_connector
*connector
,
2473 struct drm_connector_state
*connector_state
,
2474 struct drm_property
*property
,
2477 struct drm_device
*dev
= connector
->dev
;
2478 struct amdgpu_device
*adev
= dev
->dev_private
;
2479 struct dm_connector_state
*dm_old_state
=
2480 to_dm_connector_state(connector
->state
);
2481 struct dm_connector_state
*dm_new_state
=
2482 to_dm_connector_state(connector_state
);
2486 if (property
== dev
->mode_config
.scaling_mode_property
) {
2487 enum amdgpu_rmx_type rmx_type
;
2490 case DRM_MODE_SCALE_CENTER
:
2491 rmx_type
= RMX_CENTER
;
2493 case DRM_MODE_SCALE_ASPECT
:
2494 rmx_type
= RMX_ASPECT
;
2496 case DRM_MODE_SCALE_FULLSCREEN
:
2497 rmx_type
= RMX_FULL
;
2499 case DRM_MODE_SCALE_NONE
:
2505 if (dm_old_state
->scaling
== rmx_type
)
2508 dm_new_state
->scaling
= rmx_type
;
2510 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2511 dm_new_state
->underscan_hborder
= val
;
2513 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2514 dm_new_state
->underscan_vborder
= val
;
2516 } else if (property
== adev
->mode_info
.underscan_property
) {
2517 dm_new_state
->underscan_enable
= val
;
2524 int amdgpu_dm_connector_atomic_get_property(struct drm_connector
*connector
,
2525 const struct drm_connector_state
*state
,
2526 struct drm_property
*property
,
2529 struct drm_device
*dev
= connector
->dev
;
2530 struct amdgpu_device
*adev
= dev
->dev_private
;
2531 struct dm_connector_state
*dm_state
=
2532 to_dm_connector_state(state
);
2535 if (property
== dev
->mode_config
.scaling_mode_property
) {
2536 switch (dm_state
->scaling
) {
2538 *val
= DRM_MODE_SCALE_CENTER
;
2541 *val
= DRM_MODE_SCALE_ASPECT
;
2544 *val
= DRM_MODE_SCALE_FULLSCREEN
;
2548 *val
= DRM_MODE_SCALE_NONE
;
2552 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2553 *val
= dm_state
->underscan_hborder
;
2555 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2556 *val
= dm_state
->underscan_vborder
;
2558 } else if (property
== adev
->mode_info
.underscan_property
) {
2559 *val
= dm_state
->underscan_enable
;
2565 static void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
2567 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2568 const struct dc_link
*link
= aconnector
->dc_link
;
2569 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
2570 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2571 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2572 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2574 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
2575 amdgpu_dm_register_backlight_device(dm
);
2577 if (dm
->backlight_dev
) {
2578 backlight_device_unregister(dm
->backlight_dev
);
2579 dm
->backlight_dev
= NULL
;
2584 drm_connector_unregister(connector
);
2585 drm_connector_cleanup(connector
);
2589 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
2591 struct dm_connector_state
*state
=
2592 to_dm_connector_state(connector
->state
);
2596 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2599 state
->scaling
= RMX_OFF
;
2600 state
->underscan_enable
= false;
2601 state
->underscan_hborder
= 0;
2602 state
->underscan_vborder
= 0;
2604 connector
->state
= &state
->base
;
2605 connector
->state
->connector
= connector
;
2609 struct drm_connector_state
*
2610 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector
*connector
)
2612 struct dm_connector_state
*state
=
2613 to_dm_connector_state(connector
->state
);
2615 struct dm_connector_state
*new_state
=
2616 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
2619 __drm_atomic_helper_connector_duplicate_state(connector
,
2621 return &new_state
->base
;
2627 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
2628 .reset
= amdgpu_dm_connector_funcs_reset
,
2629 .detect
= amdgpu_dm_connector_detect
,
2630 .fill_modes
= drm_helper_probe_single_connector_modes
,
2631 .destroy
= amdgpu_dm_connector_destroy
,
2632 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
2633 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
2634 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
2635 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
2638 static struct drm_encoder
*best_encoder(struct drm_connector
*connector
)
2640 int enc_id
= connector
->encoder_ids
[0];
2641 struct drm_mode_object
*obj
;
2642 struct drm_encoder
*encoder
;
2644 DRM_DEBUG_DRIVER("Finding the best encoder\n");
2646 /* pick the encoder ids */
2648 obj
= drm_mode_object_find(connector
->dev
, NULL
, enc_id
, DRM_MODE_OBJECT_ENCODER
);
2650 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2653 encoder
= obj_to_encoder(obj
);
2656 DRM_ERROR("No encoder id\n");
/* Thin helper-funcs wrapper around amdgpu_dm_connector_get_modes(). */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
2665 static void create_eml_sink(struct amdgpu_dm_connector
*aconnector
)
2667 struct dc_sink_init_data init_params
= {
2668 .link
= aconnector
->dc_link
,
2669 .sink_signal
= SIGNAL_TYPE_VIRTUAL
2671 struct edid
*edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
2673 if (!aconnector
->base
.edid_blob_ptr
||
2674 !aconnector
->base
.edid_blob_ptr
->data
) {
2675 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
2676 aconnector
->base
.name
);
2678 aconnector
->base
.force
= DRM_FORCE_OFF
;
2679 aconnector
->base
.override_edid
= false;
2683 aconnector
->edid
= edid
;
2685 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
2686 aconnector
->dc_link
,
2688 (edid
->extensions
+ 1) * EDID_LENGTH
,
2691 if (aconnector
->base
.force
== DRM_FORCE_ON
)
2692 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
2693 aconnector
->dc_link
->local_sink
:
2694 aconnector
->dc_em_sink
;
2697 static void handle_edid_mgmt(struct amdgpu_dm_connector
*aconnector
)
2699 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
2701 /* In case of headless boot with force on for DP managed connector
2702 * Those settings have to be != 0 to get initial modeset
2704 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
2705 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
2706 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
2710 aconnector
->base
.override_edid
= true;
2711 create_eml_sink(aconnector
);
2714 int amdgpu_dm_connector_mode_valid(struct drm_connector
*connector
,
2715 struct drm_display_mode
*mode
)
2717 int result
= MODE_ERROR
;
2718 struct dc_sink
*dc_sink
;
2719 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
2720 /* TODO: Unhardcode stream count */
2721 struct dc_stream_state
*stream
;
2722 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2724 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
2725 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
2728 /* Only run this the first time mode_valid is called to initilialize
2731 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
2732 !aconnector
->dc_em_sink
)
2733 handle_edid_mgmt(aconnector
);
2735 dc_sink
= to_amdgpu_dm_connector(connector
)->dc_sink
;
2737 if (dc_sink
== NULL
) {
2738 DRM_ERROR("dc_sink is NULL!\n");
2742 stream
= dc_create_stream_for_sink(dc_sink
);
2743 if (stream
== NULL
) {
2744 DRM_ERROR("Failed to create stream for sink!\n");
2748 drm_mode_set_crtcinfo(mode
, 0);
2749 fill_stream_properties_from_drm_display_mode(stream
, mode
, connector
);
2751 stream
->src
.width
= mode
->hdisplay
;
2752 stream
->src
.height
= mode
->vdisplay
;
2753 stream
->dst
= stream
->src
;
2755 if (dc_validate_stream(adev
->dm
.dc
, stream
) == DC_OK
)
2758 dc_stream_release(stream
);
2761 /* TODO: error handling*/
2765 static const struct drm_connector_helper_funcs
2766 amdgpu_dm_connector_helper_funcs
= {
2768 * If hotplug a second bigger display in FB Con mode, bigger resolution
2769 * modes will be filtered by drm_mode_validate_size(), and those modes
2770 * is missing after user start lightdm. So we need to renew modes list.
2771 * in get_modes call back, not just return the modes count
2773 .get_modes
= get_modes
,
2774 .mode_valid
= amdgpu_dm_connector_mode_valid
,
2775 .best_encoder
= best_encoder
/* Intentionally empty: CRTC disable is handled through the DC state path. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
2782 static int dm_crtc_helper_atomic_check(struct drm_crtc
*crtc
,
2783 struct drm_crtc_state
*state
)
2785 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
2786 struct dc
*dc
= adev
->dm
.dc
;
2787 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(state
);
2790 if (unlikely(!dm_crtc_state
->stream
&&
2791 modeset_required(state
, NULL
, dm_crtc_state
->stream
))) {
2796 /* In some use cases, like reset, no stream is attached */
2797 if (!dm_crtc_state
->stream
)
2800 if (dc_validate_stream(dc
, dm_crtc_state
->stream
) == DC_OK
)
2806 static bool dm_crtc_helper_mode_fixup(struct drm_crtc
*crtc
,
2807 const struct drm_display_mode
*mode
,
2808 struct drm_display_mode
*adjusted_mode
)
2813 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
2814 .disable
= dm_crtc_helper_disable
,
2815 .atomic_check
= dm_crtc_helper_atomic_check
,
2816 .mode_fixup
= dm_crtc_helper_mode_fixup
/* Intentionally empty: encoder disable is driven by DC, not the helper. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
/* Nothing encoder-specific to validate; always succeed. */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	return 0;
}
2831 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
2832 .disable
= dm_encoder_helper_disable
,
2833 .atomic_check
= dm_encoder_helper_atomic_check
2836 static void dm_drm_plane_reset(struct drm_plane
*plane
)
2838 struct dm_plane_state
*amdgpu_state
= NULL
;
2841 plane
->funcs
->atomic_destroy_state(plane
, plane
->state
);
2843 amdgpu_state
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
2844 WARN_ON(amdgpu_state
== NULL
);
2847 plane
->state
= &amdgpu_state
->base
;
2848 plane
->state
->plane
= plane
;
2849 plane
->state
->rotation
= DRM_MODE_ROTATE_0
;
2853 static struct drm_plane_state
*
2854 dm_drm_plane_duplicate_state(struct drm_plane
*plane
)
2856 struct dm_plane_state
*dm_plane_state
, *old_dm_plane_state
;
2858 old_dm_plane_state
= to_dm_plane_state(plane
->state
);
2859 dm_plane_state
= kzalloc(sizeof(*dm_plane_state
), GFP_KERNEL
);
2860 if (!dm_plane_state
)
2863 __drm_atomic_helper_plane_duplicate_state(plane
, &dm_plane_state
->base
);
2865 if (old_dm_plane_state
->dc_state
) {
2866 dm_plane_state
->dc_state
= old_dm_plane_state
->dc_state
;
2867 dc_plane_state_retain(dm_plane_state
->dc_state
);
2870 return &dm_plane_state
->base
;
2873 void dm_drm_plane_destroy_state(struct drm_plane
*plane
,
2874 struct drm_plane_state
*state
)
2876 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
2878 if (dm_plane_state
->dc_state
)
2879 dc_plane_state_release(dm_plane_state
->dc_state
);
2881 drm_atomic_helper_plane_destroy_state(plane
, state
);
2884 static const struct drm_plane_funcs dm_plane_funcs
= {
2885 .update_plane
= drm_atomic_helper_update_plane
,
2886 .disable_plane
= drm_atomic_helper_disable_plane
,
2887 .destroy
= drm_plane_cleanup
,
2888 .reset
= dm_drm_plane_reset
,
2889 .atomic_duplicate_state
= dm_drm_plane_duplicate_state
,
2890 .atomic_destroy_state
= dm_drm_plane_destroy_state
,
2893 static int dm_plane_helper_prepare_fb(struct drm_plane
*plane
,
2894 struct drm_plane_state
*new_state
)
2896 struct amdgpu_framebuffer
*afb
;
2897 struct drm_gem_object
*obj
;
2898 struct amdgpu_bo
*rbo
;
2899 uint64_t chroma_addr
= 0;
2901 struct dm_plane_state
*dm_plane_state_new
, *dm_plane_state_old
;
2902 unsigned int awidth
;
2904 dm_plane_state_old
= to_dm_plane_state(plane
->state
);
2905 dm_plane_state_new
= to_dm_plane_state(new_state
);
2907 if (!new_state
->fb
) {
2908 DRM_DEBUG_DRIVER("No FB bound\n");
2912 afb
= to_amdgpu_framebuffer(new_state
->fb
);
2915 rbo
= gem_to_amdgpu_bo(obj
);
2916 r
= amdgpu_bo_reserve(rbo
, false);
2917 if (unlikely(r
!= 0))
2920 r
= amdgpu_bo_pin(rbo
, AMDGPU_GEM_DOMAIN_VRAM
, &afb
->address
);
2923 amdgpu_bo_unreserve(rbo
);
2925 if (unlikely(r
!= 0)) {
2926 if (r
!= -ERESTARTSYS
)
2927 DRM_ERROR("Failed to pin framebuffer with error %d\n", r
);
2933 if (dm_plane_state_new
->dc_state
&&
2934 dm_plane_state_old
->dc_state
!= dm_plane_state_new
->dc_state
) {
2935 struct dc_plane_state
*plane_state
= dm_plane_state_new
->dc_state
;
2937 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
2938 plane_state
->address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
2939 plane_state
->address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
2941 awidth
= ALIGN(new_state
->fb
->width
, 64);
2942 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
2943 plane_state
->address
.video_progressive
.luma_addr
.low_part
2944 = lower_32_bits(afb
->address
);
2945 plane_state
->address
.video_progressive
.luma_addr
.high_part
2946 = upper_32_bits(afb
->address
);
2947 chroma_addr
= afb
->address
+ (u64
)(awidth
* new_state
->fb
->height
);
2948 plane_state
->address
.video_progressive
.chroma_addr
.low_part
2949 = lower_32_bits(chroma_addr
);
2950 plane_state
->address
.video_progressive
.chroma_addr
.high_part
2951 = upper_32_bits(chroma_addr
);
2955 /* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
2956 * prepare and cleanup in drm_atomic_helper_prepare_planes
2957 * and drm_atomic_helper_cleanup_planes because fb doens't in s3.
2958 * IN 4.10 kernel this code should be removed and amdgpu_device_suspend
2959 * code touching fram buffers should be avoided for DC.
2961 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
2962 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(new_state
->crtc
);
2964 acrtc
->cursor_bo
= obj
;
2969 static void dm_plane_helper_cleanup_fb(struct drm_plane
*plane
,
2970 struct drm_plane_state
*old_state
)
2972 struct amdgpu_bo
*rbo
;
2973 struct amdgpu_framebuffer
*afb
;
2979 afb
= to_amdgpu_framebuffer(old_state
->fb
);
2980 rbo
= gem_to_amdgpu_bo(afb
->obj
);
2981 r
= amdgpu_bo_reserve(rbo
, false);
2983 DRM_ERROR("failed to reserve rbo before unpin\n");
2987 amdgpu_bo_unpin(rbo
);
2988 amdgpu_bo_unreserve(rbo
);
2989 amdgpu_bo_unref(&rbo
);
2992 static int dm_plane_atomic_check(struct drm_plane
*plane
,
2993 struct drm_plane_state
*state
)
2995 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
2996 struct dc
*dc
= adev
->dm
.dc
;
2997 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
2999 if (!dm_plane_state
->dc_state
)
3002 if (dc_validate_plane(dc
, dm_plane_state
->dc_state
) == DC_OK
)
3008 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
3009 .prepare_fb
= dm_plane_helper_prepare_fb
,
3010 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
3011 .atomic_check
= dm_plane_atomic_check
,
3015 * TODO: these are currently initialized to rgb formats only.
3016 * For future use cases we should either initialize them dynamically based on
3017 * plane capabilities, or initialize this array to all formats, so internal drm
3018 * check will succeed, and let DC to implement proper check
3020 static const uint32_t rgb_formats
[] = {
3022 DRM_FORMAT_XRGB8888
,
3023 DRM_FORMAT_ARGB8888
,
3024 DRM_FORMAT_RGBA8888
,
3025 DRM_FORMAT_XRGB2101010
,
3026 DRM_FORMAT_XBGR2101010
,
3027 DRM_FORMAT_ARGB2101010
,
3028 DRM_FORMAT_ABGR2101010
,
3031 static const uint32_t yuv_formats
[] = {
3036 static const u32 cursor_formats
[] = {
3040 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
3041 struct amdgpu_plane
*aplane
,
3042 unsigned long possible_crtcs
)
3046 switch (aplane
->base
.type
) {
3047 case DRM_PLANE_TYPE_PRIMARY
:
3048 aplane
->base
.format_default
= true;
3050 res
= drm_universal_plane_init(
3056 ARRAY_SIZE(rgb_formats
),
3057 NULL
, aplane
->base
.type
, NULL
);
3059 case DRM_PLANE_TYPE_OVERLAY
:
3060 res
= drm_universal_plane_init(
3066 ARRAY_SIZE(yuv_formats
),
3067 NULL
, aplane
->base
.type
, NULL
);
3069 case DRM_PLANE_TYPE_CURSOR
:
3070 res
= drm_universal_plane_init(
3076 ARRAY_SIZE(cursor_formats
),
3077 NULL
, aplane
->base
.type
, NULL
);
3081 drm_plane_helper_add(&aplane
->base
, &dm_plane_helper_funcs
);
3086 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
3087 struct drm_plane
*plane
,
3088 uint32_t crtc_index
)
3090 struct amdgpu_crtc
*acrtc
= NULL
;
3091 struct amdgpu_plane
*cursor_plane
;
3095 cursor_plane
= kzalloc(sizeof(*cursor_plane
), GFP_KERNEL
);
3099 cursor_plane
->base
.type
= DRM_PLANE_TYPE_CURSOR
;
3100 res
= amdgpu_dm_plane_init(dm
, cursor_plane
, 0);
3102 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
3106 res
= drm_crtc_init_with_planes(
3110 &cursor_plane
->base
,
3111 &amdgpu_dm_crtc_funcs
, NULL
);
3116 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
3118 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3119 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3121 acrtc
->crtc_id
= crtc_index
;
3122 acrtc
->base
.enabled
= false;
3124 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
3125 drm_mode_crtc_set_gamma_size(&acrtc
->base
, 256);
3131 kfree(cursor_plane
);
3136 static int to_drm_connector_type(enum signal_type st
)
3139 case SIGNAL_TYPE_HDMI_TYPE_A
:
3140 return DRM_MODE_CONNECTOR_HDMIA
;
3141 case SIGNAL_TYPE_EDP
:
3142 return DRM_MODE_CONNECTOR_eDP
;
3143 case SIGNAL_TYPE_RGB
:
3144 return DRM_MODE_CONNECTOR_VGA
;
3145 case SIGNAL_TYPE_DISPLAY_PORT
:
3146 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
3147 return DRM_MODE_CONNECTOR_DisplayPort
;
3148 case SIGNAL_TYPE_DVI_DUAL_LINK
:
3149 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
3150 return DRM_MODE_CONNECTOR_DVID
;
3151 case SIGNAL_TYPE_VIRTUAL
:
3152 return DRM_MODE_CONNECTOR_VIRTUAL
;
3155 return DRM_MODE_CONNECTOR_Unknown
;
3159 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
3161 const struct drm_connector_helper_funcs
*helper
=
3162 connector
->helper_private
;
3163 struct drm_encoder
*encoder
;
3164 struct amdgpu_encoder
*amdgpu_encoder
;
3166 encoder
= helper
->best_encoder(connector
);
3168 if (encoder
== NULL
)
3171 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3173 amdgpu_encoder
->native_mode
.clock
= 0;
3175 if (!list_empty(&connector
->probed_modes
)) {
3176 struct drm_display_mode
*preferred_mode
= NULL
;
3178 list_for_each_entry(preferred_mode
,
3179 &connector
->probed_modes
,
3181 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
)
3182 amdgpu_encoder
->native_mode
= *preferred_mode
;
3190 static struct drm_display_mode
*
3191 amdgpu_dm_create_common_mode(struct drm_encoder
*encoder
,
3193 int hdisplay
, int vdisplay
)
3195 struct drm_device
*dev
= encoder
->dev
;
3196 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3197 struct drm_display_mode
*mode
= NULL
;
3198 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3200 mode
= drm_mode_duplicate(dev
, native_mode
);
3205 mode
->hdisplay
= hdisplay
;
3206 mode
->vdisplay
= vdisplay
;
3207 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
3208 strncpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
3214 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
3215 struct drm_connector
*connector
)
3217 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3218 struct drm_display_mode
*mode
= NULL
;
3219 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3220 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3221 to_amdgpu_dm_connector(connector
);
3225 char name
[DRM_DISPLAY_MODE_LEN
];
3228 } common_modes
[] = {
3229 { "640x480", 640, 480},
3230 { "800x600", 800, 600},
3231 { "1024x768", 1024, 768},
3232 { "1280x720", 1280, 720},
3233 { "1280x800", 1280, 800},
3234 {"1280x1024", 1280, 1024},
3235 { "1440x900", 1440, 900},
3236 {"1680x1050", 1680, 1050},
3237 {"1600x1200", 1600, 1200},
3238 {"1920x1080", 1920, 1080},
3239 {"1920x1200", 1920, 1200}
3242 n
= ARRAY_SIZE(common_modes
);
3244 for (i
= 0; i
< n
; i
++) {
3245 struct drm_display_mode
*curmode
= NULL
;
3246 bool mode_existed
= false;
3248 if (common_modes
[i
].w
> native_mode
->hdisplay
||
3249 common_modes
[i
].h
> native_mode
->vdisplay
||
3250 (common_modes
[i
].w
== native_mode
->hdisplay
&&
3251 common_modes
[i
].h
== native_mode
->vdisplay
))
3254 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
3255 if (common_modes
[i
].w
== curmode
->hdisplay
&&
3256 common_modes
[i
].h
== curmode
->vdisplay
) {
3257 mode_existed
= true;
3265 mode
= amdgpu_dm_create_common_mode(encoder
,
3266 common_modes
[i
].name
, common_modes
[i
].w
,
3268 drm_mode_probed_add(connector
, mode
);
3269 amdgpu_dm_connector
->num_modes
++;
3273 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector
*connector
,
3276 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3277 to_amdgpu_dm_connector(connector
);
3280 /* empty probed_modes */
3281 INIT_LIST_HEAD(&connector
->probed_modes
);
3282 amdgpu_dm_connector
->num_modes
=
3283 drm_add_edid_modes(connector
, edid
);
3285 drm_edid_to_eld(connector
, edid
);
3287 amdgpu_dm_get_native_mode(connector
);
3289 amdgpu_dm_connector
->num_modes
= 0;
3293 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
3295 const struct drm_connector_helper_funcs
*helper
=
3296 connector
->helper_private
;
3297 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3298 to_amdgpu_dm_connector(connector
);
3299 struct drm_encoder
*encoder
;
3300 struct edid
*edid
= amdgpu_dm_connector
->edid
;
3302 encoder
= helper
->best_encoder(connector
);
3304 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
3305 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
3306 return amdgpu_dm_connector
->num_modes
;
3309 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager
*dm
,
3310 struct amdgpu_dm_connector
*aconnector
,
3312 struct dc_link
*link
,
3315 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
3317 aconnector
->connector_id
= link_index
;
3318 aconnector
->dc_link
= link
;
3319 aconnector
->base
.interlace_allowed
= false;
3320 aconnector
->base
.doublescan_allowed
= false;
3321 aconnector
->base
.stereo_allowed
= false;
3322 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
3323 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
3325 mutex_init(&aconnector
->hpd_lock
);
3327 /* configure support HPD hot plug connector_>polled default value is 0
3328 * which means HPD hot plug not supported
3330 switch (connector_type
) {
3331 case DRM_MODE_CONNECTOR_HDMIA
:
3332 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3334 case DRM_MODE_CONNECTOR_DisplayPort
:
3335 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3337 case DRM_MODE_CONNECTOR_DVID
:
3338 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3344 drm_object_attach_property(&aconnector
->base
.base
,
3345 dm
->ddev
->mode_config
.scaling_mode_property
,
3346 DRM_MODE_SCALE_NONE
);
3348 drm_object_attach_property(&aconnector
->base
.base
,
3349 adev
->mode_info
.underscan_property
,
3351 drm_object_attach_property(&aconnector
->base
.base
,
3352 adev
->mode_info
.underscan_hborder_property
,
3354 drm_object_attach_property(&aconnector
->base
.base
,
3355 adev
->mode_info
.underscan_vborder_property
,
3360 static int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
3361 struct i2c_msg
*msgs
, int num
)
3363 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
3364 struct ddc_service
*ddc_service
= i2c
->ddc_service
;
3365 struct i2c_command cmd
;
3369 cmd
.payloads
= kcalloc(num
, sizeof(struct i2c_payload
), GFP_KERNEL
);
3374 cmd
.number_of_payloads
= num
;
3375 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
3378 for (i
= 0; i
< num
; i
++) {
3379 cmd
.payloads
[i
].write
= !(msgs
[i
].flags
& I2C_M_RD
);
3380 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
3381 cmd
.payloads
[i
].length
= msgs
[i
].len
;
3382 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
3385 if (dal_i2caux_submit_i2c_command(
3386 ddc_service
->ctx
->i2caux
,
3387 ddc_service
->ddc_pin
,
3391 kfree(cmd
.payloads
);
3395 static u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
3397 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
3400 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
3401 .master_xfer
= amdgpu_dm_i2c_xfer
,
3402 .functionality
= amdgpu_dm_i2c_func
,
3405 static struct amdgpu_i2c_adapter
*
3406 create_i2c(struct ddc_service
*ddc_service
,
3410 struct amdgpu_device
*adev
= ddc_service
->ctx
->driver_context
;
3411 struct amdgpu_i2c_adapter
*i2c
;
3413 i2c
= kzalloc(sizeof(struct amdgpu_i2c_adapter
), GFP_KERNEL
);
3414 i2c
->base
.owner
= THIS_MODULE
;
3415 i2c
->base
.class = I2C_CLASS_DDC
;
3416 i2c
->base
.dev
.parent
= &adev
->pdev
->dev
;
3417 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
3418 snprintf(i2c
->base
.name
, sizeof(i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
3419 i2c_set_adapdata(&i2c
->base
, i2c
);
3420 i2c
->ddc_service
= ddc_service
;
3425 /* Note: this function assumes that dc_link_detect() was called for the
3426 * dc_link which will be represented by this aconnector.
3428 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
3429 struct amdgpu_dm_connector
*aconnector
,
3430 uint32_t link_index
,
3431 struct amdgpu_encoder
*aencoder
)
3435 struct dc
*dc
= dm
->dc
;
3436 struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
3437 struct amdgpu_i2c_adapter
*i2c
;
3439 link
->priv
= aconnector
;
3441 DRM_DEBUG_DRIVER("%s()\n", __func__
);
3443 i2c
= create_i2c(link
->ddc
, link
->link_index
, &res
);
3444 aconnector
->i2c
= i2c
;
3445 res
= i2c_add_adapter(&i2c
->base
);
3448 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
3452 connector_type
= to_drm_connector_type(link
->connector_signal
);
3454 res
= drm_connector_init(
3457 &amdgpu_dm_connector_funcs
,
3461 DRM_ERROR("connector_init failed\n");
3462 aconnector
->connector_id
= -1;
3466 drm_connector_helper_add(
3468 &amdgpu_dm_connector_helper_funcs
);
3470 amdgpu_dm_connector_init_helper(
3477 drm_mode_connector_attach_encoder(
3478 &aconnector
->base
, &aencoder
->base
);
3480 drm_connector_register(&aconnector
->base
);
3482 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
3483 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
3484 amdgpu_dm_initialize_dp_connector(dm
, aconnector
);
3486 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3487 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3489 /* NOTE: this currently will create backlight device even if a panel
3490 * is not connected to the eDP/LVDS connector.
3492 * This is less than ideal but we don't have sink information at this
3493 * stage since detection happens after. We can't do detection earlier
3494 * since MST detection needs connectors to be created first.
3496 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
3497 /* Event if registration failed, we should continue with
3498 * DM initialization because not having a backlight control
3499 * is better then a black screen.
3501 amdgpu_dm_register_backlight_device(dm
);
3503 if (dm
->backlight_dev
)
3504 dm
->backlight_link
= link
;
3511 aconnector
->i2c
= NULL
;
3516 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
3518 switch (adev
->mode_info
.num_crtc
) {
3535 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
3536 struct amdgpu_encoder
*aencoder
,
3537 uint32_t link_index
)
3539 struct amdgpu_device
*adev
= dev
->dev_private
;
3541 int res
= drm_encoder_init(dev
,
3543 &amdgpu_dm_encoder_funcs
,
3544 DRM_MODE_ENCODER_TMDS
,
3547 aencoder
->base
.possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
3550 aencoder
->encoder_id
= link_index
;
3552 aencoder
->encoder_id
= -1;
3554 drm_encoder_helper_add(&aencoder
->base
, &amdgpu_dm_encoder_helper_funcs
);
3559 static void manage_dm_interrupts(struct amdgpu_device
*adev
,
3560 struct amdgpu_crtc
*acrtc
,
3564 * this is not correct translation but will work as soon as VBLANK
3565 * constant is the same as PFLIP
3568 amdgpu_crtc_idx_to_irq_type(
3573 drm_crtc_vblank_on(&acrtc
->base
);
3576 &adev
->pageflip_irq
,
3582 &adev
->pageflip_irq
,
3584 drm_crtc_vblank_off(&acrtc
->base
);
3589 is_scaling_state_different(const struct dm_connector_state
*dm_state
,
3590 const struct dm_connector_state
*old_dm_state
)
3592 if (dm_state
->scaling
!= old_dm_state
->scaling
)
3594 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
3595 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
3597 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
3598 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
3600 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
||
3601 dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
3606 static void remove_stream(struct amdgpu_device
*adev
,
3607 struct amdgpu_crtc
*acrtc
,
3608 struct dc_stream_state
*stream
)
3610 /* this is the update mode case */
3611 if (adev
->dm
.freesync_module
)
3612 mod_freesync_remove_stream(adev
->dm
.freesync_module
, stream
);
3614 acrtc
->otg_inst
= -1;
3615 acrtc
->enabled
= false;
3618 static int get_cursor_position(struct drm_plane
*plane
, struct drm_crtc
*crtc
,
3619 struct dc_cursor_position
*position
)
3621 struct amdgpu_crtc
*amdgpu_crtc
= amdgpu_crtc
= to_amdgpu_crtc(crtc
);
3623 int xorigin
= 0, yorigin
= 0;
3625 if (!crtc
|| !plane
->state
->fb
) {
3626 position
->enable
= false;
3632 if ((plane
->state
->crtc_w
> amdgpu_crtc
->max_cursor_width
) ||
3633 (plane
->state
->crtc_h
> amdgpu_crtc
->max_cursor_height
)) {
3634 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3636 plane
->state
->crtc_w
,
3637 plane
->state
->crtc_h
);
3641 x
= plane
->state
->crtc_x
;
3642 y
= plane
->state
->crtc_y
;
3643 /* avivo cursor are offset into the total surface */
3644 x
+= crtc
->primary
->state
->src_x
>> 16;
3645 y
+= crtc
->primary
->state
->src_y
>> 16;
3647 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
3651 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
3654 position
->enable
= true;
3657 position
->x_hotspot
= xorigin
;
3658 position
->y_hotspot
= yorigin
;
3663 static void handle_cursor_update(struct drm_plane
*plane
,
3664 struct drm_plane_state
*old_plane_state
)
3666 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(plane
->state
->fb
);
3667 struct drm_crtc
*crtc
= afb
? plane
->state
->crtc
: old_plane_state
->crtc
;
3668 struct dm_crtc_state
*crtc_state
= crtc
? to_dm_crtc_state(crtc
->state
) : NULL
;
3669 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
3670 uint64_t address
= afb
? afb
->address
: 0;
3671 struct dc_cursor_position position
;
3672 struct dc_cursor_attributes attributes
;
3675 if (!plane
->state
->fb
&& !old_plane_state
->fb
)
3678 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
3680 amdgpu_crtc
->crtc_id
,
3681 plane
->state
->crtc_w
,
3682 plane
->state
->crtc_h
);
3684 ret
= get_cursor_position(plane
, crtc
, &position
);
3688 if (!position
.enable
) {
3689 /* turn off cursor */
3690 if (crtc_state
&& crtc_state
->stream
)
3691 dc_stream_set_cursor_position(crtc_state
->stream
,
3696 amdgpu_crtc
->cursor_width
= plane
->state
->crtc_w
;
3697 amdgpu_crtc
->cursor_height
= plane
->state
->crtc_h
;
3699 attributes
.address
.high_part
= upper_32_bits(address
);
3700 attributes
.address
.low_part
= lower_32_bits(address
);
3701 attributes
.width
= plane
->state
->crtc_w
;
3702 attributes
.height
= plane
->state
->crtc_h
;
3703 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
3704 attributes
.rotation_angle
= 0;
3705 attributes
.attribute_flags
.value
= 0;
3707 attributes
.pitch
= attributes
.width
;
3709 if (crtc_state
->stream
) {
3710 if (!dc_stream_set_cursor_attributes(crtc_state
->stream
,
3712 DRM_ERROR("DC failed to set cursor attributes\n");
3714 if (!dc_stream_set_cursor_position(crtc_state
->stream
,
3716 DRM_ERROR("DC failed to set cursor position\n");
3720 static void prepare_flip_isr(struct amdgpu_crtc
*acrtc
)
3723 assert_spin_locked(&acrtc
->base
.dev
->event_lock
);
3724 WARN_ON(acrtc
->event
);
3726 acrtc
->event
= acrtc
->base
.state
->event
;
3728 /* Set the flip status */
3729 acrtc
->pflip_status
= AMDGPU_FLIP_SUBMITTED
;
3731 /* Mark this event as consumed */
3732 acrtc
->base
.state
->event
= NULL
;
3734 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
3741 * Waits on all BO's fences and for proper vblank count
3743 static void amdgpu_dm_do_flip(struct drm_crtc
*crtc
,
3744 struct drm_framebuffer
*fb
,
3746 struct dc_state
*state
)
3748 unsigned long flags
;
3749 uint32_t target_vblank
;
3751 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3752 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(fb
);
3753 struct amdgpu_bo
*abo
= gem_to_amdgpu_bo(afb
->obj
);
3754 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
3755 bool async_flip
= (crtc
->state
->pageflip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
) != 0;
3756 struct dc_flip_addrs addr
= { {0} };
3757 /* TODO eliminate or rename surface_update */
3758 struct dc_surface_update surface_updates
[1] = { {0} };
3759 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
3762 /* Prepare wait for target vblank early - before the fence-waits */
3763 target_vblank
= target
- drm_crtc_vblank_count(crtc
) +
3764 amdgpu_get_vblank_counter_kms(crtc
->dev
, acrtc
->crtc_id
);
3766 /* TODO This might fail and hence better not used, wait
3767 * explicitly on fences instead
3768 * and in general should be called for
3769 * blocking commit to as per framework helpers
3771 r
= amdgpu_bo_reserve(abo
, true);
3772 if (unlikely(r
!= 0)) {
3773 DRM_ERROR("failed to reserve buffer before flip\n");
3777 /* Wait for all fences on this FB */
3778 WARN_ON(reservation_object_wait_timeout_rcu(abo
->tbo
.resv
, true, false,
3779 MAX_SCHEDULE_TIMEOUT
) < 0);
3781 amdgpu_bo_unreserve(abo
);
3783 /* Wait until we're out of the vertical blank period before the one
3784 * targeted by the flip
3786 while ((acrtc
->enabled
&&
3787 (amdgpu_get_crtc_scanoutpos(adev
->ddev
, acrtc
->crtc_id
, 0,
3788 &vpos
, &hpos
, NULL
, NULL
,
3790 & (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
)) ==
3791 (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
) &&
3792 (int)(target_vblank
-
3793 amdgpu_get_vblank_counter_kms(adev
->ddev
, acrtc
->crtc_id
)) > 0)) {
3794 usleep_range(1000, 1100);
3798 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
3799 /* update crtc fb */
3800 crtc
->primary
->fb
= fb
;
3802 WARN_ON(acrtc
->pflip_status
!= AMDGPU_FLIP_NONE
);
3803 WARN_ON(!acrtc_state
->stream
);
3805 addr
.address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3806 addr
.address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3807 addr
.flip_immediate
= async_flip
;
3810 if (acrtc
->base
.state
->event
)
3811 prepare_flip_isr(acrtc
);
3813 surface_updates
->surface
= dc_stream_get_status(acrtc_state
->stream
)->plane_states
[0];
3814 surface_updates
->flip_addr
= &addr
;
3817 dc_commit_updates_for_stream(adev
->dm
.dc
,
3820 acrtc_state
->stream
,
3822 &surface_updates
->surface
,
3825 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
3827 addr
.address
.grph
.addr
.high_part
,
3828 addr
.address
.grph
.addr
.low_part
);
3831 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3834 static void amdgpu_dm_commit_planes(struct drm_atomic_state
*state
,
3835 struct drm_device
*dev
,
3836 struct amdgpu_display_manager
*dm
,
3837 struct drm_crtc
*pcrtc
,
3838 bool *wait_for_vblank
)
3841 struct drm_plane
*plane
;
3842 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
3843 struct dc_stream_state
*dc_stream_attach
;
3844 struct dc_plane_state
*plane_states_constructed
[MAX_SURFACES
];
3845 struct amdgpu_crtc
*acrtc_attach
= to_amdgpu_crtc(pcrtc
);
3846 struct drm_crtc_state
*new_pcrtc_state
=
3847 drm_atomic_get_new_crtc_state(state
, pcrtc
);
3848 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(new_pcrtc_state
);
3849 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
3850 int planes_count
= 0;
3851 unsigned long flags
;
3853 /* update planes when needed */
3854 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
3855 struct drm_crtc
*crtc
= new_plane_state
->crtc
;
3856 struct drm_crtc_state
*new_crtc_state
;
3857 struct drm_framebuffer
*fb
= new_plane_state
->fb
;
3859 struct dm_plane_state
*dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
3861 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
3862 handle_cursor_update(plane
, old_plane_state
);
3866 if (!fb
|| !crtc
|| pcrtc
!= crtc
)
3869 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, crtc
);
3870 if (!new_crtc_state
->active
)
3873 pflip_needed
= !state
->allow_modeset
;
3875 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
3876 if (acrtc_attach
->pflip_status
!= AMDGPU_FLIP_NONE
) {
3877 DRM_ERROR("%s: acrtc %d, already busy\n",
3879 acrtc_attach
->crtc_id
);
3880 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3881 /* In commit tail framework this cannot happen */
3884 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3886 if (!pflip_needed
) {
3887 WARN_ON(!dm_new_plane_state
->dc_state
);
3889 plane_states_constructed
[planes_count
] = dm_new_plane_state
->dc_state
;
3891 dc_stream_attach
= acrtc_state
->stream
;
3894 } else if (new_crtc_state
->planes_changed
) {
3895 /* Assume even ONE crtc with immediate flip means
3896 * entire can't wait for VBLANK
3897 * TODO Check if it's correct
3900 new_pcrtc_state
->pageflip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
?
3903 /* TODO: Needs rework for multiplane flip */
3904 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
)
3905 drm_crtc_vblank_get(crtc
);
3910 drm_crtc_vblank_count(crtc
) + *wait_for_vblank
,
3917 unsigned long flags
;
3919 if (new_pcrtc_state
->event
) {
3921 drm_crtc_vblank_get(pcrtc
);
3923 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
3924 prepare_flip_isr(acrtc_attach
);
3925 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
3928 if (false == dc_commit_planes_to_stream(dm
->dc
,
3929 plane_states_constructed
,
3933 dm_error("%s: Failed to attach plane!\n", __func__
);
3935 /*TODO BUG Here should go disable planes on CRTC. */
3940 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
3941 struct drm_atomic_state
*state
,
3944 struct drm_crtc
*crtc
;
3945 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
3946 struct amdgpu_device
*adev
= dev
->dev_private
;
3950 * We evade vblanks and pflips on crtc that
3951 * should be changed. We do it here to flush & disable
3952 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
3953 * it will update crtc->dm_crtc_state->stream pointer which is used in
3956 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
3957 struct dm_crtc_state
*dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
3958 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3960 if (drm_atomic_crtc_needs_modeset(new_crtc_state
) && dm_old_crtc_state
->stream
)
3961 manage_dm_interrupts(adev
, acrtc
, false);
3963 /* Add check here for SoC's that support hardware cursor plane, to
3964 * unset legacy_cursor_update */
3966 return drm_atomic_helper_commit(dev
, state
, nonblock
);
3968 /*TODO Handle EINTR, reenable IRQ*/
3971 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
)
3973 struct drm_device
*dev
= state
->dev
;
3974 struct amdgpu_device
*adev
= dev
->dev_private
;
3975 struct amdgpu_display_manager
*dm
= &adev
->dm
;
3976 struct dm_atomic_state
*dm_state
;
3978 uint32_t new_crtcs_count
= 0;
3979 struct drm_crtc
*crtc
;
3980 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
3981 struct amdgpu_crtc
*new_crtcs
[MAX_STREAMS
];
3982 struct dc_stream_state
*new_stream
= NULL
;
3983 unsigned long flags
;
3984 bool wait_for_vblank
= true;
3985 struct drm_connector
*connector
;
3986 struct drm_connector_state
*old_con_state
, *new_con_state
;
3987 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
3989 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
3991 dm_state
= to_dm_atomic_state(state
);
3993 /* update changed items */
3994 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
3995 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3997 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
3998 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4001 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4002 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4003 "connectors_changed:%d\n",
4005 new_crtc_state
->enable
,
4006 new_crtc_state
->active
,
4007 new_crtc_state
->planes_changed
,
4008 new_crtc_state
->mode_changed
,
4009 new_crtc_state
->active_changed
,
4010 new_crtc_state
->connectors_changed
);
4012 /* handles headless hotplug case, updating new_state and
4013 * aconnector as needed
4016 if (modeset_required(new_crtc_state
, dm_new_crtc_state
->stream
, dm_old_crtc_state
->stream
)) {
4018 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc
->crtc_id
, acrtc
);
4020 if (!dm_new_crtc_state
->stream
) {
4022 * this could happen because of issues with
4023 * userspace notifications delivery.
4024 * In this case userspace tries to set mode on
4025 * display which is disconnect in fact.
4026 * dc_sink in NULL in this case on aconnector.
4027 * We expect reset mode will come soon.
4029 * This can also happen when unplug is done
4030 * during resume sequence ended
4032 * In this case, we want to pretend we still
4033 * have a sink to keep the pipe running so that
4034 * hw state is consistent with the sw state
4036 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4037 __func__
, acrtc
->base
.base
.id
);
4042 if (dm_old_crtc_state
->stream
)
4043 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
4047 * this loop saves set mode crtcs
4048 * we needed to enable vblanks once all
4049 * resources acquired in dc after dc_commit_streams
4052 /*TODO move all this into dm_crtc_state, get rid of
4053 * new_crtcs array and use old and new atomic states
4056 new_crtcs
[new_crtcs_count
] = acrtc
;
4059 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, crtc
);
4060 acrtc
->enabled
= true;
4061 acrtc
->hw_mode
= new_crtc_state
->mode
;
4062 crtc
->hwmode
= new_crtc_state
->mode
;
4063 } else if (modereset_required(new_crtc_state
)) {
4064 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc
->crtc_id
, acrtc
);
4066 /* i.e. reset mode */
4067 if (dm_old_crtc_state
->stream
)
4068 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
4070 } /* for_each_crtc_in_state() */
4073 * Add streams after required streams from new and replaced streams
4074 * are removed from freesync module
4076 if (adev
->dm
.freesync_module
) {
4077 for (i
= 0; i
< new_crtcs_count
; i
++) {
4078 struct amdgpu_dm_connector
*aconnector
= NULL
;
4080 new_crtc_state
= drm_atomic_get_new_crtc_state(state
,
4081 &new_crtcs
[i
]->base
);
4082 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4084 new_stream
= dm_new_crtc_state
->stream
;
4085 aconnector
= amdgpu_dm_find_first_crtc_matching_connector(
4087 &new_crtcs
[i
]->base
);
4089 DRM_DEBUG_DRIVER("Atomic commit: Failed to find connector for acrtc id:%d "
4090 "skipping freesync init\n",
4091 new_crtcs
[i
]->crtc_id
);
4095 mod_freesync_add_stream(adev
->dm
.freesync_module
,
4096 new_stream
, &aconnector
->caps
);
4100 if (dm_state
->context
)
4101 WARN_ON(!dc_commit_state(dm
->dc
, dm_state
->context
));
4103 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
4104 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4106 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4108 if (dm_new_crtc_state
->stream
!= NULL
) {
4109 const struct dc_stream_status
*status
=
4110 dc_stream_get_status(dm_new_crtc_state
->stream
);
4113 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state
->stream
, acrtc
);
4115 acrtc
->otg_inst
= status
->primary_otg_inst
;
4119 /* Handle scaling and underscan changes*/
4120 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
4121 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
4122 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
4123 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
4124 struct dc_stream_status
*status
= NULL
;
4127 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, &acrtc
->base
);
4129 /* Skip any modesets/resets */
4130 if (!acrtc
|| drm_atomic_crtc_needs_modeset(new_crtc_state
))
4133 /* Skip any thing not scale or underscan changes */
4134 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
))
4137 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4139 update_stream_scaling_settings(&dm_new_con_state
->base
.crtc
->mode
,
4140 dm_new_con_state
, (struct dc_stream_state
*)dm_new_crtc_state
->stream
);
4142 status
= dc_stream_get_status(dm_new_crtc_state
->stream
);
4144 WARN_ON(!status
->plane_count
);
4146 if (!dm_new_crtc_state
->stream
)
4149 /*TODO How it works with MPO ?*/
4150 if (!dc_commit_planes_to_stream(
4152 status
->plane_states
,
4153 status
->plane_count
,
4154 dm_new_crtc_state
->stream
,
4156 dm_error("%s: Failed to update stream scaling!\n", __func__
);
4159 for (i
= 0; i
< new_crtcs_count
; i
++) {
4161 * loop to enable interrupts on newly arrived crtc
4163 struct amdgpu_crtc
*acrtc
= new_crtcs
[i
];
4165 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, &acrtc
->base
);
4166 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4168 if (adev
->dm
.freesync_module
)
4169 mod_freesync_notify_mode_change(
4170 adev
->dm
.freesync_module
, &dm_new_crtc_state
->stream
, 1);
4172 manage_dm_interrupts(adev
, acrtc
, true);
4175 /* update planes when needed per crtc*/
4176 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, j
) {
4177 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4179 if (dm_new_crtc_state
->stream
)
4180 amdgpu_dm_commit_planes(state
, dev
, dm
, crtc
, &wait_for_vblank
);
4185 * send vblank event on all events not handled in flip and
4186 * mark consumed event for drm_atomic_helper_commit_hw_done
4188 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
4189 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
4191 if (new_crtc_state
->event
)
4192 drm_send_event_locked(dev
, &new_crtc_state
->event
->base
);
4194 new_crtc_state
->event
= NULL
;
4196 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
4198 /* Signal HW programming completion */
4199 drm_atomic_helper_commit_hw_done(state
);
4201 if (wait_for_vblank
)
4202 drm_atomic_helper_wait_for_vblanks(dev
, state
);
4204 drm_atomic_helper_cleanup_planes(dev
, state
);
4208 static int dm_force_atomic_commit(struct drm_connector
*connector
)
4211 struct drm_device
*ddev
= connector
->dev
;
4212 struct drm_atomic_state
*state
= drm_atomic_state_alloc(ddev
);
4213 struct amdgpu_crtc
*disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4214 struct drm_plane
*plane
= disconnected_acrtc
->base
.primary
;
4215 struct drm_connector_state
*conn_state
;
4216 struct drm_crtc_state
*crtc_state
;
4217 struct drm_plane_state
*plane_state
;
4222 state
->acquire_ctx
= ddev
->mode_config
.acquire_ctx
;
4224 /* Construct an atomic state to restore previous display setting */
4227 * Attach connectors to drm_atomic_state
4229 conn_state
= drm_atomic_get_connector_state(state
, connector
);
4231 ret
= PTR_ERR_OR_ZERO(conn_state
);
4235 /* Attach crtc to drm_atomic_state*/
4236 crtc_state
= drm_atomic_get_crtc_state(state
, &disconnected_acrtc
->base
);
4238 ret
= PTR_ERR_OR_ZERO(crtc_state
);
4242 /* force a restore */
4243 crtc_state
->mode_changed
= true;
4245 /* Attach plane to drm_atomic_state */
4246 plane_state
= drm_atomic_get_plane_state(state
, plane
);
4248 ret
= PTR_ERR_OR_ZERO(plane_state
);
4253 /* Call commit internally with the state we just constructed */
4254 ret
= drm_atomic_commit(state
);
4259 DRM_ERROR("Restoring old state failed with %i\n", ret
);
4260 drm_atomic_state_put(state
);
4266 * This functions handle all cases when set mode does not come upon hotplug.
4267 * This include when the same display is unplugged then plugged back into the
4268 * same port and when we are running without usermode desktop manager supprot
4270 void dm_restore_drm_connector_state(struct drm_device
*dev
,
4271 struct drm_connector
*connector
)
4273 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4274 struct amdgpu_crtc
*disconnected_acrtc
;
4275 struct dm_crtc_state
*acrtc_state
;
4277 if (!aconnector
->dc_sink
|| !connector
->state
|| !connector
->encoder
)
4280 disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4281 acrtc_state
= to_dm_crtc_state(disconnected_acrtc
->base
.state
);
4283 if (!disconnected_acrtc
|| !acrtc_state
->stream
)
4287 * If the previous sink is not released and different from the current,
4288 * we deduce we are in a state where we can not rely on usermode call
4289 * to turn on the display, so we do it here
4291 if (acrtc_state
->stream
->sink
!= aconnector
->dc_sink
)
4292 dm_force_atomic_commit(&aconnector
->base
);
4296 * Grabs all modesetting locks to serialize against any blocking commits,
4297 * Waits for completion of all non blocking commits.
4299 static int do_aquire_global_lock(struct drm_device
*dev
,
4300 struct drm_atomic_state
*state
)
4302 struct drm_crtc
*crtc
;
4303 struct drm_crtc_commit
*commit
;
4306 /* Adding all modeset locks to aquire_ctx will
4307 * ensure that when the framework release it the
4308 * extra locks we are locking here will get released to
4310 ret
= drm_modeset_lock_all_ctx(dev
, state
->acquire_ctx
);
4314 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
4315 spin_lock(&crtc
->commit_lock
);
4316 commit
= list_first_entry_or_null(&crtc
->commit_list
,
4317 struct drm_crtc_commit
, commit_entry
);
4319 drm_crtc_commit_get(commit
);
4320 spin_unlock(&crtc
->commit_lock
);
4325 /* Make sure all pending HW programming completed and
4328 ret
= wait_for_completion_interruptible_timeout(&commit
->hw_done
, 10*HZ
);
4331 ret
= wait_for_completion_interruptible_timeout(
4332 &commit
->flip_done
, 10*HZ
);
4335 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
4336 "timed out\n", crtc
->base
.id
, crtc
->name
);
4338 drm_crtc_commit_put(commit
);
4341 return ret
< 0 ? ret
: 0;
4344 static int dm_update_crtcs_state(struct dc
*dc
,
4345 struct drm_atomic_state
*state
,
4347 bool *lock_and_validation_needed
)
4349 struct drm_crtc
*crtc
;
4350 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4352 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
4353 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4354 struct dc_stream_state
*new_stream
;
4357 /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
4358 /* update changed items */
4359 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4360 struct amdgpu_crtc
*acrtc
= NULL
;
4361 struct amdgpu_dm_connector
*aconnector
= NULL
;
4362 struct drm_connector_state
*new_con_state
= NULL
;
4363 struct dm_connector_state
*dm_conn_state
= NULL
;
4367 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4368 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4369 acrtc
= to_amdgpu_crtc(crtc
);
4371 aconnector
= amdgpu_dm_find_first_crtc_matching_connector(state
, crtc
);
4373 /* TODO This hack should go away */
4374 if (aconnector
&& enable
) {
4375 // Make sure fake sink is created in plug-in scenario
4376 new_con_state
= drm_atomic_get_connector_state(state
,
4379 if (IS_ERR(new_con_state
)) {
4380 ret
= PTR_ERR_OR_ZERO(new_con_state
);
4384 dm_conn_state
= to_dm_connector_state(new_con_state
);
4386 new_stream
= create_stream_for_sink(aconnector
,
4387 &new_crtc_state
->mode
,
4391 * we can have no stream on ACTION_SET if a display
4392 * was disconnected during S3, in this case it not and
4393 * error, the OS will be updated after detection, and
4394 * do the right thing on next atomic commit
4398 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4399 __func__
, acrtc
->base
.base
.id
);
4404 if (dc_is_stream_unchanged(new_stream
, dm_old_crtc_state
->stream
) &&
4405 dc_is_stream_scaling_unchanged(new_stream
, dm_old_crtc_state
->stream
)) {
4407 new_crtc_state
->mode_changed
= false;
4409 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
4410 new_crtc_state
->mode_changed
);
4414 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
4418 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4419 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4420 "connectors_changed:%d\n",
4422 new_crtc_state
->enable
,
4423 new_crtc_state
->active
,
4424 new_crtc_state
->planes_changed
,
4425 new_crtc_state
->mode_changed
,
4426 new_crtc_state
->active_changed
,
4427 new_crtc_state
->connectors_changed
);
4429 /* Remove stream for any changed/disabled CRTC */
4432 if (!dm_old_crtc_state
->stream
)
4435 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
4438 /* i.e. reset mode */
4439 if (dc_remove_stream_from_ctx(
4442 dm_old_crtc_state
->stream
) != DC_OK
) {
4447 dc_stream_release(dm_old_crtc_state
->stream
);
4448 dm_new_crtc_state
->stream
= NULL
;
4450 *lock_and_validation_needed
= true;
4452 } else {/* Add stream for any updated/enabled CRTC */
4454 * Quick fix to prevent NULL pointer on new_stream when
4455 * added MST connectors not found in existing crtc_state in the chained mode
4456 * TODO: need to dig out the root cause of that
4458 if (!aconnector
|| (!aconnector
->dc_sink
&& aconnector
->mst_port
))
4461 if (modereset_required(new_crtc_state
))
4464 if (modeset_required(new_crtc_state
, new_stream
,
4465 dm_old_crtc_state
->stream
)) {
4467 WARN_ON(dm_new_crtc_state
->stream
);
4469 dm_new_crtc_state
->stream
= new_stream
;
4470 dc_stream_retain(new_stream
);
4472 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
4475 if (dc_add_stream_to_ctx(
4478 dm_new_crtc_state
->stream
) != DC_OK
) {
4483 *lock_and_validation_needed
= true;
4488 /* Release extra reference */
4490 dc_stream_release(new_stream
);
4497 dc_stream_release(new_stream
);
4501 static int dm_update_planes_state(struct dc
*dc
,
4502 struct drm_atomic_state
*state
,
4504 bool *lock_and_validation_needed
)
4506 struct drm_crtc
*new_plane_crtc
, *old_plane_crtc
;
4507 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4508 struct drm_plane
*plane
;
4509 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
4510 struct dm_crtc_state
*dm_new_crtc_state
, *dm_old_crtc_state
;
4511 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4512 struct dm_plane_state
*dm_new_plane_state
, *dm_old_plane_state
;
4514 /* TODO return page_flip_needed() function */
4515 bool pflip_needed
= !state
->allow_modeset
;
4521 /* Add new planes */
4522 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
4523 new_plane_crtc
= new_plane_state
->crtc
;
4524 old_plane_crtc
= old_plane_state
->crtc
;
4525 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
4526 dm_old_plane_state
= to_dm_plane_state(old_plane_state
);
4528 /*TODO Implement atomic check for cursor plane */
4529 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
4532 /* Remove any changed/removed planes */
4535 if (!old_plane_crtc
)
4538 old_crtc_state
= drm_atomic_get_old_crtc_state(
4539 state
, old_plane_crtc
);
4540 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4542 if (!dm_old_crtc_state
->stream
)
4545 DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
4546 plane
->base
.id
, old_plane_crtc
->base
.id
);
4548 if (!dc_remove_plane_from_context(
4550 dm_old_crtc_state
->stream
,
4551 dm_old_plane_state
->dc_state
,
4552 dm_state
->context
)) {
4559 dc_plane_state_release(dm_old_plane_state
->dc_state
);
4560 dm_new_plane_state
->dc_state
= NULL
;
4562 *lock_and_validation_needed
= true;
4564 } else { /* Add new planes */
4566 if (drm_atomic_plane_disabling(plane
->state
, new_plane_state
))
4569 if (!new_plane_crtc
)
4572 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, new_plane_crtc
);
4573 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4575 if (!dm_new_crtc_state
->stream
)
4579 WARN_ON(dm_new_plane_state
->dc_state
);
4581 dm_new_plane_state
->dc_state
= dc_create_plane_state(dc
);
4583 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4584 plane
->base
.id
, new_plane_crtc
->base
.id
);
4586 if (!dm_new_plane_state
->dc_state
) {
4591 ret
= fill_plane_attributes(
4592 new_plane_crtc
->dev
->dev_private
,
4593 dm_new_plane_state
->dc_state
,
4601 if (!dc_add_plane_to_context(
4603 dm_new_crtc_state
->stream
,
4604 dm_new_plane_state
->dc_state
,
4605 dm_state
->context
)) {
4611 *lock_and_validation_needed
= true;
4619 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
4620 struct drm_atomic_state
*state
)
4624 struct amdgpu_device
*adev
= dev
->dev_private
;
4625 struct dc
*dc
= adev
->dm
.dc
;
4626 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4627 struct drm_connector
*connector
;
4628 struct drm_connector_state
*old_con_state
, *new_con_state
;
4629 struct drm_crtc
*crtc
;
4630 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4633 * This bool will be set for true for any modeset/reset
4634 * or plane update which implies non fast surface update.
4636 bool lock_and_validation_needed
= false;
4638 ret
= drm_atomic_helper_check_modeset(dev
, state
);
4640 DRM_ERROR("Atomic state validation failed with error :%d !\n", ret
);
4645 * legacy_cursor_update should be made false for SoC's having
4646 * a dedicated hardware plane for cursor in amdgpu_dm_atomic_commit(),
4647 * otherwise for software cursor plane,
4648 * we should not add it to list of affected planes.
4650 if (state
->legacy_cursor_update
) {
4651 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
4652 if (new_crtc_state
->color_mgmt_changed
) {
4653 ret
= drm_atomic_add_affected_planes(state
, crtc
);
4659 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4660 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
4663 if (!new_crtc_state
->enable
)
4666 ret
= drm_atomic_add_affected_connectors(state
, crtc
);
4670 ret
= drm_atomic_add_affected_planes(state
, crtc
);
4676 dm_state
->context
= dc_create_state();
4677 ASSERT(dm_state
->context
);
4678 dc_resource_state_copy_construct_current(dc
, dm_state
->context
);
4680 /* Remove exiting planes if they are modified */
4681 ret
= dm_update_planes_state(dc
, state
, false, &lock_and_validation_needed
);
4686 /* Disable all crtcs which require disable */
4687 ret
= dm_update_crtcs_state(dc
, state
, false, &lock_and_validation_needed
);
4692 /* Enable all crtcs which require enable */
4693 ret
= dm_update_crtcs_state(dc
, state
, true, &lock_and_validation_needed
);
4698 /* Add new/modified planes */
4699 ret
= dm_update_planes_state(dc
, state
, true, &lock_and_validation_needed
);
4704 /* Run this here since we want to validate the streams we created */
4705 ret
= drm_atomic_helper_check_planes(dev
, state
);
4709 /* Check scaling and underscan changes*/
4710 /*TODO Removed scaling changes validation due to inability to commit
4711 * new stream into context w\o causing full reset. Need to
4712 * decide how to handle.
4714 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
4715 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
4716 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
4717 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
4719 /* Skip any modesets/resets */
4720 if (!acrtc
|| drm_atomic_crtc_needs_modeset(
4721 drm_atomic_get_new_crtc_state(state
, &acrtc
->base
)))
4724 /* Skip any thing not scale or underscan changes */
4725 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
))
4728 lock_and_validation_needed
= true;
4732 * For full updates case when
4733 * removing/adding/updating streams on once CRTC while flipping
4735 * acquiring global lock will guarantee that any such full
4737 * will wait for completion of any outstanding flip using DRMs
4738 * synchronization events.
4741 if (lock_and_validation_needed
) {
4743 ret
= do_aquire_global_lock(dev
, state
);
4747 if (dc_validate_global_state(dc
, dm_state
->context
) != DC_OK
) {
4753 /* Must be success */
4758 if (ret
== -EDEADLK
)
4759 DRM_DEBUG_DRIVER("Atomic check stopped due to to deadlock.\n");
4760 else if (ret
== -EINTR
|| ret
== -EAGAIN
|| ret
== -ERESTARTSYS
)
4761 DRM_DEBUG_DRIVER("Atomic check stopped due to to signal.\n");
4763 DRM_ERROR("Atomic check failed with err: %d \n", ret
);
4768 static bool is_dp_capable_without_timing_msa(struct dc
*dc
,
4769 struct amdgpu_dm_connector
*amdgpu_dm_connector
)
4772 bool capable
= false;
4774 if (amdgpu_dm_connector
->dc_link
&&
4775 dm_helpers_dp_read_dpcd(
4777 amdgpu_dm_connector
->dc_link
,
4778 DP_DOWN_STREAM_PORT_COUNT
,
4780 sizeof(dpcd_data
))) {
4781 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
/*
 * Probe the sink's EDID for a continuous-frequency (FreeSync) display
 * range descriptor and cache the supported refresh range on the
 * connector.
 *
 * @connector: DRM connector the sink is attached to
 * @edid: EDID read from the sink; may be NULL
 *
 * The EDID range check is only performed for DP/eDP sinks that report
 * they can operate without MSA timing parameters (see
 * is_dp_capable_without_timing_msa()). On success the range is stored
 * in amdgpu_dm_connector->caps; nothing is reported to user-space yet
 * (see the TODO at the bottom).
 */
void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
					   struct edid *edid)
{
	int i;
	uint64_t val_capable;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;

	edid_check_required = false;
	/* Without a dc_sink there is nothing to attach caps to. */
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		return;
	}
	/* FreeSync module not initialized for this device — bail out. */
	if (!adev->dm.freesync_module)
		return;
	/*
	 * if edid non zero restrict freesync only for dp and edp
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	val_capable = 0;
	/* Range descriptors require EDID 1.1+; older revisions lack them. */
	if (edid_check_required == true && (edid->version > 1 ||
	   (edid->version == 1 && edid->revision > 1))) {
		/* EDID base block carries up to 4 detailed descriptors. */
		for (i = 0; i < 4; i++) {

			timing	= &edid->detailed_timings[i];
			data	= &timing->data.other_data;
			range	= &data->data.range;
			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			/*
			 * NOTE(review): presumably the EDID field is in
			 * 10 MHz units, hence the *10 to get MHz — confirm
			 * against the EDID range-limits descriptor spec.
			 */
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		/*
		 * Only advertise FreeSync when the refresh range is wider
		 * than 10 Hz; a degenerate range is not useful.
		 */
		if (amdgpu_dm_connector->max_vfreq -
				amdgpu_dm_connector->min_vfreq > 10) {
			amdgpu_dm_connector->caps.supported = true;
			amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
					amdgpu_dm_connector->min_vfreq * 1000000;
			amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
					amdgpu_dm_connector->max_vfreq * 1000000;
			/*
			 * NOTE(review): val_capable is set but not consumed
			 * below — presumably leftover from an earlier
			 * freesync-module call path; verify before removing.
			 */
			val_capable = 1;
		}
	}

	/*
	 * TODO figure out how to notify user-mode or DRM of freesync caps
	 * once we figure out how to deal with freesync in an upstreamable
	 * fashion
	 */

}
4867 void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector
*connector
)
4870 * TODO fill in once we figure out how to deal with freesync in
4871 * an upstreamable fashion