2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include "dm_services_types.h"
28 #include "dc/inc/core_types.h"
32 #include "amdgpu_display.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
40 #include "dm_services_types.h"
41 #include "amdgpu_dm_mst_types.h"
43 #include "ivsrcid/ivsrcid_vislands30.h"
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/version.h>
48 #include <linux/types.h>
51 #include <drm/drm_atomic.h>
52 #include <drm/drm_atomic_helper.h>
53 #include <drm/drm_dp_mst_helper.h>
54 #include <drm/drm_fb_helper.h>
55 #include <drm/drm_edid.h>
57 #include "modules/inc/mod_freesync.h"
59 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
60 #include "ivsrcid/irqsrcs_dcn_1_0.h"
62 #include "raven1/DCN/dcn_1_0_offset.h"
63 #include "raven1/DCN/dcn_1_0_sh_mask.h"
64 #include "vega10/soc15ip.h"
66 #include "soc15_common.h"
69 #include "modules/inc/mod_freesync.h"
71 #include "i2caux_interface.h"
73 /* basic init/fini API */
74 static int amdgpu_dm_init(struct amdgpu_device
*adev
);
75 static void amdgpu_dm_fini(struct amdgpu_device
*adev
);
77 /* initializes drm_device display related structures, based on the information
78 * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
79 * drm_encoder, drm_mode_config
81 * Returns 0 on success
83 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
);
84 /* removes and deallocates the drm structures, created by the above function */
85 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
);
88 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
);
90 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
91 struct amdgpu_plane
*aplane
,
92 unsigned long possible_crtcs
);
93 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
94 struct drm_plane
*plane
,
96 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
97 struct amdgpu_dm_connector
*amdgpu_dm_connector
,
99 struct amdgpu_encoder
*amdgpu_encoder
);
100 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
101 struct amdgpu_encoder
*aencoder
,
102 uint32_t link_index
);
104 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
);
106 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
107 struct drm_atomic_state
*state
,
110 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
);
112 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
113 struct drm_atomic_state
*state
);
118 static const enum drm_plane_type dm_plane_type_default
[AMDGPU_MAX_PLANES
] = {
119 DRM_PLANE_TYPE_PRIMARY
,
120 DRM_PLANE_TYPE_PRIMARY
,
121 DRM_PLANE_TYPE_PRIMARY
,
122 DRM_PLANE_TYPE_PRIMARY
,
123 DRM_PLANE_TYPE_PRIMARY
,
124 DRM_PLANE_TYPE_PRIMARY
,
127 static const enum drm_plane_type dm_plane_type_carizzo
[AMDGPU_MAX_PLANES
] = {
128 DRM_PLANE_TYPE_PRIMARY
,
129 DRM_PLANE_TYPE_PRIMARY
,
130 DRM_PLANE_TYPE_PRIMARY
,
131 DRM_PLANE_TYPE_OVERLAY
,/* YUV Capable Underlay */
134 static const enum drm_plane_type dm_plane_type_stoney
[AMDGPU_MAX_PLANES
] = {
135 DRM_PLANE_TYPE_PRIMARY
,
136 DRM_PLANE_TYPE_PRIMARY
,
137 DRM_PLANE_TYPE_OVERLAY
, /* YUV Capable Underlay */
141 * dm_vblank_get_counter
144 * Get counter for number of vertical blanks
147 * struct amdgpu_device *adev - [in] desired amdgpu device
148 * int disp_idx - [in] which CRTC to get the counter from
151 * Counter for vertical blanks
153 static u32
dm_vblank_get_counter(struct amdgpu_device
*adev
, int crtc
)
155 if (crtc
>= adev
->mode_info
.num_crtc
)
158 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
159 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
163 if (acrtc_state
->stream
== NULL
) {
164 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
169 return dc_stream_get_vblank_counter(acrtc_state
->stream
);
173 static int dm_crtc_get_scanoutpos(struct amdgpu_device
*adev
, int crtc
,
174 u32
*vbl
, u32
*position
)
176 uint32_t v_blank_start
, v_blank_end
, h_position
, v_position
;
178 if ((crtc
< 0) || (crtc
>= adev
->mode_info
.num_crtc
))
181 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
182 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
185 if (acrtc_state
->stream
== NULL
) {
186 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
192 * TODO rework base driver to use values directly.
193 * for now parse it back into reg-format
195 dc_stream_get_scanoutpos(acrtc_state
->stream
,
201 *position
= v_position
| (h_position
<< 16);
202 *vbl
= v_blank_start
| (v_blank_end
<< 16);
208 static bool dm_is_idle(void *handle
)
214 static int dm_wait_for_idle(void *handle
)
220 static bool dm_check_soft_reset(void *handle
)
225 static int dm_soft_reset(void *handle
)
231 static struct amdgpu_crtc
*
232 get_crtc_by_otg_inst(struct amdgpu_device
*adev
,
235 struct drm_device
*dev
= adev
->ddev
;
236 struct drm_crtc
*crtc
;
237 struct amdgpu_crtc
*amdgpu_crtc
;
240 * following if is check inherited from both functions where this one is
241 * used now. Need to be checked why it could happen.
243 if (otg_inst
== -1) {
245 return adev
->mode_info
.crtcs
[0];
248 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
249 amdgpu_crtc
= to_amdgpu_crtc(crtc
);
251 if (amdgpu_crtc
->otg_inst
== otg_inst
)
258 static void dm_pflip_high_irq(void *interrupt_params
)
260 struct amdgpu_crtc
*amdgpu_crtc
;
261 struct common_irq_params
*irq_params
= interrupt_params
;
262 struct amdgpu_device
*adev
= irq_params
->adev
;
265 amdgpu_crtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_PFLIP
);
267 /* IRQ could occur when in initial stage */
268 /*TODO work and BO cleanup */
269 if (amdgpu_crtc
== NULL
) {
270 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
274 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
276 if (amdgpu_crtc
->pflip_status
!= AMDGPU_FLIP_SUBMITTED
){
277 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
278 amdgpu_crtc
->pflip_status
,
279 AMDGPU_FLIP_SUBMITTED
,
280 amdgpu_crtc
->crtc_id
,
282 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
287 /* wakeup usersapce */
288 if (amdgpu_crtc
->event
) {
289 /* Update to correct count/ts if racing with vblank irq */
290 drm_crtc_accurate_vblank_count(&amdgpu_crtc
->base
);
292 drm_crtc_send_vblank_event(&amdgpu_crtc
->base
, amdgpu_crtc
->event
);
294 /* page flip completed. clean up */
295 amdgpu_crtc
->event
= NULL
;
300 amdgpu_crtc
->pflip_status
= AMDGPU_FLIP_NONE
;
301 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
303 DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
304 __func__
, amdgpu_crtc
->crtc_id
, amdgpu_crtc
);
306 drm_crtc_vblank_put(&amdgpu_crtc
->base
);
309 static void dm_crtc_high_irq(void *interrupt_params
)
311 struct common_irq_params
*irq_params
= interrupt_params
;
312 struct amdgpu_device
*adev
= irq_params
->adev
;
313 uint8_t crtc_index
= 0;
314 struct amdgpu_crtc
*acrtc
;
316 acrtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_VBLANK
);
319 crtc_index
= acrtc
->crtc_id
;
321 drm_handle_vblank(adev
->ddev
, crtc_index
);
324 static int dm_set_clockgating_state(void *handle
,
325 enum amd_clockgating_state state
)
330 static int dm_set_powergating_state(void *handle
,
331 enum amd_powergating_state state
)
336 /* Prototypes of private functions */
337 static int dm_early_init(void* handle
);
339 static void hotplug_notify_work_func(struct work_struct
*work
)
341 struct amdgpu_display_manager
*dm
= container_of(work
, struct amdgpu_display_manager
, mst_hotplug_work
);
342 struct drm_device
*dev
= dm
->ddev
;
344 drm_kms_helper_hotplug_event(dev
);
348 #include "dal_asic_id.h"
349 /* Allocate memory for FBC compressed data */
350 /* TODO: Dynamic allocation */
351 #define AMDGPU_FBC_SIZE (3840 * 2160 * 4)
353 static void amdgpu_dm_initialize_fbc(struct amdgpu_device
*adev
)
356 struct dm_comressor_info
*compressor
= &adev
->dm
.compressor
;
358 if (!compressor
->bo_ptr
) {
359 r
= amdgpu_bo_create_kernel(adev
, AMDGPU_FBC_SIZE
, PAGE_SIZE
,
360 AMDGPU_GEM_DOMAIN_VRAM
, &compressor
->bo_ptr
,
361 &compressor
->gpu_addr
, &compressor
->cpu_addr
);
364 DRM_ERROR("DM: Failed to initialize fbc\n");
373 * Returns 0 on success
375 static int amdgpu_dm_init(struct amdgpu_device
*adev
)
377 struct dc_init_data init_data
;
378 adev
->dm
.ddev
= adev
->ddev
;
379 adev
->dm
.adev
= adev
;
381 /* Zero all the fields */
382 memset(&init_data
, 0, sizeof(init_data
));
384 /* initialize DAL's lock (for SYNC context use) */
385 spin_lock_init(&adev
->dm
.dal_lock
);
387 /* initialize DAL's mutex */
388 mutex_init(&adev
->dm
.dal_mutex
);
390 if(amdgpu_dm_irq_init(adev
)) {
391 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
395 init_data
.asic_id
.chip_family
= adev
->family
;
397 init_data
.asic_id
.pci_revision_id
= adev
->rev_id
;
398 init_data
.asic_id
.hw_internal_rev
= adev
->external_rev_id
;
400 init_data
.asic_id
.vram_width
= adev
->mc
.vram_width
;
401 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
402 init_data
.asic_id
.atombios_base_address
=
403 adev
->mode_info
.atom_context
->bios
;
405 init_data
.driver
= adev
;
407 adev
->dm
.cgs_device
= amdgpu_cgs_create_device(adev
);
409 if (!adev
->dm
.cgs_device
) {
410 DRM_ERROR("amdgpu: failed to create cgs device.\n");
414 init_data
.cgs_device
= adev
->dm
.cgs_device
;
418 init_data
.dce_environment
= DCE_ENV_PRODUCTION_DRV
;
421 init_data
.log_mask
= DC_DEFAULT_LOG_MASK
;
423 init_data
.log_mask
= DC_MIN_LOG_MASK
;
426 if (adev
->family
== FAMILY_CZ
)
427 amdgpu_dm_initialize_fbc(adev
);
428 init_data
.fbc_gpu_addr
= adev
->dm
.compressor
.gpu_addr
;
430 /* Display Core create. */
431 adev
->dm
.dc
= dc_create(&init_data
);
434 DRM_INFO("Display Core initialized!\n");
436 DRM_INFO("Display Core failed to initialize!\n");
438 INIT_WORK(&adev
->dm
.mst_hotplug_work
, hotplug_notify_work_func
);
440 adev
->dm
.freesync_module
= mod_freesync_create(adev
->dm
.dc
);
441 if (!adev
->dm
.freesync_module
) {
443 "amdgpu: failed to initialize freesync_module.\n");
445 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
446 adev
->dm
.freesync_module
);
448 if (amdgpu_dm_initialize_drm_device(adev
)) {
450 "amdgpu: failed to initialize sw for display support.\n");
454 /* Update the actual used number of crtc */
455 adev
->mode_info
.num_crtc
= adev
->dm
.display_indexes_num
;
457 /* TODO: Add_display_info? */
459 /* TODO use dynamic cursor width */
460 adev
->ddev
->mode_config
.cursor_width
= adev
->dm
.dc
->caps
.max_cursor_size
;
461 adev
->ddev
->mode_config
.cursor_height
= adev
->dm
.dc
->caps
.max_cursor_size
;
463 if (drm_vblank_init(adev
->ddev
, adev
->dm
.display_indexes_num
)) {
465 "amdgpu: failed to initialize sw for display support.\n");
469 DRM_DEBUG_DRIVER("KMS initialized.\n");
473 amdgpu_dm_fini(adev
);
478 static void amdgpu_dm_fini(struct amdgpu_device
*adev
)
480 amdgpu_dm_destroy_drm_device(&adev
->dm
);
482 * TODO: pageflip, vlank interrupt
484 * amdgpu_dm_irq_fini(adev);
487 if (adev
->dm
.cgs_device
) {
488 amdgpu_cgs_destroy_device(adev
->dm
.cgs_device
);
489 adev
->dm
.cgs_device
= NULL
;
491 if (adev
->dm
.freesync_module
) {
492 mod_freesync_destroy(adev
->dm
.freesync_module
);
493 adev
->dm
.freesync_module
= NULL
;
495 /* DC Destroy TODO: Replace destroy DAL */
497 dc_destroy(&adev
->dm
.dc
);
/* SW init/fini: nothing to do for the DM block beyond hw_init/hw_fini. */
static int dm_sw_init(void *handle)
{
	return 0;
}

static int dm_sw_fini(void *handle)
{
	return 0;
}
511 static int detect_mst_link_for_all_connectors(struct drm_device
*dev
)
513 struct amdgpu_dm_connector
*aconnector
;
514 struct drm_connector
*connector
;
517 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
519 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
520 aconnector
= to_amdgpu_dm_connector(connector
);
521 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
) {
522 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
523 aconnector
, aconnector
->base
.base
.id
);
525 ret
= drm_dp_mst_topology_mgr_set_mst(&aconnector
->mst_mgr
, true);
527 DRM_ERROR("DM_MST: Failed to start MST\n");
528 ((struct dc_link
*)aconnector
->dc_link
)->type
= dc_connection_single
;
534 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
538 static int dm_late_init(void *handle
)
540 struct drm_device
*dev
= ((struct amdgpu_device
*)handle
)->ddev
;
542 return detect_mst_link_for_all_connectors(dev
);
545 static void s3_handle_mst(struct drm_device
*dev
, bool suspend
)
547 struct amdgpu_dm_connector
*aconnector
;
548 struct drm_connector
*connector
;
550 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
552 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
553 aconnector
= to_amdgpu_dm_connector(connector
);
554 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
555 !aconnector
->mst_port
) {
558 drm_dp_mst_topology_mgr_suspend(&aconnector
->mst_mgr
);
560 drm_dp_mst_topology_mgr_resume(&aconnector
->mst_mgr
);
564 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
/* HW init: bring up the display manager and enable HPD interrupts. */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/* HW fini: mirror of dm_hw_init — disable HPD/IRQs then tear down DM. */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);

	return 0;
}
588 static int dm_suspend(void *handle
)
590 struct amdgpu_device
*adev
= handle
;
591 struct amdgpu_display_manager
*dm
= &adev
->dm
;
594 s3_handle_mst(adev
->ddev
, true);
596 amdgpu_dm_irq_suspend(adev
);
598 WARN_ON(adev
->dm
.cached_state
);
599 adev
->dm
.cached_state
= drm_atomic_helper_suspend(adev
->ddev
);
601 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D3
);
606 static struct amdgpu_dm_connector
*
607 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state
*state
,
608 struct drm_crtc
*crtc
)
611 struct drm_connector_state
*new_con_state
;
612 struct drm_connector
*connector
;
613 struct drm_crtc
*crtc_from_state
;
615 for_each_new_connector_in_state(state
, connector
, new_con_state
, i
) {
616 crtc_from_state
= new_con_state
->crtc
;
618 if (crtc_from_state
== crtc
)
619 return to_amdgpu_dm_connector(connector
);
625 static int dm_resume(void *handle
)
627 struct amdgpu_device
*adev
= handle
;
628 struct amdgpu_display_manager
*dm
= &adev
->dm
;
630 /* power on hardware */
631 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D0
);
636 int amdgpu_dm_display_resume(struct amdgpu_device
*adev
)
638 struct drm_device
*ddev
= adev
->ddev
;
639 struct amdgpu_display_manager
*dm
= &adev
->dm
;
640 struct amdgpu_dm_connector
*aconnector
;
641 struct drm_connector
*connector
;
642 struct drm_crtc
*crtc
;
643 struct drm_crtc_state
*new_crtc_state
;
647 /* program HPD filter */
650 /* On resume we need to rewrite the MSTM control bits to enamble MST*/
651 s3_handle_mst(ddev
, false);
654 * early enable HPD Rx IRQ, should be done before set mode as short
655 * pulse interrupts are used for MST
657 amdgpu_dm_irq_resume_early(adev
);
660 list_for_each_entry(connector
,
661 &ddev
->mode_config
.connector_list
, head
) {
662 aconnector
= to_amdgpu_dm_connector(connector
);
665 * this is the case when traversing through already created
666 * MST connectors, should be skipped
668 if (aconnector
->mst_port
)
671 mutex_lock(&aconnector
->hpd_lock
);
672 dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
);
673 aconnector
->dc_sink
= NULL
;
674 amdgpu_dm_update_connector_after_detect(aconnector
);
675 mutex_unlock(&aconnector
->hpd_lock
);
678 /* Force mode set in atomic comit */
679 for_each_new_crtc_in_state(adev
->dm
.cached_state
, crtc
, new_crtc_state
, i
)
680 new_crtc_state
->active_changed
= true;
682 ret
= drm_atomic_helper_resume(ddev
, adev
->dm
.cached_state
);
684 drm_atomic_state_put(adev
->dm
.cached_state
);
685 adev
->dm
.cached_state
= NULL
;
687 amdgpu_dm_irq_resume_late(adev
);
692 static const struct amd_ip_funcs amdgpu_dm_funcs
= {
694 .early_init
= dm_early_init
,
695 .late_init
= dm_late_init
,
696 .sw_init
= dm_sw_init
,
697 .sw_fini
= dm_sw_fini
,
698 .hw_init
= dm_hw_init
,
699 .hw_fini
= dm_hw_fini
,
700 .suspend
= dm_suspend
,
702 .is_idle
= dm_is_idle
,
703 .wait_for_idle
= dm_wait_for_idle
,
704 .check_soft_reset
= dm_check_soft_reset
,
705 .soft_reset
= dm_soft_reset
,
706 .set_clockgating_state
= dm_set_clockgating_state
,
707 .set_powergating_state
= dm_set_powergating_state
,
710 const struct amdgpu_ip_block_version dm_ip_block
=
712 .type
= AMD_IP_BLOCK_TYPE_DCE
,
716 .funcs
= &amdgpu_dm_funcs
,
720 static struct drm_atomic_state
*
721 dm_atomic_state_alloc(struct drm_device
*dev
)
723 struct dm_atomic_state
*state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
728 if (drm_atomic_state_init(dev
, &state
->base
) < 0)
739 dm_atomic_state_clear(struct drm_atomic_state
*state
)
741 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
743 if (dm_state
->context
) {
744 dc_release_state(dm_state
->context
);
745 dm_state
->context
= NULL
;
748 drm_atomic_state_default_clear(state
);
/* Free callback: release base-state resources then free the wrapper. */
static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	drm_atomic_state_default_release(state);
	kfree(dm_state);
}
759 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs
= {
760 .fb_create
= amdgpu_user_framebuffer_create
,
761 .output_poll_changed
= amdgpu_output_poll_changed
,
762 .atomic_check
= amdgpu_dm_atomic_check
,
763 .atomic_commit
= amdgpu_dm_atomic_commit
,
764 .atomic_state_alloc
= dm_atomic_state_alloc
,
765 .atomic_state_clear
= dm_atomic_state_clear
,
766 .atomic_state_free
= dm_atomic_state_alloc_free
769 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs
= {
770 .atomic_commit_tail
= amdgpu_dm_atomic_commit_tail
774 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
)
776 struct drm_connector
*connector
= &aconnector
->base
;
777 struct drm_device
*dev
= connector
->dev
;
778 struct dc_sink
*sink
;
780 /* MST handled by drm_mst framework */
781 if (aconnector
->mst_mgr
.mst_state
== true)
785 sink
= aconnector
->dc_link
->local_sink
;
787 /* Edid mgmt connector gets first update only in mode_valid hook and then
788 * the connector sink is set to either fake or physical sink depends on link status.
789 * don't do it here if u are during boot
791 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
792 && aconnector
->dc_em_sink
) {
794 /* For S3 resume with headless use eml_sink to fake stream
795 * because on resume connecotr->sink is set ti NULL
797 mutex_lock(&dev
->mode_config
.mutex
);
800 if (aconnector
->dc_sink
) {
801 amdgpu_dm_remove_sink_from_freesync_module(
803 /* retain and release bellow are used for
804 * bump up refcount for sink because the link don't point
805 * to it anymore after disconnect so on next crtc to connector
806 * reshuffle by UMD we will get into unwanted dc_sink release
808 if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
809 dc_sink_release(aconnector
->dc_sink
);
811 aconnector
->dc_sink
= sink
;
812 amdgpu_dm_add_sink_to_freesync_module(
813 connector
, aconnector
->edid
);
815 amdgpu_dm_remove_sink_from_freesync_module(connector
);
816 if (!aconnector
->dc_sink
)
817 aconnector
->dc_sink
= aconnector
->dc_em_sink
;
818 else if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
819 dc_sink_retain(aconnector
->dc_sink
);
822 mutex_unlock(&dev
->mode_config
.mutex
);
827 * TODO: temporary guard to look for proper fix
828 * if this sink is MST sink, we should not do anything
830 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
)
833 if (aconnector
->dc_sink
== sink
) {
834 /* We got a DP short pulse (Link Loss, DP CTS, etc...).
836 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
837 aconnector
->connector_id
);
841 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
842 aconnector
->connector_id
, aconnector
->dc_sink
, sink
);
844 mutex_lock(&dev
->mode_config
.mutex
);
846 /* 1. Update status of the drm connector
847 * 2. Send an event and let userspace tell us what to do */
849 /* TODO: check if we still need the S3 mode update workaround.
850 * If yes, put it here. */
851 if (aconnector
->dc_sink
)
852 amdgpu_dm_remove_sink_from_freesync_module(
855 aconnector
->dc_sink
= sink
;
856 if (sink
->dc_edid
.length
== 0) {
857 aconnector
->edid
= NULL
;
860 (struct edid
*) sink
->dc_edid
.raw_edid
;
863 drm_mode_connector_update_edid_property(connector
,
866 amdgpu_dm_add_sink_to_freesync_module(connector
, aconnector
->edid
);
869 amdgpu_dm_remove_sink_from_freesync_module(connector
);
870 drm_mode_connector_update_edid_property(connector
, NULL
);
871 aconnector
->num_modes
= 0;
872 aconnector
->dc_sink
= NULL
;
875 mutex_unlock(&dev
->mode_config
.mutex
);
878 static void handle_hpd_irq(void *param
)
880 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
881 struct drm_connector
*connector
= &aconnector
->base
;
882 struct drm_device
*dev
= connector
->dev
;
884 /* In case of failure or MST no need to update connector status or notify the OS
885 * since (for MST case) MST does this in it's own context.
887 mutex_lock(&aconnector
->hpd_lock
);
889 if (aconnector
->fake_enable
)
890 aconnector
->fake_enable
= false;
892 if (dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
)) {
893 amdgpu_dm_update_connector_after_detect(aconnector
);
896 drm_modeset_lock_all(dev
);
897 dm_restore_drm_connector_state(dev
, connector
);
898 drm_modeset_unlock_all(dev
);
900 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
901 drm_kms_helper_hotplug_event(dev
);
903 mutex_unlock(&aconnector
->hpd_lock
);
907 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector
*aconnector
)
909 uint8_t esi
[DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
] = { 0 };
911 bool new_irq_handled
= false;
913 int dpcd_bytes_to_read
;
915 const int max_process_count
= 30;
916 int process_count
= 0;
918 const struct dc_link_status
*link_status
= dc_link_get_status(aconnector
->dc_link
);
920 if (link_status
->dpcd_caps
->dpcd_rev
.raw
< 0x12) {
921 dpcd_bytes_to_read
= DP_LANE0_1_STATUS
- DP_SINK_COUNT
;
922 /* DPCD 0x200 - 0x201 for downstream IRQ */
923 dpcd_addr
= DP_SINK_COUNT
;
925 dpcd_bytes_to_read
= DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
;
926 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
927 dpcd_addr
= DP_SINK_COUNT_ESI
;
930 dret
= drm_dp_dpcd_read(
931 &aconnector
->dm_dp_aux
.aux
,
936 while (dret
== dpcd_bytes_to_read
&&
937 process_count
< max_process_count
) {
943 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi
[0], esi
[1], esi
[2]);
944 /* handle HPD short pulse irq */
945 if (aconnector
->mst_mgr
.mst_state
)
947 &aconnector
->mst_mgr
,
951 if (new_irq_handled
) {
952 /* ACK at DPCD to notify down stream */
953 const int ack_dpcd_bytes_to_write
=
954 dpcd_bytes_to_read
- 1;
956 for (retry
= 0; retry
< 3; retry
++) {
959 wret
= drm_dp_dpcd_write(
960 &aconnector
->dm_dp_aux
.aux
,
963 ack_dpcd_bytes_to_write
);
964 if (wret
== ack_dpcd_bytes_to_write
)
968 /* check if there is new irq to be handle */
969 dret
= drm_dp_dpcd_read(
970 &aconnector
->dm_dp_aux
.aux
,
975 new_irq_handled
= false;
981 if (process_count
== max_process_count
)
982 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
985 static void handle_hpd_rx_irq(void *param
)
987 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
988 struct drm_connector
*connector
= &aconnector
->base
;
989 struct drm_device
*dev
= connector
->dev
;
990 struct dc_link
*dc_link
= aconnector
->dc_link
;
991 bool is_mst_root_connector
= aconnector
->mst_mgr
.mst_state
;
993 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
994 * conflict, after implement i2c helper, this mutex should be
997 if (dc_link
->type
!= dc_connection_mst_branch
)
998 mutex_lock(&aconnector
->hpd_lock
);
1000 if (dc_link_handle_hpd_rx_irq(dc_link
, NULL
) &&
1001 !is_mst_root_connector
) {
1002 /* Downstream Port status changed. */
1003 if (dc_link_detect(dc_link
, DETECT_REASON_HPDRX
)) {
1004 amdgpu_dm_update_connector_after_detect(aconnector
);
1007 drm_modeset_lock_all(dev
);
1008 dm_restore_drm_connector_state(dev
, connector
);
1009 drm_modeset_unlock_all(dev
);
1011 drm_kms_helper_hotplug_event(dev
);
1014 if ((dc_link
->cur_link_settings
.lane_count
!= LANE_COUNT_UNKNOWN
) ||
1015 (dc_link
->type
== dc_connection_mst_branch
))
1016 dm_handle_hpd_rx_irq(aconnector
);
1018 if (dc_link
->type
!= dc_connection_mst_branch
)
1019 mutex_unlock(&aconnector
->hpd_lock
);
1022 static void register_hpd_handlers(struct amdgpu_device
*adev
)
1024 struct drm_device
*dev
= adev
->ddev
;
1025 struct drm_connector
*connector
;
1026 struct amdgpu_dm_connector
*aconnector
;
1027 const struct dc_link
*dc_link
;
1028 struct dc_interrupt_params int_params
= {0};
1030 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1031 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1033 list_for_each_entry(connector
,
1034 &dev
->mode_config
.connector_list
, head
) {
1036 aconnector
= to_amdgpu_dm_connector(connector
);
1037 dc_link
= aconnector
->dc_link
;
1039 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd
) {
1040 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1041 int_params
.irq_source
= dc_link
->irq_source_hpd
;
1043 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1045 (void *) aconnector
);
1048 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd_rx
) {
1050 /* Also register for DP short pulse (hpd_rx). */
1051 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1052 int_params
.irq_source
= dc_link
->irq_source_hpd_rx
;
1054 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1056 (void *) aconnector
);
1061 /* Register IRQ sources and initialize IRQ callbacks */
1062 static int dce110_register_irq_handlers(struct amdgpu_device
*adev
)
1064 struct dc
*dc
= adev
->dm
.dc
;
1065 struct common_irq_params
*c_irq_params
;
1066 struct dc_interrupt_params int_params
= {0};
1069 unsigned client_id
= AMDGPU_IH_CLIENTID_LEGACY
;
1071 if (adev
->asic_type
== CHIP_VEGA10
||
1072 adev
->asic_type
== CHIP_RAVEN
)
1073 client_id
= AMDGPU_IH_CLIENTID_DCE
;
1075 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1076 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1078 /* Actions of amdgpu_irq_add_id():
1079 * 1. Register a set() function with base driver.
1080 * Base driver will call set() function to enable/disable an
1081 * interrupt in DC hardware.
1082 * 2. Register amdgpu_dm_irq_handler().
1083 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1084 * coming from DC hardware.
1085 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1086 * for acknowledging and handling. */
1088 /* Use VBLANK interrupt */
1089 for (i
= VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
; i
<= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0
; i
++) {
1090 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->crtc_irq
);
1092 DRM_ERROR("Failed to add crtc irq id!\n");
1096 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1097 int_params
.irq_source
=
1098 dc_interrupt_to_irq_source(dc
, i
, 0);
1100 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1102 c_irq_params
->adev
= adev
;
1103 c_irq_params
->irq_src
= int_params
.irq_source
;
1105 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1106 dm_crtc_high_irq
, c_irq_params
);
1109 /* Use GRPH_PFLIP interrupt */
1110 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
1111 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
1112 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
1114 DRM_ERROR("Failed to add page flip irq id!\n");
1118 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1119 int_params
.irq_source
=
1120 dc_interrupt_to_irq_source(dc
, i
, 0);
1122 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1124 c_irq_params
->adev
= adev
;
1125 c_irq_params
->irq_src
= int_params
.irq_source
;
1127 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1128 dm_pflip_high_irq
, c_irq_params
);
1133 r
= amdgpu_irq_add_id(adev
, client_id
,
1134 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
1136 DRM_ERROR("Failed to add hpd irq id!\n");
1140 register_hpd_handlers(adev
);
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
1231 static int amdgpu_dm_mode_config_init(struct amdgpu_device
*adev
)
1235 adev
->mode_info
.mode_config_initialized
= true;
1237 adev
->ddev
->mode_config
.funcs
= (void *)&amdgpu_dm_mode_funcs
;
1238 adev
->ddev
->mode_config
.helper_private
= &amdgpu_dm_mode_config_helperfuncs
;
1240 adev
->ddev
->mode_config
.max_width
= 16384;
1241 adev
->ddev
->mode_config
.max_height
= 16384;
1243 adev
->ddev
->mode_config
.preferred_depth
= 24;
1244 adev
->ddev
->mode_config
.prefer_shadow
= 1;
1245 /* indicate support of immediate flip */
1246 adev
->ddev
->mode_config
.async_page_flip
= true;
1248 adev
->ddev
->mode_config
.fb_base
= adev
->mc
.aper_base
;
1250 r
= amdgpu_modeset_create_props(adev
);
1257 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1258 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1260 static int amdgpu_dm_backlight_update_status(struct backlight_device
*bd
)
1262 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
1264 if (dc_link_set_backlight_level(dm
->backlight_link
,
1265 bd
->props
.brightness
, 0, 0))
1271 static int amdgpu_dm_backlight_get_brightness(struct backlight_device
*bd
)
1273 return bd
->props
.brightness
;
1276 static const struct backlight_ops amdgpu_dm_backlight_ops
= {
1277 .get_brightness
= amdgpu_dm_backlight_get_brightness
,
1278 .update_status
= amdgpu_dm_backlight_update_status
,
1282 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager
*dm
)
1285 struct backlight_properties props
= { 0 };
1287 props
.max_brightness
= AMDGPU_MAX_BL_LEVEL
;
1288 props
.type
= BACKLIGHT_RAW
;
1290 snprintf(bl_name
, sizeof(bl_name
), "amdgpu_bl%d",
1291 dm
->adev
->ddev
->primary
->index
);
1293 dm
->backlight_dev
= backlight_device_register(bl_name
,
1294 dm
->adev
->ddev
->dev
,
1296 &amdgpu_dm_backlight_ops
,
1299 if (NULL
== dm
->backlight_dev
)
1300 DRM_ERROR("DM: Backlight registration failed!\n");
1302 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name
);
1307 /* In this architecture, the association
1308 * connector -> encoder -> crtc
1309 * is not really required. The crtc and connector will hold the
1310 * display_index as an abstraction to use with DAL component
1312 * Returns 0 on success
1314 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
)
1316 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1318 struct amdgpu_dm_connector
*aconnector
= NULL
;
1319 struct amdgpu_encoder
*aencoder
= NULL
;
1320 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
1322 unsigned long possible_crtcs
;
1324 link_cnt
= dm
->dc
->caps
.max_links
;
1325 if (amdgpu_dm_mode_config_init(dm
->adev
)) {
1326 DRM_ERROR("DM: Failed to initialize mode config\n");
1330 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; i
++) {
1331 struct amdgpu_plane
*plane
;
1333 plane
= kzalloc(sizeof(struct amdgpu_plane
), GFP_KERNEL
);
1334 mode_info
->planes
[i
] = plane
;
1337 DRM_ERROR("KMS: Failed to allocate plane\n");
1338 goto fail_free_planes
;
1340 plane
->base
.type
= mode_info
->plane_type
[i
];
1343 * HACK: IGT tests expect that each plane can only have one
1344 * one possible CRTC. For now, set one CRTC for each
1345 * plane that is not an underlay, but still allow multiple
1346 * CRTCs for underlay planes.
1348 possible_crtcs
= 1 << i
;
1349 if (i
>= dm
->dc
->caps
.max_streams
)
1350 possible_crtcs
= 0xff;
1352 if (amdgpu_dm_plane_init(dm
, mode_info
->planes
[i
], possible_crtcs
)) {
1353 DRM_ERROR("KMS: Failed to initialize plane\n");
1354 goto fail_free_planes
;
1358 for (i
= 0; i
< dm
->dc
->caps
.max_streams
; i
++)
1359 if (amdgpu_dm_crtc_init(dm
, &mode_info
->planes
[i
]->base
, i
)) {
1360 DRM_ERROR("KMS: Failed to initialize crtc\n");
1361 goto fail_free_planes
;
1364 dm
->display_indexes_num
= dm
->dc
->caps
.max_streams
;
1366 /* loops over all connectors on the board */
1367 for (i
= 0; i
< link_cnt
; i
++) {
1369 if (i
> AMDGPU_DM_MAX_DISPLAY_INDEX
) {
1371 "KMS: Cannot support more than %d display indexes\n",
1372 AMDGPU_DM_MAX_DISPLAY_INDEX
);
1376 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
1378 goto fail_free_planes
;
1380 aencoder
= kzalloc(sizeof(*aencoder
), GFP_KERNEL
);
1382 goto fail_free_connector
;
1384 if (amdgpu_dm_encoder_init(dm
->ddev
, aencoder
, i
)) {
1385 DRM_ERROR("KMS: Failed to initialize encoder\n");
1386 goto fail_free_encoder
;
1389 if (amdgpu_dm_connector_init(dm
, aconnector
, i
, aencoder
)) {
1390 DRM_ERROR("KMS: Failed to initialize connector\n");
1391 goto fail_free_encoder
;
1394 if (dc_link_detect(dc_get_link_at_index(dm
->dc
, i
),
1395 DETECT_REASON_BOOT
))
1396 amdgpu_dm_update_connector_after_detect(aconnector
);
1399 /* Software is initialized. Now we can register interrupt handlers. */
1400 switch (adev
->asic_type
) {
1410 case CHIP_POLARIS11
:
1411 case CHIP_POLARIS10
:
1412 case CHIP_POLARIS12
:
1414 if (dce110_register_irq_handlers(dm
->adev
)) {
1415 DRM_ERROR("DM: Failed to initialize IRQ\n");
1416 goto fail_free_encoder
;
1419 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1421 if (dcn10_register_irq_handlers(dm
->adev
)) {
1422 DRM_ERROR("DM: Failed to initialize IRQ\n");
1423 goto fail_free_encoder
;
1426 * Temporary disable until pplib/smu interaction is implemented
1428 dm
->dc
->debug
.disable_stutter
= true;
1432 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev
->asic_type
);
1433 goto fail_free_encoder
;
1436 drm_mode_config_reset(dm
->ddev
);
1441 fail_free_connector
:
1444 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; i
++)
1445 kfree(mode_info
->planes
[i
]);
1449 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
)
1451 drm_mode_config_cleanup(dm
->ddev
);
1455 /******************************************************************************
1456 * amdgpu_display_funcs functions
1457 *****************************************************************************/
1460 * dm_bandwidth_update - program display watermarks
1462 * @adev: amdgpu_device pointer
1464 * Calculate and program the display watermarks and line buffer allocation.
/* amdgpu_display_funcs.bandwidth_update stub; DC programs watermarks itself. */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
1471 static void dm_set_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
,
1474 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1477 static u8
dm_get_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
)
1479 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1483 static int amdgpu_notify_freesync(struct drm_device
*dev
, void *data
,
1484 struct drm_file
*filp
)
1486 struct mod_freesync_params freesync_params
;
1487 uint8_t num_streams
;
1490 struct amdgpu_device
*adev
= dev
->dev_private
;
1493 /* Get freesync enable flag from DRM */
1495 num_streams
= dc_get_current_stream_count(adev
->dm
.dc
);
1497 for (i
= 0; i
< num_streams
; i
++) {
1498 struct dc_stream_state
*stream
;
1499 stream
= dc_get_stream_at_index(adev
->dm
.dc
, i
);
1501 mod_freesync_update_state(adev
->dm
.freesync_module
,
1502 &stream
, 1, &freesync_params
);
/*
 * amdgpu_display_funcs table installed by dm_early_init(). Hooks the DM
 * does not need are NULL: either DC handles the job internally or the
 * information comes from VBIOS parsing done by DAL.
 */
1508 static const struct amdgpu_display_funcs dm_display_funcs
= {
1509 .bandwidth_update
= dm_bandwidth_update
, /* called unconditionally */
1510 .vblank_get_counter
= dm_vblank_get_counter
,/* called unconditionally */
1511 .vblank_wait
= NULL
,
1512 .backlight_set_level
=
1513 dm_set_backlight_level
,/* called unconditionally */
1514 .backlight_get_level
=
1515 dm_get_backlight_level
,/* called unconditionally */
1516 .hpd_sense
= NULL
,/* called unconditionally */
1517 .hpd_set_polarity
= NULL
, /* called unconditionally */
1518 .hpd_get_gpio_reg
= NULL
, /* VBIOS parsing. DAL does it. */
1519 .page_flip_get_scanoutpos
=
1520 dm_crtc_get_scanoutpos
,/* called unconditionally */
1521 .add_encoder
= NULL
, /* VBIOS parsing. DAL does it. */
1522 .add_connector
= NULL
, /* VBIOS parsing. DAL does it. */
1523 .notify_freesync
= amdgpu_notify_freesync
,
1527 #if defined(CONFIG_DEBUG_KERNEL_DC)
/*
 * Debug-only sysfs store (CONFIG_DEBUG_KERNEL_DC): parses an integer from
 * the written buffer and forces a DM suspend/resume cycle for S3 testing.
 * Returns the consumed byte count on successful parse, 0 otherwise.
 */
1529 static ssize_t
s3_debug_store(struct device
*device
,
1530 struct device_attribute
*attr
,
/* Recover the amdgpu device from the generic struct device. */
1536 struct pci_dev
*pdev
= to_pci_dev(device
);
1537 struct drm_device
*drm_dev
= pci_get_drvdata(pdev
);
1538 struct amdgpu_device
*adev
= drm_dev
->dev_private
;
/* base 0: accepts decimal, octal (0...) and hex (0x...) input. */
1540 ret
= kstrtoint(buf
, 0, &s3_state
);
/* Resume path: bring the display back and kick a hotplug re-probe. */
1545 amdgpu_dm_display_resume(adev
);
1546 drm_kms_helper_hotplug_event(adev
->ddev
);
1551 return ret
== 0 ? count
: 0;
/* Write-only attribute; exposed as /sys/.../s3_debug. */
1554 DEVICE_ATTR_WO(s3_debug
);
1558 static int dm_early_init(void *handle
)
1560 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1562 adev
->ddev
->driver
->driver_features
|= DRIVER_ATOMIC
;
1563 amdgpu_dm_set_irq_funcs(adev
);
1565 switch (adev
->asic_type
) {
1568 adev
->mode_info
.num_crtc
= 6;
1569 adev
->mode_info
.num_hpd
= 6;
1570 adev
->mode_info
.num_dig
= 6;
1571 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1574 adev
->mode_info
.num_crtc
= 4;
1575 adev
->mode_info
.num_hpd
= 6;
1576 adev
->mode_info
.num_dig
= 7;
1577 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1581 adev
->mode_info
.num_crtc
= 2;
1582 adev
->mode_info
.num_hpd
= 6;
1583 adev
->mode_info
.num_dig
= 6;
1584 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1588 adev
->mode_info
.num_crtc
= 6;
1589 adev
->mode_info
.num_hpd
= 6;
1590 adev
->mode_info
.num_dig
= 7;
1591 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1594 adev
->mode_info
.num_crtc
= 3;
1595 adev
->mode_info
.num_hpd
= 6;
1596 adev
->mode_info
.num_dig
= 9;
1597 adev
->mode_info
.plane_type
= dm_plane_type_carizzo
;
1600 adev
->mode_info
.num_crtc
= 2;
1601 adev
->mode_info
.num_hpd
= 6;
1602 adev
->mode_info
.num_dig
= 9;
1603 adev
->mode_info
.plane_type
= dm_plane_type_stoney
;
1605 case CHIP_POLARIS11
:
1606 case CHIP_POLARIS12
:
1607 adev
->mode_info
.num_crtc
= 5;
1608 adev
->mode_info
.num_hpd
= 5;
1609 adev
->mode_info
.num_dig
= 5;
1610 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1612 case CHIP_POLARIS10
:
1613 adev
->mode_info
.num_crtc
= 6;
1614 adev
->mode_info
.num_hpd
= 6;
1615 adev
->mode_info
.num_dig
= 6;
1616 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1619 adev
->mode_info
.num_crtc
= 6;
1620 adev
->mode_info
.num_hpd
= 6;
1621 adev
->mode_info
.num_dig
= 6;
1622 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1624 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1626 adev
->mode_info
.num_crtc
= 4;
1627 adev
->mode_info
.num_hpd
= 4;
1628 adev
->mode_info
.num_dig
= 4;
1629 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1633 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev
->asic_type
);
1637 if (adev
->mode_info
.funcs
== NULL
)
1638 adev
->mode_info
.funcs
= &dm_display_funcs
;
1640 /* Note: Do NOT change adev->audio_endpt_rreg and
1641 * adev->audio_endpt_wreg because they are initialised in
1642 * amdgpu_device_init() */
1643 #if defined(CONFIG_DEBUG_KERNEL_DC)
1646 &dev_attr_s3_debug
);
1652 struct dm_connector_state
{
1653 struct drm_connector_state base
;
1655 enum amdgpu_rmx_type scaling
;
1656 uint8_t underscan_vborder
;
1657 uint8_t underscan_hborder
;
1658 bool underscan_enable
;
1661 #define to_dm_connector_state(x)\
1662 container_of((x), struct dm_connector_state, base)
1664 static bool modeset_required(struct drm_crtc_state
*crtc_state
,
1665 struct dc_stream_state
*new_stream
,
1666 struct dc_stream_state
*old_stream
)
1668 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1671 if (!crtc_state
->enable
)
1674 return crtc_state
->active
;
1677 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
1679 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1682 return !crtc_state
->enable
|| !crtc_state
->active
;
/*
 * drm_encoder_funcs.destroy: unregister the encoder from DRM and release
 * the amdgpu_encoder allocated in amdgpu_dm_initialize_drm_device().
 *
 * Fix: the encoder is kzalloc'd at init time and nothing else owns it,
 * so it must be kfree'd here -- without it every encoder leaks on
 * driver teardown.
 */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
1691 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
1692 .destroy
= amdgpu_dm_encoder_destroy
,
1695 static bool fill_rects_from_plane_state(const struct drm_plane_state
*state
,
1696 struct dc_plane_state
*plane_state
)
1698 plane_state
->src_rect
.x
= state
->src_x
>> 16;
1699 plane_state
->src_rect
.y
= state
->src_y
>> 16;
1700 /*we ignore for now mantissa and do not to deal with floating pixels :(*/
1701 plane_state
->src_rect
.width
= state
->src_w
>> 16;
1703 if (plane_state
->src_rect
.width
== 0)
1706 plane_state
->src_rect
.height
= state
->src_h
>> 16;
1707 if (plane_state
->src_rect
.height
== 0)
1710 plane_state
->dst_rect
.x
= state
->crtc_x
;
1711 plane_state
->dst_rect
.y
= state
->crtc_y
;
1713 if (state
->crtc_w
== 0)
1716 plane_state
->dst_rect
.width
= state
->crtc_w
;
1718 if (state
->crtc_h
== 0)
1721 plane_state
->dst_rect
.height
= state
->crtc_h
;
1723 plane_state
->clip_rect
= plane_state
->dst_rect
;
1725 switch (state
->rotation
& DRM_MODE_ROTATE_MASK
) {
1726 case DRM_MODE_ROTATE_0
:
1727 plane_state
->rotation
= ROTATION_ANGLE_0
;
1729 case DRM_MODE_ROTATE_90
:
1730 plane_state
->rotation
= ROTATION_ANGLE_90
;
1732 case DRM_MODE_ROTATE_180
:
1733 plane_state
->rotation
= ROTATION_ANGLE_180
;
1735 case DRM_MODE_ROTATE_270
:
1736 plane_state
->rotation
= ROTATION_ANGLE_270
;
1739 plane_state
->rotation
= ROTATION_ANGLE_0
;
1745 static int get_fb_info(const struct amdgpu_framebuffer
*amdgpu_fb
,
1746 uint64_t *tiling_flags
,
1747 uint64_t *fb_location
)
1749 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->obj
);
1750 int r
= amdgpu_bo_reserve(rbo
, false);
1753 // Don't show error msg. when return -ERESTARTSYS
1754 if (r
!= -ERESTARTSYS
)
1755 DRM_ERROR("Unable to reserve buffer: %d\n", r
);
1760 *fb_location
= amdgpu_bo_gpu_offset(rbo
);
1763 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
1765 amdgpu_bo_unreserve(rbo
);
1770 static int fill_plane_attributes_from_fb(struct amdgpu_device
*adev
,
1771 struct dc_plane_state
*plane_state
,
1772 const struct amdgpu_framebuffer
*amdgpu_fb
,
1775 uint64_t tiling_flags
;
1776 uint64_t fb_location
= 0;
1777 uint64_t chroma_addr
= 0;
1778 unsigned int awidth
;
1779 const struct drm_framebuffer
*fb
= &amdgpu_fb
->base
;
1781 struct drm_format_name_buf format_name
;
1786 addReq
== true ? &fb_location
:NULL
);
1791 switch (fb
->format
->format
) {
1793 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
1795 case DRM_FORMAT_RGB565
:
1796 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
1798 case DRM_FORMAT_XRGB8888
:
1799 case DRM_FORMAT_ARGB8888
:
1800 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
1802 case DRM_FORMAT_XRGB2101010
:
1803 case DRM_FORMAT_ARGB2101010
:
1804 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
1806 case DRM_FORMAT_XBGR2101010
:
1807 case DRM_FORMAT_ABGR2101010
:
1808 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
1810 case DRM_FORMAT_NV21
:
1811 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
1813 case DRM_FORMAT_NV12
:
1814 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
1817 DRM_ERROR("Unsupported screen format %s\n",
1818 drm_get_format_name(fb
->format
->format
, &format_name
));
1822 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
1823 plane_state
->address
.type
= PLN_ADDR_TYPE_GRAPHICS
;
1824 plane_state
->address
.grph
.addr
.low_part
= lower_32_bits(fb_location
);
1825 plane_state
->address
.grph
.addr
.high_part
= upper_32_bits(fb_location
);
1826 plane_state
->plane_size
.grph
.surface_size
.x
= 0;
1827 plane_state
->plane_size
.grph
.surface_size
.y
= 0;
1828 plane_state
->plane_size
.grph
.surface_size
.width
= fb
->width
;
1829 plane_state
->plane_size
.grph
.surface_size
.height
= fb
->height
;
1830 plane_state
->plane_size
.grph
.surface_pitch
=
1831 fb
->pitches
[0] / fb
->format
->cpp
[0];
1832 /* TODO: unhardcode */
1833 plane_state
->color_space
= COLOR_SPACE_SRGB
;
1836 awidth
= ALIGN(fb
->width
, 64);
1837 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
1838 plane_state
->address
.video_progressive
.luma_addr
.low_part
1839 = lower_32_bits(fb_location
);
1840 plane_state
->address
.video_progressive
.luma_addr
.high_part
1841 = upper_32_bits(fb_location
);
1842 chroma_addr
= fb_location
+ (u64
)(awidth
* fb
->height
);
1843 plane_state
->address
.video_progressive
.chroma_addr
.low_part
1844 = lower_32_bits(chroma_addr
);
1845 plane_state
->address
.video_progressive
.chroma_addr
.high_part
1846 = upper_32_bits(chroma_addr
);
1847 plane_state
->plane_size
.video
.luma_size
.x
= 0;
1848 plane_state
->plane_size
.video
.luma_size
.y
= 0;
1849 plane_state
->plane_size
.video
.luma_size
.width
= awidth
;
1850 plane_state
->plane_size
.video
.luma_size
.height
= fb
->height
;
1851 /* TODO: unhardcode */
1852 plane_state
->plane_size
.video
.luma_pitch
= awidth
;
1854 plane_state
->plane_size
.video
.chroma_size
.x
= 0;
1855 plane_state
->plane_size
.video
.chroma_size
.y
= 0;
1856 plane_state
->plane_size
.video
.chroma_size
.width
= awidth
;
1857 plane_state
->plane_size
.video
.chroma_size
.height
= fb
->height
;
1858 plane_state
->plane_size
.video
.chroma_pitch
= awidth
/ 2;
1860 /* TODO: unhardcode */
1861 plane_state
->color_space
= COLOR_SPACE_YCBCR709
;
1864 memset(&plane_state
->tiling_info
, 0, sizeof(plane_state
->tiling_info
));
1866 /* Fill GFX8 params */
1867 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
) {
1868 unsigned int bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
1870 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
1871 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
1872 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
1873 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
1874 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
1876 /* XXX fix me for VI */
1877 plane_state
->tiling_info
.gfx8
.num_banks
= num_banks
;
1878 plane_state
->tiling_info
.gfx8
.array_mode
=
1879 DC_ARRAY_2D_TILED_THIN1
;
1880 plane_state
->tiling_info
.gfx8
.tile_split
= tile_split
;
1881 plane_state
->tiling_info
.gfx8
.bank_width
= bankw
;
1882 plane_state
->tiling_info
.gfx8
.bank_height
= bankh
;
1883 plane_state
->tiling_info
.gfx8
.tile_aspect
= mtaspect
;
1884 plane_state
->tiling_info
.gfx8
.tile_mode
=
1885 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
1886 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
1887 == DC_ARRAY_1D_TILED_THIN1
) {
1888 plane_state
->tiling_info
.gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
1891 plane_state
->tiling_info
.gfx8
.pipe_config
=
1892 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
1894 if (adev
->asic_type
== CHIP_VEGA10
||
1895 adev
->asic_type
== CHIP_RAVEN
) {
1896 /* Fill GFX9 params */
1897 plane_state
->tiling_info
.gfx9
.num_pipes
=
1898 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
1899 plane_state
->tiling_info
.gfx9
.num_banks
=
1900 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
1901 plane_state
->tiling_info
.gfx9
.pipe_interleave
=
1902 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
1903 plane_state
->tiling_info
.gfx9
.num_shader_engines
=
1904 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
1905 plane_state
->tiling_info
.gfx9
.max_compressed_frags
=
1906 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
1907 plane_state
->tiling_info
.gfx9
.num_rb_per_se
=
1908 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
1909 plane_state
->tiling_info
.gfx9
.swizzle
=
1910 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
1911 plane_state
->tiling_info
.gfx9
.shaderEnable
= 1;
1914 plane_state
->visible
= true;
1915 plane_state
->scaling_quality
.h_taps_c
= 0;
1916 plane_state
->scaling_quality
.v_taps_c
= 0;
1918 /* is this needed? is plane_state zeroed at allocation? */
1919 plane_state
->scaling_quality
.h_taps
= 0;
1920 plane_state
->scaling_quality
.v_taps
= 0;
1921 plane_state
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
1927 static void fill_gamma_from_crtc_state(const struct drm_crtc_state
*crtc_state
,
1928 struct dc_plane_state
*plane_state
)
1931 struct dc_gamma
*gamma
;
1932 struct drm_color_lut
*lut
=
1933 (struct drm_color_lut
*) crtc_state
->gamma_lut
->data
;
1935 gamma
= dc_create_gamma();
1937 if (gamma
== NULL
) {
1942 gamma
->type
= GAMMA_RGB_256
;
1943 gamma
->num_entries
= GAMMA_RGB_256_ENTRIES
;
1944 for (i
= 0; i
< GAMMA_RGB_256_ENTRIES
; i
++) {
1945 gamma
->entries
.red
[i
] = dal_fixed31_32_from_int(lut
[i
].red
);
1946 gamma
->entries
.green
[i
] = dal_fixed31_32_from_int(lut
[i
].green
);
1947 gamma
->entries
.blue
[i
] = dal_fixed31_32_from_int(lut
[i
].blue
);
1950 plane_state
->gamma_correction
= gamma
;
1953 static int fill_plane_attributes(struct amdgpu_device
*adev
,
1954 struct dc_plane_state
*dc_plane_state
,
1955 struct drm_plane_state
*plane_state
,
1956 struct drm_crtc_state
*crtc_state
,
1959 const struct amdgpu_framebuffer
*amdgpu_fb
=
1960 to_amdgpu_framebuffer(plane_state
->fb
);
1961 const struct drm_crtc
*crtc
= plane_state
->crtc
;
1962 struct dc_transfer_func
*input_tf
;
1965 if (!fill_rects_from_plane_state(plane_state
, dc_plane_state
))
1968 ret
= fill_plane_attributes_from_fb(
1969 crtc
->dev
->dev_private
,
1977 input_tf
= dc_create_transfer_func();
1979 if (input_tf
== NULL
)
1982 input_tf
->type
= TF_TYPE_PREDEFINED
;
1983 input_tf
->tf
= TRANSFER_FUNCTION_SRGB
;
1985 dc_plane_state
->in_transfer_func
= input_tf
;
1987 /* In case of gamma set, update gamma value */
1988 if (crtc_state
->gamma_lut
)
1989 fill_gamma_from_crtc_state(crtc_state
, dc_plane_state
);
1994 /*****************************************************************************/
1996 static void update_stream_scaling_settings(const struct drm_display_mode
*mode
,
1997 const struct dm_connector_state
*dm_state
,
1998 struct dc_stream_state
*stream
)
2000 enum amdgpu_rmx_type rmx_type
;
2002 struct rect src
= { 0 }; /* viewport in composition space*/
2003 struct rect dst
= { 0 }; /* stream addressable area */
2005 /* no mode. nothing to be done */
2009 /* Full screen scaling by default */
2010 src
.width
= mode
->hdisplay
;
2011 src
.height
= mode
->vdisplay
;
2012 dst
.width
= stream
->timing
.h_addressable
;
2013 dst
.height
= stream
->timing
.v_addressable
;
2015 rmx_type
= dm_state
->scaling
;
2016 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
2017 if (src
.width
* dst
.height
<
2018 src
.height
* dst
.width
) {
2019 /* height needs less upscaling/more downscaling */
2020 dst
.width
= src
.width
*
2021 dst
.height
/ src
.height
;
2023 /* width needs less upscaling/more downscaling */
2024 dst
.height
= src
.height
*
2025 dst
.width
/ src
.width
;
2027 } else if (rmx_type
== RMX_CENTER
) {
2031 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
2032 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
2034 if (dm_state
->underscan_enable
) {
2035 dst
.x
+= dm_state
->underscan_hborder
/ 2;
2036 dst
.y
+= dm_state
->underscan_vborder
/ 2;
2037 dst
.width
-= dm_state
->underscan_hborder
;
2038 dst
.height
-= dm_state
->underscan_vborder
;
2044 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
2045 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
2049 static enum dc_color_depth
2050 convert_color_depth_from_display_info(const struct drm_connector
*connector
)
2052 uint32_t bpc
= connector
->display_info
.bpc
;
2054 /* Limited color depth to 8bit
2055 * TODO: Still need to handle deep color
2062 /* Temporary Work around, DRM don't parse color depth for
2063 * EDID revision before 1.4
2064 * TODO: Fix edid parsing
2066 return COLOR_DEPTH_888
;
2068 return COLOR_DEPTH_666
;
2070 return COLOR_DEPTH_888
;
2072 return COLOR_DEPTH_101010
;
2074 return COLOR_DEPTH_121212
;
2076 return COLOR_DEPTH_141414
;
2078 return COLOR_DEPTH_161616
;
2080 return COLOR_DEPTH_UNDEFINED
;
2084 static enum dc_aspect_ratio
2085 get_aspect_ratio(const struct drm_display_mode
*mode_in
)
2087 int32_t width
= mode_in
->crtc_hdisplay
* 9;
2088 int32_t height
= mode_in
->crtc_vdisplay
* 16;
2090 if ((width
- height
) < 10 && (width
- height
) > -10)
2091 return ASPECT_RATIO_16_9
;
2093 return ASPECT_RATIO_4_3
;
2096 static enum dc_color_space
2097 get_output_color_space(const struct dc_crtc_timing
*dc_crtc_timing
)
2099 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
2101 switch (dc_crtc_timing
->pixel_encoding
) {
2102 case PIXEL_ENCODING_YCBCR422
:
2103 case PIXEL_ENCODING_YCBCR444
:
2104 case PIXEL_ENCODING_YCBCR420
:
2107 * 27030khz is the separation point between HDTV and SDTV
2108 * according to HDMI spec, we use YCbCr709 and YCbCr601
2111 if (dc_crtc_timing
->pix_clk_khz
> 27030) {
2112 if (dc_crtc_timing
->flags
.Y_ONLY
)
2114 COLOR_SPACE_YCBCR709_LIMITED
;
2116 color_space
= COLOR_SPACE_YCBCR709
;
2118 if (dc_crtc_timing
->flags
.Y_ONLY
)
2120 COLOR_SPACE_YCBCR601_LIMITED
;
2122 color_space
= COLOR_SPACE_YCBCR601
;
2127 case PIXEL_ENCODING_RGB
:
2128 color_space
= COLOR_SPACE_SRGB
;
2139 /*****************************************************************************/
2142 fill_stream_properties_from_drm_display_mode(struct dc_stream_state
*stream
,
2143 const struct drm_display_mode
*mode_in
,
2144 const struct drm_connector
*connector
)
2146 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
2148 memset(timing_out
, 0, sizeof(struct dc_crtc_timing
));
2150 timing_out
->h_border_left
= 0;
2151 timing_out
->h_border_right
= 0;
2152 timing_out
->v_border_top
= 0;
2153 timing_out
->v_border_bottom
= 0;
2154 /* TODO: un-hardcode */
2156 if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
2157 && stream
->sink
->sink_signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
2158 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
2160 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
2162 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
2163 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
2165 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
2166 timing_out
->hdmi_vic
= 0;
2167 timing_out
->vic
= drm_match_cea_mode(mode_in
);
2169 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
2170 timing_out
->h_total
= mode_in
->crtc_htotal
;
2171 timing_out
->h_sync_width
=
2172 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
2173 timing_out
->h_front_porch
=
2174 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
2175 timing_out
->v_total
= mode_in
->crtc_vtotal
;
2176 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
2177 timing_out
->v_front_porch
=
2178 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
2179 timing_out
->v_sync_width
=
2180 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
2181 timing_out
->pix_clk_khz
= mode_in
->crtc_clock
;
2182 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
2183 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
2184 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
2185 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
2186 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
2188 stream
->output_color_space
= get_output_color_space(timing_out
);
2191 struct dc_transfer_func
*tf
= dc_create_transfer_func();
2193 tf
->type
= TF_TYPE_PREDEFINED
;
2194 tf
->tf
= TRANSFER_FUNCTION_SRGB
;
2195 stream
->out_transfer_func
= tf
;
2199 static void fill_audio_info(struct audio_info
*audio_info
,
2200 const struct drm_connector
*drm_connector
,
2201 const struct dc_sink
*dc_sink
)
2204 int cea_revision
= 0;
2205 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
2207 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
2208 audio_info
->product_id
= edid_caps
->product_id
;
2210 cea_revision
= drm_connector
->display_info
.cea_rev
;
2212 strncpy(audio_info
->display_name
,
2213 edid_caps
->display_name
,
2214 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
- 1);
2216 if (cea_revision
>= 3) {
2217 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
2219 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
2220 audio_info
->modes
[i
].format_code
=
2221 (enum audio_format_code
)
2222 (edid_caps
->audio_modes
[i
].format_code
);
2223 audio_info
->modes
[i
].channel_count
=
2224 edid_caps
->audio_modes
[i
].channel_count
;
2225 audio_info
->modes
[i
].sample_rates
.all
=
2226 edid_caps
->audio_modes
[i
].sample_rate
;
2227 audio_info
->modes
[i
].sample_size
=
2228 edid_caps
->audio_modes
[i
].sample_size
;
2232 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
2234 /* TODO: We only check for the progressive mode, check for interlace mode too */
2235 if (drm_connector
->latency_present
[0]) {
2236 audio_info
->video_latency
= drm_connector
->video_latency
[0];
2237 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
2240 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2245 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode
*src_mode
,
2246 struct drm_display_mode
*dst_mode
)
2248 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
2249 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
2250 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
2251 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
2252 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
2253 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
2254 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
2255 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
2256 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
2257 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
2258 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
2259 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
2260 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
2261 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
2265 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode
*drm_mode
,
2266 const struct drm_display_mode
*native_mode
,
2269 if (scale_enabled
) {
2270 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2271 } else if (native_mode
->clock
== drm_mode
->clock
&&
2272 native_mode
->htotal
== drm_mode
->htotal
&&
2273 native_mode
->vtotal
== drm_mode
->vtotal
) {
2274 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2276 /* no scaling nor amdgpu inserted, no need to patch */
2280 static void create_fake_sink(struct amdgpu_dm_connector
*aconnector
)
2282 struct dc_sink
*sink
= NULL
;
2283 struct dc_sink_init_data sink_init_data
= { 0 };
2285 sink_init_data
.link
= aconnector
->dc_link
;
2286 sink_init_data
.sink_signal
= aconnector
->dc_link
->connector_signal
;
2288 sink
= dc_sink_create(&sink_init_data
);
2290 DRM_ERROR("Failed to create sink!\n");
2292 sink
->sink_signal
= SIGNAL_TYPE_VIRTUAL
;
2293 aconnector
->fake_enable
= true;
2295 aconnector
->dc_sink
= sink
;
2296 aconnector
->dc_link
->local_sink
= sink
;
2299 static struct dc_stream_state
*
2300 create_stream_for_sink(struct amdgpu_dm_connector
*aconnector
,
2301 const struct drm_display_mode
*drm_mode
,
2302 const struct dm_connector_state
*dm_state
)
2304 struct drm_display_mode
*preferred_mode
= NULL
;
2305 const struct drm_connector
*drm_connector
;
2306 struct dc_stream_state
*stream
= NULL
;
2307 struct drm_display_mode mode
= *drm_mode
;
2308 bool native_mode_found
= false;
2310 if (aconnector
== NULL
) {
2311 DRM_ERROR("aconnector is NULL!\n");
2312 goto drm_connector_null
;
2315 if (dm_state
== NULL
) {
2316 DRM_ERROR("dm_state is NULL!\n");
2320 drm_connector
= &aconnector
->base
;
2322 if (!aconnector
->dc_sink
) {
2324 * Exclude MST from creating fake_sink
2325 * TODO: need to enable MST into fake_sink feature
2327 if (aconnector
->mst_port
)
2328 goto stream_create_fail
;
2330 create_fake_sink(aconnector
);
2333 stream
= dc_create_stream_for_sink(aconnector
->dc_sink
);
2335 if (stream
== NULL
) {
2336 DRM_ERROR("Failed to create stream for sink!\n");
2337 goto stream_create_fail
;
2340 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
2341 /* Search for preferred mode */
2342 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
2343 native_mode_found
= true;
2347 if (!native_mode_found
)
2348 preferred_mode
= list_first_entry_or_null(
2349 &aconnector
->base
.modes
,
2350 struct drm_display_mode
,
2353 if (preferred_mode
== NULL
) {
2354 /* This may not be an error, the use case is when we we have no
2355 * usermode calls to reset and set mode upon hotplug. In this
2356 * case, we call set mode ourselves to restore the previous mode
2357 * and the modelist may not be filled in in time.
2359 DRM_DEBUG_DRIVER("No preferred mode found\n");
2361 decide_crtc_timing_for_drm_display_mode(
2362 &mode
, preferred_mode
,
2363 dm_state
->scaling
!= RMX_OFF
);
2366 fill_stream_properties_from_drm_display_mode(stream
,
2367 &mode
, &aconnector
->base
);
2368 update_stream_scaling_settings(&mode
, dm_state
, stream
);
2371 &stream
->audio_info
,
2373 aconnector
->dc_sink
);
/* Tear down a CRTC: release DRM bookkeeping, then free the wrapper. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
2387 static void dm_crtc_destroy_state(struct drm_crtc
*crtc
,
2388 struct drm_crtc_state
*state
)
2390 struct dm_crtc_state
*cur
= to_dm_crtc_state(state
);
2392 /* TODO Destroy dc_stream objects are stream object is flattened */
2394 dc_stream_release(cur
->stream
);
2397 __drm_atomic_helper_crtc_destroy_state(state
);
2403 static void dm_crtc_reset_state(struct drm_crtc
*crtc
)
2405 struct dm_crtc_state
*state
;
2408 dm_crtc_destroy_state(crtc
, crtc
->state
);
2410 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2411 if (WARN_ON(!state
))
2414 crtc
->state
= &state
->base
;
2415 crtc
->state
->crtc
= crtc
;
2419 static struct drm_crtc_state
*
2420 dm_crtc_duplicate_state(struct drm_crtc
*crtc
)
2422 struct dm_crtc_state
*state
, *cur
;
2424 cur
= to_dm_crtc_state(crtc
->state
);
2426 if (WARN_ON(!crtc
->state
))
2429 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2431 __drm_atomic_helper_crtc_duplicate_state(crtc
, &state
->base
);
2434 state
->stream
= cur
->stream
;
2435 dc_stream_retain(state
->stream
);
2438 /* TODO Duplicate dc_stream after objects are stream object is flattened */
2440 return &state
->base
;
2443 /* Implemented only the options currently availible for the driver */
2444 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
2445 .reset
= dm_crtc_reset_state
,
2446 .destroy
= amdgpu_dm_crtc_destroy
,
2447 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
2448 .set_config
= drm_atomic_helper_set_config
,
2449 .page_flip
= drm_atomic_helper_page_flip
,
2450 .atomic_duplicate_state
= dm_crtc_duplicate_state
,
2451 .atomic_destroy_state
= dm_crtc_destroy_state
,
2454 static enum drm_connector_status
2455 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
2458 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2461 * 1. This interface is NOT called in context of HPD irq.
2462 * 2. This interface *is called* in context of user-mode ioctl. Which
2463 * makes it a bad place for *any* MST-related activit. */
2465 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
&&
2466 !aconnector
->fake_enable
)
2467 connected
= (aconnector
->dc_sink
!= NULL
);
2469 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
2471 return (connected
? connector_status_connected
:
2472 connector_status_disconnected
);
2475 int amdgpu_dm_connector_atomic_set_property(struct drm_connector
*connector
,
2476 struct drm_connector_state
*connector_state
,
2477 struct drm_property
*property
,
2480 struct drm_device
*dev
= connector
->dev
;
2481 struct amdgpu_device
*adev
= dev
->dev_private
;
2482 struct dm_connector_state
*dm_old_state
=
2483 to_dm_connector_state(connector
->state
);
2484 struct dm_connector_state
*dm_new_state
=
2485 to_dm_connector_state(connector_state
);
2489 if (property
== dev
->mode_config
.scaling_mode_property
) {
2490 enum amdgpu_rmx_type rmx_type
;
2493 case DRM_MODE_SCALE_CENTER
:
2494 rmx_type
= RMX_CENTER
;
2496 case DRM_MODE_SCALE_ASPECT
:
2497 rmx_type
= RMX_ASPECT
;
2499 case DRM_MODE_SCALE_FULLSCREEN
:
2500 rmx_type
= RMX_FULL
;
2502 case DRM_MODE_SCALE_NONE
:
2508 if (dm_old_state
->scaling
== rmx_type
)
2511 dm_new_state
->scaling
= rmx_type
;
2513 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2514 dm_new_state
->underscan_hborder
= val
;
2516 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2517 dm_new_state
->underscan_vborder
= val
;
2519 } else if (property
== adev
->mode_info
.underscan_property
) {
2520 dm_new_state
->underscan_enable
= val
;
2527 int amdgpu_dm_connector_atomic_get_property(struct drm_connector
*connector
,
2528 const struct drm_connector_state
*state
,
2529 struct drm_property
*property
,
2532 struct drm_device
*dev
= connector
->dev
;
2533 struct amdgpu_device
*adev
= dev
->dev_private
;
2534 struct dm_connector_state
*dm_state
=
2535 to_dm_connector_state(state
);
2538 if (property
== dev
->mode_config
.scaling_mode_property
) {
2539 switch (dm_state
->scaling
) {
2541 *val
= DRM_MODE_SCALE_CENTER
;
2544 *val
= DRM_MODE_SCALE_ASPECT
;
2547 *val
= DRM_MODE_SCALE_FULLSCREEN
;
2551 *val
= DRM_MODE_SCALE_NONE
;
2555 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2556 *val
= dm_state
->underscan_hborder
;
2558 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2559 *val
= dm_state
->underscan_vborder
;
2561 } else if (property
== adev
->mode_info
.underscan_property
) {
2562 *val
= dm_state
->underscan_enable
;
2568 static void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
2570 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2571 const struct dc_link
*link
= aconnector
->dc_link
;
2572 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
2573 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2574 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2575 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2577 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
2578 amdgpu_dm_register_backlight_device(dm
);
2580 if (dm
->backlight_dev
) {
2581 backlight_device_unregister(dm
->backlight_dev
);
2582 dm
->backlight_dev
= NULL
;
2587 drm_connector_unregister(connector
);
2588 drm_connector_cleanup(connector
);
2592 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
2594 struct dm_connector_state
*state
=
2595 to_dm_connector_state(connector
->state
);
2599 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2602 state
->scaling
= RMX_OFF
;
2603 state
->underscan_enable
= false;
2604 state
->underscan_hborder
= 0;
2605 state
->underscan_vborder
= 0;
2607 connector
->state
= &state
->base
;
2608 connector
->state
->connector
= connector
;
2612 struct drm_connector_state
*
2613 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector
*connector
)
2615 struct dm_connector_state
*state
=
2616 to_dm_connector_state(connector
->state
);
2618 struct dm_connector_state
*new_state
=
2619 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
2622 __drm_atomic_helper_connector_duplicate_state(connector
,
2624 return &new_state
->base
;
2630 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
2631 .reset
= amdgpu_dm_connector_funcs_reset
,
2632 .detect
= amdgpu_dm_connector_detect
,
2633 .fill_modes
= drm_helper_probe_single_connector_modes
,
2634 .destroy
= amdgpu_dm_connector_destroy
,
2635 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
2636 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
2637 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
2638 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
2641 static struct drm_encoder
*best_encoder(struct drm_connector
*connector
)
2643 int enc_id
= connector
->encoder_ids
[0];
2644 struct drm_mode_object
*obj
;
2645 struct drm_encoder
*encoder
;
2647 DRM_DEBUG_DRIVER("Finding the best encoder\n");
2649 /* pick the encoder ids */
2651 obj
= drm_mode_object_find(connector
->dev
, NULL
, enc_id
, DRM_MODE_OBJECT_ENCODER
);
2653 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2656 encoder
= obj_to_encoder(obj
);
2659 DRM_ERROR("No encoder id\n");
/* Thin helper-funcs adapter: forward .get_modes to the connector routine. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
2668 static void create_eml_sink(struct amdgpu_dm_connector
*aconnector
)
2670 struct dc_sink_init_data init_params
= {
2671 .link
= aconnector
->dc_link
,
2672 .sink_signal
= SIGNAL_TYPE_VIRTUAL
2674 struct edid
*edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
2676 if (!aconnector
->base
.edid_blob_ptr
||
2677 !aconnector
->base
.edid_blob_ptr
->data
) {
2678 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
2679 aconnector
->base
.name
);
2681 aconnector
->base
.force
= DRM_FORCE_OFF
;
2682 aconnector
->base
.override_edid
= false;
2686 aconnector
->edid
= edid
;
2688 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
2689 aconnector
->dc_link
,
2691 (edid
->extensions
+ 1) * EDID_LENGTH
,
2694 if (aconnector
->base
.force
== DRM_FORCE_ON
)
2695 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
2696 aconnector
->dc_link
->local_sink
:
2697 aconnector
->dc_em_sink
;
2700 static void handle_edid_mgmt(struct amdgpu_dm_connector
*aconnector
)
2702 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
2704 /* In case of headless boot with force on for DP managed connector
2705 * Those settings have to be != 0 to get initial modeset
2707 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
2708 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
2709 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
2713 aconnector
->base
.override_edid
= true;
2714 create_eml_sink(aconnector
);
2717 int amdgpu_dm_connector_mode_valid(struct drm_connector
*connector
,
2718 struct drm_display_mode
*mode
)
2720 int result
= MODE_ERROR
;
2721 struct dc_sink
*dc_sink
;
2722 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
2723 /* TODO: Unhardcode stream count */
2724 struct dc_stream_state
*stream
;
2725 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2727 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
2728 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
2731 /* Only run this the first time mode_valid is called to initilialize
2734 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
2735 !aconnector
->dc_em_sink
)
2736 handle_edid_mgmt(aconnector
);
2738 dc_sink
= to_amdgpu_dm_connector(connector
)->dc_sink
;
2740 if (dc_sink
== NULL
) {
2741 DRM_ERROR("dc_sink is NULL!\n");
2745 stream
= dc_create_stream_for_sink(dc_sink
);
2746 if (stream
== NULL
) {
2747 DRM_ERROR("Failed to create stream for sink!\n");
2751 drm_mode_set_crtcinfo(mode
, 0);
2752 fill_stream_properties_from_drm_display_mode(stream
, mode
, connector
);
2754 stream
->src
.width
= mode
->hdisplay
;
2755 stream
->src
.height
= mode
->vdisplay
;
2756 stream
->dst
= stream
->src
;
2758 if (dc_validate_stream(adev
->dm
.dc
, stream
) == DC_OK
)
2761 dc_stream_release(stream
);
2764 /* TODO: error handling*/
2768 static const struct drm_connector_helper_funcs
2769 amdgpu_dm_connector_helper_funcs
= {
2771 * If hotplug a second bigger display in FB Con mode, bigger resolution
2772 * modes will be filtered by drm_mode_validate_size(), and those modes
2773 * is missing after user start lightdm. So we need to renew modes list.
2774 * in get_modes call back, not just return the modes count
2776 .get_modes
= get_modes
,
2777 .mode_valid
= amdgpu_dm_connector_mode_valid
,
2778 .best_encoder
= best_encoder
/* Intentionally empty: CRTC disable is handled through the DC atomic path. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
2785 static int dm_crtc_helper_atomic_check(struct drm_crtc
*crtc
,
2786 struct drm_crtc_state
*state
)
2788 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
2789 struct dc
*dc
= adev
->dm
.dc
;
2790 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(state
);
2793 if (unlikely(!dm_crtc_state
->stream
&&
2794 modeset_required(state
, NULL
, dm_crtc_state
->stream
))) {
2799 /* In some use cases, like reset, no stream is attached */
2800 if (!dm_crtc_state
->stream
)
2803 if (dc_validate_stream(dc
, dm_crtc_state
->stream
) == DC_OK
)
2809 static bool dm_crtc_helper_mode_fixup(struct drm_crtc
*crtc
,
2810 const struct drm_display_mode
*mode
,
2811 struct drm_display_mode
*adjusted_mode
)
2816 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
2817 .disable
= dm_crtc_helper_disable
,
2818 .atomic_check
= dm_crtc_helper_atomic_check
,
2819 .mode_fixup
= dm_crtc_helper_mode_fixup
/* Intentionally empty: encoder disable is handled through the DC path. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
/* No encoder-level constraints to enforce: always succeed. */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	return 0;
}
2834 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
2835 .disable
= dm_encoder_helper_disable
,
2836 .atomic_check
= dm_encoder_helper_atomic_check
2839 static void dm_drm_plane_reset(struct drm_plane
*plane
)
2841 struct dm_plane_state
*amdgpu_state
= NULL
;
2844 plane
->funcs
->atomic_destroy_state(plane
, plane
->state
);
2846 amdgpu_state
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
2847 WARN_ON(amdgpu_state
== NULL
);
2850 plane
->state
= &amdgpu_state
->base
;
2851 plane
->state
->plane
= plane
;
2852 plane
->state
->rotation
= DRM_MODE_ROTATE_0
;
2856 static struct drm_plane_state
*
2857 dm_drm_plane_duplicate_state(struct drm_plane
*plane
)
2859 struct dm_plane_state
*dm_plane_state
, *old_dm_plane_state
;
2861 old_dm_plane_state
= to_dm_plane_state(plane
->state
);
2862 dm_plane_state
= kzalloc(sizeof(*dm_plane_state
), GFP_KERNEL
);
2863 if (!dm_plane_state
)
2866 __drm_atomic_helper_plane_duplicate_state(plane
, &dm_plane_state
->base
);
2868 if (old_dm_plane_state
->dc_state
) {
2869 dm_plane_state
->dc_state
= old_dm_plane_state
->dc_state
;
2870 dc_plane_state_retain(dm_plane_state
->dc_state
);
2873 return &dm_plane_state
->base
;
2876 void dm_drm_plane_destroy_state(struct drm_plane
*plane
,
2877 struct drm_plane_state
*state
)
2879 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
2881 if (dm_plane_state
->dc_state
)
2882 dc_plane_state_release(dm_plane_state
->dc_state
);
2884 drm_atomic_helper_plane_destroy_state(plane
, state
);
2887 static const struct drm_plane_funcs dm_plane_funcs
= {
2888 .update_plane
= drm_atomic_helper_update_plane
,
2889 .disable_plane
= drm_atomic_helper_disable_plane
,
2890 .destroy
= drm_plane_cleanup
,
2891 .reset
= dm_drm_plane_reset
,
2892 .atomic_duplicate_state
= dm_drm_plane_duplicate_state
,
2893 .atomic_destroy_state
= dm_drm_plane_destroy_state
,
2896 static int dm_plane_helper_prepare_fb(struct drm_plane
*plane
,
2897 struct drm_plane_state
*new_state
)
2899 struct amdgpu_framebuffer
*afb
;
2900 struct drm_gem_object
*obj
;
2901 struct amdgpu_bo
*rbo
;
2902 uint64_t chroma_addr
= 0;
2904 struct dm_plane_state
*dm_plane_state_new
, *dm_plane_state_old
;
2905 unsigned int awidth
;
2907 dm_plane_state_old
= to_dm_plane_state(plane
->state
);
2908 dm_plane_state_new
= to_dm_plane_state(new_state
);
2910 if (!new_state
->fb
) {
2911 DRM_DEBUG_DRIVER("No FB bound\n");
2915 afb
= to_amdgpu_framebuffer(new_state
->fb
);
2918 rbo
= gem_to_amdgpu_bo(obj
);
2919 r
= amdgpu_bo_reserve(rbo
, false);
2920 if (unlikely(r
!= 0))
2923 r
= amdgpu_bo_pin(rbo
, AMDGPU_GEM_DOMAIN_VRAM
, &afb
->address
);
2926 amdgpu_bo_unreserve(rbo
);
2928 if (unlikely(r
!= 0)) {
2929 if (r
!= -ERESTARTSYS
)
2930 DRM_ERROR("Failed to pin framebuffer with error %d\n", r
);
2936 if (dm_plane_state_new
->dc_state
&&
2937 dm_plane_state_old
->dc_state
!= dm_plane_state_new
->dc_state
) {
2938 struct dc_plane_state
*plane_state
= dm_plane_state_new
->dc_state
;
2940 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
2941 plane_state
->address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
2942 plane_state
->address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
2944 awidth
= ALIGN(new_state
->fb
->width
, 64);
2945 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
2946 plane_state
->address
.video_progressive
.luma_addr
.low_part
2947 = lower_32_bits(afb
->address
);
2948 plane_state
->address
.video_progressive
.luma_addr
.high_part
2949 = upper_32_bits(afb
->address
);
2950 chroma_addr
= afb
->address
+ (u64
)(awidth
* new_state
->fb
->height
);
2951 plane_state
->address
.video_progressive
.chroma_addr
.low_part
2952 = lower_32_bits(chroma_addr
);
2953 plane_state
->address
.video_progressive
.chroma_addr
.high_part
2954 = upper_32_bits(chroma_addr
);
2958 /* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
2959 * prepare and cleanup in drm_atomic_helper_prepare_planes
2960 * and drm_atomic_helper_cleanup_planes because fb doens't in s3.
2961 * IN 4.10 kernel this code should be removed and amdgpu_device_suspend
2962 * code touching fram buffers should be avoided for DC.
2964 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
2965 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(new_state
->crtc
);
2967 acrtc
->cursor_bo
= obj
;
2972 static void dm_plane_helper_cleanup_fb(struct drm_plane
*plane
,
2973 struct drm_plane_state
*old_state
)
2975 struct amdgpu_bo
*rbo
;
2976 struct amdgpu_framebuffer
*afb
;
2982 afb
= to_amdgpu_framebuffer(old_state
->fb
);
2983 rbo
= gem_to_amdgpu_bo(afb
->obj
);
2984 r
= amdgpu_bo_reserve(rbo
, false);
2986 DRM_ERROR("failed to reserve rbo before unpin\n");
2990 amdgpu_bo_unpin(rbo
);
2991 amdgpu_bo_unreserve(rbo
);
2992 amdgpu_bo_unref(&rbo
);
2995 static int dm_plane_atomic_check(struct drm_plane
*plane
,
2996 struct drm_plane_state
*state
)
2998 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
2999 struct dc
*dc
= adev
->dm
.dc
;
3000 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
3002 if (!dm_plane_state
->dc_state
)
3005 if (dc_validate_plane(dc
, dm_plane_state
->dc_state
) == DC_OK
)
3011 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
3012 .prepare_fb
= dm_plane_helper_prepare_fb
,
3013 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
3014 .atomic_check
= dm_plane_atomic_check
,
3018 * TODO: these are currently initialized to rgb formats only.
3019 * For future use cases we should either initialize them dynamically based on
3020 * plane capabilities, or initialize this array to all formats, so internal drm
3021 * check will succeed, and let DC to implement proper check
3023 static const uint32_t rgb_formats
[] = {
3025 DRM_FORMAT_XRGB8888
,
3026 DRM_FORMAT_ARGB8888
,
3027 DRM_FORMAT_RGBA8888
,
3028 DRM_FORMAT_XRGB2101010
,
3029 DRM_FORMAT_XBGR2101010
,
3030 DRM_FORMAT_ARGB2101010
,
3031 DRM_FORMAT_ABGR2101010
,
3034 static const uint32_t yuv_formats
[] = {
3039 static const u32 cursor_formats
[] = {
3043 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
3044 struct amdgpu_plane
*aplane
,
3045 unsigned long possible_crtcs
)
3049 switch (aplane
->base
.type
) {
3050 case DRM_PLANE_TYPE_PRIMARY
:
3051 aplane
->base
.format_default
= true;
3053 res
= drm_universal_plane_init(
3059 ARRAY_SIZE(rgb_formats
),
3060 NULL
, aplane
->base
.type
, NULL
);
3062 case DRM_PLANE_TYPE_OVERLAY
:
3063 res
= drm_universal_plane_init(
3069 ARRAY_SIZE(yuv_formats
),
3070 NULL
, aplane
->base
.type
, NULL
);
3072 case DRM_PLANE_TYPE_CURSOR
:
3073 res
= drm_universal_plane_init(
3079 ARRAY_SIZE(cursor_formats
),
3080 NULL
, aplane
->base
.type
, NULL
);
3084 drm_plane_helper_add(&aplane
->base
, &dm_plane_helper_funcs
);
3089 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
3090 struct drm_plane
*plane
,
3091 uint32_t crtc_index
)
3093 struct amdgpu_crtc
*acrtc
= NULL
;
3094 struct amdgpu_plane
*cursor_plane
;
3098 cursor_plane
= kzalloc(sizeof(*cursor_plane
), GFP_KERNEL
);
3102 cursor_plane
->base
.type
= DRM_PLANE_TYPE_CURSOR
;
3103 res
= amdgpu_dm_plane_init(dm
, cursor_plane
, 0);
3105 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
3109 res
= drm_crtc_init_with_planes(
3113 &cursor_plane
->base
,
3114 &amdgpu_dm_crtc_funcs
, NULL
);
3119 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
3121 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3122 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3124 acrtc
->crtc_id
= crtc_index
;
3125 acrtc
->base
.enabled
= false;
3127 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
3128 drm_mode_crtc_set_gamma_size(&acrtc
->base
, 256);
3134 kfree(cursor_plane
);
3139 static int to_drm_connector_type(enum signal_type st
)
3142 case SIGNAL_TYPE_HDMI_TYPE_A
:
3143 return DRM_MODE_CONNECTOR_HDMIA
;
3144 case SIGNAL_TYPE_EDP
:
3145 return DRM_MODE_CONNECTOR_eDP
;
3146 case SIGNAL_TYPE_RGB
:
3147 return DRM_MODE_CONNECTOR_VGA
;
3148 case SIGNAL_TYPE_DISPLAY_PORT
:
3149 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
3150 return DRM_MODE_CONNECTOR_DisplayPort
;
3151 case SIGNAL_TYPE_DVI_DUAL_LINK
:
3152 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
3153 return DRM_MODE_CONNECTOR_DVID
;
3154 case SIGNAL_TYPE_VIRTUAL
:
3155 return DRM_MODE_CONNECTOR_VIRTUAL
;
3158 return DRM_MODE_CONNECTOR_Unknown
;
3162 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
3164 const struct drm_connector_helper_funcs
*helper
=
3165 connector
->helper_private
;
3166 struct drm_encoder
*encoder
;
3167 struct amdgpu_encoder
*amdgpu_encoder
;
3169 encoder
= helper
->best_encoder(connector
);
3171 if (encoder
== NULL
)
3174 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3176 amdgpu_encoder
->native_mode
.clock
= 0;
3178 if (!list_empty(&connector
->probed_modes
)) {
3179 struct drm_display_mode
*preferred_mode
= NULL
;
3181 list_for_each_entry(preferred_mode
,
3182 &connector
->probed_modes
,
3184 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
)
3185 amdgpu_encoder
->native_mode
= *preferred_mode
;
3193 static struct drm_display_mode
*
3194 amdgpu_dm_create_common_mode(struct drm_encoder
*encoder
,
3196 int hdisplay
, int vdisplay
)
3198 struct drm_device
*dev
= encoder
->dev
;
3199 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3200 struct drm_display_mode
*mode
= NULL
;
3201 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3203 mode
= drm_mode_duplicate(dev
, native_mode
);
3208 mode
->hdisplay
= hdisplay
;
3209 mode
->vdisplay
= vdisplay
;
3210 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
3211 strncpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
3217 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
3218 struct drm_connector
*connector
)
3220 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3221 struct drm_display_mode
*mode
= NULL
;
3222 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3223 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3224 to_amdgpu_dm_connector(connector
);
3228 char name
[DRM_DISPLAY_MODE_LEN
];
3231 } common_modes
[] = {
3232 { "640x480", 640, 480},
3233 { "800x600", 800, 600},
3234 { "1024x768", 1024, 768},
3235 { "1280x720", 1280, 720},
3236 { "1280x800", 1280, 800},
3237 {"1280x1024", 1280, 1024},
3238 { "1440x900", 1440, 900},
3239 {"1680x1050", 1680, 1050},
3240 {"1600x1200", 1600, 1200},
3241 {"1920x1080", 1920, 1080},
3242 {"1920x1200", 1920, 1200}
3245 n
= ARRAY_SIZE(common_modes
);
3247 for (i
= 0; i
< n
; i
++) {
3248 struct drm_display_mode
*curmode
= NULL
;
3249 bool mode_existed
= false;
3251 if (common_modes
[i
].w
> native_mode
->hdisplay
||
3252 common_modes
[i
].h
> native_mode
->vdisplay
||
3253 (common_modes
[i
].w
== native_mode
->hdisplay
&&
3254 common_modes
[i
].h
== native_mode
->vdisplay
))
3257 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
3258 if (common_modes
[i
].w
== curmode
->hdisplay
&&
3259 common_modes
[i
].h
== curmode
->vdisplay
) {
3260 mode_existed
= true;
3268 mode
= amdgpu_dm_create_common_mode(encoder
,
3269 common_modes
[i
].name
, common_modes
[i
].w
,
3271 drm_mode_probed_add(connector
, mode
);
3272 amdgpu_dm_connector
->num_modes
++;
3276 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector
*connector
,
3279 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3280 to_amdgpu_dm_connector(connector
);
3283 /* empty probed_modes */
3284 INIT_LIST_HEAD(&connector
->probed_modes
);
3285 amdgpu_dm_connector
->num_modes
=
3286 drm_add_edid_modes(connector
, edid
);
3288 drm_edid_to_eld(connector
, edid
);
3290 amdgpu_dm_get_native_mode(connector
);
3292 amdgpu_dm_connector
->num_modes
= 0;
3296 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
3298 const struct drm_connector_helper_funcs
*helper
=
3299 connector
->helper_private
;
3300 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3301 to_amdgpu_dm_connector(connector
);
3302 struct drm_encoder
*encoder
;
3303 struct edid
*edid
= amdgpu_dm_connector
->edid
;
3305 encoder
= helper
->best_encoder(connector
);
3307 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
3308 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
3309 return amdgpu_dm_connector
->num_modes
;
3312 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager
*dm
,
3313 struct amdgpu_dm_connector
*aconnector
,
3315 struct dc_link
*link
,
3318 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
3320 aconnector
->connector_id
= link_index
;
3321 aconnector
->dc_link
= link
;
3322 aconnector
->base
.interlace_allowed
= false;
3323 aconnector
->base
.doublescan_allowed
= false;
3324 aconnector
->base
.stereo_allowed
= false;
3325 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
3326 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
3328 mutex_init(&aconnector
->hpd_lock
);
3330 /* configure support HPD hot plug connector_>polled default value is 0
3331 * which means HPD hot plug not supported
3333 switch (connector_type
) {
3334 case DRM_MODE_CONNECTOR_HDMIA
:
3335 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3337 case DRM_MODE_CONNECTOR_DisplayPort
:
3338 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3340 case DRM_MODE_CONNECTOR_DVID
:
3341 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3347 drm_object_attach_property(&aconnector
->base
.base
,
3348 dm
->ddev
->mode_config
.scaling_mode_property
,
3349 DRM_MODE_SCALE_NONE
);
3351 drm_object_attach_property(&aconnector
->base
.base
,
3352 adev
->mode_info
.underscan_property
,
3354 drm_object_attach_property(&aconnector
->base
.base
,
3355 adev
->mode_info
.underscan_hborder_property
,
3357 drm_object_attach_property(&aconnector
->base
.base
,
3358 adev
->mode_info
.underscan_vborder_property
,
3363 static int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
3364 struct i2c_msg
*msgs
, int num
)
3366 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
3367 struct ddc_service
*ddc_service
= i2c
->ddc_service
;
3368 struct i2c_command cmd
;
3372 cmd
.payloads
= kcalloc(num
, sizeof(struct i2c_payload
), GFP_KERNEL
);
3377 cmd
.number_of_payloads
= num
;
3378 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
3381 for (i
= 0; i
< num
; i
++) {
3382 cmd
.payloads
[i
].write
= !(msgs
[i
].flags
& I2C_M_RD
);
3383 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
3384 cmd
.payloads
[i
].length
= msgs
[i
].len
;
3385 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
3388 if (dal_i2caux_submit_i2c_command(
3389 ddc_service
->ctx
->i2caux
,
3390 ddc_service
->ddc_pin
,
3394 kfree(cmd
.payloads
);
3398 static u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
3400 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
3403 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
3404 .master_xfer
= amdgpu_dm_i2c_xfer
,
3405 .functionality
= amdgpu_dm_i2c_func
,
3408 static struct amdgpu_i2c_adapter
*
3409 create_i2c(struct ddc_service
*ddc_service
,
3413 struct amdgpu_device
*adev
= ddc_service
->ctx
->driver_context
;
3414 struct amdgpu_i2c_adapter
*i2c
;
3416 i2c
= kzalloc(sizeof(struct amdgpu_i2c_adapter
), GFP_KERNEL
);
3417 i2c
->base
.owner
= THIS_MODULE
;
3418 i2c
->base
.class = I2C_CLASS_DDC
;
3419 i2c
->base
.dev
.parent
= &adev
->pdev
->dev
;
3420 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
3421 snprintf(i2c
->base
.name
, sizeof(i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
3422 i2c_set_adapdata(&i2c
->base
, i2c
);
3423 i2c
->ddc_service
= ddc_service
;
3428 /* Note: this function assumes that dc_link_detect() was called for the
3429 * dc_link which will be represented by this aconnector.
3431 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
3432 struct amdgpu_dm_connector
*aconnector
,
3433 uint32_t link_index
,
3434 struct amdgpu_encoder
*aencoder
)
3438 struct dc
*dc
= dm
->dc
;
3439 struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
3440 struct amdgpu_i2c_adapter
*i2c
;
3442 link
->priv
= aconnector
;
3444 DRM_DEBUG_DRIVER("%s()\n", __func__
);
3446 i2c
= create_i2c(link
->ddc
, link
->link_index
, &res
);
3447 aconnector
->i2c
= i2c
;
3448 res
= i2c_add_adapter(&i2c
->base
);
3451 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
3455 connector_type
= to_drm_connector_type(link
->connector_signal
);
3457 res
= drm_connector_init(
3460 &amdgpu_dm_connector_funcs
,
3464 DRM_ERROR("connector_init failed\n");
3465 aconnector
->connector_id
= -1;
3469 drm_connector_helper_add(
3471 &amdgpu_dm_connector_helper_funcs
);
3473 amdgpu_dm_connector_init_helper(
3480 drm_mode_connector_attach_encoder(
3481 &aconnector
->base
, &aencoder
->base
);
3483 drm_connector_register(&aconnector
->base
);
3485 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
3486 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
3487 amdgpu_dm_initialize_dp_connector(dm
, aconnector
);
3489 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3490 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3492 /* NOTE: this currently will create backlight device even if a panel
3493 * is not connected to the eDP/LVDS connector.
3495 * This is less than ideal but we don't have sink information at this
3496 * stage since detection happens after. We can't do detection earlier
3497 * since MST detection needs connectors to be created first.
3499 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
3500 /* Event if registration failed, we should continue with
3501 * DM initialization because not having a backlight control
3502 * is better then a black screen.
3504 amdgpu_dm_register_backlight_device(dm
);
3506 if (dm
->backlight_dev
)
3507 dm
->backlight_link
= link
;
3514 aconnector
->i2c
= NULL
;
3519 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
3521 switch (adev
->mode_info
.num_crtc
) {
3538 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
3539 struct amdgpu_encoder
*aencoder
,
3540 uint32_t link_index
)
3542 struct amdgpu_device
*adev
= dev
->dev_private
;
3544 int res
= drm_encoder_init(dev
,
3546 &amdgpu_dm_encoder_funcs
,
3547 DRM_MODE_ENCODER_TMDS
,
3550 aencoder
->base
.possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
3553 aencoder
->encoder_id
= link_index
;
3555 aencoder
->encoder_id
= -1;
3557 drm_encoder_helper_add(&aencoder
->base
, &amdgpu_dm_encoder_helper_funcs
);
3562 static void manage_dm_interrupts(struct amdgpu_device
*adev
,
3563 struct amdgpu_crtc
*acrtc
,
3567 * this is not correct translation but will work as soon as VBLANK
3568 * constant is the same as PFLIP
3571 amdgpu_crtc_idx_to_irq_type(
3576 drm_crtc_vblank_on(&acrtc
->base
);
3579 &adev
->pageflip_irq
,
3585 &adev
->pageflip_irq
,
3587 drm_crtc_vblank_off(&acrtc
->base
);
3592 is_scaling_state_different(const struct dm_connector_state
*dm_state
,
3593 const struct dm_connector_state
*old_dm_state
)
3595 if (dm_state
->scaling
!= old_dm_state
->scaling
)
3597 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
3598 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
3600 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
3601 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
3603 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
||
3604 dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
3609 static void remove_stream(struct amdgpu_device
*adev
,
3610 struct amdgpu_crtc
*acrtc
,
3611 struct dc_stream_state
*stream
)
3613 /* this is the update mode case */
3614 if (adev
->dm
.freesync_module
)
3615 mod_freesync_remove_stream(adev
->dm
.freesync_module
, stream
);
3617 acrtc
->otg_inst
= -1;
3618 acrtc
->enabled
= false;
3621 static int get_cursor_position(struct drm_plane
*plane
, struct drm_crtc
*crtc
,
3622 struct dc_cursor_position
*position
)
3624 struct amdgpu_crtc
*amdgpu_crtc
= amdgpu_crtc
= to_amdgpu_crtc(crtc
);
3626 int xorigin
= 0, yorigin
= 0;
3628 if (!crtc
|| !plane
->state
->fb
) {
3629 position
->enable
= false;
3635 if ((plane
->state
->crtc_w
> amdgpu_crtc
->max_cursor_width
) ||
3636 (plane
->state
->crtc_h
> amdgpu_crtc
->max_cursor_height
)) {
3637 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3639 plane
->state
->crtc_w
,
3640 plane
->state
->crtc_h
);
3644 x
= plane
->state
->crtc_x
;
3645 y
= plane
->state
->crtc_y
;
3646 /* avivo cursor are offset into the total surface */
3647 x
+= crtc
->primary
->state
->src_x
>> 16;
3648 y
+= crtc
->primary
->state
->src_y
>> 16;
3650 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
3654 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
3657 position
->enable
= true;
3660 position
->x_hotspot
= xorigin
;
3661 position
->y_hotspot
= yorigin
;
3666 static void handle_cursor_update(struct drm_plane
*plane
,
3667 struct drm_plane_state
*old_plane_state
)
3669 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(plane
->state
->fb
);
3670 struct drm_crtc
*crtc
= afb
? plane
->state
->crtc
: old_plane_state
->crtc
;
3671 struct dm_crtc_state
*crtc_state
= crtc
? to_dm_crtc_state(crtc
->state
) : NULL
;
3672 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
3673 uint64_t address
= afb
? afb
->address
: 0;
3674 struct dc_cursor_position position
;
3675 struct dc_cursor_attributes attributes
;
3678 if (!plane
->state
->fb
&& !old_plane_state
->fb
)
3681 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
3683 amdgpu_crtc
->crtc_id
,
3684 plane
->state
->crtc_w
,
3685 plane
->state
->crtc_h
);
3687 ret
= get_cursor_position(plane
, crtc
, &position
);
3691 if (!position
.enable
) {
3692 /* turn off cursor */
3693 if (crtc_state
&& crtc_state
->stream
)
3694 dc_stream_set_cursor_position(crtc_state
->stream
,
3699 amdgpu_crtc
->cursor_width
= plane
->state
->crtc_w
;
3700 amdgpu_crtc
->cursor_height
= plane
->state
->crtc_h
;
3702 attributes
.address
.high_part
= upper_32_bits(address
);
3703 attributes
.address
.low_part
= lower_32_bits(address
);
3704 attributes
.width
= plane
->state
->crtc_w
;
3705 attributes
.height
= plane
->state
->crtc_h
;
3706 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
3707 attributes
.rotation_angle
= 0;
3708 attributes
.attribute_flags
.value
= 0;
3710 attributes
.pitch
= attributes
.width
;
3712 if (crtc_state
->stream
) {
3713 if (!dc_stream_set_cursor_attributes(crtc_state
->stream
,
3715 DRM_ERROR("DC failed to set cursor attributes\n");
3717 if (!dc_stream_set_cursor_position(crtc_state
->stream
,
3719 DRM_ERROR("DC failed to set cursor position\n");
3723 static void prepare_flip_isr(struct amdgpu_crtc
*acrtc
)
3726 assert_spin_locked(&acrtc
->base
.dev
->event_lock
);
3727 WARN_ON(acrtc
->event
);
3729 acrtc
->event
= acrtc
->base
.state
->event
;
3731 /* Set the flip status */
3732 acrtc
->pflip_status
= AMDGPU_FLIP_SUBMITTED
;
3734 /* Mark this event as consumed */
3735 acrtc
->base
.state
->event
= NULL
;
3737 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
3744 * Waits on all BO's fences and for proper vblank count
3746 static void amdgpu_dm_do_flip(struct drm_crtc
*crtc
,
3747 struct drm_framebuffer
*fb
,
3749 struct dc_state
*state
)
3751 unsigned long flags
;
3752 uint32_t target_vblank
;
3754 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3755 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(fb
);
3756 struct amdgpu_bo
*abo
= gem_to_amdgpu_bo(afb
->obj
);
3757 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
3758 bool async_flip
= (crtc
->state
->pageflip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
) != 0;
3759 struct dc_flip_addrs addr
= { {0} };
3760 /* TODO eliminate or rename surface_update */
3761 struct dc_surface_update surface_updates
[1] = { {0} };
3762 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
3765 /* Prepare wait for target vblank early - before the fence-waits */
3766 target_vblank
= target
- drm_crtc_vblank_count(crtc
) +
3767 amdgpu_get_vblank_counter_kms(crtc
->dev
, acrtc
->crtc_id
);
3769 /* TODO This might fail and hence better not used, wait
3770 * explicitly on fences instead
3771 * and in general should be called for
3772 * blocking commit to as per framework helpers
3774 r
= amdgpu_bo_reserve(abo
, true);
3775 if (unlikely(r
!= 0)) {
3776 DRM_ERROR("failed to reserve buffer before flip\n");
3780 /* Wait for all fences on this FB */
3781 WARN_ON(reservation_object_wait_timeout_rcu(abo
->tbo
.resv
, true, false,
3782 MAX_SCHEDULE_TIMEOUT
) < 0);
3784 amdgpu_bo_unreserve(abo
);
3786 /* Wait until we're out of the vertical blank period before the one
3787 * targeted by the flip
3789 while ((acrtc
->enabled
&&
3790 (amdgpu_get_crtc_scanoutpos(adev
->ddev
, acrtc
->crtc_id
, 0,
3791 &vpos
, &hpos
, NULL
, NULL
,
3793 & (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
)) ==
3794 (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
) &&
3795 (int)(target_vblank
-
3796 amdgpu_get_vblank_counter_kms(adev
->ddev
, acrtc
->crtc_id
)) > 0)) {
3797 usleep_range(1000, 1100);
3801 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
3802 /* update crtc fb */
3803 crtc
->primary
->fb
= fb
;
3805 WARN_ON(acrtc
->pflip_status
!= AMDGPU_FLIP_NONE
);
3806 WARN_ON(!acrtc_state
->stream
);
3808 addr
.address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3809 addr
.address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3810 addr
.flip_immediate
= async_flip
;
3813 if (acrtc
->base
.state
->event
)
3814 prepare_flip_isr(acrtc
);
3816 surface_updates
->surface
= dc_stream_get_status(acrtc_state
->stream
)->plane_states
[0];
3817 surface_updates
->flip_addr
= &addr
;
3820 dc_commit_updates_for_stream(adev
->dm
.dc
,
3823 acrtc_state
->stream
,
3825 &surface_updates
->surface
,
3828 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
3830 addr
.address
.grph
.addr
.high_part
,
3831 addr
.address
.grph
.addr
.low_part
);
3834 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3837 static void amdgpu_dm_commit_planes(struct drm_atomic_state
*state
,
3838 struct drm_device
*dev
,
3839 struct amdgpu_display_manager
*dm
,
3840 struct drm_crtc
*pcrtc
,
3841 bool *wait_for_vblank
)
3844 struct drm_plane
*plane
;
3845 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
3846 struct dc_stream_state
*dc_stream_attach
;
3847 struct dc_plane_state
*plane_states_constructed
[MAX_SURFACES
];
3848 struct amdgpu_crtc
*acrtc_attach
= to_amdgpu_crtc(pcrtc
);
3849 struct drm_crtc_state
*new_pcrtc_state
=
3850 drm_atomic_get_new_crtc_state(state
, pcrtc
);
3851 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(new_pcrtc_state
);
3852 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
3853 int planes_count
= 0;
3854 unsigned long flags
;
3856 /* update planes when needed */
3857 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
3858 struct drm_crtc
*crtc
= new_plane_state
->crtc
;
3859 struct drm_crtc_state
*new_crtc_state
;
3860 struct drm_framebuffer
*fb
= new_plane_state
->fb
;
3862 struct dm_plane_state
*dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
3864 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
3865 handle_cursor_update(plane
, old_plane_state
);
3869 if (!fb
|| !crtc
|| pcrtc
!= crtc
)
3872 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, crtc
);
3873 if (!new_crtc_state
->active
)
3876 pflip_needed
= !state
->allow_modeset
;
3878 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
3879 if (acrtc_attach
->pflip_status
!= AMDGPU_FLIP_NONE
) {
3880 DRM_ERROR("%s: acrtc %d, already busy\n",
3882 acrtc_attach
->crtc_id
);
3883 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3884 /* In commit tail framework this cannot happen */
3887 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3889 if (!pflip_needed
) {
3890 WARN_ON(!dm_new_plane_state
->dc_state
);
3892 plane_states_constructed
[planes_count
] = dm_new_plane_state
->dc_state
;
3894 dc_stream_attach
= acrtc_state
->stream
;
3897 } else if (new_crtc_state
->planes_changed
) {
3898 /* Assume even ONE crtc with immediate flip means
3899 * entire can't wait for VBLANK
3900 * TODO Check if it's correct
3903 new_pcrtc_state
->pageflip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
?
3906 /* TODO: Needs rework for multiplane flip */
3907 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
)
3908 drm_crtc_vblank_get(crtc
);
3913 drm_crtc_vblank_count(crtc
) + *wait_for_vblank
,
3920 unsigned long flags
;
3922 if (new_pcrtc_state
->event
) {
3924 drm_crtc_vblank_get(pcrtc
);
3926 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
3927 prepare_flip_isr(acrtc_attach
);
3928 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
3931 if (false == dc_commit_planes_to_stream(dm
->dc
,
3932 plane_states_constructed
,
3936 dm_error("%s: Failed to attach plane!\n", __func__
);
3938 /*TODO BUG Here should go disable planes on CRTC. */
3943 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
3944 struct drm_atomic_state
*state
,
3947 struct drm_crtc
*crtc
;
3948 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
3949 struct amdgpu_device
*adev
= dev
->dev_private
;
3953 * We evade vblanks and pflips on crtc that
3954 * should be changed. We do it here to flush & disable
3955 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
3956 * it will update crtc->dm_crtc_state->stream pointer which is used in
3959 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
3960 struct dm_crtc_state
*dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
3961 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3963 if (drm_atomic_crtc_needs_modeset(new_crtc_state
) && dm_old_crtc_state
->stream
)
3964 manage_dm_interrupts(adev
, acrtc
, false);
3966 /* Add check here for SoC's that support hardware cursor plane, to
3967 * unset legacy_cursor_update */
3969 return drm_atomic_helper_commit(dev
, state
, nonblock
);
3971 /*TODO Handle EINTR, reenable IRQ*/
3974 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
)
3976 struct drm_device
*dev
= state
->dev
;
3977 struct amdgpu_device
*adev
= dev
->dev_private
;
3978 struct amdgpu_display_manager
*dm
= &adev
->dm
;
3979 struct dm_atomic_state
*dm_state
;
3981 uint32_t new_crtcs_count
= 0;
3982 struct drm_crtc
*crtc
;
3983 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
3984 struct amdgpu_crtc
*new_crtcs
[MAX_STREAMS
];
3985 struct dc_stream_state
*new_stream
= NULL
;
3986 unsigned long flags
;
3987 bool wait_for_vblank
= true;
3988 struct drm_connector
*connector
;
3989 struct drm_connector_state
*old_con_state
, *new_con_state
;
3990 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
3992 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
3994 dm_state
= to_dm_atomic_state(state
);
3996 /* update changed items */
3997 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
3998 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4000 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4001 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4004 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4005 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4006 "connectors_changed:%d\n",
4008 new_crtc_state
->enable
,
4009 new_crtc_state
->active
,
4010 new_crtc_state
->planes_changed
,
4011 new_crtc_state
->mode_changed
,
4012 new_crtc_state
->active_changed
,
4013 new_crtc_state
->connectors_changed
);
4015 /* handles headless hotplug case, updating new_state and
4016 * aconnector as needed
4019 if (modeset_required(new_crtc_state
, dm_new_crtc_state
->stream
, dm_old_crtc_state
->stream
)) {
4021 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc
->crtc_id
, acrtc
);
4023 if (!dm_new_crtc_state
->stream
) {
4025 * this could happen because of issues with
4026 * userspace notifications delivery.
4027 * In this case userspace tries to set mode on
4028 * display which is disconnect in fact.
4029 * dc_sink in NULL in this case on aconnector.
4030 * We expect reset mode will come soon.
4032 * This can also happen when unplug is done
4033 * during resume sequence ended
4035 * In this case, we want to pretend we still
4036 * have a sink to keep the pipe running so that
4037 * hw state is consistent with the sw state
4039 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4040 __func__
, acrtc
->base
.base
.id
);
4045 if (dm_old_crtc_state
->stream
)
4046 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
4050 * this loop saves set mode crtcs
4051 * we needed to enable vblanks once all
4052 * resources acquired in dc after dc_commit_streams
4055 /*TODO move all this into dm_crtc_state, get rid of
4056 * new_crtcs array and use old and new atomic states
4059 new_crtcs
[new_crtcs_count
] = acrtc
;
4062 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, crtc
);
4063 acrtc
->enabled
= true;
4064 acrtc
->hw_mode
= new_crtc_state
->mode
;
4065 crtc
->hwmode
= new_crtc_state
->mode
;
4066 } else if (modereset_required(new_crtc_state
)) {
4067 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc
->crtc_id
, acrtc
);
4069 /* i.e. reset mode */
4070 if (dm_old_crtc_state
->stream
)
4071 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
4073 } /* for_each_crtc_in_state() */
4076 * Add streams after required streams from new and replaced streams
4077 * are removed from freesync module
4079 if (adev
->dm
.freesync_module
) {
4080 for (i
= 0; i
< new_crtcs_count
; i
++) {
4081 struct amdgpu_dm_connector
*aconnector
= NULL
;
4083 new_crtc_state
= drm_atomic_get_new_crtc_state(state
,
4084 &new_crtcs
[i
]->base
);
4085 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4087 new_stream
= dm_new_crtc_state
->stream
;
4088 aconnector
= amdgpu_dm_find_first_crtc_matching_connector(
4090 &new_crtcs
[i
]->base
);
4092 DRM_DEBUG_DRIVER("Atomic commit: Failed to find connector for acrtc id:%d "
4093 "skipping freesync init\n",
4094 new_crtcs
[i
]->crtc_id
);
4098 mod_freesync_add_stream(adev
->dm
.freesync_module
,
4099 new_stream
, &aconnector
->caps
);
4103 if (dm_state
->context
)
4104 WARN_ON(!dc_commit_state(dm
->dc
, dm_state
->context
));
4106 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
4107 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4109 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4111 if (dm_new_crtc_state
->stream
!= NULL
) {
4112 const struct dc_stream_status
*status
=
4113 dc_stream_get_status(dm_new_crtc_state
->stream
);
4116 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state
->stream
, acrtc
);
4118 acrtc
->otg_inst
= status
->primary_otg_inst
;
4122 /* Handle scaling and underscan changes*/
4123 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
4124 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
4125 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
4126 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
4127 struct dc_stream_status
*status
= NULL
;
4130 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, &acrtc
->base
);
4132 /* Skip any modesets/resets */
4133 if (!acrtc
|| drm_atomic_crtc_needs_modeset(new_crtc_state
))
4136 /* Skip any thing not scale or underscan changes */
4137 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
))
4140 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4142 update_stream_scaling_settings(&dm_new_con_state
->base
.crtc
->mode
,
4143 dm_new_con_state
, (struct dc_stream_state
*)dm_new_crtc_state
->stream
);
4145 status
= dc_stream_get_status(dm_new_crtc_state
->stream
);
4147 WARN_ON(!status
->plane_count
);
4149 if (!dm_new_crtc_state
->stream
)
4152 /*TODO How it works with MPO ?*/
4153 if (!dc_commit_planes_to_stream(
4155 status
->plane_states
,
4156 status
->plane_count
,
4157 dm_new_crtc_state
->stream
,
4159 dm_error("%s: Failed to update stream scaling!\n", __func__
);
4162 for (i
= 0; i
< new_crtcs_count
; i
++) {
4164 * loop to enable interrupts on newly arrived crtc
4166 struct amdgpu_crtc
*acrtc
= new_crtcs
[i
];
4168 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, &acrtc
->base
);
4169 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4171 if (adev
->dm
.freesync_module
)
4172 mod_freesync_notify_mode_change(
4173 adev
->dm
.freesync_module
, &dm_new_crtc_state
->stream
, 1);
4175 manage_dm_interrupts(adev
, acrtc
, true);
4178 /* update planes when needed per crtc*/
4179 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, j
) {
4180 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4182 if (dm_new_crtc_state
->stream
)
4183 amdgpu_dm_commit_planes(state
, dev
, dm
, crtc
, &wait_for_vblank
);
4188 * send vblank event on all events not handled in flip and
4189 * mark consumed event for drm_atomic_helper_commit_hw_done
4191 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
4192 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
4194 if (new_crtc_state
->event
)
4195 drm_send_event_locked(dev
, &new_crtc_state
->event
->base
);
4197 new_crtc_state
->event
= NULL
;
4199 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
4201 /* Signal HW programming completion */
4202 drm_atomic_helper_commit_hw_done(state
);
4204 if (wait_for_vblank
)
4205 drm_atomic_helper_wait_for_vblanks(dev
, state
);
4207 drm_atomic_helper_cleanup_planes(dev
, state
);
4211 static int dm_force_atomic_commit(struct drm_connector
*connector
)
4214 struct drm_device
*ddev
= connector
->dev
;
4215 struct drm_atomic_state
*state
= drm_atomic_state_alloc(ddev
);
4216 struct amdgpu_crtc
*disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4217 struct drm_plane
*plane
= disconnected_acrtc
->base
.primary
;
4218 struct drm_connector_state
*conn_state
;
4219 struct drm_crtc_state
*crtc_state
;
4220 struct drm_plane_state
*plane_state
;
4225 state
->acquire_ctx
= ddev
->mode_config
.acquire_ctx
;
4227 /* Construct an atomic state to restore previous display setting */
4230 * Attach connectors to drm_atomic_state
4232 conn_state
= drm_atomic_get_connector_state(state
, connector
);
4234 ret
= PTR_ERR_OR_ZERO(conn_state
);
4238 /* Attach crtc to drm_atomic_state*/
4239 crtc_state
= drm_atomic_get_crtc_state(state
, &disconnected_acrtc
->base
);
4241 ret
= PTR_ERR_OR_ZERO(crtc_state
);
4245 /* force a restore */
4246 crtc_state
->mode_changed
= true;
4248 /* Attach plane to drm_atomic_state */
4249 plane_state
= drm_atomic_get_plane_state(state
, plane
);
4251 ret
= PTR_ERR_OR_ZERO(plane_state
);
4256 /* Call commit internally with the state we just constructed */
4257 ret
= drm_atomic_commit(state
);
4262 DRM_ERROR("Restoring old state failed with %i\n", ret
);
4263 drm_atomic_state_put(state
);
4269 * This functions handle all cases when set mode does not come upon hotplug.
4270 * This include when the same display is unplugged then plugged back into the
4271 * same port and when we are running without usermode desktop manager supprot
4273 void dm_restore_drm_connector_state(struct drm_device
*dev
,
4274 struct drm_connector
*connector
)
4276 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4277 struct amdgpu_crtc
*disconnected_acrtc
;
4278 struct dm_crtc_state
*acrtc_state
;
4280 if (!aconnector
->dc_sink
|| !connector
->state
|| !connector
->encoder
)
4283 disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4284 acrtc_state
= to_dm_crtc_state(disconnected_acrtc
->base
.state
);
4286 if (!disconnected_acrtc
|| !acrtc_state
->stream
)
4290 * If the previous sink is not released and different from the current,
4291 * we deduce we are in a state where we can not rely on usermode call
4292 * to turn on the display, so we do it here
4294 if (acrtc_state
->stream
->sink
!= aconnector
->dc_sink
)
4295 dm_force_atomic_commit(&aconnector
->base
);
4299 * Grabs all modesetting locks to serialize against any blocking commits,
4300 * Waits for completion of all non blocking commits.
4302 static int do_aquire_global_lock(struct drm_device
*dev
,
4303 struct drm_atomic_state
*state
)
4305 struct drm_crtc
*crtc
;
4306 struct drm_crtc_commit
*commit
;
4309 /* Adding all modeset locks to aquire_ctx will
4310 * ensure that when the framework release it the
4311 * extra locks we are locking here will get released to
4313 ret
= drm_modeset_lock_all_ctx(dev
, state
->acquire_ctx
);
4317 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
4318 spin_lock(&crtc
->commit_lock
);
4319 commit
= list_first_entry_or_null(&crtc
->commit_list
,
4320 struct drm_crtc_commit
, commit_entry
);
4322 drm_crtc_commit_get(commit
);
4323 spin_unlock(&crtc
->commit_lock
);
4328 /* Make sure all pending HW programming completed and
4331 ret
= wait_for_completion_interruptible_timeout(&commit
->hw_done
, 10*HZ
);
4334 ret
= wait_for_completion_interruptible_timeout(
4335 &commit
->flip_done
, 10*HZ
);
4338 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
4339 "timed out\n", crtc
->base
.id
, crtc
->name
);
4341 drm_crtc_commit_put(commit
);
4344 return ret
< 0 ? ret
: 0;
4347 static int dm_update_crtcs_state(struct dc
*dc
,
4348 struct drm_atomic_state
*state
,
4350 bool *lock_and_validation_needed
)
4352 struct drm_crtc
*crtc
;
4353 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4355 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
4356 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4357 struct dc_stream_state
*new_stream
;
4360 /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
4361 /* update changed items */
4362 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4363 struct amdgpu_crtc
*acrtc
= NULL
;
4364 struct amdgpu_dm_connector
*aconnector
= NULL
;
4365 struct drm_connector_state
*new_con_state
= NULL
;
4366 struct dm_connector_state
*dm_conn_state
= NULL
;
4370 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4371 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4372 acrtc
= to_amdgpu_crtc(crtc
);
4374 aconnector
= amdgpu_dm_find_first_crtc_matching_connector(state
, crtc
);
4376 /* TODO This hack should go away */
4377 if (aconnector
&& enable
) {
4378 // Make sure fake sink is created in plug-in scenario
4379 new_con_state
= drm_atomic_get_connector_state(state
,
4382 if (IS_ERR(new_con_state
)) {
4383 ret
= PTR_ERR_OR_ZERO(new_con_state
);
4387 dm_conn_state
= to_dm_connector_state(new_con_state
);
4389 new_stream
= create_stream_for_sink(aconnector
,
4390 &new_crtc_state
->mode
,
4394 * we can have no stream on ACTION_SET if a display
4395 * was disconnected during S3, in this case it not and
4396 * error, the OS will be updated after detection, and
4397 * do the right thing on next atomic commit
4401 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4402 __func__
, acrtc
->base
.base
.id
);
4407 if (dc_is_stream_unchanged(new_stream
, dm_old_crtc_state
->stream
) &&
4408 dc_is_stream_scaling_unchanged(new_stream
, dm_old_crtc_state
->stream
)) {
4410 new_crtc_state
->mode_changed
= false;
4412 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
4413 new_crtc_state
->mode_changed
);
4417 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
4421 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4422 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4423 "connectors_changed:%d\n",
4425 new_crtc_state
->enable
,
4426 new_crtc_state
->active
,
4427 new_crtc_state
->planes_changed
,
4428 new_crtc_state
->mode_changed
,
4429 new_crtc_state
->active_changed
,
4430 new_crtc_state
->connectors_changed
);
4432 /* Remove stream for any changed/disabled CRTC */
4435 if (!dm_old_crtc_state
->stream
)
4438 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
4441 /* i.e. reset mode */
4442 if (dc_remove_stream_from_ctx(
4445 dm_old_crtc_state
->stream
) != DC_OK
) {
4450 dc_stream_release(dm_old_crtc_state
->stream
);
4451 dm_new_crtc_state
->stream
= NULL
;
4453 *lock_and_validation_needed
= true;
4455 } else {/* Add stream for any updated/enabled CRTC */
4457 * Quick fix to prevent NULL pointer on new_stream when
4458 * added MST connectors not found in existing crtc_state in the chained mode
4459 * TODO: need to dig out the root cause of that
4461 if (!aconnector
|| (!aconnector
->dc_sink
&& aconnector
->mst_port
))
4464 if (modereset_required(new_crtc_state
))
4467 if (modeset_required(new_crtc_state
, new_stream
,
4468 dm_old_crtc_state
->stream
)) {
4470 WARN_ON(dm_new_crtc_state
->stream
);
4472 dm_new_crtc_state
->stream
= new_stream
;
4473 dc_stream_retain(new_stream
);
4475 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
4478 if (dc_add_stream_to_ctx(
4481 dm_new_crtc_state
->stream
) != DC_OK
) {
4486 *lock_and_validation_needed
= true;
4491 /* Release extra reference */
4493 dc_stream_release(new_stream
);
4500 dc_stream_release(new_stream
);
4504 static int dm_update_planes_state(struct dc
*dc
,
4505 struct drm_atomic_state
*state
,
4507 bool *lock_and_validation_needed
)
4509 struct drm_crtc
*new_plane_crtc
, *old_plane_crtc
;
4510 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4511 struct drm_plane
*plane
;
4512 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
4513 struct dm_crtc_state
*dm_new_crtc_state
, *dm_old_crtc_state
;
4514 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4515 struct dm_plane_state
*dm_new_plane_state
, *dm_old_plane_state
;
4517 /* TODO return page_flip_needed() function */
4518 bool pflip_needed
= !state
->allow_modeset
;
4524 /* Add new planes */
4525 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
4526 new_plane_crtc
= new_plane_state
->crtc
;
4527 old_plane_crtc
= old_plane_state
->crtc
;
4528 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
4529 dm_old_plane_state
= to_dm_plane_state(old_plane_state
);
4531 /*TODO Implement atomic check for cursor plane */
4532 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
4535 /* Remove any changed/removed planes */
4538 if (!old_plane_crtc
)
4541 old_crtc_state
= drm_atomic_get_old_crtc_state(
4542 state
, old_plane_crtc
);
4543 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4545 if (!dm_old_crtc_state
->stream
)
4548 DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
4549 plane
->base
.id
, old_plane_crtc
->base
.id
);
4551 if (!dc_remove_plane_from_context(
4553 dm_old_crtc_state
->stream
,
4554 dm_old_plane_state
->dc_state
,
4555 dm_state
->context
)) {
4562 dc_plane_state_release(dm_old_plane_state
->dc_state
);
4563 dm_new_plane_state
->dc_state
= NULL
;
4565 *lock_and_validation_needed
= true;
4567 } else { /* Add new planes */
4569 if (drm_atomic_plane_disabling(plane
->state
, new_plane_state
))
4572 if (!new_plane_crtc
)
4575 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, new_plane_crtc
);
4576 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4578 if (!dm_new_crtc_state
->stream
)
4582 WARN_ON(dm_new_plane_state
->dc_state
);
4584 dm_new_plane_state
->dc_state
= dc_create_plane_state(dc
);
4586 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4587 plane
->base
.id
, new_plane_crtc
->base
.id
);
4589 if (!dm_new_plane_state
->dc_state
) {
4594 ret
= fill_plane_attributes(
4595 new_plane_crtc
->dev
->dev_private
,
4596 dm_new_plane_state
->dc_state
,
4604 if (!dc_add_plane_to_context(
4606 dm_new_crtc_state
->stream
,
4607 dm_new_plane_state
->dc_state
,
4608 dm_state
->context
)) {
4614 *lock_and_validation_needed
= true;
4622 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
4623 struct drm_atomic_state
*state
)
4627 struct amdgpu_device
*adev
= dev
->dev_private
;
4628 struct dc
*dc
= adev
->dm
.dc
;
4629 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4630 struct drm_connector
*connector
;
4631 struct drm_connector_state
*old_con_state
, *new_con_state
;
4632 struct drm_crtc
*crtc
;
4633 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4636 * This bool will be set for true for any modeset/reset
4637 * or plane update which implies non fast surface update.
4639 bool lock_and_validation_needed
= false;
4641 ret
= drm_atomic_helper_check_modeset(dev
, state
);
4643 DRM_ERROR("Atomic state validation failed with error :%d !\n", ret
);
4648 * legacy_cursor_update should be made false for SoC's having
4649 * a dedicated hardware plane for cursor in amdgpu_dm_atomic_commit(),
4650 * otherwise for software cursor plane,
4651 * we should not add it to list of affected planes.
4653 if (state
->legacy_cursor_update
) {
4654 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
4655 if (new_crtc_state
->color_mgmt_changed
) {
4656 ret
= drm_atomic_add_affected_planes(state
, crtc
);
4662 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4663 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
4666 if (!new_crtc_state
->enable
)
4669 ret
= drm_atomic_add_affected_connectors(state
, crtc
);
4673 ret
= drm_atomic_add_affected_planes(state
, crtc
);
4679 dm_state
->context
= dc_create_state();
4680 ASSERT(dm_state
->context
);
4681 dc_resource_state_copy_construct_current(dc
, dm_state
->context
);
4683 /* Remove exiting planes if they are modified */
4684 ret
= dm_update_planes_state(dc
, state
, false, &lock_and_validation_needed
);
4689 /* Disable all crtcs which require disable */
4690 ret
= dm_update_crtcs_state(dc
, state
, false, &lock_and_validation_needed
);
4695 /* Enable all crtcs which require enable */
4696 ret
= dm_update_crtcs_state(dc
, state
, true, &lock_and_validation_needed
);
4701 /* Add new/modified planes */
4702 ret
= dm_update_planes_state(dc
, state
, true, &lock_and_validation_needed
);
4707 /* Run this here since we want to validate the streams we created */
4708 ret
= drm_atomic_helper_check_planes(dev
, state
);
4712 /* Check scaling and underscan changes*/
4713 /*TODO Removed scaling changes validation due to inability to commit
4714 * new stream into context w\o causing full reset. Need to
4715 * decide how to handle.
4717 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
4718 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
4719 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
4720 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
4722 /* Skip any modesets/resets */
4723 if (!acrtc
|| drm_atomic_crtc_needs_modeset(
4724 drm_atomic_get_new_crtc_state(state
, &acrtc
->base
)))
4727 /* Skip any thing not scale or underscan changes */
4728 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
))
4731 lock_and_validation_needed
= true;
4735 * For full updates case when
4736 * removing/adding/updating streams on once CRTC while flipping
4738 * acquiring global lock will guarantee that any such full
4740 * will wait for completion of any outstanding flip using DRMs
4741 * synchronization events.
4744 if (lock_and_validation_needed
) {
4746 ret
= do_aquire_global_lock(dev
, state
);
4750 if (dc_validate_global_state(dc
, dm_state
->context
) != DC_OK
) {
4756 /* Must be success */
4761 if (ret
== -EDEADLK
)
4762 DRM_DEBUG_DRIVER("Atomic check stopped due to to deadlock.\n");
4763 else if (ret
== -EINTR
|| ret
== -EAGAIN
|| ret
== -ERESTARTSYS
)
4764 DRM_DEBUG_DRIVER("Atomic check stopped due to to signal.\n");
4766 DRM_ERROR("Atomic check failed with err: %d \n", ret
);
4771 static bool is_dp_capable_without_timing_msa(struct dc
*dc
,
4772 struct amdgpu_dm_connector
*amdgpu_dm_connector
)
4775 bool capable
= false;
4777 if (amdgpu_dm_connector
->dc_link
&&
4778 dm_helpers_dp_read_dpcd(
4780 amdgpu_dm_connector
->dc_link
,
4781 DP_DOWN_STREAM_PORT_COUNT
,
4783 sizeof(dpcd_data
))) {
4784 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
4789 void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector
*connector
,
4793 uint64_t val_capable
;
4794 bool edid_check_required
;
4795 struct detailed_timing
*timing
;
4796 struct detailed_non_pixel
*data
;
4797 struct detailed_data_monitor_range
*range
;
4798 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
4799 to_amdgpu_dm_connector(connector
);
4801 struct drm_device
*dev
= connector
->dev
;
4802 struct amdgpu_device
*adev
= dev
->dev_private
;
4804 edid_check_required
= false;
4805 if (!amdgpu_dm_connector
->dc_sink
) {
4806 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
4809 if (!adev
->dm
.freesync_module
)
4812 * if edid non zero restrict freesync only for dp and edp
4815 if (amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
4816 || amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
4817 edid_check_required
= is_dp_capable_without_timing_msa(
4819 amdgpu_dm_connector
);
4823 if (edid_check_required
== true && (edid
->version
> 1 ||
4824 (edid
->version
== 1 && edid
->revision
> 1))) {
4825 for (i
= 0; i
< 4; i
++) {
4827 timing
= &edid
->detailed_timings
[i
];
4828 data
= &timing
->data
.other_data
;
4829 range
= &data
->data
.range
;
4831 * Check if monitor has continuous frequency mode
4833 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
4836 * Check for flag range limits only. If flag == 1 then
4837 * no additional timing information provided.
4838 * Default GTF, GTF Secondary curve and CVT are not
4841 if (range
->flags
!= 1)
4844 amdgpu_dm_connector
->min_vfreq
= range
->min_vfreq
;
4845 amdgpu_dm_connector
->max_vfreq
= range
->max_vfreq
;
4846 amdgpu_dm_connector
->pixel_clock_mhz
=
4847 range
->pixel_clock_mhz
* 10;
4851 if (amdgpu_dm_connector
->max_vfreq
-
4852 amdgpu_dm_connector
->min_vfreq
> 10) {
4853 amdgpu_dm_connector
->caps
.supported
= true;
4854 amdgpu_dm_connector
->caps
.min_refresh_in_micro_hz
=
4855 amdgpu_dm_connector
->min_vfreq
* 1000000;
4856 amdgpu_dm_connector
->caps
.max_refresh_in_micro_hz
=
4857 amdgpu_dm_connector
->max_vfreq
* 1000000;
4863 * TODO figure out how to notify user-mode or DRM of freesync caps
4864 * once we figure out how to deal with freesync in an upstreamable
4870 void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector
*connector
)
4873 * TODO fill in once we figure out how to deal with freesync in
4874 * an upstreamable fashion