2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include "dm_services_types.h"
31 #include "amdgpu_display.h"
33 #include "amdgpu_dm.h"
34 #include "amdgpu_pm.h"
36 #include "amd_shared.h"
37 #include "amdgpu_dm_irq.h"
38 #include "dm_helpers.h"
39 #include "dm_services_types.h"
40 #include "amdgpu_dm_mst_types.h"
42 #include "ivsrcid/ivsrcid_vislands30.h"
44 #include <linux/module.h>
45 #include <linux/moduleparam.h>
46 #include <linux/version.h>
47 #include <linux/types.h>
50 #include <drm/drm_atomic.h>
51 #include <drm/drm_atomic_helper.h>
52 #include <drm/drm_dp_mst_helper.h>
53 #include <drm/drm_fb_helper.h>
54 #include <drm/drm_edid.h>
56 #include "modules/inc/mod_freesync.h"
58 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
59 #include "ivsrcid/irqsrcs_dcn_1_0.h"
61 #include "raven1/DCN/dcn_1_0_offset.h"
62 #include "raven1/DCN/dcn_1_0_sh_mask.h"
63 #include "vega10/soc15ip.h"
65 #include "soc15_common.h"
68 #include "modules/inc/mod_freesync.h"
70 #include "i2caux_interface.h"
73 static enum drm_plane_type dm_plane_type_default
[AMDGPU_MAX_PLANES
] = {
74 DRM_PLANE_TYPE_PRIMARY
,
75 DRM_PLANE_TYPE_PRIMARY
,
76 DRM_PLANE_TYPE_PRIMARY
,
77 DRM_PLANE_TYPE_PRIMARY
,
78 DRM_PLANE_TYPE_PRIMARY
,
79 DRM_PLANE_TYPE_PRIMARY
,
82 static enum drm_plane_type dm_plane_type_carizzo
[AMDGPU_MAX_PLANES
] = {
83 DRM_PLANE_TYPE_PRIMARY
,
84 DRM_PLANE_TYPE_PRIMARY
,
85 DRM_PLANE_TYPE_PRIMARY
,
86 DRM_PLANE_TYPE_OVERLAY
,/* YUV Capable Underlay */
89 static enum drm_plane_type dm_plane_type_stoney
[AMDGPU_MAX_PLANES
] = {
90 DRM_PLANE_TYPE_PRIMARY
,
91 DRM_PLANE_TYPE_PRIMARY
,
92 DRM_PLANE_TYPE_OVERLAY
, /* YUV Capable Underlay */
96 * dm_vblank_get_counter
99 * Get counter for number of vertical blanks
102 * struct amdgpu_device *adev - [in] desired amdgpu device
103 * int disp_idx - [in] which CRTC to get the counter from
106 * Counter for vertical blanks
108 static u32
dm_vblank_get_counter(struct amdgpu_device
*adev
, int crtc
)
110 if (crtc
>= adev
->mode_info
.num_crtc
)
113 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
114 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
118 if (acrtc_state
->stream
== NULL
) {
119 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
124 return dc_stream_get_vblank_counter(acrtc_state
->stream
);
128 static int dm_crtc_get_scanoutpos(struct amdgpu_device
*adev
, int crtc
,
129 u32
*vbl
, u32
*position
)
131 uint32_t v_blank_start
, v_blank_end
, h_position
, v_position
;
133 if ((crtc
< 0) || (crtc
>= adev
->mode_info
.num_crtc
))
136 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
137 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
140 if (acrtc_state
->stream
== NULL
) {
141 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
147 * TODO rework base driver to use values directly.
148 * for now parse it back into reg-format
150 dc_stream_get_scanoutpos(acrtc_state
->stream
,
156 *position
= v_position
| (h_position
<< 16);
157 *vbl
= v_blank_start
| (v_blank_end
<< 16);
163 static bool dm_is_idle(void *handle
)
/* amd_ip_funcs stub: nothing to wait for yet.
 * NOTE(review): body was lost in extraction; return value reconstructed — confirm. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
175 static bool dm_check_soft_reset(void *handle
)
/* amd_ip_funcs stub: soft reset not implemented.
 * NOTE(review): body was lost in extraction; return value reconstructed — confirm. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
186 static struct amdgpu_crtc
*get_crtc_by_otg_inst(
187 struct amdgpu_device
*adev
,
190 struct drm_device
*dev
= adev
->ddev
;
191 struct drm_crtc
*crtc
;
192 struct amdgpu_crtc
*amdgpu_crtc
;
195 * following if is check inherited from both functions where this one is
196 * used now. Need to be checked why it could happen.
198 if (otg_inst
== -1) {
200 return adev
->mode_info
.crtcs
[0];
203 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
204 amdgpu_crtc
= to_amdgpu_crtc(crtc
);
206 if (amdgpu_crtc
->otg_inst
== otg_inst
)
213 static void dm_pflip_high_irq(void *interrupt_params
)
215 struct amdgpu_crtc
*amdgpu_crtc
;
216 struct common_irq_params
*irq_params
= interrupt_params
;
217 struct amdgpu_device
*adev
= irq_params
->adev
;
220 amdgpu_crtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_PFLIP
);
222 /* IRQ could occur when in initial stage */
223 /*TODO work and BO cleanup */
224 if (amdgpu_crtc
== NULL
) {
225 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
229 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
231 if (amdgpu_crtc
->pflip_status
!= AMDGPU_FLIP_SUBMITTED
){
232 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
233 amdgpu_crtc
->pflip_status
,
234 AMDGPU_FLIP_SUBMITTED
,
235 amdgpu_crtc
->crtc_id
,
237 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
242 /* wakeup usersapce */
243 if (amdgpu_crtc
->event
) {
244 /* Update to correct count/ts if racing with vblank irq */
245 drm_crtc_accurate_vblank_count(&amdgpu_crtc
->base
);
247 drm_crtc_send_vblank_event(&amdgpu_crtc
->base
, amdgpu_crtc
->event
);
249 /* page flip completed. clean up */
250 amdgpu_crtc
->event
= NULL
;
255 amdgpu_crtc
->pflip_status
= AMDGPU_FLIP_NONE
;
256 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
258 DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
259 __func__
, amdgpu_crtc
->crtc_id
, amdgpu_crtc
);
261 drm_crtc_vblank_put(&amdgpu_crtc
->base
);
264 static void dm_crtc_high_irq(void *interrupt_params
)
266 struct common_irq_params
*irq_params
= interrupt_params
;
267 struct amdgpu_device
*adev
= irq_params
->adev
;
268 uint8_t crtc_index
= 0;
269 struct amdgpu_crtc
*acrtc
;
271 acrtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_VBLANK
);
274 crtc_index
= acrtc
->crtc_id
;
276 drm_handle_vblank(adev
->ddev
, crtc_index
);
279 static int dm_set_clockgating_state(void *handle
,
280 enum amd_clockgating_state state
)
285 static int dm_set_powergating_state(void *handle
,
286 enum amd_powergating_state state
)
291 /* Prototypes of private functions */
292 static int dm_early_init(void* handle
);
294 static void hotplug_notify_work_func(struct work_struct
*work
)
296 struct amdgpu_display_manager
*dm
= container_of(work
, struct amdgpu_display_manager
, mst_hotplug_work
);
297 struct drm_device
*dev
= dm
->ddev
;
299 drm_kms_helper_hotplug_event(dev
);
303 #include "dal_asic_id.h"
304 /* Allocate memory for FBC compressed data */
305 /* TODO: Dynamic allocation */
306 #define AMDGPU_FBC_SIZE (3840 * 2160 * 4)
308 void amdgpu_dm_initialize_fbc(struct amdgpu_device
*adev
)
311 struct dm_comressor_info
*compressor
= &adev
->dm
.compressor
;
313 if (!compressor
->bo_ptr
) {
314 r
= amdgpu_bo_create_kernel(adev
, AMDGPU_FBC_SIZE
, PAGE_SIZE
,
315 AMDGPU_GEM_DOMAIN_VRAM
, &compressor
->bo_ptr
,
316 &compressor
->gpu_addr
, &compressor
->cpu_addr
);
319 DRM_ERROR("DM: Failed to initialize fbc\n");
328 * Returns 0 on success
330 int amdgpu_dm_init(struct amdgpu_device
*adev
)
332 struct dc_init_data init_data
;
333 adev
->dm
.ddev
= adev
->ddev
;
334 adev
->dm
.adev
= adev
;
336 DRM_INFO("DAL is enabled\n");
337 /* Zero all the fields */
338 memset(&init_data
, 0, sizeof(init_data
));
340 /* initialize DAL's lock (for SYNC context use) */
341 spin_lock_init(&adev
->dm
.dal_lock
);
343 /* initialize DAL's mutex */
344 mutex_init(&adev
->dm
.dal_mutex
);
346 if(amdgpu_dm_irq_init(adev
)) {
347 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
351 init_data
.asic_id
.chip_family
= adev
->family
;
353 init_data
.asic_id
.pci_revision_id
= adev
->rev_id
;
354 init_data
.asic_id
.hw_internal_rev
= adev
->external_rev_id
;
356 init_data
.asic_id
.vram_width
= adev
->mc
.vram_width
;
357 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
358 init_data
.asic_id
.atombios_base_address
=
359 adev
->mode_info
.atom_context
->bios
;
361 init_data
.driver
= adev
;
363 adev
->dm
.cgs_device
= amdgpu_cgs_create_device(adev
);
365 if (!adev
->dm
.cgs_device
) {
366 DRM_ERROR("amdgpu: failed to create cgs device.\n");
370 init_data
.cgs_device
= adev
->dm
.cgs_device
;
374 init_data
.dce_environment
= DCE_ENV_PRODUCTION_DRV
;
377 if (adev
->family
== FAMILY_CZ
)
378 amdgpu_dm_initialize_fbc(adev
);
379 init_data
.fbc_gpu_addr
= adev
->dm
.compressor
.gpu_addr
;
381 /* Display Core create. */
382 adev
->dm
.dc
= dc_create(&init_data
);
385 DRM_INFO("Display Core failed to initialize!\n");
387 INIT_WORK(&adev
->dm
.mst_hotplug_work
, hotplug_notify_work_func
);
389 adev
->dm
.freesync_module
= mod_freesync_create(adev
->dm
.dc
);
390 if (!adev
->dm
.freesync_module
) {
392 "amdgpu: failed to initialize freesync_module.\n");
394 DRM_INFO("amdgpu: freesync_module init done %p.\n",
395 adev
->dm
.freesync_module
);
397 if (amdgpu_dm_initialize_drm_device(adev
)) {
399 "amdgpu: failed to initialize sw for display support.\n");
403 /* Update the actual used number of crtc */
404 adev
->mode_info
.num_crtc
= adev
->dm
.display_indexes_num
;
406 /* TODO: Add_display_info? */
408 /* TODO use dynamic cursor width */
409 adev
->ddev
->mode_config
.cursor_width
= adev
->dm
.dc
->caps
.max_cursor_size
;
410 adev
->ddev
->mode_config
.cursor_height
= adev
->dm
.dc
->caps
.max_cursor_size
;
412 if (drm_vblank_init(adev
->ddev
, adev
->dm
.display_indexes_num
)) {
414 "amdgpu: failed to initialize sw for display support.\n");
418 DRM_INFO("KMS initialized.\n");
422 amdgpu_dm_fini(adev
);
427 void amdgpu_dm_fini(struct amdgpu_device
*adev
)
429 amdgpu_dm_destroy_drm_device(&adev
->dm
);
431 * TODO: pageflip, vlank interrupt
433 * amdgpu_dm_irq_fini(adev);
436 if (adev
->dm
.cgs_device
) {
437 amdgpu_cgs_destroy_device(adev
->dm
.cgs_device
);
438 adev
->dm
.cgs_device
= NULL
;
440 if (adev
->dm
.freesync_module
) {
441 mod_freesync_destroy(adev
->dm
.freesync_module
);
442 adev
->dm
.freesync_module
= NULL
;
444 /* DC Destroy TODO: Replace destroy DAL */
446 dc_destroy(&adev
->dm
.dc
);
/* moved from amdgpu_dm_kms.c */
/* NOTE(review): body was lost in extraction; reconstructed as empty stub — confirm. */
void amdgpu_dm_destroy()
{
}
/* amd_ip_funcs stub: no software state to set up here.
 * NOTE(review): body was lost in extraction; return reconstructed — confirm. */
static int dm_sw_init(void *handle)
{
	return 0;
}
/* amd_ip_funcs stub: no software state to tear down here.
 * NOTE(review): body was lost in extraction; return reconstructed — confirm. */
static int dm_sw_fini(void *handle)
{
	return 0;
}
465 static int detect_mst_link_for_all_connectors(struct drm_device
*dev
)
467 struct amdgpu_connector
*aconnector
;
468 struct drm_connector
*connector
;
471 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
473 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
474 aconnector
= to_amdgpu_connector(connector
);
475 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
) {
476 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
477 aconnector
, aconnector
->base
.base
.id
);
479 ret
= drm_dp_mst_topology_mgr_set_mst(&aconnector
->mst_mgr
, true);
481 DRM_ERROR("DM_MST: Failed to start MST\n");
482 ((struct dc_link
*)aconnector
->dc_link
)->type
= dc_connection_single
;
488 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
492 static int dm_late_init(void *handle
)
494 struct drm_device
*dev
= ((struct amdgpu_device
*)handle
)->ddev
;
495 int r
= detect_mst_link_for_all_connectors(dev
);
500 static void s3_handle_mst(struct drm_device
*dev
, bool suspend
)
502 struct amdgpu_connector
*aconnector
;
503 struct drm_connector
*connector
;
505 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
507 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
508 aconnector
= to_amdgpu_connector(connector
);
509 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
510 !aconnector
->mst_port
) {
513 drm_dp_mst_topology_mgr_suspend(&aconnector
->mst_mgr
);
515 drm_dp_mst_topology_mgr_resume(&aconnector
->mst_mgr
);
519 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
/* amd_ip_funcs hw init: create the DAL display manager and init HPD. */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/* amd_ip_funcs hw fini: tear down HPD, IRQs and the display manager. */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);

	return 0;
}
543 static int dm_suspend(void *handle
)
545 struct amdgpu_device
*adev
= handle
;
546 struct amdgpu_display_manager
*dm
= &adev
->dm
;
549 s3_handle_mst(adev
->ddev
, true);
551 amdgpu_dm_irq_suspend(adev
);
553 WARN_ON(adev
->dm
.cached_state
);
554 adev
->dm
.cached_state
= drm_atomic_helper_suspend(adev
->ddev
);
558 DC_ACPI_CM_POWER_STATE_D3
564 struct amdgpu_connector
*amdgpu_dm_find_first_crct_matching_connector(
565 struct drm_atomic_state
*state
,
566 struct drm_crtc
*crtc
,
570 struct drm_connector_state
*conn_state
;
571 struct drm_connector
*connector
;
572 struct drm_crtc
*crtc_from_state
;
574 for_each_connector_in_state(
582 connector
->state
->crtc
;
584 if (crtc_from_state
== crtc
)
585 return to_amdgpu_connector(connector
);
591 static int dm_resume(void *handle
)
593 struct amdgpu_device
*adev
= handle
;
594 struct amdgpu_display_manager
*dm
= &adev
->dm
;
596 /* power on hardware */
599 DC_ACPI_CM_POWER_STATE_D0
605 int amdgpu_dm_display_resume(struct amdgpu_device
*adev
)
607 struct drm_device
*ddev
= adev
->ddev
;
608 struct amdgpu_display_manager
*dm
= &adev
->dm
;
609 struct amdgpu_connector
*aconnector
;
610 struct drm_connector
*connector
;
611 struct drm_crtc
*crtc
;
612 struct drm_crtc_state
*crtc_state
;
616 /* program HPD filter */
619 /* On resume we need to rewrite the MSTM control bits to enamble MST*/
620 s3_handle_mst(ddev
, false);
623 * early enable HPD Rx IRQ, should be done before set mode as short
624 * pulse interrupts are used for MST
626 amdgpu_dm_irq_resume_early(adev
);
629 list_for_each_entry(connector
,
630 &ddev
->mode_config
.connector_list
, head
) {
631 aconnector
= to_amdgpu_connector(connector
);
634 * this is the case when traversing through already created
635 * MST connectors, should be skipped
637 if (aconnector
->mst_port
)
640 mutex_lock(&aconnector
->hpd_lock
);
641 dc_link_detect(aconnector
->dc_link
, false);
642 aconnector
->dc_sink
= NULL
;
643 amdgpu_dm_update_connector_after_detect(aconnector
);
644 mutex_unlock(&aconnector
->hpd_lock
);
647 /* Force mode set in atomic comit */
648 for_each_crtc_in_state(adev
->dm
.cached_state
, crtc
, crtc_state
, i
)
649 crtc_state
->active_changed
= true;
651 ret
= drm_atomic_helper_resume(ddev
, adev
->dm
.cached_state
);
653 drm_atomic_state_put(adev
->dm
.cached_state
);
654 adev
->dm
.cached_state
= NULL
;
656 amdgpu_dm_irq_resume_late(adev
);
661 static const struct amd_ip_funcs amdgpu_dm_funcs
= {
663 .early_init
= dm_early_init
,
664 .late_init
= dm_late_init
,
665 .sw_init
= dm_sw_init
,
666 .sw_fini
= dm_sw_fini
,
667 .hw_init
= dm_hw_init
,
668 .hw_fini
= dm_hw_fini
,
669 .suspend
= dm_suspend
,
671 .is_idle
= dm_is_idle
,
672 .wait_for_idle
= dm_wait_for_idle
,
673 .check_soft_reset
= dm_check_soft_reset
,
674 .soft_reset
= dm_soft_reset
,
675 .set_clockgating_state
= dm_set_clockgating_state
,
676 .set_powergating_state
= dm_set_powergating_state
,
679 const struct amdgpu_ip_block_version dm_ip_block
=
681 .type
= AMD_IP_BLOCK_TYPE_DCE
,
685 .funcs
= &amdgpu_dm_funcs
,
689 struct drm_atomic_state
*
690 dm_atomic_state_alloc(struct drm_device
*dev
)
692 struct dm_atomic_state
*state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
694 if (!state
|| drm_atomic_state_init(dev
, &state
->base
) < 0) {
703 dm_atomic_state_clear(struct drm_atomic_state
*state
)
705 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
707 if (dm_state
->context
) {
708 dc_release_validate_context(dm_state
->context
);
709 dm_state
->context
= NULL
;
712 drm_atomic_state_default_clear(state
);
/* Free a DM-private atomic state allocated by dm_atomic_state_alloc(). */
static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	drm_atomic_state_default_release(state);
	kfree(dm_state);
}
723 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs
= {
724 .fb_create
= amdgpu_user_framebuffer_create
,
725 .output_poll_changed
= amdgpu_output_poll_changed
,
726 .atomic_check
= amdgpu_dm_atomic_check
,
727 .atomic_commit
= amdgpu_dm_atomic_commit
,
728 .atomic_state_alloc
= dm_atomic_state_alloc
,
729 .atomic_state_clear
= dm_atomic_state_clear
,
730 .atomic_state_free
= dm_atomic_state_alloc_free
733 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs
= {
734 .atomic_commit_tail
= amdgpu_dm_atomic_commit_tail
737 void amdgpu_dm_update_connector_after_detect(
738 struct amdgpu_connector
*aconnector
)
740 struct drm_connector
*connector
= &aconnector
->base
;
741 struct drm_device
*dev
= connector
->dev
;
742 struct dc_sink
*sink
;
744 /* MST handled by drm_mst framework */
745 if (aconnector
->mst_mgr
.mst_state
== true)
749 sink
= aconnector
->dc_link
->local_sink
;
751 /* Edid mgmt connector gets first update only in mode_valid hook and then
752 * the connector sink is set to either fake or physical sink depends on link status.
753 * don't do it here if u are during boot
755 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
756 && aconnector
->dc_em_sink
) {
758 /* For S3 resume with headless use eml_sink to fake stream
759 * because on resume connecotr->sink is set ti NULL
761 mutex_lock(&dev
->mode_config
.mutex
);
764 if (aconnector
->dc_sink
) {
765 amdgpu_dm_remove_sink_from_freesync_module(
767 /* retain and release bellow are used for
768 * bump up refcount for sink because the link don't point
769 * to it anymore after disconnect so on next crtc to connector
770 * reshuffle by UMD we will get into unwanted dc_sink release
772 if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
773 dc_sink_release(aconnector
->dc_sink
);
775 aconnector
->dc_sink
= sink
;
776 amdgpu_dm_add_sink_to_freesync_module(
777 connector
, aconnector
->edid
);
779 amdgpu_dm_remove_sink_from_freesync_module(connector
);
780 if (!aconnector
->dc_sink
)
781 aconnector
->dc_sink
= aconnector
->dc_em_sink
;
782 else if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
783 dc_sink_retain(aconnector
->dc_sink
);
786 mutex_unlock(&dev
->mode_config
.mutex
);
791 * TODO: temporary guard to look for proper fix
792 * if this sink is MST sink, we should not do anything
794 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
)
797 if (aconnector
->dc_sink
== sink
) {
798 /* We got a DP short pulse (Link Loss, DP CTS, etc...).
800 DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
801 aconnector
->connector_id
);
805 DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
806 aconnector
->connector_id
, aconnector
->dc_sink
, sink
);
808 mutex_lock(&dev
->mode_config
.mutex
);
810 /* 1. Update status of the drm connector
811 * 2. Send an event and let userspace tell us what to do */
813 /* TODO: check if we still need the S3 mode update workaround.
814 * If yes, put it here. */
815 if (aconnector
->dc_sink
)
816 amdgpu_dm_remove_sink_from_freesync_module(
819 aconnector
->dc_sink
= sink
;
820 if (sink
->dc_edid
.length
== 0)
821 aconnector
->edid
= NULL
;
824 (struct edid
*) sink
->dc_edid
.raw_edid
;
827 drm_mode_connector_update_edid_property(connector
,
830 amdgpu_dm_add_sink_to_freesync_module(connector
, aconnector
->edid
);
833 amdgpu_dm_remove_sink_from_freesync_module(connector
);
834 drm_mode_connector_update_edid_property(connector
, NULL
);
835 aconnector
->num_modes
= 0;
836 aconnector
->dc_sink
= NULL
;
839 mutex_unlock(&dev
->mode_config
.mutex
);
842 static void handle_hpd_irq(void *param
)
844 struct amdgpu_connector
*aconnector
= (struct amdgpu_connector
*)param
;
845 struct drm_connector
*connector
= &aconnector
->base
;
846 struct drm_device
*dev
= connector
->dev
;
848 /* In case of failure or MST no need to update connector status or notify the OS
849 * since (for MST case) MST does this in it's own context.
851 mutex_lock(&aconnector
->hpd_lock
);
852 if (dc_link_detect(aconnector
->dc_link
, false)) {
853 amdgpu_dm_update_connector_after_detect(aconnector
);
856 drm_modeset_lock_all(dev
);
857 dm_restore_drm_connector_state(dev
, connector
);
858 drm_modeset_unlock_all(dev
);
860 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
861 drm_kms_helper_hotplug_event(dev
);
863 mutex_unlock(&aconnector
->hpd_lock
);
867 static void dm_handle_hpd_rx_irq(struct amdgpu_connector
*aconnector
)
869 uint8_t esi
[DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
] = { 0 };
871 bool new_irq_handled
= false;
873 int dpcd_bytes_to_read
;
875 const int max_process_count
= 30;
876 int process_count
= 0;
878 const struct dc_link_status
*link_status
= dc_link_get_status(aconnector
->dc_link
);
880 if (link_status
->dpcd_caps
->dpcd_rev
.raw
< 0x12) {
881 dpcd_bytes_to_read
= DP_LANE0_1_STATUS
- DP_SINK_COUNT
;
882 /* DPCD 0x200 - 0x201 for downstream IRQ */
883 dpcd_addr
= DP_SINK_COUNT
;
885 dpcd_bytes_to_read
= DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
;
886 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
887 dpcd_addr
= DP_SINK_COUNT_ESI
;
890 dret
= drm_dp_dpcd_read(
891 &aconnector
->dm_dp_aux
.aux
,
896 while (dret
== dpcd_bytes_to_read
&&
897 process_count
< max_process_count
) {
903 DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi
[0], esi
[1], esi
[2]);
904 /* handle HPD short pulse irq */
905 if (aconnector
->mst_mgr
.mst_state
)
907 &aconnector
->mst_mgr
,
911 if (new_irq_handled
) {
912 /* ACK at DPCD to notify down stream */
913 const int ack_dpcd_bytes_to_write
=
914 dpcd_bytes_to_read
- 1;
916 for (retry
= 0; retry
< 3; retry
++) {
919 wret
= drm_dp_dpcd_write(
920 &aconnector
->dm_dp_aux
.aux
,
923 ack_dpcd_bytes_to_write
);
924 if (wret
== ack_dpcd_bytes_to_write
)
928 /* check if there is new irq to be handle */
929 dret
= drm_dp_dpcd_read(
930 &aconnector
->dm_dp_aux
.aux
,
935 new_irq_handled
= false;
940 if (process_count
== max_process_count
)
941 DRM_DEBUG_KMS("Loop exceeded max iterations\n");
944 static void handle_hpd_rx_irq(void *param
)
946 struct amdgpu_connector
*aconnector
= (struct amdgpu_connector
*)param
;
947 struct drm_connector
*connector
= &aconnector
->base
;
948 struct drm_device
*dev
= connector
->dev
;
949 const struct dc_link
*dc_link
= aconnector
->dc_link
;
950 bool is_mst_root_connector
= aconnector
->mst_mgr
.mst_state
;
952 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
953 * conflict, after implement i2c helper, this mutex should be
956 if (aconnector
->dc_link
->type
!= dc_connection_mst_branch
)
957 mutex_lock(&aconnector
->hpd_lock
);
959 if (dc_link_handle_hpd_rx_irq(aconnector
->dc_link
, NULL
) &&
960 !is_mst_root_connector
) {
961 /* Downstream Port status changed. */
962 if (dc_link_detect(aconnector
->dc_link
, false)) {
963 amdgpu_dm_update_connector_after_detect(aconnector
);
966 drm_modeset_lock_all(dev
);
967 dm_restore_drm_connector_state(dev
, connector
);
968 drm_modeset_unlock_all(dev
);
970 drm_kms_helper_hotplug_event(dev
);
973 if ((dc_link
->cur_link_settings
.lane_count
!= LANE_COUNT_UNKNOWN
) ||
974 (dc_link
->type
== dc_connection_mst_branch
))
975 dm_handle_hpd_rx_irq(aconnector
);
977 if (aconnector
->dc_link
->type
!= dc_connection_mst_branch
)
978 mutex_unlock(&aconnector
->hpd_lock
);
981 static void register_hpd_handlers(struct amdgpu_device
*adev
)
983 struct drm_device
*dev
= adev
->ddev
;
984 struct drm_connector
*connector
;
985 struct amdgpu_connector
*aconnector
;
986 const struct dc_link
*dc_link
;
987 struct dc_interrupt_params int_params
= {0};
989 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
990 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
992 list_for_each_entry(connector
,
993 &dev
->mode_config
.connector_list
, head
) {
995 aconnector
= to_amdgpu_connector(connector
);
996 dc_link
= aconnector
->dc_link
;
998 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd
) {
999 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1000 int_params
.irq_source
= dc_link
->irq_source_hpd
;
1002 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1004 (void *) aconnector
);
1007 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd_rx
) {
1009 /* Also register for DP short pulse (hpd_rx). */
1010 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1011 int_params
.irq_source
= dc_link
->irq_source_hpd_rx
;
1013 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1015 (void *) aconnector
);
1020 /* Register IRQ sources and initialize IRQ callbacks */
1021 static int dce110_register_irq_handlers(struct amdgpu_device
*adev
)
1023 struct dc
*dc
= adev
->dm
.dc
;
1024 struct common_irq_params
*c_irq_params
;
1025 struct dc_interrupt_params int_params
= {0};
1028 unsigned client_id
= AMDGPU_IH_CLIENTID_LEGACY
;
1030 if (adev
->asic_type
== CHIP_VEGA10
||
1031 adev
->asic_type
== CHIP_RAVEN
)
1032 client_id
= AMDGPU_IH_CLIENTID_DCE
;
1034 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1035 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1037 /* Actions of amdgpu_irq_add_id():
1038 * 1. Register a set() function with base driver.
1039 * Base driver will call set() function to enable/disable an
1040 * interrupt in DC hardware.
1041 * 2. Register amdgpu_dm_irq_handler().
1042 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1043 * coming from DC hardware.
1044 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1045 * for acknowledging and handling. */
1047 /* Use VBLANK interrupt */
1048 for (i
= VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
; i
<= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0
; i
++) {
1049 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->crtc_irq
);
1051 DRM_ERROR("Failed to add crtc irq id!\n");
1055 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1056 int_params
.irq_source
=
1057 dc_interrupt_to_irq_source(dc
, i
, 0);
1059 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1061 c_irq_params
->adev
= adev
;
1062 c_irq_params
->irq_src
= int_params
.irq_source
;
1064 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1065 dm_crtc_high_irq
, c_irq_params
);
1068 /* Use GRPH_PFLIP interrupt */
1069 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
1070 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
1071 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
1073 DRM_ERROR("Failed to add page flip irq id!\n");
1077 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1078 int_params
.irq_source
=
1079 dc_interrupt_to_irq_source(dc
, i
, 0);
1081 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1083 c_irq_params
->adev
= adev
;
1084 c_irq_params
->irq_src
= int_params
.irq_source
;
1086 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1087 dm_pflip_high_irq
, c_irq_params
);
1092 r
= amdgpu_irq_add_id(adev
, client_id
,
1093 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
1095 DRM_ERROR("Failed to add hpd irq id!\n");
1099 register_hpd_handlers(adev
);
1104 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1105 /* Register IRQ sources and initialize IRQ callbacks */
1106 static int dcn10_register_irq_handlers(struct amdgpu_device
*adev
)
1108 struct dc
*dc
= adev
->dm
.dc
;
1109 struct common_irq_params
*c_irq_params
;
1110 struct dc_interrupt_params int_params
= {0};
1114 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1115 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1117 /* Actions of amdgpu_irq_add_id():
1118 * 1. Register a set() function with base driver.
1119 * Base driver will call set() function to enable/disable an
1120 * interrupt in DC hardware.
1121 * 2. Register amdgpu_dm_irq_handler().
1122 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1123 * coming from DC hardware.
1124 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1125 * for acknowledging and handling.
1128 /* Use VSTARTUP interrupt */
1129 for (i
= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
;
1130 i
<= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
+ adev
->mode_info
.num_crtc
- 1;
1132 r
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_DCE
, i
, &adev
->crtc_irq
);
1135 DRM_ERROR("Failed to add crtc irq id!\n");
1139 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1140 int_params
.irq_source
=
1141 dc_interrupt_to_irq_source(dc
, i
, 0);
1143 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1145 c_irq_params
->adev
= adev
;
1146 c_irq_params
->irq_src
= int_params
.irq_source
;
1148 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1149 dm_crtc_high_irq
, c_irq_params
);
1152 /* Use GRPH_PFLIP interrupt */
1153 for (i
= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
;
1154 i
<= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
+ adev
->mode_info
.num_crtc
- 1;
1156 r
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_DCE
, i
, &adev
->pageflip_irq
);
1158 DRM_ERROR("Failed to add page flip irq id!\n");
1162 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1163 int_params
.irq_source
=
1164 dc_interrupt_to_irq_source(dc
, i
, 0);
1166 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1168 c_irq_params
->adev
= adev
;
1169 c_irq_params
->irq_src
= int_params
.irq_source
;
1171 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1172 dm_pflip_high_irq
, c_irq_params
);
1177 r
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_DCE
, DCN_1_0__SRCID__DC_HPD1_INT
,
1180 DRM_ERROR("Failed to add hpd irq id!\n");
1184 register_hpd_handlers(adev
);
1190 static int amdgpu_dm_mode_config_init(struct amdgpu_device
*adev
)
1194 adev
->mode_info
.mode_config_initialized
= true;
1196 adev
->ddev
->mode_config
.funcs
= (void *)&amdgpu_dm_mode_funcs
;
1197 adev
->ddev
->mode_config
.helper_private
= &amdgpu_dm_mode_config_helperfuncs
;
1199 adev
->ddev
->mode_config
.max_width
= 16384;
1200 adev
->ddev
->mode_config
.max_height
= 16384;
1202 adev
->ddev
->mode_config
.preferred_depth
= 24;
1203 adev
->ddev
->mode_config
.prefer_shadow
= 1;
1204 /* indicate support of immediate flip */
1205 adev
->ddev
->mode_config
.async_page_flip
= true;
1207 adev
->ddev
->mode_config
.fb_base
= adev
->mc
.aper_base
;
1209 r
= amdgpu_modeset_create_props(adev
);
1216 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1217 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1219 static int amdgpu_dm_backlight_update_status(struct backlight_device
*bd
)
1221 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
1223 if (dc_link_set_backlight_level(dm
->backlight_link
,
1224 bd
->props
.brightness
, 0, 0))
1230 static int amdgpu_dm_backlight_get_brightness(struct backlight_device
*bd
)
1232 return bd
->props
.brightness
;
1235 static const struct backlight_ops amdgpu_dm_backlight_ops
= {
1236 .get_brightness
= amdgpu_dm_backlight_get_brightness
,
1237 .update_status
= amdgpu_dm_backlight_update_status
,
1240 void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager
*dm
)
1243 struct backlight_properties props
= { 0 };
1245 props
.max_brightness
= AMDGPU_MAX_BL_LEVEL
;
1246 props
.type
= BACKLIGHT_RAW
;
1248 snprintf(bl_name
, sizeof(bl_name
), "amdgpu_bl%d",
1249 dm
->adev
->ddev
->primary
->index
);
1251 dm
->backlight_dev
= backlight_device_register(bl_name
,
1252 dm
->adev
->ddev
->dev
,
1254 &amdgpu_dm_backlight_ops
,
1257 if (NULL
== dm
->backlight_dev
)
1258 DRM_ERROR("DM: Backlight registration failed!\n");
1260 DRM_INFO("DM: Registered Backlight device: %s\n", bl_name
);
1265 /* In this architecture, the association
1266 * connector -> encoder -> crtc
1267 * id not really requried. The crtc and connector will hold the
1268 * display_index as an abstraction to use with DAL component
1270 * Returns 0 on success
1272 int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
)
1274 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1276 struct amdgpu_connector
*aconnector
= NULL
;
1277 struct amdgpu_encoder
*aencoder
= NULL
;
1278 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
1280 unsigned long possible_crtcs
;
1282 link_cnt
= dm
->dc
->caps
.max_links
;
1283 if (amdgpu_dm_mode_config_init(dm
->adev
)) {
1284 DRM_ERROR("DM: Failed to initialize mode config\n");
1288 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; i
++) {
1289 mode_info
->planes
[i
] = kzalloc(sizeof(struct amdgpu_plane
),
1291 if (!mode_info
->planes
[i
]) {
1292 DRM_ERROR("KMS: Failed to allocate plane\n");
1293 goto fail_free_planes
;
1295 mode_info
->planes
[i
]->base
.type
= mode_info
->plane_type
[i
];
1298 * HACK: IGT tests expect that each plane can only have one
1299 * one possible CRTC. For now, set one CRTC for each
1300 * plane that is not an underlay, but still allow multiple
1301 * CRTCs for underlay planes.
1303 possible_crtcs
= 1 << i
;
1304 if (i
>= dm
->dc
->caps
.max_streams
)
1305 possible_crtcs
= 0xff;
1307 if (amdgpu_dm_plane_init(dm
, mode_info
->planes
[i
], possible_crtcs
)) {
1308 DRM_ERROR("KMS: Failed to initialize plane\n");
1309 goto fail_free_planes
;
1313 for (i
= 0; i
< dm
->dc
->caps
.max_streams
; i
++)
1314 if (amdgpu_dm_crtc_init(dm
, &mode_info
->planes
[i
]->base
, i
)) {
1315 DRM_ERROR("KMS: Failed to initialize crtc\n");
1316 goto fail_free_planes
;
1319 dm
->display_indexes_num
= dm
->dc
->caps
.max_streams
;
1321 /* loops over all connectors on the board */
1322 for (i
= 0; i
< link_cnt
; i
++) {
1324 if (i
> AMDGPU_DM_MAX_DISPLAY_INDEX
) {
1326 "KMS: Cannot support more than %d display indexes\n",
1327 AMDGPU_DM_MAX_DISPLAY_INDEX
);
1331 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
1333 goto fail_free_planes
;
1335 aencoder
= kzalloc(sizeof(*aencoder
), GFP_KERNEL
);
1337 goto fail_free_connector
;
1340 if (amdgpu_dm_encoder_init(dm
->ddev
, aencoder
, i
)) {
1341 DRM_ERROR("KMS: Failed to initialize encoder\n");
1342 goto fail_free_encoder
;
1345 if (amdgpu_dm_connector_init(dm
, aconnector
, i
, aencoder
)) {
1346 DRM_ERROR("KMS: Failed to initialize connector\n");
1347 goto fail_free_encoder
;
1350 if (dc_link_detect(dc_get_link_at_index(dm
->dc
, i
), true))
1351 amdgpu_dm_update_connector_after_detect(aconnector
);
1354 /* Software is initialized. Now we can register interrupt handlers. */
1355 switch (adev
->asic_type
) {
1362 case CHIP_POLARIS11
:
1363 case CHIP_POLARIS10
:
1364 case CHIP_POLARIS12
:
1366 if (dce110_register_irq_handlers(dm
->adev
)) {
1367 DRM_ERROR("DM: Failed to initialize IRQ\n");
1368 goto fail_free_encoder
;
1371 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1373 if (dcn10_register_irq_handlers(dm
->adev
)) {
1374 DRM_ERROR("DM: Failed to initialize IRQ\n");
1375 goto fail_free_encoder
;
1380 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev
->asic_type
);
1381 goto fail_free_encoder
;
1384 drm_mode_config_reset(dm
->ddev
);
1389 fail_free_connector
:
1392 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; i
++)
1393 kfree(mode_info
->planes
[i
]);
1397 void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
)
1399 drm_mode_config_cleanup(dm
->ddev
);
1403 /******************************************************************************
1404 * amdgpu_display_funcs functions
1405 *****************************************************************************/
1408 * dm_bandwidth_update - program display watermarks
1410 * @adev: amdgpu_device pointer
1412 * Calculate and program the display watermarks and line buffer allocation.
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
1419 static void dm_set_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
,
1422 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1425 static u8
dm_get_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
)
1427 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1431 static int amdgpu_notify_freesync(struct drm_device
*dev
, void *data
,
1432 struct drm_file
*filp
)
1434 struct mod_freesync_params freesync_params
;
1435 uint8_t num_streams
;
1438 struct amdgpu_device
*adev
= dev
->dev_private
;
1441 /* Get freesync enable flag from DRM */
1443 num_streams
= dc_get_current_stream_count(adev
->dm
.dc
);
1445 for (i
= 0; i
< num_streams
; i
++) {
1446 struct dc_stream_state
*stream
;
1447 stream
= dc_get_stream_at_index(adev
->dm
.dc
, i
);
1449 mod_freesync_update_state(adev
->dm
.freesync_module
,
1450 &stream
, 1, &freesync_params
);
1456 static const struct amdgpu_display_funcs dm_display_funcs
= {
1457 .bandwidth_update
= dm_bandwidth_update
, /* called unconditionally */
1458 .vblank_get_counter
= dm_vblank_get_counter
,/* called unconditionally */
1459 .vblank_wait
= NULL
,
1460 .backlight_set_level
=
1461 dm_set_backlight_level
,/* called unconditionally */
1462 .backlight_get_level
=
1463 dm_get_backlight_level
,/* called unconditionally */
1464 .hpd_sense
= NULL
,/* called unconditionally */
1465 .hpd_set_polarity
= NULL
, /* called unconditionally */
1466 .hpd_get_gpio_reg
= NULL
, /* VBIOS parsing. DAL does it. */
1467 .page_flip_get_scanoutpos
=
1468 dm_crtc_get_scanoutpos
,/* called unconditionally */
1469 .add_encoder
= NULL
, /* VBIOS parsing. DAL does it. */
1470 .add_connector
= NULL
, /* VBIOS parsing. DAL does it. */
1471 .notify_freesync
= amdgpu_notify_freesync
,
1476 #if defined(CONFIG_DEBUG_KERNEL_DC)
1478 static ssize_t
s3_debug_store(
1479 struct device
*device
,
1480 struct device_attribute
*attr
,
1486 struct pci_dev
*pdev
= to_pci_dev(device
);
1487 struct drm_device
*drm_dev
= pci_get_drvdata(pdev
);
1488 struct amdgpu_device
*adev
= drm_dev
->dev_private
;
1490 ret
= kstrtoint(buf
, 0, &s3_state
);
1495 amdgpu_dm_display_resume(adev
);
1496 drm_kms_helper_hotplug_event(adev
->ddev
);
1501 return ret
== 0 ? count
: 0;
/*
 * Write-only sysfs attribute "s3_debug"; writes are handled by
 * s3_debug_store() above (debug-only, gated by CONFIG_DEBUG_KERNEL_DC).
 */
DEVICE_ATTR_WO(s3_debug
);
1508 static int dm_early_init(void *handle
)
1510 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1512 adev
->ddev
->driver
->driver_features
|= DRIVER_ATOMIC
;
1513 amdgpu_dm_set_irq_funcs(adev
);
1515 switch (adev
->asic_type
) {
1518 adev
->mode_info
.num_crtc
= 6;
1519 adev
->mode_info
.num_hpd
= 6;
1520 adev
->mode_info
.num_dig
= 6;
1521 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1525 adev
->mode_info
.num_crtc
= 6;
1526 adev
->mode_info
.num_hpd
= 6;
1527 adev
->mode_info
.num_dig
= 7;
1528 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1531 adev
->mode_info
.num_crtc
= 3;
1532 adev
->mode_info
.num_hpd
= 6;
1533 adev
->mode_info
.num_dig
= 9;
1534 adev
->mode_info
.plane_type
= dm_plane_type_carizzo
;
1537 adev
->mode_info
.num_crtc
= 2;
1538 adev
->mode_info
.num_hpd
= 6;
1539 adev
->mode_info
.num_dig
= 9;
1540 adev
->mode_info
.plane_type
= dm_plane_type_stoney
;
1542 case CHIP_POLARIS11
:
1543 case CHIP_POLARIS12
:
1544 adev
->mode_info
.num_crtc
= 5;
1545 adev
->mode_info
.num_hpd
= 5;
1546 adev
->mode_info
.num_dig
= 5;
1547 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1549 case CHIP_POLARIS10
:
1550 adev
->mode_info
.num_crtc
= 6;
1551 adev
->mode_info
.num_hpd
= 6;
1552 adev
->mode_info
.num_dig
= 6;
1553 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1556 adev
->mode_info
.num_crtc
= 6;
1557 adev
->mode_info
.num_hpd
= 6;
1558 adev
->mode_info
.num_dig
= 6;
1559 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1561 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1563 adev
->mode_info
.num_crtc
= 4;
1564 adev
->mode_info
.num_hpd
= 4;
1565 adev
->mode_info
.num_dig
= 4;
1566 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1570 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev
->asic_type
);
1574 if (adev
->mode_info
.funcs
== NULL
)
1575 adev
->mode_info
.funcs
= &dm_display_funcs
;
1577 /* Note: Do NOT change adev->audio_endpt_rreg and
1578 * adev->audio_endpt_wreg because they are initialised in
1579 * amdgpu_device_init() */
1580 #if defined(CONFIG_DEBUG_KERNEL_DC)
1583 &dev_attr_s3_debug
);
1589 bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager
*dm
)
1595 bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager
*dm
)
1597 /* TODO */ return true;
1601 struct dm_connector_state
{
1602 struct drm_connector_state base
;
1604 enum amdgpu_rmx_type scaling
;
1605 uint8_t underscan_vborder
;
1606 uint8_t underscan_hborder
;
1607 bool underscan_enable
;
1610 #define to_dm_connector_state(x)\
1611 container_of((x), struct dm_connector_state, base)
1613 static bool modeset_required(struct drm_crtc_state
*crtc_state
,
1614 struct dc_stream_state
*new_stream
,
1615 struct dc_stream_state
*old_stream
)
1617 if (dc_is_stream_unchanged(new_stream
, old_stream
)) {
1618 crtc_state
->mode_changed
= false;
1619 DRM_DEBUG_KMS("Mode change not required, setting mode_changed to %d",
1620 crtc_state
->mode_changed
);
1623 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1626 if (!crtc_state
->enable
)
1629 return crtc_state
->active
;
1632 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
1634 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1637 return !crtc_state
->enable
|| !crtc_state
->active
;
1640 void amdgpu_dm_encoder_destroy(struct drm_encoder
*encoder
)
1642 drm_encoder_cleanup(encoder
);
1646 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
1647 .destroy
= amdgpu_dm_encoder_destroy
,
1650 static void dm_set_cursor(
1651 struct amdgpu_crtc
*amdgpu_crtc
,
1656 struct dc_cursor_attributes attributes
;
1657 struct dc_cursor_position position
;
1658 struct drm_crtc
*crtc
= &amdgpu_crtc
->base
;
1660 int xorigin
= 0, yorigin
= 0;
1661 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
1663 amdgpu_crtc
->cursor_width
= width
;
1664 amdgpu_crtc
->cursor_height
= height
;
1666 attributes
.address
.high_part
= upper_32_bits(gpu_addr
);
1667 attributes
.address
.low_part
= lower_32_bits(gpu_addr
);
1668 attributes
.width
= width
;
1669 attributes
.height
= height
;
1670 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
1671 attributes
.rotation_angle
= 0;
1672 attributes
.attribute_flags
.value
= 0;
1674 attributes
.pitch
= attributes
.width
;
1676 x
= amdgpu_crtc
->cursor_x
;
1677 y
= amdgpu_crtc
->cursor_y
;
1679 /* avivo cursor are offset into the total surface */
1680 x
+= crtc
->primary
->state
->src_x
>> 16;
1681 y
+= crtc
->primary
->state
->src_y
>> 16;
1684 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
1688 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
1692 position
.enable
= true;
1696 position
.x_hotspot
= xorigin
;
1697 position
.y_hotspot
= yorigin
;
1699 if (!dc_stream_set_cursor_attributes(
1700 acrtc_state
->stream
,
1702 DRM_ERROR("DC failed to set cursor attributes\n");
1705 if (!dc_stream_set_cursor_position(
1706 acrtc_state
->stream
,
1708 DRM_ERROR("DC failed to set cursor position\n");
1712 static int dm_crtc_cursor_set(
1713 struct drm_crtc
*crtc
,
1718 struct dc_cursor_position position
;
1719 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
1723 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
1726 DRM_DEBUG_KMS("%s: crtc_id=%d with size %d to %d \n",
1728 amdgpu_crtc
->crtc_id
,
1733 /* turn off cursor */
1734 position
.enable
= false;
1738 if (acrtc_state
->stream
) {
1739 /*set cursor visible false*/
1740 dc_stream_set_cursor_position(
1741 acrtc_state
->stream
,
1748 if ((width
> amdgpu_crtc
->max_cursor_width
) ||
1749 (height
> amdgpu_crtc
->max_cursor_height
)) {
1751 "%s: bad cursor width or height %d x %d\n",
1758 /*program new cursor bo to hardware*/
1759 dm_set_cursor(amdgpu_crtc
, address
, width
, height
);
1766 static int dm_crtc_cursor_move(struct drm_crtc
*crtc
,
1769 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
1770 int xorigin
= 0, yorigin
= 0;
1771 struct dc_cursor_position position
;
1772 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
1774 amdgpu_crtc
->cursor_x
= x
;
1775 amdgpu_crtc
->cursor_y
= y
;
1777 /* avivo cursor are offset into the total surface */
1778 x
+= crtc
->primary
->state
->src_x
>> 16;
1779 y
+= crtc
->primary
->state
->src_y
>> 16;
1782 * TODO: for cursor debugging unguard the following
1786 "%s: x %d y %d c->x %d c->y %d\n",
1795 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
1799 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
1803 position
.enable
= true;
1807 position
.x_hotspot
= xorigin
;
1808 position
.y_hotspot
= yorigin
;
1810 if (acrtc_state
->stream
) {
1811 if (!dc_stream_set_cursor_position(
1812 acrtc_state
->stream
,
1814 DRM_ERROR("DC failed to set cursor position\n");
1822 static bool fill_rects_from_plane_state(
1823 const struct drm_plane_state
*state
,
1824 struct dc_plane_state
*plane_state
)
1826 plane_state
->src_rect
.x
= state
->src_x
>> 16;
1827 plane_state
->src_rect
.y
= state
->src_y
>> 16;
1828 /*we ignore for now mantissa and do not to deal with floating pixels :(*/
1829 plane_state
->src_rect
.width
= state
->src_w
>> 16;
1831 if (plane_state
->src_rect
.width
== 0)
1834 plane_state
->src_rect
.height
= state
->src_h
>> 16;
1835 if (plane_state
->src_rect
.height
== 0)
1838 plane_state
->dst_rect
.x
= state
->crtc_x
;
1839 plane_state
->dst_rect
.y
= state
->crtc_y
;
1841 if (state
->crtc_w
== 0)
1844 plane_state
->dst_rect
.width
= state
->crtc_w
;
1846 if (state
->crtc_h
== 0)
1849 plane_state
->dst_rect
.height
= state
->crtc_h
;
1851 plane_state
->clip_rect
= plane_state
->dst_rect
;
1853 switch (state
->rotation
& DRM_MODE_ROTATE_MASK
) {
1854 case DRM_MODE_ROTATE_0
:
1855 plane_state
->rotation
= ROTATION_ANGLE_0
;
1857 case DRM_MODE_ROTATE_90
:
1858 plane_state
->rotation
= ROTATION_ANGLE_90
;
1860 case DRM_MODE_ROTATE_180
:
1861 plane_state
->rotation
= ROTATION_ANGLE_180
;
1863 case DRM_MODE_ROTATE_270
:
1864 plane_state
->rotation
= ROTATION_ANGLE_270
;
1867 plane_state
->rotation
= ROTATION_ANGLE_0
;
1873 static int get_fb_info(
1874 const struct amdgpu_framebuffer
*amdgpu_fb
,
1875 uint64_t *tiling_flags
,
1876 uint64_t *fb_location
)
1878 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->obj
);
1879 int r
= amdgpu_bo_reserve(rbo
, false);
1882 DRM_ERROR("Unable to reserve buffer\n");
1887 *fb_location
= amdgpu_bo_gpu_offset(rbo
);
1890 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
1892 amdgpu_bo_unreserve(rbo
);
1897 static int fill_plane_attributes_from_fb(
1898 struct amdgpu_device
*adev
,
1899 struct dc_plane_state
*plane_state
,
1900 const struct amdgpu_framebuffer
*amdgpu_fb
, bool addReq
)
1902 uint64_t tiling_flags
;
1903 uint64_t fb_location
= 0;
1904 unsigned int awidth
;
1905 const struct drm_framebuffer
*fb
= &amdgpu_fb
->base
;
1907 struct drm_format_name_buf format_name
;
1912 addReq
== true ? &fb_location
:NULL
);
1917 switch (fb
->format
->format
) {
1919 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
1921 case DRM_FORMAT_RGB565
:
1922 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
1924 case DRM_FORMAT_XRGB8888
:
1925 case DRM_FORMAT_ARGB8888
:
1926 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
1928 case DRM_FORMAT_XRGB2101010
:
1929 case DRM_FORMAT_ARGB2101010
:
1930 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
1932 case DRM_FORMAT_XBGR2101010
:
1933 case DRM_FORMAT_ABGR2101010
:
1934 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
1936 case DRM_FORMAT_NV21
:
1937 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
1939 case DRM_FORMAT_NV12
:
1940 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
1943 DRM_ERROR("Unsupported screen format %s\n",
1944 drm_get_format_name(fb
->format
->format
, &format_name
));
1948 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
1949 plane_state
->address
.type
= PLN_ADDR_TYPE_GRAPHICS
;
1950 plane_state
->address
.grph
.addr
.low_part
= lower_32_bits(fb_location
);
1951 plane_state
->address
.grph
.addr
.high_part
= upper_32_bits(fb_location
);
1952 plane_state
->plane_size
.grph
.surface_size
.x
= 0;
1953 plane_state
->plane_size
.grph
.surface_size
.y
= 0;
1954 plane_state
->plane_size
.grph
.surface_size
.width
= fb
->width
;
1955 plane_state
->plane_size
.grph
.surface_size
.height
= fb
->height
;
1956 plane_state
->plane_size
.grph
.surface_pitch
=
1957 fb
->pitches
[0] / fb
->format
->cpp
[0];
1958 /* TODO: unhardcode */
1959 plane_state
->color_space
= COLOR_SPACE_SRGB
;
1962 awidth
= ALIGN(fb
->width
, 64);
1963 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
1964 plane_state
->address
.video_progressive
.luma_addr
.low_part
1965 = lower_32_bits(fb_location
);
1966 plane_state
->address
.video_progressive
.chroma_addr
.low_part
1967 = lower_32_bits(fb_location
) +
1968 (awidth
* fb
->height
);
1969 plane_state
->plane_size
.video
.luma_size
.x
= 0;
1970 plane_state
->plane_size
.video
.luma_size
.y
= 0;
1971 plane_state
->plane_size
.video
.luma_size
.width
= awidth
;
1972 plane_state
->plane_size
.video
.luma_size
.height
= fb
->height
;
1973 /* TODO: unhardcode */
1974 plane_state
->plane_size
.video
.luma_pitch
= awidth
;
1976 plane_state
->plane_size
.video
.chroma_size
.x
= 0;
1977 plane_state
->plane_size
.video
.chroma_size
.y
= 0;
1978 plane_state
->plane_size
.video
.chroma_size
.width
= awidth
;
1979 plane_state
->plane_size
.video
.chroma_size
.height
= fb
->height
;
1980 plane_state
->plane_size
.video
.chroma_pitch
= awidth
/ 2;
1982 /* TODO: unhardcode */
1983 plane_state
->color_space
= COLOR_SPACE_YCBCR709
;
1986 memset(&plane_state
->tiling_info
, 0, sizeof(plane_state
->tiling_info
));
1988 /* Fill GFX8 params */
1989 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
) {
1990 unsigned int bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
1992 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
1993 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
1994 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
1995 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
1996 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
1998 /* XXX fix me for VI */
1999 plane_state
->tiling_info
.gfx8
.num_banks
= num_banks
;
2000 plane_state
->tiling_info
.gfx8
.array_mode
=
2001 DC_ARRAY_2D_TILED_THIN1
;
2002 plane_state
->tiling_info
.gfx8
.tile_split
= tile_split
;
2003 plane_state
->tiling_info
.gfx8
.bank_width
= bankw
;
2004 plane_state
->tiling_info
.gfx8
.bank_height
= bankh
;
2005 plane_state
->tiling_info
.gfx8
.tile_aspect
= mtaspect
;
2006 plane_state
->tiling_info
.gfx8
.tile_mode
=
2007 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
2008 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
2009 == DC_ARRAY_1D_TILED_THIN1
) {
2010 plane_state
->tiling_info
.gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
2013 plane_state
->tiling_info
.gfx8
.pipe_config
=
2014 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
2016 if (adev
->asic_type
== CHIP_VEGA10
||
2017 adev
->asic_type
== CHIP_RAVEN
) {
2018 /* Fill GFX9 params */
2019 plane_state
->tiling_info
.gfx9
.num_pipes
=
2020 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
2021 plane_state
->tiling_info
.gfx9
.num_banks
=
2022 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
2023 plane_state
->tiling_info
.gfx9
.pipe_interleave
=
2024 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
2025 plane_state
->tiling_info
.gfx9
.num_shader_engines
=
2026 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
2027 plane_state
->tiling_info
.gfx9
.max_compressed_frags
=
2028 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
2029 plane_state
->tiling_info
.gfx9
.num_rb_per_se
=
2030 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
2031 plane_state
->tiling_info
.gfx9
.swizzle
=
2032 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
2033 plane_state
->tiling_info
.gfx9
.shaderEnable
= 1;
2036 plane_state
->visible
= true;
2037 plane_state
->scaling_quality
.h_taps_c
= 0;
2038 plane_state
->scaling_quality
.v_taps_c
= 0;
2040 /* is this needed? is plane_state zeroed at allocation? */
2041 plane_state
->scaling_quality
.h_taps
= 0;
2042 plane_state
->scaling_quality
.v_taps
= 0;
2043 plane_state
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
/* Entry count of the legacy 256-slot gamma ramp (one value per RGB channel). */
#define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256
2051 static void fill_gamma_from_crtc_state(
2052 const struct drm_crtc_state
*crtc_state
,
2053 struct dc_plane_state
*plane_state
)
2056 struct dc_gamma
*gamma
;
2057 struct drm_color_lut
*lut
= (struct drm_color_lut
*) crtc_state
->gamma_lut
->data
;
2059 gamma
= dc_create_gamma();
2061 if (gamma
== NULL
) {
2066 for (i
= 0; i
< NUM_OF_RAW_GAMMA_RAMP_RGB_256
; i
++) {
2067 gamma
->red
[i
] = lut
[i
].red
;
2068 gamma
->green
[i
] = lut
[i
].green
;
2069 gamma
->blue
[i
] = lut
[i
].blue
;
2072 plane_state
->gamma_correction
= gamma
;
2075 static int fill_plane_attributes(
2076 struct amdgpu_device
*adev
,
2077 struct dc_plane_state
*dc_plane_state
,
2078 struct drm_plane_state
*plane_state
,
2079 struct drm_crtc_state
*crtc_state
,
2082 const struct amdgpu_framebuffer
*amdgpu_fb
=
2083 to_amdgpu_framebuffer(plane_state
->fb
);
2084 const struct drm_crtc
*crtc
= plane_state
->crtc
;
2085 struct dc_transfer_func
*input_tf
;
2088 if (!fill_rects_from_plane_state(plane_state
, dc_plane_state
))
2091 ret
= fill_plane_attributes_from_fb(
2092 crtc
->dev
->dev_private
,
2100 input_tf
= dc_create_transfer_func();
2102 if (input_tf
== NULL
)
2105 input_tf
->type
= TF_TYPE_PREDEFINED
;
2106 input_tf
->tf
= TRANSFER_FUNCTION_SRGB
;
2108 dc_plane_state
->in_transfer_func
= input_tf
;
2110 /* In case of gamma set, update gamma value */
2111 if (crtc_state
->gamma_lut
)
2112 fill_gamma_from_crtc_state(crtc_state
, dc_plane_state
);
2117 /*****************************************************************************/
2119 struct amdgpu_connector
*aconnector_from_drm_crtc_id(
2120 const struct drm_crtc
*crtc
)
2122 struct drm_device
*dev
= crtc
->dev
;
2123 struct drm_connector
*connector
;
2124 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2125 struct amdgpu_connector
*aconnector
;
2127 list_for_each_entry(connector
,
2128 &dev
->mode_config
.connector_list
, head
) {
2130 aconnector
= to_amdgpu_connector(connector
);
2132 if (aconnector
->base
.state
->crtc
!= &acrtc
->base
)
2135 /* Found the connector */
2139 /* If we get here, not found. */
2143 static void update_stream_scaling_settings(
2144 const struct drm_display_mode
*mode
,
2145 const struct dm_connector_state
*dm_state
,
2146 struct dc_stream_state
*stream
)
2148 enum amdgpu_rmx_type rmx_type
;
2150 struct rect src
= { 0 }; /* viewport in composition space*/
2151 struct rect dst
= { 0 }; /* stream addressable area */
2153 /* no mode. nothing to be done */
2157 /* Full screen scaling by default */
2158 src
.width
= mode
->hdisplay
;
2159 src
.height
= mode
->vdisplay
;
2160 dst
.width
= stream
->timing
.h_addressable
;
2161 dst
.height
= stream
->timing
.v_addressable
;
2163 rmx_type
= dm_state
->scaling
;
2164 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
2165 if (src
.width
* dst
.height
<
2166 src
.height
* dst
.width
) {
2167 /* height needs less upscaling/more downscaling */
2168 dst
.width
= src
.width
*
2169 dst
.height
/ src
.height
;
2171 /* width needs less upscaling/more downscaling */
2172 dst
.height
= src
.height
*
2173 dst
.width
/ src
.width
;
2175 } else if (rmx_type
== RMX_CENTER
) {
2179 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
2180 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
2182 if (dm_state
->underscan_enable
) {
2183 dst
.x
+= dm_state
->underscan_hborder
/ 2;
2184 dst
.y
+= dm_state
->underscan_vborder
/ 2;
2185 dst
.width
-= dm_state
->underscan_hborder
;
2186 dst
.height
-= dm_state
->underscan_vborder
;
2192 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
2193 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
2197 static enum dc_color_depth
convert_color_depth_from_display_info(
2198 const struct drm_connector
*connector
)
2200 uint32_t bpc
= connector
->display_info
.bpc
;
2202 /* Limited color depth to 8bit
2203 * TODO: Still need to handle deep color
2210 /* Temporary Work around, DRM don't parse color depth for
2211 * EDID revision before 1.4
2212 * TODO: Fix edid parsing
2214 return COLOR_DEPTH_888
;
2216 return COLOR_DEPTH_666
;
2218 return COLOR_DEPTH_888
;
2220 return COLOR_DEPTH_101010
;
2222 return COLOR_DEPTH_121212
;
2224 return COLOR_DEPTH_141414
;
2226 return COLOR_DEPTH_161616
;
2228 return COLOR_DEPTH_UNDEFINED
;
2232 static enum dc_aspect_ratio
get_aspect_ratio(
2233 const struct drm_display_mode
*mode_in
)
2235 int32_t width
= mode_in
->crtc_hdisplay
* 9;
2236 int32_t height
= mode_in
->crtc_vdisplay
* 16;
2238 if ((width
- height
) < 10 && (width
- height
) > -10)
2239 return ASPECT_RATIO_16_9
;
2241 return ASPECT_RATIO_4_3
;
2244 static enum dc_color_space
get_output_color_space(
2245 const struct dc_crtc_timing
*dc_crtc_timing
)
2247 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
2249 switch (dc_crtc_timing
->pixel_encoding
) {
2250 case PIXEL_ENCODING_YCBCR422
:
2251 case PIXEL_ENCODING_YCBCR444
:
2252 case PIXEL_ENCODING_YCBCR420
:
2255 * 27030khz is the separation point between HDTV and SDTV
2256 * according to HDMI spec, we use YCbCr709 and YCbCr601
2259 if (dc_crtc_timing
->pix_clk_khz
> 27030) {
2260 if (dc_crtc_timing
->flags
.Y_ONLY
)
2262 COLOR_SPACE_YCBCR709_LIMITED
;
2264 color_space
= COLOR_SPACE_YCBCR709
;
2266 if (dc_crtc_timing
->flags
.Y_ONLY
)
2268 COLOR_SPACE_YCBCR601_LIMITED
;
2270 color_space
= COLOR_SPACE_YCBCR601
;
2275 case PIXEL_ENCODING_RGB
:
2276 color_space
= COLOR_SPACE_SRGB
;
2287 /*****************************************************************************/
2289 static void fill_stream_properties_from_drm_display_mode(
2290 struct dc_stream_state
*stream
,
2291 const struct drm_display_mode
*mode_in
,
2292 const struct drm_connector
*connector
)
2294 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
2296 memset(timing_out
, 0, sizeof(struct dc_crtc_timing
));
2298 timing_out
->h_border_left
= 0;
2299 timing_out
->h_border_right
= 0;
2300 timing_out
->v_border_top
= 0;
2301 timing_out
->v_border_bottom
= 0;
2302 /* TODO: un-hardcode */
2304 if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
2305 && stream
->sink
->sink_signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
2306 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
2308 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
2310 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
2311 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
2313 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
2314 timing_out
->hdmi_vic
= 0;
2315 timing_out
->vic
= drm_match_cea_mode(mode_in
);
2317 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
2318 timing_out
->h_total
= mode_in
->crtc_htotal
;
2319 timing_out
->h_sync_width
=
2320 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
2321 timing_out
->h_front_porch
=
2322 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
2323 timing_out
->v_total
= mode_in
->crtc_vtotal
;
2324 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
2325 timing_out
->v_front_porch
=
2326 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
2327 timing_out
->v_sync_width
=
2328 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
2329 timing_out
->pix_clk_khz
= mode_in
->crtc_clock
;
2330 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
2331 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
2332 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
2333 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
2334 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
2336 stream
->output_color_space
= get_output_color_space(timing_out
);
2339 struct dc_transfer_func
*tf
= dc_create_transfer_func();
2341 tf
->type
= TF_TYPE_PREDEFINED
;
2342 tf
->tf
= TRANSFER_FUNCTION_SRGB
;
2343 stream
->out_transfer_func
= tf
;
2347 static void fill_audio_info(
2348 struct audio_info
*audio_info
,
2349 const struct drm_connector
*drm_connector
,
2350 const struct dc_sink
*dc_sink
)
2353 int cea_revision
= 0;
2354 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
2356 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
2357 audio_info
->product_id
= edid_caps
->product_id
;
2359 cea_revision
= drm_connector
->display_info
.cea_rev
;
2361 while (i
< AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
&&
2362 edid_caps
->display_name
[i
]) {
2363 audio_info
->display_name
[i
] = edid_caps
->display_name
[i
];
2367 if (cea_revision
>= 3) {
2368 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
2370 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
2371 audio_info
->modes
[i
].format_code
=
2372 (enum audio_format_code
)
2373 (edid_caps
->audio_modes
[i
].format_code
);
2374 audio_info
->modes
[i
].channel_count
=
2375 edid_caps
->audio_modes
[i
].channel_count
;
2376 audio_info
->modes
[i
].sample_rates
.all
=
2377 edid_caps
->audio_modes
[i
].sample_rate
;
2378 audio_info
->modes
[i
].sample_size
=
2379 edid_caps
->audio_modes
[i
].sample_size
;
2383 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
2385 /* TODO: We only check for the progressive mode, check for interlace mode too */
2386 if (drm_connector
->latency_present
[0]) {
2387 audio_info
->video_latency
= drm_connector
->video_latency
[0];
2388 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
2391 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2395 static void copy_crtc_timing_for_drm_display_mode(
2396 const struct drm_display_mode
*src_mode
,
2397 struct drm_display_mode
*dst_mode
)
2399 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
2400 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
2401 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
2402 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
2403 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
2404 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
2405 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
2406 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
2407 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
2408 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
2409 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
2410 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
2411 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
2412 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
2415 static void decide_crtc_timing_for_drm_display_mode(
2416 struct drm_display_mode
*drm_mode
,
2417 const struct drm_display_mode
*native_mode
,
2420 if (scale_enabled
) {
2421 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2422 } else if (native_mode
->clock
== drm_mode
->clock
&&
2423 native_mode
->htotal
== drm_mode
->htotal
&&
2424 native_mode
->vtotal
== drm_mode
->vtotal
) {
2425 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2427 /* no scaling nor amdgpu inserted, no need to patch */
/*
 * Build a dc_stream_state for @aconnector's current sink, derived from the
 * requested @drm_mode and the connector's scaling state in @dm_state.
 *
 * Returns the new stream (caller owns a reference) or NULL on failure.
 * The goto labels fall through to a single return of @stream, which is
 * still NULL on the early-error paths.
 */
static struct dc_stream_state *create_stream_for_sink(
		struct amdgpu_connector *aconnector,
		const struct drm_display_mode *drm_mode,
		const struct dm_connector_state *dm_state)
{
	struct drm_display_mode *preferred_mode = NULL;
	const struct drm_connector *drm_connector;
	struct dc_stream_state *stream = NULL;
	/* local copy: timing may be patched below without touching caller's mode */
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		goto drm_connector_null;
	}

	if (dm_state == NULL) {
		DRM_ERROR("dm_state is NULL!\n");
		goto dm_state_null;
	}

	drm_connector = &aconnector->base;
	stream = dc_create_stream_for_sink(aconnector->dc_sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto stream_create_fail;
	}

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		/* fall back to the first probed mode, if any */
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	if (preferred_mode == NULL) {
		/* This may not be an error, the use case is when we we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_INFO("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state->scaling != RMX_OFF);
	}

	fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base);
	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		aconnector->dc_sink);

stream_create_fail:
dm_state_null:
drm_connector_null:
	return stream;
}
/* drm_crtc_funcs.destroy: tear down DRM bookkeeping and free the CRTC. */
void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
/*
 * drm_crtc_funcs.atomic_destroy_state: drop the dc_stream reference held by
 * this CRTC state, then free the state itself.
 */
static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO Destroy dc_stream objects are stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}
/*
 * drm_crtc_funcs.reset: discard any existing CRTC state and install a fresh,
 * zeroed dm_crtc_state. On allocation failure the CRTC is left without state
 * (WARN_ON signals the unexpected OOM).
 */
static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	crtc->state = &state->base;
	crtc->state->crtc = crtc;
}
2539 static struct drm_crtc_state
*
2540 dm_crtc_duplicate_state(struct drm_crtc
*crtc
)
2542 struct dm_crtc_state
*state
, *cur
;
2544 cur
= to_dm_crtc_state(crtc
->state
);
2546 if (WARN_ON(!crtc
->state
))
2549 state
= dm_alloc(sizeof(*state
));
2551 __drm_atomic_helper_crtc_duplicate_state(crtc
, &state
->base
);
2554 state
->stream
= cur
->stream
;
2555 dc_stream_retain(state
->stream
);
2558 /* TODO Duplicate dc_stream after objects are stream object is flattened */
2560 return &state
->base
;
/* Implemented only the options currently availible for the driver */
/* CRTC vtable: atomic helpers for legacy entry points, DM-specific state
 * management so each CRTC state can carry its dc_stream reference. */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
};
2574 static enum drm_connector_status
2575 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
2578 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
2581 * 1. This interface is NOT called in context of HPD irq.
2582 * 2. This interface *is called* in context of user-mode ioctl. Which
2583 * makes it a bad place for *any* MST-related activit. */
2585 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
2586 connected
= (aconnector
->dc_sink
!= NULL
);
2588 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
2590 return (connected
? connector_status_connected
:
2591 connector_status_disconnected
);
/*
 * drm_connector_funcs.atomic_set_property: store scaling / underscan
 * properties into the new dm_connector_state.
 *
 * Returns 0 when the property was recognized (or unchanged), -EINVAL for
 * unknown properties.
 */
int amdgpu_dm_connector_atomic_set_property(
	struct drm_connector *connector,
	struct drm_connector_state *connector_state,
	struct drm_property *property,
	uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		/* translate DRM scaling enum into the driver's RMX type */
		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		/* no-op change: succeed without touching the new state */
		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	}

	return ret;
}
/*
 * drm_connector_funcs.atomic_get_property: mirror of the setter above —
 * read scaling / underscan values out of the dm_connector_state.
 *
 * Returns 0 on a recognized property, -EINVAL otherwise.
 */
int amdgpu_dm_connector_atomic_get_property(
	struct drm_connector *connector,
	const struct drm_connector_state *state,
	struct drm_property *property,
	uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		/* map RMX type back to the DRM scaling enum */
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	}
	return ret;
}
/*
 * drm_connector_funcs.destroy: unregister the backlight device owned by an
 * eDP/LVDS connector (when backlight support is built in), then release the
 * DRM connector.
 */
void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
		/* NOTE(review): registering during destroy looks suspicious —
		 * presumably ensures backlight_dev exists before the unregister
		 * below; confirm against driver history. */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev) {
			backlight_device_unregister(dm->backlight_dev);
			dm->backlight_dev = NULL;
		}
	}
#endif
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
/*
 * drm_connector_funcs.reset: free any existing connector state and install
 * defaults (no scaling, underscan off/zero borders).
 */
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;

		connector->state = &state->base;
		connector->state->connector = connector;
	}
}
/*
 * drm_connector_funcs.atomic_duplicate_state: clone the current connector
 * state (kmemdup copies the DM fields; the helper fixes up the DRM core
 * bookkeeping). Returns NULL on allocation failure.
 */
struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
	struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
			kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (new_state) {
		__drm_atomic_helper_connector_duplicate_state(connector,
							      &new_state->base);
		return &new_state->base;
	}

	return NULL;
}
/* Connector vtable: DM state management plus the atomic property hooks for
 * scaling and underscan. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
};
/*
 * drm_connector_helper_funcs.best_encoder: resolve the connector's first
 * (and only) encoder id into a drm_encoder. Returns NULL if the connector
 * has no encoder id or the lookup fails.
 */
static struct drm_encoder *best_encoder(struct drm_connector *connector)
{
	int enc_id = connector->encoder_ids[0];
	struct drm_mode_object *obj;
	struct drm_encoder *encoder;

	DRM_DEBUG_KMS("Finding the best encoder\n");

	/* pick the encoder ids */
	if (enc_id) {
		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
		if (!obj) {
			DRM_ERROR("Couldn't find a matching encoder for our connector\n");
			return NULL;
		}
		encoder = obj_to_encoder(obj);
		return encoder;
	}
	DRM_ERROR("No encoder id\n");
	return NULL;
}
/* Thin adapter so the helper vtable can point at the public get_modes. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
2789 static void create_eml_sink(struct amdgpu_connector
*aconnector
)
2791 struct dc_sink_init_data init_params
= {
2792 .link
= aconnector
->dc_link
,
2793 .sink_signal
= SIGNAL_TYPE_VIRTUAL
2795 struct edid
*edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
2797 if (!aconnector
->base
.edid_blob_ptr
||
2798 !aconnector
->base
.edid_blob_ptr
->data
) {
2799 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
2800 aconnector
->base
.name
);
2802 aconnector
->base
.force
= DRM_FORCE_OFF
;
2803 aconnector
->base
.override_edid
= false;
2807 aconnector
->edid
= edid
;
2809 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
2810 aconnector
->dc_link
,
2812 (edid
->extensions
+ 1) * EDID_LENGTH
,
2815 if (aconnector
->base
.force
2817 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
2818 aconnector
->dc_link
->local_sink
:
2819 aconnector
->dc_em_sink
;
/*
 * One-time EDID management for forced connectors: pretend a usable DP link
 * capability so an initial modeset can happen, then build the emulated sink
 * from the firmware EDID.
 */
static void handle_edid_mgmt(struct amdgpu_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/* In case of headless boot with force on for DP managed connector
	 * Those settings have to be != 0 to get initial modeset
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}
/*
 * drm_connector_helper_funcs.mode_valid: validate @mode by building a
 * throwaway dc_stream for the connector's sink and asking DC to validate it.
 *
 * Interlaced and doublescan modes are rejected up front — DC does not
 * support them here. Returns MODE_OK or MODE_ERROR.
 */
int amdgpu_dm_connector_mode_valid(
		struct drm_connector *connector,
		struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	struct amdgpu_device *adev = connector->dev->dev_private;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/* Only run this the first time mode_valid is called to initilialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
			!aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = dc_create_stream_for_sink(dc_sink);
	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto fail;
	}

	drm_mode_set_crtcinfo(mode, 0);
	fill_stream_properties_from_drm_display_mode(stream, mode, connector);

	/* 1:1 source/destination — no scaling during validation */
	stream->src.width = mode->hdisplay;
	stream->src.height = mode->vdisplay;
	stream->dst = stream->src;

	if (dc_validate_stream(adev->dm.dc, stream))
		result = MODE_OK;

	dc_stream_release(stream);

fail:
	/* TODO: error handling*/
	return result;
}
/* Connector probe helper vtable. */
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplug a second bigger display in FB Con mode, bigger resolution
	 * modes will be filtered by drm_mode_validate_size(), and those modes
	 * is missing after user start lightdm. So we need to renew modes list.
	 * in get_modes call back, not just return the modes count
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.best_encoder = best_encoder
};
/* Intentionally empty: CRTC disable is driven through DC, not this helper. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
/*
 * drm_crtc_helper_funcs.atomic_check: a CRTC state is acceptable when it
 * either has no stream (e.g. during reset) or its stream validates against
 * DC. A required modeset without a stream is a driver bug (WARN + -EINVAL).
 */
static int dm_crtc_helper_atomic_check(
	struct drm_crtc *crtc,
	struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;

	if (unlikely(!dm_crtc_state->stream &&
			modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream))
		return 0;

	return ret;
}
/* No mode fixup needed: DC handles timing adjustments; always accept. */
static bool dm_crtc_helper_mode_fixup(
	struct drm_crtc *crtc,
	const struct drm_display_mode *mode,
	struct drm_display_mode *adjusted_mode)
{
	return true;
}
/* CRTC atomic helper vtable. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup
};
/* Intentionally empty: encoder disable is handled by DC. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
/* Nothing to validate at the encoder level; always succeed. */
static int dm_encoder_helper_atomic_check(
	struct drm_encoder *encoder,
	struct drm_crtc_state *crtc_state,
	struct drm_connector_state *conn_state)
{
	return 0;
}
/* Encoder atomic helper vtable (non-static: referenced by MST code). */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
/*
 * drm_plane_funcs.reset: destroy any existing plane state and install a
 * fresh dm_plane_state with default rotation.
 */
static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);

	if (amdgpu_state) {
		plane->state = &amdgpu_state->base;
		plane->state->plane = plane;
		plane->state->rotation = DRM_MODE_ROTATE_0;
	} else
		WARN_ON(1);	/* OOM: plane left without state */
}
/*
 * drm_plane_funcs.atomic_duplicate_state: clone the plane state, sharing
 * the dc_plane_state with an extra reference. Returns NULL on OOM.
 */
static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		/* shared dc_state: balanced by release in destroy_state */
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	return &dm_plane_state->base;
}
/*
 * drm_plane_funcs.atomic_destroy_state: drop the dc_plane_state reference
 * (if any) and free the DM plane state.
 */
void dm_drm_plane_destroy_state(struct drm_plane *plane,
				struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	__drm_atomic_helper_plane_destroy_state(state);
	kfree(dm_plane_state);
}
/* Plane vtable: atomic helpers for update/disable, DM state management. */
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_plane_cleanup,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
/*
 * drm_plane_helper_funcs.prepare_fb: pin the framebuffer BO into VRAM and
 * propagate its GPU address into the new dc_plane_state (graphics formats
 * use the grph address; video formats split luma/chroma, with chroma placed
 * after the 64-byte-aligned luma plane).
 *
 * Returns 0 on success or the amdgpu_bo error code.
 */
static int dm_plane_helper_prepare_fb(
	struct drm_plane *plane,
	struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *rbo;
	int r;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	unsigned int awidth;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);

	obj = afb->obj;
	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	/* pin into VRAM; afb->address receives the MC address */
	r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);

	amdgpu_bo_unreserve(rbo);

	if (unlikely(r != 0)) {
		DRM_ERROR("Failed to pin framebuffer\n");
		return r;
	}

	/* only patch addresses when this commit swapped in a new dc_state */
	if (dm_plane_state_new->dc_state &&
			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
			plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
			plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
		} else {
			awidth = ALIGN(new_state->fb->width, 64);
			plane_state->address.video_progressive.luma_addr.low_part
					= lower_32_bits(afb->address);
			plane_state->address.video_progressive.chroma_addr.low_part
					= lower_32_bits(afb->address) +
						(awidth * new_state->fb->height);
		}
	}

	/* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
	 * prepare and cleanup in drm_atomic_helper_prepare_planes
	 * and drm_atomic_helper_cleanup_planes because fb doens't in s3.
	 * IN 4.10 kernel this code should be removed and amdgpu_device_suspend
	 * code touching fram buffers should be avoided for DC.
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);

		acrtc->cursor_bo = obj;
	}
	return 0;
}
/*
 * drm_plane_helper_funcs.cleanup_fb: unpin and drop the reference on the
 * BO that prepare_fb pinned. Best-effort: a failed reserve is logged and
 * the unpin skipped.
 */
static void dm_plane_helper_cleanup_fb(
	struct drm_plane *plane,
	struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	struct amdgpu_framebuffer *afb;
	int r;

	if (!old_state->fb)
		return;

	afb = to_amdgpu_framebuffer(old_state->fb);
	rbo = gem_to_amdgpu_bo(afb->obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}
/*
 * Build a dc_validation_set entry (a stream sized 1:1 to @mode) for
 * @connector's sink. On success the new stream is stored in @val_set and
 * MODE_OK returned; interlaced/doublescan modes and missing sinks yield
 * MODE_ERROR. Caller owns the stream reference placed in @val_set.
 */
int dm_create_validation_set_for_connector(struct drm_connector *connector,
		struct drm_display_mode *mode, struct dc_validation_set *val_set)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink =
			to_amdgpu_connector(connector)->dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		return result;
	}

	stream = dc_create_stream_for_sink(dc_sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		return result;
	}

	drm_mode_set_crtcinfo(mode, 0);

	fill_stream_properties_from_drm_display_mode(stream, mode, connector);

	val_set->stream = stream;

	/* 1:1 source/destination rects */
	stream->src.width = mode->hdisplay;
	stream->src.height = mode->vdisplay;
	stream->dst = stream->src;

	return MODE_OK;
}
/*
 * drm_plane_helper_funcs.atomic_check: a plane state without a dc_state is
 * trivially valid; otherwise ask DC to validate it. Returns 0 or -EINVAL.
 */
int dm_plane_atomic_check(struct drm_plane *plane,
			  struct drm_plane_state *state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (!dm_plane_state->dc_state)
		return 0;

	if (dc_validate_plane(dc, dm_plane_state->dc_state))
		return 0;

	return -EINVAL;
}
/* Plane atomic helper vtable. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
};
/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC to implement proper check
 */
/* NOTE(review): some array entries were elided in this view; the first rgb
 * entry and the yuv/cursor entries below are reconstructed — confirm against
 * the original file. */
static uint32_t rgb_formats[] = {
	DRM_FORMAT_RGB888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
};

/* video (overlay) plane formats */
static uint32_t yuv_formats[] = {
	DRM_FORMAT_NV12,
	DRM_FORMAT_NV21,
};

/* cursor plane format */
static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};
/*
 * Register @aplane with DRM, choosing the format list by plane type
 * (primary = RGB, overlay = YUV, cursor = ARGB8888), and attach the DM
 * plane helpers. Returns the drm_universal_plane_init() result.
 */
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
			struct amdgpu_plane *aplane,
			unsigned long possible_crtcs)
{
	int res = -EPERM;

	switch (aplane->base.type) {
	case DRM_PLANE_TYPE_PRIMARY:
		aplane->base.format_default = true;

		res = drm_universal_plane_init(
				dm->adev->ddev,
				&aplane->base,
				possible_crtcs,
				&dm_plane_funcs,
				rgb_formats,
				ARRAY_SIZE(rgb_formats),
				NULL, aplane->base.type, NULL);
		break;
	case DRM_PLANE_TYPE_OVERLAY:
		res = drm_universal_plane_init(
				dm->adev->ddev,
				&aplane->base,
				possible_crtcs,
				&dm_plane_funcs,
				yuv_formats,
				ARRAY_SIZE(yuv_formats),
				NULL, aplane->base.type, NULL);
		break;
	case DRM_PLANE_TYPE_CURSOR:
		res = drm_universal_plane_init(
				dm->adev->ddev,
				&aplane->base,
				possible_crtcs,
				&dm_plane_funcs,
				cursor_formats,
				ARRAY_SIZE(cursor_formats),
				NULL, aplane->base.type, NULL);
		break;
	}

	drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);

	return res;
}
3249 int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
3250 struct drm_plane
*plane
,
3251 uint32_t crtc_index
)
3253 struct amdgpu_crtc
*acrtc
= NULL
;
3254 struct amdgpu_plane
*cursor_plane
;
3258 cursor_plane
= kzalloc(sizeof(*cursor_plane
), GFP_KERNEL
);
3262 cursor_plane
->base
.type
= DRM_PLANE_TYPE_CURSOR
;
3263 res
= amdgpu_dm_plane_init(dm
, cursor_plane
, 0);
3265 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
3269 res
= drm_crtc_init_with_planes(
3273 &cursor_plane
->base
,
3274 &amdgpu_dm_crtc_funcs
, NULL
);
3279 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
3281 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3282 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3284 acrtc
->crtc_id
= crtc_index
;
3285 acrtc
->base
.enabled
= false;
3287 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
3288 drm_mode_crtc_set_gamma_size(&acrtc
->base
, 256);
3294 kfree(cursor_plane
);
3295 acrtc
->crtc_id
= -1;
/* Map a DC signal type onto the corresponding DRM connector type. */
static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;
	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}
/*
 * Record the connector's preferred probed mode as the encoder's native
 * mode (used later for common-mode generation and scaling decisions).
 * native_mode.clock is zeroed first so stale data never survives a probe
 * that finds no preferred mode.
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	const struct drm_connector_helper_funcs *helper =
		connector->helper_private;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = helper->best_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			/* NOTE(review): unconditional break — only the first
			 * probed mode is ever examined; confirm intended. */
			break;
		}
	}
}
3354 static struct drm_display_mode
*amdgpu_dm_create_common_mode(
3355 struct drm_encoder
*encoder
, char *name
,
3356 int hdisplay
, int vdisplay
)
3358 struct drm_device
*dev
= encoder
->dev
;
3359 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3360 struct drm_display_mode
*mode
= NULL
;
3361 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3363 mode
= drm_mode_duplicate(dev
, native_mode
);
3368 mode
->hdisplay
= hdisplay
;
3369 mode
->vdisplay
= vdisplay
;
3370 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
3371 strncpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
/*
 * Add a set of common resolutions (derived from the native mode) to the
 * connector's probed list, skipping sizes larger than the native mode,
 * the native size itself, and anything already probed.
 */
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_connector *amdgpu_connector =
			to_amdgpu_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		/* skip sizes the panel can't fit, and the native size itself */
		if (common_modes[i].w > native_mode->hdisplay ||
			common_modes[i].h > native_mode->vdisplay ||
			(common_modes[i].w == native_mode->hdisplay &&
			common_modes[i].h == native_mode->vdisplay))
			continue;

		/* skip resolutions EDID probing already provided */
		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
				common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_connector->num_modes++;
	}
}
/*
 * Populate the connector's probed mode list from @edid (also refreshing
 * ELD audio data and the cached native mode). With no EDID the mode count
 * is simply zeroed.
 */
static void amdgpu_dm_connector_ddc_get_modes(
		struct drm_connector *connector,
		struct edid *edid)
{
	struct amdgpu_connector *amdgpu_connector =
			to_amdgpu_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		drm_edid_to_eld(connector, edid);

		amdgpu_dm_get_native_mode(connector);
	} else
		amdgpu_connector->num_modes = 0;
}
/*
 * drm get_modes entry point: rebuild modes from the cached EDID, then
 * append the synthesized common modes. Returns the total mode count.
 */
int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	const struct drm_connector_helper_funcs *helper =
			connector->helper_private;
	struct amdgpu_connector *amdgpu_connector =
			to_amdgpu_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_connector->edid;

	encoder = helper->best_encoder(connector);

	amdgpu_dm_connector_ddc_get_modes(connector, edid);
	amdgpu_dm_connector_add_common_modes(encoder, connector);
	return amdgpu_connector->num_modes;
}
/*
 * Common post-drm_connector_init setup: wire the connector to its dc_link,
 * set conservative DRM capabilities, enable HPD polling for hotpluggable
 * connector types, and attach the scaling/underscan properties with their
 * defaults.
 */
void amdgpu_dm_connector_init_helper(
	struct amdgpu_display_manager *dm,
	struct amdgpu_connector *aconnector,
	int connector_type,
	struct dc_link *link,
	int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */

	mutex_init(&aconnector->hpd_lock);

	/* configure support HPD hot plug connector_>polled default value is 0
	 * which means HPD hot plug not supported
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);
}
/*
 * i2c_algorithm.master_xfer: translate an array of Linux i2c_msg into a DC
 * i2c_command and submit it over the connector's DDC line.
 *
 * Returns @num (messages transferred) on success, -EIO on allocation or
 * submission failure, per the i2c-core contract.
 */
int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
		       struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	/* NOTE(review): bus speed setting was elided in this view — the
	 * original presumably sets cmd.speed here; confirm. */
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		/* I2C_M_RD marks reads; DC wants a "write" flag instead */
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dal_i2caux_submit_i2c_command(
			ddc_service->ctx->i2caux,
			ddc_service->ddc_pin,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}
/* i2c_algorithm.functionality: plain I2C plus emulated SMBus. */
u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
/* i2c algorithm vtable for the DM-owned DDC adapters. */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
3569 static struct amdgpu_i2c_adapter
*create_i2c(
3570 struct ddc_service
*ddc_service
,
3574 struct amdgpu_device
*adev
= ddc_service
->ctx
->driver_context
;
3575 struct amdgpu_i2c_adapter
*i2c
;
3577 i2c
= kzalloc(sizeof(struct amdgpu_i2c_adapter
), GFP_KERNEL
);
3578 i2c
->base
.owner
= THIS_MODULE
;
3579 i2c
->base
.class = I2C_CLASS_DDC
;
3580 i2c
->base
.dev
.parent
= &adev
->pdev
->dev
;
3581 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
3582 snprintf(i2c
->base
.name
, sizeof(i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
3583 i2c_set_adapdata(&i2c
->base
, i2c
);
3584 i2c
->ddc_service
= ddc_service
;
/* Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
/*
 * Create and register the DRM connector for one dc_link: its DDC i2c
 * adapter, the connector object and helpers, property defaults, the
 * encoder attachment, DP-MST initialization and (optionally) the eDP/LVDS
 * backlight device. Returns 0 on success or a negative error code; on
 * failure the i2c adapter is freed and detached from @aconnector.
 */
int amdgpu_dm_connector_init(
	struct amdgpu_display_manager *dm,
	struct amdgpu_connector *aconnector,
	uint32_t link_index,
	struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;
	((struct dc_link *)link)->priv = aconnector;

	DRM_DEBUG_KMS("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_mode_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	drm_connector_register(&aconnector->base);

	/* DP/eDP get MST topology management */
	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	/* NOTE: this currently will create backlight device even if a panel
	 * is not connected to the eDP/LVDS connector.
	 *
	 * This is less than ideal but we don't have sink information at this
	 * stage since detection happens after. We can't do detection earlier
	 * since MST detection needs connectors to be created first.
	 */
	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
		/* Event if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better then a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}
/*
 * Build the possible_crtcs bitmask for an encoder: one bit per available
 * CRTC. NOTE(review): the switch arms were elided in this view and are
 * reconstructed as the standard "all CRTCs" masks — confirm against the
 * original file.
 */
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 0:
		return 0x0;
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}
/*
 * Register one TMDS encoder with DRM, route it to every CRTC, and attach
 * the DM encoder helpers. encoder_id mirrors the link index on success,
 * -1 on failure. Returns the drm_encoder_init() result.
 */
int amdgpu_dm_encoder_init(
	struct drm_device *dev,
	struct amdgpu_encoder *aencoder,
	uint32_t link_index)
{
	struct amdgpu_device *adev = dev->dev_private;

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}
/*
 * Enable or disable the per-CRTC pageflip interrupt and the corresponding
 * DRM vblank machinery. NOTE(review): the enable-branch structure and the
 * amdgpu_irq_get/put calls were partially elided in this view and are
 * reconstructed — confirm against the original file.
 */
static void manage_dm_interrupts(
	struct amdgpu_device *adev,
	struct amdgpu_crtc *acrtc,
	bool enable)
{
	/*
	 * this is not correct translation but will work as soon as VBLANK
	 * constant is the same as PFLIP
	 */
	int irq_type =
		amdgpu_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}
/*
 * Compare the scaling/underscan settings of two dm_connector_states.
 * Differences checked: the scaling mode itself, underscan being turned
 * off (with non-zero old borders), underscan being turned on (with
 * non-zero new borders), or a change in either underscan border while
 * underscan stays enabled.
 * NOTE(review): the `return true;` / `return false;` lines are not
 * visible in this extract -- presumably each matched branch returns
 * true and the fall-through returns false; confirm against the full file.
 */
3754 static bool is_scaling_state_different(
3755 const struct dm_connector_state
*dm_state
,
3756 const struct dm_connector_state
*old_dm_state
)
3758 if (dm_state
->scaling
!= old_dm_state
->scaling
)
3760 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
3761 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
3763 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
3764 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
3766 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
||
3767 dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
3772 static void remove_stream(
3773 struct amdgpu_device
*adev
,
3774 struct amdgpu_crtc
*acrtc
,
3775 struct dc_stream_state
*stream
)
3777 /* this is the update mode case */
3778 if (adev
->dm
.freesync_module
)
3779 mod_freesync_remove_stream(adev
->dm
.freesync_module
, stream
);
3781 acrtc
->otg_inst
= -1;
3782 acrtc
->enabled
= false;
/*
 * Commit a cursor-plane update: a pure move when the framebuffer is
 * unchanged, otherwise a cursor set/reset sized from the plane state.
 * NOTE(review): extract is lossy -- the early return when neither state
 * has an fb, the dm_crtc_cursor_move() leading arguments, and the
 * cursor-set call that consumes the afb/crtc/crtc_w/crtc_h arguments
 * are partially missing; confirm against the full file.
 */
3785 static void handle_cursor_update(
3786 struct drm_plane
*plane
,
3787 struct drm_plane_state
*old_plane_state
)
/* nothing to do when there is no cursor fb in either old or new state */
3789 if (!plane
->state
->fb
&& !old_plane_state
->fb
)
3792 /* Check if it's a cursor on/off update or just cursor move*/
3793 if (plane
->state
->fb
== old_plane_state
->fb
)
3794 dm_crtc_cursor_move(
3796 plane
->state
->crtc_x
,
3797 plane
->state
->crtc_y
);
3799 struct amdgpu_framebuffer
*afb
=
3800 to_amdgpu_framebuffer(plane
->state
->fb
);
/* pick new-state crtc when enabling, old-state crtc when disabling */
3802 (!!plane
->state
->fb
) ?
3803 plane
->state
->crtc
:
3804 old_plane_state
->crtc
,
3805 (!!plane
->state
->fb
) ?
3808 plane
->state
->crtc_w
,
3809 plane
->state
->crtc_h
);
/*
 * Hand the CRTC's pending pageflip event over to the pflip ISR.
 * Caller must hold dev->event_lock (asserted below). Moves the event
 * from base.state->event to acrtc->event, marks the flip SUBMITTED,
 * and clears the state event so the helper framework won't send it.
 * NOTE(review): the DRM_DEBUG_DRIVER argument line is not visible in
 * this extract (presumably acrtc->crtc_id); confirm against the full file.
 */
3814 static void prepare_flip_isr(struct amdgpu_crtc
*acrtc
)
3817 assert_spin_locked(&acrtc
->base
.dev
->event_lock
);
/* a second flip while one is pending would leak the first event */
3818 WARN_ON(acrtc
->event
);
3820 acrtc
->event
= acrtc
->base
.state
->event
;
3822 /* Set the flip status */
3823 acrtc
->pflip_status
= AMDGPU_FLIP_SUBMITTED
;
3825 /* Mark this event as consumed */
3826 acrtc
->base
.state
->event
= NULL
;
3828 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
/*
 * Execute a page flip on @crtc to framebuffer @fb via DC.
 * Reserves the BO, waits for all its fences, busy-waits until scanout is
 * past the vblank preceding the target, then programs the new surface
 * address through dc_update_planes_and_stream() under event_lock.
 * NOTE(review): extract is lossy -- the trailing parameters (presumably
 * uint32_t target) and local declarations for r/vpos/hpos are not
 * visible; confirm against the full file.
 */
3835 * Waits on all BO's fences and for proper vblank count
3837 static void amdgpu_dm_do_flip(
3838 struct drm_crtc
*crtc
,
3839 struct drm_framebuffer
*fb
,
3842 unsigned long flags
;
3843 uint32_t target_vblank
;
3845 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3846 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(fb
);
3847 struct amdgpu_bo
*abo
= gem_to_amdgpu_bo(afb
->obj
);
3848 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
3849 bool async_flip
= (acrtc
->flip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
) != 0;
3850 struct dc_flip_addrs addr
= { {0} };
3851 /* TODO eliminate or rename surface_update */
3852 struct dc_surface_update surface_updates
[1] = { {0} };
3853 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
3856 /* Prepare wait for target vblank early - before the fence-waits */
3857 target_vblank
= target
- drm_crtc_vblank_count(crtc
) +
3858 amdgpu_get_vblank_counter_kms(crtc
->dev
, acrtc
->crtc_id
);
3860 /* TODO This might fail and hence better not used, wait
3861 * explicitly on fences instead
3862 * and in general should be called for
3863 * blocking commit to as per framework helpers
3865 r
= amdgpu_bo_reserve(abo
, true);
3866 if (unlikely(r
!= 0)) {
3867 DRM_ERROR("failed to reserve buffer before flip\n");
3871 /* Wait for all fences on this FB */
3872 WARN_ON(reservation_object_wait_timeout_rcu(abo
->tbo
.resv
, true, false,
3873 MAX_SCHEDULE_TIMEOUT
) < 0);
3875 amdgpu_bo_unreserve(abo
);
3877 /* Wait until we're out of the vertical blank period before the one
3878 * targeted by the flip
3880 while ((acrtc
->enabled
&&
3881 (amdgpu_get_crtc_scanoutpos(adev
->ddev
, acrtc
->crtc_id
, 0,
3882 &vpos
, &hpos
, NULL
, NULL
,
3884 & (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
)) ==
3885 (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
) &&
3886 (int)(target_vblank
-
3887 amdgpu_get_vblank_counter_kms(adev
->ddev
, acrtc
->crtc_id
)) > 0)) {
3888 usleep_range(1000, 1100);
/* everything below runs under dev->event_lock */
3892 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
3893 /* update crtc fb */
3894 crtc
->primary
->fb
= fb
;
3896 WARN_ON(acrtc
->pflip_status
!= AMDGPU_FLIP_NONE
);
3897 WARN_ON(!acrtc_state
->stream
);
3899 addr
.address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3900 addr
.address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3901 addr
.flip_immediate
= async_flip
;
3904 if (acrtc
->base
.state
->event
)
3905 prepare_flip_isr(acrtc
);
3907 surface_updates
->surface
= dc_stream_get_status(acrtc_state
->stream
)->plane_states
[0];
3908 surface_updates
->flip_addr
= &addr
;
3911 dc_update_planes_and_stream(adev
->dm
.dc
, surface_updates
, 1, acrtc_state
->stream
, NULL
);
3913 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
3915 addr
.address
.grph
.addr
.high_part
,
3916 addr
.address
.grph
.addr
.low_part
);
3919 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
/*
 * Commit all plane updates belonging to @pcrtc from the atomic @state.
 * Cursor planes go through handle_cursor_update(); non-flip updates are
 * collected into plane_states_constructed[] and pushed to DC via
 * dc_commit_planes_to_stream(); flip updates go through the flip path.
 * NOTE(review): extract is lossy -- declarations of i/pflip_needed, the
 * continue/return statements, and the amdgpu_dm_do_flip() call with its
 * target-vblank argument are only partially visible; confirm against
 * the full file.
 */
3922 static void amdgpu_dm_commit_planes(struct drm_atomic_state
*state
,
3923 struct drm_device
*dev
,
3924 struct amdgpu_display_manager
*dm
,
3925 struct drm_crtc
*pcrtc
,
3926 bool *wait_for_vblank
)
3929 struct drm_plane
*plane
;
3930 struct drm_plane_state
*old_plane_state
;
3931 struct dc_stream_state
*dc_stream_attach
;
3932 struct dc_plane_state
*plane_states_constructed
[MAX_SURFACES
];
3933 struct amdgpu_crtc
*acrtc_attach
= to_amdgpu_crtc(pcrtc
);
3934 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(pcrtc
->state
);
3935 int planes_count
= 0;
3936 unsigned long flags
;
3938 /* update planes when needed */
3939 for_each_plane_in_state(state
, plane
, old_plane_state
, i
) {
3940 struct drm_plane_state
*plane_state
= plane
->state
;
3941 struct drm_crtc
*crtc
= plane_state
->crtc
;
3942 struct drm_framebuffer
*fb
= plane_state
->fb
;
3944 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(plane_state
);
3946 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
3947 handle_cursor_update(plane
, old_plane_state
);
/* skip planes that are not on this CRTC or need no update */
3951 if (!fb
|| !crtc
|| pcrtc
!= crtc
|| !crtc
->state
->active
||
3952 (!crtc
->state
->planes_changed
&&
3953 !pcrtc
->state
->color_mgmt_changed
))
/* page-flip path is used for fast updates (no modeset allowed) */
3956 pflip_needed
= !state
->allow_modeset
;
3958 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
3959 if (acrtc_attach
->pflip_status
!= AMDGPU_FLIP_NONE
) {
3960 DRM_ERROR("%s: acrtc %d, already busy\n",
3962 acrtc_attach
->crtc_id
);
3963 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3964 /* In commit tail framework this cannot happen */
3967 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3969 if (!pflip_needed
) {
3970 WARN_ON(!dm_plane_state
->dc_state
);
3972 plane_states_constructed
[planes_count
] = dm_plane_state
->dc_state
;
3974 dc_stream_attach
= acrtc_state
->stream
;
3977 } else if (crtc
->state
->planes_changed
) {
3978 /* Assume even ONE crtc with immediate flip means
3979 * entire can't wait for VBLANK
3980 * TODO Check if it's correct
3983 acrtc_attach
->flip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
?
3986 /* TODO: Needs rework for multiplane flip */
3987 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
)
3988 drm_crtc_vblank_get(crtc
);
3993 drm_crtc_vblank_count(crtc
) + *wait_for_vblank
);
3995 /*TODO BUG remove ASAP in 4.12 to avoid race between worker and flip IOCTL */
3997 /*clean up the flags for next usage*/
3998 acrtc_attach
->flip_flags
= 0;
4004 unsigned long flags
;
4006 if (pcrtc
->state
->event
) {
4008 drm_crtc_vblank_get(pcrtc
);
4010 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
4011 prepare_flip_isr(acrtc_attach
);
4012 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
4015 if (false == dc_commit_planes_to_stream(dm
->dc
,
4016 plane_states_constructed
,
4019 dm_error("%s: Failed to attach plane!\n", __func__
);
4021 /*TODO BUG Here should go disable planes on CRTC. */
/*
 * DM implementation of drm_mode_config_funcs.atomic_commit.
 * Before delegating to drm_atomic_helper_commit(), disables DM interrupt
 * handling on every CRTC that is about to be modeset and currently has a
 * stream, so no vblank/pflip IRQs race with the state swap.
 * NOTE(review): extract is lossy -- the trailing parameter (presumably
 * bool nonblock, given its use in the return) and the loop index
 * declaration are not visible; confirm against the full file.
 */
4026 int amdgpu_dm_atomic_commit(
4027 struct drm_device
*dev
,
4028 struct drm_atomic_state
*state
,
4031 struct drm_crtc
*crtc
;
4032 struct drm_crtc_state
*new_state
;
4033 struct amdgpu_device
*adev
= dev
->dev_private
;
4037 * We evade vblanks and pflips on crtc that
4038 * should be changed. We do it here to flush & disable
4039 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
4040 * it will update crtc->dm_crtc_state->stream pointer which is used in
4043 for_each_crtc_in_state(state
, crtc
, new_state
, i
) {
4044 struct dm_crtc_state
*old_acrtc_state
= to_dm_crtc_state(crtc
->state
);
4045 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4047 if (drm_atomic_crtc_needs_modeset(new_state
) && old_acrtc_state
->stream
)
4048 manage_dm_interrupts(adev
, acrtc
, false);
4051 return drm_atomic_helper_commit(dev
, state
, nonblock
);
4053 /*TODO Handle EINTR, reenable IRQ*/
/*
 * DM commit-tail: apply an already-swapped atomic state to hardware.
 * Sequence visible in this extract:
 *  1. update legacy modeset state, then walk all CRTCs: remove old
 *     streams for modesets/resets and collect newly-set CRTCs in
 *     new_crtcs[];
 *  2. register new streams with the freesync module;
 *  3. commit the DC context (dc_commit_context);
 *  4. record the OTG instance per CRTC from the stream status;
 *  5. re-commit planes for connectors with scaling/underscan-only
 *     changes;
 *  6. re-enable interrupts on new CRTCs and notify freesync;
 *  7. commit planes per CRTC, send leftover vblank events, signal
 *     hw_done, optionally wait for vblanks, and clean up planes.
 * NOTE(review): extract is lossy -- loop index declarations, several
 * continue/break statements, DRM_DEBUG call heads, and some call
 * argument lines are missing; confirm details against the full file.
 */
4056 void amdgpu_dm_atomic_commit_tail(
4057 struct drm_atomic_state
*state
)
4059 struct drm_device
*dev
= state
->dev
;
4060 struct amdgpu_device
*adev
= dev
->dev_private
;
4061 struct amdgpu_display_manager
*dm
= &adev
->dm
;
4062 struct dm_atomic_state
*dm_state
;
4064 uint32_t new_crtcs_count
= 0;
4065 struct drm_crtc
*crtc
, *pcrtc
;
4066 struct drm_crtc_state
*old_crtc_state
;
4067 struct amdgpu_crtc
*new_crtcs
[MAX_STREAMS
];
4068 struct dc_stream_state
*new_stream
= NULL
;
4069 unsigned long flags
;
4070 bool wait_for_vblank
= true;
4071 struct drm_connector
*connector
;
4072 struct drm_connector_state
*old_conn_state
;
4073 struct dm_crtc_state
*old_acrtc_state
, *new_acrtc_state
;
4075 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
4077 dm_state
= to_dm_atomic_state(state
);
4079 /* update changed items */
4080 for_each_crtc_in_state(state
, crtc
, old_crtc_state
, i
) {
4081 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4082 struct drm_crtc_state
*new_state
= crtc
->state
;
4084 new_acrtc_state
= to_dm_crtc_state(new_state
);
4085 old_acrtc_state
= to_dm_crtc_state(old_crtc_state
);
4088 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4089 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4090 "connectors_changed:%d\n",
4094 new_state
->planes_changed
,
4095 new_state
->mode_changed
,
4096 new_state
->active_changed
,
4097 new_state
->connectors_changed
);
4099 /* handles headless hotplug case, updating new_state and
4100 * aconnector as needed
4103 if (modeset_required(new_state
, new_acrtc_state
->stream
, old_acrtc_state
->stream
)) {
4105 DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc
->crtc_id
, acrtc
);
4107 if (!new_acrtc_state
->stream
) {
4109 * this could happen because of issues with
4110 * userspace notifications delivery.
4111 * In this case userspace tries to set mode on
4112 * display which is disconnect in fact.
4113 * dc_sink in NULL in this case on aconnector.
4114 * We expect reset mode will come soon.
4116 * This can also happen when unplug is done
4117 * during resume sequence ended
4119 * In this case, we want to pretend we still
4120 * have a sink to keep the pipe running so that
4121 * hw state is consistent with the sw state
4123 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
4124 __func__
, acrtc
->base
.base
.id
);
4129 if (old_acrtc_state
->stream
)
4130 remove_stream(adev
, acrtc
, old_acrtc_state
->stream
);
4134 * this loop saves set mode crtcs
4135 * we needed to enable vblanks once all
4136 * resources acquired in dc after dc_commit_streams
4139 /*TODO move all this into dm_crtc_state, get rid of
4140 * new_crtcs array and use old and new atomic states
4143 new_crtcs
[new_crtcs_count
] = acrtc
;
4146 acrtc
->enabled
= true;
4147 acrtc
->hw_mode
= crtc
->state
->mode
;
4148 crtc
->hwmode
= crtc
->state
->mode
;
4149 } else if (modereset_required(new_state
)) {
4150 DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc
->crtc_id
, acrtc
);
4152 /* i.e. reset mode */
4153 if (old_acrtc_state
->stream
)
4154 remove_stream(adev
, acrtc
, old_acrtc_state
->stream
);
4156 } /* for_each_crtc_in_state() */
4159 * Add streams after required streams from new and replaced streams
4160 * are removed from freesync module
4162 if (adev
->dm
.freesync_module
) {
4163 for (i
= 0; i
< new_crtcs_count
; i
++) {
4164 struct amdgpu_connector
*aconnector
= NULL
;
4166 new_acrtc_state
= to_dm_crtc_state(new_crtcs
[i
]->base
.state
);
4168 new_stream
= new_acrtc_state
->stream
;
4170 amdgpu_dm_find_first_crct_matching_connector(
4172 &new_crtcs
[i
]->base
,
4175 DRM_INFO("Atomic commit: Failed to find connector for acrtc id:%d "
4176 "skipping freesync init\n",
4177 new_crtcs
[i
]->crtc_id
);
4181 mod_freesync_add_stream(adev
->dm
.freesync_module
,
4182 new_stream
, &aconnector
->caps
);
/* push the validated DC context to hardware */
4186 if (dm_state
->context
)
4187 WARN_ON(!dc_commit_context(dm
->dc
, dm_state
->context
));
4190 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
4191 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4193 new_acrtc_state
= to_dm_crtc_state(crtc
->state
);
4195 if (new_acrtc_state
->stream
!= NULL
) {
4196 const struct dc_stream_status
*status
=
4197 dc_stream_get_status(new_acrtc_state
->stream
);
4200 DC_ERR("got no status for stream %p on acrtc%p\n", new_acrtc_state
->stream
, acrtc
);
4202 acrtc
->otg_inst
= status
->primary_otg_inst
;
4206 /* Handle scaling and undersacn changes*/
4207 for_each_connector_in_state(state
, connector
, old_conn_state
, i
) {
4208 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
4209 struct dm_connector_state
*con_new_state
=
4210 to_dm_connector_state(aconnector
->base
.state
);
4211 struct dm_connector_state
*con_old_state
=
4212 to_dm_connector_state(old_conn_state
);
4213 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(con_new_state
->base
.crtc
);
4214 struct dc_stream_status
*status
= NULL
;
4216 /* Skip any modesets/resets */
4217 if (!acrtc
|| drm_atomic_crtc_needs_modeset(acrtc
->base
.state
))
4220 /* Skip any thing not scale or underscan changes */
4221 if (!is_scaling_state_different(con_new_state
, con_old_state
))
4224 new_acrtc_state
= to_dm_crtc_state(acrtc
->base
.state
);
4226 update_stream_scaling_settings(&con_new_state
->base
.crtc
->mode
,
4227 con_new_state
, (struct dc_stream_state
*)new_acrtc_state
->stream
);
4229 status
= dc_stream_get_status(new_acrtc_state
->stream
);
4231 WARN_ON(!status
->plane_count
);
4233 if (!new_acrtc_state
->stream
)
4236 /*TODO How it works with MPO ?*/
4237 if (!dc_commit_planes_to_stream(
4239 status
->plane_states
,
4240 status
->plane_count
,
4241 new_acrtc_state
->stream
))
4242 dm_error("%s: Failed to update stream scaling!\n", __func__
);
4245 for (i
= 0; i
< new_crtcs_count
; i
++) {
4247 * loop to enable interrupts on newly arrived crtc
4249 struct amdgpu_crtc
*acrtc
= new_crtcs
[i
];
4251 new_acrtc_state
= to_dm_crtc_state(acrtc
->base
.state
);
4253 if (adev
->dm
.freesync_module
)
4254 mod_freesync_notify_mode_change(
4255 adev
->dm
.freesync_module
, &new_acrtc_state
->stream
, 1);
4257 manage_dm_interrupts(adev
, acrtc
, true);
4260 /* update planes when needed per crtc*/
4261 for_each_crtc_in_state(state
, pcrtc
, old_crtc_state
, j
) {
4262 new_acrtc_state
= to_dm_crtc_state(pcrtc
->state
);
4264 if (new_acrtc_state
->stream
)
4265 amdgpu_dm_commit_planes(state
, dev
, dm
, pcrtc
, &wait_for_vblank
);
4270 * send vblank event on all events not handled in flip and
4271 * mark consumed event for drm_atomic_helper_commit_hw_done
4273 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
4274 for_each_crtc_in_state(state
, crtc
, old_crtc_state
, i
) {
4275 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4277 if (acrtc
->base
.state
->event
)
4278 drm_send_event_locked(dev
, &crtc
->state
->event
->base
);
4280 acrtc
->base
.state
->event
= NULL
;
4282 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
4284 /* Signal HW programming completion */
4285 drm_atomic_helper_commit_hw_done(state
);
4287 if (wait_for_vblank
)
4288 drm_atomic_helper_wait_for_vblanks(dev
, state
);
4290 drm_atomic_helper_cleanup_planes(dev
, state
);
/*
 * Build a minimal atomic state for @connector's current CRTC and
 * primary plane, force mode_changed, and commit it -- used to restore a
 * display configuration without a userspace-initiated modeset.
 * NOTE(review): extract is lossy -- the `int ret` declaration, the
 * error-path goto/labels after each PTR_ERR_OR_ZERO check, and the
 * final return are not visible; confirm against the full file.
 */
4294 static int dm_force_atomic_commit(struct drm_connector
*connector
)
4297 struct drm_device
*ddev
= connector
->dev
;
4298 struct drm_atomic_state
*state
= drm_atomic_state_alloc(ddev
);
4299 struct amdgpu_crtc
*disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4300 struct drm_plane
*plane
= disconnected_acrtc
->base
.primary
;
4301 struct drm_connector_state
*conn_state
;
4302 struct drm_crtc_state
*crtc_state
;
4303 struct drm_plane_state
*plane_state
;
4308 state
->acquire_ctx
= ddev
->mode_config
.acquire_ctx
;
4310 /* Construct an atomic state to restore previous display setting */
4313 * Attach connectors to drm_atomic_state
4315 conn_state
= drm_atomic_get_connector_state(state
, connector
);
4317 ret
= PTR_ERR_OR_ZERO(conn_state
);
4321 /* Attach crtc to drm_atomic_state*/
4322 crtc_state
= drm_atomic_get_crtc_state(state
, &disconnected_acrtc
->base
);
4324 ret
= PTR_ERR_OR_ZERO(crtc_state
);
4328 /* force a restore */
4329 crtc_state
->mode_changed
= true;
4331 /* Attach plane to drm_atomic_state */
4332 plane_state
= drm_atomic_get_plane_state(state
, plane
);
4334 ret
= PTR_ERR_OR_ZERO(plane_state
);
4339 /* Call commit internally with the state we just constructed */
4340 ret
= drm_atomic_commit(state
);
4345 DRM_ERROR("Restoring old state failed with %i\n", ret
);
4346 drm_atomic_state_put(state
);
4352 * This function handles all cases when a set mode does not come upon
4353 * hotplug. This includes when the same display is unplugged and then
4354 * plugged back into the same port, and when we are running without
 * usermode desktop manager support.
4356 void dm_restore_drm_connector_state(struct drm_device
*dev
, struct drm_connector
*connector
)
4358 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
4359 struct amdgpu_crtc
*disconnected_acrtc
;
4360 struct dm_crtc_state
*acrtc_state
;
/* nothing to restore without a sink, connector state and encoder */
4362 if (!aconnector
->dc_sink
|| !connector
->state
|| !connector
->encoder
)
4365 disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4366 acrtc_state
= to_dm_crtc_state(disconnected_acrtc
->base
.state
);
4368 if (!disconnected_acrtc
|| !acrtc_state
->stream
)
4372 * If the previous sink is not released and different from the current,
4373 * we deduce we are in a state where we can not rely on usermode call
4374 * to turn on the display, so we do it here
4376 if (acrtc_state
->stream
->sink
!= aconnector
->dc_sink
)
4377 dm_force_atomic_commit(&aconnector
->base
);
/*
 * Append @plane_state to the validation-set entry whose stream matches
 * @stream; returns the new plane_count for that entry.
 * NOTE(review): extract is lossy -- the set_count parameter, the inner
 * j++ / outer i++ increments and the not-found return path are not
 * visible; confirm against the full file.
 */
4380 static uint32_t add_val_sets_plane(
4381 struct dc_validation_set
*val_sets
,
4383 const struct dc_stream_state
*stream
,
4384 struct dc_plane_state
*plane_state
)
4386 uint32_t i
= 0, j
= 0;
4388 while (i
< set_count
) {
4389 if (val_sets
[i
].stream
== stream
) {
/* advance j to the first free plane_states slot */
4390 while (val_sets
[i
].plane_states
[j
])
4397 val_sets
[i
].plane_states
[j
] = plane_state
;
4398 val_sets
[i
].plane_count
++;
4400 return val_sets
[i
].plane_count
;
/*
 * Replace @old_stream with @new_stream in the validation sets; when no
 * entry matches, a new one is appended (returning set_count + 1).
 * NOTE(review): extract is lossy -- the set_count parameter, the loop
 * index declaration/increment, the found-path return and the use of
 * @crtc are not visible; confirm against the full file.
 */
4403 static uint32_t update_in_val_sets_stream(
4404 struct dc_validation_set
*val_sets
,
4406 struct dc_stream_state
*old_stream
,
4407 struct dc_stream_state
*new_stream
,
4408 struct drm_crtc
*crtc
)
4412 while (i
< set_count
) {
4413 if (val_sets
[i
].stream
== old_stream
)
4418 val_sets
[i
].stream
= new_stream
;
4421 /* nothing found. add new one to the end */
4422 return set_count
+ 1;
/*
 * Remove the validation-set entry matching @stream by shifting the tail
 * of the array down one slot.
 * NOTE(review): extract is lossy -- the set_count parameter, loop index
 * declaration, the not-found return and the final return value
 * (presumably set_count - 1) are not visible; confirm against the
 * full file.
 */
4427 static uint32_t remove_from_val_sets(
4428 struct dc_validation_set
*val_sets
,
4430 const struct dc_stream_state
*stream
)
4434 for (i
= 0; i
< set_count
; i
++)
4435 if (val_sets
[i
].stream
== stream
)
4438 if (i
== set_count
) {
/* compact the array over the removed slot */
4445 for (; i
< set_count
; i
++)
4446 val_sets
[i
] = val_sets
[i
+ 1];
/*
 * NOTE(review): extract is lossy -- the `int ret` declaration, the
 * early-return after drm_modeset_lock_all_ctx, the NULL-commit continue,
 * and the timeout checks between the two completion waits are not
 * visible; confirm against the full file. Returns a negative errno on
 * interrupted/failed wait, 0 otherwise.
 */
4452 * Grabs all modesetting locks to serialize against any blocking commits,
4453 * Waits for completion of all non blocking commits.
4455 static int do_aquire_global_lock(
4456 struct drm_device
*dev
,
4457 struct drm_atomic_state
*state
)
4459 struct drm_crtc
*crtc
;
4460 struct drm_crtc_commit
*commit
;
4463 /* Adding all modeset locks to aquire_ctx will
4464 * ensure that when the framework release it the
4465 * extra locks we are locking here will get released to
4467 ret
= drm_modeset_lock_all_ctx(dev
, state
->acquire_ctx
);
4471 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
/* take a reference to the newest commit under the commit_lock */
4472 spin_lock(&crtc
->commit_lock
);
4473 commit
= list_first_entry_or_null(&crtc
->commit_list
,
4474 struct drm_crtc_commit
, commit_entry
);
4476 drm_crtc_commit_get(commit
);
4477 spin_unlock(&crtc
->commit_lock
);
4482 /* Make sure all pending HW programming completed and
4485 ret
= wait_for_completion_interruptible_timeout(&commit
->hw_done
, 10*HZ
);
4488 ret
= wait_for_completion_interruptible_timeout(
4489 &commit
->flip_done
, 10*HZ
);
4492 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
4493 "timed out\n", crtc
->base
.id
, crtc
->name
);
4495 drm_crtc_commit_put(commit
);
4498 return ret
< 0 ? ret
: 0;
/*
 * DM implementation of drm_mode_config_funcs.atomic_check.
 * Flow visible in this extract:
 *  1. run drm_atomic_helper_check_modeset();
 *  2. seed set[] with the currently committed streams (retained);
 *  3. per CRTC in the state: drop streams on modereset, or create a new
 *     stream for the sink and swap it into set[] on modeset;
 *  4. force-add affected planes when color management changed;
 *  5. flag scaling/underscan-only connector changes;
 *  6. per plane: build a dc_plane_state via fill_plane_attributes() and
 *     add it to the matching validation set;
 *  7. run drm_atomic_helper_check_planes();
 *  8. when any full update was flagged, take the global lock and build
 *     dm_state->context with dc_get_validate_context().
 * NOTE(review): extract is lossy -- declarations of i/j/ret/set_count,
 * many goto/continue/return lines, and several call-argument lines are
 * missing; confirm details against the full file.
 */
4501 int amdgpu_dm_atomic_check(struct drm_device
*dev
,
4502 struct drm_atomic_state
*state
)
4504 struct dm_atomic_state
*dm_state
;
4505 struct drm_crtc
*crtc
;
4506 struct drm_crtc_state
*crtc_state
;
4507 struct drm_plane
*plane
;
4508 struct drm_plane_state
*plane_state
;
4511 struct amdgpu_device
*adev
= dev
->dev_private
;
4512 struct dc
*dc
= adev
->dm
.dc
;
4513 struct drm_connector
*connector
;
4514 struct drm_connector_state
*conn_state
;
4516 struct dc_validation_set set
[MAX_STREAMS
] = { { 0 } };
4517 struct dm_crtc_state
*old_acrtc_state
, *new_acrtc_state
;
4520 * This bool will be set for true for any modeset/reset
4521 * or plane update which implies non fast surface update.
4523 bool lock_and_validation_needed
= false;
4525 ret
= drm_atomic_helper_check_modeset(dev
, state
);
4528 DRM_ERROR("Atomic state validation failed with error :%d !\n", ret
);
4532 dm_state
= to_dm_atomic_state(state
);
4534 /* copy existing configuration */
4536 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
4538 old_acrtc_state
= to_dm_crtc_state(crtc
->state
);
4540 if (old_acrtc_state
->stream
) {
4541 dc_stream_retain(old_acrtc_state
->stream
);
4542 set
[set_count
].stream
= old_acrtc_state
->stream
;
4547 /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
4548 /* update changed items */
4549 for_each_crtc_in_state(state
, crtc
, crtc_state
, i
) {
4550 struct amdgpu_crtc
*acrtc
= NULL
;
4551 struct amdgpu_connector
*aconnector
= NULL
;
4552 struct dc_stream_state
*new_stream
= NULL
;
4553 struct drm_connector_state
*conn_state
= NULL
;
4554 struct dm_connector_state
*dm_conn_state
= NULL
;
4556 old_acrtc_state
= to_dm_crtc_state(crtc
->state
);
4557 new_acrtc_state
= to_dm_crtc_state(crtc_state
);
4558 acrtc
= to_amdgpu_crtc(crtc
);
4560 aconnector
= amdgpu_dm_find_first_crct_matching_connector(state
, crtc
, true);
4563 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4564 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4565 "connectors_changed:%d\n",
4569 crtc_state
->planes_changed
,
4570 crtc_state
->mode_changed
,
4571 crtc_state
->active_changed
,
4572 crtc_state
->connectors_changed
);
4574 if (modereset_required(crtc_state
)) {
4576 /* i.e. reset mode */
4577 if (new_acrtc_state
->stream
) {
4578 set_count
= remove_from_val_sets(
4581 new_acrtc_state
->stream
);
4583 dc_stream_release(new_acrtc_state
->stream
);
4584 new_acrtc_state
->stream
= NULL
;
4586 lock_and_validation_needed
= true;
4592 conn_state
= drm_atomic_get_connector_state(state
,
4595 if (IS_ERR(conn_state
)) {
4596 ret
= PTR_ERR_OR_ZERO(conn_state
);
4600 dm_conn_state
= to_dm_connector_state(conn_state
);
4602 new_stream
= create_stream_for_sink(aconnector
,
4607 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
4608 __func__
, acrtc
->base
.base
.id
);
4615 if (modeset_required(crtc_state
, new_stream
,
4616 old_acrtc_state
->stream
)) {
4620 * we can have no stream on ACTION_SET if a display
4621 * was disconnected during S3, in this case it not and
4622 * error, the OS will be updated after detection, and
4623 * do the right thing on next atomic commit
4626 if (new_acrtc_state
->stream
)
4627 dc_stream_release(new_acrtc_state
->stream
);
4629 new_acrtc_state
->stream
= new_stream
;
4631 set_count
= update_in_val_sets_stream(
4634 old_acrtc_state
->stream
,
4635 new_acrtc_state
->stream
,
4638 lock_and_validation_needed
= true;
4641 * The new stream is unused, so we release it
4644 dc_stream_release(new_stream
);
4651 * Hack: Commit needs planes right now, specifically for gamma
4652 * TODO rework commit to check CRTC for gamma change
4654 if (crtc_state
->color_mgmt_changed
) {
4656 ret
= drm_atomic_add_affected_planes(state
, crtc
);
4662 /* Check scaling and undersacn changes*/
4663 /*TODO Removed scaling changes validation due to inability to commit
4664 * new stream into context w\o causing full reset. Need to
4665 * decide how to handle.
4667 for_each_connector_in_state(state
, connector
, conn_state
, i
) {
4668 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
4669 struct dm_connector_state
*con_old_state
=
4670 to_dm_connector_state(aconnector
->base
.state
);
4671 struct dm_connector_state
*con_new_state
=
4672 to_dm_connector_state(conn_state
);
4673 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(con_new_state
->base
.crtc
);
4675 /* Skip any modesets/resets */
4676 if (!acrtc
|| drm_atomic_crtc_needs_modeset(acrtc
->base
.state
))
4679 /* Skip any thing not scale or underscan changes */
4680 if (!is_scaling_state_different(con_new_state
, con_old_state
))
4683 lock_and_validation_needed
= true;
4686 for_each_crtc_in_state(state
, crtc
, crtc_state
, i
) {
4687 new_acrtc_state
= to_dm_crtc_state(crtc_state
);
4689 for_each_plane_in_state(state
, plane
, plane_state
, j
) {
4690 struct drm_crtc
*plane_crtc
= plane_state
->crtc
;
4691 struct drm_framebuffer
*fb
= plane_state
->fb
;
4693 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(plane_state
);
4695 /*TODO Implement atomic check for cursor plane */
4696 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
4699 if (!fb
|| !plane_crtc
|| crtc
!= plane_crtc
|| !crtc_state
->active
)
4702 WARN_ON(!new_acrtc_state
->stream
);
4704 pflip_needed
= !state
->allow_modeset
;
4705 if (!pflip_needed
) {
4706 struct dc_plane_state
*dc_plane_state
;
4708 dc_plane_state
= dc_create_plane_state(dc
);
4710 ret
= fill_plane_attributes(
4711 plane_crtc
->dev
->dev_private
,
/* replace any previously attached dc_state on the plane */
4720 if (dm_plane_state
->dc_state
)
4721 dc_plane_state_release(dm_plane_state
->dc_state
);
4723 dm_plane_state
->dc_state
= dc_plane_state
;
4725 add_val_sets_plane(set
,
4727 new_acrtc_state
->stream
,
4730 lock_and_validation_needed
= true;
4735 /* Run this here since we want to validate the streams we created */
4736 ret
= drm_atomic_helper_check_planes(dev
, state
);
4741 * For full updates case when
4742 * removing/adding/updating streams on once CRTC while flipping
4744 * acquiring global lock will guarantee that any such full
4746 * will wait for completion of any outstanding flip using DRMs
4747 * synchronization events.
4750 if (lock_and_validation_needed
) {
4752 ret
= do_aquire_global_lock(dev
, state
);
4755 WARN_ON(dm_state
->context
);
4756 dm_state
->context
= dc_get_validate_context(dc
, set
, set_count
);
4757 if (!dm_state
->context
) {
4763 /* Must be success */
4768 if (ret
== -EDEADLK
)
/* NOTE(review): "due to to" typo in the two debug strings below --
 * a runtime string, left untouched here; fix upstream. */
4769 DRM_DEBUG_KMS("Atomic check stopped due to to deadlock.\n");
4770 else if (ret
== -EINTR
|| ret
== -EAGAIN
|| ret
== -ERESTARTSYS
)
4771 DRM_DEBUG_KMS("Atomic check stopped due to to signal.\n");
4773 DRM_ERROR("Atomic check failed with err: %d .\n", ret
);
/*
 * Query the sink's DPCD to see whether it can ignore MSA timing
 * parameters (DP_MSA_TIMING_PAR_IGNORED in DP_DOWN_STREAM_PORT_COUNT),
 * a prerequisite for variable-refresh operation over DP.
 * NOTE(review): extract is lossy -- the first parameter (presumably
 * struct dc *dc), the dpcd_data buffer declaration, and the
 * `return capable;` line are not visible; confirm against the full file.
 */
4778 static bool is_dp_capable_without_timing_msa(
4780 struct amdgpu_connector
*amdgpu_connector
)
4783 bool capable
= false;
4785 if (amdgpu_connector
->dc_link
&&
4786 dm_helpers_dp_read_dpcd(
4788 amdgpu_connector
->dc_link
,
4789 DP_DOWN_STREAM_PORT_COUNT
,
4791 sizeof(dpcd_data
))) {
4792 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
/*
 * Parse the sink's EDID for a continuous-frequency monitor range
 * descriptor and, when the refresh span exceeds 10 Hz, record freesync
 * capabilities (min/max refresh in micro-Hz) on the connector.
 * Restricted to DP/eDP sinks that pass the MSA-ignore DPCD check.
 * NOTE(review): extract is lossy -- the second parameter (presumably
 * struct edid *edid, given the edid-> dereferences), the loop index
 * declaration, the early returns, and the uses of val_capable are not
 * visible; confirm against the full file.
 */
4797 void amdgpu_dm_add_sink_to_freesync_module(
4798 struct drm_connector
*connector
,
4802 uint64_t val_capable
;
4803 bool edid_check_required
;
4804 struct detailed_timing
*timing
;
4805 struct detailed_non_pixel
*data
;
4806 struct detailed_data_monitor_range
*range
;
4807 struct amdgpu_connector
*amdgpu_connector
=
4808 to_amdgpu_connector(connector
);
4810 struct drm_device
*dev
= connector
->dev
;
4811 struct amdgpu_device
*adev
= dev
->dev_private
;
4813 edid_check_required
= false;
4814 if (!amdgpu_connector
->dc_sink
) {
4815 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
4818 if (!adev
->dm
.freesync_module
)
4821 * if edid non zero restrict freesync only for dp and edp
4824 if (amdgpu_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
4825 || amdgpu_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
4826 edid_check_required
= is_dp_capable_without_timing_msa(
/* EDID 1.2+ is required for detailed monitor-range descriptors */
4832 if (edid_check_required
== true && (edid
->version
> 1 ||
4833 (edid
->version
== 1 && edid
->revision
> 1))) {
4834 for (i
= 0; i
< 4; i
++) {
4836 timing
= &edid
->detailed_timings
[i
];
4837 data
= &timing
->data
.other_data
;
4838 range
= &data
->data
.range
;
4840 * Check if monitor has continuous frequency mode
4842 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
4845 * Check for flag range limits only. If flag == 1 then
4846 * no additional timing information provided.
4847 * Default GTF, GTF Secondary curve and CVT are not
4850 if (range
->flags
!= 1)
4853 amdgpu_connector
->min_vfreq
= range
->min_vfreq
;
4854 amdgpu_connector
->max_vfreq
= range
->max_vfreq
;
4855 amdgpu_connector
->pixel_clock_mhz
=
4856 range
->pixel_clock_mhz
* 10;
/* require at least a 10 Hz span before advertising freesync */
4860 if (amdgpu_connector
->max_vfreq
-
4861 amdgpu_connector
->min_vfreq
> 10) {
4862 amdgpu_connector
->caps
.supported
= true;
4863 amdgpu_connector
->caps
.min_refresh_in_micro_hz
=
4864 amdgpu_connector
->min_vfreq
* 1000000;
4865 amdgpu_connector
->caps
.max_refresh_in_micro_hz
=
4866 amdgpu_connector
->max_vfreq
* 1000000;
4872 * TODO figure out how to notify user-mode or DRM of freesync caps
4873 * once we figure out how to deal with freesync in an upstreamable
4879 void amdgpu_dm_remove_sink_from_freesync_module(
4880 struct drm_connector
*connector
)
4883 * TODO fill in once we figure out how to deal with freesync in
4884 * an upstreamable fashion