2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include "dm_services_types.h"
28 #include "dc/inc/core_types.h"
32 #include "amdgpu_display.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
40 #include "dm_services_types.h"
41 #include "amdgpu_dm_mst_types.h"
43 #include "ivsrcid/ivsrcid_vislands30.h"
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/version.h>
48 #include <linux/types.h>
51 #include <drm/drm_atomic.h>
52 #include <drm/drm_atomic_helper.h>
53 #include <drm/drm_dp_mst_helper.h>
54 #include <drm/drm_fb_helper.h>
55 #include <drm/drm_edid.h>
57 #include "modules/inc/mod_freesync.h"
59 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
60 #include "ivsrcid/irqsrcs_dcn_1_0.h"
62 #include "raven1/DCN/dcn_1_0_offset.h"
63 #include "raven1/DCN/dcn_1_0_sh_mask.h"
64 #include "vega10/soc15ip.h"
66 #include "soc15_common.h"
69 #include "modules/inc/mod_freesync.h"
71 #include "i2caux_interface.h"
74 static enum drm_plane_type dm_plane_type_default
[AMDGPU_MAX_PLANES
] = {
75 DRM_PLANE_TYPE_PRIMARY
,
76 DRM_PLANE_TYPE_PRIMARY
,
77 DRM_PLANE_TYPE_PRIMARY
,
78 DRM_PLANE_TYPE_PRIMARY
,
79 DRM_PLANE_TYPE_PRIMARY
,
80 DRM_PLANE_TYPE_PRIMARY
,
83 static enum drm_plane_type dm_plane_type_carizzo
[AMDGPU_MAX_PLANES
] = {
84 DRM_PLANE_TYPE_PRIMARY
,
85 DRM_PLANE_TYPE_PRIMARY
,
86 DRM_PLANE_TYPE_PRIMARY
,
87 DRM_PLANE_TYPE_OVERLAY
,/* YUV Capable Underlay */
90 static enum drm_plane_type dm_plane_type_stoney
[AMDGPU_MAX_PLANES
] = {
91 DRM_PLANE_TYPE_PRIMARY
,
92 DRM_PLANE_TYPE_PRIMARY
,
93 DRM_PLANE_TYPE_OVERLAY
, /* YUV Capable Underlay */
97 * dm_vblank_get_counter
100 * Get counter for number of vertical blanks
103 * struct amdgpu_device *adev - [in] desired amdgpu device
104 * int disp_idx - [in] which CRTC to get the counter from
107 * Counter for vertical blanks
109 static u32
dm_vblank_get_counter(struct amdgpu_device
*adev
, int crtc
)
111 if (crtc
>= adev
->mode_info
.num_crtc
)
114 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
115 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
119 if (acrtc_state
->stream
== NULL
) {
120 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
125 return dc_stream_get_vblank_counter(acrtc_state
->stream
);
129 static int dm_crtc_get_scanoutpos(struct amdgpu_device
*adev
, int crtc
,
130 u32
*vbl
, u32
*position
)
132 uint32_t v_blank_start
, v_blank_end
, h_position
, v_position
;
134 if ((crtc
< 0) || (crtc
>= adev
->mode_info
.num_crtc
))
137 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
138 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
141 if (acrtc_state
->stream
== NULL
) {
142 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
148 * TODO rework base driver to use values directly.
149 * for now parse it back into reg-format
151 dc_stream_get_scanoutpos(acrtc_state
->stream
,
157 *position
= v_position
| (h_position
<< 16);
158 *vbl
= v_blank_start
| (v_blank_end
<< 16);
164 static bool dm_is_idle(void *handle
)
170 static int dm_wait_for_idle(void *handle
)
176 static bool dm_check_soft_reset(void *handle
)
181 static int dm_soft_reset(void *handle
)
/*
 * get_crtc_by_otg_inst() - map an OTG (output timing generator) instance
 * number back to its amdgpu_crtc.
 * NOTE(review): SOURCE is a line-mangled extraction; original brace/return
 * lines are missing here.  All tokens are kept byte-identical.
 */
187 static struct amdgpu_crtc
*get_crtc_by_otg_inst(
188 struct amdgpu_device
*adev
,
191 struct drm_device
*dev
= adev
->ddev
;
192 struct drm_crtc
*crtc
;
193 struct amdgpu_crtc
*amdgpu_crtc
;
196 * following if is check inherited from both functions where this one is
197 * used now. Need to be checked why it could happen.
/* -1 means "no specific OTG instance": fall back to the first CRTC. */
199 if (otg_inst
== -1) {
201 return adev
->mode_info
.crtcs
[0];
/* Walk every registered CRTC and return the one whose otg_inst matches. */
204 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
205 amdgpu_crtc
= to_amdgpu_crtc(crtc
);
207 if (amdgpu_crtc
->otg_inst
== otg_inst
)
/*
 * dm_pflip_high_irq() - high-IRQ-context handler for page-flip-done
 * interrupts.  Looks up the CRTC from the IRQ source, then (under the DRM
 * event lock) sends the pending vblank event to userspace and marks the
 * flip as complete.
 * NOTE(review): mangled extraction — the 'flags' declaration and several
 * brace/return lines from the original are not visible here.
 */
214 static void dm_pflip_high_irq(void *interrupt_params
)
216 struct amdgpu_crtc
*amdgpu_crtc
;
217 struct common_irq_params
*irq_params
= interrupt_params
;
218 struct amdgpu_device
*adev
= irq_params
->adev
;
/* IRQ source numbers are offset by IRQ_TYPE_PFLIP to get the OTG inst. */
221 amdgpu_crtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_PFLIP
);
223 /* IRQ could occur when in initial stage */
224 /*TODO work and BO cleanup */
225 if (amdgpu_crtc
== NULL
) {
226 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
/* event_lock protects amdgpu_crtc->event and pflip_status. */
230 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
/* Spurious interrupt: no flip was submitted for this CRTC — bail out. */
232 if (amdgpu_crtc
->pflip_status
!= AMDGPU_FLIP_SUBMITTED
){
233 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
234 amdgpu_crtc
->pflip_status
,
235 AMDGPU_FLIP_SUBMITTED
,
236 amdgpu_crtc
->crtc_id
,
238 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
243 /* wakeup userspace */
244 if (amdgpu_crtc
->event
) {
245 /* Update to correct count/ts if racing with vblank irq */
246 drm_crtc_accurate_vblank_count(&amdgpu_crtc
->base
);
248 drm_crtc_send_vblank_event(&amdgpu_crtc
->base
, amdgpu_crtc
->event
);
250 /* page flip completed. clean up */
251 amdgpu_crtc
->event
= NULL
;
256 amdgpu_crtc
->pflip_status
= AMDGPU_FLIP_NONE
;
257 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
259 DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
260 __func__
, amdgpu_crtc
->crtc_id
, amdgpu_crtc
);
/* Drop the vblank reference taken when the flip was submitted. */
262 drm_crtc_vblank_put(&amdgpu_crtc
->base
);
/*
 * dm_crtc_high_irq() - high-IRQ-context vblank handler: resolves the CRTC
 * for the interrupt source and forwards the event to the DRM core.
 * NOTE(review): mangled extraction — brace lines from the original are
 * missing; tokens kept byte-identical.
 */
265 static void dm_crtc_high_irq(void *interrupt_params
)
267 struct common_irq_params
*irq_params
= interrupt_params
;
268 struct amdgpu_device
*adev
= irq_params
->adev
;
269 uint8_t crtc_index
= 0;
270 struct amdgpu_crtc
*acrtc
;
/* IRQ source numbers are offset by IRQ_TYPE_VBLANK to get the OTG inst. */
272 acrtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_VBLANK
);
275 crtc_index
= acrtc
->crtc_id
;
/* Let the DRM core deliver the vblank event for this CRTC index. */
277 drm_handle_vblank(adev
->ddev
, crtc_index
);
280 static int dm_set_clockgating_state(void *handle
,
281 enum amd_clockgating_state state
)
286 static int dm_set_powergating_state(void *handle
,
287 enum amd_powergating_state state
)
292 /* Prototypes of private functions */
293 static int dm_early_init(void* handle
);
/*
 * hotplug_notify_work_func() - deferred-work handler for MST hotplug:
 * recovers the display manager from the embedded work_struct and notifies
 * userspace of a hotplug event.
 */
295 static void hotplug_notify_work_func(struct work_struct
*work
)
297 struct amdgpu_display_manager
*dm
= container_of(work
, struct amdgpu_display_manager
, mst_hotplug_work
);
298 struct drm_device
*dev
= dm
->ddev
;
/* Triggers connector re-probe and a uevent to userspace. */
300 drm_kms_helper_hotplug_event(dev
);
304 #include "dal_asic_id.h"
305 /* Allocate memory for FBC compressed data */
306 /* TODO: Dynamic allocation */
307 #define AMDGPU_FBC_SIZE (3840 * 2160 * 4)
/*
 * amdgpu_dm_initialize_fbc() - allocate the VRAM buffer used for FBC
 * (frame buffer compression) compressed data, if not already allocated.
 * Buffer size is the fixed AMDGPU_FBC_SIZE (3840*2160*4).
 * NOTE(review): the declaration of 'r' and the error-branch braces from
 * the original are not visible in this extraction.
 */
309 void amdgpu_dm_initialize_fbc(struct amdgpu_device
*adev
)
312 struct dm_comressor_info
*compressor
= &adev
->dm
.compressor
;
/* Only allocate once: bo_ptr is non-NULL after a successful create. */
314 if (!compressor
->bo_ptr
) {
315 r
= amdgpu_bo_create_kernel(adev
, AMDGPU_FBC_SIZE
, PAGE_SIZE
,
316 AMDGPU_GEM_DOMAIN_VRAM
, &compressor
->bo_ptr
,
317 &compressor
->gpu_addr
, &compressor
->cpu_addr
);
320 DRM_ERROR("DM: Failed to initialize fbc\n");
329 * Returns 0 on success
331 int amdgpu_dm_init(struct amdgpu_device
*adev
)
333 struct dc_init_data init_data
;
334 adev
->dm
.ddev
= adev
->ddev
;
335 adev
->dm
.adev
= adev
;
337 DRM_INFO("DAL is enabled\n");
338 /* Zero all the fields */
339 memset(&init_data
, 0, sizeof(init_data
));
341 /* initialize DAL's lock (for SYNC context use) */
342 spin_lock_init(&adev
->dm
.dal_lock
);
344 /* initialize DAL's mutex */
345 mutex_init(&adev
->dm
.dal_mutex
);
347 if(amdgpu_dm_irq_init(adev
)) {
348 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
352 init_data
.asic_id
.chip_family
= adev
->family
;
354 init_data
.asic_id
.pci_revision_id
= adev
->rev_id
;
355 init_data
.asic_id
.hw_internal_rev
= adev
->external_rev_id
;
357 init_data
.asic_id
.vram_width
= adev
->mc
.vram_width
;
358 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
359 init_data
.asic_id
.atombios_base_address
=
360 adev
->mode_info
.atom_context
->bios
;
362 init_data
.driver
= adev
;
364 adev
->dm
.cgs_device
= amdgpu_cgs_create_device(adev
);
366 if (!adev
->dm
.cgs_device
) {
367 DRM_ERROR("amdgpu: failed to create cgs device.\n");
371 init_data
.cgs_device
= adev
->dm
.cgs_device
;
375 init_data
.dce_environment
= DCE_ENV_PRODUCTION_DRV
;
378 if (adev
->family
== FAMILY_CZ
)
379 amdgpu_dm_initialize_fbc(adev
);
380 init_data
.fbc_gpu_addr
= adev
->dm
.compressor
.gpu_addr
;
382 /* Display Core create. */
383 adev
->dm
.dc
= dc_create(&init_data
);
386 DRM_INFO("Display Core failed to initialize!\n");
388 INIT_WORK(&adev
->dm
.mst_hotplug_work
, hotplug_notify_work_func
);
390 adev
->dm
.freesync_module
= mod_freesync_create(adev
->dm
.dc
);
391 if (!adev
->dm
.freesync_module
) {
393 "amdgpu: failed to initialize freesync_module.\n");
395 DRM_INFO("amdgpu: freesync_module init done %p.\n",
396 adev
->dm
.freesync_module
);
398 if (amdgpu_dm_initialize_drm_device(adev
)) {
400 "amdgpu: failed to initialize sw for display support.\n");
404 /* Update the actual used number of crtc */
405 adev
->mode_info
.num_crtc
= adev
->dm
.display_indexes_num
;
407 /* TODO: Add_display_info? */
409 /* TODO use dynamic cursor width */
410 adev
->ddev
->mode_config
.cursor_width
= adev
->dm
.dc
->caps
.max_cursor_size
;
411 adev
->ddev
->mode_config
.cursor_height
= adev
->dm
.dc
->caps
.max_cursor_size
;
413 if (drm_vblank_init(adev
->ddev
, adev
->dm
.display_indexes_num
)) {
415 "amdgpu: failed to initialize sw for display support.\n");
419 DRM_INFO("KMS initialized.\n");
423 amdgpu_dm_fini(adev
);
/*
 * amdgpu_dm_fini() - tear down the display manager: destroy the DRM-side
 * objects, the CGS device, the freesync module, and finally the DC core.
 * Mirrors the setup order of amdgpu_dm_init().
 */
428 void amdgpu_dm_fini(struct amdgpu_device
*adev
)
430 amdgpu_dm_destroy_drm_device(&adev
->dm
);
432 * TODO: pageflip, vblank interrupt
434 * amdgpu_dm_irq_fini(adev);
/* Destroy the CGS device and clear the pointer to avoid reuse. */
437 if (adev
->dm
.cgs_device
) {
438 amdgpu_cgs_destroy_device(adev
->dm
.cgs_device
);
439 adev
->dm
.cgs_device
= NULL
;
441 if (adev
->dm
.freesync_module
) {
442 mod_freesync_destroy(adev
->dm
.freesync_module
);
443 adev
->dm
.freesync_module
= NULL
;
445 /* DC Destroy TODO: Replace destroy DAL */
447 dc_destroy(&adev
->dm
.dc
);
451 /* moved from amdgpu_dm_kms.c */
452 void amdgpu_dm_destroy()
456 static int dm_sw_init(void *handle
)
461 static int dm_sw_fini(void *handle
)
/*
 * detect_mst_link_for_all_connectors() - walk every connector and start
 * MST topology management on each MST-branch link; on failure, demote the
 * link to a single (non-MST) connection.
 * NOTE(review): declarations of 'ret' and some brace/return lines are
 * missing from this extraction.
 */
466 static int detect_mst_link_for_all_connectors(struct drm_device
*dev
)
468 struct amdgpu_dm_connector
*aconnector
;
469 struct drm_connector
*connector
;
/* connection_mutex guards the connector list while we iterate it. */
472 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
474 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
475 aconnector
= to_amdgpu_dm_connector(connector
);
476 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
) {
477 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
478 aconnector
, aconnector
->base
.base
.id
);
/* Enable MST mode on this connector's topology manager. */
480 ret
= drm_dp_mst_topology_mgr_set_mst(&aconnector
->mst_mgr
, true);
482 DRM_ERROR("DM_MST: Failed to start MST\n");
/* Cast away const on dc_link to downgrade the connection type. */
483 ((struct dc_link
*)aconnector
->dc_link
)->type
= dc_connection_single
;
489 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
/*
 * dm_late_init() - amd_ip_funcs late-init hook: kick off MST link
 * detection on all connectors once the device is up.
 * 'handle' is the amdgpu_device, per the amd_ip_funcs convention.
 */
493 static int dm_late_init(void *handle
)
495 struct drm_device
*dev
= ((struct amdgpu_device
*)handle
)->ddev
;
496 int r
= detect_mst_link_for_all_connectors(dev
);
/*
 * s3_handle_mst() - suspend or resume the MST topology manager on every
 * MST root connector (connectors that are MST branches but have no
 * mst_port, i.e. are not themselves downstream ports).
 * @suspend: true on S3 entry, false on resume.
 * NOTE(review): the if/else structure around suspend vs. resume is only
 * partially visible in this extraction.
 */
501 static void s3_handle_mst(struct drm_device
*dev
, bool suspend
)
503 struct amdgpu_dm_connector
*aconnector
;
504 struct drm_connector
*connector
;
506 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
508 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
509 aconnector
= to_amdgpu_dm_connector(connector
);
/* Only root MST connectors: branch type, and not a downstream port. */
510 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
511 !aconnector
->mst_port
) {
514 drm_dp_mst_topology_mgr_suspend(&aconnector
->mst_mgr
);
516 drm_dp_mst_topology_mgr_resume(&aconnector
->mst_mgr
);
520 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
/*
 * dm_hw_init() - amd_ip_funcs hw-init hook: bring up the display manager
 * and then hotplug-detect interrupt handling.
 */
523 static int dm_hw_init(void *handle
)
525 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
526 /* Create DAL display manager */
527 amdgpu_dm_init(adev
);
528 amdgpu_dm_hpd_init(adev
);
/*
 * dm_hw_fini() - amd_ip_funcs hw-fini hook: tear down HPD handling, the
 * DM interrupt machinery, and the display manager — reverse of
 * dm_hw_init().
 */
533 static int dm_hw_fini(void *handle
)
535 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
537 amdgpu_dm_hpd_fini(adev
);
539 amdgpu_dm_irq_fini(adev
);
540 amdgpu_dm_fini(adev
);
/*
 * dm_suspend() - amd_ip_funcs suspend hook: quiesce MST, suspend DM
 * interrupts, and stash the atomic state for restore on resume.
 * The cached_state WARN_ON guards against a double-suspend leaking a
 * previously saved state.
 */
544 static int dm_suspend(void *handle
)
546 struct amdgpu_device
*adev
= handle
;
547 struct amdgpu_display_manager
*dm
= &adev
->dm
;
/* Suspend MST topology managers before disabling interrupts. */
550 s3_handle_mst(adev
->ddev
, true);
552 amdgpu_dm_irq_suspend(adev
);
554 WARN_ON(adev
->dm
.cached_state
);
555 adev
->dm
.cached_state
= drm_atomic_helper_suspend(adev
->ddev
);
/* Put DC hardware into D3; the enclosing call is cut off in this view. */
559 DC_ACPI_CM_POWER_STATE_D3
565 struct amdgpu_dm_connector
*amdgpu_dm_find_first_crct_matching_connector(
566 struct drm_atomic_state
*state
,
567 struct drm_crtc
*crtc
,
571 struct drm_connector_state
*conn_state
;
572 struct drm_connector
*connector
;
573 struct drm_crtc
*crtc_from_state
;
575 for_each_connector_in_state(
583 connector
->state
->crtc
;
585 if (crtc_from_state
== crtc
)
586 return to_amdgpu_dm_connector(connector
);
592 static int dm_resume(void *handle
)
594 struct amdgpu_device
*adev
= handle
;
595 struct amdgpu_display_manager
*dm
= &adev
->dm
;
597 /* power on hardware */
600 DC_ACPI_CM_POWER_STATE_D0
606 int amdgpu_dm_display_resume(struct amdgpu_device
*adev
)
608 struct drm_device
*ddev
= adev
->ddev
;
609 struct amdgpu_display_manager
*dm
= &adev
->dm
;
610 struct amdgpu_dm_connector
*aconnector
;
611 struct drm_connector
*connector
;
612 struct drm_crtc
*crtc
;
613 struct drm_crtc_state
*crtc_state
;
617 /* program HPD filter */
620 /* On resume we need to rewrite the MSTM control bits to enable MST */
621 s3_handle_mst(ddev
, false);
624 * early enable HPD Rx IRQ, should be done before set mode as short
625 * pulse interrupts are used for MST
627 amdgpu_dm_irq_resume_early(adev
);
630 list_for_each_entry(connector
,
631 &ddev
->mode_config
.connector_list
, head
) {
632 aconnector
= to_amdgpu_dm_connector(connector
);
635 * this is the case when traversing through already created
636 * MST connectors, should be skipped
638 if (aconnector
->mst_port
)
641 mutex_lock(&aconnector
->hpd_lock
);
642 dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
);
643 aconnector
->dc_sink
= NULL
;
644 amdgpu_dm_update_connector_after_detect(aconnector
);
645 mutex_unlock(&aconnector
->hpd_lock
);
648 /* Force mode set in atomic commit */
649 for_each_crtc_in_state(adev
->dm
.cached_state
, crtc
, crtc_state
, i
)
650 crtc_state
->active_changed
= true;
652 ret
= drm_atomic_helper_resume(ddev
, adev
->dm
.cached_state
);
654 drm_atomic_state_put(adev
->dm
.cached_state
);
655 adev
->dm
.cached_state
= NULL
;
657 amdgpu_dm_irq_resume_late(adev
);
662 static const struct amd_ip_funcs amdgpu_dm_funcs
= {
664 .early_init
= dm_early_init
,
665 .late_init
= dm_late_init
,
666 .sw_init
= dm_sw_init
,
667 .sw_fini
= dm_sw_fini
,
668 .hw_init
= dm_hw_init
,
669 .hw_fini
= dm_hw_fini
,
670 .suspend
= dm_suspend
,
672 .is_idle
= dm_is_idle
,
673 .wait_for_idle
= dm_wait_for_idle
,
674 .check_soft_reset
= dm_check_soft_reset
,
675 .soft_reset
= dm_soft_reset
,
676 .set_clockgating_state
= dm_set_clockgating_state
,
677 .set_powergating_state
= dm_set_powergating_state
,
680 const struct amdgpu_ip_block_version dm_ip_block
=
682 .type
= AMD_IP_BLOCK_TYPE_DCE
,
686 .funcs
= &amdgpu_dm_funcs
,
690 struct drm_atomic_state
*
691 dm_atomic_state_alloc(struct drm_device
*dev
)
693 struct dm_atomic_state
*state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
698 if (drm_atomic_state_init(dev
, &state
->base
) < 0)
/*
 * dm_atomic_state_clear() - drm_mode_config_funcs.atomic_state_clear hook:
 * release the DC state context held by the DM wrapper, then delegate to
 * the DRM default clear.
 */
709 dm_atomic_state_clear(struct drm_atomic_state
*state
)
711 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
/* Drop our reference on the DC context and forget it. */
713 if (dm_state
->context
) {
714 dc_release_state(dm_state
->context
);
715 dm_state
->context
= NULL
;
718 drm_atomic_state_default_clear(state
);
/*
 * dm_atomic_state_alloc_free() - atomic_state_free hook: release the DRM
 * core's portion of the state.
 * NOTE(review): the final free of dm_state itself is not visible in this
 * extraction — confirm against the original file.
 */
722 dm_atomic_state_alloc_free(struct drm_atomic_state
*state
)
724 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
725 drm_atomic_state_default_release(state
);
729 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs
= {
730 .fb_create
= amdgpu_user_framebuffer_create
,
731 .output_poll_changed
= amdgpu_output_poll_changed
,
732 .atomic_check
= amdgpu_dm_atomic_check
,
733 .atomic_commit
= amdgpu_dm_atomic_commit
,
734 .atomic_state_alloc
= dm_atomic_state_alloc
,
735 .atomic_state_clear
= dm_atomic_state_clear
,
736 .atomic_state_free
= dm_atomic_state_alloc_free
739 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs
= {
740 .atomic_commit_tail
= amdgpu_dm_atomic_commit_tail
743 void amdgpu_dm_update_connector_after_detect(
744 struct amdgpu_dm_connector
*aconnector
)
746 struct drm_connector
*connector
= &aconnector
->base
;
747 struct drm_device
*dev
= connector
->dev
;
748 struct dc_sink
*sink
;
750 /* MST handled by drm_mst framework */
751 if (aconnector
->mst_mgr
.mst_state
== true)
755 sink
= aconnector
->dc_link
->local_sink
;
757 /* Edid mgmt connector gets first update only in mode_valid hook and then
758 * the connector sink is set to either fake or physical sink depends on link status.
759 * don't do it here if u are during boot
761 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
762 && aconnector
->dc_em_sink
) {
764 /* For S3 resume with headless use eml_sink to fake stream
765 * because on resume connector->sink is set to NULL
767 mutex_lock(&dev
->mode_config
.mutex
);
770 if (aconnector
->dc_sink
) {
771 amdgpu_dm_remove_sink_from_freesync_module(
773 /* retain and release below are used for
774 * bump up refcount for sink because the link don't point
775 * to it anymore after disconnect so on next crtc to connector
776 * reshuffle by UMD we will get into unwanted dc_sink release
778 if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
779 dc_sink_release(aconnector
->dc_sink
);
781 aconnector
->dc_sink
= sink
;
782 amdgpu_dm_add_sink_to_freesync_module(
783 connector
, aconnector
->edid
);
785 amdgpu_dm_remove_sink_from_freesync_module(connector
);
786 if (!aconnector
->dc_sink
)
787 aconnector
->dc_sink
= aconnector
->dc_em_sink
;
788 else if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
789 dc_sink_retain(aconnector
->dc_sink
);
792 mutex_unlock(&dev
->mode_config
.mutex
);
797 * TODO: temporary guard to look for proper fix
798 * if this sink is MST sink, we should not do anything
800 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
)
803 if (aconnector
->dc_sink
== sink
) {
804 /* We got a DP short pulse (Link Loss, DP CTS, etc...).
806 DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
807 aconnector
->connector_id
);
811 DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
812 aconnector
->connector_id
, aconnector
->dc_sink
, sink
);
814 mutex_lock(&dev
->mode_config
.mutex
);
816 /* 1. Update status of the drm connector
817 * 2. Send an event and let userspace tell us what to do */
819 /* TODO: check if we still need the S3 mode update workaround.
820 * If yes, put it here. */
821 if (aconnector
->dc_sink
)
822 amdgpu_dm_remove_sink_from_freesync_module(
825 aconnector
->dc_sink
= sink
;
826 if (sink
->dc_edid
.length
== 0)
827 aconnector
->edid
= NULL
;
830 (struct edid
*) sink
->dc_edid
.raw_edid
;
833 drm_mode_connector_update_edid_property(connector
,
836 amdgpu_dm_add_sink_to_freesync_module(connector
, aconnector
->edid
);
839 amdgpu_dm_remove_sink_from_freesync_module(connector
);
840 drm_mode_connector_update_edid_property(connector
, NULL
);
841 aconnector
->num_modes
= 0;
842 aconnector
->dc_sink
= NULL
;
845 mutex_unlock(&dev
->mode_config
.mutex
);
/*
 * handle_hpd_irq() - low-context handler for hotplug-detect interrupts on
 * a single connector: re-detect the link and, if something changed,
 * update the connector state and notify userspace.
 * @param: the amdgpu_dm_connector registered for this HPD source.
 */
848 static void handle_hpd_irq(void *param
)
850 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
851 struct drm_connector
*connector
= &aconnector
->base
;
852 struct drm_device
*dev
= connector
->dev
;
854 /* In case of failure or MST no need to update connector status or notify the OS
855 * since (for MST case) MST does this in its own context.
/* hpd_lock serializes detection against the hpd_rx path. */
857 mutex_lock(&aconnector
->hpd_lock
);
858 if (dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
)) {
859 amdgpu_dm_update_connector_after_detect(aconnector
);
/* Re-apply the saved connector state under the full modeset lock. */
862 drm_modeset_lock_all(dev
);
863 dm_restore_drm_connector_state(dev
, connector
);
864 drm_modeset_unlock_all(dev
);
/* Only notify userspace when the connector is not user-forced. */
866 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
867 drm_kms_helper_hotplug_event(dev
);
869 mutex_unlock(&aconnector
->hpd_lock
);
873 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector
*aconnector
)
875 uint8_t esi
[DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
] = { 0 };
877 bool new_irq_handled
= false;
879 int dpcd_bytes_to_read
;
881 const int max_process_count
= 30;
882 int process_count
= 0;
884 const struct dc_link_status
*link_status
= dc_link_get_status(aconnector
->dc_link
);
886 if (link_status
->dpcd_caps
->dpcd_rev
.raw
< 0x12) {
887 dpcd_bytes_to_read
= DP_LANE0_1_STATUS
- DP_SINK_COUNT
;
888 /* DPCD 0x200 - 0x201 for downstream IRQ */
889 dpcd_addr
= DP_SINK_COUNT
;
891 dpcd_bytes_to_read
= DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
;
892 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
893 dpcd_addr
= DP_SINK_COUNT_ESI
;
896 dret
= drm_dp_dpcd_read(
897 &aconnector
->dm_dp_aux
.aux
,
902 while (dret
== dpcd_bytes_to_read
&&
903 process_count
< max_process_count
) {
909 DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi
[0], esi
[1], esi
[2]);
910 /* handle HPD short pulse irq */
911 if (aconnector
->mst_mgr
.mst_state
)
913 &aconnector
->mst_mgr
,
917 if (new_irq_handled
) {
918 /* ACK at DPCD to notify down stream */
919 const int ack_dpcd_bytes_to_write
=
920 dpcd_bytes_to_read
- 1;
922 for (retry
= 0; retry
< 3; retry
++) {
925 wret
= drm_dp_dpcd_write(
926 &aconnector
->dm_dp_aux
.aux
,
929 ack_dpcd_bytes_to_write
);
930 if (wret
== ack_dpcd_bytes_to_write
)
934 /* check if there is new irq to be handle */
935 dret
= drm_dp_dpcd_read(
936 &aconnector
->dm_dp_aux
.aux
,
941 new_irq_handled
= false;
946 if (process_count
== max_process_count
)
947 DRM_DEBUG_KMS("Loop exceeded max iterations\n");
950 static void handle_hpd_rx_irq(void *param
)
952 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
953 struct drm_connector
*connector
= &aconnector
->base
;
954 struct drm_device
*dev
= connector
->dev
;
955 const struct dc_link
*dc_link
= aconnector
->dc_link
;
956 bool is_mst_root_connector
= aconnector
->mst_mgr
.mst_state
;
958 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
959 * conflict, after implement i2c helper, this mutex should be
962 if (aconnector
->dc_link
->type
!= dc_connection_mst_branch
)
963 mutex_lock(&aconnector
->hpd_lock
);
965 if (dc_link_handle_hpd_rx_irq(aconnector
->dc_link
, NULL
) &&
966 !is_mst_root_connector
) {
967 /* Downstream Port status changed. */
968 if (dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPDRX
)) {
969 amdgpu_dm_update_connector_after_detect(aconnector
);
972 drm_modeset_lock_all(dev
);
973 dm_restore_drm_connector_state(dev
, connector
);
974 drm_modeset_unlock_all(dev
);
976 drm_kms_helper_hotplug_event(dev
);
979 if ((dc_link
->cur_link_settings
.lane_count
!= LANE_COUNT_UNKNOWN
) ||
980 (dc_link
->type
== dc_connection_mst_branch
))
981 dm_handle_hpd_rx_irq(aconnector
);
983 if (aconnector
->dc_link
->type
!= dc_connection_mst_branch
)
984 mutex_unlock(&aconnector
->hpd_lock
);
987 static void register_hpd_handlers(struct amdgpu_device
*adev
)
989 struct drm_device
*dev
= adev
->ddev
;
990 struct drm_connector
*connector
;
991 struct amdgpu_dm_connector
*aconnector
;
992 const struct dc_link
*dc_link
;
993 struct dc_interrupt_params int_params
= {0};
995 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
996 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
998 list_for_each_entry(connector
,
999 &dev
->mode_config
.connector_list
, head
) {
1001 aconnector
= to_amdgpu_dm_connector(connector
);
1002 dc_link
= aconnector
->dc_link
;
1004 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd
) {
1005 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1006 int_params
.irq_source
= dc_link
->irq_source_hpd
;
1008 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1010 (void *) aconnector
);
1013 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd_rx
) {
1015 /* Also register for DP short pulse (hpd_rx). */
1016 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1017 int_params
.irq_source
= dc_link
->irq_source_hpd_rx
;
1019 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1021 (void *) aconnector
);
1026 /* Register IRQ sources and initialize IRQ callbacks */
1027 static int dce110_register_irq_handlers(struct amdgpu_device
*adev
)
1029 struct dc
*dc
= adev
->dm
.dc
;
1030 struct common_irq_params
*c_irq_params
;
1031 struct dc_interrupt_params int_params
= {0};
1034 unsigned client_id
= AMDGPU_IH_CLIENTID_LEGACY
;
1036 if (adev
->asic_type
== CHIP_VEGA10
||
1037 adev
->asic_type
== CHIP_RAVEN
)
1038 client_id
= AMDGPU_IH_CLIENTID_DCE
;
1040 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1041 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1043 /* Actions of amdgpu_irq_add_id():
1044 * 1. Register a set() function with base driver.
1045 * Base driver will call set() function to enable/disable an
1046 * interrupt in DC hardware.
1047 * 2. Register amdgpu_dm_irq_handler().
1048 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1049 * coming from DC hardware.
1050 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1051 * for acknowledging and handling. */
1053 /* Use VBLANK interrupt */
1054 for (i
= VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
; i
<= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0
; i
++) {
1055 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->crtc_irq
);
1057 DRM_ERROR("Failed to add crtc irq id!\n");
1061 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1062 int_params
.irq_source
=
1063 dc_interrupt_to_irq_source(dc
, i
, 0);
1065 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1067 c_irq_params
->adev
= adev
;
1068 c_irq_params
->irq_src
= int_params
.irq_source
;
1070 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1071 dm_crtc_high_irq
, c_irq_params
);
1074 /* Use GRPH_PFLIP interrupt */
1075 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
1076 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
1077 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
1079 DRM_ERROR("Failed to add page flip irq id!\n");
1083 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1084 int_params
.irq_source
=
1085 dc_interrupt_to_irq_source(dc
, i
, 0);
1087 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1089 c_irq_params
->adev
= adev
;
1090 c_irq_params
->irq_src
= int_params
.irq_source
;
1092 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1093 dm_pflip_high_irq
, c_irq_params
);
1098 r
= amdgpu_irq_add_id(adev
, client_id
,
1099 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
1101 DRM_ERROR("Failed to add hpd irq id!\n");
1105 register_hpd_handlers(adev
);
1110 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1111 /* Register IRQ sources and initialize IRQ callbacks */
1112 static int dcn10_register_irq_handlers(struct amdgpu_device
*adev
)
1114 struct dc
*dc
= adev
->dm
.dc
;
1115 struct common_irq_params
*c_irq_params
;
1116 struct dc_interrupt_params int_params
= {0};
1120 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1121 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1123 /* Actions of amdgpu_irq_add_id():
1124 * 1. Register a set() function with base driver.
1125 * Base driver will call set() function to enable/disable an
1126 * interrupt in DC hardware.
1127 * 2. Register amdgpu_dm_irq_handler().
1128 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1129 * coming from DC hardware.
1130 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1131 * for acknowledging and handling.
1134 /* Use VSTARTUP interrupt */
1135 for (i
= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
;
1136 i
<= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
+ adev
->mode_info
.num_crtc
- 1;
1138 r
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_DCE
, i
, &adev
->crtc_irq
);
1141 DRM_ERROR("Failed to add crtc irq id!\n");
1145 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1146 int_params
.irq_source
=
1147 dc_interrupt_to_irq_source(dc
, i
, 0);
1149 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1151 c_irq_params
->adev
= adev
;
1152 c_irq_params
->irq_src
= int_params
.irq_source
;
1154 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1155 dm_crtc_high_irq
, c_irq_params
);
1158 /* Use GRPH_PFLIP interrupt */
1159 for (i
= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
;
1160 i
<= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
+ adev
->mode_info
.num_crtc
- 1;
1162 r
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_DCE
, i
, &adev
->pageflip_irq
);
1164 DRM_ERROR("Failed to add page flip irq id!\n");
1168 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1169 int_params
.irq_source
=
1170 dc_interrupt_to_irq_source(dc
, i
, 0);
1172 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1174 c_irq_params
->adev
= adev
;
1175 c_irq_params
->irq_src
= int_params
.irq_source
;
1177 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1178 dm_pflip_high_irq
, c_irq_params
);
1183 r
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_DCE
, DCN_1_0__SRCID__DC_HPD1_INT
,
1186 DRM_ERROR("Failed to add hpd irq id!\n");
1190 register_hpd_handlers(adev
);
1196 static int amdgpu_dm_mode_config_init(struct amdgpu_device
*adev
)
1200 adev
->mode_info
.mode_config_initialized
= true;
1202 adev
->ddev
->mode_config
.funcs
= (void *)&amdgpu_dm_mode_funcs
;
1203 adev
->ddev
->mode_config
.helper_private
= &amdgpu_dm_mode_config_helperfuncs
;
1205 adev
->ddev
->mode_config
.max_width
= 16384;
1206 adev
->ddev
->mode_config
.max_height
= 16384;
1208 adev
->ddev
->mode_config
.preferred_depth
= 24;
1209 adev
->ddev
->mode_config
.prefer_shadow
= 1;
1210 /* indicate support of immediate flip */
1211 adev
->ddev
->mode_config
.async_page_flip
= true;
1213 adev
->ddev
->mode_config
.fb_base
= adev
->mc
.aper_base
;
1215 r
= amdgpu_modeset_create_props(adev
);
1222 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1223 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/*
 * amdgpu_dm_backlight_update_status() - backlight_ops.update_status hook:
 * push the requested brightness to the DC backlight link.
 * NOTE(review): the return statements of the if/else are not visible in
 * this extraction.
 */
1225 static int amdgpu_dm_backlight_update_status(struct backlight_device
*bd
)
1227 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
/* Forward the sysfs-requested brightness to the DC link. */
1229 if (dc_link_set_backlight_level(dm
->backlight_link
,
1230 bd
->props
.brightness
, 0, 0))
/*
 * amdgpu_dm_backlight_get_brightness() - backlight_ops.get_brightness
 * hook: report the last value set via props (no hardware readback).
 */
1236 static int amdgpu_dm_backlight_get_brightness(struct backlight_device
*bd
)
1238 return bd
->props
.brightness
;
1241 static const struct backlight_ops amdgpu_dm_backlight_ops
= {
1242 .get_brightness
= amdgpu_dm_backlight_get_brightness
,
1243 .update_status
= amdgpu_dm_backlight_update_status
,
1246 void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager
*dm
)
1249 struct backlight_properties props
= { 0 };
1251 props
.max_brightness
= AMDGPU_MAX_BL_LEVEL
;
1252 props
.type
= BACKLIGHT_RAW
;
1254 snprintf(bl_name
, sizeof(bl_name
), "amdgpu_bl%d",
1255 dm
->adev
->ddev
->primary
->index
);
1257 dm
->backlight_dev
= backlight_device_register(bl_name
,
1258 dm
->adev
->ddev
->dev
,
1260 &amdgpu_dm_backlight_ops
,
1263 if (NULL
== dm
->backlight_dev
)
1264 DRM_ERROR("DM: Backlight registration failed!\n");
1266 DRM_INFO("DM: Registered Backlight device: %s\n", bl_name
);
/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
1278 int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
)
1280 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1282 struct amdgpu_dm_connector
*aconnector
= NULL
;
1283 struct amdgpu_encoder
*aencoder
= NULL
;
1284 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
1286 unsigned long possible_crtcs
;
1288 link_cnt
= dm
->dc
->caps
.max_links
;
1289 if (amdgpu_dm_mode_config_init(dm
->adev
)) {
1290 DRM_ERROR("DM: Failed to initialize mode config\n");
1294 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; i
++) {
1295 mode_info
->planes
[i
] = kzalloc(sizeof(struct amdgpu_plane
),
1297 if (!mode_info
->planes
[i
]) {
1298 DRM_ERROR("KMS: Failed to allocate plane\n");
1299 goto fail_free_planes
;
1301 mode_info
->planes
[i
]->base
.type
= mode_info
->plane_type
[i
];
1304 * HACK: IGT tests expect that each plane can only have one
1305 * one possible CRTC. For now, set one CRTC for each
1306 * plane that is not an underlay, but still allow multiple
1307 * CRTCs for underlay planes.
1309 possible_crtcs
= 1 << i
;
1310 if (i
>= dm
->dc
->caps
.max_streams
)
1311 possible_crtcs
= 0xff;
1313 if (amdgpu_dm_plane_init(dm
, mode_info
->planes
[i
], possible_crtcs
)) {
1314 DRM_ERROR("KMS: Failed to initialize plane\n");
1315 goto fail_free_planes
;
1319 for (i
= 0; i
< dm
->dc
->caps
.max_streams
; i
++)
1320 if (amdgpu_dm_crtc_init(dm
, &mode_info
->planes
[i
]->base
, i
)) {
1321 DRM_ERROR("KMS: Failed to initialize crtc\n");
1322 goto fail_free_planes
;
1325 dm
->display_indexes_num
= dm
->dc
->caps
.max_streams
;
1327 /* loops over all connectors on the board */
1328 for (i
= 0; i
< link_cnt
; i
++) {
1330 if (i
> AMDGPU_DM_MAX_DISPLAY_INDEX
) {
1332 "KMS: Cannot support more than %d display indexes\n",
1333 AMDGPU_DM_MAX_DISPLAY_INDEX
);
1337 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
1339 goto fail_free_planes
;
1341 aencoder
= kzalloc(sizeof(*aencoder
), GFP_KERNEL
);
1343 goto fail_free_connector
;
1346 if (amdgpu_dm_encoder_init(dm
->ddev
, aencoder
, i
)) {
1347 DRM_ERROR("KMS: Failed to initialize encoder\n");
1348 goto fail_free_encoder
;
1351 if (amdgpu_dm_connector_init(dm
, aconnector
, i
, aencoder
)) {
1352 DRM_ERROR("KMS: Failed to initialize connector\n");
1353 goto fail_free_encoder
;
1356 if (dc_link_detect(dc_get_link_at_index(dm
->dc
, i
),
1357 DETECT_REASON_BOOT
))
1358 amdgpu_dm_update_connector_after_detect(aconnector
);
1361 /* Software is initialized. Now we can register interrupt handlers. */
1362 switch (adev
->asic_type
) {
1372 case CHIP_POLARIS11
:
1373 case CHIP_POLARIS10
:
1374 case CHIP_POLARIS12
:
1376 if (dce110_register_irq_handlers(dm
->adev
)) {
1377 DRM_ERROR("DM: Failed to initialize IRQ\n");
1378 goto fail_free_encoder
;
1381 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1383 if (dcn10_register_irq_handlers(dm
->adev
)) {
1384 DRM_ERROR("DM: Failed to initialize IRQ\n");
1385 goto fail_free_encoder
;
1388 * Temporary disable until pplib/smu interaction is implemented
1390 dm
->dc
->debug
.disable_stutter
= true;
1394 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev
->asic_type
);
1395 goto fail_free_encoder
;
1398 drm_mode_config_reset(dm
->ddev
);
1403 fail_free_connector
:
1406 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; i
++)
1407 kfree(mode_info
->planes
[i
]);
1411 void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
)
1413 drm_mode_config_cleanup(dm
->ddev
);
1417 /******************************************************************************
1418 * amdgpu_display_funcs functions
1419 *****************************************************************************/
/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
1433 static void dm_set_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
,
1436 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1439 static u8
dm_get_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
)
1441 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1445 static int amdgpu_notify_freesync(struct drm_device
*dev
, void *data
,
1446 struct drm_file
*filp
)
1448 struct mod_freesync_params freesync_params
;
1449 uint8_t num_streams
;
1452 struct amdgpu_device
*adev
= dev
->dev_private
;
1455 /* Get freesync enable flag from DRM */
1457 num_streams
= dc_get_current_stream_count(adev
->dm
.dc
);
1459 for (i
= 0; i
< num_streams
; i
++) {
1460 struct dc_stream_state
*stream
;
1461 stream
= dc_get_stream_at_index(adev
->dm
.dc
, i
);
1463 mod_freesync_update_state(adev
->dm
.freesync_module
,
1464 &stream
, 1, &freesync_params
);
1470 static const struct amdgpu_display_funcs dm_display_funcs
= {
1471 .bandwidth_update
= dm_bandwidth_update
, /* called unconditionally */
1472 .vblank_get_counter
= dm_vblank_get_counter
,/* called unconditionally */
1473 .vblank_wait
= NULL
,
1474 .backlight_set_level
=
1475 dm_set_backlight_level
,/* called unconditionally */
1476 .backlight_get_level
=
1477 dm_get_backlight_level
,/* called unconditionally */
1478 .hpd_sense
= NULL
,/* called unconditionally */
1479 .hpd_set_polarity
= NULL
, /* called unconditionally */
1480 .hpd_get_gpio_reg
= NULL
, /* VBIOS parsing. DAL does it. */
1481 .page_flip_get_scanoutpos
=
1482 dm_crtc_get_scanoutpos
,/* called unconditionally */
1483 .add_encoder
= NULL
, /* VBIOS parsing. DAL does it. */
1484 .add_connector
= NULL
, /* VBIOS parsing. DAL does it. */
1485 .notify_freesync
= amdgpu_notify_freesync
,
1489 #if defined(CONFIG_DEBUG_KERNEL_DC)
1491 static ssize_t
s3_debug_store(
1492 struct device
*device
,
1493 struct device_attribute
*attr
,
1499 struct pci_dev
*pdev
= to_pci_dev(device
);
1500 struct drm_device
*drm_dev
= pci_get_drvdata(pdev
);
1501 struct amdgpu_device
*adev
= drm_dev
->dev_private
;
1503 ret
= kstrtoint(buf
, 0, &s3_state
);
1508 amdgpu_dm_display_resume(adev
);
1509 drm_kms_helper_hotplug_event(adev
->ddev
);
1514 return ret
== 0 ? count
: 0;
1517 DEVICE_ATTR_WO(s3_debug
);
1521 static int dm_early_init(void *handle
)
1523 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1525 adev
->ddev
->driver
->driver_features
|= DRIVER_ATOMIC
;
1526 amdgpu_dm_set_irq_funcs(adev
);
1528 switch (adev
->asic_type
) {
1531 adev
->mode_info
.num_crtc
= 6;
1532 adev
->mode_info
.num_hpd
= 6;
1533 adev
->mode_info
.num_dig
= 6;
1534 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1537 adev
->mode_info
.num_crtc
= 4;
1538 adev
->mode_info
.num_hpd
= 6;
1539 adev
->mode_info
.num_dig
= 7;
1540 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1544 adev
->mode_info
.num_crtc
= 2;
1545 adev
->mode_info
.num_hpd
= 6;
1546 adev
->mode_info
.num_dig
= 6;
1547 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1551 adev
->mode_info
.num_crtc
= 6;
1552 adev
->mode_info
.num_hpd
= 6;
1553 adev
->mode_info
.num_dig
= 7;
1554 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1557 adev
->mode_info
.num_crtc
= 3;
1558 adev
->mode_info
.num_hpd
= 6;
1559 adev
->mode_info
.num_dig
= 9;
1560 adev
->mode_info
.plane_type
= dm_plane_type_carizzo
;
1563 adev
->mode_info
.num_crtc
= 2;
1564 adev
->mode_info
.num_hpd
= 6;
1565 adev
->mode_info
.num_dig
= 9;
1566 adev
->mode_info
.plane_type
= dm_plane_type_stoney
;
1568 case CHIP_POLARIS11
:
1569 case CHIP_POLARIS12
:
1570 adev
->mode_info
.num_crtc
= 5;
1571 adev
->mode_info
.num_hpd
= 5;
1572 adev
->mode_info
.num_dig
= 5;
1573 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1575 case CHIP_POLARIS10
:
1576 adev
->mode_info
.num_crtc
= 6;
1577 adev
->mode_info
.num_hpd
= 6;
1578 adev
->mode_info
.num_dig
= 6;
1579 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1582 adev
->mode_info
.num_crtc
= 6;
1583 adev
->mode_info
.num_hpd
= 6;
1584 adev
->mode_info
.num_dig
= 6;
1585 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1587 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1589 adev
->mode_info
.num_crtc
= 4;
1590 adev
->mode_info
.num_hpd
= 4;
1591 adev
->mode_info
.num_dig
= 4;
1592 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1596 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev
->asic_type
);
1600 if (adev
->mode_info
.funcs
== NULL
)
1601 adev
->mode_info
.funcs
= &dm_display_funcs
;
1603 /* Note: Do NOT change adev->audio_endpt_rreg and
1604 * adev->audio_endpt_wreg because they are initialised in
1605 * amdgpu_device_init() */
1606 #if defined(CONFIG_DEBUG_KERNEL_DC)
1609 &dev_attr_s3_debug
);
1615 bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager
*dm
)
1621 bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager
*dm
)
1628 struct dm_connector_state
{
1629 struct drm_connector_state base
;
1631 enum amdgpu_rmx_type scaling
;
1632 uint8_t underscan_vborder
;
1633 uint8_t underscan_hborder
;
1634 bool underscan_enable
;
/* Upcast a drm_connector_state embedded in dm_connector_state. */
#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)
1640 static bool modeset_required(struct drm_crtc_state
*crtc_state
,
1641 struct dc_stream_state
*new_stream
,
1642 struct dc_stream_state
*old_stream
)
1644 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1647 if (!crtc_state
->enable
)
1650 return crtc_state
->active
;
1653 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
1655 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1658 return !crtc_state
->enable
|| !crtc_state
->active
;
/* drm_encoder_funcs.destroy hook: clean up and free the encoder.
 * kfree() call elided in extraction; restored from upstream -- verify. */
void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
1667 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
1668 .destroy
= amdgpu_dm_encoder_destroy
,
1671 static bool fill_rects_from_plane_state(
1672 const struct drm_plane_state
*state
,
1673 struct dc_plane_state
*plane_state
)
1675 plane_state
->src_rect
.x
= state
->src_x
>> 16;
1676 plane_state
->src_rect
.y
= state
->src_y
>> 16;
1677 /*we ignore for now mantissa and do not to deal with floating pixels :(*/
1678 plane_state
->src_rect
.width
= state
->src_w
>> 16;
1680 if (plane_state
->src_rect
.width
== 0)
1683 plane_state
->src_rect
.height
= state
->src_h
>> 16;
1684 if (plane_state
->src_rect
.height
== 0)
1687 plane_state
->dst_rect
.x
= state
->crtc_x
;
1688 plane_state
->dst_rect
.y
= state
->crtc_y
;
1690 if (state
->crtc_w
== 0)
1693 plane_state
->dst_rect
.width
= state
->crtc_w
;
1695 if (state
->crtc_h
== 0)
1698 plane_state
->dst_rect
.height
= state
->crtc_h
;
1700 plane_state
->clip_rect
= plane_state
->dst_rect
;
1702 switch (state
->rotation
& DRM_MODE_ROTATE_MASK
) {
1703 case DRM_MODE_ROTATE_0
:
1704 plane_state
->rotation
= ROTATION_ANGLE_0
;
1706 case DRM_MODE_ROTATE_90
:
1707 plane_state
->rotation
= ROTATION_ANGLE_90
;
1709 case DRM_MODE_ROTATE_180
:
1710 plane_state
->rotation
= ROTATION_ANGLE_180
;
1712 case DRM_MODE_ROTATE_270
:
1713 plane_state
->rotation
= ROTATION_ANGLE_270
;
1716 plane_state
->rotation
= ROTATION_ANGLE_0
;
1722 static int get_fb_info(
1723 const struct amdgpu_framebuffer
*amdgpu_fb
,
1724 uint64_t *tiling_flags
,
1725 uint64_t *fb_location
)
1727 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->obj
);
1728 int r
= amdgpu_bo_reserve(rbo
, false);
1731 DRM_ERROR("Unable to reserve buffer\n");
1736 *fb_location
= amdgpu_bo_gpu_offset(rbo
);
1739 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
1741 amdgpu_bo_unreserve(rbo
);
1746 static int fill_plane_attributes_from_fb(
1747 struct amdgpu_device
*adev
,
1748 struct dc_plane_state
*plane_state
,
1749 const struct amdgpu_framebuffer
*amdgpu_fb
, bool addReq
)
1751 uint64_t tiling_flags
;
1752 uint64_t fb_location
= 0;
1753 unsigned int awidth
;
1754 const struct drm_framebuffer
*fb
= &amdgpu_fb
->base
;
1756 struct drm_format_name_buf format_name
;
1761 addReq
== true ? &fb_location
:NULL
);
1766 switch (fb
->format
->format
) {
1768 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
1770 case DRM_FORMAT_RGB565
:
1771 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
1773 case DRM_FORMAT_XRGB8888
:
1774 case DRM_FORMAT_ARGB8888
:
1775 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
1777 case DRM_FORMAT_XRGB2101010
:
1778 case DRM_FORMAT_ARGB2101010
:
1779 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
1781 case DRM_FORMAT_XBGR2101010
:
1782 case DRM_FORMAT_ABGR2101010
:
1783 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
1785 case DRM_FORMAT_NV21
:
1786 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
1788 case DRM_FORMAT_NV12
:
1789 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
1792 DRM_ERROR("Unsupported screen format %s\n",
1793 drm_get_format_name(fb
->format
->format
, &format_name
));
1797 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
1798 plane_state
->address
.type
= PLN_ADDR_TYPE_GRAPHICS
;
1799 plane_state
->address
.grph
.addr
.low_part
= lower_32_bits(fb_location
);
1800 plane_state
->address
.grph
.addr
.high_part
= upper_32_bits(fb_location
);
1801 plane_state
->plane_size
.grph
.surface_size
.x
= 0;
1802 plane_state
->plane_size
.grph
.surface_size
.y
= 0;
1803 plane_state
->plane_size
.grph
.surface_size
.width
= fb
->width
;
1804 plane_state
->plane_size
.grph
.surface_size
.height
= fb
->height
;
1805 plane_state
->plane_size
.grph
.surface_pitch
=
1806 fb
->pitches
[0] / fb
->format
->cpp
[0];
1807 /* TODO: unhardcode */
1808 plane_state
->color_space
= COLOR_SPACE_SRGB
;
1811 awidth
= ALIGN(fb
->width
, 64);
1812 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
1813 plane_state
->address
.video_progressive
.luma_addr
.low_part
1814 = lower_32_bits(fb_location
);
1815 plane_state
->address
.video_progressive
.chroma_addr
.low_part
1816 = lower_32_bits(fb_location
) +
1817 (awidth
* fb
->height
);
1818 plane_state
->plane_size
.video
.luma_size
.x
= 0;
1819 plane_state
->plane_size
.video
.luma_size
.y
= 0;
1820 plane_state
->plane_size
.video
.luma_size
.width
= awidth
;
1821 plane_state
->plane_size
.video
.luma_size
.height
= fb
->height
;
1822 /* TODO: unhardcode */
1823 plane_state
->plane_size
.video
.luma_pitch
= awidth
;
1825 plane_state
->plane_size
.video
.chroma_size
.x
= 0;
1826 plane_state
->plane_size
.video
.chroma_size
.y
= 0;
1827 plane_state
->plane_size
.video
.chroma_size
.width
= awidth
;
1828 plane_state
->plane_size
.video
.chroma_size
.height
= fb
->height
;
1829 plane_state
->plane_size
.video
.chroma_pitch
= awidth
/ 2;
1831 /* TODO: unhardcode */
1832 plane_state
->color_space
= COLOR_SPACE_YCBCR709
;
1835 memset(&plane_state
->tiling_info
, 0, sizeof(plane_state
->tiling_info
));
1837 /* Fill GFX8 params */
1838 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
) {
1839 unsigned int bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
1841 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
1842 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
1843 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
1844 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
1845 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
1847 /* XXX fix me for VI */
1848 plane_state
->tiling_info
.gfx8
.num_banks
= num_banks
;
1849 plane_state
->tiling_info
.gfx8
.array_mode
=
1850 DC_ARRAY_2D_TILED_THIN1
;
1851 plane_state
->tiling_info
.gfx8
.tile_split
= tile_split
;
1852 plane_state
->tiling_info
.gfx8
.bank_width
= bankw
;
1853 plane_state
->tiling_info
.gfx8
.bank_height
= bankh
;
1854 plane_state
->tiling_info
.gfx8
.tile_aspect
= mtaspect
;
1855 plane_state
->tiling_info
.gfx8
.tile_mode
=
1856 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
1857 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
1858 == DC_ARRAY_1D_TILED_THIN1
) {
1859 plane_state
->tiling_info
.gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
1862 plane_state
->tiling_info
.gfx8
.pipe_config
=
1863 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
1865 if (adev
->asic_type
== CHIP_VEGA10
||
1866 adev
->asic_type
== CHIP_RAVEN
) {
1867 /* Fill GFX9 params */
1868 plane_state
->tiling_info
.gfx9
.num_pipes
=
1869 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
1870 plane_state
->tiling_info
.gfx9
.num_banks
=
1871 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
1872 plane_state
->tiling_info
.gfx9
.pipe_interleave
=
1873 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
1874 plane_state
->tiling_info
.gfx9
.num_shader_engines
=
1875 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
1876 plane_state
->tiling_info
.gfx9
.max_compressed_frags
=
1877 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
1878 plane_state
->tiling_info
.gfx9
.num_rb_per_se
=
1879 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
1880 plane_state
->tiling_info
.gfx9
.swizzle
=
1881 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
1882 plane_state
->tiling_info
.gfx9
.shaderEnable
= 1;
1885 plane_state
->visible
= true;
1886 plane_state
->scaling_quality
.h_taps_c
= 0;
1887 plane_state
->scaling_quality
.v_taps_c
= 0;
1889 /* is this needed? is plane_state zeroed at allocation? */
1890 plane_state
->scaling_quality
.h_taps
= 0;
1891 plane_state
->scaling_quality
.v_taps
= 0;
1892 plane_state
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
1898 static void fill_gamma_from_crtc_state(
1899 const struct drm_crtc_state
*crtc_state
,
1900 struct dc_plane_state
*plane_state
)
1903 struct dc_gamma
*gamma
;
1904 struct drm_color_lut
*lut
=
1905 (struct drm_color_lut
*) crtc_state
->gamma_lut
->data
;
1907 gamma
= dc_create_gamma();
1909 if (gamma
== NULL
) {
1914 gamma
->type
= GAMMA_RGB_256
;
1915 gamma
->num_entries
= GAMMA_RGB_256_ENTRIES
;
1916 for (i
= 0; i
< GAMMA_RGB_256_ENTRIES
; i
++) {
1917 gamma
->entries
.red
[i
] = dal_fixed31_32_from_int(lut
[i
].red
);
1918 gamma
->entries
.green
[i
] = dal_fixed31_32_from_int(lut
[i
].green
);
1919 gamma
->entries
.blue
[i
] = dal_fixed31_32_from_int(lut
[i
].blue
);
1922 plane_state
->gamma_correction
= gamma
;
1925 static int fill_plane_attributes(
1926 struct amdgpu_device
*adev
,
1927 struct dc_plane_state
*dc_plane_state
,
1928 struct drm_plane_state
*plane_state
,
1929 struct drm_crtc_state
*crtc_state
,
1932 const struct amdgpu_framebuffer
*amdgpu_fb
=
1933 to_amdgpu_framebuffer(plane_state
->fb
);
1934 const struct drm_crtc
*crtc
= plane_state
->crtc
;
1935 struct dc_transfer_func
*input_tf
;
1938 if (!fill_rects_from_plane_state(plane_state
, dc_plane_state
))
1941 ret
= fill_plane_attributes_from_fb(
1942 crtc
->dev
->dev_private
,
1950 input_tf
= dc_create_transfer_func();
1952 if (input_tf
== NULL
)
1955 input_tf
->type
= TF_TYPE_PREDEFINED
;
1956 input_tf
->tf
= TRANSFER_FUNCTION_SRGB
;
1958 dc_plane_state
->in_transfer_func
= input_tf
;
1960 /* In case of gamma set, update gamma value */
1961 if (crtc_state
->gamma_lut
)
1962 fill_gamma_from_crtc_state(crtc_state
, dc_plane_state
);
1967 /*****************************************************************************/
1969 struct amdgpu_dm_connector
*aconnector_from_drm_crtc_id(
1970 const struct drm_crtc
*crtc
)
1972 struct drm_device
*dev
= crtc
->dev
;
1973 struct drm_connector
*connector
;
1974 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
1975 struct amdgpu_dm_connector
*aconnector
;
1977 list_for_each_entry(connector
,
1978 &dev
->mode_config
.connector_list
, head
) {
1980 aconnector
= to_amdgpu_dm_connector(connector
);
1982 if (aconnector
->base
.state
->crtc
!= &acrtc
->base
)
1985 /* Found the connector */
1989 /* If we get here, not found. */
1993 static void update_stream_scaling_settings(
1994 const struct drm_display_mode
*mode
,
1995 const struct dm_connector_state
*dm_state
,
1996 struct dc_stream_state
*stream
)
1998 enum amdgpu_rmx_type rmx_type
;
2000 struct rect src
= { 0 }; /* viewport in composition space*/
2001 struct rect dst
= { 0 }; /* stream addressable area */
2003 /* no mode. nothing to be done */
2007 /* Full screen scaling by default */
2008 src
.width
= mode
->hdisplay
;
2009 src
.height
= mode
->vdisplay
;
2010 dst
.width
= stream
->timing
.h_addressable
;
2011 dst
.height
= stream
->timing
.v_addressable
;
2013 rmx_type
= dm_state
->scaling
;
2014 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
2015 if (src
.width
* dst
.height
<
2016 src
.height
* dst
.width
) {
2017 /* height needs less upscaling/more downscaling */
2018 dst
.width
= src
.width
*
2019 dst
.height
/ src
.height
;
2021 /* width needs less upscaling/more downscaling */
2022 dst
.height
= src
.height
*
2023 dst
.width
/ src
.width
;
2025 } else if (rmx_type
== RMX_CENTER
) {
2029 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
2030 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
2032 if (dm_state
->underscan_enable
) {
2033 dst
.x
+= dm_state
->underscan_hborder
/ 2;
2034 dst
.y
+= dm_state
->underscan_vborder
/ 2;
2035 dst
.width
-= dm_state
->underscan_hborder
;
2036 dst
.height
-= dm_state
->underscan_vborder
;
2042 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
2043 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
2047 static enum dc_color_depth
convert_color_depth_from_display_info(
2048 const struct drm_connector
*connector
)
2050 uint32_t bpc
= connector
->display_info
.bpc
;
2052 /* Limited color depth to 8bit
2053 * TODO: Still need to handle deep color
2060 /* Temporary Work around, DRM don't parse color depth for
2061 * EDID revision before 1.4
2062 * TODO: Fix edid parsing
2064 return COLOR_DEPTH_888
;
2066 return COLOR_DEPTH_666
;
2068 return COLOR_DEPTH_888
;
2070 return COLOR_DEPTH_101010
;
2072 return COLOR_DEPTH_121212
;
2074 return COLOR_DEPTH_141414
;
2076 return COLOR_DEPTH_161616
;
2078 return COLOR_DEPTH_UNDEFINED
;
2082 static enum dc_aspect_ratio
get_aspect_ratio(
2083 const struct drm_display_mode
*mode_in
)
2085 int32_t width
= mode_in
->crtc_hdisplay
* 9;
2086 int32_t height
= mode_in
->crtc_vdisplay
* 16;
2088 if ((width
- height
) < 10 && (width
- height
) > -10)
2089 return ASPECT_RATIO_16_9
;
2091 return ASPECT_RATIO_4_3
;
2094 static enum dc_color_space
get_output_color_space(
2095 const struct dc_crtc_timing
*dc_crtc_timing
)
2097 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
2099 switch (dc_crtc_timing
->pixel_encoding
) {
2100 case PIXEL_ENCODING_YCBCR422
:
2101 case PIXEL_ENCODING_YCBCR444
:
2102 case PIXEL_ENCODING_YCBCR420
:
2105 * 27030khz is the separation point between HDTV and SDTV
2106 * according to HDMI spec, we use YCbCr709 and YCbCr601
2109 if (dc_crtc_timing
->pix_clk_khz
> 27030) {
2110 if (dc_crtc_timing
->flags
.Y_ONLY
)
2112 COLOR_SPACE_YCBCR709_LIMITED
;
2114 color_space
= COLOR_SPACE_YCBCR709
;
2116 if (dc_crtc_timing
->flags
.Y_ONLY
)
2118 COLOR_SPACE_YCBCR601_LIMITED
;
2120 color_space
= COLOR_SPACE_YCBCR601
;
2125 case PIXEL_ENCODING_RGB
:
2126 color_space
= COLOR_SPACE_SRGB
;
2137 /*****************************************************************************/
2139 static void fill_stream_properties_from_drm_display_mode(
2140 struct dc_stream_state
*stream
,
2141 const struct drm_display_mode
*mode_in
,
2142 const struct drm_connector
*connector
)
2144 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
2146 memset(timing_out
, 0, sizeof(struct dc_crtc_timing
));
2148 timing_out
->h_border_left
= 0;
2149 timing_out
->h_border_right
= 0;
2150 timing_out
->v_border_top
= 0;
2151 timing_out
->v_border_bottom
= 0;
2152 /* TODO: un-hardcode */
2154 if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
2155 && stream
->sink
->sink_signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
2156 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
2158 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
2160 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
2161 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
2163 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
2164 timing_out
->hdmi_vic
= 0;
2165 timing_out
->vic
= drm_match_cea_mode(mode_in
);
2167 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
2168 timing_out
->h_total
= mode_in
->crtc_htotal
;
2169 timing_out
->h_sync_width
=
2170 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
2171 timing_out
->h_front_porch
=
2172 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
2173 timing_out
->v_total
= mode_in
->crtc_vtotal
;
2174 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
2175 timing_out
->v_front_porch
=
2176 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
2177 timing_out
->v_sync_width
=
2178 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
2179 timing_out
->pix_clk_khz
= mode_in
->crtc_clock
;
2180 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
2181 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
2182 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
2183 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
2184 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
2186 stream
->output_color_space
= get_output_color_space(timing_out
);
2189 struct dc_transfer_func
*tf
= dc_create_transfer_func();
2191 tf
->type
= TF_TYPE_PREDEFINED
;
2192 tf
->tf
= TRANSFER_FUNCTION_SRGB
;
2193 stream
->out_transfer_func
= tf
;
2197 static void fill_audio_info(
2198 struct audio_info
*audio_info
,
2199 const struct drm_connector
*drm_connector
,
2200 const struct dc_sink
*dc_sink
)
2203 int cea_revision
= 0;
2204 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
2206 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
2207 audio_info
->product_id
= edid_caps
->product_id
;
2209 cea_revision
= drm_connector
->display_info
.cea_rev
;
2211 while (i
< AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
&&
2212 edid_caps
->display_name
[i
]) {
2213 audio_info
->display_name
[i
] = edid_caps
->display_name
[i
];
2217 if (cea_revision
>= 3) {
2218 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
2220 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
2221 audio_info
->modes
[i
].format_code
=
2222 (enum audio_format_code
)
2223 (edid_caps
->audio_modes
[i
].format_code
);
2224 audio_info
->modes
[i
].channel_count
=
2225 edid_caps
->audio_modes
[i
].channel_count
;
2226 audio_info
->modes
[i
].sample_rates
.all
=
2227 edid_caps
->audio_modes
[i
].sample_rate
;
2228 audio_info
->modes
[i
].sample_size
=
2229 edid_caps
->audio_modes
[i
].sample_size
;
2233 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
2235 /* TODO: We only check for the progressive mode, check for interlace mode too */
2236 if (drm_connector
->latency_present
[0]) {
2237 audio_info
->video_latency
= drm_connector
->video_latency
[0];
2238 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
2241 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2245 static void copy_crtc_timing_for_drm_display_mode(
2246 const struct drm_display_mode
*src_mode
,
2247 struct drm_display_mode
*dst_mode
)
2249 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
2250 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
2251 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
2252 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
2253 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
2254 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
2255 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
2256 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
2257 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
2258 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
2259 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
2260 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
2261 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
2262 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
2265 static void decide_crtc_timing_for_drm_display_mode(
2266 struct drm_display_mode
*drm_mode
,
2267 const struct drm_display_mode
*native_mode
,
2270 if (scale_enabled
) {
2271 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2272 } else if (native_mode
->clock
== drm_mode
->clock
&&
2273 native_mode
->htotal
== drm_mode
->htotal
&&
2274 native_mode
->vtotal
== drm_mode
->vtotal
) {
2275 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2277 /* no scaling nor amdgpu inserted, no need to patch */
2281 static struct dc_stream_state
*create_stream_for_sink(
2282 struct amdgpu_dm_connector
*aconnector
,
2283 const struct drm_display_mode
*drm_mode
,
2284 const struct dm_connector_state
*dm_state
)
2286 struct drm_display_mode
*preferred_mode
= NULL
;
2287 const struct drm_connector
*drm_connector
;
2288 struct dc_stream_state
*stream
= NULL
;
2289 struct drm_display_mode mode
= *drm_mode
;
2290 bool native_mode_found
= false;
2292 if (aconnector
== NULL
) {
2293 DRM_ERROR("aconnector is NULL!\n");
2294 goto drm_connector_null
;
2297 if (dm_state
== NULL
) {
2298 DRM_ERROR("dm_state is NULL!\n");
2302 drm_connector
= &aconnector
->base
;
2303 stream
= dc_create_stream_for_sink(aconnector
->dc_sink
);
2305 if (stream
== NULL
) {
2306 DRM_ERROR("Failed to create stream for sink!\n");
2307 goto stream_create_fail
;
2310 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
2311 /* Search for preferred mode */
2312 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
2313 native_mode_found
= true;
2317 if (!native_mode_found
)
2318 preferred_mode
= list_first_entry_or_null(
2319 &aconnector
->base
.modes
,
2320 struct drm_display_mode
,
2323 if (preferred_mode
== NULL
) {
2324 /* This may not be an error, the use case is when we we have no
2325 * usermode calls to reset and set mode upon hotplug. In this
2326 * case, we call set mode ourselves to restore the previous mode
2327 * and the modelist may not be filled in in time.
2329 DRM_INFO("No preferred mode found\n");
2331 decide_crtc_timing_for_drm_display_mode(
2332 &mode
, preferred_mode
,
2333 dm_state
->scaling
!= RMX_OFF
);
2336 fill_stream_properties_from_drm_display_mode(stream
,
2337 &mode
, &aconnector
->base
);
2338 update_stream_scaling_settings(&mode
, dm_state
, stream
);
2341 &stream
->audio_info
,
2343 aconnector
->dc_sink
);
/* drm_crtc_funcs.destroy hook: clean up and free the CRTC.
 * kfree() call elided in extraction; restored from upstream -- verify. */
void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
2357 static void dm_crtc_destroy_state(struct drm_crtc
*crtc
,
2358 struct drm_crtc_state
*state
)
2360 struct dm_crtc_state
*cur
= to_dm_crtc_state(state
);
2362 /* TODO Destroy dc_stream objects are stream object is flattened */
2364 dc_stream_release(cur
->stream
);
2367 __drm_atomic_helper_crtc_destroy_state(state
);
2373 static void dm_crtc_reset_state(struct drm_crtc
*crtc
)
2375 struct dm_crtc_state
*state
;
2378 dm_crtc_destroy_state(crtc
, crtc
->state
);
2380 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2381 if (WARN_ON(!state
))
2384 crtc
->state
= &state
->base
;
2385 crtc
->state
->crtc
= crtc
;
2389 static struct drm_crtc_state
*
2390 dm_crtc_duplicate_state(struct drm_crtc
*crtc
)
2392 struct dm_crtc_state
*state
, *cur
;
2394 cur
= to_dm_crtc_state(crtc
->state
);
2396 if (WARN_ON(!crtc
->state
))
2399 state
= dm_alloc(sizeof(*state
));
2401 __drm_atomic_helper_crtc_duplicate_state(crtc
, &state
->base
);
2404 state
->stream
= cur
->stream
;
2405 dc_stream_retain(state
->stream
);
2408 /* TODO Duplicate dc_stream after objects are stream object is flattened */
2410 return &state
->base
;
2413 /* Implemented only the options currently availible for the driver */
2414 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
2415 .reset
= dm_crtc_reset_state
,
2416 .destroy
= amdgpu_dm_crtc_destroy
,
2417 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
2418 .set_config
= drm_atomic_helper_set_config
,
2419 .page_flip
= drm_atomic_helper_page_flip
,
2420 .atomic_duplicate_state
= dm_crtc_duplicate_state
,
2421 .atomic_destroy_state
= dm_crtc_destroy_state
,
2424 static enum drm_connector_status
2425 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
2428 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2431 * 1. This interface is NOT called in context of HPD irq.
2432 * 2. This interface *is called* in context of user-mode ioctl. Which
2433 * makes it a bad place for *any* MST-related activit. */
2435 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
2436 connected
= (aconnector
->dc_sink
!= NULL
);
2438 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
2440 return (connected
? connector_status_connected
:
2441 connector_status_disconnected
);
2444 int amdgpu_dm_connector_atomic_set_property(
2445 struct drm_connector
*connector
,
2446 struct drm_connector_state
*connector_state
,
2447 struct drm_property
*property
,
2450 struct drm_device
*dev
= connector
->dev
;
2451 struct amdgpu_device
*adev
= dev
->dev_private
;
2452 struct dm_connector_state
*dm_old_state
=
2453 to_dm_connector_state(connector
->state
);
2454 struct dm_connector_state
*dm_new_state
=
2455 to_dm_connector_state(connector_state
);
2459 if (property
== dev
->mode_config
.scaling_mode_property
) {
2460 enum amdgpu_rmx_type rmx_type
;
2463 case DRM_MODE_SCALE_CENTER
:
2464 rmx_type
= RMX_CENTER
;
2466 case DRM_MODE_SCALE_ASPECT
:
2467 rmx_type
= RMX_ASPECT
;
2469 case DRM_MODE_SCALE_FULLSCREEN
:
2470 rmx_type
= RMX_FULL
;
2472 case DRM_MODE_SCALE_NONE
:
2478 if (dm_old_state
->scaling
== rmx_type
)
2481 dm_new_state
->scaling
= rmx_type
;
2483 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2484 dm_new_state
->underscan_hborder
= val
;
2486 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2487 dm_new_state
->underscan_vborder
= val
;
2489 } else if (property
== adev
->mode_info
.underscan_property
) {
2490 dm_new_state
->underscan_enable
= val
;
2497 int amdgpu_dm_connector_atomic_get_property(
2498 struct drm_connector
*connector
,
2499 const struct drm_connector_state
*state
,
2500 struct drm_property
*property
,
2503 struct drm_device
*dev
= connector
->dev
;
2504 struct amdgpu_device
*adev
= dev
->dev_private
;
2505 struct dm_connector_state
*dm_state
=
2506 to_dm_connector_state(state
);
2509 if (property
== dev
->mode_config
.scaling_mode_property
) {
2510 switch (dm_state
->scaling
) {
2512 *val
= DRM_MODE_SCALE_CENTER
;
2515 *val
= DRM_MODE_SCALE_ASPECT
;
2518 *val
= DRM_MODE_SCALE_FULLSCREEN
;
2522 *val
= DRM_MODE_SCALE_NONE
;
2526 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2527 *val
= dm_state
->underscan_hborder
;
2529 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2530 *val
= dm_state
->underscan_vborder
;
2532 } else if (property
== adev
->mode_info
.underscan_property
) {
2533 *val
= dm_state
->underscan_enable
;
2539 void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
2541 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2542 const struct dc_link
*link
= aconnector
->dc_link
;
2543 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
2544 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2545 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2546 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2548 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
2549 amdgpu_dm_register_backlight_device(dm
);
2551 if (dm
->backlight_dev
) {
2552 backlight_device_unregister(dm
->backlight_dev
);
2553 dm
->backlight_dev
= NULL
;
2558 drm_connector_unregister(connector
);
2559 drm_connector_cleanup(connector
);
2563 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
2565 struct dm_connector_state
*state
=
2566 to_dm_connector_state(connector
->state
);
2570 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2573 state
->scaling
= RMX_OFF
;
2574 state
->underscan_enable
= false;
2575 state
->underscan_hborder
= 0;
2576 state
->underscan_vborder
= 0;
2578 connector
->state
= &state
->base
;
2579 connector
->state
->connector
= connector
;
2583 struct drm_connector_state
*amdgpu_dm_connector_atomic_duplicate_state(
2584 struct drm_connector
*connector
)
2586 struct dm_connector_state
*state
=
2587 to_dm_connector_state(connector
->state
);
2589 struct dm_connector_state
*new_state
=
2590 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
2593 __drm_atomic_helper_connector_duplicate_state(connector
,
2595 return &new_state
->base
;
2601 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
2602 .reset
= amdgpu_dm_connector_funcs_reset
,
2603 .detect
= amdgpu_dm_connector_detect
,
2604 .fill_modes
= drm_helper_probe_single_connector_modes
,
2605 .destroy
= amdgpu_dm_connector_destroy
,
2606 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
2607 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
2608 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
2609 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
2612 static struct drm_encoder
*best_encoder(struct drm_connector
*connector
)
2614 int enc_id
= connector
->encoder_ids
[0];
2615 struct drm_mode_object
*obj
;
2616 struct drm_encoder
*encoder
;
2618 DRM_DEBUG_KMS("Finding the best encoder\n");
2620 /* pick the encoder ids */
2622 obj
= drm_mode_object_find(connector
->dev
, enc_id
, DRM_MODE_OBJECT_ENCODER
);
2624 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2627 encoder
= obj_to_encoder(obj
);
2630 DRM_ERROR("No encoder id\n");
/* .get_modes hook: thin wrapper delegating to the DM implementation. */
static int get_modes(struct drm_connector *connector)
{
	int count = amdgpu_dm_connector_get_modes(connector);

	return count;
}
2639 static void create_eml_sink(struct amdgpu_dm_connector
*aconnector
)
2641 struct dc_sink_init_data init_params
= {
2642 .link
= aconnector
->dc_link
,
2643 .sink_signal
= SIGNAL_TYPE_VIRTUAL
2645 struct edid
*edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
2647 if (!aconnector
->base
.edid_blob_ptr
||
2648 !aconnector
->base
.edid_blob_ptr
->data
) {
2649 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
2650 aconnector
->base
.name
);
2652 aconnector
->base
.force
= DRM_FORCE_OFF
;
2653 aconnector
->base
.override_edid
= false;
2657 aconnector
->edid
= edid
;
2659 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
2660 aconnector
->dc_link
,
2662 (edid
->extensions
+ 1) * EDID_LENGTH
,
2665 if (aconnector
->base
.force
2667 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
2668 aconnector
->dc_link
->local_sink
:
2669 aconnector
->dc_em_sink
;
2672 static void handle_edid_mgmt(struct amdgpu_dm_connector
*aconnector
)
2674 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
2676 /* In case of headless boot with force on for DP managed connector
2677 * Those settings have to be != 0 to get initial modeset
2679 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
2680 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
2681 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
2685 aconnector
->base
.override_edid
= true;
2686 create_eml_sink(aconnector
);
2689 int amdgpu_dm_connector_mode_valid(
2690 struct drm_connector
*connector
,
2691 struct drm_display_mode
*mode
)
2693 int result
= MODE_ERROR
;
2694 struct dc_sink
*dc_sink
;
2695 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
2696 /* TODO: Unhardcode stream count */
2697 struct dc_stream_state
*stream
;
2698 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2700 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
2701 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
2704 /* Only run this the first time mode_valid is called to initilialize
2707 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
2708 !aconnector
->dc_em_sink
)
2709 handle_edid_mgmt(aconnector
);
2711 dc_sink
= to_amdgpu_dm_connector(connector
)->dc_sink
;
2713 if (dc_sink
== NULL
) {
2714 DRM_ERROR("dc_sink is NULL!\n");
2718 stream
= dc_create_stream_for_sink(dc_sink
);
2719 if (stream
== NULL
) {
2720 DRM_ERROR("Failed to create stream for sink!\n");
2724 drm_mode_set_crtcinfo(mode
, 0);
2725 fill_stream_properties_from_drm_display_mode(stream
, mode
, connector
);
2727 stream
->src
.width
= mode
->hdisplay
;
2728 stream
->src
.height
= mode
->vdisplay
;
2729 stream
->dst
= stream
->src
;
2731 if (dc_validate_stream(adev
->dm
.dc
, stream
))
2734 dc_stream_release(stream
);
2737 /* TODO: error handling*/
2741 static const struct drm_connector_helper_funcs
2742 amdgpu_dm_connector_helper_funcs
= {
2744 * If hotplug a second bigger display in FB Con mode, bigger resolution
2745 * modes will be filtered by drm_mode_validate_size(), and those modes
2746 * is missing after user start lightdm. So we need to renew modes list.
2747 * in get_modes call back, not just return the modes count
2749 .get_modes
= get_modes
,
2750 .mode_valid
= amdgpu_dm_connector_mode_valid
,
2751 .best_encoder
= best_encoder
/* .disable hook: intentionally empty — CRTC disable is handled through
 * the DC stream programming paths, not this helper.
 */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
2758 static int dm_crtc_helper_atomic_check(
2759 struct drm_crtc
*crtc
,
2760 struct drm_crtc_state
*state
)
2762 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
2763 struct dc
*dc
= adev
->dm
.dc
;
2764 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(state
);
2767 if (unlikely(!dm_crtc_state
->stream
&&
2768 modeset_required(state
, NULL
, dm_crtc_state
->stream
))) {
2773 /* In some use cases, like reset, no stream is attached */
2774 if (!dm_crtc_state
->stream
)
2777 if (dc_validate_stream(dc
, dm_crtc_state
->stream
))
2783 static bool dm_crtc_helper_mode_fixup(
2784 struct drm_crtc
*crtc
,
2785 const struct drm_display_mode
*mode
,
2786 struct drm_display_mode
*adjusted_mode
)
2791 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
2792 .disable
= dm_crtc_helper_disable
,
2793 .atomic_check
= dm_crtc_helper_atomic_check
,
2794 .mode_fixup
= dm_crtc_helper_mode_fixup
/* .disable hook: intentionally a no-op; encoders are managed by DC. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
/* .atomic_check hook for encoders: nothing to validate here; always
 * succeed (the missing return value was undefined behavior).
 */
static int dm_encoder_helper_atomic_check(
	struct drm_encoder *encoder,
	struct drm_crtc_state *crtc_state,
	struct drm_connector_state *conn_state)
{
	return 0;
}
2810 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
2811 .disable
= dm_encoder_helper_disable
,
2812 .atomic_check
= dm_encoder_helper_atomic_check
2815 static void dm_drm_plane_reset(struct drm_plane
*plane
)
2817 struct dm_plane_state
*amdgpu_state
= NULL
;
2820 plane
->funcs
->atomic_destroy_state(plane
, plane
->state
);
2822 amdgpu_state
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
2825 plane
->state
= &amdgpu_state
->base
;
2826 plane
->state
->plane
= plane
;
2827 plane
->state
->rotation
= DRM_MODE_ROTATE_0
;
2832 static struct drm_plane_state
*
2833 dm_drm_plane_duplicate_state(struct drm_plane
*plane
)
2835 struct dm_plane_state
*dm_plane_state
, *old_dm_plane_state
;
2837 old_dm_plane_state
= to_dm_plane_state(plane
->state
);
2838 dm_plane_state
= kzalloc(sizeof(*dm_plane_state
), GFP_KERNEL
);
2839 if (!dm_plane_state
)
2842 __drm_atomic_helper_plane_duplicate_state(plane
, &dm_plane_state
->base
);
2844 if (old_dm_plane_state
->dc_state
) {
2845 dm_plane_state
->dc_state
= old_dm_plane_state
->dc_state
;
2846 dc_plane_state_retain(dm_plane_state
->dc_state
);
2849 return &dm_plane_state
->base
;
2852 void dm_drm_plane_destroy_state(struct drm_plane
*plane
,
2853 struct drm_plane_state
*state
)
2855 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
2857 if (dm_plane_state
->dc_state
)
2858 dc_plane_state_release(dm_plane_state
->dc_state
);
2860 drm_atomic_helper_plane_destroy_state(plane
, state
);
2863 static const struct drm_plane_funcs dm_plane_funcs
= {
2864 .update_plane
= drm_atomic_helper_update_plane
,
2865 .disable_plane
= drm_atomic_helper_disable_plane
,
2866 .destroy
= drm_plane_cleanup
,
2867 .reset
= dm_drm_plane_reset
,
2868 .atomic_duplicate_state
= dm_drm_plane_duplicate_state
,
2869 .atomic_destroy_state
= dm_drm_plane_destroy_state
,
2872 static int dm_plane_helper_prepare_fb(
2873 struct drm_plane
*plane
,
2874 struct drm_plane_state
*new_state
)
2876 struct amdgpu_framebuffer
*afb
;
2877 struct drm_gem_object
*obj
;
2878 struct amdgpu_bo
*rbo
;
2880 struct dm_plane_state
*dm_plane_state_new
, *dm_plane_state_old
;
2881 unsigned int awidth
;
2883 dm_plane_state_old
= to_dm_plane_state(plane
->state
);
2884 dm_plane_state_new
= to_dm_plane_state(new_state
);
2886 if (!new_state
->fb
) {
2887 DRM_DEBUG_KMS("No FB bound\n");
2891 afb
= to_amdgpu_framebuffer(new_state
->fb
);
2894 rbo
= gem_to_amdgpu_bo(obj
);
2895 r
= amdgpu_bo_reserve(rbo
, false);
2896 if (unlikely(r
!= 0))
2899 r
= amdgpu_bo_pin(rbo
, AMDGPU_GEM_DOMAIN_VRAM
, &afb
->address
);
2902 amdgpu_bo_unreserve(rbo
);
2904 if (unlikely(r
!= 0)) {
2905 DRM_ERROR("Failed to pin framebuffer\n");
2911 if (dm_plane_state_new
->dc_state
&&
2912 dm_plane_state_old
->dc_state
!= dm_plane_state_new
->dc_state
) {
2913 struct dc_plane_state
*plane_state
= dm_plane_state_new
->dc_state
;
2915 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
2916 plane_state
->address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
2917 plane_state
->address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
2919 awidth
= ALIGN(new_state
->fb
->width
, 64);
2920 plane_state
->address
.video_progressive
.luma_addr
.low_part
2921 = lower_32_bits(afb
->address
);
2922 plane_state
->address
.video_progressive
.chroma_addr
.low_part
2923 = lower_32_bits(afb
->address
) +
2924 (awidth
* new_state
->fb
->height
);
2928 /* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
2929 * prepare and cleanup in drm_atomic_helper_prepare_planes
2930 * and drm_atomic_helper_cleanup_planes because fb doens't in s3.
2931 * IN 4.10 kernel this code should be removed and amdgpu_device_suspend
2932 * code touching fram buffers should be avoided for DC.
2934 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
2935 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(new_state
->crtc
);
2937 acrtc
->cursor_bo
= obj
;
2942 static void dm_plane_helper_cleanup_fb(
2943 struct drm_plane
*plane
,
2944 struct drm_plane_state
*old_state
)
2946 struct amdgpu_bo
*rbo
;
2947 struct amdgpu_framebuffer
*afb
;
2953 afb
= to_amdgpu_framebuffer(old_state
->fb
);
2954 rbo
= gem_to_amdgpu_bo(afb
->obj
);
2955 r
= amdgpu_bo_reserve(rbo
, false);
2957 DRM_ERROR("failed to reserve rbo before unpin\n");
2961 amdgpu_bo_unpin(rbo
);
2962 amdgpu_bo_unreserve(rbo
);
2963 amdgpu_bo_unref(&rbo
);
2966 int dm_create_validation_set_for_connector(struct drm_connector
*connector
,
2967 struct drm_display_mode
*mode
, struct dc_validation_set
*val_set
)
2969 int result
= MODE_ERROR
;
2970 struct dc_sink
*dc_sink
=
2971 to_amdgpu_dm_connector(connector
)->dc_sink
;
2972 /* TODO: Unhardcode stream count */
2973 struct dc_stream_state
*stream
;
2975 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
2976 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
2979 if (dc_sink
== NULL
) {
2980 DRM_ERROR("dc_sink is NULL!\n");
2984 stream
= dc_create_stream_for_sink(dc_sink
);
2986 if (stream
== NULL
) {
2987 DRM_ERROR("Failed to create stream for sink!\n");
2991 drm_mode_set_crtcinfo(mode
, 0);
2993 fill_stream_properties_from_drm_display_mode(stream
, mode
, connector
);
2995 val_set
->stream
= stream
;
2997 stream
->src
.width
= mode
->hdisplay
;
2998 stream
->src
.height
= mode
->vdisplay
;
2999 stream
->dst
= stream
->src
;
3004 int dm_plane_atomic_check(struct drm_plane
*plane
,
3005 struct drm_plane_state
*state
)
3007 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
3008 struct dc
*dc
= adev
->dm
.dc
;
3009 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
3011 if (!dm_plane_state
->dc_state
)
3014 if (dc_validate_plane(dc
, dm_plane_state
->dc_state
))
3020 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
3021 .prepare_fb
= dm_plane_helper_prepare_fb
,
3022 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
3023 .atomic_check
= dm_plane_atomic_check
,
3027 * TODO: these are currently initialized to rgb formats only.
3028 * For future use cases we should either initialize them dynamically based on
3029 * plane capabilities, or initialize this array to all formats, so internal drm
3030 * check will succeed, and let DC to implement proper check
3032 static uint32_t rgb_formats
[] = {
3034 DRM_FORMAT_XRGB8888
,
3035 DRM_FORMAT_ARGB8888
,
3036 DRM_FORMAT_RGBA8888
,
3037 DRM_FORMAT_XRGB2101010
,
3038 DRM_FORMAT_XBGR2101010
,
3039 DRM_FORMAT_ARGB2101010
,
3040 DRM_FORMAT_ABGR2101010
,
3043 static uint32_t yuv_formats
[] = {
3048 static const u32 cursor_formats
[] = {
3052 int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
3053 struct amdgpu_plane
*aplane
,
3054 unsigned long possible_crtcs
)
3058 switch (aplane
->base
.type
) {
3059 case DRM_PLANE_TYPE_PRIMARY
:
3060 aplane
->base
.format_default
= true;
3062 res
= drm_universal_plane_init(
3068 ARRAY_SIZE(rgb_formats
),
3069 NULL
, aplane
->base
.type
, NULL
);
3071 case DRM_PLANE_TYPE_OVERLAY
:
3072 res
= drm_universal_plane_init(
3078 ARRAY_SIZE(yuv_formats
),
3079 NULL
, aplane
->base
.type
, NULL
);
3081 case DRM_PLANE_TYPE_CURSOR
:
3082 res
= drm_universal_plane_init(
3088 ARRAY_SIZE(cursor_formats
),
3089 NULL
, aplane
->base
.type
, NULL
);
3093 drm_plane_helper_add(&aplane
->base
, &dm_plane_helper_funcs
);
3098 int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
3099 struct drm_plane
*plane
,
3100 uint32_t crtc_index
)
3102 struct amdgpu_crtc
*acrtc
= NULL
;
3103 struct amdgpu_plane
*cursor_plane
;
3107 cursor_plane
= kzalloc(sizeof(*cursor_plane
), GFP_KERNEL
);
3111 cursor_plane
->base
.type
= DRM_PLANE_TYPE_CURSOR
;
3112 res
= amdgpu_dm_plane_init(dm
, cursor_plane
, 0);
3114 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
3118 res
= drm_crtc_init_with_planes(
3122 &cursor_plane
->base
,
3123 &amdgpu_dm_crtc_funcs
, NULL
);
3128 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
3130 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3131 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3133 acrtc
->crtc_id
= crtc_index
;
3134 acrtc
->base
.enabled
= false;
3136 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
3137 drm_mode_crtc_set_gamma_size(&acrtc
->base
, 256);
3143 kfree(cursor_plane
);
3144 acrtc
->crtc_id
= -1;
3149 static int to_drm_connector_type(enum signal_type st
)
3152 case SIGNAL_TYPE_HDMI_TYPE_A
:
3153 return DRM_MODE_CONNECTOR_HDMIA
;
3154 case SIGNAL_TYPE_EDP
:
3155 return DRM_MODE_CONNECTOR_eDP
;
3156 case SIGNAL_TYPE_RGB
:
3157 return DRM_MODE_CONNECTOR_VGA
;
3158 case SIGNAL_TYPE_DISPLAY_PORT
:
3159 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
3160 return DRM_MODE_CONNECTOR_DisplayPort
;
3161 case SIGNAL_TYPE_DVI_DUAL_LINK
:
3162 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
3163 return DRM_MODE_CONNECTOR_DVID
;
3164 case SIGNAL_TYPE_VIRTUAL
:
3165 return DRM_MODE_CONNECTOR_VIRTUAL
;
3168 return DRM_MODE_CONNECTOR_Unknown
;
3172 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
3174 const struct drm_connector_helper_funcs
*helper
=
3175 connector
->helper_private
;
3176 struct drm_encoder
*encoder
;
3177 struct amdgpu_encoder
*amdgpu_encoder
;
3179 encoder
= helper
->best_encoder(connector
);
3181 if (encoder
== NULL
)
3184 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3186 amdgpu_encoder
->native_mode
.clock
= 0;
3188 if (!list_empty(&connector
->probed_modes
)) {
3189 struct drm_display_mode
*preferred_mode
= NULL
;
3191 list_for_each_entry(preferred_mode
,
3192 &connector
->probed_modes
,
3194 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
)
3195 amdgpu_encoder
->native_mode
= *preferred_mode
;
3203 static struct drm_display_mode
*amdgpu_dm_create_common_mode(
3204 struct drm_encoder
*encoder
, char *name
,
3205 int hdisplay
, int vdisplay
)
3207 struct drm_device
*dev
= encoder
->dev
;
3208 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3209 struct drm_display_mode
*mode
= NULL
;
3210 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3212 mode
= drm_mode_duplicate(dev
, native_mode
);
3217 mode
->hdisplay
= hdisplay
;
3218 mode
->vdisplay
= vdisplay
;
3219 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
3220 strncpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
3226 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
3227 struct drm_connector
*connector
)
3229 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3230 struct drm_display_mode
*mode
= NULL
;
3231 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3232 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3233 to_amdgpu_dm_connector(connector
);
3237 char name
[DRM_DISPLAY_MODE_LEN
];
3240 } common_modes
[] = {
3241 { "640x480", 640, 480},
3242 { "800x600", 800, 600},
3243 { "1024x768", 1024, 768},
3244 { "1280x720", 1280, 720},
3245 { "1280x800", 1280, 800},
3246 {"1280x1024", 1280, 1024},
3247 { "1440x900", 1440, 900},
3248 {"1680x1050", 1680, 1050},
3249 {"1600x1200", 1600, 1200},
3250 {"1920x1080", 1920, 1080},
3251 {"1920x1200", 1920, 1200}
3254 n
= ARRAY_SIZE(common_modes
);
3256 for (i
= 0; i
< n
; i
++) {
3257 struct drm_display_mode
*curmode
= NULL
;
3258 bool mode_existed
= false;
3260 if (common_modes
[i
].w
> native_mode
->hdisplay
||
3261 common_modes
[i
].h
> native_mode
->vdisplay
||
3262 (common_modes
[i
].w
== native_mode
->hdisplay
&&
3263 common_modes
[i
].h
== native_mode
->vdisplay
))
3266 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
3267 if (common_modes
[i
].w
== curmode
->hdisplay
&&
3268 common_modes
[i
].h
== curmode
->vdisplay
) {
3269 mode_existed
= true;
3277 mode
= amdgpu_dm_create_common_mode(encoder
,
3278 common_modes
[i
].name
, common_modes
[i
].w
,
3280 drm_mode_probed_add(connector
, mode
);
3281 amdgpu_dm_connector
->num_modes
++;
3285 static void amdgpu_dm_connector_ddc_get_modes(
3286 struct drm_connector
*connector
,
3289 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3290 to_amdgpu_dm_connector(connector
);
3293 /* empty probed_modes */
3294 INIT_LIST_HEAD(&connector
->probed_modes
);
3295 amdgpu_dm_connector
->num_modes
=
3296 drm_add_edid_modes(connector
, edid
);
3298 drm_edid_to_eld(connector
, edid
);
3300 amdgpu_dm_get_native_mode(connector
);
3302 amdgpu_dm_connector
->num_modes
= 0;
3305 int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
3307 const struct drm_connector_helper_funcs
*helper
=
3308 connector
->helper_private
;
3309 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3310 to_amdgpu_dm_connector(connector
);
3311 struct drm_encoder
*encoder
;
3312 struct edid
*edid
= amdgpu_dm_connector
->edid
;
3314 encoder
= helper
->best_encoder(connector
);
3316 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
3317 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
3318 return amdgpu_dm_connector
->num_modes
;
3321 void amdgpu_dm_connector_init_helper(
3322 struct amdgpu_display_manager
*dm
,
3323 struct amdgpu_dm_connector
*aconnector
,
3325 struct dc_link
*link
,
3328 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
3330 aconnector
->connector_id
= link_index
;
3331 aconnector
->dc_link
= link
;
3332 aconnector
->base
.interlace_allowed
= false;
3333 aconnector
->base
.doublescan_allowed
= false;
3334 aconnector
->base
.stereo_allowed
= false;
3335 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
3336 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
3338 mutex_init(&aconnector
->hpd_lock
);
3340 /* configure support HPD hot plug connector_>polled default value is 0
3341 * which means HPD hot plug not supported
3343 switch (connector_type
) {
3344 case DRM_MODE_CONNECTOR_HDMIA
:
3345 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3347 case DRM_MODE_CONNECTOR_DisplayPort
:
3348 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3350 case DRM_MODE_CONNECTOR_DVID
:
3351 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3357 drm_object_attach_property(&aconnector
->base
.base
,
3358 dm
->ddev
->mode_config
.scaling_mode_property
,
3359 DRM_MODE_SCALE_NONE
);
3361 drm_object_attach_property(&aconnector
->base
.base
,
3362 adev
->mode_info
.underscan_property
,
3364 drm_object_attach_property(&aconnector
->base
.base
,
3365 adev
->mode_info
.underscan_hborder_property
,
3367 drm_object_attach_property(&aconnector
->base
.base
,
3368 adev
->mode_info
.underscan_vborder_property
,
3373 int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
3374 struct i2c_msg
*msgs
, int num
)
3376 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
3377 struct ddc_service
*ddc_service
= i2c
->ddc_service
;
3378 struct i2c_command cmd
;
3382 cmd
.payloads
= kcalloc(num
, sizeof(struct i2c_payload
), GFP_KERNEL
);
3387 cmd
.number_of_payloads
= num
;
3388 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
3391 for (i
= 0; i
< num
; i
++) {
3392 cmd
.payloads
[i
].write
= !(msgs
[i
].flags
& I2C_M_RD
);
3393 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
3394 cmd
.payloads
[i
].length
= msgs
[i
].len
;
3395 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
3398 if (dal_i2caux_submit_i2c_command(
3399 ddc_service
->ctx
->i2caux
,
3400 ddc_service
->ddc_pin
,
3404 kfree(cmd
.payloads
);
3408 u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
3410 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
3413 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
3414 .master_xfer
= amdgpu_dm_i2c_xfer
,
3415 .functionality
= amdgpu_dm_i2c_func
,
3418 static struct amdgpu_i2c_adapter
*create_i2c(
3419 struct ddc_service
*ddc_service
,
3423 struct amdgpu_device
*adev
= ddc_service
->ctx
->driver_context
;
3424 struct amdgpu_i2c_adapter
*i2c
;
3426 i2c
= kzalloc(sizeof(struct amdgpu_i2c_adapter
), GFP_KERNEL
);
3427 i2c
->base
.owner
= THIS_MODULE
;
3428 i2c
->base
.class = I2C_CLASS_DDC
;
3429 i2c
->base
.dev
.parent
= &adev
->pdev
->dev
;
3430 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
3431 snprintf(i2c
->base
.name
, sizeof(i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
3432 i2c_set_adapdata(&i2c
->base
, i2c
);
3433 i2c
->ddc_service
= ddc_service
;
3438 /* Note: this function assumes that dc_link_detect() was called for the
3439 * dc_link which will be represented by this aconnector.
3441 int amdgpu_dm_connector_init(
3442 struct amdgpu_display_manager
*dm
,
3443 struct amdgpu_dm_connector
*aconnector
,
3444 uint32_t link_index
,
3445 struct amdgpu_encoder
*aencoder
)
3449 struct dc
*dc
= dm
->dc
;
3450 struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
3451 struct amdgpu_i2c_adapter
*i2c
;
3452 ((struct dc_link
*)link
)->priv
= aconnector
;
3454 DRM_DEBUG_KMS("%s()\n", __func__
);
3456 i2c
= create_i2c(link
->ddc
, link
->link_index
, &res
);
3457 aconnector
->i2c
= i2c
;
3458 res
= i2c_add_adapter(&i2c
->base
);
3461 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
3465 connector_type
= to_drm_connector_type(link
->connector_signal
);
3467 res
= drm_connector_init(
3470 &amdgpu_dm_connector_funcs
,
3474 DRM_ERROR("connector_init failed\n");
3475 aconnector
->connector_id
= -1;
3479 drm_connector_helper_add(
3481 &amdgpu_dm_connector_helper_funcs
);
3483 amdgpu_dm_connector_init_helper(
3490 drm_mode_connector_attach_encoder(
3491 &aconnector
->base
, &aencoder
->base
);
3493 drm_connector_register(&aconnector
->base
);
3495 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
3496 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
3497 amdgpu_dm_initialize_dp_connector(dm
, aconnector
);
3499 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3500 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3502 /* NOTE: this currently will create backlight device even if a panel
3503 * is not connected to the eDP/LVDS connector.
3505 * This is less than ideal but we don't have sink information at this
3506 * stage since detection happens after. We can't do detection earlier
3507 * since MST detection needs connectors to be created first.
3509 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
3510 /* Event if registration failed, we should continue with
3511 * DM initialization because not having a backlight control
3512 * is better then a black screen.
3514 amdgpu_dm_register_backlight_device(dm
);
3516 if (dm
->backlight_dev
)
3517 dm
->backlight_link
= link
;
3524 aconnector
->i2c
= NULL
;
3529 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
3531 switch (adev
->mode_info
.num_crtc
) {
3548 int amdgpu_dm_encoder_init(
3549 struct drm_device
*dev
,
3550 struct amdgpu_encoder
*aencoder
,
3551 uint32_t link_index
)
3553 struct amdgpu_device
*adev
= dev
->dev_private
;
3555 int res
= drm_encoder_init(dev
,
3557 &amdgpu_dm_encoder_funcs
,
3558 DRM_MODE_ENCODER_TMDS
,
3561 aencoder
->base
.possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
3564 aencoder
->encoder_id
= link_index
;
3566 aencoder
->encoder_id
= -1;
3568 drm_encoder_helper_add(&aencoder
->base
, &amdgpu_dm_encoder_helper_funcs
);
3573 static void manage_dm_interrupts(
3574 struct amdgpu_device
*adev
,
3575 struct amdgpu_crtc
*acrtc
,
3579 * this is not correct translation but will work as soon as VBLANK
3580 * constant is the same as PFLIP
3583 amdgpu_crtc_idx_to_irq_type(
3588 drm_crtc_vblank_on(&acrtc
->base
);
3591 &adev
->pageflip_irq
,
3597 &adev
->pageflip_irq
,
3599 drm_crtc_vblank_off(&acrtc
->base
);
3603 static bool is_scaling_state_different(
3604 const struct dm_connector_state
*dm_state
,
3605 const struct dm_connector_state
*old_dm_state
)
3607 if (dm_state
->scaling
!= old_dm_state
->scaling
)
3609 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
3610 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
3612 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
3613 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
3615 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
||
3616 dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
3621 static void remove_stream(
3622 struct amdgpu_device
*adev
,
3623 struct amdgpu_crtc
*acrtc
,
3624 struct dc_stream_state
*stream
)
3626 /* this is the update mode case */
3627 if (adev
->dm
.freesync_module
)
3628 mod_freesync_remove_stream(adev
->dm
.freesync_module
, stream
);
3630 acrtc
->otg_inst
= -1;
3631 acrtc
->enabled
= false;
3634 int get_cursor_position(struct drm_plane
*plane
, struct drm_crtc
*crtc
,
3635 struct dc_cursor_position
*position
)
3637 struct amdgpu_crtc
*amdgpu_crtc
= amdgpu_crtc
= to_amdgpu_crtc(crtc
);
3639 int xorigin
= 0, yorigin
= 0;
3641 if (!crtc
|| !plane
->state
->fb
) {
3642 position
->enable
= false;
3648 if ((plane
->state
->crtc_w
> amdgpu_crtc
->max_cursor_width
) ||
3649 (plane
->state
->crtc_h
> amdgpu_crtc
->max_cursor_height
)) {
3650 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3652 plane
->state
->crtc_w
,
3653 plane
->state
->crtc_h
);
3657 x
= plane
->state
->crtc_x
;
3658 y
= plane
->state
->crtc_y
;
3659 /* avivo cursor are offset into the total surface */
3660 x
+= crtc
->primary
->state
->src_x
>> 16;
3661 y
+= crtc
->primary
->state
->src_y
>> 16;
3663 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
3667 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
3670 position
->enable
= true;
3673 position
->x_hotspot
= xorigin
;
3674 position
->y_hotspot
= yorigin
;
3679 static void handle_cursor_update(
3680 struct drm_plane
*plane
,
3681 struct drm_plane_state
*old_plane_state
)
3683 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(plane
->state
->fb
);
3684 struct drm_crtc
*crtc
= afb
? plane
->state
->crtc
: old_plane_state
->crtc
;
3685 struct dm_crtc_state
*crtc_state
= crtc
? to_dm_crtc_state(crtc
->state
) : NULL
;
3686 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
3687 uint64_t address
= afb
? afb
->address
: 0;
3688 struct dc_cursor_position position
;
3689 struct dc_cursor_attributes attributes
;
3692 if (!plane
->state
->fb
&& !old_plane_state
->fb
)
3695 DRM_DEBUG_KMS("%s: crtc_id=%d with size %d to %d\n",
3697 amdgpu_crtc
->crtc_id
,
3698 plane
->state
->crtc_w
,
3699 plane
->state
->crtc_h
);
3701 ret
= get_cursor_position(plane
, crtc
, &position
);
3705 if (!position
.enable
) {
3706 /* turn off cursor */
3707 if (crtc_state
&& crtc_state
->stream
)
3708 dc_stream_set_cursor_position(crtc_state
->stream
,
3713 amdgpu_crtc
->cursor_width
= plane
->state
->crtc_w
;
3714 amdgpu_crtc
->cursor_height
= plane
->state
->crtc_h
;
3716 attributes
.address
.high_part
= upper_32_bits(address
);
3717 attributes
.address
.low_part
= lower_32_bits(address
);
3718 attributes
.width
= plane
->state
->crtc_w
;
3719 attributes
.height
= plane
->state
->crtc_h
;
3720 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
3721 attributes
.rotation_angle
= 0;
3722 attributes
.attribute_flags
.value
= 0;
3724 attributes
.pitch
= attributes
.width
;
3726 if (!dc_stream_set_cursor_attributes(crtc_state
->stream
,
3728 DRM_ERROR("DC failed to set cursor attributes\n");
3730 if (crtc_state
->stream
)
3731 if (!dc_stream_set_cursor_position(crtc_state
->stream
,
3733 DRM_ERROR("DC failed to set cursor position\n");
3736 static void prepare_flip_isr(struct amdgpu_crtc
*acrtc
)
3739 assert_spin_locked(&acrtc
->base
.dev
->event_lock
);
3740 WARN_ON(acrtc
->event
);
3742 acrtc
->event
= acrtc
->base
.state
->event
;
3744 /* Set the flip status */
3745 acrtc
->pflip_status
= AMDGPU_FLIP_SUBMITTED
;
3747 /* Mark this event as consumed */
3748 acrtc
->base
.state
->event
= NULL
;
3750 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
3757 * Waits on all BO's fences and for proper vblank count
3759 static void amdgpu_dm_do_flip(
3760 struct drm_crtc
*crtc
,
3761 struct drm_framebuffer
*fb
,
3764 unsigned long flags
;
3765 uint32_t target_vblank
;
3767 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3768 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(fb
);
3769 struct amdgpu_bo
*abo
= gem_to_amdgpu_bo(afb
->obj
);
3770 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
3771 bool async_flip
= (crtc
->state
->pageflip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
) != 0;
3772 struct dc_flip_addrs addr
= { {0} };
3773 /* TODO eliminate or rename surface_update */
3774 struct dc_surface_update surface_updates
[1] = { {0} };
3775 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
3778 /* Prepare wait for target vblank early - before the fence-waits */
3779 target_vblank
= target
- drm_crtc_vblank_count(crtc
) +
3780 amdgpu_get_vblank_counter_kms(crtc
->dev
, acrtc
->crtc_id
);
3782 /* TODO This might fail and hence better not used, wait
3783 * explicitly on fences instead
3784 * and in general should be called for
3785 * blocking commit to as per framework helpers
3787 r
= amdgpu_bo_reserve(abo
, true);
3788 if (unlikely(r
!= 0)) {
3789 DRM_ERROR("failed to reserve buffer before flip\n");
3793 /* Wait for all fences on this FB */
3794 WARN_ON(reservation_object_wait_timeout_rcu(abo
->tbo
.resv
, true, false,
3795 MAX_SCHEDULE_TIMEOUT
) < 0);
3797 amdgpu_bo_unreserve(abo
);
3799 /* Wait until we're out of the vertical blank period before the one
3800 * targeted by the flip
3802 while ((acrtc
->enabled
&&
3803 (amdgpu_get_crtc_scanoutpos(adev
->ddev
, acrtc
->crtc_id
, 0,
3804 &vpos
, &hpos
, NULL
, NULL
,
3806 & (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
)) ==
3807 (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
) &&
3808 (int)(target_vblank
-
3809 amdgpu_get_vblank_counter_kms(adev
->ddev
, acrtc
->crtc_id
)) > 0)) {
3810 usleep_range(1000, 1100);
3814 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
3815 /* update crtc fb */
3816 crtc
->primary
->fb
= fb
;
3818 WARN_ON(acrtc
->pflip_status
!= AMDGPU_FLIP_NONE
);
3819 WARN_ON(!acrtc_state
->stream
);
3821 addr
.address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3822 addr
.address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3823 addr
.flip_immediate
= async_flip
;
3826 if (acrtc
->base
.state
->event
)
3827 prepare_flip_isr(acrtc
);
3829 surface_updates
->surface
= dc_stream_get_status(acrtc_state
->stream
)->plane_states
[0];
3830 surface_updates
->flip_addr
= &addr
;
3833 dc_update_planes_and_stream(adev
->dm
.dc
, surface_updates
, 1, acrtc_state
->stream
, NULL
);
3835 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
3837 addr
.address
.grph
.addr
.high_part
,
3838 addr
.address
.grph
.addr
.low_part
);
3841 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3844 static void amdgpu_dm_commit_planes(struct drm_atomic_state
*state
,
3845 struct drm_device
*dev
,
3846 struct amdgpu_display_manager
*dm
,
3847 struct drm_crtc
*pcrtc
,
3848 bool *wait_for_vblank
)
3851 struct drm_plane
*plane
;
3852 struct drm_plane_state
*old_plane_state
;
3853 struct dc_stream_state
*dc_stream_attach
;
3854 struct dc_plane_state
*plane_states_constructed
[MAX_SURFACES
];
3855 struct amdgpu_crtc
*acrtc_attach
= to_amdgpu_crtc(pcrtc
);
3856 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(pcrtc
->state
);
3857 int planes_count
= 0;
3858 unsigned long flags
;
3860 /* update planes when needed */
3861 for_each_plane_in_state(state
, plane
, old_plane_state
, i
) {
3862 struct drm_plane_state
*plane_state
= plane
->state
;
3863 struct drm_crtc
*crtc
= plane_state
->crtc
;
3864 struct drm_framebuffer
*fb
= plane_state
->fb
;
3866 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(plane_state
);
3868 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
3869 handle_cursor_update(plane
, old_plane_state
);
3873 if (!fb
|| !crtc
|| pcrtc
!= crtc
|| !crtc
->state
->active
)
3876 pflip_needed
= !state
->allow_modeset
;
3878 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
3879 if (acrtc_attach
->pflip_status
!= AMDGPU_FLIP_NONE
) {
3880 DRM_ERROR("%s: acrtc %d, already busy\n",
3882 acrtc_attach
->crtc_id
);
3883 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3884 /* In commit tail framework this cannot happen */
3887 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3889 if (!pflip_needed
) {
3890 WARN_ON(!dm_plane_state
->dc_state
);
3892 plane_states_constructed
[planes_count
] = dm_plane_state
->dc_state
;
3894 dc_stream_attach
= acrtc_state
->stream
;
3897 } else if (crtc
->state
->planes_changed
) {
3898 /* Assume even ONE crtc with immediate flip means
3899 * entire can't wait for VBLANK
3900 * TODO Check if it's correct
3903 pcrtc
->state
->pageflip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
?
3906 /* TODO: Needs rework for multiplane flip */
3907 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
)
3908 drm_crtc_vblank_get(crtc
);
3913 drm_crtc_vblank_count(crtc
) + *wait_for_vblank
);
3919 unsigned long flags
;
3921 if (pcrtc
->state
->event
) {
3923 drm_crtc_vblank_get(pcrtc
);
3925 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
3926 prepare_flip_isr(acrtc_attach
);
3927 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
3930 if (false == dc_commit_planes_to_stream(dm
->dc
,
3931 plane_states_constructed
,
3934 dm_error("%s: Failed to attach plane!\n", __func__
);
3936 /*TODO BUG Here should go disable planes on CRTC. */
3941 int amdgpu_dm_atomic_commit(
3942 struct drm_device
*dev
,
3943 struct drm_atomic_state
*state
,
3946 struct drm_crtc
*crtc
;
3947 struct drm_crtc_state
*new_state
;
3948 struct amdgpu_device
*adev
= dev
->dev_private
;
3952 * We evade vblanks and pflips on crtc that
3953 * should be changed. We do it here to flush & disable
3954 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
3955 * it will update crtc->dm_crtc_state->stream pointer which is used in
3958 for_each_crtc_in_state(state
, crtc
, new_state
, i
) {
3959 struct dm_crtc_state
*old_acrtc_state
= to_dm_crtc_state(crtc
->state
);
3960 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3962 if (drm_atomic_crtc_needs_modeset(new_state
) && old_acrtc_state
->stream
)
3963 manage_dm_interrupts(adev
, acrtc
, false);
3966 return drm_atomic_helper_commit(dev
, state
, nonblock
);
3968 /*TODO Handle EINTR, reenable IRQ*/
3971 void amdgpu_dm_atomic_commit_tail(
3972 struct drm_atomic_state
*state
)
3974 struct drm_device
*dev
= state
->dev
;
3975 struct amdgpu_device
*adev
= dev
->dev_private
;
3976 struct amdgpu_display_manager
*dm
= &adev
->dm
;
3977 struct dm_atomic_state
*dm_state
;
3979 uint32_t new_crtcs_count
= 0;
3980 struct drm_crtc
*crtc
, *pcrtc
;
3981 struct drm_crtc_state
*old_crtc_state
;
3982 struct amdgpu_crtc
*new_crtcs
[MAX_STREAMS
];
3983 struct dc_stream_state
*new_stream
= NULL
;
3984 unsigned long flags
;
3985 bool wait_for_vblank
= true;
3986 struct drm_connector
*connector
;
3987 struct drm_connector_state
*old_conn_state
;
3988 struct dm_crtc_state
*old_acrtc_state
, *new_acrtc_state
;
3990 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
3992 dm_state
= to_dm_atomic_state(state
);
3994 /* update changed items */
3995 for_each_crtc_in_state(state
, crtc
, old_crtc_state
, i
) {
3996 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3997 struct drm_crtc_state
*new_state
= crtc
->state
;
3999 new_acrtc_state
= to_dm_crtc_state(new_state
);
4000 old_acrtc_state
= to_dm_crtc_state(old_crtc_state
);
4003 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4004 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4005 "connectors_changed:%d\n",
4009 new_state
->planes_changed
,
4010 new_state
->mode_changed
,
4011 new_state
->active_changed
,
4012 new_state
->connectors_changed
);
4014 /* handles headless hotplug case, updating new_state and
4015 * aconnector as needed
4018 if (modeset_required(new_state
, new_acrtc_state
->stream
, old_acrtc_state
->stream
)) {
4020 DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc
->crtc_id
, acrtc
);
4022 if (!new_acrtc_state
->stream
) {
4024 * this could happen because of issues with
4025 * userspace notifications delivery.
4026 * In this case userspace tries to set mode on
4027 * display which is disconnect in fact.
4028 * dc_sink in NULL in this case on aconnector.
4029 * We expect reset mode will come soon.
4031 * This can also happen when unplug is done
4032 * during resume sequence ended
4034 * In this case, we want to pretend we still
4035 * have a sink to keep the pipe running so that
4036 * hw state is consistent with the sw state
4038 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
4039 __func__
, acrtc
->base
.base
.id
);
4044 if (old_acrtc_state
->stream
)
4045 remove_stream(adev
, acrtc
, old_acrtc_state
->stream
);
4049 * this loop saves set mode crtcs
4050 * we needed to enable vblanks once all
4051 * resources acquired in dc after dc_commit_streams
4054 /*TODO move all this into dm_crtc_state, get rid of
4055 * new_crtcs array and use old and new atomic states
4058 new_crtcs
[new_crtcs_count
] = acrtc
;
4061 acrtc
->enabled
= true;
4062 acrtc
->hw_mode
= crtc
->state
->mode
;
4063 crtc
->hwmode
= crtc
->state
->mode
;
4064 } else if (modereset_required(new_state
)) {
4065 DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc
->crtc_id
, acrtc
);
4067 /* i.e. reset mode */
4068 if (old_acrtc_state
->stream
)
4069 remove_stream(adev
, acrtc
, old_acrtc_state
->stream
);
4071 } /* for_each_crtc_in_state() */
4074 * Add streams after required streams from new and replaced streams
4075 * are removed from freesync module
4077 if (adev
->dm
.freesync_module
) {
4078 for (i
= 0; i
< new_crtcs_count
; i
++) {
4079 struct amdgpu_dm_connector
*aconnector
= NULL
;
4081 new_acrtc_state
= to_dm_crtc_state(new_crtcs
[i
]->base
.state
);
4083 new_stream
= new_acrtc_state
->stream
;
4085 amdgpu_dm_find_first_crct_matching_connector(
4087 &new_crtcs
[i
]->base
,
4090 DRM_INFO("Atomic commit: Failed to find connector for acrtc id:%d "
4091 "skipping freesync init\n",
4092 new_crtcs
[i
]->crtc_id
);
4096 mod_freesync_add_stream(adev
->dm
.freesync_module
,
4097 new_stream
, &aconnector
->caps
);
4101 if (dm_state
->context
)
4102 WARN_ON(!dc_commit_state(dm
->dc
, dm_state
->context
));
4105 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
4106 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4108 new_acrtc_state
= to_dm_crtc_state(crtc
->state
);
4110 if (new_acrtc_state
->stream
!= NULL
) {
4111 const struct dc_stream_status
*status
=
4112 dc_stream_get_status(new_acrtc_state
->stream
);
4115 DC_ERR("got no status for stream %p on acrtc%p\n", new_acrtc_state
->stream
, acrtc
);
4117 acrtc
->otg_inst
= status
->primary_otg_inst
;
4121 /* Handle scaling and undersacn changes*/
4122 for_each_connector_in_state(state
, connector
, old_conn_state
, i
) {
4123 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4124 struct dm_connector_state
*con_new_state
=
4125 to_dm_connector_state(aconnector
->base
.state
);
4126 struct dm_connector_state
*con_old_state
=
4127 to_dm_connector_state(old_conn_state
);
4128 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(con_new_state
->base
.crtc
);
4129 struct dc_stream_status
*status
= NULL
;
4131 /* Skip any modesets/resets */
4132 if (!acrtc
|| drm_atomic_crtc_needs_modeset(acrtc
->base
.state
))
4135 /* Skip any thing not scale or underscan changes */
4136 if (!is_scaling_state_different(con_new_state
, con_old_state
))
4139 new_acrtc_state
= to_dm_crtc_state(acrtc
->base
.state
);
4141 update_stream_scaling_settings(&con_new_state
->base
.crtc
->mode
,
4142 con_new_state
, (struct dc_stream_state
*)new_acrtc_state
->stream
);
4144 status
= dc_stream_get_status(new_acrtc_state
->stream
);
4146 WARN_ON(!status
->plane_count
);
4148 if (!new_acrtc_state
->stream
)
4151 /*TODO How it works with MPO ?*/
4152 if (!dc_commit_planes_to_stream(
4154 status
->plane_states
,
4155 status
->plane_count
,
4156 new_acrtc_state
->stream
))
4157 dm_error("%s: Failed to update stream scaling!\n", __func__
);
4160 for (i
= 0; i
< new_crtcs_count
; i
++) {
4162 * loop to enable interrupts on newly arrived crtc
4164 struct amdgpu_crtc
*acrtc
= new_crtcs
[i
];
4166 new_acrtc_state
= to_dm_crtc_state(acrtc
->base
.state
);
4168 if (adev
->dm
.freesync_module
)
4169 mod_freesync_notify_mode_change(
4170 adev
->dm
.freesync_module
, &new_acrtc_state
->stream
, 1);
4172 manage_dm_interrupts(adev
, acrtc
, true);
4175 /* update planes when needed per crtc*/
4176 for_each_crtc_in_state(state
, pcrtc
, old_crtc_state
, j
) {
4177 new_acrtc_state
= to_dm_crtc_state(pcrtc
->state
);
4179 if (new_acrtc_state
->stream
)
4180 amdgpu_dm_commit_planes(state
, dev
, dm
, pcrtc
, &wait_for_vblank
);
4185 * send vblank event on all events not handled in flip and
4186 * mark consumed event for drm_atomic_helper_commit_hw_done
4188 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
4189 for_each_crtc_in_state(state
, crtc
, old_crtc_state
, i
) {
4190 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4192 if (acrtc
->base
.state
->event
)
4193 drm_send_event_locked(dev
, &crtc
->state
->event
->base
);
4195 acrtc
->base
.state
->event
= NULL
;
4197 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
4199 /* Signal HW programming completion */
4200 drm_atomic_helper_commit_hw_done(state
);
4202 if (wait_for_vblank
)
4203 drm_atomic_helper_wait_for_vblanks(dev
, state
);
4205 drm_atomic_helper_cleanup_planes(dev
, state
);
4209 static int dm_force_atomic_commit(struct drm_connector
*connector
)
4212 struct drm_device
*ddev
= connector
->dev
;
4213 struct drm_atomic_state
*state
= drm_atomic_state_alloc(ddev
);
4214 struct amdgpu_crtc
*disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4215 struct drm_plane
*plane
= disconnected_acrtc
->base
.primary
;
4216 struct drm_connector_state
*conn_state
;
4217 struct drm_crtc_state
*crtc_state
;
4218 struct drm_plane_state
*plane_state
;
4223 state
->acquire_ctx
= ddev
->mode_config
.acquire_ctx
;
4225 /* Construct an atomic state to restore previous display setting */
4228 * Attach connectors to drm_atomic_state
4230 conn_state
= drm_atomic_get_connector_state(state
, connector
);
4232 ret
= PTR_ERR_OR_ZERO(conn_state
);
4236 /* Attach crtc to drm_atomic_state*/
4237 crtc_state
= drm_atomic_get_crtc_state(state
, &disconnected_acrtc
->base
);
4239 ret
= PTR_ERR_OR_ZERO(crtc_state
);
4243 /* force a restore */
4244 crtc_state
->mode_changed
= true;
4246 /* Attach plane to drm_atomic_state */
4247 plane_state
= drm_atomic_get_plane_state(state
, plane
);
4249 ret
= PTR_ERR_OR_ZERO(plane_state
);
4254 /* Call commit internally with the state we just constructed */
4255 ret
= drm_atomic_commit(state
);
4260 DRM_ERROR("Restoring old state failed with %i\n", ret
);
4261 drm_atomic_state_put(state
);
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when the same display is unplugged then plugged back into the
 * same port and when we are running without usermode desktop manager support
4271 void dm_restore_drm_connector_state(struct drm_device
*dev
, struct drm_connector
*connector
)
4273 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4274 struct amdgpu_crtc
*disconnected_acrtc
;
4275 struct dm_crtc_state
*acrtc_state
;
4277 if (!aconnector
->dc_sink
|| !connector
->state
|| !connector
->encoder
)
4280 disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4281 acrtc_state
= to_dm_crtc_state(disconnected_acrtc
->base
.state
);
4283 if (!disconnected_acrtc
|| !acrtc_state
->stream
)
4287 * If the previous sink is not released and different from the current,
4288 * we deduce we are in a state where we can not rely on usermode call
4289 * to turn on the display, so we do it here
4291 if (acrtc_state
->stream
->sink
!= aconnector
->dc_sink
)
4292 dm_force_atomic_commit(&aconnector
->base
);
4296 * Grabs all modesetting locks to serialize against any blocking commits,
4297 * Waits for completion of all non blocking commits.
4299 static int do_aquire_global_lock(
4300 struct drm_device
*dev
,
4301 struct drm_atomic_state
*state
)
4303 struct drm_crtc
*crtc
;
4304 struct drm_crtc_commit
*commit
;
4307 /* Adding all modeset locks to aquire_ctx will
4308 * ensure that when the framework release it the
4309 * extra locks we are locking here will get released to
4311 ret
= drm_modeset_lock_all_ctx(dev
, state
->acquire_ctx
);
4315 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
4316 spin_lock(&crtc
->commit_lock
);
4317 commit
= list_first_entry_or_null(&crtc
->commit_list
,
4318 struct drm_crtc_commit
, commit_entry
);
4320 drm_crtc_commit_get(commit
);
4321 spin_unlock(&crtc
->commit_lock
);
4326 /* Make sure all pending HW programming completed and
4329 ret
= wait_for_completion_interruptible_timeout(&commit
->hw_done
, 10*HZ
);
4332 ret
= wait_for_completion_interruptible_timeout(
4333 &commit
->flip_done
, 10*HZ
);
4336 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
4337 "timed out\n", crtc
->base
.id
, crtc
->name
);
4339 drm_crtc_commit_put(commit
);
4342 return ret
< 0 ? ret
: 0;
4345 static int dm_update_crtcs_state(
4347 struct drm_atomic_state
*state
,
4349 bool *lock_and_validation_needed
)
4351 struct drm_crtc
*crtc
;
4352 struct drm_crtc_state
*crtc_state
;
4354 struct dm_crtc_state
*old_acrtc_state
, *new_acrtc_state
;
4355 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4356 struct dc_stream_state
*new_stream
;
4359 /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
4360 /* update changed items */
4361 for_each_crtc_in_state(state
, crtc
, crtc_state
, i
) {
4362 struct amdgpu_crtc
*acrtc
= NULL
;
4363 struct amdgpu_dm_connector
*aconnector
= NULL
;
4364 struct drm_connector_state
*conn_state
= NULL
;
4365 struct dm_connector_state
*dm_conn_state
= NULL
;
4369 old_acrtc_state
= to_dm_crtc_state(crtc
->state
);
4370 new_acrtc_state
= to_dm_crtc_state(crtc_state
);
4371 acrtc
= to_amdgpu_crtc(crtc
);
4373 aconnector
= amdgpu_dm_find_first_crct_matching_connector(state
, crtc
, true);
4375 /* TODO This hack should go away */
4376 if (aconnector
&& aconnector
->dc_sink
) {
4377 conn_state
= drm_atomic_get_connector_state(state
,
4380 if (IS_ERR(conn_state
)) {
4381 ret
= PTR_ERR_OR_ZERO(conn_state
);
4385 dm_conn_state
= to_dm_connector_state(conn_state
);
4387 new_stream
= create_stream_for_sink(aconnector
,
4392 * we can have no stream on ACTION_SET if a display
4393 * was disconnected during S3, in this case it not and
4394 * error, the OS will be updated after detection, and
4395 * do the right thing on next atomic commit
4399 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
4400 __func__
, acrtc
->base
.base
.id
);
4405 if (dc_is_stream_unchanged(new_stream
,
4406 old_acrtc_state
->stream
)) {
4408 crtc_state
->mode_changed
= false;
4410 DRM_DEBUG_KMS("Mode change not required, setting mode_changed to %d",
4411 crtc_state
->mode_changed
);
4415 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
4419 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4420 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4421 "connectors_changed:%d\n",
4425 crtc_state
->planes_changed
,
4426 crtc_state
->mode_changed
,
4427 crtc_state
->active_changed
,
4428 crtc_state
->connectors_changed
);
4430 /* Remove stream for any changed/disabled CRTC */
4433 if (!old_acrtc_state
->stream
)
4436 DRM_DEBUG_KMS("Disabling DRM crtc: %d\n",
4439 /* i.e. reset mode */
4440 if (!dc_remove_stream_from_ctx(
4443 old_acrtc_state
->stream
)) {
4448 dc_stream_release(old_acrtc_state
->stream
);
4449 new_acrtc_state
->stream
= NULL
;
4451 *lock_and_validation_needed
= true;
4453 } else {/* Add stream for any updated/enabled CRTC */
4455 if (modereset_required(crtc_state
))
4458 if (modeset_required(crtc_state
, new_stream
,
4459 old_acrtc_state
->stream
)) {
4461 WARN_ON(new_acrtc_state
->stream
);
4463 new_acrtc_state
->stream
= new_stream
;
4464 dc_stream_retain(new_stream
);
4466 DRM_DEBUG_KMS("Enabling DRM crtc: %d\n",
4469 if (!dc_add_stream_to_ctx(
4472 new_acrtc_state
->stream
)) {
4477 *lock_and_validation_needed
= true;
4482 /* Release extra reference */
4484 dc_stream_release(new_stream
);
4491 dc_stream_release(new_stream
);
4495 static int dm_update_planes_state(
4497 struct drm_atomic_state
*state
,
4499 bool *lock_and_validation_needed
)
4501 struct drm_crtc
*new_plane_crtc
, *old_plane_crtc
;
4502 struct drm_crtc_state
*new_crtc_state
;
4503 struct drm_plane
*plane
;
4504 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
4505 struct dm_crtc_state
*new_acrtc_state
, *old_acrtc_state
;
4506 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4507 struct dm_plane_state
*new_dm_plane_state
, *old_dm_plane_state
;
4509 /* TODO return page_flip_needed() function */
4510 bool pflip_needed
= !state
->allow_modeset
;
4516 /* Add new planes */
4517 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
4518 new_plane_crtc
= new_plane_state
->crtc
;
4519 old_plane_crtc
= old_plane_state
->crtc
;
4520 new_dm_plane_state
= to_dm_plane_state(new_plane_state
);
4521 old_dm_plane_state
= to_dm_plane_state(old_plane_state
);
4523 /*TODO Implement atomic check for cursor plane */
4524 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
4527 /* Remove any changed/removed planes */
4530 if (!old_plane_crtc
)
4533 old_acrtc_state
= to_dm_crtc_state(
4534 drm_atomic_get_old_crtc_state(
4538 if (!old_acrtc_state
->stream
)
4541 DRM_DEBUG_KMS("Disabling DRM plane: %d on DRM crtc %d\n",
4542 plane
->base
.id
, old_plane_crtc
->base
.id
);
4544 if (!dc_remove_plane_from_context(
4546 old_acrtc_state
->stream
,
4547 old_dm_plane_state
->dc_state
,
4548 dm_state
->context
)) {
4555 dc_plane_state_release(old_dm_plane_state
->dc_state
);
4556 new_dm_plane_state
->dc_state
= NULL
;
4558 *lock_and_validation_needed
= true;
4560 } else { /* Add new planes */
4562 if (drm_atomic_plane_disabling(plane
->state
, new_plane_state
))
4565 if (!new_plane_crtc
)
4568 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, new_plane_crtc
);
4569 new_acrtc_state
= to_dm_crtc_state(new_crtc_state
);
4571 if (!new_acrtc_state
->stream
)
4575 WARN_ON(new_dm_plane_state
->dc_state
);
4577 new_dm_plane_state
->dc_state
= dc_create_plane_state(dc
);
4579 DRM_DEBUG_KMS("Enabling DRM plane: %d on DRM crtc %d\n",
4580 plane
->base
.id
, new_plane_crtc
->base
.id
);
4582 if (!new_dm_plane_state
->dc_state
) {
4587 ret
= fill_plane_attributes(
4588 new_plane_crtc
->dev
->dev_private
,
4589 new_dm_plane_state
->dc_state
,
4597 if (!dc_add_plane_to_context(
4599 new_acrtc_state
->stream
,
4600 new_dm_plane_state
->dc_state
,
4601 dm_state
->context
)) {
4607 *lock_and_validation_needed
= true;
4615 int amdgpu_dm_atomic_check(struct drm_device
*dev
,
4616 struct drm_atomic_state
*state
)
4620 struct amdgpu_device
*adev
= dev
->dev_private
;
4621 struct dc
*dc
= adev
->dm
.dc
;
4622 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4623 struct drm_connector
*connector
;
4624 struct drm_connector_state
*conn_state
;
4625 struct drm_crtc
*crtc
;
4626 struct drm_crtc_state
*crtc_state
;
4629 * This bool will be set for true for any modeset/reset
4630 * or plane update which implies non fast surface update.
4632 bool lock_and_validation_needed
= false;
4634 ret
= drm_atomic_helper_check_modeset(dev
, state
);
4637 DRM_ERROR("Atomic state validation failed with error :%d !\n", ret
);
4642 * Hack: Commit needs planes right now, specifically for gamma
4643 * TODO rework commit to check CRTC for gamma change
4645 for_each_crtc_in_state(state
, crtc
, crtc_state
, i
) {
4646 if (crtc_state
->color_mgmt_changed
) {
4647 ret
= drm_atomic_add_affected_planes(state
, crtc
);
4653 dm_state
->context
= dc_create_state();
4654 ASSERT(dm_state
->context
);
4655 dc_resource_state_copy_construct_current(dc
, dm_state
->context
);
4657 /* Remove exiting planes if they are modified */
4658 ret
= dm_update_planes_state(dc
, state
, false, &lock_and_validation_needed
);
4663 /* Disable all crtcs which require disable */
4664 ret
= dm_update_crtcs_state(dc
, state
, false, &lock_and_validation_needed
);
4669 /* Enable all crtcs which require enable */
4670 ret
= dm_update_crtcs_state(dc
, state
, true, &lock_and_validation_needed
);
4675 /* Add new/modified planes */
4676 ret
= dm_update_planes_state(dc
, state
, true, &lock_and_validation_needed
);
4681 /* Run this here since we want to validate the streams we created */
4682 ret
= drm_atomic_helper_check_planes(dev
, state
);
4686 /* Check scaling and undersacn changes*/
4687 /*TODO Removed scaling changes validation due to inability to commit
4688 * new stream into context w\o causing full reset. Need to
4689 * decide how to handle.
4691 for_each_connector_in_state(state
, connector
, conn_state
, i
) {
4692 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4693 struct dm_connector_state
*con_old_state
=
4694 to_dm_connector_state(aconnector
->base
.state
);
4695 struct dm_connector_state
*con_new_state
=
4696 to_dm_connector_state(conn_state
);
4697 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(con_new_state
->base
.crtc
);
4699 /* Skip any modesets/resets */
4700 if (!acrtc
|| drm_atomic_crtc_needs_modeset(acrtc
->base
.state
))
4703 /* Skip any thing not scale or underscan changes */
4704 if (!is_scaling_state_different(con_new_state
, con_old_state
))
4707 lock_and_validation_needed
= true;
4711 * For full updates case when
4712 * removing/adding/updating streams on once CRTC while flipping
4714 * acquiring global lock will guarantee that any such full
4716 * will wait for completion of any outstanding flip using DRMs
4717 * synchronization events.
4720 if (lock_and_validation_needed
) {
4722 ret
= do_aquire_global_lock(dev
, state
);
4726 if (!dc_validate_global_state(dc
, dm_state
->context
)) {
4732 /* Must be success */
4737 if (ret
== -EDEADLK
)
4738 DRM_DEBUG_KMS("Atomic check stopped due to to deadlock.\n");
4739 else if (ret
== -EINTR
|| ret
== -EAGAIN
|| ret
== -ERESTARTSYS
)
4740 DRM_DEBUG_KMS("Atomic check stopped due to to signal.\n");
4742 DRM_ERROR("Atomic check failed with err: %d \n", ret
);
4747 static bool is_dp_capable_without_timing_msa(
4749 struct amdgpu_dm_connector
*amdgpu_dm_connector
)
4752 bool capable
= false;
4754 if (amdgpu_dm_connector
->dc_link
&&
4755 dm_helpers_dp_read_dpcd(
4757 amdgpu_dm_connector
->dc_link
,
4758 DP_DOWN_STREAM_PORT_COUNT
,
4760 sizeof(dpcd_data
))) {
4761 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
4766 void amdgpu_dm_add_sink_to_freesync_module(
4767 struct drm_connector
*connector
,
4771 uint64_t val_capable
;
4772 bool edid_check_required
;
4773 struct detailed_timing
*timing
;
4774 struct detailed_non_pixel
*data
;
4775 struct detailed_data_monitor_range
*range
;
4776 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
4777 to_amdgpu_dm_connector(connector
);
4779 struct drm_device
*dev
= connector
->dev
;
4780 struct amdgpu_device
*adev
= dev
->dev_private
;
4782 edid_check_required
= false;
4783 if (!amdgpu_dm_connector
->dc_sink
) {
4784 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
4787 if (!adev
->dm
.freesync_module
)
4790 * if edid non zero restrict freesync only for dp and edp
4793 if (amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
4794 || amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
4795 edid_check_required
= is_dp_capable_without_timing_msa(
4797 amdgpu_dm_connector
);
4801 if (edid_check_required
== true && (edid
->version
> 1 ||
4802 (edid
->version
== 1 && edid
->revision
> 1))) {
4803 for (i
= 0; i
< 4; i
++) {
4805 timing
= &edid
->detailed_timings
[i
];
4806 data
= &timing
->data
.other_data
;
4807 range
= &data
->data
.range
;
4809 * Check if monitor has continuous frequency mode
4811 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
4814 * Check for flag range limits only. If flag == 1 then
4815 * no additional timing information provided.
4816 * Default GTF, GTF Secondary curve and CVT are not
4819 if (range
->flags
!= 1)
4822 amdgpu_dm_connector
->min_vfreq
= range
->min_vfreq
;
4823 amdgpu_dm_connector
->max_vfreq
= range
->max_vfreq
;
4824 amdgpu_dm_connector
->pixel_clock_mhz
=
4825 range
->pixel_clock_mhz
* 10;
4829 if (amdgpu_dm_connector
->max_vfreq
-
4830 amdgpu_dm_connector
->min_vfreq
> 10) {
4831 amdgpu_dm_connector
->caps
.supported
= true;
4832 amdgpu_dm_connector
->caps
.min_refresh_in_micro_hz
=
4833 amdgpu_dm_connector
->min_vfreq
* 1000000;
4834 amdgpu_dm_connector
->caps
.max_refresh_in_micro_hz
=
4835 amdgpu_dm_connector
->max_vfreq
* 1000000;
4841 * TODO figure out how to notify user-mode or DRM of freesync caps
4842 * once we figure out how to deal with freesync in an upstreamable
4848 void amdgpu_dm_remove_sink_from_freesync_module(
4849 struct drm_connector
*connector
)
4852 * TODO fill in once we figure out how to deal with freesync in
4853 * an upstreamable fashion