2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include "dm_services_types.h"
31 #include "amdgpu_display.h"
33 #include "amdgpu_dm.h"
34 #include "amdgpu_pm.h"
36 #include "amd_shared.h"
37 #include "amdgpu_dm_irq.h"
38 #include "dm_helpers.h"
39 #include "dm_services_types.h"
40 #include "amdgpu_dm_mst_types.h"
42 #include "ivsrcid/ivsrcid_vislands30.h"
44 #include <linux/module.h>
45 #include <linux/moduleparam.h>
46 #include <linux/version.h>
47 #include <linux/types.h>
50 #include <drm/drm_atomic.h>
51 #include <drm/drm_atomic_helper.h>
52 #include <drm/drm_dp_mst_helper.h>
53 #include <drm/drm_fb_helper.h>
54 #include <drm/drm_edid.h>
56 #include "modules/inc/mod_freesync.h"
58 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
59 #include "ivsrcid/irqsrcs_dcn_1_0.h"
61 #include "raven1/DCN/dcn_1_0_offset.h"
62 #include "raven1/DCN/dcn_1_0_sh_mask.h"
63 #include "vega10/soc15ip.h"
65 #include "soc15_common.h"
68 #include "modules/inc/mod_freesync.h"
70 #include "i2caux_interface.h"
/* Default per-plane DRM type table: every exposed plane is a primary
 * plane (no YUV underlay).  NOTE(review): the closing "};" of this
 * initializer is not visible in this extract — confirm against the
 * full file. */
73 static enum drm_plane_type dm_surfaces_type_default
[AMDGPU_MAX_PLANES
] = {
74 DRM_PLANE_TYPE_PRIMARY
,
75 DRM_PLANE_TYPE_PRIMARY
,
76 DRM_PLANE_TYPE_PRIMARY
,
77 DRM_PLANE_TYPE_PRIMARY
,
78 DRM_PLANE_TYPE_PRIMARY
,
79 DRM_PLANE_TYPE_PRIMARY
,
/* Carrizo plane-type table: three primaries plus one overlay used as a
 * YUV-capable underlay.  NOTE(review): closing "};" not visible in this
 * extract.  (Spelling "carizzo" is the identifier used throughout the
 * file; kept as-is.) */
82 static enum drm_plane_type dm_surfaces_type_carizzo
[AMDGPU_MAX_PLANES
] = {
83 DRM_PLANE_TYPE_PRIMARY
,
84 DRM_PLANE_TYPE_PRIMARY
,
85 DRM_PLANE_TYPE_PRIMARY
,
86 DRM_PLANE_TYPE_OVERLAY
,/* YUV Capable Underlay */
/* Stoney plane-type table: two primaries plus one overlay used as a
 * YUV-capable underlay.  NOTE(review): closing "};" not visible in this
 * extract. */
89 static enum drm_plane_type dm_surfaces_type_stoney
[AMDGPU_MAX_PLANES
] = {
90 DRM_PLANE_TYPE_PRIMARY
,
91 DRM_PLANE_TYPE_PRIMARY
,
92 DRM_PLANE_TYPE_OVERLAY
, /* YUV Capable Underlay */
/*
 * dm_vblank_get_counter
 *
 * Get counter for number of vertical blanks on the given CRTC.
 * Returns the DC vblank counter for the CRTC's stream; bails out when
 * the CRTC index is out of range or the stream is NULL.
 * NOTE(review): the comment below names the parameter "disp_idx" but
 * the code uses "crtc" — same value, stale doc name.
 */
96 * dm_vblank_get_counter
99 * Get counter for number of vertical blanks
102 * struct amdgpu_device *adev - [in] desired amdgpu device
103 * int disp_idx - [in] which CRTC to get the counter from
106 * Counter for vertical blanks
108 static u32
dm_vblank_get_counter(struct amdgpu_device
*adev
, int crtc
)
/* Guard against an out-of-range CRTC index.  NOTE(review): the early
 * return for this branch is not visible in this extract. */
110 if (crtc
>= adev
->mode_info
.num_crtc
)
113 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
114 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
/* A CRTC without an attached DC stream has no meaningful counter. */
118 if (acrtc_state
->stream
== NULL
) {
119 DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc
);
123 return dc_stream_get_vblank_counter(acrtc_state
->stream
);
/*
 * dm_crtc_get_scanoutpos - query current scanout position for a CRTC.
 *
 * Asks DC for v_blank_start/end and h/v position, then packs them into
 * the legacy register-style format the base driver expects:
 *   *position = v_position | (h_position << 16)
 *   *vbl      = v_blank_start | (v_blank_end << 16)
 * Rejects CRTC indices outside [0, num_crtc) and NULL streams.
 */
127 static int dm_crtc_get_scanoutpos(struct amdgpu_device
*adev
, int crtc
,
128 u32
*vbl
, u32
*position
)
130 uint32_t v_blank_start
, v_blank_end
, h_position
, v_position
;
/* Bounds-check the CRTC index.  NOTE(review): the early return for
 * this branch is not visible in this extract. */
132 if ((crtc
< 0) || (crtc
>= adev
->mode_info
.num_crtc
))
135 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
136 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
139 if (acrtc_state
->stream
== NULL
) {
140 DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc
);
145 * TODO rework base driver to use values directly.
146 * for now parse it back into reg-format
148 dc_stream_get_scanoutpos(acrtc_state
->stream
,
154 *position
= v_position
| (h_position
<< 16);
155 *vbl
= v_blank_start
| (v_blank_end
<< 16);
/* amd_ip_funcs idle / soft-reset stubs for the DM IP block.  Their
 * bodies are not visible in this extract; presumably trivial
 * placeholders — confirm against the full file. */
161 static bool dm_is_idle(void *handle
)
167 static int dm_wait_for_idle(void *handle
)
173 static bool dm_check_soft_reset(void *handle
)
178 static int dm_soft_reset(void *handle
)
/*
 * get_crtc_by_otg_inst - map an OTG instance number back to its
 * amdgpu_crtc by walking the DRM crtc_list and comparing otg_inst.
 * An otg_inst of -1 falls back to CRTC 0 (legacy behavior inherited
 * from the callers; see TODO comment below).
 */
184 static struct amdgpu_crtc
*get_crtc_by_otg_inst(
185 struct amdgpu_device
*adev
,
188 struct drm_device
*dev
= adev
->ddev
;
189 struct drm_crtc
*crtc
;
190 struct amdgpu_crtc
*amdgpu_crtc
;
193 * following if is check inherited from both functions where this one is
194 * used now. Need to be checked why it could happen.
196 if (otg_inst
== -1) {
198 return adev
->mode_info
.crtcs
[0];
/* Linear search over all CRTCs for a matching OTG instance. */
201 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
202 amdgpu_crtc
= to_amdgpu_crtc(crtc
);
204 if (amdgpu_crtc
->otg_inst
== otg_inst
)
/*
 * dm_pflip_high_irq - page-flip completion interrupt handler.
 *
 * Resolves the CRTC from the IRQ source, and — under the event_lock —
 * verifies a flip was actually submitted, sends the pending vblank
 * event to userspace with an accurate count/timestamp, clears
 * pflip_status, and drops the vblank reference taken at flip submit.
 */
211 static void dm_pflip_high_irq(void *interrupt_params
)
213 struct amdgpu_crtc
*amdgpu_crtc
;
214 struct common_irq_params
*irq_params
= interrupt_params
;
215 struct amdgpu_device
*adev
= irq_params
->adev
;
/* irq_src is biased by IRQ_TYPE_PFLIP; subtract to get the OTG inst. */
218 amdgpu_crtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_PFLIP
);
220 /* IRQ could occur when in initial stage */
221 /*TODO work and BO cleanup */
222 if (amdgpu_crtc
== NULL
) {
223 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
227 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
/* Spurious pflip IRQ (no flip in flight): log and bail. */
229 if (amdgpu_crtc
->pflip_status
!= AMDGPU_FLIP_SUBMITTED
){
230 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
231 amdgpu_crtc
->pflip_status
,
232 AMDGPU_FLIP_SUBMITTED
,
233 amdgpu_crtc
->crtc_id
,
235 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
240 /* wake up userspace */
241 if (amdgpu_crtc
->event
) {
242 /* Update to correct count/ts if racing with vblank irq */
243 drm_crtc_accurate_vblank_count(&amdgpu_crtc
->base
);
245 drm_crtc_send_vblank_event(&amdgpu_crtc
->base
, amdgpu_crtc
->event
);
247 /* page flip completed. clean up */
248 amdgpu_crtc
->event
= NULL
;
253 amdgpu_crtc
->pflip_status
= AMDGPU_FLIP_NONE
;
254 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
256 DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
257 __func__
, amdgpu_crtc
->crtc_id
, amdgpu_crtc
);
/* Balance the drm_crtc_vblank_get() taken when the flip was queued. */
259 drm_crtc_vblank_put(&amdgpu_crtc
->base
);
/*
 * dm_crtc_high_irq - vblank interrupt handler.  Resolves the CRTC from
 * the IRQ source (biased by IRQ_TYPE_VBLANK) and forwards the event to
 * the DRM core via drm_handle_vblank().
 */
262 static void dm_crtc_high_irq(void *interrupt_params
)
264 struct common_irq_params
*irq_params
= interrupt_params
;
265 struct amdgpu_device
*adev
= irq_params
->adev
;
266 uint8_t crtc_index
= 0;
267 struct amdgpu_crtc
*acrtc
;
269 acrtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_VBLANK
);
272 crtc_index
= acrtc
->crtc_id
;
274 drm_handle_vblank(adev
->ddev
, crtc_index
);
/* amd_ip_funcs clock/power-gating stubs for the DM block.  Bodies are
 * not visible in this extract; presumably no-ops returning 0 — confirm
 * against the full file. */
277 static int dm_set_clockgating_state(void *handle
,
278 enum amd_clockgating_state state
)
283 static int dm_set_powergating_state(void *handle
,
284 enum amd_powergating_state state
)
289 /* Prototypes of private functions */
290 static int dm_early_init(void* handle
);
/*
 * hotplug_notify_work_func - deferred-work handler for MST hotplug:
 * recovers the display manager from the embedded work_struct and sends
 * a KMS hotplug uevent to userspace.
 */
292 static void hotplug_notify_work_func(struct work_struct
*work
)
294 struct amdgpu_display_manager
*dm
= container_of(work
, struct amdgpu_display_manager
, mst_hotplug_work
);
295 struct drm_device
*dev
= dm
->ddev
;
297 drm_kms_helper_hotplug_event(dev
);
/*
 * amdgpu_dm_init - bring up the display manager.
 *
 * Initializes locks and DM IRQ handling, fills dc_init_data from the
 * adev (ASIC ids, VRAM width, atombios base), creates the CGS device
 * and the Display Core (dc_create), sets up the freesync module and
 * MST hotplug work, creates the DRM device state, and initializes
 * vblank support.  On failure falls through to amdgpu_dm_fini() for
 * teardown.  NOTE(review): several error-path returns/braces are not
 * visible in this extract.
 */
302 * Returns 0 on success
304 int amdgpu_dm_init(struct amdgpu_device
*adev
)
306 struct dc_init_data init_data
;
307 adev
->dm
.ddev
= adev
->ddev
;
308 adev
->dm
.adev
= adev
;
310 DRM_INFO("DAL is enabled\n");
311 /* Zero all the fields */
312 memset(&init_data
, 0, sizeof(init_data
));
314 /* initialize DAL's lock (for SYNC context use) */
315 spin_lock_init(&adev
->dm
.dal_lock
);
317 /* initialize DAL's mutex */
318 mutex_init(&adev
->dm
.dal_mutex
);
320 if(amdgpu_dm_irq_init(adev
)) {
321 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
/* Describe the ASIC to Display Core. */
325 init_data
.asic_id
.chip_family
= adev
->family
;
327 init_data
.asic_id
.pci_revision_id
= adev
->rev_id
;
328 init_data
.asic_id
.hw_internal_rev
= adev
->external_rev_id
;
330 init_data
.asic_id
.vram_width
= adev
->mc
.vram_width
;
331 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
332 init_data
.asic_id
.atombios_base_address
=
333 adev
->mode_info
.atom_context
->bios
;
335 init_data
.driver
= adev
;
337 adev
->dm
.cgs_device
= amdgpu_cgs_create_device(adev
);
339 if (!adev
->dm
.cgs_device
) {
340 DRM_ERROR("amdgpu: failed to create cgs device.\n");
344 init_data
.cgs_device
= adev
->dm
.cgs_device
;
348 init_data
.dce_environment
= DCE_ENV_PRODUCTION_DRV
;
350 /* Display Core create. */
351 adev
->dm
.dc
= dc_create(&init_data
);
354 DRM_INFO("Display Core failed to initialize!\n");
356 INIT_WORK(&adev
->dm
.mst_hotplug_work
, hotplug_notify_work_func
);
358 adev
->dm
.freesync_module
= mod_freesync_create(adev
->dm
.dc
);
359 if (!adev
->dm
.freesync_module
) {
361 "amdgpu: failed to initialize freesync_module.\n");
363 DRM_INFO("amdgpu: freesync_module init done %p.\n",
364 adev
->dm
.freesync_module
);
366 if (amdgpu_dm_initialize_drm_device(adev
)) {
368 "amdgpu: failed to initialize sw for display support.\n");
372 /* Update the actual used number of crtc */
373 adev
->mode_info
.num_crtc
= adev
->dm
.display_indexes_num
;
375 /* TODO: Add_display_info? */
377 /* TODO use dynamic cursor width */
378 adev
->ddev
->mode_config
.cursor_width
= adev
->dm
.dc
->caps
.max_cursor_size
;
379 adev
->ddev
->mode_config
.cursor_height
= adev
->dm
.dc
->caps
.max_cursor_size
;
381 if (drm_vblank_init(adev
->ddev
, adev
->dm
.display_indexes_num
)) {
383 "amdgpu: failed to initialize sw for display support.\n");
387 DRM_INFO("KMS initialized.\n");
/* Shared error path: unwind everything set up so far. */
391 amdgpu_dm_fini(adev
);
/*
 * amdgpu_dm_fini - tear down the display manager: destroy the DRM
 * device state, the CGS device, the freesync module, and finally the
 * Display Core.  Pointers are NULLed after destruction so a partial
 * init can call this safely.
 */
396 void amdgpu_dm_fini(struct amdgpu_device
*adev
)
398 amdgpu_dm_destroy_drm_device(&adev
->dm
);
400 * TODO: pageflip, vlank interrupt
402 * amdgpu_dm_irq_fini(adev);
405 if (adev
->dm
.cgs_device
) {
406 amdgpu_cgs_destroy_device(adev
->dm
.cgs_device
);
407 adev
->dm
.cgs_device
= NULL
;
409 if (adev
->dm
.freesync_module
) {
410 mod_freesync_destroy(adev
->dm
.freesync_module
);
411 adev
->dm
.freesync_module
= NULL
;
413 /* DC Destroy TODO: Replace destroy DAL */
415 dc_destroy(&adev
->dm
.dc
);
/* Legacy destroy hook plus sw_init/sw_fini IP stubs.  Bodies are not
 * visible in this extract; presumably empty / return 0 — confirm
 * against the full file. */
419 /* moved from amdgpu_dm_kms.c */
420 void amdgpu_dm_destroy()
424 static int dm_sw_init(void *handle
)
429 static int dm_sw_fini(void *handle
)
/*
 * detect_mst_link_for_all_connectors - start MST topology management on
 * every connector whose DC link is an MST branch.  Holds
 * connection_mutex while walking the connector list; on failure to
 * start MST, downgrades the link type to single-stream.
 */
434 static int detect_mst_link_for_all_connectors(struct drm_device
*dev
)
436 struct amdgpu_connector
*aconnector
;
437 struct drm_connector
*connector
;
440 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
442 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
443 aconnector
= to_amdgpu_connector(connector
);
444 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
) {
445 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
446 aconnector
, aconnector
->base
.base
.id
);
448 ret
= drm_dp_mst_topology_mgr_set_mst(&aconnector
->mst_mgr
, true);
450 DRM_ERROR("DM_MST: Failed to start MST\n");
/* Cast away const: demote the link to single-stream DP on failure. */
451 ((struct dc_link
*)aconnector
->dc_link
)->type
= dc_connection_single
;
457 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
/*
 * dm_late_init - late IP-block init: kick off MST link detection for
 * all connectors once the rest of the driver is up.
 */
461 static int dm_late_init(void *handle
)
463 struct drm_device
*dev
= ((struct amdgpu_device
*)handle
)->ddev
;
464 int r
= detect_mst_link_for_all_connectors(dev
);
/*
 * s3_handle_mst - suspend or resume the MST topology managers across an
 * S3 cycle.  Only root MST connectors (mst_branch link type and no
 * mst_port) are touched; suspend==true suspends the manager, otherwise
 * it is resumed.  connection_mutex is held over the walk.
 */
469 static void s3_handle_mst(struct drm_device
*dev
, bool suspend
)
471 struct amdgpu_connector
*aconnector
;
472 struct drm_connector
*connector
;
474 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
476 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
477 aconnector
= to_amdgpu_connector(connector
);
478 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
479 !aconnector
->mst_port
) {
482 drm_dp_mst_topology_mgr_suspend(&aconnector
->mst_mgr
);
484 drm_dp_mst_topology_mgr_resume(&aconnector
->mst_mgr
);
488 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
/*
 * dm_hw_init - hardware init hook: create the display manager and then
 * initialize hotplug-detect (HPD) support.
 */
491 static int dm_hw_init(void *handle
)
493 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
494 /* Create DAL display manager */
495 amdgpu_dm_init(adev
);
496 amdgpu_dm_hpd_init(adev
);
/*
 * dm_hw_fini - hardware teardown hook: reverse of dm_hw_init — disable
 * HPD, tear down DM IRQ handling, then destroy the display manager.
 */
501 static int dm_hw_fini(void *handle
)
503 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
505 amdgpu_dm_hpd_fini(adev
);
507 amdgpu_dm_irq_fini(adev
);
508 amdgpu_dm_fini(adev
);
/*
 * dm_suspend - S3 suspend hook: suspend MST topology managers, quiesce
 * DM interrupts, cache the full atomic state for restore on resume, and
 * (per the trailing D3 constant) put DC into ACPI power state D3.
 * The WARN_ON guards against a leftover cached state from a previous,
 * unbalanced suspend.
 */
512 static int dm_suspend(void *handle
)
514 struct amdgpu_device
*adev
= handle
;
515 struct amdgpu_display_manager
*dm
= &adev
->dm
;
518 s3_handle_mst(adev
->ddev
, true);
520 amdgpu_dm_irq_suspend(adev
);
522 WARN_ON(adev
->dm
.cached_state
);
523 adev
->dm
.cached_state
= drm_atomic_helper_suspend(adev
->ddev
);
/* NOTE(review): the dc_set_power_state() call wrapping this constant is
 * not visible in this extract. */
527 DC_ACPI_CM_POWER_STATE_D3
/*
 * amdgpu_dm_find_first_crct_matching_connector - walk the connectors in
 * an atomic state and return the first one whose current CRTC matches
 * the given crtc.  (Identifier keeps the historical "crct" spelling.)
 */
533 struct amdgpu_connector
*amdgpu_dm_find_first_crct_matching_connector(
534 struct drm_atomic_state
*state
,
535 struct drm_crtc
*crtc
,
539 struct drm_connector_state
*conn_state
;
540 struct drm_connector
*connector
;
541 struct drm_crtc
*crtc_from_state
;
543 for_each_connector_in_state(
551 connector
->state
->crtc
;
553 if (crtc_from_state
== crtc
)
554 return to_amdgpu_connector(connector
);
/*
 * dm_resume - S3 resume hook: powers the hardware back on (per the
 * trailing D0 constant, presumably via dc_set_power_state — the call
 * itself is not visible in this extract).
 */
560 static int dm_resume(void *handle
)
562 struct amdgpu_device
*adev
= handle
;
563 struct amdgpu_display_manager
*dm
= &adev
->dm
;
565 /* power on hardware */
568 DC_ACPI_CM_POWER_STATE_D0
/*
 * amdgpu_dm_display_resume - display-side resume sequence.
 *
 * Re-enables MST (MSTM control bits), restores HPD Rx IRQs early (short
 * pulses are needed for MST), re-detects every non-MST-port connector
 * under its hpd_lock, forces a modeset on all CRTCs in the cached
 * atomic state, restores that state, and finally resumes the remaining
 * DM interrupts.
 */
574 int amdgpu_dm_display_resume(struct amdgpu_device
*adev
)
576 struct drm_device
*ddev
= adev
->ddev
;
577 struct amdgpu_display_manager
*dm
= &adev
->dm
;
578 struct amdgpu_connector
*aconnector
;
579 struct drm_connector
*connector
;
580 struct drm_crtc
*crtc
;
581 struct drm_crtc_state
*crtc_state
;
585 /* program HPD filter */
588 /* On resume we need to rewrite the MSTM control bits to enable MST*/
589 s3_handle_mst(ddev
, false);
592 * early enable HPD Rx IRQ, should be done before set mode as short
593 * pulse interrupts are used for MST
595 amdgpu_dm_irq_resume_early(adev
);
598 list_for_each_entry(connector
,
599 &ddev
->mode_config
.connector_list
, head
) {
600 aconnector
= to_amdgpu_connector(connector
);
603 * this is the case when traversing through already created
604 * MST connectors, should be skipped
606 if (aconnector
->mst_port
)
609 mutex_lock(&aconnector
->hpd_lock
);
610 dc_link_detect(aconnector
->dc_link
, false);
/* Clear the stale sink so after-detect processing re-resolves it. */
611 aconnector
->dc_sink
= NULL
;
612 amdgpu_dm_update_connector_after_detect(aconnector
);
613 mutex_unlock(&aconnector
->hpd_lock
);
616 /* Force mode set in atomic commit */
617 for_each_crtc_in_state(adev
->dm
.cached_state
, crtc
, crtc_state
, i
)
618 crtc_state
->active_changed
= true;
620 ret
= drm_atomic_helper_resume(ddev
, adev
->dm
.cached_state
);
622 drm_atomic_state_put(adev
->dm
.cached_state
);
623 adev
->dm
.cached_state
= NULL
;
625 amdgpu_dm_irq_resume_late(adev
);
/* amd_ip_funcs vtable wiring the DM hooks defined above into the
 * amdgpu IP-block framework.  NOTE(review): .name/.resume entries and
 * the closing "};" are not visible in this extract. */
630 static const struct amd_ip_funcs amdgpu_dm_funcs
= {
632 .early_init
= dm_early_init
,
633 .late_init
= dm_late_init
,
634 .sw_init
= dm_sw_init
,
635 .sw_fini
= dm_sw_fini
,
636 .hw_init
= dm_hw_init
,
637 .hw_fini
= dm_hw_fini
,
638 .suspend
= dm_suspend
,
640 .is_idle
= dm_is_idle
,
641 .wait_for_idle
= dm_wait_for_idle
,
642 .check_soft_reset
= dm_check_soft_reset
,
643 .soft_reset
= dm_soft_reset
,
644 .set_clockgating_state
= dm_set_clockgating_state
,
645 .set_powergating_state
= dm_set_powergating_state
,
/* IP-block descriptor registering the DM as the DCE display block.
 * NOTE(review): major/minor/rev fields and closing "};" are not
 * visible in this extract. */
648 const struct amdgpu_ip_block_version dm_ip_block
=
650 .type
= AMD_IP_BLOCK_TYPE_DCE
,
654 .funcs
= &amdgpu_dm_funcs
,
/*
 * dm_atomic_state_alloc - allocate a DM-private atomic state wrapper
 * and initialize its embedded drm_atomic_state base; frees the wrapper
 * on base-init failure (error path not fully visible in this extract).
 */
658 struct drm_atomic_state
*
659 dm_atomic_state_alloc(struct drm_device
*dev
)
661 struct dm_atomic_state
*state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
663 if (!state
|| drm_atomic_state_init(dev
, &state
->base
) < 0) {
/*
 * dm_atomic_state_clear - release the DC validate context held by the
 * DM atomic state, then fall through to the default DRM clear.
 */
672 dm_atomic_state_clear(struct drm_atomic_state
*state
)
674 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
676 if (dm_state
->context
) {
677 dc_release_validate_context(dm_state
->context
);
678 dm_state
->context
= NULL
;
681 drm_atomic_state_default_clear(state
);
/*
 * dm_atomic_state_alloc_free - counterpart to dm_atomic_state_alloc:
 * default-release the base state (the kfree of dm_state is not visible
 * in this extract).
 */
685 dm_atomic_state_alloc_free(struct drm_atomic_state
*state
)
687 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
688 drm_atomic_state_default_release(state
);
/* DRM mode-config vtable: framebuffer creation, output polling, atomic
 * check/commit, and the DM-private atomic-state lifecycle hooks. */
692 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs
= {
693 .fb_create
= amdgpu_user_framebuffer_create
,
694 .output_poll_changed
= amdgpu_output_poll_changed
,
695 .atomic_check
= amdgpu_dm_atomic_check
,
696 .atomic_commit
= amdgpu_dm_atomic_commit
,
697 .atomic_state_alloc
= dm_atomic_state_alloc
,
698 .atomic_state_clear
= dm_atomic_state_clear
,
699 .atomic_state_free
= dm_atomic_state_alloc_free
/* Mode-config helper vtable: custom commit-tail so DM controls the
 * order of plane/CRTC programming during atomic commits. */
702 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs
= {
703 .atomic_commit_tail
= amdgpu_dm_atomic_commit_tail
/*
 * amdgpu_dm_update_connector_after_detect - sync DRM connector state
 * with the sink DC detected on the link.
 *
 * Handles three cases: (1) forced/emulated-EDID connectors, where the
 * emulated sink is swapped in/out with careful dc_sink refcounting;
 * (2) MST sinks, which are left to the MST framework; (3) ordinary
 * connect/disconnect, where the EDID property, freesync registration,
 * and mode list are updated under mode_config.mutex.
 */
706 void amdgpu_dm_update_connector_after_detect(
707 struct amdgpu_connector
*aconnector
)
709 struct drm_connector
*connector
= &aconnector
->base
;
710 struct drm_device
*dev
= connector
->dev
;
711 struct dc_sink
*sink
;
713 /* MST handled by drm_mst framework */
714 if (aconnector
->mst_mgr
.mst_state
== true)
718 sink
= aconnector
->dc_link
->local_sink
;
720 /* Edid mgmt connector gets first update only in mode_valid hook and then
721 * the connector sink is set to either fake or physical sink depends on link status.
722 * don't do it here if u are during boot
724 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
725 && aconnector
->dc_em_sink
) {
727 /* For S3 resume with headless use eml_sink to fake stream
728 * because on resume connector->sink is set to NULL
730 mutex_lock(&dev
->mode_config
.mutex
);
733 if (aconnector
->dc_sink
) {
734 amdgpu_dm_remove_sink_from_freesync_module(
736 /* retain and release below are used for
737 * bump up refcount for sink because the link don't point
738 * to it anymore after disconnect so on next crtc to connector
739 * reshuffle by UMD we will get into unwanted dc_sink release
741 if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
742 dc_sink_release(aconnector
->dc_sink
);
744 aconnector
->dc_sink
= sink
;
745 amdgpu_dm_add_sink_to_freesync_module(
746 connector
, aconnector
->edid
);
748 amdgpu_dm_remove_sink_from_freesync_module(connector
);
749 if (!aconnector
->dc_sink
)
750 aconnector
->dc_sink
= aconnector
->dc_em_sink
;
751 else if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
752 dc_sink_retain(aconnector
->dc_sink
);
755 mutex_unlock(&dev
->mode_config
.mutex
);
760 * TODO: temporary guard to look for proper fix
761 * if this sink is MST sink, we should not do anything
763 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
)
766 if (aconnector
->dc_sink
== sink
) {
767 /* We got a DP short pulse (Link Loss, DP CTS, etc...).
769 DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
770 aconnector
->connector_id
);
774 DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
775 aconnector
->connector_id
, aconnector
->dc_sink
, sink
);
777 mutex_lock(&dev
->mode_config
.mutex
);
779 /* 1. Update status of the drm connector
780 * 2. Send an event and let userspace tell us what to do */
782 /* TODO: check if we still need the S3 mode update workaround.
783 * If yes, put it here. */
784 if (aconnector
->dc_sink
)
785 amdgpu_dm_remove_sink_from_freesync_module(
/* Connected: adopt the new sink and republish its EDID. */
788 aconnector
->dc_sink
= sink
;
789 if (sink
->dc_edid
.length
== 0)
790 aconnector
->edid
= NULL
;
793 (struct edid
*) sink
->dc_edid
.raw_edid
;
796 drm_mode_connector_update_edid_property(connector
,
799 amdgpu_dm_add_sink_to_freesync_module(connector
, aconnector
->edid
);
/* Disconnected: clear EDID, mode list, and the sink pointer. */
802 amdgpu_dm_remove_sink_from_freesync_module(connector
);
803 drm_mode_connector_update_edid_property(connector
, NULL
);
804 aconnector
->num_modes
= 0;
805 aconnector
->dc_sink
= NULL
;
808 mutex_unlock(&dev
->mode_config
.mutex
);
/*
 * handle_hpd_irq - long-pulse hotplug handler for one connector.
 * Under hpd_lock: re-detects the link, syncs connector state, restores
 * DRM connector state, and (unless the connector is forced) sends a
 * hotplug uevent to userspace.
 */
811 static void handle_hpd_irq(void *param
)
813 struct amdgpu_connector
*aconnector
= (struct amdgpu_connector
*)param
;
814 struct drm_connector
*connector
= &aconnector
->base
;
815 struct drm_device
*dev
= connector
->dev
;
817 /* In case of failure or MST no need to update connector status or notify the OS
818 * since (for MST case) MST does this in its own context.
820 mutex_lock(&aconnector
->hpd_lock
);
821 if (dc_link_detect(aconnector
->dc_link
, false)) {
822 amdgpu_dm_update_connector_after_detect(aconnector
);
825 drm_modeset_lock_all(dev
);
826 dm_restore_drm_connector_state(dev
, connector
);
827 drm_modeset_unlock_all(dev
);
829 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
830 drm_kms_helper_hotplug_event(dev
);
832 mutex_unlock(&aconnector
->hpd_lock
);
/*
 * dm_handle_hpd_rx_irq - drain MST sideband IRQs via the DPCD ESI.
 *
 * Chooses the DPCD IRQ-vector address by DPCD revision (< 1.2 uses
 * DP_SINK_COUNT, otherwise DP_SINK_COUNT_ESI), then loops: read the
 * ESI block, hand it to the MST manager, ACK handled bits back to the
 * sink (retrying the write up to 3 times), and re-read — bounded by
 * max_process_count (30) iterations to avoid spinning forever on a
 * misbehaving sink.
 */
836 static void dm_handle_hpd_rx_irq(struct amdgpu_connector
*aconnector
)
838 uint8_t esi
[DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
] = { 0 };
840 bool new_irq_handled
= false;
842 int dpcd_bytes_to_read
;
844 const int max_process_count
= 30;
845 int process_count
= 0;
847 const struct dc_link_status
*link_status
= dc_link_get_status(aconnector
->dc_link
);
849 if (link_status
->dpcd_caps
->dpcd_rev
.raw
< 0x12) {
850 dpcd_bytes_to_read
= DP_LANE0_1_STATUS
- DP_SINK_COUNT
;
851 /* DPCD 0x200 - 0x201 for downstream IRQ */
852 dpcd_addr
= DP_SINK_COUNT
;
854 dpcd_bytes_to_read
= DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
;
855 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
856 dpcd_addr
= DP_SINK_COUNT_ESI
;
859 dret
= drm_dp_dpcd_read(
860 &aconnector
->dm_dp_aux
.aux
,
/* Keep processing while full reads succeed and there may be more IRQs. */
865 while (dret
== dpcd_bytes_to_read
&&
866 process_count
< max_process_count
) {
872 DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi
[0], esi
[1], esi
[2]);
873 /* handle HPD short pulse irq */
874 if (aconnector
->mst_mgr
.mst_state
)
876 &aconnector
->mst_mgr
,
880 if (new_irq_handled
) {
881 /* ACK at DPCD to notify down stream */
882 const int ack_dpcd_bytes_to_write
=
883 dpcd_bytes_to_read
- 1;
885 for (retry
= 0; retry
< 3; retry
++) {
888 wret
= drm_dp_dpcd_write(
889 &aconnector
->dm_dp_aux
.aux
,
892 ack_dpcd_bytes_to_write
);
893 if (wret
== ack_dpcd_bytes_to_write
)
897 /* check if there is new irq to be handle */
898 dret
= drm_dp_dpcd_read(
899 &aconnector
->dm_dp_aux
.aux
,
904 new_irq_handled
= false;
909 if (process_count
== max_process_count
)
910 DRM_DEBUG_KMS("Loop exceeded max iterations\n");
/*
 * handle_hpd_rx_irq - short-pulse (HPD Rx) handler for one connector.
 *
 * Lets DC process the HPD Rx IRQ; if that reports a downstream-port
 * change on a non-MST-root connector, re-detects the link and notifies
 * userspace.  Then forwards to dm_handle_hpd_rx_irq() for MST sideband
 * processing when the link is trained or is an MST branch.  hpd_lock is
 * taken only for non-MST links (see GPIO-conflict TODO below).
 */
913 static void handle_hpd_rx_irq(void *param
)
915 struct amdgpu_connector
*aconnector
= (struct amdgpu_connector
*)param
;
916 struct drm_connector
*connector
= &aconnector
->base
;
917 struct drm_device
*dev
= connector
->dev
;
918 const struct dc_link
*dc_link
= aconnector
->dc_link
;
919 bool is_mst_root_connector
= aconnector
->mst_mgr
.mst_state
;
921 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
922 * conflict, after implement i2c helper, this mutex should be
925 if (aconnector
->dc_link
->type
!= dc_connection_mst_branch
)
926 mutex_lock(&aconnector
->hpd_lock
);
928 if (dc_link_handle_hpd_rx_irq(aconnector
->dc_link
, NULL
) &&
929 !is_mst_root_connector
) {
930 /* Downstream Port status changed. */
931 if (dc_link_detect(aconnector
->dc_link
, false)) {
932 amdgpu_dm_update_connector_after_detect(aconnector
);
935 drm_modeset_lock_all(dev
);
936 dm_restore_drm_connector_state(dev
, connector
);
937 drm_modeset_unlock_all(dev
);
939 drm_kms_helper_hotplug_event(dev
);
942 if ((dc_link
->cur_link_settings
.lane_count
!= LANE_COUNT_UNKNOWN
) ||
943 (dc_link
->type
== dc_connection_mst_branch
))
944 dm_handle_hpd_rx_irq(aconnector
);
946 if (aconnector
->dc_link
->type
!= dc_connection_mst_branch
)
947 mutex_unlock(&aconnector
->hpd_lock
);
/*
 * register_hpd_handlers - for every connector with a valid DC HPD (and
 * HPD Rx) IRQ source, register handle_hpd_irq / handle_hpd_rx_irq as
 * low-IRQ-context callbacks with the DM interrupt layer, passing the
 * connector as the callback parameter.
 */
950 static void register_hpd_handlers(struct amdgpu_device
*adev
)
952 struct drm_device
*dev
= adev
->ddev
;
953 struct drm_connector
*connector
;
954 struct amdgpu_connector
*aconnector
;
955 const struct dc_link
*dc_link
;
956 struct dc_interrupt_params int_params
= {0};
958 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
959 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
961 list_for_each_entry(connector
,
962 &dev
->mode_config
.connector_list
, head
) {
964 aconnector
= to_amdgpu_connector(connector
);
965 dc_link
= aconnector
->dc_link
;
/* Long-pulse HPD (connect/disconnect). */
967 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd
) {
968 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
969 int_params
.irq_source
= dc_link
->irq_source_hpd
;
971 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
973 (void *) aconnector
);
976 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd_rx
) {
978 /* Also register for DP short pulse (hpd_rx). */
979 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
980 int_params
.irq_source
= dc_link
->irq_source_hpd_rx
;
982 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
984 (void *) aconnector
);
/*
 * dce110_register_irq_handlers - wire up DCE interrupt sources.
 *
 * For each VBLANK source (D1..D6 vertical interrupt 0) and each
 * GRPH_PFLIP source, registers the base-driver IRQ id and then a
 * high-IRQ-context DM callback (dm_crtc_high_irq / dm_pflip_high_irq)
 * keyed by the DC irq_source, storing per-IRQ params (adev + source)
 * in dm.vblank_params / dm.pflip_params.  Finally registers the HPD
 * IRQ id and the per-connector HPD handlers.  Vega10/Raven route
 * through the DCE IH client id instead of the legacy one.
 */
989 /* Register IRQ sources and initialize IRQ callbacks */
990 static int dce110_register_irq_handlers(struct amdgpu_device
*adev
)
992 struct dc
*dc
= adev
->dm
.dc
;
993 struct common_irq_params
*c_irq_params
;
994 struct dc_interrupt_params int_params
= {0};
997 unsigned client_id
= AMDGPU_IH_CLIENTID_LEGACY
;
999 if (adev
->asic_type
== CHIP_VEGA10
||
1000 adev
->asic_type
== CHIP_RAVEN
)
1001 client_id
= AMDGPU_IH_CLIENTID_DCE
;
1003 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1004 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1006 /* Actions of amdgpu_irq_add_id():
1007 * 1. Register a set() function with base driver.
1008 * Base driver will call set() function to enable/disable an
1009 * interrupt in DC hardware.
1010 * 2. Register amdgpu_dm_irq_handler().
1011 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1012 * coming from DC hardware.
1013 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1014 * for acknowledging and handling. */
1016 /* Use VBLANK interrupt */
1017 for (i
= VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
; i
<= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0
; i
++) {
1018 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->crtc_irq
);
1020 DRM_ERROR("Failed to add crtc irq id!\n");
1024 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1025 int_params
.irq_source
=
1026 dc_interrupt_to_irq_source(dc
, i
, 0);
1028 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1030 c_irq_params
->adev
= adev
;
1031 c_irq_params
->irq_src
= int_params
.irq_source
;
1033 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1034 dm_crtc_high_irq
, c_irq_params
);
1037 /* Use GRPH_PFLIP interrupt */
1038 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
1039 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
1040 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
1042 DRM_ERROR("Failed to add page flip irq id!\n");
1046 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1047 int_params
.irq_source
=
1048 dc_interrupt_to_irq_source(dc
, i
, 0);
1050 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1052 c_irq_params
->adev
= adev
;
1053 c_irq_params
->irq_src
= int_params
.irq_source
;
1055 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1056 dm_pflip_high_irq
, c_irq_params
);
/* HPD shares one base-driver IRQ id for all connectors. */
1061 r
= amdgpu_irq_add_id(adev
, client_id
,
1062 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
1064 DRM_ERROR("Failed to add hpd irq id!\n");
1068 register_hpd_handlers(adev
);
/*
 * dcn10_register_irq_handlers - DCN 1.0 (Raven) equivalent of
 * dce110_register_irq_handlers: registers VSTARTUP (vblank), HUBP flip
 * (pflip), and HPD interrupt sources — ranges sized by num_crtc — and
 * attaches the same high-IRQ-context DM callbacks, all through the DCE
 * IH client id.  Compiled only with CONFIG_DRM_AMD_DC_DCN1_0.
 */
1073 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1074 /* Register IRQ sources and initialize IRQ callbacks */
1075 static int dcn10_register_irq_handlers(struct amdgpu_device
*adev
)
1077 struct dc
*dc
= adev
->dm
.dc
;
1078 struct common_irq_params
*c_irq_params
;
1079 struct dc_interrupt_params int_params
= {0};
1083 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1084 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1086 /* Actions of amdgpu_irq_add_id():
1087 * 1. Register a set() function with base driver.
1088 * Base driver will call set() function to enable/disable an
1089 * interrupt in DC hardware.
1090 * 2. Register amdgpu_dm_irq_handler().
1091 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1092 * coming from DC hardware.
1093 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1094 * for acknowledging and handling.
1097 /* Use VSTARTUP interrupt */
1098 for (i
= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
;
1099 i
<= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
+ adev
->mode_info
.num_crtc
- 1;
1101 r
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_DCE
, i
, &adev
->crtc_irq
);
1104 DRM_ERROR("Failed to add crtc irq id!\n");
1108 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1109 int_params
.irq_source
=
1110 dc_interrupt_to_irq_source(dc
, i
, 0);
1112 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1114 c_irq_params
->adev
= adev
;
1115 c_irq_params
->irq_src
= int_params
.irq_source
;
1117 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1118 dm_crtc_high_irq
, c_irq_params
);
1121 /* Use GRPH_PFLIP interrupt */
1122 for (i
= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
;
1123 i
<= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
+ adev
->mode_info
.num_crtc
- 1;
1125 r
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_DCE
, i
, &adev
->pageflip_irq
);
1127 DRM_ERROR("Failed to add page flip irq id!\n");
1131 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1132 int_params
.irq_source
=
1133 dc_interrupt_to_irq_source(dc
, i
, 0);
1135 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1137 c_irq_params
->adev
= adev
;
1138 c_irq_params
->irq_src
= int_params
.irq_source
;
1140 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1141 dm_pflip_high_irq
, c_irq_params
);
1146 r
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_DCE
, DCN_1_0__SRCID__DC_HPD1_INT
,
1149 DRM_ERROR("Failed to add hpd irq id!\n");
1153 register_hpd_handlers(adev
);
/*
 * amdgpu_dm_mode_config_init - configure DRM mode_config for the DM
 * path: install the atomic funcs/helpers, set max framebuffer size
 * (16384x16384), preferred depth/shadow, async (immediate) page flips,
 * the fb aperture base, and create the amdgpu mode-set properties.
 */
1159 static int amdgpu_dm_mode_config_init(struct amdgpu_device
*adev
)
1163 adev
->mode_info
.mode_config_initialized
= true;
1165 adev
->ddev
->mode_config
.funcs
= (void *)&amdgpu_dm_mode_funcs
;
1166 adev
->ddev
->mode_config
.helper_private
= &amdgpu_dm_mode_config_helperfuncs
;
1168 adev
->ddev
->mode_config
.max_width
= 16384;
1169 adev
->ddev
->mode_config
.max_height
= 16384;
1171 adev
->ddev
->mode_config
.preferred_depth
= 24;
1172 adev
->ddev
->mode_config
.prefer_shadow
= 1;
1173 /* indicate support of immediate flip */
1174 adev
->ddev
->mode_config
.async_page_flip
= true;
1176 adev
->ddev
->mode_config
.fb_base
= adev
->mc
.aper_base
;
1178 r
= amdgpu_modeset_create_props(adev
);
1185 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1186 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/*
 * amdgpu_dm_backlight_update_status - backlight_ops hook: push the
 * requested brightness to DC on the stored backlight link.
 */
1188 static int amdgpu_dm_backlight_update_status(struct backlight_device
*bd
)
1190 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
1192 if (dc_link_set_backlight_level(dm
->backlight_link
,
1193 bd
->props
.brightness
, 0, 0))
/*
 * amdgpu_dm_backlight_get_brightness - backlight_ops hook: report the
 * last brightness set via props (no hardware readback).
 */
1199 static int amdgpu_dm_backlight_get_brightness(struct backlight_device
*bd
)
1201 return bd
->props
.brightness
;
/* backlight_ops vtable connecting the two hooks above to the kernel
 * backlight class. */
1204 static const struct backlight_ops amdgpu_dm_backlight_ops
= {
1205 .get_brightness
= amdgpu_dm_backlight_get_brightness
,
1206 .update_status
= amdgpu_dm_backlight_update_status
,
/*
 * amdgpu_dm_register_backlight_device - register an "amdgpu_bl%d"
 * backlight device (RAW type, AMDGPU_MAX_BL_LEVEL max) bound to the
 * display manager; logs success or failure.  Registration failure is
 * non-fatal — the display keeps working without backlight control.
 */
1209 void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager
*dm
)
1212 struct backlight_properties props
= { 0 };
1214 props
.max_brightness
= AMDGPU_MAX_BL_LEVEL
;
1215 props
.type
= BACKLIGHT_RAW
;
1217 snprintf(bl_name
, sizeof(bl_name
), "amdgpu_bl%d",
1218 dm
->adev
->ddev
->primary
->index
);
1220 dm
->backlight_dev
= backlight_device_register(bl_name
,
1221 dm
->adev
->ddev
->dev
,
1223 &amdgpu_dm_backlight_ops
,
1226 if (NULL
== dm
->backlight_dev
)
1227 DRM_ERROR("DM: Backlight registration failed!\n");
1229 DRM_INFO("DM: Registered Backlight device: %s\n", bl_name
);
1234 /* In this architecture, the association
1235 * connector -> encoder -> crtc
1236 * id not really requried. The crtc and connector will hold the
1237 * display_index as an abstraction to use with DAL component
1239 * Returns 0 on success
1241 int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
)
1243 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1245 struct amdgpu_connector
*aconnector
= NULL
;
1246 struct amdgpu_encoder
*aencoder
= NULL
;
1247 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
1249 unsigned long possible_crtcs
;
1251 link_cnt
= dm
->dc
->caps
.max_links
;
1252 if (amdgpu_dm_mode_config_init(dm
->adev
)) {
1253 DRM_ERROR("DM: Failed to initialize mode config\n");
1257 for (i
= 0; i
< dm
->dc
->caps
.max_surfaces
; i
++) {
1258 mode_info
->planes
[i
] = kzalloc(sizeof(struct amdgpu_plane
),
1260 if (!mode_info
->planes
[i
]) {
1261 DRM_ERROR("KMS: Failed to allocate surface\n");
1262 goto fail_free_planes
;
1264 mode_info
->planes
[i
]->base
.type
= mode_info
->plane_type
[i
];
1267 * HACK: IGT tests expect that each plane can only have one
1268 * one possible CRTC. For now, set one CRTC for each
1269 * plane that is not an underlay, but still allow multiple
1270 * CRTCs for underlay planes.
1272 possible_crtcs
= 1 << i
;
1273 if (i
>= dm
->dc
->caps
.max_streams
)
1274 possible_crtcs
= 0xff;
1276 if (amdgpu_dm_plane_init(dm
, mode_info
->planes
[i
], possible_crtcs
)) {
1277 DRM_ERROR("KMS: Failed to initialize plane\n");
1278 goto fail_free_planes
;
1282 for (i
= 0; i
< dm
->dc
->caps
.max_streams
; i
++)
1283 if (amdgpu_dm_crtc_init(dm
, &mode_info
->planes
[i
]->base
, i
)) {
1284 DRM_ERROR("KMS: Failed to initialize crtc\n");
1285 goto fail_free_planes
;
1288 dm
->display_indexes_num
= dm
->dc
->caps
.max_streams
;
1290 /* loops over all connectors on the board */
1291 for (i
= 0; i
< link_cnt
; i
++) {
1293 if (i
> AMDGPU_DM_MAX_DISPLAY_INDEX
) {
1295 "KMS: Cannot support more than %d display indexes\n",
1296 AMDGPU_DM_MAX_DISPLAY_INDEX
);
1300 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
1302 goto fail_free_planes
;
1304 aencoder
= kzalloc(sizeof(*aencoder
), GFP_KERNEL
);
1306 goto fail_free_connector
;
1309 if (amdgpu_dm_encoder_init(dm
->ddev
, aencoder
, i
)) {
1310 DRM_ERROR("KMS: Failed to initialize encoder\n");
1311 goto fail_free_encoder
;
1314 if (amdgpu_dm_connector_init(dm
, aconnector
, i
, aencoder
)) {
1315 DRM_ERROR("KMS: Failed to initialize connector\n");
1316 goto fail_free_encoder
;
1319 if (dc_link_detect(dc_get_link_at_index(dm
->dc
, i
), true))
1320 amdgpu_dm_update_connector_after_detect(aconnector
);
1323 /* Software is initialized. Now we can register interrupt handlers. */
1324 switch (adev
->asic_type
) {
1331 case CHIP_POLARIS11
:
1332 case CHIP_POLARIS10
:
1333 case CHIP_POLARIS12
:
1335 if (dce110_register_irq_handlers(dm
->adev
)) {
1336 DRM_ERROR("DM: Failed to initialize IRQ\n");
1337 goto fail_free_encoder
;
1340 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1342 if (dcn10_register_irq_handlers(dm
->adev
)) {
1343 DRM_ERROR("DM: Failed to initialize IRQ\n");
1344 goto fail_free_encoder
;
1349 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev
->asic_type
);
1350 goto fail_free_encoder
;
1353 drm_mode_config_reset(dm
->ddev
);
1358 fail_free_connector
:
1361 for (i
= 0; i
< dm
->dc
->caps
.max_surfaces
; i
++)
1362 kfree(mode_info
->planes
[i
]);
1366 void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
)
1368 drm_mode_config_cleanup(dm
->ddev
);
1372 /******************************************************************************
1373 * amdgpu_display_funcs functions
1374 *****************************************************************************/
/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
/* Stub backlight setter exposed through amdgpu_display_funcs.
 * NOTE(review): the second parameter (the backlight level) is not visible
 * in this chunk — confirm its type against the file. */
1388 static void dm_set_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
,
1391 /* TODO: translate amdgpu_encoder to display_index and call DAL */
/* Stub backlight getter exposed through amdgpu_display_funcs.
 * NOTE(review): the return statement is not visible in this chunk —
 * presumably returns 0 until DAL translation is implemented; confirm. */
1394 static u8
dm_get_backlight_level(struct amdgpu_encoder
*amdgpu_encoder
)
1396 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1400 static int amdgpu_notify_freesync(struct drm_device
*dev
, void *data
,
1401 struct drm_file
*filp
)
1403 struct mod_freesync_params freesync_params
;
1404 uint8_t num_streams
;
1407 struct amdgpu_device
*adev
= dev
->dev_private
;
1410 /* Get freesync enable flag from DRM */
1412 num_streams
= dc_get_current_stream_count(adev
->dm
.dc
);
1414 for (i
= 0; i
< num_streams
; i
++) {
1415 struct dc_stream
*stream
;
1416 stream
= dc_get_stream_at_index(adev
->dm
.dc
, i
);
1418 mod_freesync_update_state(adev
->dm
.freesync_module
,
1419 &stream
, 1, &freesync_params
);
1425 static const struct amdgpu_display_funcs dm_display_funcs
= {
1426 .bandwidth_update
= dm_bandwidth_update
, /* called unconditionally */
1427 .vblank_get_counter
= dm_vblank_get_counter
,/* called unconditionally */
1428 .vblank_wait
= NULL
,
1429 .backlight_set_level
=
1430 dm_set_backlight_level
,/* called unconditionally */
1431 .backlight_get_level
=
1432 dm_get_backlight_level
,/* called unconditionally */
1433 .hpd_sense
= NULL
,/* called unconditionally */
1434 .hpd_set_polarity
= NULL
, /* called unconditionally */
1435 .hpd_get_gpio_reg
= NULL
, /* VBIOS parsing. DAL does it. */
1436 .page_flip_get_scanoutpos
=
1437 dm_crtc_get_scanoutpos
,/* called unconditionally */
1438 .add_encoder
= NULL
, /* VBIOS parsing. DAL does it. */
1439 .add_connector
= NULL
, /* VBIOS parsing. DAL does it. */
1440 .notify_freesync
= amdgpu_notify_freesync
,
1445 #if defined(CONFIG_DEBUG_KERNEL_DC)
1447 static ssize_t
s3_debug_store(
1448 struct device
*device
,
1449 struct device_attribute
*attr
,
1455 struct pci_dev
*pdev
= to_pci_dev(device
);
1456 struct drm_device
*drm_dev
= pci_get_drvdata(pdev
);
1457 struct amdgpu_device
*adev
= drm_dev
->dev_private
;
1459 ret
= kstrtoint(buf
, 0, &s3_state
);
1464 amdgpu_dm_display_resume(adev
);
1465 drm_kms_helper_hotplug_event(adev
->ddev
);
1470 return ret
== 0 ? count
: 0;
1473 DEVICE_ATTR_WO(s3_debug
);
1477 static int dm_early_init(void *handle
)
1479 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1481 adev
->ddev
->driver
->driver_features
|= DRIVER_ATOMIC
;
1482 amdgpu_dm_set_irq_funcs(adev
);
1484 switch (adev
->asic_type
) {
1487 adev
->mode_info
.num_crtc
= 6;
1488 adev
->mode_info
.num_hpd
= 6;
1489 adev
->mode_info
.num_dig
= 6;
1490 adev
->mode_info
.plane_type
= dm_surfaces_type_default
;
1494 adev
->mode_info
.num_crtc
= 6;
1495 adev
->mode_info
.num_hpd
= 6;
1496 adev
->mode_info
.num_dig
= 7;
1497 adev
->mode_info
.plane_type
= dm_surfaces_type_default
;
1500 adev
->mode_info
.num_crtc
= 3;
1501 adev
->mode_info
.num_hpd
= 6;
1502 adev
->mode_info
.num_dig
= 9;
1503 adev
->mode_info
.plane_type
= dm_surfaces_type_carizzo
;
1506 adev
->mode_info
.num_crtc
= 2;
1507 adev
->mode_info
.num_hpd
= 6;
1508 adev
->mode_info
.num_dig
= 9;
1509 adev
->mode_info
.plane_type
= dm_surfaces_type_stoney
;
1511 case CHIP_POLARIS11
:
1512 case CHIP_POLARIS12
:
1513 adev
->mode_info
.num_crtc
= 5;
1514 adev
->mode_info
.num_hpd
= 5;
1515 adev
->mode_info
.num_dig
= 5;
1516 adev
->mode_info
.plane_type
= dm_surfaces_type_default
;
1518 case CHIP_POLARIS10
:
1519 adev
->mode_info
.num_crtc
= 6;
1520 adev
->mode_info
.num_hpd
= 6;
1521 adev
->mode_info
.num_dig
= 6;
1522 adev
->mode_info
.plane_type
= dm_surfaces_type_default
;
1525 adev
->mode_info
.num_crtc
= 6;
1526 adev
->mode_info
.num_hpd
= 6;
1527 adev
->mode_info
.num_dig
= 6;
1528 adev
->mode_info
.plane_type
= dm_surfaces_type_default
;
1530 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1532 adev
->mode_info
.num_crtc
= 4;
1533 adev
->mode_info
.num_hpd
= 4;
1534 adev
->mode_info
.num_dig
= 4;
1535 adev
->mode_info
.plane_type
= dm_surfaces_type_default
;
1539 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev
->asic_type
);
1543 if (adev
->mode_info
.funcs
== NULL
)
1544 adev
->mode_info
.funcs
= &dm_display_funcs
;
1546 /* Note: Do NOT change adev->audio_endpt_rreg and
1547 * adev->audio_endpt_wreg because they are initialised in
1548 * amdgpu_device_init() */
1549 #if defined(CONFIG_DEBUG_KERNEL_DC)
1552 &dev_attr_s3_debug
);
/* Take the DAL lock for this display manager.
 * NOTE(review): the function body is not visible in this chunk —
 * presumably a "/ * TODO * / return true;" stub like its release
 * counterpart; confirm against the file. */
1558 bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager
*dm
)
1564 bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager
*dm
)
1566 /* TODO */ return true;
1570 struct dm_connector_state
{
1571 struct drm_connector_state base
;
1573 enum amdgpu_rmx_type scaling
;
1574 uint8_t underscan_vborder
;
1575 uint8_t underscan_hborder
;
1576 bool underscan_enable
;
1579 #define to_dm_connector_state(x)\
1580 container_of((x), struct dm_connector_state, base)
1582 static bool modeset_required(struct drm_crtc_state
*crtc_state
)
1584 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1587 if (!crtc_state
->enable
)
1590 return crtc_state
->active
;
1593 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
1595 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
1598 return !crtc_state
->enable
|| !crtc_state
->active
;
/* drm_encoder_funcs.destroy callback: unregister the encoder from DRM core.
 * NOTE(review): a kfree() of the containing amdgpu_encoder is not visible
 * in this chunk — confirm the allocation is released elsewhere before
 * assuming cleanup here is complete. */
1601 void amdgpu_dm_encoder_destroy(struct drm_encoder
*encoder
)
1603 drm_encoder_cleanup(encoder
);
1607 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
1608 .destroy
= amdgpu_dm_encoder_destroy
,
1611 static void dm_set_cursor(
1612 struct amdgpu_crtc
*amdgpu_crtc
,
1617 struct dc_cursor_attributes attributes
;
1618 struct dc_cursor_position position
;
1619 struct drm_crtc
*crtc
= &amdgpu_crtc
->base
;
1621 int xorigin
= 0, yorigin
= 0;
1622 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
1624 amdgpu_crtc
->cursor_width
= width
;
1625 amdgpu_crtc
->cursor_height
= height
;
1627 attributes
.address
.high_part
= upper_32_bits(gpu_addr
);
1628 attributes
.address
.low_part
= lower_32_bits(gpu_addr
);
1629 attributes
.width
= width
;
1630 attributes
.height
= height
;
1631 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
1632 attributes
.rotation_angle
= 0;
1633 attributes
.attribute_flags
.value
= 0;
1635 attributes
.pitch
= attributes
.width
;
1637 x
= amdgpu_crtc
->cursor_x
;
1638 y
= amdgpu_crtc
->cursor_y
;
1640 /* avivo cursor are offset into the total surface */
1641 x
+= crtc
->primary
->state
->src_x
>> 16;
1642 y
+= crtc
->primary
->state
->src_y
>> 16;
1645 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
1649 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
1653 position
.enable
= true;
1657 position
.x_hotspot
= xorigin
;
1658 position
.y_hotspot
= yorigin
;
1660 if (!dc_stream_set_cursor_attributes(
1661 acrtc_state
->stream
,
1663 DRM_ERROR("DC failed to set cursor attributes\n");
1666 if (!dc_stream_set_cursor_position(
1667 acrtc_state
->stream
,
1669 DRM_ERROR("DC failed to set cursor position\n");
/* Legacy cursor-set entry point for the CRTC: a zero BO address hides the
 * cursor (position.enable = false pushed to DC when a stream exists);
 * otherwise the size is validated against max_cursor_{width,height} and
 * the new cursor BO is programmed via dm_set_cursor().
 * NOTE(review): the address/width/height parameters, the early-return path
 * and the return value handling are not visible in this chunk — rewrite
 * deferred; confirm against the file. */
1673 static int dm_crtc_cursor_set(
1674 struct drm_crtc
*crtc
,
1679 struct dc_cursor_position position
;
1680 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
1684 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
1688 "%s: crtc_id=%d with size %d to %d \n",
1690 amdgpu_crtc
->crtc_id
,
1695 /* turn off cursor */
1696 position
.enable
= false;
1700 if (acrtc_state
->stream
) {
1701 /*set cursor visible false*/
1702 dc_stream_set_cursor_position(
1703 acrtc_state
->stream
,
1710 if ((width
> amdgpu_crtc
->max_cursor_width
) ||
1711 (height
> amdgpu_crtc
->max_cursor_height
)) {
1713 "%s: bad cursor width or height %d x %d\n",
1720 /*program new cursor bo to hardware*/
1721 dm_set_cursor(amdgpu_crtc
, address
, width
, height
);
1728 static int dm_crtc_cursor_move(struct drm_crtc
*crtc
,
1731 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
1732 int xorigin
= 0, yorigin
= 0;
1733 struct dc_cursor_position position
;
1734 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
1736 amdgpu_crtc
->cursor_x
= x
;
1737 amdgpu_crtc
->cursor_y
= y
;
1739 /* avivo cursor are offset into the total surface */
1740 x
+= crtc
->primary
->state
->src_x
>> 16;
1741 y
+= crtc
->primary
->state
->src_y
>> 16;
1744 * TODO: for cursor debugging unguard the following
1748 "%s: x %d y %d c->x %d c->y %d\n",
1757 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
1761 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
1765 position
.enable
= true;
1769 position
.x_hotspot
= xorigin
;
1770 position
.y_hotspot
= yorigin
;
1772 if (acrtc_state
->stream
) {
1773 if (!dc_stream_set_cursor_position(
1774 acrtc_state
->stream
,
1776 DRM_ERROR("DC failed to set cursor position\n");
1784 static bool fill_rects_from_plane_state(
1785 const struct drm_plane_state
*state
,
1786 struct dc_surface
*surface
)
1788 surface
->src_rect
.x
= state
->src_x
>> 16;
1789 surface
->src_rect
.y
= state
->src_y
>> 16;
1790 /*we ignore for now mantissa and do not to deal with floating pixels :(*/
1791 surface
->src_rect
.width
= state
->src_w
>> 16;
1793 if (surface
->src_rect
.width
== 0)
1796 surface
->src_rect
.height
= state
->src_h
>> 16;
1797 if (surface
->src_rect
.height
== 0)
1800 surface
->dst_rect
.x
= state
->crtc_x
;
1801 surface
->dst_rect
.y
= state
->crtc_y
;
1803 if (state
->crtc_w
== 0)
1806 surface
->dst_rect
.width
= state
->crtc_w
;
1808 if (state
->crtc_h
== 0)
1811 surface
->dst_rect
.height
= state
->crtc_h
;
1813 surface
->clip_rect
= surface
->dst_rect
;
1815 switch (state
->rotation
& DRM_MODE_ROTATE_MASK
) {
1816 case DRM_MODE_ROTATE_0
:
1817 surface
->rotation
= ROTATION_ANGLE_0
;
1819 case DRM_MODE_ROTATE_90
:
1820 surface
->rotation
= ROTATION_ANGLE_90
;
1822 case DRM_MODE_ROTATE_180
:
1823 surface
->rotation
= ROTATION_ANGLE_180
;
1825 case DRM_MODE_ROTATE_270
:
1826 surface
->rotation
= ROTATION_ANGLE_270
;
1829 surface
->rotation
= ROTATION_ANGLE_0
;
1835 static int get_fb_info(
1836 const struct amdgpu_framebuffer
*amdgpu_fb
,
1837 uint64_t *tiling_flags
,
1838 uint64_t *fb_location
)
1840 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->obj
);
1841 int r
= amdgpu_bo_reserve(rbo
, false);
1843 DRM_ERROR("Unable to reserve buffer\n");
1848 *fb_location
= amdgpu_bo_gpu_offset(rbo
);
1851 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
1853 amdgpu_bo_unreserve(rbo
);
1858 static int fill_plane_attributes_from_fb(
1859 struct amdgpu_device
*adev
,
1860 struct dc_surface
*surface
,
1861 const struct amdgpu_framebuffer
*amdgpu_fb
, bool addReq
)
1863 uint64_t tiling_flags
;
1864 uint64_t fb_location
= 0;
1865 unsigned int awidth
;
1866 const struct drm_framebuffer
*fb
= &amdgpu_fb
->base
;
1868 struct drm_format_name_buf format_name
;
1873 addReq
== true ? &fb_location
:NULL
);
1878 switch (fb
->format
->format
) {
1880 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
1882 case DRM_FORMAT_RGB565
:
1883 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
1885 case DRM_FORMAT_XRGB8888
:
1886 case DRM_FORMAT_ARGB8888
:
1887 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
1889 case DRM_FORMAT_XRGB2101010
:
1890 case DRM_FORMAT_ARGB2101010
:
1891 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
1893 case DRM_FORMAT_XBGR2101010
:
1894 case DRM_FORMAT_ABGR2101010
:
1895 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
1897 case DRM_FORMAT_NV21
:
1898 surface
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
1900 case DRM_FORMAT_NV12
:
1901 surface
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
1904 DRM_ERROR("Unsupported screen format %s\n",
1905 drm_get_format_name(fb
->format
->format
, &format_name
));
1909 if (surface
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
1910 surface
->address
.type
= PLN_ADDR_TYPE_GRAPHICS
;
1911 surface
->address
.grph
.addr
.low_part
= lower_32_bits(fb_location
);
1912 surface
->address
.grph
.addr
.high_part
= upper_32_bits(fb_location
);
1913 surface
->plane_size
.grph
.surface_size
.x
= 0;
1914 surface
->plane_size
.grph
.surface_size
.y
= 0;
1915 surface
->plane_size
.grph
.surface_size
.width
= fb
->width
;
1916 surface
->plane_size
.grph
.surface_size
.height
= fb
->height
;
1917 surface
->plane_size
.grph
.surface_pitch
=
1918 fb
->pitches
[0] / fb
->format
->cpp
[0];
1919 /* TODO: unhardcode */
1920 surface
->color_space
= COLOR_SPACE_SRGB
;
1923 awidth
= ALIGN(fb
->width
, 64);
1924 surface
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
1925 surface
->address
.video_progressive
.luma_addr
.low_part
1926 = lower_32_bits(fb_location
);
1927 surface
->address
.video_progressive
.chroma_addr
.low_part
1928 = lower_32_bits(fb_location
) +
1929 (awidth
* fb
->height
);
1930 surface
->plane_size
.video
.luma_size
.x
= 0;
1931 surface
->plane_size
.video
.luma_size
.y
= 0;
1932 surface
->plane_size
.video
.luma_size
.width
= awidth
;
1933 surface
->plane_size
.video
.luma_size
.height
= fb
->height
;
1934 /* TODO: unhardcode */
1935 surface
->plane_size
.video
.luma_pitch
= awidth
;
1937 surface
->plane_size
.video
.chroma_size
.x
= 0;
1938 surface
->plane_size
.video
.chroma_size
.y
= 0;
1939 surface
->plane_size
.video
.chroma_size
.width
= awidth
;
1940 surface
->plane_size
.video
.chroma_size
.height
= fb
->height
;
1941 surface
->plane_size
.video
.chroma_pitch
= awidth
/ 2;
1943 /* TODO: unhardcode */
1944 surface
->color_space
= COLOR_SPACE_YCBCR709
;
1947 memset(&surface
->tiling_info
, 0, sizeof(surface
->tiling_info
));
1949 /* Fill GFX params */
1950 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
)
1952 unsigned bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
1954 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
1955 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
1956 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
1957 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
1958 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
1960 /* XXX fix me for VI */
1961 surface
->tiling_info
.gfx8
.num_banks
= num_banks
;
1962 surface
->tiling_info
.gfx8
.array_mode
=
1963 DC_ARRAY_2D_TILED_THIN1
;
1964 surface
->tiling_info
.gfx8
.tile_split
= tile_split
;
1965 surface
->tiling_info
.gfx8
.bank_width
= bankw
;
1966 surface
->tiling_info
.gfx8
.bank_height
= bankh
;
1967 surface
->tiling_info
.gfx8
.tile_aspect
= mtaspect
;
1968 surface
->tiling_info
.gfx8
.tile_mode
=
1969 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
1970 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
1971 == DC_ARRAY_1D_TILED_THIN1
) {
1972 surface
->tiling_info
.gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
1975 surface
->tiling_info
.gfx8
.pipe_config
=
1976 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
1978 if (adev
->asic_type
== CHIP_VEGA10
||
1979 adev
->asic_type
== CHIP_RAVEN
) {
1980 /* Fill GFX9 params */
1981 surface
->tiling_info
.gfx9
.num_pipes
=
1982 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
1983 surface
->tiling_info
.gfx9
.num_banks
=
1984 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
1985 surface
->tiling_info
.gfx9
.pipe_interleave
=
1986 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
1987 surface
->tiling_info
.gfx9
.num_shader_engines
=
1988 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
1989 surface
->tiling_info
.gfx9
.max_compressed_frags
=
1990 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
1991 surface
->tiling_info
.gfx9
.num_rb_per_se
=
1992 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
1993 surface
->tiling_info
.gfx9
.swizzle
=
1994 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
1995 surface
->tiling_info
.gfx9
.shaderEnable
= 1;
1998 surface
->visible
= true;
1999 surface
->scaling_quality
.h_taps_c
= 0;
2000 surface
->scaling_quality
.v_taps_c
= 0;
2002 /* is this needed? is surface zeroed at allocation? */
2003 surface
->scaling_quality
.h_taps
= 0;
2004 surface
->scaling_quality
.v_taps
= 0;
2005 surface
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
2011 #define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256
2013 static void fill_gamma_from_crtc_state(
2014 const struct drm_crtc_state
*crtc_state
,
2015 struct dc_surface
*dc_surface
)
2018 struct dc_gamma
*gamma
;
2019 struct drm_color_lut
*lut
= (struct drm_color_lut
*) crtc_state
->gamma_lut
->data
;
2021 gamma
= dc_create_gamma();
2023 if (gamma
== NULL
) {
2028 for (i
= 0; i
< NUM_OF_RAW_GAMMA_RAMP_RGB_256
; i
++) {
2029 gamma
->red
[i
] = lut
[i
].red
;
2030 gamma
->green
[i
] = lut
[i
].green
;
2031 gamma
->blue
[i
] = lut
[i
].blue
;
2034 dc_surface
->gamma_correction
= gamma
;
2037 static int fill_plane_attributes(
2038 struct amdgpu_device
*adev
,
2039 struct dc_surface
*surface
,
2040 struct drm_plane_state
*plane_state
,
2041 struct drm_crtc_state
*crtc_state
,
2044 const struct amdgpu_framebuffer
*amdgpu_fb
=
2045 to_amdgpu_framebuffer(plane_state
->fb
);
2046 const struct drm_crtc
*crtc
= plane_state
->crtc
;
2047 struct dc_transfer_func
*input_tf
;
2050 if (!fill_rects_from_plane_state(plane_state
, surface
))
2053 ret
= fill_plane_attributes_from_fb(
2054 crtc
->dev
->dev_private
,
2062 input_tf
= dc_create_transfer_func();
2064 if (input_tf
== NULL
)
2067 input_tf
->type
= TF_TYPE_PREDEFINED
;
2068 input_tf
->tf
= TRANSFER_FUNCTION_SRGB
;
2070 surface
->in_transfer_func
= input_tf
;
2072 /* In case of gamma set, update gamma value */
2073 if (crtc_state
->gamma_lut
)
2074 fill_gamma_from_crtc_state(crtc_state
, surface
);
2079 /*****************************************************************************/
2081 struct amdgpu_connector
*aconnector_from_drm_crtc_id(
2082 const struct drm_crtc
*crtc
)
2084 struct drm_device
*dev
= crtc
->dev
;
2085 struct drm_connector
*connector
;
2086 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2087 struct amdgpu_connector
*aconnector
;
2089 list_for_each_entry(connector
,
2090 &dev
->mode_config
.connector_list
, head
) {
2092 aconnector
= to_amdgpu_connector(connector
);
2094 if (aconnector
->base
.state
->crtc
!= &acrtc
->base
)
2097 /* Found the connector */
2101 /* If we get here, not found. */
2105 static void update_stream_scaling_settings(
2106 const struct drm_display_mode
*mode
,
2107 const struct dm_connector_state
*dm_state
,
2108 struct dc_stream
*stream
)
2110 enum amdgpu_rmx_type rmx_type
;
2112 struct rect src
= { 0 }; /* viewport in composition space*/
2113 struct rect dst
= { 0 }; /* stream addressable area */
2115 /* no mode. nothing to be done */
2119 /* Full screen scaling by default */
2120 src
.width
= mode
->hdisplay
;
2121 src
.height
= mode
->vdisplay
;
2122 dst
.width
= stream
->timing
.h_addressable
;
2123 dst
.height
= stream
->timing
.v_addressable
;
2125 rmx_type
= dm_state
->scaling
;
2126 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
2127 if (src
.width
* dst
.height
<
2128 src
.height
* dst
.width
) {
2129 /* height needs less upscaling/more downscaling */
2130 dst
.width
= src
.width
*
2131 dst
.height
/ src
.height
;
2133 /* width needs less upscaling/more downscaling */
2134 dst
.height
= src
.height
*
2135 dst
.width
/ src
.width
;
2137 } else if (rmx_type
== RMX_CENTER
) {
2141 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
2142 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
2144 if (dm_state
->underscan_enable
) {
2145 dst
.x
+= dm_state
->underscan_hborder
/ 2;
2146 dst
.y
+= dm_state
->underscan_vborder
/ 2;
2147 dst
.width
-= dm_state
->underscan_hborder
;
2148 dst
.height
-= dm_state
->underscan_vborder
;
2154 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
2155 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
2159 static enum dc_color_depth
convert_color_depth_from_display_info(
2160 const struct drm_connector
*connector
)
2162 uint32_t bpc
= connector
->display_info
.bpc
;
2164 /* Limited color depth to 8bit
2165 * TODO: Still need to handle deep color*/
2171 /* Temporary Work around, DRM don't parse color depth for
2172 * EDID revision before 1.4
2173 * TODO: Fix edid parsing
2175 return COLOR_DEPTH_888
;
2177 return COLOR_DEPTH_666
;
2179 return COLOR_DEPTH_888
;
2181 return COLOR_DEPTH_101010
;
2183 return COLOR_DEPTH_121212
;
2185 return COLOR_DEPTH_141414
;
2187 return COLOR_DEPTH_161616
;
2189 return COLOR_DEPTH_UNDEFINED
;
2193 static enum dc_aspect_ratio
get_aspect_ratio(
2194 const struct drm_display_mode
*mode_in
)
2196 int32_t width
= mode_in
->crtc_hdisplay
* 9;
2197 int32_t height
= mode_in
->crtc_vdisplay
* 16;
2198 if ((width
- height
) < 10 && (width
- height
) > -10)
2199 return ASPECT_RATIO_16_9
;
2201 return ASPECT_RATIO_4_3
;
2204 static enum dc_color_space
get_output_color_space(
2205 const struct dc_crtc_timing
*dc_crtc_timing
)
2207 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
2209 switch (dc_crtc_timing
->pixel_encoding
) {
2210 case PIXEL_ENCODING_YCBCR422
:
2211 case PIXEL_ENCODING_YCBCR444
:
2212 case PIXEL_ENCODING_YCBCR420
:
2215 * 27030khz is the separation point between HDTV and SDTV
2216 * according to HDMI spec, we use YCbCr709 and YCbCr601
2219 if (dc_crtc_timing
->pix_clk_khz
> 27030) {
2220 if (dc_crtc_timing
->flags
.Y_ONLY
)
2222 COLOR_SPACE_YCBCR709_LIMITED
;
2224 color_space
= COLOR_SPACE_YCBCR709
;
2226 if (dc_crtc_timing
->flags
.Y_ONLY
)
2228 COLOR_SPACE_YCBCR601_LIMITED
;
2230 color_space
= COLOR_SPACE_YCBCR601
;
2235 case PIXEL_ENCODING_RGB
:
2236 color_space
= COLOR_SPACE_SRGB
;
2247 /*****************************************************************************/
2249 static void fill_stream_properties_from_drm_display_mode(
2250 struct dc_stream
*stream
,
2251 const struct drm_display_mode
*mode_in
,
2252 const struct drm_connector
*connector
)
2254 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
2255 memset(timing_out
, 0, sizeof(struct dc_crtc_timing
));
2257 timing_out
->h_border_left
= 0;
2258 timing_out
->h_border_right
= 0;
2259 timing_out
->v_border_top
= 0;
2260 timing_out
->v_border_bottom
= 0;
2261 /* TODO: un-hardcode */
2263 if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
2264 && stream
->sink
->sink_signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
2265 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
2267 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
2269 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
2270 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
2272 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
2273 timing_out
->hdmi_vic
= 0;
2274 timing_out
->vic
= drm_match_cea_mode(mode_in
);
2276 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
2277 timing_out
->h_total
= mode_in
->crtc_htotal
;
2278 timing_out
->h_sync_width
=
2279 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
2280 timing_out
->h_front_porch
=
2281 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
2282 timing_out
->v_total
= mode_in
->crtc_vtotal
;
2283 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
2284 timing_out
->v_front_porch
=
2285 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
2286 timing_out
->v_sync_width
=
2287 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
2288 timing_out
->pix_clk_khz
= mode_in
->crtc_clock
;
2289 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
2290 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
2291 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
2292 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
2293 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
2295 stream
->output_color_space
= get_output_color_space(timing_out
);
2298 struct dc_transfer_func
*tf
= dc_create_transfer_func();
2299 tf
->type
= TF_TYPE_PREDEFINED
;
2300 tf
->tf
= TRANSFER_FUNCTION_SRGB
;
2301 stream
->out_transfer_func
= tf
;
2305 static void fill_audio_info(
2306 struct audio_info
*audio_info
,
2307 const struct drm_connector
*drm_connector
,
2308 const struct dc_sink
*dc_sink
)
2311 int cea_revision
= 0;
2312 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
2314 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
2315 audio_info
->product_id
= edid_caps
->product_id
;
2317 cea_revision
= drm_connector
->display_info
.cea_rev
;
2319 while (i
< AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
&&
2320 edid_caps
->display_name
[i
]) {
2321 audio_info
->display_name
[i
] = edid_caps
->display_name
[i
];
2325 if(cea_revision
>= 3) {
2326 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
2328 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
2329 audio_info
->modes
[i
].format_code
=
2330 (enum audio_format_code
)
2331 (edid_caps
->audio_modes
[i
].format_code
);
2332 audio_info
->modes
[i
].channel_count
=
2333 edid_caps
->audio_modes
[i
].channel_count
;
2334 audio_info
->modes
[i
].sample_rates
.all
=
2335 edid_caps
->audio_modes
[i
].sample_rate
;
2336 audio_info
->modes
[i
].sample_size
=
2337 edid_caps
->audio_modes
[i
].sample_size
;
2341 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
2343 /* TODO: We only check for the progressive mode, check for interlace mode too */
2344 if(drm_connector
->latency_present
[0]) {
2345 audio_info
->video_latency
= drm_connector
->video_latency
[0];
2346 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
2349 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2353 static void copy_crtc_timing_for_drm_display_mode(
2354 const struct drm_display_mode
*src_mode
,
2355 struct drm_display_mode
*dst_mode
)
2357 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
2358 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
2359 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
2360 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
2361 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
2362 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
2363 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
2364 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
2365 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
2366 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
2367 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
2368 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
2369 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
2370 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
2373 static void decide_crtc_timing_for_drm_display_mode(
2374 struct drm_display_mode
*drm_mode
,
2375 const struct drm_display_mode
*native_mode
,
2378 if (scale_enabled
) {
2379 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2380 } else if (native_mode
->clock
== drm_mode
->clock
&&
2381 native_mode
->htotal
== drm_mode
->htotal
&&
2382 native_mode
->vtotal
== drm_mode
->vtotal
) {
2383 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2385 /* no scaling nor amdgpu inserted, no need to patch */
2389 static struct dc_stream
*create_stream_for_sink(
2390 struct amdgpu_connector
*aconnector
,
2391 const struct drm_display_mode
*drm_mode
,
2392 const struct dm_connector_state
*dm_state
)
2394 struct drm_display_mode
*preferred_mode
= NULL
;
2395 const struct drm_connector
*drm_connector
;
2396 struct dc_stream
*stream
= NULL
;
2397 struct drm_display_mode mode
= *drm_mode
;
2398 bool native_mode_found
= false;
2400 if (NULL
== aconnector
) {
2401 DRM_ERROR("aconnector is NULL!\n");
2402 goto drm_connector_null
;
2405 if (NULL
== dm_state
) {
2406 DRM_ERROR("dm_state is NULL!\n");
2410 drm_connector
= &aconnector
->base
;
2411 stream
= dc_create_stream_for_sink(aconnector
->dc_sink
);
2413 if (NULL
== stream
) {
2414 DRM_ERROR("Failed to create stream for sink!\n");
2415 goto stream_create_fail
;
2418 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
2419 /* Search for preferred mode */
2420 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
2421 native_mode_found
= true;
2425 if (!native_mode_found
)
2426 preferred_mode
= list_first_entry_or_null(
2427 &aconnector
->base
.modes
,
2428 struct drm_display_mode
,
2431 if (NULL
== preferred_mode
) {
2432 /* This may not be an error, the use case is when we we have no
2433 * usermode calls to reset and set mode upon hotplug. In this
2434 * case, we call set mode ourselves to restore the previous mode
2435 * and the modelist may not be filled in in time.
2437 DRM_INFO("No preferred mode found\n");
2439 decide_crtc_timing_for_drm_display_mode(
2440 &mode
, preferred_mode
,
2441 dm_state
->scaling
!= RMX_OFF
);
2444 fill_stream_properties_from_drm_display_mode(stream
,
2445 &mode
, &aconnector
->base
);
2446 update_stream_scaling_settings(&mode
, dm_state
, stream
);
2449 &stream
->audio_info
,
2451 aconnector
->dc_sink
);
/* drm_crtc_funcs.destroy callback: unregister the CRTC from DRM core.
 * NOTE(review): a kfree() of the containing amdgpu_crtc is not visible in
 * this chunk — confirm the allocation is released elsewhere before
 * assuming cleanup here is complete. */
2459 void amdgpu_dm_crtc_destroy(struct drm_crtc
*crtc
)
2461 drm_crtc_cleanup(crtc
);
/* drm_crtc_funcs.atomic_destroy_state hook: drop the reference the state
 * holds on its dc_stream, then free the base atomic state. */
2465 static void dm_crtc_destroy_state(struct drm_crtc
*crtc
,
2466 struct drm_crtc_state
*state
)
2468 struct dm_crtc_state
*cur
= to_dm_crtc_state(state
);
2470 /* TODO Destroy dc_stream objects are stream object is flattened */
2472 dc_stream_release(cur
->stream
);
2475 __drm_atomic_helper_crtc_destroy_state(state
);
/* drm_crtc_funcs.reset hook: discard any existing state and install a
 * fresh zeroed dm_crtc_state as the CRTC's current atomic state. */
2481 static void dm_crtc_reset_state(struct drm_crtc
*crtc
)
2483 struct dm_crtc_state
*state
;
2486 dm_crtc_destroy_state(crtc
, crtc
->state
);
2488 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
/* Allocation failure leaves crtc->state as set by the caller path. */
2489 if (WARN_ON(!state
))
2492 crtc
->state
= &state
->base
;
2493 crtc
->state
->crtc
= crtc
;
2497 static struct drm_crtc_state
*
2498 dm_crtc_duplicate_state(struct drm_crtc
*crtc
)
2500 struct dm_crtc_state
*state
, *cur
;
2502 cur
= to_dm_crtc_state(crtc
->state
);
2504 if (WARN_ON(!crtc
->state
))
2507 state
= dm_alloc(sizeof(*state
));
2509 __drm_atomic_helper_crtc_duplicate_state(crtc
, &state
->base
);
2512 state
->stream
= cur
->stream
;
2513 dc_stream_retain(state
->stream
);
2516 /* TODO Duplicate dc_stream after objects are stream object is flattened */
2518 return &state
->base
;
2521 /* Implemented only the options currently available for the driver.
 * CRTC vtable: DM-specific state lifecycle hooks plus the stock atomic
 * helpers for legacy set_config/page_flip/gamma entry points. */
2522 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
2523 .reset
= dm_crtc_reset_state
,
2524 .destroy
= amdgpu_dm_crtc_destroy
,
2525 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
2526 .set_config
= drm_atomic_helper_set_config
,
2527 .page_flip
= drm_atomic_helper_page_flip
,
2528 .atomic_duplicate_state
= dm_crtc_duplicate_state
,
2529 .atomic_destroy_state
= dm_crtc_destroy_state
,
/* drm_connector_funcs.detect hook: report connected iff DC has a sink
 * for this connector, unless userspace forced the connector on/off. */
2532 static enum drm_connector_status
2533 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
2536 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
2539 * 1. This interface is NOT called in context of HPD irq.
2540 * 2. This interface *is called* in context of user-mode ioctl. Which
2541 * makes it a bad place for *any* MST-related activity. */
2543 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
2544 connected
= (aconnector
->dc_sink
!= NULL
);
/* Forced mode: only DRM_FORCE_ON counts as connected. */
2546 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
2548 return (connected
? connector_status_connected
:
2549 connector_status_disconnected
);
/*
 * drm_connector_funcs.atomic_set_property hook: stash scaling-mode and
 * underscan property writes into the duplicated dm_connector_state so
 * they take effect at the next atomic commit.
 */
2552 int amdgpu_dm_connector_atomic_set_property(
2553 struct drm_connector
*connector
,
2554 struct drm_connector_state
*connector_state
,
2555 struct drm_property
*property
,
2558 struct drm_device
*dev
= connector
->dev
;
2559 struct amdgpu_device
*adev
= dev
->dev_private
;
2560 struct dm_connector_state
*dm_old_state
=
2561 to_dm_connector_state(connector
->state
);
2562 struct dm_connector_state
*dm_new_state
=
2563 to_dm_connector_state(connector_state
);
/* Translate the DRM scaling-mode enum into the driver's RMX type. */
2567 if (property
== dev
->mode_config
.scaling_mode_property
) {
2568 enum amdgpu_rmx_type rmx_type
;
2571 case DRM_MODE_SCALE_CENTER
:
2572 rmx_type
= RMX_CENTER
;
2574 case DRM_MODE_SCALE_ASPECT
:
2575 rmx_type
= RMX_ASPECT
;
2577 case DRM_MODE_SCALE_FULLSCREEN
:
2578 rmx_type
= RMX_FULL
;
2580 case DRM_MODE_SCALE_NONE
:
/* No-op if the requested scaling matches the current state. */
2586 if (dm_old_state
->scaling
== rmx_type
)
2589 dm_new_state
->scaling
= rmx_type
;
2591 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2592 dm_new_state
->underscan_hborder
= val
;
2594 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2595 dm_new_state
->underscan_vborder
= val
;
2597 } else if (property
== adev
->mode_info
.underscan_property
) {
2598 dm_new_state
->underscan_enable
= val
;
/*
 * drm_connector_funcs.atomic_get_property hook: read back scaling-mode
 * and underscan values from the dm_connector_state into *val.
 */
2605 int amdgpu_dm_connector_atomic_get_property(
2606 struct drm_connector
*connector
,
2607 const struct drm_connector_state
*state
,
2608 struct drm_property
*property
,
2611 struct drm_device
*dev
= connector
->dev
;
2612 struct amdgpu_device
*adev
= dev
->dev_private
;
2613 struct dm_connector_state
*dm_state
=
2614 to_dm_connector_state(state
);
/* Map the driver's RMX scaling type back to the DRM enum. */
2617 if (property
== dev
->mode_config
.scaling_mode_property
) {
2618 switch (dm_state
->scaling
) {
2620 *val
= DRM_MODE_SCALE_CENTER
;
2623 *val
= DRM_MODE_SCALE_ASPECT
;
2626 *val
= DRM_MODE_SCALE_FULLSCREEN
;
2630 *val
= DRM_MODE_SCALE_NONE
;
2634 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2635 *val
= dm_state
->underscan_hborder
;
2637 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2638 *val
= dm_state
->underscan_vborder
;
2640 } else if (property
== adev
->mode_info
.underscan_property
) {
2641 *val
= dm_state
->underscan_enable
;
/*
 * drm_connector_funcs.destroy hook: unregister the backlight device for
 * eDP/LVDS panels (when backlight support is compiled in), then drop the
 * connector from DRM.
 */
2647 void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
2649 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
2650 const struct dc_link
*link
= aconnector
->dc_link
;
2651 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
2652 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2653 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2654 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2656 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
/* NOTE(review): registering here looks unusual for a destroy path —
 * presumably balances refcounting before unregister; confirm intent. */
2657 amdgpu_dm_register_backlight_device(dm
);
2659 if (dm
->backlight_dev
) {
2660 backlight_device_unregister(dm
->backlight_dev
);
2661 dm
->backlight_dev
= NULL
;
2666 drm_connector_unregister(connector
);
2667 drm_connector_cleanup(connector
);
/*
 * drm_connector_funcs.reset hook: free the existing dm_connector_state
 * and install a fresh one with scaling off and underscan disabled.
 */
2671 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
2673 struct dm_connector_state
*state
=
2674 to_dm_connector_state(connector
->state
);
2678 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
/* Default state: no remote-media scaling, underscan fully disabled. */
2681 state
->scaling
= RMX_OFF
;
2682 state
->underscan_enable
= false;
2683 state
->underscan_hborder
= 0;
2684 state
->underscan_vborder
= 0;
2686 connector
->state
= &state
->base
;
2687 connector
->state
->connector
= connector
;
/*
 * drm_connector_funcs.atomic_duplicate_state hook: clone the current
 * dm_connector_state via kmemdup and hand back its embedded base state.
 * NOTE(review): no NULL check on the kmemdup result is visible in this
 * excerpt — confirm one exists before &new_state->base is returned.
 */
2691 struct drm_connector_state
*amdgpu_dm_connector_atomic_duplicate_state(
2692 struct drm_connector
*connector
)
2694 struct dm_connector_state
*state
=
2695 to_dm_connector_state(connector
->state
);
2697 struct dm_connector_state
*new_state
=
2698 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
2701 __drm_atomic_helper_connector_duplicate_state(connector
,
2703 return &new_state
->base
;
/* Connector vtable: DM state lifecycle plus scaling/underscan property
 * plumbing; probing is delegated to the single-connector DRM helper. */
2709 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
2710 .reset
= amdgpu_dm_connector_funcs_reset
,
2711 .detect
= amdgpu_dm_connector_detect
,
2712 .fill_modes
= drm_helper_probe_single_connector_modes
,
2713 .destroy
= amdgpu_dm_connector_destroy
,
2714 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
2715 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
2716 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
2717 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
/* drm_connector_helper_funcs.best_encoder hook: resolve the connector's
 * first (and only) encoder id to a drm_encoder object. */
2720 static struct drm_encoder
*best_encoder(struct drm_connector
*connector
)
2722 int enc_id
= connector
->encoder_ids
[0];
2723 struct drm_mode_object
*obj
;
2724 struct drm_encoder
*encoder
;
2726 DRM_DEBUG_KMS("Finding the best encoder\n");
2728 /* pick the encoder ids */
2730 obj
= drm_mode_object_find(connector
->dev
, enc_id
, DRM_MODE_OBJECT_ENCODER
)
;
2732 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2735 encoder
= obj_to_encoder(obj
);
/* Falls through here when encoder_ids[0] was 0 (no encoder bound). */
2738 DRM_ERROR("No encoder id\n");
/* drm_connector_helper_funcs.get_modes hook: defer to the shared
 * amdgpu DM implementation and report the number of probed modes. */
static int get_modes(struct drm_connector *connector)
{
	int mode_count = amdgpu_dm_connector_get_modes(connector);

	return mode_count;
}
2747 static void create_eml_sink(struct amdgpu_connector
*aconnector
)
2749 struct dc_sink_init_data init_params
= {
2750 .link
= aconnector
->dc_link
,
2751 .sink_signal
= SIGNAL_TYPE_VIRTUAL
2753 struct edid
*edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
2755 if (!aconnector
->base
.edid_blob_ptr
||
2756 !aconnector
->base
.edid_blob_ptr
->data
) {
2757 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
2758 aconnector
->base
.name
);
2760 aconnector
->base
.force
= DRM_FORCE_OFF
;
2761 aconnector
->base
.override_edid
= false;
2765 aconnector
->edid
= edid
;
2767 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
2768 aconnector
->dc_link
,
2770 (edid
->extensions
+ 1) * EDID_LENGTH
,
2773 if (aconnector
->base
.force
2775 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
2776 aconnector
->dc_link
->local_sink
:
2777 aconnector
->dc_em_sink
;
/*
 * Handle EDID override for a forced connector: seed non-zero DP link
 * capabilities (required for an initial modeset on headless boot) and
 * create the emulated sink from the override EDID.
 */
2780 static void handle_edid_mgmt(struct amdgpu_connector
*aconnector
)
2782 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
2784 /* In case of headless boot with force on for DP managed connector
2785 * Those settings have to be != 0 to get initial modeset
2787 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
2788 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
2789 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
2793 aconnector
->base
.override_edid
= true;
2794 create_eml_sink(aconnector
);
/*
 * drm_connector_helper_funcs.mode_valid hook: reject interlaced and
 * doublescan modes outright, then validate the mode by building a
 * throwaway dc_stream and asking DC whether it is achievable.
 */
2797 int amdgpu_dm_connector_mode_valid(
2798 struct drm_connector
*connector
,
2799 struct drm_display_mode
*mode
)
2801 int result
= MODE_ERROR
;
2802 struct dc_sink
*dc_sink
;
2803 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
2804 /* TODO: Unhardcode stream count */
2805 struct dc_stream
*stream
;
2806 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
2808 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
2809 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
2812 /* Only run this the first time mode_valid is called to initilialize
2815 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
2816 !aconnector
->dc_em_sink
)
2817 handle_edid_mgmt(aconnector
);
2819 dc_sink
= to_amdgpu_connector(connector
)->dc_sink
;
2821 if (NULL
== dc_sink
) {
2822 DRM_ERROR("dc_sink is NULL!\n");
2826 stream
= dc_create_stream_for_sink(dc_sink
);
2827 if (NULL
== stream
) {
2828 DRM_ERROR("Failed to create stream for sink!\n");
/* Populate the stream with this mode's timing and source/dest rects. */
2832 drm_mode_set_crtcinfo(mode
, 0);
2833 fill_stream_properties_from_drm_display_mode(stream
, mode
, connector
);
2835 stream
->src
.width
= mode
->hdisplay
;
2836 stream
->src
.height
= mode
->vdisplay
;
2837 stream
->dst
= stream
->src
;
2839 if (dc_validate_stream(adev
->dm
.dc
, stream
))
2842 dc_stream_release(stream
);
2845 /* TODO: error handling*/
2849 static const struct drm_connector_helper_funcs
2850 amdgpu_dm_connector_helper_funcs
= {
2852 * If hotplug a second bigger display in FB Con mode, bigger resolution
2853 * modes will be filtered by drm_mode_validate_size(), and those modes
2854 * are missing after user starts lightdm. So we need to renew the modes
2855 * list in the get_modes callback, not just return the modes count
2857 .get_modes
= get_modes
,
2858 .mode_valid
= amdgpu_dm_connector_mode_valid
,
2859 .best_encoder
= best_encoder
/* drm_crtc_helper_funcs.disable hook: intentionally a no-op — CRTC
 * disable is handled through the DC atomic commit path instead. */
2862 static void dm_crtc_helper_disable(struct drm_crtc
*crtc
)
/*
 * drm_crtc_helper_funcs.atomic_check hook: a modeset with no attached
 * dc_stream is invalid; otherwise ask DC to validate the stream. A
 * missing stream without a modeset (e.g. during reset) is acceptable.
 */
2866 static int dm_crtc_helper_atomic_check(
2867 struct drm_crtc
*crtc
,
2868 struct drm_crtc_state
*state
)
2870 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
2871 struct dc
*dc
= adev
->dm
.dc
;
2872 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(state
);
2875 if (unlikely(!dm_crtc_state
->stream
&& modeset_required(state
))) {
2880 /* In some use cases, like reset, no stream is attached */
2881 if (!dm_crtc_state
->stream
)
2884 if (dc_validate_stream(dc
, dm_crtc_state
->stream
))
/* drm_crtc_helper_funcs.mode_fixup hook: accepts every mode unchanged —
 * DC performs its own timing adjustment later. */
2890 static bool dm_crtc_helper_mode_fixup(
2891 struct drm_crtc
*crtc
,
2892 const struct drm_display_mode
*mode
,
2893 struct drm_display_mode
*adjusted_mode
)
/* CRTC helper vtable wired into drm_crtc_helper_add() below. */
2898 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
2899 .disable
= dm_crtc_helper_disable
,
2900 .atomic_check
= dm_crtc_helper_atomic_check
,
2901 .mode_fixup
= dm_crtc_helper_mode_fixup
/* drm_encoder_helper_funcs.disable hook: intentionally a no-op — the
 * encoder is managed by DC, not the DRM helper pipeline. */
2904 static void dm_encoder_helper_disable(struct drm_encoder
*encoder
)
/* drm_encoder_helper_funcs.atomic_check hook: nothing to validate at
 * the encoder level; DC validates the full pipeline elsewhere. */
2909 static int dm_encoder_helper_atomic_check(
2910 struct drm_encoder
*encoder
,
2911 struct drm_crtc_state
*crtc_state
,
2912 struct drm_connector_state
*conn_state
)
/* Encoder helper vtable; non-static because encoder init lives in
 * another compilation unit of the DM. */
2917 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
2918 .disable
= dm_encoder_helper_disable
,
2919 .atomic_check
= dm_encoder_helper_atomic_check
/*
 * drm_plane_funcs.reset hook: destroy any current plane state and
 * install a fresh zeroed dm_plane_state with rotation reset to 0.
 */
2922 static void dm_drm_plane_reset(struct drm_plane
*plane
)
2924 struct dm_plane_state
*amdgpu_state
= NULL
;
2927 plane
->funcs
->atomic_destroy_state(plane
, plane
->state
);
2929 amdgpu_state
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
2932 plane
->state
= &amdgpu_state
->base
;
2933 plane
->state
->plane
= plane
;
2934 plane
->state
->rotation
= DRM_MODE_ROTATE_0
;
/*
 * drm_plane_funcs.atomic_duplicate_state hook: clone the plane state,
 * sharing the attached dc_surface with an extra retain so both states
 * hold an independent reference.
 */
2939 static struct drm_plane_state
*
2940 dm_drm_plane_duplicate_state(struct drm_plane
*plane
)
2942 struct dm_plane_state
*dm_plane_state
, *old_dm_plane_state
;
2944 old_dm_plane_state
= to_dm_plane_state(plane
->state
);
2945 dm_plane_state
= kzalloc(sizeof(*dm_plane_state
), GFP_KERNEL
);
2946 if (!dm_plane_state
)
2949 __drm_atomic_helper_plane_duplicate_state(plane
, &dm_plane_state
->base
);
2951 if (old_dm_plane_state
->surface
) {
2952 dm_plane_state
->surface
= old_dm_plane_state
->surface
;
2953 dc_surface_retain(dm_plane_state
->surface
);
2956 return &dm_plane_state
->base
;
/* drm_plane_funcs.atomic_destroy_state hook: release the dc_surface
 * reference held by this state, then free the base state and wrapper. */
2959 void dm_drm_plane_destroy_state(struct drm_plane
*plane
,
2960 struct drm_plane_state
*state
)
2962 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
2964 if (dm_plane_state
->surface
)
2965 dc_surface_release(dm_plane_state
->surface
);
2967 __drm_atomic_helper_plane_destroy_state(state
);
2968 kfree(dm_plane_state
);
/* Plane vtable: stock atomic helpers for update/disable, DM-specific
 * state lifecycle for reset/duplicate/destroy. */
2971 static const struct drm_plane_funcs dm_plane_funcs
= {
2972 .update_plane
= drm_atomic_helper_update_plane
,
2973 .disable_plane
= drm_atomic_helper_disable_plane
,
2974 .destroy
= drm_plane_cleanup
,
2975 .reset
= dm_drm_plane_reset
,
2976 .atomic_duplicate_state
= dm_drm_plane_duplicate_state
,
2977 .atomic_destroy_state
= dm_drm_plane_destroy_state
,
/*
 * drm_plane_helper_funcs.prepare_fb hook: pin the framebuffer BO into
 * VRAM and program the resulting GPU address into the dc_surface (RGB
 * uses the grph address; video formats use separate luma/chroma planes,
 * chroma placed after an awidth*height luma region, awidth 64-aligned).
 */
2980 static int dm_plane_helper_prepare_fb(
2981 struct drm_plane
*plane
,
2982 struct drm_plane_state
*new_state
)
2984 struct amdgpu_framebuffer
*afb
;
2985 struct drm_gem_object
*obj
;
2986 struct amdgpu_bo
*rbo
;
2988 struct dm_plane_state
*dm_plane_state_new
, *dm_plane_state_old
;
2989 unsigned int awidth
;
2991 dm_plane_state_old
= to_dm_plane_state(plane
->state
);
2992 dm_plane_state_new
= to_dm_plane_state(new_state
);
2994 if (!new_state
->fb
) {
2995 DRM_DEBUG_KMS("No FB bound\n");
2999 afb
= to_amdgpu_framebuffer(new_state
->fb
);
3002 rbo
= gem_to_amdgpu_bo(obj
);
3003 r
= amdgpu_bo_reserve(rbo
, false);
3004 if (unlikely(r
!= 0))
3007 r
= amdgpu_bo_pin(rbo
, AMDGPU_GEM_DOMAIN_VRAM
, &afb
->address
);
3010 amdgpu_bo_unreserve(rbo
);
3012 if (unlikely(r
!= 0)) {
3013 DRM_ERROR("Failed to pin framebuffer\n");
/* Only reprogram addresses when the surface actually changed. */
3019 if (dm_plane_state_new
->surface
&&
3020 dm_plane_state_old
->surface
!= dm_plane_state_new
->surface
) {
3021 struct dc_surface
*surface
= dm_plane_state_new
->surface
;
3023 if (surface
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
3024 surface
->address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3025 surface
->address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3027 awidth
= ALIGN(new_state
->fb
->width
, 64);
3028 surface
->address
.video_progressive
.luma_addr
.low_part
3029 = lower_32_bits(afb
->address
);
3030 surface
->address
.video_progressive
.chroma_addr
.low_part
3031 = lower_32_bits(afb
->address
) +
3032 (awidth
* new_state
->fb
->height
);
3036 /* It's a hack for s3 since the 4.9 kernel filters out cursor buffer
3037 * prepare and cleanup in drm_atomic_helper_prepare_planes
3038 * and drm_atomic_helper_cleanup_planes because fb doesn't exist in s3.
3039 * In the 4.10 kernel this code should be removed and amdgpu_device_suspend
3040 * code touching frame buffers should be avoided for DC.
3042 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
3043 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(new_state
->crtc
);
3045 acrtc
->cursor_bo
= obj
;
/*
 * drm_plane_helper_funcs.cleanup_fb hook: unpin and drop the reference
 * on the BO that prepare_fb pinned for the old plane state.
 */
3050 static void dm_plane_helper_cleanup_fb(
3051 struct drm_plane
*plane
,
3052 struct drm_plane_state
*old_state
)
3054 struct amdgpu_bo
*rbo
;
3055 struct amdgpu_framebuffer
*afb
;
3061 afb
= to_amdgpu_framebuffer(old_state
->fb
);
3062 rbo
= gem_to_amdgpu_bo(afb
->obj
);
3063 r
= amdgpu_bo_reserve(rbo
, false);
3065 DRM_ERROR("failed to reserve rbo before unpin\n");
3068 amdgpu_bo_unpin(rbo
);
3069 amdgpu_bo_unreserve(rbo
);
3070 amdgpu_bo_unref(&rbo
);
/*
 * Build a dc_validation_set entry (a stream configured for @mode on this
 * connector's sink) so callers can batch-validate configurations in DC.
 * Interlaced and doublescan modes are rejected up front.
 * NOTE(review): ownership of the created stream transfers to @val_set;
 * the release path is outside this excerpt.
 */
3074 int dm_create_validation_set_for_connector(struct drm_connector
*connector
,
3075 struct drm_display_mode
*mode
, struct dc_validation_set
*val_set
)
3077 int result
= MODE_ERROR
;
3078 struct dc_sink
*dc_sink
=
3079 to_amdgpu_connector(connector
)->dc_sink
;
3080 /* TODO: Unhardcode stream count */
3081 struct dc_stream
*stream
;
3083 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
3084 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
3087 if (NULL
== dc_sink
) {
3088 DRM_ERROR("dc_sink is NULL!\n");
3092 stream
= dc_create_stream_for_sink(dc_sink
);
3094 if (NULL
== stream
) {
3095 DRM_ERROR("Failed to create stream for sink!\n");
3099 drm_mode_set_crtcinfo(mode
, 0);
3101 fill_stream_properties_from_drm_display_mode(stream
, mode
, connector
);
3103 val_set
->stream
= stream
;
3105 stream
->src
.width
= mode
->hdisplay
;
3106 stream
->src
.height
= mode
->vdisplay
;
3107 stream
->dst
= stream
->src
;
/* Plane helper vtable: pin/unpin framebuffer BOs around atomic commits. */
3112 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
3113 .prepare_fb
= dm_plane_helper_prepare_fb
,
3114 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
3118 * TODO: these are currently initialized to rgb formats only.
3119 * For future use cases we should either initialize them dynamically based on
3120 * plane capabilities, or initialize this array to all formats, so internal drm
3121 * check will succeed, and let DC to implement proper check
/* Pixel formats advertised for primary (RGB) planes. */
3123 static uint32_t rgb_formats
[] = {
3125 DRM_FORMAT_XRGB8888
,
3126 DRM_FORMAT_ARGB8888
,
3127 DRM_FORMAT_RGBA8888
,
3128 DRM_FORMAT_XRGB2101010
,
3129 DRM_FORMAT_XBGR2101010
,
3130 DRM_FORMAT_ARGB2101010
,
3131 DRM_FORMAT_ABGR2101010
,
/* Pixel formats advertised for overlay (YUV) planes. */
3134 static uint32_t yuv_formats
[] = {
/* Pixel formats advertised for the cursor plane. */
3139 static const u32 cursor_formats
[] = {
/*
 * Register @aplane with DRM as a universal plane, picking the format
 * list (rgb/yuv/cursor) by plane type, and attach the DM plane helpers.
 * Primary planes additionally get format_default set for legacy paths.
 */
3143 int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
3144 struct amdgpu_plane
*aplane
,
3145 unsigned long possible_crtcs
)
3149 switch (aplane
->base
.type
) {
3150 case DRM_PLANE_TYPE_PRIMARY
:
3151 aplane
->base
.format_default
= true;
3153 res
= drm_universal_plane_init(
3159 ARRAY_SIZE(rgb_formats
),
3160 NULL
, aplane
->base
.type
, NULL
);
3162 case DRM_PLANE_TYPE_OVERLAY
:
3163 res
= drm_universal_plane_init(
3169 ARRAY_SIZE(yuv_formats
),
3170 NULL
, aplane
->base
.type
, NULL
);
3172 case DRM_PLANE_TYPE_CURSOR
:
3173 res
= drm_universal_plane_init(
3179 ARRAY_SIZE(cursor_formats
),
3180 NULL
, aplane
->base
.type
, NULL
);
3184 drm_plane_helper_add(&aplane
->base
, &dm_plane_helper_funcs
);
/*
 * Create one amdgpu CRTC: allocate a dedicated cursor plane, register
 * the CRTC with DRM (primary @plane + cursor), attach the DM CRTC
 * helpers, size cursor limits from DC caps, and record the CRTC in
 * adev->mode_info.crtcs[@crtc_index].
 */
3189 int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
3190 struct drm_plane
*plane
,
3191 uint32_t crtc_index
)
3193 struct amdgpu_crtc
*acrtc
= NULL
;
3194 struct amdgpu_plane
*cursor_plane
;
3198 cursor_plane
= kzalloc(sizeof(*cursor_plane
), GFP_KERNEL
);
3202 cursor_plane
->base
.type
= DRM_PLANE_TYPE_CURSOR
;
3203 res
= amdgpu_dm_plane_init(dm
, cursor_plane
, 0);
3205 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
3209 res
= drm_crtc_init_with_planes(
3213 &cursor_plane
->base
,
3214 &amdgpu_dm_crtc_funcs
, NULL
);
3219 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
/* Cursor limits come straight from DC hardware capabilities. */
3221 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3222 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3224 acrtc
->crtc_id
= crtc_index
;
3225 acrtc
->base
.enabled
= false;
3227 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
3228 drm_mode_crtc_set_gamma_size(&acrtc
->base
, 256);
/* Failure path: free the cursor plane and mark the CRTC id invalid. */
3236 kfree(cursor_plane
);
3237 acrtc
->crtc_id
= -1;
/* Map a DC signal_type to the DRM connector type reported to userspace;
 * unrecognized signals fall through to DRM_MODE_CONNECTOR_Unknown. */
3242 static int to_drm_connector_type(enum signal_type st
)
3245 case SIGNAL_TYPE_HDMI_TYPE_A
:
3246 return DRM_MODE_CONNECTOR_HDMIA
;
3247 case SIGNAL_TYPE_EDP
:
3248 return DRM_MODE_CONNECTOR_eDP
;
3249 case SIGNAL_TYPE_RGB
:
3250 return DRM_MODE_CONNECTOR_VGA
;
3251 case SIGNAL_TYPE_DISPLAY_PORT
:
3252 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
3253 return DRM_MODE_CONNECTOR_DisplayPort
;
3254 case SIGNAL_TYPE_DVI_DUAL_LINK
:
3255 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
3256 return DRM_MODE_CONNECTOR_DVID
;
3257 case SIGNAL_TYPE_VIRTUAL
:
3258 return DRM_MODE_CONNECTOR_VIRTUAL
;
3261 return DRM_MODE_CONNECTOR_Unknown
;
/*
 * Cache the connector's preferred probed mode as the encoder's native
 * mode (clock reset to 0 first so "no native mode" is detectable).
 */
3265 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
3267 const struct drm_connector_helper_funcs
*helper
=
3268 connector
->helper_private
;
3269 struct drm_encoder
*encoder
;
3270 struct amdgpu_encoder
*amdgpu_encoder
;
3272 encoder
= helper
->best_encoder(connector
);
3274 if (encoder
== NULL
)
3277 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3279 amdgpu_encoder
->native_mode
.clock
= 0;
3281 if (!list_empty(&connector
->probed_modes
)) {
3282 struct drm_display_mode
*preferred_mode
= NULL
;
3283 list_for_each_entry(preferred_mode
,
3284 &connector
->probed_modes
,
3286 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
3287 amdgpu_encoder
->native_mode
= *preferred_mode
;
/*
 * Duplicate the encoder's native mode with overridden hdisplay/vdisplay
 * and a caller-supplied name, clearing the PREFERRED flag so the common
 * mode never outranks the panel's real preferred mode.
 */
3295 static struct drm_display_mode
*amdgpu_dm_create_common_mode(
3296 struct drm_encoder
*encoder
, char *name
,
3297 int hdisplay
, int vdisplay
)
3299 struct drm_device
*dev
= encoder
->dev
;
3300 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3301 struct drm_display_mode
*mode
= NULL
;
3302 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3304 mode
= drm_mode_duplicate(dev
, native_mode
);
3309 mode
->hdisplay
= hdisplay
;
3310 mode
->vdisplay
= vdisplay
;
3311 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
/* NOTE(review): strncpy does not guarantee NUL-termination when name is
 * DRM_DISPLAY_MODE_LEN chars or longer — consider strscpy/strlcpy. */
3312 strncpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
/*
 * Add a set of standard resolutions (640x480..1920x1200) to the probed
 * mode list, skipping any that exceed the native mode, duplicate the
 * native resolution exactly, or already exist in the list.
 */
3318 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
3319 struct drm_connector
*connector
)
3321 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3322 struct drm_display_mode
*mode
= NULL
;
3323 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3324 struct amdgpu_connector
*amdgpu_connector
=
3325 to_amdgpu_connector(connector
);
3329 char name
[DRM_DISPLAY_MODE_LEN
];
3333 { "640x480", 640, 480},
3334 { "800x600", 800, 600},
3335 { "1024x768", 1024, 768},
3336 { "1280x720", 1280, 720},
3337 { "1280x800", 1280, 800},
3338 {"1280x1024", 1280, 1024},
3339 { "1440x900", 1440, 900},
3340 {"1680x1050", 1680, 1050},
3341 {"1600x1200", 1600, 1200},
3342 {"1920x1080", 1920, 1080},
3343 {"1920x1200", 1920, 1200}
/* NOTE(review): prefer the ARRAY_SIZE() macro over open-coded
 * sizeof/sizeof for the element count. */
3346 n
= sizeof(common_modes
) / sizeof(common_modes
[0]);
3348 for (i
= 0; i
< n
; i
++) {
3349 struct drm_display_mode
*curmode
= NULL
;
3350 bool mode_existed
= false;
/* Skip modes larger than, or identical to, the native resolution. */
3352 if (common_modes
[i
].w
> native_mode
->hdisplay
||
3353 common_modes
[i
].h
> native_mode
->vdisplay
||
3354 (common_modes
[i
].w
== native_mode
->hdisplay
&&
3355 common_modes
[i
].h
== native_mode
->vdisplay
))
/* Skip modes that are already in the probed list. */
3358 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
3359 if (common_modes
[i
].w
== curmode
->hdisplay
&&
3360 common_modes
[i
].h
== curmode
->vdisplay
) {
3361 mode_existed
= true;
3369 mode
= amdgpu_dm_create_common_mode(encoder
,
3370 common_modes
[i
].name
, common_modes
[i
].w
,
3372 drm_mode_probed_add(connector
, mode
);
3373 amdgpu_connector
->num_modes
++;
/*
 * Repopulate the probed mode list from the given EDID (or clear the
 * count when no EDID is available) and refresh ELD + cached native mode.
 */
3377 static void amdgpu_dm_connector_ddc_get_modes(
3378 struct drm_connector
*connector
,
3381 struct amdgpu_connector
*amdgpu_connector
=
3382 to_amdgpu_connector(connector
);
3385 /* empty probed_modes */
3386 INIT_LIST_HEAD(&connector
->probed_modes
);
3387 amdgpu_connector
->num_modes
=
3388 drm_add_edid_modes(connector
, edid
);
3390 drm_edid_to_eld(connector
, edid
);
3392 amdgpu_dm_get_native_mode(connector
);
/* No EDID: report zero modes. */
3394 amdgpu_connector
->num_modes
= 0;
/*
 * Backing implementation of the get_modes helper hook: rebuild the mode
 * list from the cached EDID, append common fallback resolutions, and
 * return the resulting mode count.
 */
3397 int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
3399 const struct drm_connector_helper_funcs
*helper
=
3400 connector
->helper_private
;
3401 struct amdgpu_connector
*amdgpu_connector
=
3402 to_amdgpu_connector(connector
);
3403 struct drm_encoder
*encoder
;
3404 struct edid
*edid
= amdgpu_connector
->edid
;
3406 encoder
= helper
->best_encoder(connector
);
3408 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
3409 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
3410 return amdgpu_connector
->num_modes
;
/*
 * Common amdgpu_connector initialization: link it to its dc_link, set
 * capability defaults, enable HPD polling for hotpluggable connector
 * types, and attach the scaling/underscan DRM properties.
 */
3413 void amdgpu_dm_connector_init_helper(
3414 struct amdgpu_display_manager
*dm
,
3415 struct amdgpu_connector
*aconnector
,
3417 struct dc_link
*link
,
3420 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
3422 aconnector
->connector_id
= link_index
;
3423 aconnector
->dc_link
= link
;
3424 aconnector
->base
.interlace_allowed
= false;
3425 aconnector
->base
.doublescan_allowed
= false;
3426 aconnector
->base
.stereo_allowed
= false;
3427 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
3428 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
3430 mutex_init(&aconnector
->hpd_lock
);
3432 /* Configure HPD hot plug support: connector->polled defaults to 0,
3433 * which means HPD hot plug is not supported. */
3434 switch (connector_type
) {
3435 case DRM_MODE_CONNECTOR_HDMIA
:
3436 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3438 case DRM_MODE_CONNECTOR_DisplayPort
:
3439 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3441 case DRM_MODE_CONNECTOR_DVID
:
3442 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
/* Expose the scaling and underscan properties to userspace. */
3448 drm_object_attach_property(&aconnector
->base
.base
,
3449 dm
->ddev
->mode_config
.scaling_mode_property
,
3450 DRM_MODE_SCALE_NONE
);
3452 drm_object_attach_property(&aconnector
->base
.base
,
3453 adev
->mode_info
.underscan_property
,
3455 drm_object_attach_property(&aconnector
->base
.base
,
3456 adev
->mode_info
.underscan_hborder_property
,
3458 drm_object_attach_property(&aconnector
->base
.base
,
3459 adev
->mode_info
.underscan_vborder_property
,
/*
 * i2c_algorithm.master_xfer hook: translate a batch of i2c_msg transfers
 * into a DC i2c_command and submit it through the DDC service.
 */
3464 int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
3465 struct i2c_msg
*msgs
, int num
)
3467 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
3468 struct ddc_service
*ddc_service
= i2c
->ddc_service
;
3469 struct i2c_command cmd
;
/* NOTE(review): num * sizeof(...) can overflow for large num —
 * prefer kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL). */
3473 cmd
.payloads
= kzalloc(num
* sizeof(struct i2c_payload
), GFP_KERNEL
);
3478 cmd
.number_of_payloads
= num
;
3479 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
/* Mirror each i2c_msg into a DC payload; I2C_M_RD means a read. */
3482 for (i
= 0; i
< num
; i
++) {
3483 cmd
.payloads
[i
].write
= !(msgs
[i
].flags
& I2C_M_RD
);
3484 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
3485 cmd
.payloads
[i
].length
= msgs
[i
].len
;
3486 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
3489 if (dal_i2caux_submit_i2c_command(
3490 ddc_service
->ctx
->i2caux
,
3491 ddc_service
->ddc_pin
,
3495 kfree(cmd
.payloads
);
3499 u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
3501 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
/* I2C algorithm vtable routing kernel I2C traffic through DC's DDC. */
3504 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
3505 .master_xfer
= amdgpu_dm_i2c_xfer
,
3506 .functionality
= amdgpu_dm_i2c_func
,
/*
 * Allocate and initialize an amdgpu_i2c_adapter wrapping @ddc_service,
 * naming the bus after @link_index and wiring in amdgpu_dm_i2c_algo.
 */
3509 static struct amdgpu_i2c_adapter
*create_i2c(
3510 struct ddc_service
*ddc_service
,
3514 struct amdgpu_device
*adev
= ddc_service
->ctx
->driver_context
;
3515 struct amdgpu_i2c_adapter
*i2c
;
3517 i2c
= kzalloc(sizeof (struct amdgpu_i2c_adapter
), GFP_KERNEL
);
/* NOTE(review): kzalloc result is dereferenced on the next statement
 * with no NULL check — an allocation failure oopses here. */
3518 i2c
->base
.owner
= THIS_MODULE
;
3519 i2c
->base
.class = I2C_CLASS_DDC
;
3520 i2c
->base
.dev
.parent
= &adev
->pdev
->dev
;
3521 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
3522 snprintf(i2c
->base
.name
, sizeof (i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
3523 i2c_set_adapdata(&i2c
->base
, i2c
);
3524 i2c
->ddc_service
= ddc_service
;
3529 /* Note: this function assumes that dc_link_detect() was called for the
3530 * dc_link which will be represented by this aconnector.
 * Creates the I2C adapter, registers the DRM connector, attaches it to
 * @aencoder, sets up MST for DP/eDP, and registers the backlight device
 * for eDP/LVDS links. */
3531 int amdgpu_dm_connector_init(
3532 struct amdgpu_display_manager
*dm
,
3533 struct amdgpu_connector
*aconnector
,
3534 uint32_t link_index
,
3535 struct amdgpu_encoder
*aencoder
)
3539 struct dc
*dc
= dm
->dc
;
3540 struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
3541 struct amdgpu_i2c_adapter
*i2c
;
/* Back-pointer so DC link events can reach the DRM connector. */
3542 ((struct dc_link
*)link
)->priv
= aconnector
;
3544 DRM_DEBUG_KMS("%s()\n", __func__
);
3546 i2c
= create_i2c(link
->ddc
, link
->link_index
, &res
);
3547 aconnector
->i2c
= i2c
;
3548 res
= i2c_add_adapter(&i2c
->base
);
3551 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
3555 connector_type
= to_drm_connector_type(link
->connector_signal
);
3557 res
= drm_connector_init(
3560 &amdgpu_dm_connector_funcs
,
3564 DRM_ERROR("connector_init failed\n");
3565 aconnector
->connector_id
= -1;
3569 drm_connector_helper_add(
3571 &amdgpu_dm_connector_helper_funcs
);
3573 amdgpu_dm_connector_init_helper(
3580 drm_mode_connector_attach_encoder(
3581 &aconnector
->base
, &aencoder
->base
);
3583 drm_connector_register(&aconnector
->base
);
/* DP (including eDP) connectors need MST topology management. */
3585 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
3586 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
3587 amdgpu_dm_initialize_dp_connector(dm
, aconnector
);
3589 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3590 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3592 /* NOTE: this currently will create backlight device even if a panel
3593 * is not connected to the eDP/LVDS connector.
3595 * This is less than ideal but we don't have sink information at this
3596 * stage since detection happens after. We can't do detection earlier
3597 * since MST detection needs connectors to be created first.
3599 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
3600 /* Even if registration failed, we should continue with
3601 * DM initialization because not having a backlight control
3602 * is better than a black screen. */
3603 amdgpu_dm_register_backlight_device(dm
);
3605 if (dm
->backlight_dev
)
3606 dm
->backlight_link
= link
;
/* Failure path: drop the i2c adapter reference. */
3613 aconnector
->i2c
= NULL
;
/* Compute the possible_crtcs bitmask for an encoder from the number of
 * CRTCs this ASIC exposes (switch bodies elided in this excerpt). */
3618 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
3620 switch (adev
->mode_info
.num_crtc
) {
/*
 * Register one DRM encoder (TMDS type) for @link_index, derive its
 * possible_crtcs mask from the ASIC CRTC count, and attach the DM
 * encoder helpers.
 */
3637 int amdgpu_dm_encoder_init(
3638 struct drm_device
*dev
,
3639 struct amdgpu_encoder
*aencoder
,
3640 uint32_t link_index
)
3642 struct amdgpu_device
*adev
= dev
->dev_private
;
3644 int res
= drm_encoder_init(dev
,
3646 &amdgpu_dm_encoder_funcs
,
3647 DRM_MODE_ENCODER_TMDS
,
3650 aencoder
->base
.possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
3653 aencoder
->encoder_id
= link_index
;
/* Failure path: mark the encoder id invalid. */
3655 aencoder
->encoder_id
= -1;
3657 drm_encoder_helper_add(&aencoder
->base
, &amdgpu_dm_encoder_helper_funcs
);
/*
 * Enable/disable the pageflip (and, by shared constant, vblank) IRQ
 * sources for one CRTC and toggle DRM vblank accounting accordingly.
 */
3662 static void manage_dm_interrupts(
3663 struct amdgpu_device
*adev
,
3664 struct amdgpu_crtc
*acrtc
,
3668 * this is not correct translation but will work as soon as VBLANK
3669 * constant is the same as PFLIP
3672 amdgpu_crtc_idx_to_irq_type(
/* Enable path: vblank on, then unmask the pageflip IRQ. */
3677 drm_crtc_vblank_on(&acrtc
->base
);
3680 &adev
->pageflip_irq
,
/* Disable path: mask the pageflip IRQ, then vblank off. */
3686 &adev
->pageflip_irq
,
3688 drm_crtc_vblank_off(&acrtc
->base
);
/*
 * Compare two connector states' scaling configuration. Differences in
 * RMX mode always count; underscan enable/disable transitions count only
 * if the outgoing/incoming state carries non-zero borders; otherwise the
 * raw border values are compared.
 */
3692 static bool is_scaling_state_different(
3693 const struct dm_connector_state
*dm_state
,
3694 const struct dm_connector_state
*old_dm_state
)
3696 if (dm_state
->scaling
!= old_dm_state
->scaling
)
3698 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
3699 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
3701 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
3702 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
3704 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
3705 || dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
/*
 * Detach @stream from @acrtc on a mode update: unregister it from the
 * freesync module (if present) and mark the CRTC unowned/disabled.
 */
3710 static void remove_stream(
3711 struct amdgpu_device
*adev
,
3712 struct amdgpu_crtc
*acrtc
,
3713 struct dc_stream
*stream
)
3715 /* this is the update mode case */
3716 if (adev
->dm
.freesync_module
)
3717 mod_freesync_remove_stream(adev
->dm
.freesync_module
, stream
);
/* -1 marks "no output timing generator instance assigned". */
3719 acrtc
->otg_inst
= -1;
3720 acrtc
->enabled
= false;
/*
 * Apply a cursor plane change: an unchanged fb pointer means a pure
 * move; a changed fb means a set/unset of the cursor image (choosing
 * new vs. old CRTC depending on whether the cursor is appearing or
 * disappearing).
 */
3723 static void handle_cursor_update(
3724 struct drm_plane
*plane
,
3725 struct drm_plane_state
*old_plane_state
)
/* Neither state has a cursor fb: nothing to do. */
3727 if (!plane
->state
->fb
&& !old_plane_state
->fb
)
3730 /* Check if it's a cursor on/off update or just cursor move*/
3731 if (plane
->state
->fb
== old_plane_state
->fb
)
3732 dm_crtc_cursor_move(
3734 plane
->state
->crtc_x
,
3735 plane
->state
->crtc_y
);
3737 struct amdgpu_framebuffer
*afb
=
3738 to_amdgpu_framebuffer(plane
->state
->fb
);
3740 (!!plane
->state
->fb
) ?
3741 plane
->state
->crtc
:
3742 old_plane_state
->crtc
,
3743 (!!plane
->state
->fb
) ?
3746 plane
->state
->crtc_w
,
3747 plane
->state
->crtc_h
);
3752 static void prepare_flip_isr(struct amdgpu_crtc
*acrtc
)
3755 assert_spin_locked(&acrtc
->base
.dev
->event_lock
);
3756 WARN_ON(acrtc
->event
);
3758 acrtc
->event
= acrtc
->base
.state
->event
;
3760 /* Set the flip status */
3761 acrtc
->pflip_status
= AMDGPU_FLIP_SUBMITTED
;
3763 /* Mark this event as consumed */
3764 acrtc
->base
.state
->event
= NULL
;
3766 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
3773 * Waits on all BO's fences and for proper vblank count
3775 static void amdgpu_dm_do_flip(
3776 struct drm_crtc
*crtc
,
3777 struct drm_framebuffer
*fb
,
3780 unsigned long flags
;
3781 uint32_t target_vblank
;
3783 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3784 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(fb
);
3785 struct amdgpu_bo
*abo
= gem_to_amdgpu_bo(afb
->obj
);
3786 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
3787 bool async_flip
= (acrtc
->flip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
) != 0;
3788 struct dc_flip_addrs addr
= { {0} };
3789 struct dc_surface_update surface_updates
[1] = { {0} };
3790 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
3793 /* Prepare wait for target vblank early - before the fence-waits */
3794 target_vblank
= target
- drm_crtc_vblank_count(crtc
) +
3795 amdgpu_get_vblank_counter_kms(crtc
->dev
, acrtc
->crtc_id
);
3797 /*TODO This might fail and hence better not used, wait
3798 * explicitly on fences instead
3799 * and in general should be called for
3800 * blocking commit to as per framework helpers
3802 r
= amdgpu_bo_reserve(abo
, true);
3803 if (unlikely(r
!= 0)) {
3804 DRM_ERROR("failed to reserve buffer before flip\n");
3808 /* Wait for all fences on this FB */
3809 WARN_ON(reservation_object_wait_timeout_rcu(abo
->tbo
.resv
, true, false,
3810 MAX_SCHEDULE_TIMEOUT
) < 0);
3812 amdgpu_bo_unreserve(abo
);
3814 /* Wait until we're out of the vertical blank period before the one
3815 * targeted by the flip
3817 while ((acrtc
->enabled
&&
3818 (amdgpu_get_crtc_scanoutpos(adev
->ddev
, acrtc
->crtc_id
, 0,
3819 &vpos
, &hpos
, NULL
, NULL
,
3821 & (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
)) ==
3822 (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
) &&
3823 (int)(target_vblank
-
3824 amdgpu_get_vblank_counter_kms(adev
->ddev
, acrtc
->crtc_id
)) > 0)) {
3825 usleep_range(1000, 1100);
3829 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
3830 /* update crtc fb */
3831 crtc
->primary
->fb
= fb
;
3833 WARN_ON(acrtc
->pflip_status
!= AMDGPU_FLIP_NONE
);
3834 WARN_ON(!acrtc_state
->stream
);
3836 addr
.address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3837 addr
.address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3838 addr
.flip_immediate
= async_flip
;
3841 if (acrtc
->base
.state
->event
)
3842 prepare_flip_isr(acrtc
);
3844 surface_updates
->surface
= dc_stream_get_status(acrtc_state
->stream
)->surfaces
[0];
3845 surface_updates
->flip_addr
= &addr
;
3848 dc_update_surfaces_and_stream(adev
->dm
.dc
, surface_updates
, 1, acrtc_state
->stream
, NULL
);
3850 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
3852 addr
.address
.grph
.addr
.high_part
,
3853 addr
.address
.grph
.addr
.low_part
);
3856 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3859 static void amdgpu_dm_commit_surfaces(struct drm_atomic_state
*state
,
3860 struct drm_device
*dev
,
3861 struct amdgpu_display_manager
*dm
,
3862 struct drm_crtc
*pcrtc
,
3863 bool *wait_for_vblank
)
3866 struct drm_plane
*plane
;
3867 struct drm_plane_state
*old_plane_state
;
3868 struct dc_stream
*dc_stream_attach
;
3869 struct dc_surface
*dc_surfaces_constructed
[MAX_SURFACES
];
3870 struct amdgpu_crtc
*acrtc_attach
= to_amdgpu_crtc(pcrtc
);
3871 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(pcrtc
->state
);
3872 int planes_count
= 0;
3873 unsigned long flags
;
3875 /* update planes when needed */
3876 for_each_plane_in_state(state
, plane
, old_plane_state
, i
) {
3877 struct drm_plane_state
*plane_state
= plane
->state
;
3878 struct drm_crtc
*crtc
= plane_state
->crtc
;
3879 struct drm_framebuffer
*fb
= plane_state
->fb
;
3881 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(plane_state
);
3883 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
3884 handle_cursor_update(plane
, old_plane_state
);
3888 if (!fb
|| !crtc
|| pcrtc
!= crtc
|| !crtc
->state
->active
||
3889 (!crtc
->state
->planes_changed
&&
3890 !pcrtc
->state
->color_mgmt_changed
))
3893 pflip_needed
= !state
->allow_modeset
;
3895 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
3896 if (acrtc_attach
->pflip_status
!= AMDGPU_FLIP_NONE
) {
3897 DRM_ERROR("add_surface: acrtc %d, already busy\n",
3898 acrtc_attach
->crtc_id
);
3899 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3900 /* In comit tail framework this cannot happen */
3903 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
3905 if (!pflip_needed
) {
3906 WARN_ON(!dm_plane_state
->surface
);
3908 dc_surfaces_constructed
[planes_count
] = dm_plane_state
->surface
;
3910 dc_stream_attach
= acrtc_state
->stream
;
3913 } else if (crtc
->state
->planes_changed
) {
3914 /* Assume even ONE crtc with immediate flip means
3915 * entire can't wait for VBLANK
3916 * TODO Check if it's correct
3919 acrtc_attach
->flip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
?
3922 /* TODO: Needs rework for multiplane flip */
3923 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
)
3924 drm_crtc_vblank_get(crtc
);
3929 drm_crtc_vblank_count(crtc
) + *wait_for_vblank
);
3931 /*TODO BUG remove ASAP in 4.12 to avoid race between worker and flip IOCTL */
3933 /*clean up the flags for next usage*/
3934 acrtc_attach
->flip_flags
= 0;
3940 unsigned long flags
;
3942 if (pcrtc
->state
->event
) {
3944 drm_crtc_vblank_get(pcrtc
);
3946 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
3947 prepare_flip_isr(acrtc_attach
);
3948 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
3951 if (false == dc_commit_surfaces_to_stream(dm
->dc
,
3952 dc_surfaces_constructed
,
3955 dm_error("%s: Failed to attach surface!\n", __func__
);
3957 /*TODO BUG Here should go disable planes on CRTC. */
3962 int amdgpu_dm_atomic_commit(
3963 struct drm_device
*dev
,
3964 struct drm_atomic_state
*state
,
3967 struct drm_crtc
*crtc
;
3968 struct drm_crtc_state
*new_state
;
3969 struct amdgpu_device
*adev
= dev
->dev_private
;
3973 * We evade vblanks and pflips on crtc that
3974 * should be changed. We do it here to flush & disable
3975 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
3976 * it will update crtc->dm_crtc_state->stream pointer which is used in
3979 for_each_crtc_in_state(state
, crtc
, new_state
, i
) {
3980 struct dm_crtc_state
*old_acrtc_state
= to_dm_crtc_state(crtc
->state
);
3981 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3983 if (drm_atomic_crtc_needs_modeset(new_state
) && old_acrtc_state
->stream
)
3984 manage_dm_interrupts(adev
, acrtc
, false);
3987 return drm_atomic_helper_commit(dev
, state
, nonblock
);
3989 /*TODO Handle EINTR, reenable IRQ*/
3992 void amdgpu_dm_atomic_commit_tail(
3993 struct drm_atomic_state
*state
)
3995 struct drm_device
*dev
= state
->dev
;
3996 struct amdgpu_device
*adev
= dev
->dev_private
;
3997 struct amdgpu_display_manager
*dm
= &adev
->dm
;
3998 struct dm_atomic_state
*dm_state
;
4000 uint32_t new_crtcs_count
= 0;
4001 struct drm_crtc
*crtc
, *pcrtc
;
4002 struct drm_crtc_state
*old_crtc_state
;
4003 struct amdgpu_crtc
*new_crtcs
[MAX_STREAMS
];
4004 struct dc_stream
*new_stream
= NULL
;
4005 unsigned long flags
;
4006 bool wait_for_vblank
= true;
4007 struct drm_connector
*connector
;
4008 struct drm_connector_state
*old_conn_state
;
4009 struct dm_crtc_state
*old_acrtc_state
, *new_acrtc_state
;
4011 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
4013 dm_state
= to_dm_atomic_state(state
);
4015 /* update changed items */
4016 for_each_crtc_in_state(state
, crtc
, old_crtc_state
, i
) {
4017 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4018 struct drm_crtc_state
*new_state
= crtc
->state
;
4019 new_acrtc_state
= to_dm_crtc_state(new_state
);
4020 old_acrtc_state
= to_dm_crtc_state(old_crtc_state
);
4023 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4024 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4025 "connectors_changed:%d\n",
4029 new_state
->planes_changed
,
4030 new_state
->mode_changed
,
4031 new_state
->active_changed
,
4032 new_state
->connectors_changed
);
4034 /* handles headless hotplug case, updating new_state and
4035 * aconnector as needed
4038 if (modeset_required(new_state
)) {
4040 DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc
->crtc_id
, acrtc
);
4042 if (!new_acrtc_state
->stream
) {
4044 * this could happen because of issues with
4045 * userspace notifications delivery.
4046 * In this case userspace tries to set mode on
4047 * display which is disconnect in fact.
4048 * dc_sink in NULL in this case on aconnector.
4049 * We expect reset mode will come soon.
4051 * This can also happen when unplug is done
4052 * during resume sequence ended
4054 * In this case, we want to pretend we still
4055 * have a sink to keep the pipe running so that
4056 * hw state is consistent with the sw state
4058 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
4059 __func__
, acrtc
->base
.base
.id
);
4064 if (old_acrtc_state
->stream
)
4065 remove_stream(adev
, acrtc
, old_acrtc_state
->stream
);
4069 * this loop saves set mode crtcs
4070 * we needed to enable vblanks once all
4071 * resources acquired in dc after dc_commit_streams
4074 /*TODO move all this into dm_crtc_state, get rid of
4075 * new_crtcs array and use old and new atomic states
4078 new_crtcs
[new_crtcs_count
] = acrtc
;
4081 acrtc
->enabled
= true;
4082 acrtc
->hw_mode
= crtc
->state
->mode
;
4083 crtc
->hwmode
= crtc
->state
->mode
;
4084 } else if (modereset_required(new_state
)) {
4085 DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc
->crtc_id
, acrtc
);
4087 /* i.e. reset mode */
4088 if (old_acrtc_state
->stream
)
4089 remove_stream(adev
, acrtc
, old_acrtc_state
->stream
);
4091 } /* for_each_crtc_in_state() */
4094 * Add streams after required streams from new and replaced streams
4095 * are removed from freesync module
4097 if (adev
->dm
.freesync_module
) {
4098 for (i
= 0; i
< new_crtcs_count
; i
++) {
4099 struct amdgpu_connector
*aconnector
= NULL
;
4100 new_acrtc_state
= to_dm_crtc_state(new_crtcs
[i
]->base
.state
);
4102 new_stream
= new_acrtc_state
->stream
;
4104 amdgpu_dm_find_first_crct_matching_connector(
4106 &new_crtcs
[i
]->base
,
4110 "Atomic commit: Failed to find connector for acrtc id:%d "
4111 "skipping freesync init\n",
4112 new_crtcs
[i
]->crtc_id
);
4116 mod_freesync_add_stream(adev
->dm
.freesync_module
,
4117 new_stream
, &aconnector
->caps
);
4121 if (dm_state
->context
)
4122 WARN_ON(!dc_commit_context(dm
->dc
, dm_state
->context
));
4125 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
4126 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4127 new_acrtc_state
= to_dm_crtc_state(crtc
->state
);
4129 if (new_acrtc_state
->stream
!= NULL
) {
4130 const struct dc_stream_status
*status
=
4131 dc_stream_get_status(new_acrtc_state
->stream
);
4134 DC_ERR("got no status for stream %p on acrtc%p\n", new_acrtc_state
->stream
, acrtc
);
4136 acrtc
->otg_inst
= status
->primary_otg_inst
;
4140 /* Handle scaling and undersacn changes*/
4141 for_each_connector_in_state(state
, connector
, old_conn_state
, i
) {
4142 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
4143 struct dm_connector_state
*con_new_state
=
4144 to_dm_connector_state(aconnector
->base
.state
);
4145 struct dm_connector_state
*con_old_state
=
4146 to_dm_connector_state(old_conn_state
);
4147 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(con_new_state
->base
.crtc
);
4148 struct dc_stream_status
*status
= NULL
;
4150 /* Skip any modesets/resets */
4151 if (!acrtc
|| drm_atomic_crtc_needs_modeset(acrtc
->base
.state
))
4154 /* Skip any thing not scale or underscan changes */
4155 if (!is_scaling_state_different(con_new_state
, con_old_state
))
4158 new_acrtc_state
= to_dm_crtc_state(acrtc
->base
.state
);
4160 update_stream_scaling_settings(&con_new_state
->base
.crtc
->mode
,
4161 con_new_state
, (struct dc_stream
*)new_acrtc_state
->stream
);
4163 status
= dc_stream_get_status(new_acrtc_state
->stream
);
4165 WARN_ON(!status
->surface_count
);
4167 if (!new_acrtc_state
->stream
)
4170 /*TODO How it works with MPO ?*/
4171 if (!dc_commit_surfaces_to_stream(
4174 status
->surface_count
,
4175 new_acrtc_state
->stream
))
4176 dm_error("%s: Failed to update stream scaling!\n", __func__
);
4179 for (i
= 0; i
< new_crtcs_count
; i
++) {
4181 * loop to enable interrupts on newly arrived crtc
4183 struct amdgpu_crtc
*acrtc
= new_crtcs
[i
];
4184 new_acrtc_state
= to_dm_crtc_state(acrtc
->base
.state
);
4186 if (adev
->dm
.freesync_module
)
4187 mod_freesync_notify_mode_change(
4188 adev
->dm
.freesync_module
, &new_acrtc_state
->stream
, 1);
4190 manage_dm_interrupts(adev
, acrtc
, true);
4193 /* update planes when needed per crtc*/
4194 for_each_crtc_in_state(state
, pcrtc
, old_crtc_state
, j
) {
4195 new_acrtc_state
= to_dm_crtc_state(pcrtc
->state
);
4197 if (new_acrtc_state
->stream
)
4198 amdgpu_dm_commit_surfaces(state
, dev
, dm
, pcrtc
, &wait_for_vblank
);
4203 * send vblank event on all events not handled in flip and
4204 * mark consumed event for drm_atomic_helper_commit_hw_done
4206 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
4207 for_each_crtc_in_state(state
, crtc
, old_crtc_state
, i
) {
4208 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4210 if (acrtc
->base
.state
->event
)
4211 drm_send_event_locked(dev
, &crtc
->state
->event
->base
);
4213 acrtc
->base
.state
->event
= NULL
;
4215 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
4217 /* Signal HW programming completion */
4218 drm_atomic_helper_commit_hw_done(state
);
4220 if (wait_for_vblank
)
4221 drm_atomic_helper_wait_for_vblanks(dev
, state
);
4223 drm_atomic_helper_cleanup_planes(dev
, state
);
4227 static int dm_force_atomic_commit(struct drm_connector
*connector
)
4230 struct drm_device
*ddev
= connector
->dev
;
4231 struct drm_atomic_state
*state
= drm_atomic_state_alloc(ddev
);
4232 struct amdgpu_crtc
*disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4233 struct drm_plane
*plane
= disconnected_acrtc
->base
.primary
;
4234 struct drm_connector_state
*conn_state
;
4235 struct drm_crtc_state
*crtc_state
;
4236 struct drm_plane_state
*plane_state
;
4241 state
->acquire_ctx
= ddev
->mode_config
.acquire_ctx
;
4243 /* Construct an atomic state to restore previous display setting */
4246 * Attach connectors to drm_atomic_state
4248 conn_state
= drm_atomic_get_connector_state(state
, connector
);
4250 ret
= PTR_ERR_OR_ZERO(conn_state
);
4254 /* Attach crtc to drm_atomic_state*/
4255 crtc_state
= drm_atomic_get_crtc_state(state
, &disconnected_acrtc
->base
);
4257 ret
= PTR_ERR_OR_ZERO(crtc_state
);
4261 /* force a restore */
4262 crtc_state
->mode_changed
= true;
4264 /* Attach plane to drm_atomic_state */
4265 plane_state
= drm_atomic_get_plane_state(state
, plane
);
4267 ret
= PTR_ERR_OR_ZERO(plane_state
);
4272 /* Call commit internally with the state we just constructed */
4273 ret
= drm_atomic_commit(state
);
4278 DRM_ERROR("Restoring old state failed with %i\n", ret
);
4279 drm_atomic_state_put(state
);
4285 * This functions handle all cases when set mode does not come upon hotplug.
4286 * This include when the same display is unplugged then plugged back into the
4287 * same port and when we are running without usermode desktop manager supprot
4289 void dm_restore_drm_connector_state(struct drm_device
*dev
, struct drm_connector
*connector
)
4291 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
4292 struct amdgpu_crtc
*disconnected_acrtc
;
4293 struct dm_crtc_state
*acrtc_state
;
4295 if (!aconnector
->dc_sink
|| !connector
->state
|| !connector
->encoder
)
4298 disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4299 acrtc_state
= to_dm_crtc_state(disconnected_acrtc
->base
.state
);
4301 if (!disconnected_acrtc
|| !acrtc_state
->stream
)
4305 * If the previous sink is not released and different from the current,
4306 * we deduce we are in a state where we can not rely on usermode call
4307 * to turn on the display, so we do it here
4309 if (acrtc_state
->stream
->sink
!= aconnector
->dc_sink
)
4310 dm_force_atomic_commit(&aconnector
->base
);
4313 static uint32_t add_val_sets_surface(
4314 struct dc_validation_set
*val_sets
,
4316 const struct dc_stream
*stream
,
4317 struct dc_surface
*surface
)
4319 uint32_t i
= 0, j
= 0;
4321 while (i
< set_count
) {
4322 if (val_sets
[i
].stream
== stream
) {
4323 while (val_sets
[i
].surfaces
[j
])
4330 val_sets
[i
].surfaces
[j
] = surface
;
4331 val_sets
[i
].surface_count
++;
4333 return val_sets
[i
].surface_count
;
4336 static uint32_t update_in_val_sets_stream(
4337 struct dc_validation_set
*val_sets
,
4339 struct dc_stream
*old_stream
,
4340 struct dc_stream
*new_stream
,
4341 struct drm_crtc
*crtc
)
4345 while (i
< set_count
) {
4346 if (val_sets
[i
].stream
== old_stream
)
4351 val_sets
[i
].stream
= new_stream
;
4354 /* nothing found. add new one to the end */
4355 return set_count
+ 1;
4360 static uint32_t remove_from_val_sets(
4361 struct dc_validation_set
*val_sets
,
4363 const struct dc_stream
*stream
)
4367 for (i
= 0; i
< set_count
; i
++)
4368 if (val_sets
[i
].stream
== stream
)
4371 if (i
== set_count
) {
4378 for (; i
< set_count
; i
++) {
4379 val_sets
[i
] = val_sets
[i
+ 1];
4386 * Grabs all modesetting locks to serialize against any blocking commits,
4387 * Waits for completion of all non blocking commits.
4389 static int do_aquire_global_lock(
4390 struct drm_device
*dev
,
4391 struct drm_atomic_state
*state
)
4393 struct drm_crtc
*crtc
;
4394 struct drm_crtc_commit
*commit
;
4397 /* Adding all modeset locks to aquire_ctx will
4398 * ensure that when the framework release it the
4399 * extra locks we are locking here will get released to
4401 ret
= drm_modeset_lock_all_ctx(dev
, state
->acquire_ctx
);
4405 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
4406 spin_lock(&crtc
->commit_lock
);
4407 commit
= list_first_entry_or_null(&crtc
->commit_list
,
4408 struct drm_crtc_commit
, commit_entry
);
4410 drm_crtc_commit_get(commit
);
4411 spin_unlock(&crtc
->commit_lock
);
4416 /* Make sure all pending HW programming completed and
4419 ret
= wait_for_completion_interruptible_timeout(&commit
->hw_done
, 10*HZ
);
4422 ret
= wait_for_completion_interruptible_timeout(
4423 &commit
->flip_done
, 10*HZ
);
4426 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
4427 "timed out\n", crtc
->base
.id
, crtc
->name
);
4429 drm_crtc_commit_put(commit
);
4432 return ret
< 0 ? ret
: 0;
4435 int amdgpu_dm_atomic_check(struct drm_device
*dev
,
4436 struct drm_atomic_state
*state
)
4438 struct dm_atomic_state
*dm_state
;
4439 struct drm_crtc
*crtc
;
4440 struct drm_crtc_state
*crtc_state
;
4441 struct drm_plane
*plane
;
4442 struct drm_plane_state
*plane_state
;
4445 struct amdgpu_device
*adev
= dev
->dev_private
;
4446 struct dc
*dc
= adev
->dm
.dc
;
4447 struct drm_connector
*connector
;
4448 struct drm_connector_state
*conn_state
;
4450 struct dc_validation_set set
[MAX_STREAMS
] = { { 0 } };
4451 struct dm_crtc_state
*old_acrtc_state
, *new_acrtc_state
;
4454 * This bool will be set for true for any modeset/reset
4455 * or surface update which implies non fast surface update.
4457 bool lock_and_validation_needed
= false;
4459 ret
= drm_atomic_helper_check_modeset(dev
, state
);
4462 DRM_ERROR("Atomic state validation failed with error :%d !\n", ret
);
4466 dm_state
= to_dm_atomic_state(state
);
4468 /* copy existing configuration */
4470 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
4472 old_acrtc_state
= to_dm_crtc_state(crtc
->state
);
4474 if (old_acrtc_state
->stream
) {
4475 dc_stream_retain(old_acrtc_state
->stream
);
4476 set
[set_count
].stream
= old_acrtc_state
->stream
;
4481 /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
4482 /* update changed items */
4483 for_each_crtc_in_state(state
, crtc
, crtc_state
, i
) {
4484 struct amdgpu_crtc
*acrtc
= NULL
;
4485 struct amdgpu_connector
*aconnector
= NULL
;
4486 old_acrtc_state
= to_dm_crtc_state(crtc
->state
);
4487 new_acrtc_state
= to_dm_crtc_state(crtc_state
);
4488 acrtc
= to_amdgpu_crtc(crtc
);
4490 aconnector
= amdgpu_dm_find_first_crct_matching_connector(state
, crtc
, true);
4493 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4494 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4495 "connectors_changed:%d\n",
4499 crtc_state
->planes_changed
,
4500 crtc_state
->mode_changed
,
4501 crtc_state
->active_changed
,
4502 crtc_state
->connectors_changed
);
4504 if (modeset_required(crtc_state
)) {
4506 struct dc_stream
*new_stream
= NULL
;
4507 struct drm_connector_state
*conn_state
= NULL
;
4508 struct dm_connector_state
*dm_conn_state
= NULL
;
4511 conn_state
= drm_atomic_get_connector_state(state
, &aconnector
->base
);
4512 if (IS_ERR(conn_state
)) {
4513 ret
= PTR_ERR_OR_ZERO(conn_state
);
4517 dm_conn_state
= to_dm_connector_state(conn_state
);
4520 new_stream
= create_stream_for_sink(aconnector
, &crtc_state
->mode
, dm_conn_state
);
4523 * we can have no stream on ACTION_SET if a display
4524 * was disconnected during S3, in this case it not and
4525 * error, the OS will be updated after detection, and
4526 * do the right thing on next atomic commit
4529 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
4530 __func__
, acrtc
->base
.base
.id
);
4534 if (new_acrtc_state
->stream
)
4535 dc_stream_release(new_acrtc_state
->stream
);
4537 new_acrtc_state
->stream
= new_stream
;
4539 set_count
= update_in_val_sets_stream(
4542 old_acrtc_state
->stream
,
4543 new_acrtc_state
->stream
,
4546 lock_and_validation_needed
= true;
4548 } else if (modereset_required(crtc_state
)) {
4550 /* i.e. reset mode */
4551 if (new_acrtc_state
->stream
) {
4552 set_count
= remove_from_val_sets(
4555 new_acrtc_state
->stream
);
4557 dc_stream_release(new_acrtc_state
->stream
);
4558 new_acrtc_state
->stream
= NULL
;
4560 lock_and_validation_needed
= true;
4566 * Hack: Commit needs planes right now, specifically for gamma
4567 * TODO rework commit to check CRTC for gamma change
4569 if (crtc_state
->color_mgmt_changed
) {
4571 ret
= drm_atomic_add_affected_planes(state
, crtc
);
4577 /* Check scaling and undersacn changes*/
4578 /*TODO Removed scaling changes validation due to inability to commit
4579 * new stream into context w\o causing full reset. Need to
4580 * decide how to handle.
4582 for_each_connector_in_state(state
, connector
, conn_state
, i
) {
4583 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
4584 struct dm_connector_state
*con_old_state
=
4585 to_dm_connector_state(aconnector
->base
.state
);
4586 struct dm_connector_state
*con_new_state
=
4587 to_dm_connector_state(conn_state
);
4588 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(con_new_state
->base
.crtc
);
4590 /* Skip any modesets/resets */
4591 if (!acrtc
|| drm_atomic_crtc_needs_modeset(acrtc
->base
.state
))
4594 /* Skip any thing not scale or underscan chnages */
4595 if (!is_scaling_state_different(con_new_state
, con_old_state
))
4598 lock_and_validation_needed
= true;
4601 for_each_crtc_in_state(state
, crtc
, crtc_state
, i
) {
4602 new_acrtc_state
= to_dm_crtc_state(crtc_state
);
4604 for_each_plane_in_state(state
, plane
, plane_state
, j
) {
4605 struct drm_crtc
*plane_crtc
= plane_state
->crtc
;
4606 struct drm_framebuffer
*fb
= plane_state
->fb
;
4608 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(plane_state
);
4610 /*TODO Implement atomic check for cursor plane */
4611 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
4614 if (!fb
|| !plane_crtc
|| crtc
!= plane_crtc
|| !crtc_state
->active
)
4617 WARN_ON(!new_acrtc_state
->stream
);
4619 pflip_needed
= !state
->allow_modeset
;
4620 if (!pflip_needed
) {
4621 struct dc_surface
*surface
;
4623 surface
= dc_create_surface(dc
);
4625 ret
= fill_plane_attributes(
4626 plane_crtc
->dev
->dev_private
,
4635 if (dm_plane_state
->surface
)
4636 dc_surface_release(dm_plane_state
->surface
);
4638 dm_plane_state
->surface
= surface
;
4640 add_val_sets_surface(set
,
4642 new_acrtc_state
->stream
,
4645 lock_and_validation_needed
= true;
4650 /* Run this here since we want to validate the streams we created */
4651 ret
= drm_atomic_helper_check_planes(dev
, state
);
4656 * For full updates case when
4657 * removing/adding/updating streams on once CRTC while flipping
4659 * acquiring global lock will guarantee that any such full
4661 * will wait for completion of any outstanding flip using DRMs
4662 * synchronization events.
4665 if (lock_and_validation_needed
) {
4667 ret
= do_aquire_global_lock(dev
, state
);
4670 WARN_ON(dm_state
->context
);
4671 dm_state
->context
= dc_get_validate_context(dc
, set
, set_count
);
4672 if (!dm_state
->context
) {
4678 /* Must be success */
4683 if (ret
== -EDEADLK
)
4684 DRM_DEBUG_KMS("Atomic check stopped due to to deadlock.\n");
4685 else if (ret
== -EINTR
|| ret
== -EAGAIN
|| ret
== -ERESTARTSYS
)
4686 DRM_DEBUG_KMS("Atomic check stopped due to to signal.\n");
4688 DRM_ERROR("Atomic check failed with err: %d .\n", ret
);
4693 static bool is_dp_capable_without_timing_msa(
4695 struct amdgpu_connector
*amdgpu_connector
)
4698 bool capable
= false;
4700 if (amdgpu_connector
->dc_link
&&
4701 dm_helpers_dp_read_dpcd(
4703 amdgpu_connector
->dc_link
,
4704 DP_DOWN_STREAM_PORT_COUNT
,
4706 sizeof(dpcd_data
))) {
4707 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
4712 void amdgpu_dm_add_sink_to_freesync_module(
4713 struct drm_connector
*connector
,
4717 uint64_t val_capable
;
4718 bool edid_check_required
;
4719 struct detailed_timing
*timing
;
4720 struct detailed_non_pixel
*data
;
4721 struct detailed_data_monitor_range
*range
;
4722 struct amdgpu_connector
*amdgpu_connector
=
4723 to_amdgpu_connector(connector
);
4725 struct drm_device
*dev
= connector
->dev
;
4726 struct amdgpu_device
*adev
= dev
->dev_private
;
4727 edid_check_required
= false;
4728 if (!amdgpu_connector
->dc_sink
) {
4729 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
4732 if (!adev
->dm
.freesync_module
)
4735 * if edid non zero restrict freesync only for dp and edp
4738 if (amdgpu_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
4739 || amdgpu_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
4740 edid_check_required
= is_dp_capable_without_timing_msa(
4746 if (edid_check_required
== true && (edid
->version
> 1 ||
4747 (edid
->version
== 1 && edid
->revision
> 1))) {
4748 for (i
= 0; i
< 4; i
++) {
4750 timing
= &edid
->detailed_timings
[i
];
4751 data
= &timing
->data
.other_data
;
4752 range
= &data
->data
.range
;
4754 * Check if monitor has continuous frequency mode
4756 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
4759 * Check for flag range limits only. If flag == 1 then
4760 * no additional timing information provided.
4761 * Default GTF, GTF Secondary curve and CVT are not
4764 if (range
->flags
!= 1)
4767 amdgpu_connector
->min_vfreq
= range
->min_vfreq
;
4768 amdgpu_connector
->max_vfreq
= range
->max_vfreq
;
4769 amdgpu_connector
->pixel_clock_mhz
=
4770 range
->pixel_clock_mhz
* 10;
4774 if (amdgpu_connector
->max_vfreq
-
4775 amdgpu_connector
->min_vfreq
> 10) {
4776 amdgpu_connector
->caps
.supported
= true;
4777 amdgpu_connector
->caps
.min_refresh_in_micro_hz
=
4778 amdgpu_connector
->min_vfreq
* 1000000;
4779 amdgpu_connector
->caps
.max_refresh_in_micro_hz
=
4780 amdgpu_connector
->max_vfreq
* 1000000;
4786 * TODO figure out how to notify user-mode or DRM of freesync caps
4787 * once we figure out how to deal with freesync in an upstreamable
4793 void amdgpu_dm_remove_sink_from_freesync_module(
4794 struct drm_connector
*connector
)
4797 * TODO fill in once we figure out how to deal with freesync in
4798 * an upstreamable fashion