/* drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c */

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_types.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>

#include "modules/inc/mod_freesync.h"

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/irqsrcs_dcn_1_0.h"

#include "raven1/DCN/dcn_1_0_offset.h"
#include "raven1/DCN/dcn_1_0_sh_mask.h"
#include "vega10/soc15ip.h"

#include "soc15_common.h"
#endif

static enum drm_plane_type dm_surfaces_type_default[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
};

static enum drm_plane_type dm_surfaces_type_carizzo[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};

static enum drm_plane_type dm_surfaces_type_stoney[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] index of the CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (NULL == acrtc->stream) {
			DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (NULL == acrtc->stream) {
			DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

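/*
 * Note on the packed "reg-format" above: the scanout position and the
 * vblank window are each packed into one u32, low half-word first. As a
 * worked sketch (the mode timings here are hypothetical): for a stream
 * whose vertical blank spans lines 1080-1125, *vbl ends up as
 * 1080 | (1125 << 16), and a beam position of h = 100, v = 500 is
 * reported as *position = 500 | (100 << 16).
 */
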
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *get_crtc_by_otg_inst(
	struct amdgpu_device *adev,
	int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * The following check is inherited from both functions where this
	 * one is now used. It still needs to be investigated why this case
	 * can happen at all.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* wake up userspace */
	if (amdgpu_crtc->event
	    && amdgpu_crtc->event->event.base.type
	    == DRM_EVENT_FLIP_COMPLETE) {
		/* Update to correct count/ts if racing with vblank irq */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;
	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
			 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
}

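/*
 * Page-flip handshake, for reference: dm_page_flip() further down in this
 * file stashes the pending DRM event and sets acrtc->pflip_status to
 * AMDGPU_FLIP_SUBMITTED; the handler above then sends the completion event
 * and drops the status back to AMDGPU_FLIP_NONE. Both sides run under
 * adev->ddev->event_lock.
 */
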
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	uint8_t crtc_index = 0;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc)
		crtc_index = acrtc->crtc_id;

	drm_handle_vblank(adev->ddev, crtc_index);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}

/* Init display KMS
 *
 * Returns 0 on success
 */
int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	DRM_INFO("DAL is enabled\n");
	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->mc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (!adev->dm.dc)
		DRM_INFO("Display Core failed to initialize!\n");

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
			"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_INFO("amdgpu: freesync_module init done %p.\n",
			 adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_INFO("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -1;
}

void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	return;
}

/* moved from amdgpu_dm_kms.c */
void amdgpu_dm_destroy(void)
{
}

static int dm_sw_init(void *handle)
{
	return 0;
}

static int dm_sw_fini(void *handle)
{
	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch) {
			DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
				 aconnector, aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
				/* break instead of returning here, so the
				 * connection_mutex is dropped on the way out
				 */
				break;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	return ret;
}

static int dm_late_init(void *handle)
{
	struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
	int r = detect_mst_link_for_all_connectors(dev);

	return r;
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    !aconnector->mst_port) {

			if (suspend)
				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
			else
				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D3
		);

	return ret;
}

struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
	struct drm_atomic_state *state,
	struct drm_crtc *crtc,
	bool from_state_var)
{
	uint32_t i;
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_connector_in_state(
		state,
		connector,
		conn_state,
		i) {
		crtc_from_state =
			from_state_var ?
				conn_state->crtc :
				connector->state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_connector(connector);
	}

	return NULL;
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;

	/* power on hardware */
	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D0
		);

	return 0;
}

int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		dc_link_detect(aconnector->dc_link, false);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_crtc_in_state(adev->dm.cached_state, crtc, crtc_state, i)
		crtc_state->active_changed = true;

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	amdgpu_dm_irq_resume_late(adev);

	return ret;
}

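/*
 * Resume ordering used above, for reference: dc_resume() reprograms the
 * HPD filter, the MSTM control bits are rewritten, HPD Rx interrupts are
 * enabled early (MST detection relies on short-pulse interrupts), all
 * non-MST-port connectors are re-detected, and only then is the atomic
 * state cached by dm_suspend() restored, with every CRTC forced through a
 * full modeset.
 */
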
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/* TODO: this is temporarily non-const, should be fixed later */
static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

void amdgpu_dm_update_connector_after_detect(
	struct amdgpu_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	sink = aconnector->dc_link->local_sink;

	/* Edid mgmt connector gets first update only in mode_valid hook, and
	 * then the connector sink is set to either a fake or a physical sink
	 * depending on link status. Don't do it here during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/* For S3 resume with headless use dc_em_sink to fake the
		 * stream, because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
							connector);
				/* The retain and release below are used to
				 * bump up the refcount for the sink, because
				 * the link doesn't point to it anymore after
				 * disconnect, so on the next crtc-to-connector
				 * reshuffle by UMD we would run into an
				 * unwanted dc_sink release
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
						connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!! */
		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
			 aconnector->connector_id);
		return;
	}

	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		 aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here. */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
							connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0)
			aconnector->edid = NULL;
		else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;


			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST no need to update connector status or
	 * notify the OS since (for the MST case) MST does this in its own
	 * context.
	 */
	mutex_lock(&aconnector->hpd_lock);
	if (dc_link_detect(aconnector->dc_link, false)) {
		amdgpu_dm_update_connector_after_detect(aconnector);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
			process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else
			break;
	}

	if (process_count == max_process_count)
		DRM_DEBUG_KMS("Loop exceeded max iterations\n");
}

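/*
 * Worked example for the DPCD arithmetic above (register offsets as
 * defined in drm_dp_helper.h): on a DPCD rev 1.2+ sink,
 * dpcd_bytes_to_read = DP_PSR_ERROR_STATUS (0x2006) - DP_SINK_COUNT_ESI
 * (0x2002) = 4, so a single AUX read covers 0x2002-0x2005. The ACK then
 * writes the trailing three bytes back starting at dpcd_addr + 1
 * (0x2003, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0).
 */
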
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO: Temporarily take the mutex so the hpd interrupt does not run
	 * into a gpio conflict; once the i2c helper is implemented, this
	 * mutex should be retired.
	 */
	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(aconnector->dc_link, false)) {
			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = AMDGPU_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

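/*
 * Note on the loop strides above: the D1..D6 vertical-interrupt source IDs
 * are consecutive, so that loop steps by one, while the GRPH_PFLIP source
 * IDs interleave with other per-display sources in ivsrcid_vislands30.h,
 * hence the i += 2 stride in the page-flip loop.
 */
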
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicate support of immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	return 0;
}

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	if (dc_link_set_backlight_level(dm->backlight_link,
			bd->props.brightness, 0, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
			dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (NULL == dm->backlight_dev)
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

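/*
 * Usage note: the device registered above shows up to userspace as
 * /sys/class/backlight/amdgpu_bl<N>/, so (assuming the first instance)
 * "echo 128 > /sys/class/backlight/amdgpu_bl0/brightness" lands in
 * amdgpu_dm_backlight_update_status() and from there in
 * dc_link_set_backlight_level().
 */
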
/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	uint32_t i;
	struct amdgpu_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	for (i = 0; i < dm->dc->caps.max_surfaces; i++) {
		mode_info->planes[i] = kzalloc(sizeof(struct amdgpu_plane),
					       GFP_KERNEL);
		if (!mode_info->planes[i]) {
			DRM_ERROR("KMS: Failed to allocate surface\n");
			goto fail_free_planes;
		}
		mode_info->planes[i]->plane_type = mode_info->plane_type[i];
		if (amdgpu_dm_plane_init(dm, mode_info->planes[i], 0xff)) {
			DRM_ERROR("KMS: Failed to initialize plane\n");
			goto fail_free_planes;
		}
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail_free_planes;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail_free_planes;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder) {
			goto fail_free_connector;
		}

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail_free_encoder;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail_free_encoder;
		}

		if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
			amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGA10:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail_free_encoder;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail_free_encoder;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail_free_encoder;
	}

	drm_mode_config_reset(dm->ddev);

	return 0;
fail_free_encoder:
	kfree(aencoder);
fail_free_connector:
	kfree(aconnector);
fail_free_planes:
	for (i = 0; i < dm->dc->caps.max_surfaces; i++)
		kfree(mode_info->planes[i]);
	return -1;
}

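/*
 * The error labels above fall through deliberately: fail_free_encoder
 * frees the encoder from the failing loop iteration, fail_free_connector
 * then frees the matching connector, and fail_free_planes finally releases
 * every plane allocated earlier, so a failure at any stage unwinds all
 * prior allocations.
 */
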
void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
				   u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}

static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}

/******************************************************************************
 * Page Flip functions
 ******************************************************************************/

/**
 * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
 *		  via DRM IOCTL, by user mode.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (surface address update).
 */
static void dm_page_flip(struct amdgpu_device *adev,
			 int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *acrtc;
	const struct dc_stream *stream;
	struct dc_flip_addrs addr = { {0} };
	struct dc_surface_update surface_updates[1] = { {0} };

	/*
	 * TODO risk of concurrency issues
	 *
	 * This should be guarded by the dal_mutex but we can't do this since
	 * the caller uses a spin_lock on event_lock.
	 *
	 * If we wait on the dal_mutex a second page flip interrupt might come,
	 * spin on the event_lock, disabling interrupts while it does so. At
	 * this point the core can no longer be pre-empted and return to the
	 * thread that waited on the dal_mutex and we're deadlocked.
	 *
	 * With multiple cores the same essentially happens but might just take
	 * a little longer to lock up all cores.
	 *
	 * The reason we should lock on dal_mutex is so that we can be sure
	 * nobody messes with acrtc->stream after we read and check its value.
	 *
	 * We might be able to fix our concurrency issues with a work queue
	 * where we schedule all work items (mode_set, page_flip, etc.) and
	 * execute them one by one. Care needs to be taken to still deal with
	 * any potential concurrency issues arising from interrupt calls.
	 */

	acrtc = adev->mode_info.crtcs[crtc_id];
	stream = acrtc->stream;

	if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_ERROR("flip queue: acrtc %d, already busy\n", acrtc->crtc_id);
		/* In commit tail framework this cannot happen */
		BUG_ON(0);
	}

	/*
	 * Received a page flip call after the display has been reset.
	 * Just return in this case. Everything should be clean-up on reset.
	 */
	if (!stream) {
		WARN_ON(1);
		return;
	}

	addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
	addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
	addr.flip_immediate = async;

	if (acrtc->base.state->event &&
	    acrtc->base.state->event->event.base.type ==
	    DRM_EVENT_FLIP_COMPLETE) {
		acrtc->event = acrtc->base.state->event;

		/* Set the flip status */
		acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

		/* Mark this event as consumed */
		acrtc->base.state->event = NULL;
	}

	surface_updates->surface = dc_stream_get_status(stream)->surfaces[0];
	surface_updates->flip_addr = &addr;

	dc_update_surfaces_for_stream(adev->dm.dc, surface_updates, 1, stream);

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
			 __func__,
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);
}

static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
				  struct drm_file *filp)
{
	struct mod_freesync_params freesync_params;
	uint8_t num_streams;
	uint8_t i;

	struct amdgpu_device *adev = dev->dev_private;
	int r = 0;

	/* Get freesync enable flag from DRM */

	num_streams = dc_get_current_stream_count(adev->dm.dc);

	for (i = 0; i < num_streams; i++) {
		const struct dc_stream *stream;
		stream = dc_get_stream_at_index(adev->dm.dc, i);

		mod_freesync_update_state(adev->dm.freesync_module,
					  &stream, 1, &freesync_params);
	}

	return r;
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level,/* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level,/* called unconditionally */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip = dm_page_flip, /* called unconditionally */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,

};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(
	struct device *device,
	struct device_attribute *attr,
	const char *buf,
	size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			amdgpu_dm_display_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

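/*
 * Usage sketch for the attribute above (CONFIG_DEBUG_KERNEL_DC only; the
 * exact sysfs path depends on the GPU's PCI address, the one below is
 * hypothetical):
 *   echo 0 > /sys/bus/pci/devices/0000:01:00.0/s3_debug   # fake suspend
 *   echo 1 > /sys/bus/pci/devices/0000:01:00.0/s3_debug   # fake resume
 * A nonzero value runs dm_resume() and amdgpu_dm_display_resume() followed
 * by a hotplug event; zero runs dm_suspend().
 */
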
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_set_irq_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_surfaces_type_carizzo;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_surfaces_type_stoney;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_POLARIS10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_VEGA10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/* Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init() */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}

bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}