/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"

#include "vid.h"
#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_types.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>

#include "modules/inc/mod_freesync.h"

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (NULL == acrtc->target) {
			DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		return dc_target_get_vblank_counter(acrtc->target);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (NULL == acrtc->target) {
			DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		return dc_target_get_scanoutpos(acrtc->target, vbl, position);
	}
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *get_crtc_by_otg_inst(
	struct amdgpu_device *adev,
	int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * The following check was inherited from both functions where this
	 * one is now used. It still needs to be investigated why this can
	 * happen at all.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_flip_work *works;
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	works = amdgpu_crtc->pflip_works;

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wake up userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base,
					   works->event);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, work: %p\n",
			 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc, works);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);
}
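
/*
 * For context: the interrupt above is the completion half of a flip; the
 * submission half lives in the base driver's flip worker (see the
 * dm_page_flip() kernel-doc below). A hedged sketch of the ordering the
 * submission side is expected to follow, shown here only for illustration:
 *
 *	spin_lock_irqsave(&adev->ddev->event_lock, flags);
 *	amdgpu_crtc->pflip_works = works;
 *	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
 *	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 *	adev->mode_info.funcs->page_flip(adev, crtc_id, base, async);
 *
 * dm_pflip_high_irq() then observes AMDGPU_FLIP_SUBMITTED under the same
 * event_lock and completes the flip.
 */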

static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	uint8_t crtc_index = 0;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc)
		crtc_index = acrtc->crtc_id;

	drm_handle_vblank(adev->ddev, crtc_index);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}
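
/*
 * Minimal usage sketch for the worker above: it only fires if it was
 * initialized and later scheduled. Initialization happens via INIT_WORK()
 * in amdgpu_dm_init() below; a caller wanting to defer an MST hotplug
 * notification would do:
 *
 *	schedule_work(&adev->dm.mst_hotplug_work);
 *
 * (Scheduling sites are outside this file; shown for illustration only.)
 */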

/* Init display KMS
 *
 * Returns 0 on success
 */
int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	DRM_INFO("DAL is enabled\n");
	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->mc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (!adev->dm.dc)
		DRM_INFO("Display Core failed to initialize!\n");

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
			"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_INFO("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = 128;
	adev->ddev->mode_config.cursor_height = 128;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_INFO("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -1;
}
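
/*
 * Initialization order above, for reference: DM IRQ bookkeeping first, then
 * the cgs device, then dc_create() brings up Display Core, after which the
 * freesync module and the DRM-facing objects (CRTCs, encoders, connectors)
 * are created and vblank support is initialized. amdgpu_dm_fini() below
 * tears down in roughly the reverse order.
 */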

void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	{
		dc_destroy(&adev->dm.dc);
	}
}

/* moved from amdgpu_dm_kms.c */
void amdgpu_dm_destroy(void)
{
}

static int dm_sw_init(void *handle)
{
	return 0;
}

static int dm_sw_fini(void *handle)
{
	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch) {
			DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					aconnector, aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
				break;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;

	return detect_mst_link_for_all_connectors(dev);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
				!aconnector->mst_port) {

			if (suspend)
				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
			else
				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
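
/*
 * Usage sketch for s3_handle_mst(), matching the call sites in this file:
 * dm_suspend() calls s3_handle_mst(ddev, true) to quiesce the MST topology
 * managers before the CRTCs are turned off, and amdgpu_dm_display_resume()
 * calls s3_handle_mst(ddev, false) to resume them before detection runs.
 */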

static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);

	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;
	struct drm_crtc *crtc;

	s3_handle_mst(adev->ddev, true);

	/* flush all pending vblank events and turn interrupts off
	 * before disabling CRTCs. They will be enabled back in
	 * dm_display_resume
	 */
	drm_modeset_lock_all(adev->ddev);
	list_for_each_entry(crtc, &adev->ddev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
		if (acrtc->target)
			drm_crtc_vblank_off(crtc);
	}
	drm_modeset_unlock_all(adev->ddev);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D3,
		DC_VIDEO_POWER_SUSPEND);

	return ret;
}

struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
	struct drm_atomic_state *state,
	struct drm_crtc *crtc,
	bool from_state_var)
{
	uint32_t i;
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_connector_in_state(
		state,
		connector,
		conn_state,
		i) {
		crtc_from_state =
			from_state_var ?
				conn_state->crtc :
				connector->state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_connector(connector);
	}

	return NULL;
}
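
/*
 * A hedged usage sketch for the lookup above: with from_state_var == true
 * the connector's crtc is taken from the new atomic state being built; with
 * false it is taken from the connector's currently committed state. For
 * example (illustrative only):
 *
 *	aconnector = amdgpu_dm_find_first_crct_matching_connector(
 *			state, crtc, true);
 *	if (aconnector)
 *		... the connector that will drive this crtc in "state" ...
 */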

static int dm_display_resume(struct drm_device *ddev)
{
	int ret = 0;
	struct drm_connector *connector;

	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct amdgpu_connector *aconnector;
	struct drm_connector_state *conn_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state.
	 * Should be done first in order to make connectors available in
	 * state during crtc state processing. It is used for making the
	 * decision whether a crtc should be disabled in case the sink got
	 * disconnected.
	 *
	 * A connector state whose crtc has a NULL dc_sink should be cleared,
	 * because it will fail validation during commit.
	 */
	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		conn_state = drm_atomic_get_connector_state(state, connector);

		ret = PTR_ERR_OR_ZERO(conn_state);
		if (ret)
			goto err;
	}

	/* Attach crtcs to drm_atomic_state */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_crtc_state(state, crtc);

		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (ret)
			goto err;

		/* force a restore */
		crtc_state->mode_changed = true;
	}


	/* Attach planes to drm_atomic_state */
	list_for_each_entry(plane, &ddev->mode_config.plane_list, head) {

		struct drm_crtc *crtc;
		struct drm_gem_object *obj;
		struct drm_framebuffer *fb;
		struct amdgpu_framebuffer *afb;
		struct amdgpu_bo *rbo;
		int r;
		struct drm_plane_state *plane_state = drm_atomic_get_plane_state(state, plane);

		ret = PTR_ERR_OR_ZERO(plane_state);
		if (ret)
			goto err;

		crtc = plane_state->crtc;
		fb = plane_state->fb;

		if (!crtc || !crtc->state || !crtc->state->active)
			continue;

		if (!fb) {
			DRM_DEBUG_KMS("No FB bound\n");
			return 0;
		}

		/*
		 * Pin back the front buffers; the cursor buffer was already
		 * pinned back in amdgpu_resume_kms
		 */

		afb = to_amdgpu_framebuffer(fb);

		obj = afb->obj;
		rbo = gem_to_amdgpu_bo(obj);
		r = amdgpu_bo_reserve(rbo, false);
		if (unlikely(r != 0))
			return r;

		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL);

		amdgpu_bo_unreserve(rbo);

		if (unlikely(r != 0)) {
			DRM_ERROR("Failed to pin framebuffer\n");
			return r;
		}

	}


	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;

	/* power on hardware */
	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D0,
		DC_VIDEO_POWER_ON);

	return 0;
}

int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;
	struct drm_crtc *crtc;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	drm_modeset_lock_all(ddev);
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
		if (acrtc->target)
			drm_crtc_vblank_on(crtc);
	}
	drm_modeset_unlock_all(ddev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		dc_link_detect(aconnector->dc_link, false);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
	}

	drm_modeset_lock_all(ddev);
	ret = dm_display_resume(ddev);
	drm_modeset_unlock_all(ddev);

	amdgpu_dm_irq_resume(adev);

	return ret;
}

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/* TODO: it is temporarily non-const; should be fixed later */
static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit
};

void amdgpu_dm_update_connector_after_detect(
	struct amdgpu_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;

	/* The EDID-managed connector gets its first update only in the
	 * mode_valid hook, and then the connector sink is set to either a
	 * fake or a physical sink depending on link status. Don't do it here
	 * if we are during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/* For S3 resume with headless, use dc_em_sink to fake the
		 * target because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
					connector);
				/* retain and release below are used to bump
				 * up the refcount for the sink because the
				 * link doesn't point to it anymore after
				 * disconnect, so on the next crtc-to-connector
				 * reshuffle by UMD we would otherwise get an
				 * unwanted dc_sink release
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
						connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!! */
		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		return;
	}

	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here. */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
							connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0)
			aconnector->edid = NULL;
		else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (for the MST case) MST does this in
	 * its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);
	if (dc_link_detect(aconnector->dc_link, false)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
			process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);
#endif

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else
			break;
	}

	if (process_count == max_process_count)
		DRM_DEBUG_KMS("Loop exceeded max iterations\n");
}
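
/*
 * Worked example of the DPCD arithmetic above, for a DPCD rev >= 1.2 sink:
 * dpcd_addr is DP_SINK_COUNT_ESI (0x2002) and dpcd_bytes_to_read is
 * DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI, i.e. the 0x2002-0x2005 window is
 * read in one AUX transaction. The ACK then writes esi[1] onward back to
 * dpcd_addr + 1 (0x2003), one byte fewer than was read, since the
 * sink-count byte at 0x2002 itself is not a clearable IRQ vector.
 */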

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO: Temporarily take this mutex so the hpd interrupt does not
	 * race on the gpio; once the i2c helper is implemented, this mutex
	 * should be retired.
	 */
	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(aconnector->dc_link, false)) {
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
			(dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with the base driver.
	 *    The base driver will call set() to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    The base driver will call amdgpu_dm_irq_handler() for ALL
	 *    interrupts coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT;
			i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
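
/*
 * A worked example of the srcid stride above (based on the vislands30
 * ivsrcid table this file includes): the D1..D6 vupdate and pageflip
 * sources interleave, so stepping by 2 from
 * VISLANDS30_IV_SRCID_D1_V_UPDATE_INT visits exactly the six vupdate
 * sources, and stepping by 2 from VISLANDS30_IV_SRCID_D1_GRPH_PFLIP visits
 * the six pageflip sources. dc_interrupt_to_irq_source(dc, i, 0) then maps
 * each srcid to its DC_IRQ_SOURCE_* value so the per-CRTC
 * common_irq_params slot can be selected.
 */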

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	int r;

	adev->mode_info.mode_config_initialized = true;

	amdgpu_dm_mode_funcs.fb_create =
		amdgpu_mode_funcs.fb_create;
	amdgpu_dm_mode_funcs.output_poll_changed =
		amdgpu_mode_funcs.output_poll_changed;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicate support of immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	return 0;
}

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	if (dc_link_set_backlight_level(dm->backlight_link,
			bd->props.brightness, 0, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
			dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	/* backlight_device_register() returns an ERR_PTR on failure */
	if (IS_ERR(dm->backlight_dev)) {
		DRM_ERROR("DM: Backlight registration failed!\n");
		dm->backlight_dev = NULL;
	} else
		DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
}
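
/*
 * Usage note, a sketch of the resulting control path: userspace writes to
 * /sys/class/backlight/amdgpu_bl<N>/brightness, the backlight core calls
 * amdgpu_dm_backlight_update_status(), and that forwards
 * bd->props.brightness to dc_link_set_backlight_level() on the link
 * recorded in dm->backlight_link.
 */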

#endif

/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	uint32_t i;
	struct amdgpu_connector *aconnector;
	struct amdgpu_encoder *aencoder;
	struct amdgpu_crtc *acrtc;
	uint32_t link_cnt;

	link_cnt = dm->dc->caps.max_links;

	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	for (i = 0; i < dm->dc->caps.max_targets; i++) {
		acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
		if (!acrtc)
			goto fail;

		if (amdgpu_dm_crtc_init(
			dm,
			acrtc,
			i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			kfree(acrtc);
			goto fail;
		}
	}

	dm->display_indexes_num = dm->dc->caps.max_targets;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail_free_connector;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail_free_encoder;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail_free_connector;
		}

		if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
			amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			return -1;
		}
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -1;
	}

	drm_mode_config_reset(dm->ddev);

	return 0;
fail_free_encoder:
	kfree(aencoder);
fail_free_connector:
	kfree(aconnector);
fail:
	return -1;
}

void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
				   u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}

static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}

/******************************************************************************
 * Page Flip functions
 ******************************************************************************/

/**
 * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
 * via DRM IOCTL, by user mode.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (surface address update).
 */
static void dm_page_flip(struct amdgpu_device *adev,
			 int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *acrtc;
	struct dc_target *target;
	struct dc_flip_addrs addr = { {0} };

	/*
	 * TODO risk of concurrency issues
	 *
	 * This should be guarded by the dal_mutex but we can't do this since
	 * the caller uses a spin_lock on event_lock.
	 *
	 * If we wait on the dal_mutex a second page flip interrupt might come,
	 * spin on the event_lock, disabling interrupts while it does so. At
	 * this point the core can no longer be pre-empted and return to the
	 * thread that waited on the dal_mutex and we're deadlocked.
	 *
	 * With multiple cores the same essentially happens but might just take
	 * a little longer to lock up all cores.
	 *
	 * The reason we should lock on dal_mutex is so that we can be sure
	 * nobody messes with acrtc->target after we read and check its value.
	 *
	 * We might be able to fix our concurrency issues with a work queue
	 * where we schedule all work items (mode_set, page_flip, etc.) and
	 * execute them one by one. Care needs to be taken to still deal with
	 * any potential concurrency issues arising from interrupt calls.
	 */

	acrtc = adev->mode_info.crtcs[crtc_id];
	target = acrtc->target;

	/*
	 * Received a page flip call after the display has been reset.
	 * Just return in this case. Everything should be cleaned up on reset.
	 */

	if (!target) {
		WARN_ON(1);
		return;
	}

	addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
	addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
	addr.flip_immediate = async;

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
			 __func__,
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);

	dc_flip_surface_addrs(
			adev->dm.dc,
			dc_target_get_status(target)->surfaces,
			&addr, 1);
}
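
/*
 * A hedged sketch of the single-queue serialization proposed in the TODO
 * above (the names below are hypothetical, not existing driver API):
 * display work items would be funneled through one ordered workqueue so
 * that mode_set, page_flip, etc. never run concurrently against
 * acrtc->target:
 *
 *	struct dm_work_item {
 *		struct work_struct work;
 *		void (*fn)(struct amdgpu_device *adev, void *arg);
 *		void *arg;
 *	};
 *
 *	queue_work(dm->serial_wq, &item->work);
 *
 * with dm->serial_wq created via alloc_ordered_workqueue(). Interrupt-time
 * callers would still need a lock-free handoff into the queue.
 */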

static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
				  struct drm_file *filp)
{
	struct mod_freesync_params freesync_params = {0};
	uint8_t num_targets;
	uint8_t i;
	struct dc_target *target;

	struct amdgpu_device *adev = dev->dev_private;
	int r = 0;

	/* Get freesync enable flag from DRM */

	num_targets = dc_get_current_target_count(adev->dm.dc);

	for (i = 0; i < num_targets; i++) {

		target = dc_get_target_at_index(adev->dm.dc, i);

		mod_freesync_update_state(adev->dm.freesync_module,
					  target->streams,
					  target->stream_count,
					  &freesync_params);
	}

	return r;
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level, /* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level, /* called unconditionally */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip = dm_page_flip, /* called unconditionally */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(
	struct device *device,
	struct device_attribute *attr,
	const char *buf,
	size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			amdgpu_dm_display_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif
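
/*
 * Usage sketch for the debug attribute above (the path is illustrative; the
 * attribute is created on the PCI device in dm_early_init() below):
 *
 *	# fake an S3 suspend of the display stack
 *	echo 0 > /sys/bus/pci/devices/<bdf>/s3_debug
 *	# fake an S3 resume
 *	echo 1 > /sys/bus/pci/devices/<bdf>/s3_debug
 */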

static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_set_irq_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/* Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init() */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}

bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}