/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"

#include "vid.h"
#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_types.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>

#include "modules/inc/mod_freesync.h"
/* Debug facilities */
#define AMDGPU_DM_NOT_IMPL(fmt, ...) \
	DRM_INFO("DM_NOT_IMPL: " fmt, ##__VA_ARGS__)
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (NULL == acrtc->target) {
			DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		return dc_target_get_vblank_counter(acrtc->target);
	}
}
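
/*
 * Illustrative note (not additional driver code): this hook is reached
 * through the amdgpu display-funcs tables defined near the bottom of this
 * file, i.e. roughly:
 *
 *	u32 count = adev->mode_info.funcs->vblank_get_counter(adev, crtc);
 */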

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (NULL == acrtc->target) {
			DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		return dc_target_get_scanoutpos(acrtc->target, vbl, position);
	}
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
static struct amdgpu_crtc *get_crtc_by_otg_inst(
	struct amdgpu_device *adev,
	int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * The following check is inherited from both functions where this
	 * one is now used. Why this can happen still needs to be
	 * investigated.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_flip_work *works;
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* The IRQ could occur during the initial stage. */
	/* TODO: work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	works = amdgpu_crtc->pflip_works;

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			amdgpu_crtc->pflip_status,
			AMDGPU_FLIP_SUBMITTED,
			amdgpu_crtc->crtc_id,
			amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* Page flip completed. Clean up. */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wake up userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base,
					   works->event);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, work: %p\n",
		__func__, amdgpu_crtc->crtc_id, amdgpu_crtc, works);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);
}

static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	uint8_t crtc_index = 0;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc)
		crtc_index = acrtc->crtc_id;

	drm_handle_vblank(adev->ddev, crtc_index);
}

static int dm_set_clockgating_state(void *handle,
		enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		enum amd_powergating_state state)
{
	return 0;
}
/* Prototypes of private functions */
static int dm_early_init(void *handle);

static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}

/* Init display KMS
 *
 * Returns 0 on success
 */
int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	DRM_INFO("DAL is enabled\n");
	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->mc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (!adev->dm.dc)
		DRM_INFO("Display Core failed to initialize!\n");

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
			"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_INFO("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual number of CRTCs used */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO: use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = 128;
	adev->ddev->mode_config.cursor_height = 128;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
			"amdgpu: failed to initialize vblank support.\n");
		goto error;
	}

	DRM_INFO("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -1;
}

void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	dc_destroy(&adev->dm.dc);
}

/* moved from amdgpu_dm_kms.c */
void amdgpu_dm_destroy(void)
{
}

static int dm_sw_init(void *handle)
{
	return 0;
}

static int dm_sw_fini(void *handle)
{
	return 0;
}

static void detect_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch) {
			DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					aconnector, aconnector->base.base.id);

			if (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    !aconnector->mst_port) {

			if (suspend)
				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
			else
				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);

	amdgpu_dm_hpd_init(adev);

	detect_link_for_all_connectors(adev->ddev);

	return 0;
}

static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);

	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;
	struct drm_crtc *crtc;

	s3_handle_mst(adev->ddev, true);

	/* Flush all pending vblank events and turn interrupts off
	 * before disabling CRTCs. They will be enabled again in
	 * dm_display_resume.
	 */
	drm_modeset_lock_all(adev->ddev);
	list_for_each_entry(crtc, &adev->ddev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (acrtc->target)
			drm_crtc_vblank_off(crtc);
	}
	drm_modeset_unlock_all(adev->ddev);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D3,
		DC_VIDEO_POWER_SUSPEND);

	return ret;
}

struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
	struct drm_atomic_state *state,
	struct drm_crtc *crtc,
	bool from_state_var)
{
	uint32_t i;
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_connector_in_state(
			state,
			connector,
			conn_state,
			i) {
		crtc_from_state =
			from_state_var ?
				conn_state->crtc :
				connector->state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_connector(connector);
	}

	return NULL;
}
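
/*
 * Illustrative sketch (not part of the driver): a caller that needs the
 * connector driving a given CRTC out of an atomic state would do
 * something like:
 *
 *	struct amdgpu_connector *aconn =
 *		amdgpu_dm_find_first_crct_matching_connector(state, crtc, true);
 *	if (aconn)
 *		... use aconn->dc_sink ...
 *
 * from_state_var = true matches against the connector states in the given
 * atomic state; false matches against each connector's current state.
 */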

static int dm_display_resume(struct drm_device *ddev)
{
	int ret = 0;
	struct drm_connector *connector;

	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct amdgpu_connector *aconnector;
	struct drm_connector_state *conn_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore the previous display setting */

	/*
	 * Attach connectors to drm_atomic_state first, so that they are
	 * available in the state during CRTC state processing. This is used
	 * to decide whether a CRTC should be disabled because its sink got
	 * disconnected.
	 *
	 * A connector state whose crtc has a NULL dc_sink must be cleared,
	 * because it would fail validation during commit. (The overall
	 * ordering is summarized in the sketch after this function.)
	 */
	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		conn_state = drm_atomic_get_connector_state(state, connector);

		ret = PTR_ERR_OR_ZERO(conn_state);
		if (ret)
			goto err;
	}

	/* Attach crtcs to drm_atomic_state */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_crtc_state(state, crtc);

		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (ret)
			goto err;

		/* force a restore */
		crtc_state->mode_changed = true;
	}

	/* Attach planes to drm_atomic_state */
	list_for_each_entry(plane, &ddev->mode_config.plane_list, head) {

		struct drm_crtc *crtc;
		struct drm_gem_object *obj;
		struct drm_framebuffer *fb;
		struct amdgpu_framebuffer *afb;
		struct amdgpu_bo *rbo;
		int r;
		struct drm_plane_state *plane_state = drm_atomic_get_plane_state(state, plane);

		ret = PTR_ERR_OR_ZERO(plane_state);
		if (ret)
			goto err;

		crtc = plane_state->crtc;
		fb = plane_state->fb;

		if (!crtc || !crtc->state || !crtc->state->active)
			continue;

		if (!fb) {
			DRM_DEBUG_KMS("No FB bound\n");
			return 0;
		}

		/*
		 * Pin back the front buffers; the cursor buffer was already
		 * pinned back in amdgpu_resume_kms
		 */
		afb = to_amdgpu_framebuffer(fb);

		obj = afb->obj;
		rbo = gem_to_amdgpu_bo(obj);
		r = amdgpu_bo_reserve(rbo, false);
		if (unlikely(r != 0))
			return r;

		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL);

		amdgpu_bo_unreserve(rbo);

		if (unlikely(r != 0)) {
			DRM_ERROR("Failed to pin framebuffer\n");
			return r;
		}
	}

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}
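
/*
 * Outline of the restore sequence built by dm_display_resume() above
 * (a summary sketch, not additional driver code):
 *
 *	state = drm_atomic_state_alloc(ddev);
 *	for each connector: drm_atomic_get_connector_state(state, connector);
 *	for each crtc:      drm_atomic_get_crtc_state(state, crtc)
 *	                        ->mode_changed = true;  // force a restore
 *	for each plane:     drm_atomic_get_plane_state(state, plane);
 *	                    // + re-pin the front buffer, if any
 *	drm_atomic_commit(state);
 *
 * Connectors must be attached first so that CRTC processing can decide
 * whether a CRTC has to be disabled because its sink went away.
 */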

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;

	/* power on hardware */
	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D0,
		DC_VIDEO_POWER_ON);

	return 0;
}

int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;
	struct drm_crtc *crtc;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * Enable the HPD Rx IRQ early; this must be done before set mode,
	 * as short-pulse interrupts are used for MST.
	 */
	amdgpu_dm_irq_resume_early(adev);

	drm_modeset_lock_all(ddev);
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (acrtc->target)
			drm_crtc_vblank_on(crtc);
	}
	drm_modeset_unlock_all(ddev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);

		/*
		 * Skip already created MST connectors when traversing the
		 * list; they are handled by the MST framework.
		 */
		if (aconnector->mst_port)
			continue;

		dc_link_detect(aconnector->dc_link, false);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
	}

	drm_modeset_lock_all(ddev);
	ret = dm_display_resume(ddev);
	drm_modeset_unlock_all(ddev);

	amdgpu_dm_irq_resume(adev);

	return ret;
}

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = NULL,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
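/*
 * Illustrative sketch (an assumption, not code from this file): the SoC
 * setup code is expected to register the IP block above through the
 * usual base-driver helper, e.g.:
 *
 *	amdgpu_ip_block_add(adev, &dm_ip_block);
 */
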
/* TODO: this is temporarily non-const; it should be fixed later */
static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit
};
void amdgpu_dm_update_connector_after_detect(
	struct amdgpu_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;

	/*
	 * An Edid-managed connector gets its first update only in the
	 * mode_valid hook; after that the connector sink is set to either a
	 * fake or a physical sink, depending on the link status. Don't do
	 * it here during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For headless S3 resume, use the emulated sink as a fake
		 * target, because on resume connector->sink is set to NULL.
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink)
				amdgpu_dm_remove_sink_from_freesync_module(
					connector);
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
				connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard until a proper fix is found.
	 * If this sink is an MST sink, we should not do anything.
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		return;
	}

	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update the status of the drm connector.
	 * 2. Send an event and let userspace tell us what to do.
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
				connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0)
			aconnector->edid = NULL;
		else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (for the MST case) MST does this in
	 * its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);
	if (dc_link_detect(aconnector->dc_link, false)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
			process_count < max_process_count) {
		uint8_t retry;

		dret = 0;

		process_count++;

		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);
#endif

		if (new_irq_handled) {
			/* ACK at DPCD to notify the downstream device */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else
			break;
	}

	if (process_count == max_process_count)
		DRM_DEBUG_KMS("Loop exceeded max iterations\n");
}
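
/*
 * Worked example for the DPCD windows read above (register offsets as
 * defined in drm_dp_helper.h):
 *
 *	DPCD rev < 1.2:  DP_SINK_COUNT (0x200) up to
 *	                 DP_LANE0_1_STATUS - 1 (0x201), i.e. 2 bytes
 *	DPCD rev >= 1.2: DP_SINK_COUNT_ESI (0x2002) up to
 *	                 DP_PSR_ERROR_STATUS - 1 (0x2005), i.e. 4 bytes
 *
 * The ACK writes back esi[1..] (everything except the sink count) at
 * dpcd_addr + 1, which is why ack_dpcd_bytes_to_write is
 * dpcd_bytes_to_read - 1.
 */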

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/*
	 * TODO: Temporarily use a mutex so the HPD interrupt does not run
	 * into a GPIO conflict; once the i2c helper is implemented, this
	 * mutex should be retired.
	 */
	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(aconnector->dc_link, false)) {
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with the base driver.
	 *    The base driver will call the set() function to enable/disable
	 *    an interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    The base driver will call amdgpu_dm_irq_handler() for ALL
	 *    interrupts coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will redirect the interrupt to DC
	 *    for acknowledging and handling.
	 */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT;
			i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
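
/*
 * Illustrative sketch (not part of the driver): registering one more
 * handler follows the same pattern as above; "my_src", "my_handler" and
 * "my_data" are hypothetical:
 *
 *	struct dc_interrupt_params p = {0};
 *
 *	p.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 *	p.current_polarity = INTERRUPT_POLARITY_DEFAULT;
 *	p.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 *	p.irq_source = my_src;
 *	amdgpu_dm_irq_register_interrupt(adev, &p, my_handler, my_data);
 */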

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	int r;

	adev->mode_info.mode_config_initialized = true;

	amdgpu_dm_mode_funcs.fb_create =
		amdgpu_mode_funcs.fb_create;
	amdgpu_dm_mode_funcs.output_poll_changed =
		amdgpu_mode_funcs.output_poll_changed;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicate support for immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	return 0;
}

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	if (dc_link_set_backlight_level(dm->backlight_link,
			bd->props.brightness, 0, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
			dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (NULL == dm->backlight_dev)
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
}

#endif
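
/*
 * Usage note (illustrative): once registered, the backlight device is
 * exposed through the standard backlight class in sysfs and can be
 * driven from user space; the "0" index below is an assumption (it is
 * the DRM primary node index used in bl_name):
 *
 *	# echo 128 > /sys/class/backlight/amdgpu_bl0/brightness
 *	# cat /sys/class/backlight/amdgpu_bl0/actual_brightness
 */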

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success.
 */
int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	uint32_t i;
	struct amdgpu_connector *aconnector;
	struct amdgpu_encoder *aencoder;
	struct amdgpu_crtc *acrtc;
	uint32_t link_cnt;

	link_cnt = dm->dc->caps.max_links;

	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	for (i = 0; i < dm->dc->caps.max_targets; i++) {
		acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
		if (!acrtc)
			goto fail;

		if (amdgpu_dm_crtc_init(
				dm,
				acrtc,
				i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			kfree(acrtc);
			goto fail;
		}
	}

	dm->display_indexes_num = dm->dc->caps.max_targets;

	/* loop over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail_free_connector;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail_free_encoder;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail_free_connector;
		}

		if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
			amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			return -1;
		}
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -1;
	}

	drm_mode_config_reset(dm->ddev);

	return 0;
fail_free_encoder:
	kfree(aencoder);
fail_free_connector:
	kfree(aconnector);
fail:
	return -1;
}

void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	AMDGPU_DM_NOT_IMPL("%s\n", __func__);
}

static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
		u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	AMDGPU_DM_NOT_IMPL("%s\n", __func__);
}

static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	AMDGPU_DM_NOT_IMPL("%s\n", __func__);
	return 0;
}

/******************************************************************************
 * Page Flip functions
 ******************************************************************************/

/**
 * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
 * via DRM IOCTL, by user mode.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (surface address update).
 */
static void dm_page_flip(struct amdgpu_device *adev,
			 int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *acrtc;
	struct dc_target *target;
	struct dc_flip_addrs addr = { {0} };

	/*
	 * TODO risk of concurrency issues
	 *
	 * This should be guarded by the dal_mutex, but we can't do this
	 * since the caller uses a spin_lock on event_lock.
	 *
	 * If we wait on the dal_mutex a second page flip interrupt might
	 * come, spin on the event_lock, disabling interrupts while it does
	 * so. At this point the core can no longer be pre-empted and return
	 * to the thread that waited on the dal_mutex, and we're deadlocked.
	 *
	 * With multiple cores the same essentially happens, but it might
	 * just take a little longer to lock up all cores.
	 *
	 * The reason we should lock on dal_mutex is so that we can be sure
	 * nobody messes with acrtc->target after we read and check its value.
	 *
	 * We might be able to fix our concurrency issues with a work queue
	 * where we schedule all work items (mode_set, page_flip, etc.) and
	 * execute them one by one. Care needs to be taken to still deal with
	 * any potential concurrency issues arising from interrupt calls.
	 * (A sketch of this idea follows after this function.)
	 */

	acrtc = adev->mode_info.crtcs[crtc_id];
	target = acrtc->target;

	/*
	 * Received a page flip call after the display has been reset. Just
	 * return in this case. Everything should be cleaned up on reset.
	 */
	if (!target) {
		WARN_ON(1);
		return;
	}

	addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
	addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
	addr.flip_immediate = async;

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
			__func__,
			addr.address.grph.addr.high_part,
			addr.address.grph.addr.low_part);

	dc_flip_surface_addrs(
		adev->dm.dc,
		dc_target_get_status(target)->surfaces,
		&addr, 1);
}
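
/*
 * Sketch of the work-queue serialization suggested in the comment above
 * (an assumption about one possible fix, not existing driver code):
 * every DM operation would be wrapped in a work item and pushed onto a
 * single-threaded queue, so mode sets and page flips never run
 * concurrently and interrupt handlers only enqueue work:
 *
 *	struct dm_work {
 *		struct work_struct base;
 *		void (*op)(struct amdgpu_device *adev, void *arg);
 *		struct amdgpu_device *adev;
 *		void *arg;
 *	};
 *
 *	static void dm_work_fn(struct work_struct *w)
 *	{
 *		struct dm_work *dw = container_of(w, struct dm_work, base);
 *
 *		dw->op(dw->adev, dw->arg); // no other DM op in flight
 *		kfree(dw);
 *	}
 */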

static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
		struct drm_file *filp)
{
	struct mod_freesync_params freesync_params = {0};
	uint8_t num_targets;
	uint8_t i;
	struct dc_target *target;

	struct amdgpu_device *adev = dev->dev_private;
	int r = 0;

	/* TODO: Get the freesync enable flag from DRM; freesync_params is
	 * currently passed down zero-initialized. */

	num_targets = dc_get_current_target_count(adev->dm.dc);

	for (i = 0; i < num_targets; i++) {
		target = dc_get_target_at_index(adev->dm.dc, i);

		mod_freesync_update_state(adev->dm.freesync_module,
				target->streams,
				target->stream_count,
				&freesync_params);
	}

	return r;
}

#ifdef CONFIG_DRM_AMDGPU_CIK
static const struct amdgpu_display_funcs dm_dce_v8_0_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level, /* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level, /* called unconditionally */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip = dm_page_flip, /* called unconditionally */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,
};
#endif

static const struct amdgpu_display_funcs dm_dce_v10_0_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level, /* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level, /* called unconditionally */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip = dm_page_flip, /* called unconditionally */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,
};

static const struct amdgpu_display_funcs dm_dce_v11_0_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level, /* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level, /* called unconditionally */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip = dm_page_flip, /* called unconditionally */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(
	struct device *device,
	struct device_attribute *attr,
	const char *buf,
	size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			amdgpu_dm_display_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_set_irq_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
#ifdef CONFIG_DRM_AMDGPU_CIK
		if (adev->mode_info.funcs == NULL)
			adev->mode_info.funcs = &dm_dce_v8_0_display_funcs;
#endif
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		if (adev->mode_info.funcs == NULL)
			adev->mode_info.funcs = &dm_dce_v10_0_display_funcs;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		if (adev->mode_info.funcs == NULL)
			adev->mode_info.funcs = &dm_dce_v11_0_display_funcs;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		if (adev->mode_info.funcs == NULL)
			adev->mode_info.funcs = &dm_dce_v11_0_display_funcs;
		break;
	case CHIP_POLARIS11:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		if (adev->mode_info.funcs == NULL)
			adev->mode_info.funcs = &dm_dce_v11_0_display_funcs;
		break;
	case CHIP_POLARIS10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		if (adev->mode_info.funcs == NULL)
			adev->mode_info.funcs = &dm_dce_v11_0_display_funcs;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init().
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}

bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}