]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drm/amd/display: extended the programming sequence to VFlip as well
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
CommitLineData
4562236b
HW
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services_types.h"
27#include "dc.h"
28
29#include "vid.h"
30#include "amdgpu.h"
a49dcb88 31#include "amdgpu_display.h"
4562236b
HW
32#include "atom.h"
33#include "amdgpu_dm.h"
34#include "amdgpu_dm_types.h"
35
36#include "amd_shared.h"
37#include "amdgpu_dm_irq.h"
38#include "dm_helpers.h"
39
40#include "ivsrcid/ivsrcid_vislands30.h"
41
42#include <linux/module.h>
43#include <linux/moduleparam.h>
44#include <linux/version.h>
45
46#include <drm/drm_atomic.h>
47#include <drm/drm_atomic_helper.h>
48#include <drm/drm_dp_mst_helper.h>
49
50#include "modules/inc/mod_freesync.h"
51
4562236b
HW
52/*
53 * dm_vblank_get_counter
54 *
55 * @brief
56 * Get counter for number of vertical blanks
57 *
58 * @param
59 * struct amdgpu_device *adev - [in] desired amdgpu device
60 * int disp_idx - [in] which CRTC to get the counter from
61 *
62 * @return
63 * Counter for vertical blanks
64 */
65static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
66{
67 if (crtc >= adev->mode_info.num_crtc)
68 return 0;
69 else {
70 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
71
ab2541b6
AC
72 if (NULL == acrtc->stream) {
73 DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
4562236b
HW
74 return 0;
75 }
76
ab2541b6 77 return dc_stream_get_vblank_counter(acrtc->stream);
4562236b
HW
78 }
79}
80
81static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
82 u32 *vbl, u32 *position)
83{
84 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
85 return -EINVAL;
86 else {
87 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
88
ab2541b6
AC
89 if (NULL == acrtc->stream) {
90 DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
4562236b
HW
91 return 0;
92 }
93
ab2541b6 94 return dc_stream_get_scanoutpos(acrtc->stream, vbl, position);
4562236b
HW
95 }
96
97 return 0;
98}
99
100static bool dm_is_idle(void *handle)
101{
102 /* XXX todo */
103 return true;
104}
105
/* IP-block wait-for-idle stub; nothing to wait for yet. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
111
112static bool dm_check_soft_reset(void *handle)
113{
114 return false;
115}
116
/* Soft-reset stub; no reset sequence implemented yet. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
122
123static struct amdgpu_crtc *get_crtc_by_otg_inst(
124 struct amdgpu_device *adev,
125 int otg_inst)
126{
127 struct drm_device *dev = adev->ddev;
128 struct drm_crtc *crtc;
129 struct amdgpu_crtc *amdgpu_crtc;
130
131 /*
132 * following if is check inherited from both functions where this one is
133 * used now. Need to be checked why it could happen.
134 */
135 if (otg_inst == -1) {
136 WARN_ON(1);
137 return adev->mode_info.crtcs[0];
138 }
139
140 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
141 amdgpu_crtc = to_amdgpu_crtc(crtc);
142
143 if (amdgpu_crtc->otg_inst == otg_inst)
144 return amdgpu_crtc;
145 }
146
147 return NULL;
148}
149
150static void dm_pflip_high_irq(void *interrupt_params)
151{
152 struct amdgpu_flip_work *works;
153 struct amdgpu_crtc *amdgpu_crtc;
154 struct common_irq_params *irq_params = interrupt_params;
155 struct amdgpu_device *adev = irq_params->adev;
156 unsigned long flags;
157
158 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
159
160 /* IRQ could occur when in initial stage */
161 /*TODO work and BO cleanup */
162 if (amdgpu_crtc == NULL) {
163 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
164 return;
165 }
166
167 spin_lock_irqsave(&adev->ddev->event_lock, flags);
168 works = amdgpu_crtc->pflip_works;
169
170 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
171 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
172 amdgpu_crtc->pflip_status,
173 AMDGPU_FLIP_SUBMITTED,
174 amdgpu_crtc->crtc_id,
175 amdgpu_crtc);
176 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
177 return;
178 }
179
180 /* page flip completed. clean up */
181 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
182 amdgpu_crtc->pflip_works = NULL;
183
184 /* wakeup usersapce */
185 if (works->event)
186 drm_crtc_send_vblank_event(&amdgpu_crtc->base,
187 works->event);
188
189 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
190
191 DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE, work: %p,\n",
192 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc, works);
193
194 drm_crtc_vblank_put(&amdgpu_crtc->base);
195 schedule_work(&works->unpin_work);
196}
197
198static void dm_crtc_high_irq(void *interrupt_params)
199{
200 struct common_irq_params *irq_params = interrupt_params;
201 struct amdgpu_device *adev = irq_params->adev;
202 uint8_t crtc_index = 0;
203 struct amdgpu_crtc *acrtc;
204
b57de80a 205 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
4562236b
HW
206
207 if (acrtc)
208 crtc_index = acrtc->crtc_id;
209
210 drm_handle_vblank(adev->ddev, crtc_index);
211}
212
213static int dm_set_clockgating_state(void *handle,
214 enum amd_clockgating_state state)
215{
216 return 0;
217}
218
219static int dm_set_powergating_state(void *handle,
220 enum amd_powergating_state state)
221{
222 return 0;
223}
224
225/* Prototypes of private functions */
226static int dm_early_init(void* handle);
227
228static void hotplug_notify_work_func(struct work_struct *work)
229{
230 struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
231 struct drm_device *dev = dm->ddev;
232
233 drm_kms_helper_hotplug_event(dev);
234}
235
236/* Init display KMS
237 *
238 * Returns 0 on success
239 */
240int amdgpu_dm_init(struct amdgpu_device *adev)
241{
242 struct dc_init_data init_data;
243 adev->dm.ddev = adev->ddev;
244 adev->dm.adev = adev;
245
246 DRM_INFO("DAL is enabled\n");
247 /* Zero all the fields */
248 memset(&init_data, 0, sizeof(init_data));
249
250 /* initialize DAL's lock (for SYNC context use) */
251 spin_lock_init(&adev->dm.dal_lock);
252
253 /* initialize DAL's mutex */
254 mutex_init(&adev->dm.dal_mutex);
255
256 if(amdgpu_dm_irq_init(adev)) {
257 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
258 goto error;
259 }
260
261 init_data.asic_id.chip_family = adev->family;
262
263 init_data.asic_id.pci_revision_id = adev->rev_id;
264 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
265
266 init_data.asic_id.vram_width = adev->mc.vram_width;
267 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
268 init_data.asic_id.atombios_base_address =
269 adev->mode_info.atom_context->bios;
270
271 init_data.driver = adev;
272
273 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
274
275 if (!adev->dm.cgs_device) {
276 DRM_ERROR("amdgpu: failed to create cgs device.\n");
277 goto error;
278 }
279
280 init_data.cgs_device = adev->dm.cgs_device;
281
282 adev->dm.dal = NULL;
283
284 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
285
286 /* Display Core create. */
287 adev->dm.dc = dc_create(&init_data);
288
289 if (!adev->dm.dc)
290 DRM_INFO("Display Core failed to initialize!\n");
291
292 INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
293
294 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
295 if (!adev->dm.freesync_module) {
296 DRM_ERROR(
297 "amdgpu: failed to initialize freesync_module.\n");
298 } else
299 DRM_INFO("amdgpu: freesync_module init done %p.\n",
300 adev->dm.freesync_module);
301
302 if (amdgpu_dm_initialize_drm_device(adev)) {
303 DRM_ERROR(
304 "amdgpu: failed to initialize sw for display support.\n");
305 goto error;
306 }
307
308 /* Update the actual used number of crtc */
309 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
310
311 /* TODO: Add_display_info? */
312
313 /* TODO use dynamic cursor width */
314 adev->ddev->mode_config.cursor_width = 128;
315 adev->ddev->mode_config.cursor_height = 128;
316
317 if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
318 DRM_ERROR(
319 "amdgpu: failed to initialize sw for display support.\n");
320 goto error;
321 }
322
323 DRM_INFO("KMS initialized.\n");
324
325 return 0;
326error:
327 amdgpu_dm_fini(adev);
328
329 return -1;
330}
331
332void amdgpu_dm_fini(struct amdgpu_device *adev)
333{
334 amdgpu_dm_destroy_drm_device(&adev->dm);
335 /*
336 * TODO: pageflip, vlank interrupt
337 *
338 * amdgpu_dm_irq_fini(adev);
339 */
340
341 if (adev->dm.cgs_device) {
342 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
343 adev->dm.cgs_device = NULL;
344 }
345 if (adev->dm.freesync_module) {
346 mod_freesync_destroy(adev->dm.freesync_module);
347 adev->dm.freesync_module = NULL;
348 }
349 /* DC Destroy TODO: Replace destroy DAL */
350 {
351 dc_destroy(&adev->dm.dc);
352 }
353 return;
354}
355
/* moved from amdgpu_dm_kms.c */
/*
 * Empty legacy hook kept for the old kms teardown path.
 * Fix: declared with (void) — the original's empty parameter list is an
 * old-style (unprototyped) declaration in C.
 */
void amdgpu_dm_destroy(void)
{
}
360
/* IP-block software init; nothing needed for DM. */
static int dm_sw_init(void *handle)
{
	return 0;
}
365
/* IP-block software teardown; nothing needed for DM. */
static int dm_sw_fini(void *handle)
{
	return 0;
}
370
7abcf6b5 371static int detect_mst_link_for_all_connectors(struct drm_device *dev)
4562236b
HW
372{
373 struct amdgpu_connector *aconnector;
374 struct drm_connector *connector;
7abcf6b5 375 int ret = 0;
4562236b
HW
376
377 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
378
379 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
380 aconnector = to_amdgpu_connector(connector);
7abcf6b5
AG
381 if (aconnector->dc_link->type == dc_connection_mst_branch) {
382 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
383 aconnector, aconnector->base.base.id);
384
385 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
386 if (ret < 0) {
387 DRM_ERROR("DM_MST: Failed to start MST\n");
388 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
389 return ret;
4562236b 390 }
7abcf6b5 391 }
4562236b
HW
392 }
393
394 drm_modeset_unlock(&dev->mode_config.connection_mutex);
7abcf6b5
AG
395 return ret;
396}
397
398static int dm_late_init(void *handle)
399{
400 struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
401 int r = detect_mst_link_for_all_connectors(dev);
402
403 return r;
4562236b
HW
404}
405
406static void s3_handle_mst(struct drm_device *dev, bool suspend)
407{
408 struct amdgpu_connector *aconnector;
409 struct drm_connector *connector;
410
411 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
412
413 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
414 aconnector = to_amdgpu_connector(connector);
415 if (aconnector->dc_link->type == dc_connection_mst_branch &&
416 !aconnector->mst_port) {
417
418 if (suspend)
419 drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
420 else
421 drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
422 }
423 }
424
425 drm_modeset_unlock(&dev->mode_config.connection_mutex);
426}
427
/* Hardware init hook: create the display manager and enable HPD. */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
437
/* Hardware teardown hook: disable HPD and DM interrupt handling. */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);
	amdgpu_dm_irq_fini(adev);

	return 0;
}
448
449static int dm_suspend(void *handle)
450{
451 struct amdgpu_device *adev = handle;
452 struct amdgpu_display_manager *dm = &adev->dm;
453 int ret = 0;
454 struct drm_crtc *crtc;
455
456 s3_handle_mst(adev->ddev, true);
457
458 /* flash all pending vblank events and turn interrupt off
459 * before disabling CRTCs. They will be enabled back in
460 * dm_display_resume
461 */
462 drm_modeset_lock_all(adev->ddev);
463 list_for_each_entry(crtc, &adev->ddev->mode_config.crtc_list, head) {
464 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
ab2541b6 465 if (acrtc->stream)
4562236b
HW
466 drm_crtc_vblank_off(crtc);
467 }
468 drm_modeset_unlock_all(adev->ddev);
469
470 amdgpu_dm_irq_suspend(adev);
471
472 dc_set_power_state(
473 dm->dc,
474 DC_ACPI_CM_POWER_STATE_D3,
475 DC_VIDEO_POWER_SUSPEND);
476
477 return ret;
478}
479
480struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
481 struct drm_atomic_state *state,
482 struct drm_crtc *crtc,
483 bool from_state_var)
484{
485 uint32_t i;
486 struct drm_connector_state *conn_state;
487 struct drm_connector *connector;
488 struct drm_crtc *crtc_from_state;
489
490 for_each_connector_in_state(
491 state,
492 connector,
493 conn_state,
494 i) {
495 crtc_from_state =
496 from_state_var ?
497 conn_state->crtc :
498 connector->state->crtc;
499
500 if (crtc_from_state == crtc)
501 return to_amdgpu_connector(connector);
502 }
503
504 return NULL;
505}
506
507static int dm_display_resume(struct drm_device *ddev)
508{
509 int ret = 0;
510 struct drm_connector *connector;
511
512 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
513 struct drm_plane *plane;
514 struct drm_crtc *crtc;
515 struct amdgpu_connector *aconnector;
516 struct drm_connector_state *conn_state;
517
518 if (!state)
519 return ENOMEM;
520
521 state->acquire_ctx = ddev->mode_config.acquire_ctx;
522
523 /* Construct an atomic state to restore previous display setting */
524
525 /*
526 * Attach connectors to drm_atomic_state
527 * Should be done in the first place in order to make connectors
528 * available in state during crtc state processing. It is used for
529 * making decision if crtc should be disabled in case sink got
530 * disconnected.
531 *
532 * Connectors state crtc with NULL dc_sink should be cleared, because it
533 * will fail validation during commit
534 */
535 list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
536 aconnector = to_amdgpu_connector(connector);
537 conn_state = drm_atomic_get_connector_state(state, connector);
538
539 ret = PTR_ERR_OR_ZERO(conn_state);
540 if (ret)
541 goto err;
542 }
543
544 /* Attach crtcs to drm_atomic_state*/
545 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
546 struct drm_crtc_state *crtc_state =
547 drm_atomic_get_crtc_state(state, crtc);
548
549 ret = PTR_ERR_OR_ZERO(crtc_state);
550 if (ret)
551 goto err;
552
553 /* force a restore */
554 crtc_state->mode_changed = true;
555 }
556
557
558 /* Attach planes to drm_atomic_state */
559 list_for_each_entry(plane, &ddev->mode_config.plane_list, head) {
560
561 struct drm_crtc *crtc;
562 struct drm_gem_object *obj;
563 struct drm_framebuffer *fb;
564 struct amdgpu_framebuffer *afb;
565 struct amdgpu_bo *rbo;
566 int r;
567 struct drm_plane_state *plane_state = drm_atomic_get_plane_state(state, plane);
568
569 ret = PTR_ERR_OR_ZERO(plane_state);
570 if (ret)
571 goto err;
572
573 crtc = plane_state->crtc;
574 fb = plane_state->fb;
575
576 if (!crtc || !crtc->state || !crtc->state->active)
577 continue;
578
579 if (!fb) {
580 DRM_DEBUG_KMS("No FB bound\n");
581 return 0;
582 }
583
584 /*
585 * Pin back the front buffers, cursor buffer was already pinned
586 * back in amdgpu_resume_kms
587 */
588
589 afb = to_amdgpu_framebuffer(fb);
590
591 obj = afb->obj;
592 rbo = gem_to_amdgpu_bo(obj);
593 r = amdgpu_bo_reserve(rbo, false);
594 if (unlikely(r != 0))
595 return r;
596
597 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL);
598
599 amdgpu_bo_unreserve(rbo);
600
601 if (unlikely(r != 0)) {
602 DRM_ERROR("Failed to pin framebuffer\n");
603 return r;
604 }
605
606 }
607
608
609 /* Call commit internally with the state we just constructed */
610 ret = drm_atomic_commit(state);
611 if (!ret)
612 return 0;
613
614err:
615 DRM_ERROR("Restoring old state failed with %i\n", ret);
616 drm_atomic_state_put(state);
617
618 return ret;
619}
620
621static int dm_resume(void *handle)
622{
623 struct amdgpu_device *adev = handle;
624 struct amdgpu_display_manager *dm = &adev->dm;
625
626 /* power on hardware */
627 dc_set_power_state(
628 dm->dc,
629 DC_ACPI_CM_POWER_STATE_D0,
630 DC_VIDEO_POWER_ON);
631
632 return 0;
633}
634
635int amdgpu_dm_display_resume(struct amdgpu_device *adev )
636{
637 struct drm_device *ddev = adev->ddev;
638 struct amdgpu_display_manager *dm = &adev->dm;
639 struct amdgpu_connector *aconnector;
640 struct drm_connector *connector;
641 int ret = 0;
642 struct drm_crtc *crtc;
643
644 /* program HPD filter */
645 dc_resume(dm->dc);
646
647 /* On resume we need to rewrite the MSTM control bits to enamble MST*/
648 s3_handle_mst(ddev, false);
649
650 /*
651 * early enable HPD Rx IRQ, should be done before set mode as short
652 * pulse interrupts are used for MST
653 */
654 amdgpu_dm_irq_resume_early(adev);
655
656 drm_modeset_lock_all(ddev);
657 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
658 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
ab2541b6 659 if (acrtc->stream)
4562236b
HW
660 drm_crtc_vblank_on(crtc);
661 }
662 drm_modeset_unlock_all(ddev);
663
664 /* Do detection*/
665 list_for_each_entry(connector,
666 &ddev->mode_config.connector_list, head) {
667 aconnector = to_amdgpu_connector(connector);
668
669 /*
670 * this is the case when traversing through already created
671 * MST connectors, should be skipped
672 */
673 if (aconnector->mst_port)
674 continue;
675
676 dc_link_detect(aconnector->dc_link, false);
677 aconnector->dc_sink = NULL;
678 amdgpu_dm_update_connector_after_detect(aconnector);
679 }
680
681 drm_modeset_lock_all(ddev);
682 ret = dm_display_resume(ddev);
683 drm_modeset_unlock_all(ddev);
684
685 amdgpu_dm_irq_resume(adev);
686
687 return ret;
688}
689
690static const struct amd_ip_funcs amdgpu_dm_funcs = {
691 .name = "dm",
692 .early_init = dm_early_init,
7abcf6b5 693 .late_init = dm_late_init,
4562236b
HW
694 .sw_init = dm_sw_init,
695 .sw_fini = dm_sw_fini,
696 .hw_init = dm_hw_init,
697 .hw_fini = dm_hw_fini,
698 .suspend = dm_suspend,
699 .resume = dm_resume,
700 .is_idle = dm_is_idle,
701 .wait_for_idle = dm_wait_for_idle,
702 .check_soft_reset = dm_check_soft_reset,
703 .soft_reset = dm_soft_reset,
704 .set_clockgating_state = dm_set_clockgating_state,
705 .set_powergating_state = dm_set_powergating_state,
706};
707
708const struct amdgpu_ip_block_version dm_ip_block =
709{
710 .type = AMD_IP_BLOCK_TYPE_DCE,
711 .major = 1,
712 .minor = 0,
713 .rev = 0,
714 .funcs = &amdgpu_dm_funcs,
715};
716
717/* TODO: it is temporary non-const, should fixed later */
718static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
a49dcb88
HW
719 .fb_create = amdgpu_user_framebuffer_create,
720 .output_poll_changed = amdgpu_output_poll_changed,
4562236b
HW
721 .atomic_check = amdgpu_dm_atomic_check,
722 .atomic_commit = amdgpu_dm_atomic_commit
723};
724
725void amdgpu_dm_update_connector_after_detect(
726 struct amdgpu_connector *aconnector)
727{
728 struct drm_connector *connector = &aconnector->base;
729 struct drm_device *dev = connector->dev;
730 const struct dc_sink *sink;
731
732 /* MST handled by drm_mst framework */
733 if (aconnector->mst_mgr.mst_state == true)
734 return;
735
736
737 sink = aconnector->dc_link->local_sink;
738
739 /* Edid mgmt connector gets first update only in mode_valid hook and then
740 * the connector sink is set to either fake or physical sink depends on link status.
741 * don't do it here if u are during boot
742 */
743 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
744 && aconnector->dc_em_sink) {
745
ab2541b6 746 /* For S3 resume with headless use eml_sink to fake stream
4562236b
HW
747 * because on resume connecotr->sink is set ti NULL
748 */
749 mutex_lock(&dev->mode_config.mutex);
750
751 if (sink) {
922aa1e1 752 if (aconnector->dc_sink) {
4562236b
HW
753 amdgpu_dm_remove_sink_from_freesync_module(
754 connector);
922aa1e1
AG
755 /* retain and release bellow are used for
756 * bump up refcount for sink because the link don't point
757 * to it anymore after disconnect so on next crtc to connector
758 * reshuffle by UMD we will get into unwanted dc_sink release
759 */
760 if (aconnector->dc_sink != aconnector->dc_em_sink)
761 dc_sink_release(aconnector->dc_sink);
762 }
4562236b
HW
763 aconnector->dc_sink = sink;
764 amdgpu_dm_add_sink_to_freesync_module(
765 connector, aconnector->edid);
766 } else {
767 amdgpu_dm_remove_sink_from_freesync_module(connector);
768 if (!aconnector->dc_sink)
769 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1
AG
770 else if (aconnector->dc_sink != aconnector->dc_em_sink)
771 dc_sink_retain(aconnector->dc_sink);
4562236b
HW
772 }
773
774 mutex_unlock(&dev->mode_config.mutex);
775 return;
776 }
777
778 /*
779 * TODO: temporary guard to look for proper fix
780 * if this sink is MST sink, we should not do anything
781 */
782 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
783 return;
784
785 if (aconnector->dc_sink == sink) {
786 /* We got a DP short pulse (Link Loss, DP CTS, etc...).
787 * Do nothing!! */
788 DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
789 aconnector->connector_id);
790 return;
791 }
792
793 DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
794 aconnector->connector_id, aconnector->dc_sink, sink);
795
796 mutex_lock(&dev->mode_config.mutex);
797
798 /* 1. Update status of the drm connector
799 * 2. Send an event and let userspace tell us what to do */
800 if (sink) {
801 /* TODO: check if we still need the S3 mode update workaround.
802 * If yes, put it here. */
803 if (aconnector->dc_sink)
804 amdgpu_dm_remove_sink_from_freesync_module(
805 connector);
806
807 aconnector->dc_sink = sink;
808 if (sink->dc_edid.length == 0)
809 aconnector->edid = NULL;
810 else {
811 aconnector->edid =
812 (struct edid *) sink->dc_edid.raw_edid;
813
814
815 drm_mode_connector_update_edid_property(connector,
816 aconnector->edid);
817 }
818 amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
819
820 } else {
821 amdgpu_dm_remove_sink_from_freesync_module(connector);
822 drm_mode_connector_update_edid_property(connector, NULL);
823 aconnector->num_modes = 0;
824 aconnector->dc_sink = NULL;
825 }
826
827 mutex_unlock(&dev->mode_config.mutex);
828}
829
830static void handle_hpd_irq(void *param)
831{
832 struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
833 struct drm_connector *connector = &aconnector->base;
834 struct drm_device *dev = connector->dev;
835
836 /* In case of failure or MST no need to update connector status or notify the OS
837 * since (for MST case) MST does this in it's own context.
838 */
839 mutex_lock(&aconnector->hpd_lock);
840 if (dc_link_detect(aconnector->dc_link, false)) {
841 amdgpu_dm_update_connector_after_detect(aconnector);
842
843
844 drm_modeset_lock_all(dev);
845 dm_restore_drm_connector_state(dev, connector);
846 drm_modeset_unlock_all(dev);
847
848 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
849 drm_kms_helper_hotplug_event(dev);
850 }
851 mutex_unlock(&aconnector->hpd_lock);
852
853}
854
855static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
856{
857 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
858 uint8_t dret;
859 bool new_irq_handled = false;
860 int dpcd_addr;
861 int dpcd_bytes_to_read;
862
863 const int max_process_count = 30;
864 int process_count = 0;
865
866 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
867
868 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
869 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
870 /* DPCD 0x200 - 0x201 for downstream IRQ */
871 dpcd_addr = DP_SINK_COUNT;
872 } else {
873 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
874 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
875 dpcd_addr = DP_SINK_COUNT_ESI;
876 }
877
878 dret = drm_dp_dpcd_read(
879 &aconnector->dm_dp_aux.aux,
880 dpcd_addr,
881 esi,
882 dpcd_bytes_to_read);
883
884 while (dret == dpcd_bytes_to_read &&
885 process_count < max_process_count) {
886 uint8_t retry;
887 dret = 0;
888
889 process_count++;
890
891 DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
892 /* handle HPD short pulse irq */
893 if (aconnector->mst_mgr.mst_state)
894 drm_dp_mst_hpd_irq(
895 &aconnector->mst_mgr,
896 esi,
897 &new_irq_handled);
4562236b
HW
898
899 if (new_irq_handled) {
900 /* ACK at DPCD to notify down stream */
901 const int ack_dpcd_bytes_to_write =
902 dpcd_bytes_to_read - 1;
903
904 for (retry = 0; retry < 3; retry++) {
905 uint8_t wret;
906
907 wret = drm_dp_dpcd_write(
908 &aconnector->dm_dp_aux.aux,
909 dpcd_addr + 1,
910 &esi[1],
911 ack_dpcd_bytes_to_write);
912 if (wret == ack_dpcd_bytes_to_write)
913 break;
914 }
915
916 /* check if there is new irq to be handle */
917 dret = drm_dp_dpcd_read(
918 &aconnector->dm_dp_aux.aux,
919 dpcd_addr,
920 esi,
921 dpcd_bytes_to_read);
922
923 new_irq_handled = false;
924 } else
925 break;
926 }
927
928 if (process_count == max_process_count)
929 DRM_DEBUG_KMS("Loop exceeded max iterations\n");
930}
931
932static void handle_hpd_rx_irq(void *param)
933{
934 struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
935 struct drm_connector *connector = &aconnector->base;
936 struct drm_device *dev = connector->dev;
937 const struct dc_link *dc_link = aconnector->dc_link;
938 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
939
940 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
941 * conflict, after implement i2c helper, this mutex should be
942 * retired.
943 */
944 if (aconnector->dc_link->type != dc_connection_mst_branch)
945 mutex_lock(&aconnector->hpd_lock);
946
947 if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
948 !is_mst_root_connector) {
949 /* Downstream Port status changed. */
950 if (dc_link_detect(aconnector->dc_link, false)) {
951 amdgpu_dm_update_connector_after_detect(aconnector);
952
953
954 drm_modeset_lock_all(dev);
955 dm_restore_drm_connector_state(dev, connector);
956 drm_modeset_unlock_all(dev);
957
958 drm_kms_helper_hotplug_event(dev);
959 }
960 }
961 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
962 (dc_link->type == dc_connection_mst_branch))
963 dm_handle_hpd_rx_irq(aconnector);
964
965 if (aconnector->dc_link->type != dc_connection_mst_branch)
966 mutex_unlock(&aconnector->hpd_lock);
967}
968
969static void register_hpd_handlers(struct amdgpu_device *adev)
970{
971 struct drm_device *dev = adev->ddev;
972 struct drm_connector *connector;
973 struct amdgpu_connector *aconnector;
974 const struct dc_link *dc_link;
975 struct dc_interrupt_params int_params = {0};
976
977 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
978 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
979
980 list_for_each_entry(connector,
981 &dev->mode_config.connector_list, head) {
982
983 aconnector = to_amdgpu_connector(connector);
984 dc_link = aconnector->dc_link;
985
986 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
987 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
988 int_params.irq_source = dc_link->irq_source_hpd;
989
990 amdgpu_dm_irq_register_interrupt(adev, &int_params,
991 handle_hpd_irq,
992 (void *) aconnector);
993 }
994
995 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
996
997 /* Also register for DP short pulse (hpd_rx). */
998 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
999 int_params.irq_source = dc_link->irq_source_hpd_rx;
1000
1001 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1002 handle_hpd_rx_irq,
1003 (void *) aconnector);
1004 }
1005 }
1006}
1007
1008/* Register IRQ sources and initialize IRQ callbacks */
1009static int dce110_register_irq_handlers(struct amdgpu_device *adev)
1010{
1011 struct dc *dc = adev->dm.dc;
1012 struct common_irq_params *c_irq_params;
1013 struct dc_interrupt_params int_params = {0};
1014 int r;
1015 int i;
1016
1017 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1018 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1019
1020 /* Actions of amdgpu_irq_add_id():
1021 * 1. Register a set() function with base driver.
1022 * Base driver will call set() function to enable/disable an
1023 * interrupt in DC hardware.
1024 * 2. Register amdgpu_dm_irq_handler().
1025 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1026 * coming from DC hardware.
1027 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1028 * for acknowledging and handling. */
1029
b57de80a 1030 /* Use VBLANK interrupt */
3d761e79
AG
1031 for (i = 1; i <= adev->mode_info.num_crtc; i++) {
1032 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->crtc_irq);
b57de80a 1033
4562236b
HW
1034 if (r) {
1035 DRM_ERROR("Failed to add crtc irq id!\n");
1036 return r;
1037 }
1038
1039 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1040 int_params.irq_source =
3d761e79 1041 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 1042
b57de80a 1043 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
1044
1045 c_irq_params->adev = adev;
1046 c_irq_params->irq_src = int_params.irq_source;
1047
1048 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1049 dm_crtc_high_irq, c_irq_params);
1050 }
1051
3d761e79 1052 /* Use GRPH_PFLIP interrupt */
4562236b
HW
1053 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
1054 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
1055 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
1056 if (r) {
1057 DRM_ERROR("Failed to add page flip irq id!\n");
1058 return r;
1059 }
1060
1061 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1062 int_params.irq_source =
1063 dc_interrupt_to_irq_source(dc, i, 0);
1064
1065 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1066
1067 c_irq_params->adev = adev;
1068 c_irq_params->irq_src = int_params.irq_source;
1069
1070 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1071 dm_pflip_high_irq, c_irq_params);
1072
1073 }
1074
1075 /* HPD */
1076 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A,
1077 &adev->hpd_irq);
1078 if (r) {
1079 DRM_ERROR("Failed to add hpd irq id!\n");
1080 return r;
1081 }
1082
1083 register_hpd_handlers(adev);
1084
1085 return 0;
1086}
1087
1088static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1089{
1090 int r;
1091
1092 adev->mode_info.mode_config_initialized = true;
1093
4562236b
HW
1094 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1095
1096 adev->ddev->mode_config.max_width = 16384;
1097 adev->ddev->mode_config.max_height = 16384;
1098
1099 adev->ddev->mode_config.preferred_depth = 24;
1100 adev->ddev->mode_config.prefer_shadow = 1;
1101 /* indicate support of immediate flip */
1102 adev->ddev->mode_config.async_page_flip = true;
1103
1104 adev->ddev->mode_config.fb_base = adev->mc.aper_base;
1105
1106 r = amdgpu_modeset_create_props(adev);
1107 if (r)
1108 return r;
1109
1110 return 0;
1111}
1112
1113#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1114 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1115
1116static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1117{
1118 struct amdgpu_display_manager *dm = bl_get_data(bd);
1119
1120 if (dc_link_set_backlight_level(dm->backlight_link,
1121 bd->props.brightness, 0, 0))
1122 return 0;
1123 else
1124 return 1;
1125}
1126
1127static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1128{
1129 return bd->props.brightness;
1130}
1131
/* Backlight class-device operations implemented by the DM driver. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};
1136
1137void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1138{
1139 char bl_name[16];
1140 struct backlight_properties props = { 0 };
1141
1142 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1143 props.type = BACKLIGHT_RAW;
1144
1145 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1146 dm->adev->ddev->primary->index);
1147
1148 dm->backlight_dev = backlight_device_register(bl_name,
1149 dm->adev->ddev->dev,
1150 dm,
1151 &amdgpu_dm_backlight_ops,
1152 &props);
1153
1154 if (NULL == dm->backlight_dev)
1155 DRM_ERROR("DM: Backlight registration failed!\n");
1156 else
1157 DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
1158}
1159
1160#endif
1161
1162/* In this architecture, the association
1163 * connector -> encoder -> crtc
1164 * id not really requried. The crtc and connector will hold the
1165 * display_index as an abstraction to use with DAL component
1166 *
1167 * Returns 0 on success
1168 */
1169int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1170{
1171 struct amdgpu_display_manager *dm = &adev->dm;
1172 uint32_t i;
1173 struct amdgpu_connector *aconnector;
1174 struct amdgpu_encoder *aencoder;
1175 struct amdgpu_crtc *acrtc;
1176 uint32_t link_cnt;
1177
1178 link_cnt = dm->dc->caps.max_links;
1179
1180 if (amdgpu_dm_mode_config_init(dm->adev)) {
1181 DRM_ERROR("DM: Failed to initialize mode config\n");
1182 return -1;
1183 }
1184
ab2541b6 1185 for (i = 0; i < dm->dc->caps.max_streams; i++) {
4562236b
HW
1186 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
1187 if (!acrtc)
1188 goto fail;
1189
1190 if (amdgpu_dm_crtc_init(
1191 dm,
1192 acrtc,
1193 i)) {
1194 DRM_ERROR("KMS: Failed to initialize crtc\n");
1195 kfree(acrtc);
1196 goto fail;
1197 }
1198 }
1199
ab2541b6 1200 dm->display_indexes_num = dm->dc->caps.max_streams;
4562236b
HW
1201
1202 /* loops over all connectors on the board */
1203 for (i = 0; i < link_cnt; i++) {
1204
1205 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
1206 DRM_ERROR(
1207 "KMS: Cannot support more than %d display indexes\n",
1208 AMDGPU_DM_MAX_DISPLAY_INDEX);
1209 continue;
1210 }
1211
1212 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
1213 if (!aconnector)
1214 goto fail;
1215
1216 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
1217 if (!aencoder) {
1218 goto fail_free_connector;
1219 }
1220
1221 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
1222 DRM_ERROR("KMS: Failed to initialize encoder\n");
1223 goto fail_free_encoder;
1224 }
1225
1226 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
1227 DRM_ERROR("KMS: Failed to initialize connector\n");
1228 goto fail_free_connector;
1229 }
1230
1231 if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
1232 amdgpu_dm_update_connector_after_detect(aconnector);
1233 }
1234
1235 /* Software is initialized. Now we can register interrupt handlers. */
1236 switch (adev->asic_type) {
1237 case CHIP_BONAIRE:
1238 case CHIP_HAWAII:
1239 case CHIP_TONGA:
1240 case CHIP_FIJI:
1241 case CHIP_CARRIZO:
1242 case CHIP_STONEY:
1243 case CHIP_POLARIS11:
1244 case CHIP_POLARIS10:
b264d345 1245 case CHIP_POLARIS12:
4562236b
HW
1246 if (dce110_register_irq_handlers(dm->adev)) {
1247 DRM_ERROR("DM: Failed to initialize IRQ\n");
1248 return -1;
1249 }
1250 break;
1251 default:
1252 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
1253 return -1;
1254 }
1255
1256 drm_mode_config_reset(dm->ddev);
1257
1258 return 0;
1259fail_free_encoder:
1260 kfree(aencoder);
1261fail_free_connector:
1262 kfree(aconnector);
1263fail:
1264 return -1;
1265}
1266
1267void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
1268{
1269 drm_mode_config_cleanup(dm->ddev);
1270 return;
1271}
1272
1273/******************************************************************************
1274 * amdgpu_display_funcs functions
1275 *****************************************************************************/
1276
1277/**
1278 * dm_bandwidth_update - program display watermarks
1279 *
1280 * @adev: amdgpu_device pointer
1281 *
1282 * Calculate and program the display watermarks and line buffer allocation.
1283 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
	/* Intentional stub: watermark programming is handled inside DC for
	 * now; this hook only exists because amdgpu calls it unconditionally
	 * through the dm_display_funcs table below. */
}
1288
/* amdgpu_display_funcs.backlight_set_level hook — currently a stub. */
static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
		u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}
1294
/* amdgpu_display_funcs.backlight_get_level hook — stub, always reports 0. */
static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}
1300
1301/******************************************************************************
1302 * Page Flip functions
1303 ******************************************************************************/
1304
1305/**
1306 * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
1307 * via DRM IOCTL, by user mode.
1308 *
1309 * @adev: amdgpu_device pointer
1310 * @crtc_id: crtc to cleanup pageflip on
1311 * @crtc_base: new address of the crtc (GPU MC address)
1312 *
1313 * Does the actual pageflip (surface address update).
1314 */
static void dm_page_flip(struct amdgpu_device *adev,
			 int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *acrtc;
	const struct dc_stream *stream;
	struct dc_flip_addrs addr = { {0} };

	/*
	 * TODO risk of concurrency issues
	 *
	 * This should guarded by the dal_mutex but we can't do this since the
	 * caller uses a spin_lock on event_lock.
	 *
	 * If we wait on the dal_mutex a second page flip interrupt might come,
	 * spin on the event_lock, disabling interrupts while it does so. At
	 * this point the core can no longer be pre-empted and return to the
	 * thread that waited on the dal_mutex and we're deadlocked.
	 *
	 * With multiple cores the same essentially happens but might just take
	 * a little longer to lock up all cores.
	 *
	 * The reason we should lock on dal_mutex is so that we can be sure
	 * nobody messes with acrtc->stream after we read and check its value.
	 *
	 * We might be able to fix our concurrency issues with a work queue
	 * where we schedule all work items (mode_set, page_flip, etc.) and
	 * execute them one by one. Care needs to be taken to still deal with
	 * any potential concurrency issues arising from interrupt calls.
	 */

	acrtc = adev->mode_info.crtcs[crtc_id];
	/* Unsynchronized read of the stream pointer — see TODO above. */
	stream = acrtc->stream;

	/*
	 * Received a page flip call after the display has been reset.
	 * Just return in this case. Everything should be clean-up on reset.
	 */

	if (!stream) {
		WARN_ON(1);
		return;
	}

	/* crtc_base is the GPU MC address of the new front buffer. */
	addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
	addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
	/* Immediate (async) flips bypass the vblank wait. */
	addr.flip_immediate = async;

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
			 __func__,
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);

	/* Program the new surface address for every surface of the stream. */
	dc_flip_surface_addrs(
			adev->dm.dc,
			dc_stream_get_status(stream)->surfaces,
			&addr, 1);
}
1372
1373static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1374 struct drm_file *filp)
1375{
1376 struct mod_freesync_params freesync_params;
ab2541b6 1377 uint8_t num_streams;
4562236b 1378 uint8_t i;
4562236b
HW
1379
1380 struct amdgpu_device *adev = dev->dev_private;
1381 int r = 0;
1382
1383 /* Get freesync enable flag from DRM */
1384
ab2541b6 1385 num_streams = dc_get_current_stream_count(adev->dm.dc);
4562236b 1386
ab2541b6
AC
1387 for (i = 0; i < num_streams; i++) {
1388 const struct dc_stream *stream;
1389 stream = dc_get_stream_at_index(adev->dm.dc, i);
4562236b
HW
1390
1391 mod_freesync_update_state(adev->dm.freesync_module,
ab2541b6 1392 &stream, 1, &freesync_params);
4562236b
HW
1393 }
1394
1395 return r;
1396}
1397
39cc5be2 1398static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
1399 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
1400 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
1401 .vblank_wait = NULL,
1402 .backlight_set_level =
1403 dm_set_backlight_level,/* called unconditionally */
1404 .backlight_get_level =
1405 dm_get_backlight_level,/* called unconditionally */
1406 .hpd_sense = NULL,/* called unconditionally */
1407 .hpd_set_polarity = NULL, /* called unconditionally */
1408 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
1409 .page_flip = dm_page_flip, /* called unconditionally */
1410 .page_flip_get_scanoutpos =
1411 dm_crtc_get_scanoutpos,/* called unconditionally */
1412 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
1413 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
1414 .notify_freesync = amdgpu_notify_freesync,
1415
1416};
1417
1418#if defined(CONFIG_DEBUG_KERNEL_DC)
1419
1420static ssize_t s3_debug_store(
1421 struct device *device,
1422 struct device_attribute *attr,
1423 const char *buf,
1424 size_t count)
1425{
1426 int ret;
1427 int s3_state;
1428 struct pci_dev *pdev = to_pci_dev(device);
1429 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1430 struct amdgpu_device *adev = drm_dev->dev_private;
1431
1432 ret = kstrtoint(buf, 0, &s3_state);
1433
1434 if (ret == 0) {
1435 if (s3_state) {
1436 dm_resume(adev);
1437 amdgpu_dm_display_resume(adev);
1438 drm_kms_helper_hotplug_event(adev->ddev);
1439 } else
1440 dm_suspend(adev);
1441 }
1442
1443 return ret == 0 ? count : 0;
1444}
1445
1446DEVICE_ATTR_WO(s3_debug);
1447
1448#endif
1449
1450static int dm_early_init(void *handle)
1451{
1452 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1453
1454 amdgpu_dm_set_irq_funcs(adev);
1455
1456 switch (adev->asic_type) {
1457 case CHIP_BONAIRE:
1458 case CHIP_HAWAII:
1459 adev->mode_info.num_crtc = 6;
1460 adev->mode_info.num_hpd = 6;
1461 adev->mode_info.num_dig = 6;
4562236b
HW
1462 break;
1463 case CHIP_FIJI:
1464 case CHIP_TONGA:
1465 adev->mode_info.num_crtc = 6;
1466 adev->mode_info.num_hpd = 6;
1467 adev->mode_info.num_dig = 7;
4562236b
HW
1468 break;
1469 case CHIP_CARRIZO:
1470 adev->mode_info.num_crtc = 3;
1471 adev->mode_info.num_hpd = 6;
1472 adev->mode_info.num_dig = 9;
4562236b
HW
1473 break;
1474 case CHIP_STONEY:
1475 adev->mode_info.num_crtc = 2;
1476 adev->mode_info.num_hpd = 6;
1477 adev->mode_info.num_dig = 9;
4562236b
HW
1478 break;
1479 case CHIP_POLARIS11:
b264d345 1480 case CHIP_POLARIS12:
4562236b
HW
1481 adev->mode_info.num_crtc = 5;
1482 adev->mode_info.num_hpd = 5;
1483 adev->mode_info.num_dig = 5;
4562236b
HW
1484 break;
1485 case CHIP_POLARIS10:
1486 adev->mode_info.num_crtc = 6;
1487 adev->mode_info.num_hpd = 6;
1488 adev->mode_info.num_dig = 6;
4562236b
HW
1489 break;
1490 default:
1491 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
1492 return -EINVAL;
1493 }
1494
39cc5be2
AD
1495 if (adev->mode_info.funcs == NULL)
1496 adev->mode_info.funcs = &dm_display_funcs;
1497
4562236b
HW
1498 /* Note: Do NOT change adev->audio_endpt_rreg and
1499 * adev->audio_endpt_wreg because they are initialised in
1500 * amdgpu_device_init() */
1501#if defined(CONFIG_DEBUG_KERNEL_DC)
1502 device_create_file(
1503 adev->ddev->dev,
1504 &dev_attr_s3_debug);
1505#endif
1506
1507 return 0;
1508}
1509
/* Acquire the DAL lock — stub, unconditionally reports success. */
bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}
1515
/* Release the DAL lock — stub, unconditionally reports success. */
bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}
1521
1522