]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drm/amd/display: Log clock source in error condition
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
CommitLineData
4562236b
HW
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services_types.h"
27#include "dc.h"
28
29#include "vid.h"
30#include "amdgpu.h"
a49dcb88 31#include "amdgpu_display.h"
4562236b
HW
32#include "atom.h"
33#include "amdgpu_dm.h"
34#include "amdgpu_dm_types.h"
35
36#include "amd_shared.h"
37#include "amdgpu_dm_irq.h"
38#include "dm_helpers.h"
39
40#include "ivsrcid/ivsrcid_vislands30.h"
41
42#include <linux/module.h>
43#include <linux/moduleparam.h>
44#include <linux/version.h>
45
46#include <drm/drm_atomic.h>
47#include <drm/drm_atomic_helper.h>
48#include <drm/drm_dp_mst_helper.h>
49
50#include "modules/inc/mod_freesync.h"
51
d4e13b0d
AD
/* Default plane-type table: one primary plane per pipe, no underlay. */
static enum drm_plane_type dm_surfaces_type_default[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
};
60
/* Carrizo plane-type table: three primaries plus a YUV-capable underlay.
 * (NB: "carizzo" spelling is the established identifier — do not rename
 * without updating all users.)
 */
static enum drm_plane_type dm_surfaces_type_carizzo[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
};
67
/* Stoney plane-type table: two primaries plus a YUV-capable underlay. */
static enum drm_plane_type dm_surfaces_type_stoney[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};
73
4562236b
HW
74/*
75 * dm_vblank_get_counter
76 *
77 * @brief
78 * Get counter for number of vertical blanks
79 *
80 * @param
81 * struct amdgpu_device *adev - [in] desired amdgpu device
82 * int disp_idx - [in] which CRTC to get the counter from
83 *
84 * @return
85 * Counter for vertical blanks
86 */
87static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
88{
89 if (crtc >= adev->mode_info.num_crtc)
90 return 0;
91 else {
92 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
93
ab2541b6
AC
94 if (NULL == acrtc->stream) {
95 DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
4562236b
HW
96 return 0;
97 }
98
ab2541b6 99 return dc_stream_get_vblank_counter(acrtc->stream);
4562236b
HW
100 }
101}
102
103static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
104 u32 *vbl, u32 *position)
105{
106 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
107 return -EINVAL;
108 else {
109 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
110
ab2541b6
AC
111 if (NULL == acrtc->stream) {
112 DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
4562236b
HW
113 return 0;
114 }
115
ab2541b6 116 return dc_stream_get_scanoutpos(acrtc->stream, vbl, position);
4562236b
HW
117 }
118
119 return 0;
120}
121
/* IP-block idle query. No idle tracking is implemented yet, so the DM
 * always reports idle. /* XXX todo */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
127
/* IP-block wait-for-idle hook: stubbed, always succeeds. /* XXX todo */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
133
/* IP-block soft-reset check: DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
138
/* IP-block soft-reset hook: stubbed, always succeeds. /* XXX todo */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
144
/*
 * Map an OTG (output timing generator) instance back to its amdgpu_crtc
 * by walking the DRM CRTC list.
 *
 * Returns the first CRTC whose otg_inst matches, or NULL if none does.
 * An otg_inst of -1 is tolerated (with a WARN) by falling back to CRTC 0.
 */
static struct amdgpu_crtc *get_crtc_by_otg_inst(
	struct amdgpu_device *adev,
	int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * following if is check inherited from both functions where this one is
	 * used now. Need to be checked why it could happen.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
171
/*
 * Page-flip completion interrupt handler (high/direct IRQ context).
 *
 * Resolves the CRTC that raised the pflip interrupt, sends the pending
 * DRM flip-complete event to userspace, clears the flip state, and drops
 * a vblank reference.  All event bookkeeping is done under the DRM
 * event_lock.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	/* The pflip IRQ source is offset by IRQ_TYPE_PFLIP from the OTG
	 * instance; undo that to find the CRTC. */
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	/* A completion IRQ is only meaningful if a flip was submitted. */
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}


	/* wake up userspace */
	if (amdgpu_crtc->event
			&& amdgpu_crtc->event->event.base.type
			== DRM_EVENT_FLIP_COMPLETE) {
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;
	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
			__func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	/* NOTE(review): presumably balances a drm_crtc_vblank_get() taken
	 * when the flip was queued — confirm at the flip submission site. */
	drm_crtc_vblank_put(&amdgpu_crtc->base);
}
219
220static void dm_crtc_high_irq(void *interrupt_params)
221{
222 struct common_irq_params *irq_params = interrupt_params;
223 struct amdgpu_device *adev = irq_params->adev;
224 uint8_t crtc_index = 0;
225 struct amdgpu_crtc *acrtc;
226
b57de80a 227 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
4562236b
HW
228
229 if (acrtc)
230 crtc_index = acrtc->crtc_id;
231
232 drm_handle_vblank(adev->ddev, crtc_index);
233}
234
235static int dm_set_clockgating_state(void *handle,
236 enum amd_clockgating_state state)
237{
238 return 0;
239}
240
241static int dm_set_powergating_state(void *handle,
242 enum amd_powergating_state state)
243{
244 return 0;
245}
246
247/* Prototypes of private functions */
248static int dm_early_init(void* handle);
249
250static void hotplug_notify_work_func(struct work_struct *work)
251{
252 struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
253 struct drm_device *dev = dm->ddev;
254
255 drm_kms_helper_hotplug_event(dev);
256}
257
258/* Init display KMS
259 *
260 * Returns 0 on success
261 */
262int amdgpu_dm_init(struct amdgpu_device *adev)
263{
264 struct dc_init_data init_data;
265 adev->dm.ddev = adev->ddev;
266 adev->dm.adev = adev;
267
268 DRM_INFO("DAL is enabled\n");
269 /* Zero all the fields */
270 memset(&init_data, 0, sizeof(init_data));
271
272 /* initialize DAL's lock (for SYNC context use) */
273 spin_lock_init(&adev->dm.dal_lock);
274
275 /* initialize DAL's mutex */
276 mutex_init(&adev->dm.dal_mutex);
277
278 if(amdgpu_dm_irq_init(adev)) {
279 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
280 goto error;
281 }
282
283 init_data.asic_id.chip_family = adev->family;
284
285 init_data.asic_id.pci_revision_id = adev->rev_id;
286 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
287
288 init_data.asic_id.vram_width = adev->mc.vram_width;
289 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
290 init_data.asic_id.atombios_base_address =
291 adev->mode_info.atom_context->bios;
292
293 init_data.driver = adev;
294
295 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
296
297 if (!adev->dm.cgs_device) {
298 DRM_ERROR("amdgpu: failed to create cgs device.\n");
299 goto error;
300 }
301
302 init_data.cgs_device = adev->dm.cgs_device;
303
304 adev->dm.dal = NULL;
305
306 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
307
308 /* Display Core create. */
309 adev->dm.dc = dc_create(&init_data);
310
311 if (!adev->dm.dc)
312 DRM_INFO("Display Core failed to initialize!\n");
313
314 INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
315
316 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
317 if (!adev->dm.freesync_module) {
318 DRM_ERROR(
319 "amdgpu: failed to initialize freesync_module.\n");
320 } else
321 DRM_INFO("amdgpu: freesync_module init done %p.\n",
322 adev->dm.freesync_module);
323
324 if (amdgpu_dm_initialize_drm_device(adev)) {
325 DRM_ERROR(
326 "amdgpu: failed to initialize sw for display support.\n");
327 goto error;
328 }
329
330 /* Update the actual used number of crtc */
331 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
332
333 /* TODO: Add_display_info? */
334
335 /* TODO use dynamic cursor width */
ce75805e
AG
336 adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
337 adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
4562236b
HW
338
339 if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
340 DRM_ERROR(
341 "amdgpu: failed to initialize sw for display support.\n");
342 goto error;
343 }
344
345 DRM_INFO("KMS initialized.\n");
346
347 return 0;
348error:
349 amdgpu_dm_fini(adev);
350
351 return -1;
352}
353
354void amdgpu_dm_fini(struct amdgpu_device *adev)
355{
356 amdgpu_dm_destroy_drm_device(&adev->dm);
357 /*
358 * TODO: pageflip, vlank interrupt
359 *
360 * amdgpu_dm_irq_fini(adev);
361 */
362
363 if (adev->dm.cgs_device) {
364 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
365 adev->dm.cgs_device = NULL;
366 }
367 if (adev->dm.freesync_module) {
368 mod_freesync_destroy(adev->dm.freesync_module);
369 adev->dm.freesync_module = NULL;
370 }
371 /* DC Destroy TODO: Replace destroy DAL */
372 {
373 dc_destroy(&adev->dm.dc);
374 }
375 return;
376}
377
/* moved from amdgpu_dm_kms.c */
/* Fix: `()` in C declares an unspecified parameter list; use `(void)` to
 * declare "takes no arguments" explicitly.  Body is intentionally empty.
 */
void amdgpu_dm_destroy(void)
{
}
382
/* IP-block software init: nothing to set up for DM; succeed. */
static int dm_sw_init(void *handle)
{
	return 0;
}
387
/* IP-block software teardown: nothing to undo for DM; succeed. */
static int dm_sw_fini(void *handle)
{
	return 0;
}
392
7abcf6b5 393static int detect_mst_link_for_all_connectors(struct drm_device *dev)
4562236b
HW
394{
395 struct amdgpu_connector *aconnector;
396 struct drm_connector *connector;
7abcf6b5 397 int ret = 0;
4562236b
HW
398
399 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
400
401 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
402 aconnector = to_amdgpu_connector(connector);
7abcf6b5
AG
403 if (aconnector->dc_link->type == dc_connection_mst_branch) {
404 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
405 aconnector, aconnector->base.base.id);
406
407 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
408 if (ret < 0) {
409 DRM_ERROR("DM_MST: Failed to start MST\n");
410 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
411 return ret;
4562236b 412 }
7abcf6b5 413 }
4562236b
HW
414 }
415
416 drm_modeset_unlock(&dev->mode_config.connection_mutex);
7abcf6b5
AG
417 return ret;
418}
419
420static int dm_late_init(void *handle)
421{
422 struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
423 int r = detect_mst_link_for_all_connectors(dev);
424
425 return r;
4562236b
HW
426}
427
428static void s3_handle_mst(struct drm_device *dev, bool suspend)
429{
430 struct amdgpu_connector *aconnector;
431 struct drm_connector *connector;
432
433 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
434
435 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
436 aconnector = to_amdgpu_connector(connector);
437 if (aconnector->dc_link->type == dc_connection_mst_branch &&
438 !aconnector->mst_port) {
439
440 if (suspend)
441 drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
442 else
443 drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
444 }
445 }
446
447 drm_modeset_unlock(&dev->mode_config.connection_mutex);
448}
449
/* IP-block hardware init: create the display manager and arm HPD. */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	/* NOTE(review): amdgpu_dm_init()'s return value is ignored here,
	 * matching existing behavior — confirm whether failure should
	 * propagate to the caller. */
	amdgpu_dm_hpd_init(adev);

	return 0;
}
459
/* IP-block hardware teardown: disarm HPD, then release DM IRQs. */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	amdgpu_dm_hpd_fini(adev);
	amdgpu_dm_irq_fini(adev);

	return 0;
}
470
/*
 * IP-block suspend: quiesce MST, mask DM interrupts, cache the atomic
 * state (restored later by amdgpu_dm_display_resume()), then put DC
 * into ACPI D3.  Order matters: MST must be suspended before IRQs go
 * away, and the atomic state must be captured before powering down DC.
 */
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	/* Consumed by amdgpu_dm_display_resume() on the way back up. */
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D3
		);

	return ret;
}
490
491struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
492 struct drm_atomic_state *state,
493 struct drm_crtc *crtc,
494 bool from_state_var)
495{
496 uint32_t i;
497 struct drm_connector_state *conn_state;
498 struct drm_connector *connector;
499 struct drm_crtc *crtc_from_state;
500
501 for_each_connector_in_state(
502 state,
503 connector,
504 conn_state,
505 i) {
506 crtc_from_state =
507 from_state_var ?
508 conn_state->crtc :
509 connector->state->crtc;
510
511 if (crtc_from_state == crtc)
512 return to_amdgpu_connector(connector);
513 }
514
515 return NULL;
516}
517
4562236b
HW
518static int dm_resume(void *handle)
519{
520 struct amdgpu_device *adev = handle;
521 struct amdgpu_display_manager *dm = &adev->dm;
522
523 /* power on hardware */
524 dc_set_power_state(
525 dm->dc,
a3621485
AG
526 DC_ACPI_CM_POWER_STATE_D0
527 );
4562236b
HW
528
529 return 0;
530}
531
/*
 * Second-stage display resume: re-arm HPD, re-detect every link, and
 * replay the atomic state cached by dm_suspend().
 *
 * Returns 0 on success or the error from drm_atomic_helper_resume().
 */
int amdgpu_dm_display_resume(struct amdgpu_device *adev )
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		dc_link_detect(aconnector->dc_link, false);
		/* Drop the stale sink so detect repopulates it from scratch. */
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Force mode set in atomic commit */
	for_each_crtc_in_state(adev->dm.cached_state, crtc, crtc_state, i)
		crtc_state->active_changed = true;

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	amdgpu_dm_irq_resume(adev);

	return ret;
}
582
/* amd_ip_funcs vtable wiring the DM into the amdgpu IP-block lifecycle.
 * Several hooks (idle, soft reset, gating) are stubs; see each dm_*
 * implementation above.
 */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
600
/* IP-block descriptor registering DM as the DCE block, version 1.0.0. */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
609
/* DRM mode-config vtable: framebuffer creation, output polling, and the
 * atomic check/commit entry points for the DM.
 */
/* TODO: it is temporary non-const, should fixed later */
static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit
};
617
/* Atomic helper hook: DM-specific commit tail (flip programming etc.). */
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
621
/*
 * Synchronize a connector's DRM-side state (dc_sink, EDID property,
 * freesync registration) with the result of the last dc_link detection.
 *
 * MST connectors are left alone (the DRM MST framework owns them), as
 * are MST sinks.  Forced ("edid mgmt") connectors get special handling
 * that swaps between the physical sink and the emulated sink.
 */
void amdgpu_dm_update_connector_after_detect(
	struct amdgpu_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	sink = aconnector->dc_link->local_sink;

	/* Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Don't do it here if you are during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
		&& aconnector->dc_em_sink) {

		/* For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
					connector);
				/* retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
				connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!! */
		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
			aconnector->connector_id);
		return;
	}

	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here. */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
				connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0)
			aconnector->edid = NULL;
		else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;


			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}
726
727static void handle_hpd_irq(void *param)
728{
729 struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
730 struct drm_connector *connector = &aconnector->base;
731 struct drm_device *dev = connector->dev;
732
733 /* In case of failure or MST no need to update connector status or notify the OS
734 * since (for MST case) MST does this in it's own context.
735 */
736 mutex_lock(&aconnector->hpd_lock);
737 if (dc_link_detect(aconnector->dc_link, false)) {
738 amdgpu_dm_update_connector_after_detect(aconnector);
739
740
741 drm_modeset_lock_all(dev);
742 dm_restore_drm_connector_state(dev, connector);
743 drm_modeset_unlock_all(dev);
744
745 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
746 drm_kms_helper_hotplug_event(dev);
747 }
748 mutex_unlock(&aconnector->hpd_lock);
749
750}
751
/*
 * Service a DP short-pulse interrupt via the sink's IRQ/ESI DPCD
 * registers.  The register window depends on the DPCD revision (0x200+
 * pre-1.2, 0x2002+ ESI from 1.2).  Each batch is handed to the DRM MST
 * helper; handled IRQs are ACKed back to the sink and the registers are
 * re-read until no new IRQ is reported or the retry cap is hit.
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	/* Bound the ACK/re-read loop so a noisy sink can't spin us forever. */
	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			/* Up to 3 write attempts; a short write means the
			 * ACK didn't land. */
			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else
			break;
	}

	if (process_count == max_process_count)
		DRM_DEBUG_KMS("Loop exceeded max iterations\n");
}
828
/*
 * Short-pulse (hpd_rx) handler registered per connector.
 *
 * DC processes the link IRQ first; if a downstream-port status change is
 * reported on a non-MST-root connector, the link is re-detected and
 * userspace notified.  Links with a trained lane count or an MST branch
 * additionally get their ESI IRQs serviced by dm_handle_hpd_rx_irq().
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO: Temporarily add a mutex so the hpd interrupt cannot have a
	 * gpio conflict; after the i2c helper is implemented, this mutex
	 * should be retired.  Note it is intentionally skipped for MST
	 * branch links.
	 */
	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
		!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(aconnector->dc_link, false)) {
			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
		(dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}
865
866static void register_hpd_handlers(struct amdgpu_device *adev)
867{
868 struct drm_device *dev = adev->ddev;
869 struct drm_connector *connector;
870 struct amdgpu_connector *aconnector;
871 const struct dc_link *dc_link;
872 struct dc_interrupt_params int_params = {0};
873
874 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
875 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
876
877 list_for_each_entry(connector,
878 &dev->mode_config.connector_list, head) {
879
880 aconnector = to_amdgpu_connector(connector);
881 dc_link = aconnector->dc_link;
882
883 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
884 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
885 int_params.irq_source = dc_link->irq_source_hpd;
886
887 amdgpu_dm_irq_register_interrupt(adev, &int_params,
888 handle_hpd_irq,
889 (void *) aconnector);
890 }
891
892 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
893
894 /* Also register for DP short pulse (hpd_rx). */
895 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
896 int_params.irq_source = dc_link->irq_source_hpd_rx;
897
898 amdgpu_dm_irq_register_interrupt(adev, &int_params,
899 handle_hpd_rx_irq,
900 (void *) aconnector);
901 }
902 }
903}
904
/* Register IRQ sources and initialize IRQ callbacks
 *
 * Wires VBLANK, page-flip (GRPH_PFLIP) and HPD interrupt sources into
 * the DM IRQ service.  Returns 0 on success or the amdgpu_irq_add_id()
 * error.
 */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	/* Vega10 routes DCE interrupts through its own IH client id. */
	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;

	if (adev->asic_type == CHIP_VEGA10)
		client_id = AMDGPU_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		/* Vblank is serviced in direct (high) IRQ context. */
		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
987
988static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
989{
990 int r;
991
992 adev->mode_info.mode_config_initialized = true;
993
4562236b 994 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
54f5499a 995 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b
HW
996
997 adev->ddev->mode_config.max_width = 16384;
998 adev->ddev->mode_config.max_height = 16384;
999
1000 adev->ddev->mode_config.preferred_depth = 24;
1001 adev->ddev->mode_config.prefer_shadow = 1;
1002 /* indicate support of immediate flip */
1003 adev->ddev->mode_config.async_page_flip = true;
1004
1005 adev->ddev->mode_config.fb_base = adev->mc.aper_base;
1006
1007 r = amdgpu_modeset_create_props(adev);
1008 if (r)
1009 return r;
1010
1011 return 0;
1012}
1013
1014#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1015 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1016
1017static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1018{
1019 struct amdgpu_display_manager *dm = bl_get_data(bd);
1020
1021 if (dc_link_set_backlight_level(dm->backlight_link,
1022 bd->props.brightness, 0, 0))
1023 return 0;
1024 else
1025 return 1;
1026}
1027
1028static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1029{
1030 return bd->props.brightness;
1031}
1032
/* Backlight class driver operations for the DM-owned backlight device. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};
1037
1038void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1039{
1040 char bl_name[16];
1041 struct backlight_properties props = { 0 };
1042
1043 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1044 props.type = BACKLIGHT_RAW;
1045
1046 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1047 dm->adev->ddev->primary->index);
1048
1049 dm->backlight_dev = backlight_device_register(bl_name,
1050 dm->adev->ddev->dev,
1051 dm,
1052 &amdgpu_dm_backlight_ops,
1053 &props);
1054
1055 if (NULL == dm->backlight_dev)
1056 DRM_ERROR("DM: Backlight registration failed!\n");
1057 else
1058 DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
1059}
1060
1061#endif
1062
1063/* In this architecture, the association
1064 * connector -> encoder -> crtc
1065 * id not really requried. The crtc and connector will hold the
1066 * display_index as an abstraction to use with DAL component
1067 *
1068 * Returns 0 on success
1069 */
1070int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1071{
1072 struct amdgpu_display_manager *dm = &adev->dm;
1073 uint32_t i;
f2a0f5e6
HW
1074 struct amdgpu_connector *aconnector = NULL;
1075 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 1076 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b
HW
1077 uint32_t link_cnt;
1078
1079 link_cnt = dm->dc->caps.max_links;
4562236b
HW
1080 if (amdgpu_dm_mode_config_init(dm->adev)) {
1081 DRM_ERROR("DM: Failed to initialize mode config\n");
f2a0f5e6 1082 return -1;
4562236b
HW
1083 }
1084
d4e13b0d
AD
1085 for (i = 0; i < dm->dc->caps.max_surfaces; i++) {
1086 mode_info->planes[i] = kzalloc(sizeof(struct amdgpu_plane),
1087 GFP_KERNEL);
1088 if (!mode_info->planes[i]) {
1089 DRM_ERROR("KMS: Failed to allocate surface\n");
1090 goto fail_free_planes;
1091 }
1092 mode_info->planes[i]->plane_type = mode_info->plane_type[i];
1093 if (amdgpu_dm_plane_init(dm, mode_info->planes[i], 1)) {
1094 DRM_ERROR("KMS: Failed to initialize plane\n");
1095 goto fail_free_planes;
1096 }
1097 }
4562236b 1098
d4e13b0d
AD
1099 for (i = 0; i < dm->dc->caps.max_streams; i++)
1100 if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
4562236b 1101 DRM_ERROR("KMS: Failed to initialize crtc\n");
d4e13b0d 1102 goto fail_free_planes;
4562236b 1103 }
4562236b 1104
ab2541b6 1105 dm->display_indexes_num = dm->dc->caps.max_streams;
4562236b
HW
1106
1107 /* loops over all connectors on the board */
1108 for (i = 0; i < link_cnt; i++) {
1109
1110 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
1111 DRM_ERROR(
1112 "KMS: Cannot support more than %d display indexes\n",
1113 AMDGPU_DM_MAX_DISPLAY_INDEX);
1114 continue;
1115 }
1116
1117 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
1118 if (!aconnector)
f2a0f5e6 1119 goto fail_free_planes;
4562236b
HW
1120
1121 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
1122 if (!aencoder) {
1123 goto fail_free_connector;
1124 }
1125
1126 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
1127 DRM_ERROR("KMS: Failed to initialize encoder\n");
1128 goto fail_free_encoder;
1129 }
1130
1131 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
1132 DRM_ERROR("KMS: Failed to initialize connector\n");
f2a0f5e6 1133 goto fail_free_encoder;
4562236b
HW
1134 }
1135
1136 if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
1137 amdgpu_dm_update_connector_after_detect(aconnector);
1138 }
1139
1140 /* Software is initialized. Now we can register interrupt handlers. */
1141 switch (adev->asic_type) {
1142 case CHIP_BONAIRE:
1143 case CHIP_HAWAII:
1144 case CHIP_TONGA:
1145 case CHIP_FIJI:
1146 case CHIP_CARRIZO:
1147 case CHIP_STONEY:
1148 case CHIP_POLARIS11:
1149 case CHIP_POLARIS10:
b264d345 1150 case CHIP_POLARIS12:
2c8ad2d5 1151 case CHIP_VEGA10:
4562236b
HW
1152 if (dce110_register_irq_handlers(dm->adev)) {
1153 DRM_ERROR("DM: Failed to initialize IRQ\n");
d4e13b0d 1154 goto fail_free_encoder;
4562236b
HW
1155 }
1156 break;
1157 default:
1158 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
d4e13b0d 1159 goto fail_free_encoder;
4562236b
HW
1160 }
1161
1162 drm_mode_config_reset(dm->ddev);
1163
1164 return 0;
1165fail_free_encoder:
1166 kfree(aencoder);
1167fail_free_connector:
1168 kfree(aconnector);
d4e13b0d
AD
1169fail_free_planes:
1170 for (i = 0; i < dm->dc->caps.max_surfaces; i++)
1171 kfree(mode_info->planes[i]);
4562236b
HW
1172 return -1;
1173}
1174
1175void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
1176{
1177 drm_mode_config_cleanup(dm->ddev);
1178 return;
1179}
1180
1181/******************************************************************************
1182 * amdgpu_display_funcs functions
1183 *****************************************************************************/
1184
/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 * Currently a stub: DC handles bandwidth internally for these ASICs.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
1196
/* amdgpu_display_funcs.backlight_set_level hook — stub, does nothing yet. */
static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
		u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}
1202
/* amdgpu_display_funcs.backlight_get_level hook — stub, always returns 0. */
static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}
1208
1209/******************************************************************************
1210 * Page Flip functions
1211 ******************************************************************************/
1212
1213/**
1214 * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
1215 * via DRM IOCTL, by user mode.
1216 *
1217 * @adev: amdgpu_device pointer
1218 * @crtc_id: crtc to cleanup pageflip on
1219 * @crtc_base: new address of the crtc (GPU MC address)
1220 *
1221 * Does the actual pageflip (surface address update).
1222 */
1223static void dm_page_flip(struct amdgpu_device *adev,
1224 int crtc_id, u64 crtc_base, bool async)
1225{
1226 struct amdgpu_crtc *acrtc;
ab2541b6 1227 const struct dc_stream *stream;
4562236b
HW
1228 struct dc_flip_addrs addr = { {0} };
1229
1230 /*
1231 * TODO risk of concurrency issues
1232 *
1233 * This should guarded by the dal_mutex but we can't do this since the
1234 * caller uses a spin_lock on event_lock.
1235 *
1236 * If we wait on the dal_mutex a second page flip interrupt might come,
1237 * spin on the event_lock, disabling interrupts while it does so. At
1238 * this point the core can no longer be pre-empted and return to the
1239 * thread that waited on the dal_mutex and we're deadlocked.
1240 *
1241 * With multiple cores the same essentially happens but might just take
1242 * a little longer to lock up all cores.
1243 *
1244 * The reason we should lock on dal_mutex is so that we can be sure
ab2541b6 1245 * nobody messes with acrtc->stream after we read and check its value.
4562236b
HW
1246 *
1247 * We might be able to fix our concurrency issues with a work queue
1248 * where we schedule all work items (mode_set, page_flip, etc.) and
1249 * execute them one by one. Care needs to be taken to still deal with
1250 * any potential concurrency issues arising from interrupt calls.
1251 */
1252
1253 acrtc = adev->mode_info.crtcs[crtc_id];
ab2541b6 1254 stream = acrtc->stream;
4562236b 1255
54f5499a
AG
1256
1257 if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
1258 DRM_ERROR("flip queue: acrtc %d, already busy\n", acrtc->crtc_id);
1259 /* In commit tail framework this cannot happen */
1260 BUG_ON(0);
1261 }
1262
1263
4562236b
HW
1264 /*
1265 * Received a page flip call after the display has been reset.
1266 * Just return in this case. Everything should be clean-up on reset.
1267 */
1268
ab2541b6 1269 if (!stream) {
4562236b
HW
1270 WARN_ON(1);
1271 return;
1272 }
1273
1274 addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
1275 addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
1276 addr.flip_immediate = async;
1277
54f5499a
AG
1278
1279 if (acrtc->base.state->event &&
1280 acrtc->base.state->event->event.base.type ==
1281 DRM_EVENT_FLIP_COMPLETE) {
1282 acrtc->event = acrtc->base.state->event;
1283
1284 /* Set the flip status */
1285 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
1286
1287 /* Mark this event as consumed */
1288 acrtc->base.state->event = NULL;
1289 }
1290
1291 dc_flip_surface_addrs(adev->dm.dc,
1292 dc_stream_get_status(stream)->surfaces,
1293 &addr, 1);
1294
4562236b
HW
1295 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
1296 __func__,
1297 addr.address.grph.addr.high_part,
1298 addr.address.grph.addr.low_part);
1299
4562236b
HW
1300}
1301
1302static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1303 struct drm_file *filp)
1304{
1305 struct mod_freesync_params freesync_params;
ab2541b6 1306 uint8_t num_streams;
4562236b 1307 uint8_t i;
4562236b
HW
1308
1309 struct amdgpu_device *adev = dev->dev_private;
1310 int r = 0;
1311
1312 /* Get freesync enable flag from DRM */
1313
ab2541b6 1314 num_streams = dc_get_current_stream_count(adev->dm.dc);
4562236b 1315
ab2541b6
AC
1316 for (i = 0; i < num_streams; i++) {
1317 const struct dc_stream *stream;
1318 stream = dc_get_stream_at_index(adev->dm.dc, i);
4562236b
HW
1319
1320 mod_freesync_update_state(adev->dm.freesync_module,
ab2541b6 1321 &stream, 1, &freesync_params);
4562236b
HW
1322 }
1323
1324 return r;
1325}
1326
/* Hooks exposed to the amdgpu display core.  NULL entries are either
 * handled inside DAL (VBIOS parsing) or intentionally unimplemented. */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level,/* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level,/* called unconditionally */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip = dm_page_flip, /* called unconditionally */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,

};
1346
2c8ad2d5 1347
4562236b
HW
1348#if defined(CONFIG_DEBUG_KERNEL_DC)
1349
1350static ssize_t s3_debug_store(
1351 struct device *device,
1352 struct device_attribute *attr,
1353 const char *buf,
1354 size_t count)
1355{
1356 int ret;
1357 int s3_state;
1358 struct pci_dev *pdev = to_pci_dev(device);
1359 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1360 struct amdgpu_device *adev = drm_dev->dev_private;
1361
1362 ret = kstrtoint(buf, 0, &s3_state);
1363
1364 if (ret == 0) {
1365 if (s3_state) {
1366 dm_resume(adev);
1367 amdgpu_dm_display_resume(adev);
1368 drm_kms_helper_hotplug_event(adev->ddev);
1369 } else
1370 dm_suspend(adev);
1371 }
1372
1373 return ret == 0 ? count : 0;
1374}
1375
1376DEVICE_ATTR_WO(s3_debug);
1377
1378#endif
1379
1380static int dm_early_init(void *handle)
1381{
1382 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1383
1384 amdgpu_dm_set_irq_funcs(adev);
1385
1386 switch (adev->asic_type) {
1387 case CHIP_BONAIRE:
1388 case CHIP_HAWAII:
1389 adev->mode_info.num_crtc = 6;
1390 adev->mode_info.num_hpd = 6;
1391 adev->mode_info.num_dig = 6;
d4e13b0d 1392 adev->mode_info.plane_type = dm_surfaces_type_default;
4562236b
HW
1393 break;
1394 case CHIP_FIJI:
1395 case CHIP_TONGA:
1396 adev->mode_info.num_crtc = 6;
1397 adev->mode_info.num_hpd = 6;
1398 adev->mode_info.num_dig = 7;
d4e13b0d 1399 adev->mode_info.plane_type = dm_surfaces_type_default;
4562236b
HW
1400 break;
1401 case CHIP_CARRIZO:
1402 adev->mode_info.num_crtc = 3;
1403 adev->mode_info.num_hpd = 6;
1404 adev->mode_info.num_dig = 9;
d4e13b0d 1405 adev->mode_info.plane_type = dm_surfaces_type_carizzo;
4562236b
HW
1406 break;
1407 case CHIP_STONEY:
1408 adev->mode_info.num_crtc = 2;
1409 adev->mode_info.num_hpd = 6;
1410 adev->mode_info.num_dig = 9;
d4e13b0d 1411 adev->mode_info.plane_type = dm_surfaces_type_stoney;
4562236b
HW
1412 break;
1413 case CHIP_POLARIS11:
b264d345 1414 case CHIP_POLARIS12:
4562236b
HW
1415 adev->mode_info.num_crtc = 5;
1416 adev->mode_info.num_hpd = 5;
1417 adev->mode_info.num_dig = 5;
d4e13b0d 1418 adev->mode_info.plane_type = dm_surfaces_type_default;
4562236b
HW
1419 break;
1420 case CHIP_POLARIS10:
1421 adev->mode_info.num_crtc = 6;
1422 adev->mode_info.num_hpd = 6;
1423 adev->mode_info.num_dig = 6;
d4e13b0d 1424 adev->mode_info.plane_type = dm_surfaces_type_default;
4562236b 1425 break;
2c8ad2d5
AD
1426 case CHIP_VEGA10:
1427 adev->mode_info.num_crtc = 6;
1428 adev->mode_info.num_hpd = 6;
1429 adev->mode_info.num_dig = 6;
1430 break;
4562236b
HW
1431 default:
1432 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
1433 return -EINVAL;
1434 }
1435
39cc5be2
AD
1436 if (adev->mode_info.funcs == NULL)
1437 adev->mode_info.funcs = &dm_display_funcs;
1438
4562236b
HW
1439 /* Note: Do NOT change adev->audio_endpt_rreg and
1440 * adev->audio_endpt_wreg because they are initialised in
1441 * amdgpu_device_init() */
1442#if defined(CONFIG_DEBUG_KERNEL_DC)
1443 device_create_file(
1444 adev->ddev->dev,
1445 &dev_attr_s3_debug);
1446#endif
1447
1448 return 0;
1449}
1450
/* Stub: DAL locking not implemented yet; always reports success. */
bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}
1456
/* Stub: counterpart to amdgpu_dm_acquire_dal_lock(); always succeeds. */
bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}
1462
1463