/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_types.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>

#include "modules/inc/mod_freesync.h"

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/irqsrcs_dcn_1_0.h"

#include "raven1/DCN/dcn_1_0_offset.h"
#include "raven1/DCN/dcn_1_0_sh_mask.h"
#include "vega10/soc15ip.h"

#include "soc15_common.h"
#endif

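/*
 * Per-ASIC tables of DRM plane types. Each entry maps a DC surface to a DRM
 * plane: one primary plane per CRTC, plus (on Carrizo/Stoney) a trailing
 * overlay entry for the YUV-capable underlay pipe.
 */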
static enum drm_plane_type dm_surfaces_type_default[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
};

static enum drm_plane_type dm_surfaces_type_carizzo[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};

static enum drm_plane_type dm_surfaces_type_stoney[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *get_crtc_by_otg_inst(
	struct amdgpu_device *adev,
	int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * The following check is inherited from both functions where this
	 * one is now used. It still needs to be investigated why this case
	 * can happen at all.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

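/*
 * Page-flip completion handler: runs in interrupt context when the
 * GRPH_PFLIP interrupt fires. Under event_lock it verifies that a flip was
 * actually submitted, sends the pending vblank event to userspace and
 * resets pflip_status so the next flip can be queued.
 */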
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* wake up userspace */
	if (amdgpu_crtc->event) {
		/* Update to correct count/ts if racing with vblank irq */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);

		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;

	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
			 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
}

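/*
 * Vertical-blank handler: maps the interrupting OTG instance back to its
 * amdgpu_crtc and forwards the event to the DRM core.
 */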
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	uint8_t crtc_index = 0;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc)
		crtc_index = acrtc->crtc_id;

	drm_handle_vblank(adev->ddev, crtc_index);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}

/* Init display KMS
 *
 * Returns 0 on success
 */
int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	DRM_INFO("DAL is enabled\n");
	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->mc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (!adev->dm.dc)
		DRM_INFO("Display Core failed to initialize!\n");

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
			"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_INFO("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_INFO("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -1;
}

void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	return;
}

/* moved from amdgpu_dm_kms.c */
void amdgpu_dm_destroy(void)
{
}

static int dm_sw_init(void *handle)
{
	return 0;
}

static int dm_sw_fini(void *handle)
{
	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch) {
			DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					aconnector, aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
				/* break instead of returning here so that
				 * connection_mutex is always unlocked */
				break;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	return ret;
}

static int dm_late_init(void *handle)
{
	struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
	int r = detect_mst_link_for_all_connectors(dev);

	return r;
}

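/*
 * Suspend or resume the MST topology manager of every MST root connector;
 * connectors created for MST ports have mst_port set and are skipped.
 */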
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    !aconnector->mst_port) {

			if (suspend)
				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
			else
				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D3
		);

	return ret;
}

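/*
 * Scan the connector states (either the new ones in @state or the current
 * ones, depending on @from_state_var) and return the first connector found
 * bound to @crtc.
 */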
struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
	struct drm_atomic_state *state,
	struct drm_crtc *crtc,
	bool from_state_var)
{
	uint32_t i;
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_connector_in_state(
			state,
			connector,
			conn_state,
			i) {
		crtc_from_state =
			from_state_var ?
			conn_state->crtc :
			connector->state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_connector(connector);
	}

	return NULL;
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;

	/* power on hardware */
	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D0
		);

	return 0;
}

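/*
 * Display-side resume: re-enable HPD filtering and MST, redetect all links,
 * then replay the atomic state cached by dm_suspend() with a forced mode set.
 */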
int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		dc_link_detect(aconnector->dc_link, false);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_crtc_in_state(adev->dm.cached_state, crtc, crtc_state, i)
		crtc_state->active_changed = true;

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	drm_atomic_state_put(adev->dm.cached_state);
	adev->dm.cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return ret;
}

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

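/*
 * The DM subclasses drm_atomic_state to carry a DC validate context across
 * check/commit. The clear hook must drop the context's reference before the
 * core reuses the state, and the free hook releases the enclosing
 * dm_atomic_state allocated in dm_atomic_state_alloc().
 */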
struct drm_atomic_state *
dm_atomic_state_alloc(struct drm_device *dev)
{
	struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
		kfree(state);
		return NULL;
	}

	return &state->base;
}

static void
dm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state->context) {
		dc_release_validate_context(dm_state->context);
		dm_state->context = NULL;
	}

	drm_atomic_state_default_clear(state);
}

static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	drm_atomic_state_default_release(state);
	kfree(dm_state);
}

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
	.atomic_state_alloc = dm_atomic_state_alloc,
	.atomic_state_clear = dm_atomic_state_clear,
	.atomic_state_free = dm_atomic_state_alloc_free
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

void amdgpu_dm_update_connector_after_detect(
	struct amdgpu_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;

	/* Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depending on link status.
	 * Don't do it here during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/* For S3 resume with headless use em_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
						connector);
				/* retain and release below are used to
				 * bump up the refcount for the sink because the link doesn't point
				 * to it anymore after disconnect, so on the next crtc-to-connector
				 * reshuffle by UMD we would otherwise get an unwanted dc_sink release
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
					connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!! */
		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		return;
	}

	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here. */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
					connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0)
			aconnector->edid = NULL;
		else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST no need to update connector status or notify the OS
	 * since (for the MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);
	if (dc_link_detect(aconnector->dc_link, false)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

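/*
 * Drain DP sink IRQs for MST: read the ESI (or legacy DPCD 0x200) vector,
 * let the MST manager handle the event, ACK it back to the sink, and
 * re-read until no new IRQ is reported or max_process_count is reached.
 */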
static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
			process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else
			break;
	}

	if (process_count == max_process_count)
		DRM_DEBUG_KMS("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO: Temporary mutex to protect the hpd interrupt from a gpio
	 * conflict; once the i2c helper is implemented, this mutex should
	 * be retired.
	 */
	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link, NULL) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(aconnector->dc_link, false)) {
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}

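/*
 * Walk all connectors and hook their DC HPD (and, where present, HPD RX /
 * DP short-pulse) interrupt sources up to the handlers above via the DM
 * IRQ service.
 */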
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = AMDGPU_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 * */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

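/*
 * One-time DRM mode_config setup: install the DM atomic funcs, advertise
 * the supported framebuffer limits and async page flip, and create the
 * driver mode properties.
 */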
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicate support of immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	return 0;
}

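/*
 * Raw backlight interface: brightness values from the backlight class are
 * passed through unscaled to dc_link_set_backlight_level() on the stored
 * backlight_link.
 */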
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	if (dc_link_set_backlight_level(dm->backlight_link,
			bd->props.brightness, 0, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
			dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (NULL == dm->backlight_dev)
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	uint32_t i;
	struct amdgpu_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	unsigned long possible_crtcs;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	for (i = 0; i < dm->dc->caps.max_surfaces; i++) {
		mode_info->planes[i] = kzalloc(sizeof(struct amdgpu_plane),
					       GFP_KERNEL);
		if (!mode_info->planes[i]) {
			DRM_ERROR("KMS: Failed to allocate surface\n");
			goto fail_free_planes;
		}
		mode_info->planes[i]->base.type = mode_info->plane_type[i];

		/*
		 * HACK: IGT tests expect that each plane can only have one
		 * possible CRTC. For now, set one CRTC for each
		 * plane that is not an underlay, but still allow multiple
		 * CRTCs for underlay planes.
		 */
		possible_crtcs = 1 << i;
		if (i >= dm->dc->caps.max_streams)
			possible_crtcs = 0xff;

		if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
			DRM_ERROR("KMS: Failed to initialize plane\n");
			goto fail_free_planes;
		}
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail_free_planes;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail_free_planes;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail_free_connector;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail_free_encoder;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail_free_encoder;
		}

		if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
			amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGA10:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail_free_encoder;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail_free_encoder;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail_free_encoder;
	}

	drm_mode_config_reset(dm->ddev);

	return 0;
fail_free_encoder:
	kfree(aencoder);
fail_free_connector:
	kfree(aconnector);
fail_free_planes:
	for (i = 0; i < dm->dc->caps.max_surfaces; i++)
		kfree(mode_info->planes[i]);
	return -1;
}

void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
				   u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}

static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}

static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
				  struct drm_file *filp)
{
	struct mod_freesync_params freesync_params;
	uint8_t num_streams;
	uint8_t i;

	struct amdgpu_device *adev = dev->dev_private;
	int r = 0;

	/* Get freesync enable flag from DRM */

	num_streams = dc_get_current_stream_count(adev->dm.dc);

	for (i = 0; i < num_streams; i++) {
		const struct dc_stream *stream;

		stream = dc_get_stream_at_index(adev->dm.dc, i);

		mod_freesync_update_state(adev->dm.freesync_module,
					  &stream, 1, &freesync_params);
	}

	return r;
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level, /* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level, /* called unconditionally */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(
		struct device *device,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			amdgpu_dm_display_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

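/*
 * Note: with CONFIG_DEBUG_KERNEL_DC set, dm_early_init() below creates the
 * s3_debug attribute on the GPU's device node. Assuming the usual sysfs
 * layout for a PCI GPU, writing 0/1 to something like
 * /sys/bus/pci/devices/<bdf>/s3_debug should exercise the display
 * suspend/resume paths without a full system S3 cycle (path given as an
 * illustration, not verified).
 */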
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
	amdgpu_dm_set_irq_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_surfaces_type_carizzo;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_surfaces_type_stoney;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_POLARIS10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_VEGA10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/* Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init() */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}

bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}
