drm/amd/display: Use plane pointer to avoid line breaks
1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include "dm_services_types.h"
27 #include "dc.h"
28 #include "dc/inc/core_types.h"
29
30 #include "vid.h"
31 #include "amdgpu.h"
32 #include "amdgpu_display.h"
33 #include "atom.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
36
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
40 #include "dm_services_types.h"
41 #include "amdgpu_dm_mst_types.h"
42
43 #include "ivsrcid/ivsrcid_vislands30.h"
44
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/version.h>
48 #include <linux/types.h>
49
50 #include <drm/drmP.h>
51 #include <drm/drm_atomic.h>
52 #include <drm/drm_atomic_helper.h>
53 #include <drm/drm_dp_mst_helper.h>
54 #include <drm/drm_fb_helper.h>
55 #include <drm/drm_edid.h>
56
57 #include "modules/inc/mod_freesync.h"
58
59 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
60 #include "ivsrcid/irqsrcs_dcn_1_0.h"
61
62 #include "raven1/DCN/dcn_1_0_offset.h"
63 #include "raven1/DCN/dcn_1_0_sh_mask.h"
64 #include "vega10/soc15ip.h"
65
66 #include "soc15_common.h"
67 #endif
68
71 #include "i2caux_interface.h"
72
73 /* basic init/fini API */
74 static int amdgpu_dm_init(struct amdgpu_device *adev);
75 static void amdgpu_dm_fini(struct amdgpu_device *adev);
76
77 /* initializes drm_device display related structures, based on the information
78  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
79 * drm_encoder, drm_mode_config
80 *
81 * Returns 0 on success
82 */
83 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
84 /* removes and deallocates the drm structures, created by the above function */
85 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
86
87 static void
88 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
89
90 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
91 struct amdgpu_plane *aplane,
92 unsigned long possible_crtcs);
93 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
94 struct drm_plane *plane,
95 uint32_t link_index);
96 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
97 struct amdgpu_dm_connector *amdgpu_dm_connector,
98 uint32_t link_index,
99 struct amdgpu_encoder *amdgpu_encoder);
100 static int amdgpu_dm_encoder_init(struct drm_device *dev,
101 struct amdgpu_encoder *aencoder,
102 uint32_t link_index);
103
104 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
105
106 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
107 struct drm_atomic_state *state,
108 bool nonblock);
109
110 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
111
112 static int amdgpu_dm_atomic_check(struct drm_device *dev,
113 struct drm_atomic_state *state);
114
115
118 static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
119 DRM_PLANE_TYPE_PRIMARY,
120 DRM_PLANE_TYPE_PRIMARY,
121 DRM_PLANE_TYPE_PRIMARY,
122 DRM_PLANE_TYPE_PRIMARY,
123 DRM_PLANE_TYPE_PRIMARY,
124 DRM_PLANE_TYPE_PRIMARY,
125 };
126
127 static const enum drm_plane_type dm_plane_type_carrizo[AMDGPU_MAX_PLANES] = {
128 DRM_PLANE_TYPE_PRIMARY,
129 DRM_PLANE_TYPE_PRIMARY,
130 DRM_PLANE_TYPE_PRIMARY,
131 	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
132 };
133
134 static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
135 DRM_PLANE_TYPE_PRIMARY,
136 DRM_PLANE_TYPE_PRIMARY,
137 DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
138 };
139
140 /*
141 * dm_vblank_get_counter
142 *
143 * @brief
144 * Get counter for number of vertical blanks
145 *
146 * @param
147 * struct amdgpu_device *adev - [in] desired amdgpu device
148  * int crtc - [in] index of the CRTC to get the counter from
149 *
150 * @return
151 * Counter for vertical blanks
152 */
153 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
154 {
155 if (crtc >= adev->mode_info.num_crtc)
156 return 0;
157 else {
158 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
159 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
160 acrtc->base.state);
161
162
163 if (acrtc_state->stream == NULL) {
164 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
165 crtc);
166 return 0;
167 }
168
169 return dc_stream_get_vblank_counter(acrtc_state->stream);
170 }
171 }
172
173 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
174 u32 *vbl, u32 *position)
175 {
176 uint32_t v_blank_start, v_blank_end, h_position, v_position;
177
178 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
179 return -EINVAL;
180 else {
181 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
182 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
183 acrtc->base.state);
184
185 if (acrtc_state->stream == NULL) {
186 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
187 crtc);
188 return 0;
189 }
190
191 /*
192 * TODO rework base driver to use values directly.
193 * for now parse it back into reg-format
194 */
195 dc_stream_get_scanoutpos(acrtc_state->stream,
196 &v_blank_start,
197 &v_blank_end,
198 &h_position,
199 &v_position);
200
201 *position = v_position | (h_position << 16);
202 *vbl = v_blank_start | (v_blank_end << 16);
203 }
204
205 return 0;
206 }
207
208 static bool dm_is_idle(void *handle)
209 {
210 /* XXX todo */
211 return true;
212 }
213
214 static int dm_wait_for_idle(void *handle)
215 {
216 /* XXX todo */
217 return 0;
218 }
219
220 static bool dm_check_soft_reset(void *handle)
221 {
222 return false;
223 }
224
225 static int dm_soft_reset(void *handle)
226 {
227 /* XXX todo */
228 return 0;
229 }
230
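/* Find the amdgpu_crtc whose OTG instance matches otg_inst; falls back to
 * CRTC 0 (with a warning) when otg_inst is -1, see the check below. */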
231 static struct amdgpu_crtc *
232 get_crtc_by_otg_inst(struct amdgpu_device *adev,
233 int otg_inst)
234 {
235 struct drm_device *dev = adev->ddev;
236 struct drm_crtc *crtc;
237 struct amdgpu_crtc *amdgpu_crtc;
238
239 /*
240 	 * The following check is inherited from both functions where this one is
241 	 * used now. It still needs to be investigated why this can happen.
242 */
243 if (otg_inst == -1) {
244 WARN_ON(1);
245 return adev->mode_info.crtcs[0];
246 }
247
248 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
249 amdgpu_crtc = to_amdgpu_crtc(crtc);
250
251 if (amdgpu_crtc->otg_inst == otg_inst)
252 return amdgpu_crtc;
253 }
254
255 return NULL;
256 }
257
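/* Page-flip completion IRQ handler: sends the pending vblank event for the
 * CRTC and resets its pflip status to AMDGPU_FLIP_NONE. */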
258 static void dm_pflip_high_irq(void *interrupt_params)
259 {
260 struct amdgpu_crtc *amdgpu_crtc;
261 struct common_irq_params *irq_params = interrupt_params;
262 struct amdgpu_device *adev = irq_params->adev;
263 unsigned long flags;
264
265 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
266
267 /* IRQ could occur when in initial stage */
268 	/* TODO: work and BO cleanup */
269 if (amdgpu_crtc == NULL) {
270 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
271 return;
272 }
273
274 spin_lock_irqsave(&adev->ddev->event_lock, flags);
275
276 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
277 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
278 amdgpu_crtc->pflip_status,
279 AMDGPU_FLIP_SUBMITTED,
280 amdgpu_crtc->crtc_id,
281 amdgpu_crtc);
282 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
283 return;
284 }
285
286
287 	/* wake up userspace */
288 if (amdgpu_crtc->event) {
289 /* Update to correct count/ts if racing with vblank irq */
290 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
291
292 drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
293
294 /* page flip completed. clean up */
295 amdgpu_crtc->event = NULL;
296
297 } else
298 WARN_ON(1);
299
300 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
301 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
302
303 DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
304 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
305
306 drm_crtc_vblank_put(&amdgpu_crtc->base);
307 }
308
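/* Vblank IRQ handler: forwards the vblank to the DRM core for the CRTC that
 * matches the OTG instance. */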
309 static void dm_crtc_high_irq(void *interrupt_params)
310 {
311 struct common_irq_params *irq_params = interrupt_params;
312 struct amdgpu_device *adev = irq_params->adev;
313 uint8_t crtc_index = 0;
314 struct amdgpu_crtc *acrtc;
315
316 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
317
318 if (acrtc)
319 crtc_index = acrtc->crtc_id;
320
321 drm_handle_vblank(adev->ddev, crtc_index);
322 }
323
324 static int dm_set_clockgating_state(void *handle,
325 enum amd_clockgating_state state)
326 {
327 return 0;
328 }
329
330 static int dm_set_powergating_state(void *handle,
331 enum amd_powergating_state state)
332 {
333 return 0;
334 }
335
336 /* Prototypes of private functions */
337 static int dm_early_init(void* handle);
338
339 static void hotplug_notify_work_func(struct work_struct *work)
340 {
341 struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
342 struct drm_device *dev = dm->ddev;
343
344 drm_kms_helper_hotplug_event(dev);
345 }
346
347 #ifdef ENABLE_FBC
348 #include "dal_asic_id.h"
349 /* Allocate memory for FBC compressed data */
350 /* TODO: Dynamic allocation */
351 #define AMDGPU_FBC_SIZE (3840 * 2160 * 4)
352
353 static void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev)
354 {
355 int r;
356 struct dm_comressor_info *compressor = &adev->dm.compressor;
357
358 if (!compressor->bo_ptr) {
359 r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE,
360 AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
361 &compressor->gpu_addr, &compressor->cpu_addr);
362
363 if (r)
364 DRM_ERROR("DM: Failed to initialize fbc\n");
365 }
366
367 }
368 #endif
369
370
371 /* Init display KMS
372 *
373 * Returns 0 on success
374 */
375 static int amdgpu_dm_init(struct amdgpu_device *adev)
376 {
377 struct dc_init_data init_data;
378 adev->dm.ddev = adev->ddev;
379 adev->dm.adev = adev;
380
381 /* Zero all the fields */
382 memset(&init_data, 0, sizeof(init_data));
383
384 /* initialize DAL's lock (for SYNC context use) */
385 spin_lock_init(&adev->dm.dal_lock);
386
387 /* initialize DAL's mutex */
388 mutex_init(&adev->dm.dal_mutex);
389
390 	if (amdgpu_dm_irq_init(adev)) {
391 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
392 goto error;
393 }
394
395 init_data.asic_id.chip_family = adev->family;
396
397 init_data.asic_id.pci_revision_id = adev->rev_id;
398 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
399
400 init_data.asic_id.vram_width = adev->mc.vram_width;
401 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
402 init_data.asic_id.atombios_base_address =
403 adev->mode_info.atom_context->bios;
404
405 init_data.driver = adev;
406
407 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
408
409 if (!adev->dm.cgs_device) {
410 DRM_ERROR("amdgpu: failed to create cgs device.\n");
411 goto error;
412 }
413
414 init_data.cgs_device = adev->dm.cgs_device;
415
416 adev->dm.dal = NULL;
417
418 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
419
420 if (amdgpu_dc_log)
421 init_data.log_mask = DC_DEFAULT_LOG_MASK;
422 else
423 init_data.log_mask = DC_MIN_LOG_MASK;
424
425 #ifdef ENABLE_FBC
426 if (adev->family == FAMILY_CZ)
427 amdgpu_dm_initialize_fbc(adev);
428 init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr;
429 #endif
430 /* Display Core create. */
431 adev->dm.dc = dc_create(&init_data);
432
433 if (adev->dm.dc)
434 DRM_INFO("Display Core initialized!\n");
435 else
436 DRM_INFO("Display Core failed to initialize!\n");
437
438 INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
439
440 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
441 if (!adev->dm.freesync_module) {
442 DRM_ERROR(
443 "amdgpu: failed to initialize freesync_module.\n");
444 } else
445 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
446 adev->dm.freesync_module);
447
448 if (amdgpu_dm_initialize_drm_device(adev)) {
449 DRM_ERROR(
450 "amdgpu: failed to initialize sw for display support.\n");
451 goto error;
452 }
453
454 	/* Update the actual number of CRTCs in use */
455 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
456
457 /* TODO: Add_display_info? */
458
459 /* TODO use dynamic cursor width */
460 adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
461 adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
462
463 if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
464 DRM_ERROR(
465 "amdgpu: failed to initialize sw for display support.\n");
466 goto error;
467 }
468
469 DRM_DEBUG_DRIVER("KMS initialized.\n");
470
471 return 0;
472 error:
473 amdgpu_dm_fini(adev);
474
475 return -1;
476 }
477
478 static void amdgpu_dm_fini(struct amdgpu_device *adev)
479 {
480 amdgpu_dm_destroy_drm_device(&adev->dm);
481 /*
482 	 * TODO: pageflip, vblank interrupt
483 *
484 * amdgpu_dm_irq_fini(adev);
485 */
486
487 if (adev->dm.cgs_device) {
488 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
489 adev->dm.cgs_device = NULL;
490 }
491 if (adev->dm.freesync_module) {
492 mod_freesync_destroy(adev->dm.freesync_module);
493 adev->dm.freesync_module = NULL;
494 }
495 /* DC Destroy TODO: Replace destroy DAL */
496 if (adev->dm.dc)
497 dc_destroy(&adev->dm.dc);
499 }
500
501 static int dm_sw_init(void *handle)
502 {
503 return 0;
504 }
505
506 static int dm_sw_fini(void *handle)
507 {
508 return 0;
509 }
510
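/* Start MST topology management on every connector whose DC link reports an
 * MST branch device. */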
511 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
512 {
513 struct amdgpu_dm_connector *aconnector;
514 struct drm_connector *connector;
515 int ret = 0;
516
517 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
518
519 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
520 aconnector = to_amdgpu_dm_connector(connector);
521 if (aconnector->dc_link->type == dc_connection_mst_branch) {
522 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
523 aconnector, aconnector->base.base.id);
524
525 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
526 if (ret < 0) {
527 DRM_ERROR("DM_MST: Failed to start MST\n");
528 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
529 return ret;
530 }
531 }
532 }
533
534 drm_modeset_unlock(&dev->mode_config.connection_mutex);
535 return ret;
536 }
537
538 static int dm_late_init(void *handle)
539 {
540 struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
541
542 return detect_mst_link_for_all_connectors(dev);
543 }
544
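/* Suspend or resume the MST topology managers of all root MST connectors
 * around an S3 cycle. */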
545 static void s3_handle_mst(struct drm_device *dev, bool suspend)
546 {
547 struct amdgpu_dm_connector *aconnector;
548 struct drm_connector *connector;
549
550 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
551
552 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
553 aconnector = to_amdgpu_dm_connector(connector);
554 if (aconnector->dc_link->type == dc_connection_mst_branch &&
555 !aconnector->mst_port) {
556
557 if (suspend)
558 drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
559 else
560 drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
561 }
562 }
563
564 drm_modeset_unlock(&dev->mode_config.connection_mutex);
565 }
566
567 static int dm_hw_init(void *handle)
568 {
569 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
570 /* Create DAL display manager */
571 amdgpu_dm_init(adev);
572 amdgpu_dm_hpd_init(adev);
573
574 return 0;
575 }
576
577 static int dm_hw_fini(void *handle)
578 {
579 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
580
581 amdgpu_dm_hpd_fini(adev);
582
583 amdgpu_dm_irq_fini(adev);
584 amdgpu_dm_fini(adev);
585 return 0;
586 }
587
588 static int dm_suspend(void *handle)
589 {
590 struct amdgpu_device *adev = handle;
591 struct amdgpu_display_manager *dm = &adev->dm;
592 int ret = 0;
593
594 s3_handle_mst(adev->ddev, true);
595
596 amdgpu_dm_irq_suspend(adev);
597
598 WARN_ON(adev->dm.cached_state);
599 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
600
601 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
602
603 return ret;
604 }
605
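/* Return the first connector in the atomic state whose new state points at
 * the given CRTC, or NULL if there is none. */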
606 static struct amdgpu_dm_connector *
607 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
608 struct drm_crtc *crtc)
609 {
610 uint32_t i;
611 struct drm_connector_state *new_con_state;
612 struct drm_connector *connector;
613 struct drm_crtc *crtc_from_state;
614
615 for_each_new_connector_in_state(state, connector, new_con_state, i) {
616 crtc_from_state = new_con_state->crtc;
617
618 if (crtc_from_state == crtc)
619 return to_amdgpu_dm_connector(connector);
620 }
621
622 return NULL;
623 }
624
625 static int dm_resume(void *handle)
626 {
627 struct amdgpu_device *adev = handle;
628 struct amdgpu_display_manager *dm = &adev->dm;
629
630 /* power on hardware */
631 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
632
633 return 0;
634 }
635
636 int amdgpu_dm_display_resume(struct amdgpu_device *adev)
637 {
638 struct drm_device *ddev = adev->ddev;
639 struct amdgpu_display_manager *dm = &adev->dm;
640 struct amdgpu_dm_connector *aconnector;
641 struct drm_connector *connector;
642 struct drm_crtc *crtc;
643 struct drm_crtc_state *new_crtc_state;
644 int ret = 0;
645 int i;
646
647 /* program HPD filter */
648 dc_resume(dm->dc);
649
650 	/* On resume we need to rewrite the MSTM control bits to enable MST */
651 s3_handle_mst(ddev, false);
652
653 /*
654 * early enable HPD Rx IRQ, should be done before set mode as short
655 * pulse interrupts are used for MST
656 */
657 amdgpu_dm_irq_resume_early(adev);
658
659 	/* Do detection */
660 list_for_each_entry(connector,
661 &ddev->mode_config.connector_list, head) {
662 aconnector = to_amdgpu_dm_connector(connector);
663
664 /*
665 		 * This is the case when traversing through already created
666 		 * MST connectors; they should be skipped.
667 */
668 if (aconnector->mst_port)
669 continue;
670
671 mutex_lock(&aconnector->hpd_lock);
672 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
673 aconnector->dc_sink = NULL;
674 amdgpu_dm_update_connector_after_detect(aconnector);
675 mutex_unlock(&aconnector->hpd_lock);
676 }
677
678 	/* Force a mode set in the atomic commit */
679 for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
680 new_crtc_state->active_changed = true;
681
682 ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
683
684 drm_atomic_state_put(adev->dm.cached_state);
685 adev->dm.cached_state = NULL;
686
687 amdgpu_dm_irq_resume_late(adev);
688
689 return ret;
690 }
691
692 static const struct amd_ip_funcs amdgpu_dm_funcs = {
693 .name = "dm",
694 .early_init = dm_early_init,
695 .late_init = dm_late_init,
696 .sw_init = dm_sw_init,
697 .sw_fini = dm_sw_fini,
698 .hw_init = dm_hw_init,
699 .hw_fini = dm_hw_fini,
700 .suspend = dm_suspend,
701 .resume = dm_resume,
702 .is_idle = dm_is_idle,
703 .wait_for_idle = dm_wait_for_idle,
704 .check_soft_reset = dm_check_soft_reset,
705 .soft_reset = dm_soft_reset,
706 .set_clockgating_state = dm_set_clockgating_state,
707 .set_powergating_state = dm_set_powergating_state,
708 };
709
710 const struct amdgpu_ip_block_version dm_ip_block =
711 {
712 .type = AMD_IP_BLOCK_TYPE_DCE,
713 .major = 1,
714 .minor = 0,
715 .rev = 0,
716 .funcs = &amdgpu_dm_funcs,
717 };
718
719
720 static struct drm_atomic_state *
721 dm_atomic_state_alloc(struct drm_device *dev)
722 {
723 struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
724
725 if (!state)
726 return NULL;
727
728 if (drm_atomic_state_init(dev, &state->base) < 0)
729 goto fail;
730
731 return &state->base;
732
733 fail:
734 kfree(state);
735 return NULL;
736 }
737
738 static void
739 dm_atomic_state_clear(struct drm_atomic_state *state)
740 {
741 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
742
743 if (dm_state->context) {
744 dc_release_state(dm_state->context);
745 dm_state->context = NULL;
746 }
747
748 drm_atomic_state_default_clear(state);
749 }
750
751 static void
752 dm_atomic_state_alloc_free(struct drm_atomic_state *state)
753 {
754 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
755 drm_atomic_state_default_release(state);
756 kfree(dm_state);
757 }
758
759 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
760 .fb_create = amdgpu_user_framebuffer_create,
761 .output_poll_changed = amdgpu_output_poll_changed,
762 .atomic_check = amdgpu_dm_atomic_check,
763 .atomic_commit = amdgpu_dm_atomic_commit,
764 .atomic_state_alloc = dm_atomic_state_alloc,
765 .atomic_state_clear = dm_atomic_state_clear,
766 .atomic_state_free = dm_atomic_state_alloc_free
767 };
768
769 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
770 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
771 };
772
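/* Update the connector's dc_sink, EDID property and freesync registration
 * after a (re)detect; handles the emulated-sink and MST special cases below. */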
773 static void
774 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
775 {
776 struct drm_connector *connector = &aconnector->base;
777 struct drm_device *dev = connector->dev;
778 struct dc_sink *sink;
779
780 /* MST handled by drm_mst framework */
781 	if (aconnector->mst_mgr.mst_state)
782 return;
783
784
785 sink = aconnector->dc_link->local_sink;
786
787 	/* An EDID-managed connector gets its first update only in the mode_valid
788 	 * hook; the connector sink is then set to either the fake or the physical
789 	 * sink, depending on link status. Don't do it here during boot.
790 */
791 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
792 && aconnector->dc_em_sink) {
793
794 		/* For headless S3 resume, use the emulated sink (dc_em_sink) to fake a
795 		 * stream, because on resume connector->sink is set to NULL
796 */
797 mutex_lock(&dev->mode_config.mutex);
798
799 if (sink) {
800 if (aconnector->dc_sink) {
801 amdgpu_dm_remove_sink_from_freesync_module(
802 connector);
803 				/* The retain and release below bump up the sink refcount
804 				 * because the link no longer points to it after disconnect,
805 				 * so the next crtc-to-connector reshuffle by the UMD would
806 				 * otherwise run into an unwanted dc_sink release.
807 */
808 if (aconnector->dc_sink != aconnector->dc_em_sink)
809 dc_sink_release(aconnector->dc_sink);
810 }
811 aconnector->dc_sink = sink;
812 amdgpu_dm_add_sink_to_freesync_module(
813 connector, aconnector->edid);
814 } else {
815 amdgpu_dm_remove_sink_from_freesync_module(connector);
816 if (!aconnector->dc_sink)
817 aconnector->dc_sink = aconnector->dc_em_sink;
818 else if (aconnector->dc_sink != aconnector->dc_em_sink)
819 dc_sink_retain(aconnector->dc_sink);
820 }
821
822 mutex_unlock(&dev->mode_config.mutex);
823 return;
824 }
825
826 /*
827 * TODO: temporary guard to look for proper fix
828 * if this sink is MST sink, we should not do anything
829 */
830 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
831 return;
832
833 if (aconnector->dc_sink == sink) {
834 /* We got a DP short pulse (Link Loss, DP CTS, etc...).
835 * Do nothing!! */
836 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
837 aconnector->connector_id);
838 return;
839 }
840
841 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
842 aconnector->connector_id, aconnector->dc_sink, sink);
843
844 mutex_lock(&dev->mode_config.mutex);
845
846 /* 1. Update status of the drm connector
847 * 2. Send an event and let userspace tell us what to do */
848 if (sink) {
849 /* TODO: check if we still need the S3 mode update workaround.
850 * If yes, put it here. */
851 if (aconnector->dc_sink)
852 amdgpu_dm_remove_sink_from_freesync_module(
853 connector);
854
855 aconnector->dc_sink = sink;
856 if (sink->dc_edid.length == 0) {
857 aconnector->edid = NULL;
858 } else {
859 aconnector->edid =
860 (struct edid *) sink->dc_edid.raw_edid;
861
862
863 drm_mode_connector_update_edid_property(connector,
864 aconnector->edid);
865 }
866 amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
867
868 } else {
869 amdgpu_dm_remove_sink_from_freesync_module(connector);
870 drm_mode_connector_update_edid_property(connector, NULL);
871 aconnector->num_modes = 0;
872 aconnector->dc_sink = NULL;
873 }
874
875 mutex_unlock(&dev->mode_config.mutex);
876 }
877
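/* HPD IRQ handler: re-detects the link, refreshes the connector state and
 * sends a hotplug event to userspace on success. */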
878 static void handle_hpd_irq(void *param)
879 {
880 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
881 struct drm_connector *connector = &aconnector->base;
882 struct drm_device *dev = connector->dev;
883
884 	/* In case of failure or MST there is no need to update the connector
885 	 * status or notify the OS, since (in the MST case) MST does this in its own context.
886 */
887 mutex_lock(&aconnector->hpd_lock);
888
889 if (aconnector->fake_enable)
890 aconnector->fake_enable = false;
891
892 if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
893 amdgpu_dm_update_connector_after_detect(aconnector);
894
895
896 drm_modeset_lock_all(dev);
897 dm_restore_drm_connector_state(dev, connector);
898 drm_modeset_unlock_all(dev);
899
900 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
901 drm_kms_helper_hotplug_event(dev);
902 }
903 mutex_unlock(&aconnector->hpd_lock);
904
905 }
906
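/* Handle a DP short pulse for MST: read the sink's ESI/IRQ vector registers,
 * let the MST manager process them, ACK back via DPCD and repeat until no new
 * IRQ is pending (bounded by max_process_count). */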
907 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
908 {
909 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
910 uint8_t dret;
911 bool new_irq_handled = false;
912 int dpcd_addr;
913 int dpcd_bytes_to_read;
914
915 const int max_process_count = 30;
916 int process_count = 0;
917
918 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
919
920 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
921 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
922 /* DPCD 0x200 - 0x201 for downstream IRQ */
923 dpcd_addr = DP_SINK_COUNT;
924 } else {
925 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
926 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
927 dpcd_addr = DP_SINK_COUNT_ESI;
928 }
929
930 dret = drm_dp_dpcd_read(
931 &aconnector->dm_dp_aux.aux,
932 dpcd_addr,
933 esi,
934 dpcd_bytes_to_read);
935
936 while (dret == dpcd_bytes_to_read &&
937 process_count < max_process_count) {
938 uint8_t retry;
939 dret = 0;
940
941 process_count++;
942
943 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
944 /* handle HPD short pulse irq */
945 if (aconnector->mst_mgr.mst_state)
946 drm_dp_mst_hpd_irq(
947 &aconnector->mst_mgr,
948 esi,
949 &new_irq_handled);
950
951 if (new_irq_handled) {
952 			/* ACK at DPCD to notify the downstream device */
953 const int ack_dpcd_bytes_to_write =
954 dpcd_bytes_to_read - 1;
955
956 for (retry = 0; retry < 3; retry++) {
957 uint8_t wret;
958
959 wret = drm_dp_dpcd_write(
960 &aconnector->dm_dp_aux.aux,
961 dpcd_addr + 1,
962 &esi[1],
963 ack_dpcd_bytes_to_write);
964 if (wret == ack_dpcd_bytes_to_write)
965 break;
966 }
967
968 			/* Check if there is a new IRQ to be handled */
969 dret = drm_dp_dpcd_read(
970 &aconnector->dm_dp_aux.aux,
971 dpcd_addr,
972 esi,
973 dpcd_bytes_to_read);
974
975 new_irq_handled = false;
976 } else {
977 break;
978 }
979 }
980
981 if (process_count == max_process_count)
982 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
983 }
984
985 static void handle_hpd_rx_irq(void *param)
986 {
987 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
988 struct drm_connector *connector = &aconnector->base;
989 struct drm_device *dev = connector->dev;
990 struct dc_link *dc_link = aconnector->dc_link;
991 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
992
993 	/* TODO: Temporarily take a mutex so the HPD interrupt does not run into a
994 	 * GPIO conflict; once the i2c helper is implemented, this mutex should
995 	 * be retired.
996 */
997 if (dc_link->type != dc_connection_mst_branch)
998 mutex_lock(&aconnector->hpd_lock);
999
1000 if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
1001 !is_mst_root_connector) {
1002 /* Downstream Port status changed. */
1003 if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
1004 amdgpu_dm_update_connector_after_detect(aconnector);
1005
1006
1007 drm_modeset_lock_all(dev);
1008 dm_restore_drm_connector_state(dev, connector);
1009 drm_modeset_unlock_all(dev);
1010
1011 drm_kms_helper_hotplug_event(dev);
1012 }
1013 }
1014 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
1015 (dc_link->type == dc_connection_mst_branch))
1016 dm_handle_hpd_rx_irq(aconnector);
1017
1018 if (dc_link->type != dc_connection_mst_branch)
1019 mutex_unlock(&aconnector->hpd_lock);
1020 }
1021
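/* Walk all connectors and register HPD and HPD-RX (DP short pulse) interrupt
 * handlers for every valid DC IRQ source. */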
1022 static void register_hpd_handlers(struct amdgpu_device *adev)
1023 {
1024 struct drm_device *dev = adev->ddev;
1025 struct drm_connector *connector;
1026 struct amdgpu_dm_connector *aconnector;
1027 const struct dc_link *dc_link;
1028 struct dc_interrupt_params int_params = {0};
1029
1030 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1031 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1032
1033 list_for_each_entry(connector,
1034 &dev->mode_config.connector_list, head) {
1035
1036 aconnector = to_amdgpu_dm_connector(connector);
1037 dc_link = aconnector->dc_link;
1038
1039 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
1040 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1041 int_params.irq_source = dc_link->irq_source_hpd;
1042
1043 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1044 handle_hpd_irq,
1045 (void *) aconnector);
1046 }
1047
1048 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
1049
1050 /* Also register for DP short pulse (hpd_rx). */
1051 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1052 int_params.irq_source = dc_link->irq_source_hpd_rx;
1053
1054 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1055 handle_hpd_rx_irq,
1056 (void *) aconnector);
1057 }
1058 }
1059 }
1060
1061 /* Register IRQ sources and initialize IRQ callbacks */
1062 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
1063 {
1064 struct dc *dc = adev->dm.dc;
1065 struct common_irq_params *c_irq_params;
1066 struct dc_interrupt_params int_params = {0};
1067 int r;
1068 int i;
1069 unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
1070
1071 if (adev->asic_type == CHIP_VEGA10 ||
1072 adev->asic_type == CHIP_RAVEN)
1073 client_id = AMDGPU_IH_CLIENTID_DCE;
1074
1075 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1076 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1077
1078 /* Actions of amdgpu_irq_add_id():
1079 * 1. Register a set() function with base driver.
1080 * Base driver will call set() function to enable/disable an
1081 * interrupt in DC hardware.
1082 * 2. Register amdgpu_dm_irq_handler().
1083 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1084 * coming from DC hardware.
1085 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1086 * for acknowledging and handling. */
1087
1088 /* Use VBLANK interrupt */
1089 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
1090 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
1091 if (r) {
1092 DRM_ERROR("Failed to add crtc irq id!\n");
1093 return r;
1094 }
1095
1096 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1097 int_params.irq_source =
1098 dc_interrupt_to_irq_source(dc, i, 0);
1099
1100 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1101
1102 c_irq_params->adev = adev;
1103 c_irq_params->irq_src = int_params.irq_source;
1104
1105 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1106 dm_crtc_high_irq, c_irq_params);
1107 }
1108
1109 /* Use GRPH_PFLIP interrupt */
1110 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
1111 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
1112 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
1113 if (r) {
1114 DRM_ERROR("Failed to add page flip irq id!\n");
1115 return r;
1116 }
1117
1118 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1119 int_params.irq_source =
1120 dc_interrupt_to_irq_source(dc, i, 0);
1121
1122 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1123
1124 c_irq_params->adev = adev;
1125 c_irq_params->irq_src = int_params.irq_source;
1126
1127 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1128 dm_pflip_high_irq, c_irq_params);
1129
1130 }
1131
1132 /* HPD */
1133 r = amdgpu_irq_add_id(adev, client_id,
1134 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
1135 if (r) {
1136 DRM_ERROR("Failed to add hpd irq id!\n");
1137 return r;
1138 }
1139
1140 register_hpd_handlers(adev);
1141
1142 return 0;
1143 }
1144
1145 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1146 /* Register IRQ sources and initialize IRQ callbacks */
1147 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1148 {
1149 struct dc *dc = adev->dm.dc;
1150 struct common_irq_params *c_irq_params;
1151 struct dc_interrupt_params int_params = {0};
1152 int r;
1153 int i;
1154
1155 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1156 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1157
1158 /* Actions of amdgpu_irq_add_id():
1159 * 1. Register a set() function with base driver.
1160 * Base driver will call set() function to enable/disable an
1161 * interrupt in DC hardware.
1162 * 2. Register amdgpu_dm_irq_handler().
1163 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1164 * coming from DC hardware.
1165 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1166 * for acknowledging and handling.
1167 	 */
1168
1169 /* Use VSTARTUP interrupt */
1170 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
1171 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
1172 i++) {
1173 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);
1174
1175 if (r) {
1176 DRM_ERROR("Failed to add crtc irq id!\n");
1177 return r;
1178 }
1179
1180 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1181 int_params.irq_source =
1182 dc_interrupt_to_irq_source(dc, i, 0);
1183
1184 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1185
1186 c_irq_params->adev = adev;
1187 c_irq_params->irq_src = int_params.irq_source;
1188
1189 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1190 dm_crtc_high_irq, c_irq_params);
1191 }
1192
1193 /* Use GRPH_PFLIP interrupt */
1194 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
1195 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
1196 i++) {
1197 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
1198 if (r) {
1199 DRM_ERROR("Failed to add page flip irq id!\n");
1200 return r;
1201 }
1202
1203 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1204 int_params.irq_source =
1205 dc_interrupt_to_irq_source(dc, i, 0);
1206
1207 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1208
1209 c_irq_params->adev = adev;
1210 c_irq_params->irq_src = int_params.irq_source;
1211
1212 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1213 dm_pflip_high_irq, c_irq_params);
1214
1215 }
1216
1217 /* HPD */
1218 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
1219 &adev->hpd_irq);
1220 if (r) {
1221 DRM_ERROR("Failed to add hpd irq id!\n");
1222 return r;
1223 }
1224
1225 register_hpd_handlers(adev);
1226
1227 return 0;
1228 }
1229 #endif
1230
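/* Set up drm_mode_config limits, function tables and driver properties for
 * the device. */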
1231 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1232 {
1233 int r;
1234
1235 adev->mode_info.mode_config_initialized = true;
1236
1237 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1238 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
1239
1240 adev->ddev->mode_config.max_width = 16384;
1241 adev->ddev->mode_config.max_height = 16384;
1242
1243 adev->ddev->mode_config.preferred_depth = 24;
1244 adev->ddev->mode_config.prefer_shadow = 1;
1245 /* indicate support of immediate flip */
1246 adev->ddev->mode_config.async_page_flip = true;
1247
1248 adev->ddev->mode_config.fb_base = adev->mc.aper_base;
1249
1250 r = amdgpu_modeset_create_props(adev);
1251 if (r)
1252 return r;
1253
1254 return 0;
1255 }
1256
1257 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1258 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1259
1260 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1261 {
1262 struct amdgpu_display_manager *dm = bl_get_data(bd);
1263
1264 if (dc_link_set_backlight_level(dm->backlight_link,
1265 bd->props.brightness, 0, 0))
1266 return 0;
1267 else
1268 return 1;
1269 }
1270
1271 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1272 {
1273 return bd->props.brightness;
1274 }
1275
1276 static const struct backlight_ops amdgpu_dm_backlight_ops = {
1277 .get_brightness = amdgpu_dm_backlight_get_brightness,
1278 .update_status = amdgpu_dm_backlight_update_status,
1279 };
1280
1281 static void
1282 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1283 {
1284 char bl_name[16];
1285 struct backlight_properties props = { 0 };
1286
1287 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1288 props.type = BACKLIGHT_RAW;
1289
1290 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1291 dm->adev->ddev->primary->index);
1292
1293 dm->backlight_dev = backlight_device_register(bl_name,
1294 dm->adev->ddev->dev,
1295 dm,
1296 &amdgpu_dm_backlight_ops,
1297 &props);
1298
1299 	if (!dm->backlight_dev)
1300 DRM_ERROR("DM: Backlight registration failed!\n");
1301 else
1302 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
1303 }
1304
1305 #endif
1306
1307 /* In this architecture, the association
1308 * connector -> encoder -> crtc
1309  * is not really required. The crtc and connector will hold the
1310  * display_index as an abstraction to use with the DAL component
1311 *
1312 * Returns 0 on success
1313 */
1314 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1315 {
1316 struct amdgpu_display_manager *dm = &adev->dm;
1317 uint32_t i;
1318 struct amdgpu_dm_connector *aconnector = NULL;
1319 struct amdgpu_encoder *aencoder = NULL;
1320 struct amdgpu_mode_info *mode_info = &adev->mode_info;
1321 uint32_t link_cnt;
1322 unsigned long possible_crtcs;
1323
1324 link_cnt = dm->dc->caps.max_links;
1325 if (amdgpu_dm_mode_config_init(dm->adev)) {
1326 DRM_ERROR("DM: Failed to initialize mode config\n");
1327 return -1;
1328 }
1329
1330 for (i = 0; i < dm->dc->caps.max_planes; i++) {
1331 struct amdgpu_plane *plane;
1332
1333 plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
1334 mode_info->planes[i] = plane;
1335
1336 if (!plane) {
1337 DRM_ERROR("KMS: Failed to allocate plane\n");
1338 goto fail_free_planes;
1339 }
1340 plane->base.type = mode_info->plane_type[i];
1341
1342 /*
1343 		 * HACK: IGT tests expect that each plane can only have
1344 		 * one possible CRTC. For now, set one CRTC for each
1345 * plane that is not an underlay, but still allow multiple
1346 * CRTCs for underlay planes.
1347 */
1348 possible_crtcs = 1 << i;
1349 if (i >= dm->dc->caps.max_streams)
1350 possible_crtcs = 0xff;
1351
1352 if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
1353 DRM_ERROR("KMS: Failed to initialize plane\n");
1354 goto fail_free_planes;
1355 }
1356 }
1357
1358 for (i = 0; i < dm->dc->caps.max_streams; i++)
1359 if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
1360 DRM_ERROR("KMS: Failed to initialize crtc\n");
1361 goto fail_free_planes;
1362 }
1363
1364 dm->display_indexes_num = dm->dc->caps.max_streams;
1365
1366 /* loops over all connectors on the board */
1367 for (i = 0; i < link_cnt; i++) {
1368
1369 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
1370 DRM_ERROR(
1371 "KMS: Cannot support more than %d display indexes\n",
1372 AMDGPU_DM_MAX_DISPLAY_INDEX);
1373 continue;
1374 }
1375
1376 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
1377 if (!aconnector)
1378 goto fail_free_planes;
1379
1380 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
1381 if (!aencoder)
1382 goto fail_free_connector;
1383
1384 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
1385 DRM_ERROR("KMS: Failed to initialize encoder\n");
1386 goto fail_free_encoder;
1387 }
1388
1389 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
1390 DRM_ERROR("KMS: Failed to initialize connector\n");
1391 goto fail_free_encoder;
1392 }
1393
1394 if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
1395 DETECT_REASON_BOOT))
1396 amdgpu_dm_update_connector_after_detect(aconnector);
1397 }
1398
1399 /* Software is initialized. Now we can register interrupt handlers. */
1400 switch (adev->asic_type) {
1401 case CHIP_BONAIRE:
1402 case CHIP_HAWAII:
1403 case CHIP_KAVERI:
1404 case CHIP_KABINI:
1405 case CHIP_MULLINS:
1406 case CHIP_TONGA:
1407 case CHIP_FIJI:
1408 case CHIP_CARRIZO:
1409 case CHIP_STONEY:
1410 case CHIP_POLARIS11:
1411 case CHIP_POLARIS10:
1412 case CHIP_POLARIS12:
1413 case CHIP_VEGA10:
1414 if (dce110_register_irq_handlers(dm->adev)) {
1415 DRM_ERROR("DM: Failed to initialize IRQ\n");
1416 goto fail_free_encoder;
1417 }
1418 break;
1419 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1420 case CHIP_RAVEN:
1421 if (dcn10_register_irq_handlers(dm->adev)) {
1422 DRM_ERROR("DM: Failed to initialize IRQ\n");
1423 goto fail_free_encoder;
1424 }
1425 /*
1426 * Temporary disable until pplib/smu interaction is implemented
1427 */
1428 dm->dc->debug.disable_stutter = true;
1429 break;
1430 #endif
1431 default:
1432 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
1433 goto fail_free_encoder;
1434 }
1435
1436 drm_mode_config_reset(dm->ddev);
1437
1438 return 0;
1439 fail_free_encoder:
1440 kfree(aencoder);
1441 fail_free_connector:
1442 kfree(aconnector);
1443 fail_free_planes:
1444 for (i = 0; i < dm->dc->caps.max_planes; i++)
1445 kfree(mode_info->planes[i]);
1446 return -1;
1447 }
1448
1449 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
1450 {
1451 drm_mode_config_cleanup(dm->ddev);
1453 }
1454
1455 /******************************************************************************
1456 * amdgpu_display_funcs functions
1457 *****************************************************************************/
1458
1459 /**
1460 * dm_bandwidth_update - program display watermarks
1461 *
1462 * @adev: amdgpu_device pointer
1463 *
1464 * Calculate and program the display watermarks and line buffer allocation.
1465 */
1466 static void dm_bandwidth_update(struct amdgpu_device *adev)
1467 {
1468 /* TODO: implement later */
1469 }
1470
1471 static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
1472 u8 level)
1473 {
1474 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1475 }
1476
1477 static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
1478 {
1479 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1480 return 0;
1481 }
1482
1483 static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1484 struct drm_file *filp)
1485 {
1486 struct mod_freesync_params freesync_params;
1487 uint8_t num_streams;
1488 uint8_t i;
1489
1490 struct amdgpu_device *adev = dev->dev_private;
1491 int r = 0;
1492
1493 /* Get freesync enable flag from DRM */
1494
1495 num_streams = dc_get_current_stream_count(adev->dm.dc);
1496
1497 for (i = 0; i < num_streams; i++) {
1498 struct dc_stream_state *stream;
1499 stream = dc_get_stream_at_index(adev->dm.dc, i);
1500
1501 mod_freesync_update_state(adev->dm.freesync_module,
1502 &stream, 1, &freesync_params);
1503 }
1504
1505 return r;
1506 }
1507
1508 static const struct amdgpu_display_funcs dm_display_funcs = {
1509 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
1510 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
1511 .vblank_wait = NULL,
1512 .backlight_set_level =
1513 dm_set_backlight_level,/* called unconditionally */
1514 .backlight_get_level =
1515 dm_get_backlight_level,/* called unconditionally */
1516 .hpd_sense = NULL,/* called unconditionally */
1517 .hpd_set_polarity = NULL, /* called unconditionally */
1518 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
1519 .page_flip_get_scanoutpos =
1520 dm_crtc_get_scanoutpos,/* called unconditionally */
1521 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
1522 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
1523 .notify_freesync = amdgpu_notify_freesync,
1524
1525 };
1526
1527 #if defined(CONFIG_DEBUG_KERNEL_DC)
1528
1529 static ssize_t s3_debug_store(struct device *device,
1530 struct device_attribute *attr,
1531 const char *buf,
1532 size_t count)
1533 {
1534 int ret;
1535 int s3_state;
1536 struct pci_dev *pdev = to_pci_dev(device);
1537 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1538 struct amdgpu_device *adev = drm_dev->dev_private;
1539
1540 ret = kstrtoint(buf, 0, &s3_state);
1541
1542 if (ret == 0) {
1543 if (s3_state) {
1544 dm_resume(adev);
1545 amdgpu_dm_display_resume(adev);
1546 drm_kms_helper_hotplug_event(adev->ddev);
1547 } else
1548 dm_suspend(adev);
1549 }
1550
1551 return ret == 0 ? count : 0;
1552 }
1553
1554 DEVICE_ATTR_WO(s3_debug);
1555
1556 #endif
1557
1558 static int dm_early_init(void *handle)
1559 {
1560 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1561
1562 adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
1563 amdgpu_dm_set_irq_funcs(adev);
1564
1565 switch (adev->asic_type) {
1566 case CHIP_BONAIRE:
1567 case CHIP_HAWAII:
1568 adev->mode_info.num_crtc = 6;
1569 adev->mode_info.num_hpd = 6;
1570 adev->mode_info.num_dig = 6;
1571 adev->mode_info.plane_type = dm_plane_type_default;
1572 break;
1573 case CHIP_KAVERI:
1574 adev->mode_info.num_crtc = 4;
1575 adev->mode_info.num_hpd = 6;
1576 adev->mode_info.num_dig = 7;
1577 adev->mode_info.plane_type = dm_plane_type_default;
1578 break;
1579 case CHIP_KABINI:
1580 case CHIP_MULLINS:
1581 adev->mode_info.num_crtc = 2;
1582 adev->mode_info.num_hpd = 6;
1583 adev->mode_info.num_dig = 6;
1584 adev->mode_info.plane_type = dm_plane_type_default;
1585 break;
1586 case CHIP_FIJI:
1587 case CHIP_TONGA:
1588 adev->mode_info.num_crtc = 6;
1589 adev->mode_info.num_hpd = 6;
1590 adev->mode_info.num_dig = 7;
1591 adev->mode_info.plane_type = dm_plane_type_default;
1592 break;
1593 case CHIP_CARRIZO:
1594 adev->mode_info.num_crtc = 3;
1595 adev->mode_info.num_hpd = 6;
1596 adev->mode_info.num_dig = 9;
1597 		adev->mode_info.plane_type = dm_plane_type_carrizo;
1598 break;
1599 case CHIP_STONEY:
1600 adev->mode_info.num_crtc = 2;
1601 adev->mode_info.num_hpd = 6;
1602 adev->mode_info.num_dig = 9;
1603 adev->mode_info.plane_type = dm_plane_type_stoney;
1604 break;
1605 case CHIP_POLARIS11:
1606 case CHIP_POLARIS12:
1607 adev->mode_info.num_crtc = 5;
1608 adev->mode_info.num_hpd = 5;
1609 adev->mode_info.num_dig = 5;
1610 adev->mode_info.plane_type = dm_plane_type_default;
1611 break;
1612 case CHIP_POLARIS10:
1613 adev->mode_info.num_crtc = 6;
1614 adev->mode_info.num_hpd = 6;
1615 adev->mode_info.num_dig = 6;
1616 adev->mode_info.plane_type = dm_plane_type_default;
1617 break;
1618 case CHIP_VEGA10:
1619 adev->mode_info.num_crtc = 6;
1620 adev->mode_info.num_hpd = 6;
1621 adev->mode_info.num_dig = 6;
1622 adev->mode_info.plane_type = dm_plane_type_default;
1623 break;
1624 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1625 case CHIP_RAVEN:
1626 adev->mode_info.num_crtc = 4;
1627 adev->mode_info.num_hpd = 4;
1628 adev->mode_info.num_dig = 4;
1629 adev->mode_info.plane_type = dm_plane_type_default;
1630 break;
1631 #endif
1632 default:
1633 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
1634 return -EINVAL;
1635 }
1636
1637 if (adev->mode_info.funcs == NULL)
1638 adev->mode_info.funcs = &dm_display_funcs;
1639
1640 /* Note: Do NOT change adev->audio_endpt_rreg and
1641 * adev->audio_endpt_wreg because they are initialised in
1642 * amdgpu_device_init() */
1643 #if defined(CONFIG_DEBUG_KERNEL_DC)
1644 device_create_file(
1645 adev->ddev->dev,
1646 &dev_attr_s3_debug);
1647 #endif
1648
1649 return 0;
1650 }
1651
1652 struct dm_connector_state {
1653 struct drm_connector_state base;
1654
1655 enum amdgpu_rmx_type scaling;
1656 uint8_t underscan_vborder;
1657 uint8_t underscan_hborder;
1658 bool underscan_enable;
1659 };
1660
1661 #define to_dm_connector_state(x)\
1662 container_of((x), struct dm_connector_state, base)
1663
1664 static bool modeset_required(struct drm_crtc_state *crtc_state,
1665 struct dc_stream_state *new_stream,
1666 struct dc_stream_state *old_stream)
1667 {
1668 if (!drm_atomic_crtc_needs_modeset(crtc_state))
1669 return false;
1670
1671 if (!crtc_state->enable)
1672 return false;
1673
1674 return crtc_state->active;
1675 }
1676
1677 static bool modereset_required(struct drm_crtc_state *crtc_state)
1678 {
1679 if (!drm_atomic_crtc_needs_modeset(crtc_state))
1680 return false;
1681
1682 return !crtc_state->enable || !crtc_state->active;
1683 }
1684
1685 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
1686 {
1687 drm_encoder_cleanup(encoder);
1688 kfree(encoder);
1689 }
1690
1691 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
1692 .destroy = amdgpu_dm_encoder_destroy,
1693 };
1694
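/* Translate the DRM plane state (16.16 fixed-point source rect, integer CRTC
 * rect and rotation) into DC src/dst/clip rects; returns false for degenerate
 * rectangles. */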
1695 static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
1696 struct dc_plane_state *plane_state)
1697 {
1698 plane_state->src_rect.x = state->src_x >> 16;
1699 plane_state->src_rect.y = state->src_y >> 16;
1700 	/* For now we ignore the mantissa and do not deal with fractional (sub-pixel) coordinates */
1701 plane_state->src_rect.width = state->src_w >> 16;
1702
1703 if (plane_state->src_rect.width == 0)
1704 return false;
1705
1706 plane_state->src_rect.height = state->src_h >> 16;
1707 if (plane_state->src_rect.height == 0)
1708 return false;
1709
1710 plane_state->dst_rect.x = state->crtc_x;
1711 plane_state->dst_rect.y = state->crtc_y;
1712
1713 if (state->crtc_w == 0)
1714 return false;
1715
1716 plane_state->dst_rect.width = state->crtc_w;
1717
1718 if (state->crtc_h == 0)
1719 return false;
1720
1721 plane_state->dst_rect.height = state->crtc_h;
1722
1723 plane_state->clip_rect = plane_state->dst_rect;
1724
1725 switch (state->rotation & DRM_MODE_ROTATE_MASK) {
1726 case DRM_MODE_ROTATE_0:
1727 plane_state->rotation = ROTATION_ANGLE_0;
1728 break;
1729 case DRM_MODE_ROTATE_90:
1730 plane_state->rotation = ROTATION_ANGLE_90;
1731 break;
1732 case DRM_MODE_ROTATE_180:
1733 plane_state->rotation = ROTATION_ANGLE_180;
1734 break;
1735 case DRM_MODE_ROTATE_270:
1736 plane_state->rotation = ROTATION_ANGLE_270;
1737 break;
1738 default:
1739 plane_state->rotation = ROTATION_ANGLE_0;
1740 break;
1741 }
1742
1743 return true;
1744 }
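
/* Reserve the framebuffer BO and read back its tiling flags and, optionally,
 * its GPU address. */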
1745 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
1746 uint64_t *tiling_flags,
1747 uint64_t *fb_location)
1748 {
1749 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1750 int r = amdgpu_bo_reserve(rbo, false);
1751
1752 if (unlikely(r)) {
		/* Don't show an error message when returning -ERESTARTSYS */
1754 if (r != -ERESTARTSYS)
1755 DRM_ERROR("Unable to reserve buffer: %d\n", r);
1756 return r;
1757 }
1758
1759 if (fb_location)
1760 *fb_location = amdgpu_bo_gpu_offset(rbo);
1761
1762 if (tiling_flags)
1763 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
1764
1765 amdgpu_bo_unreserve(rbo);
1766
1767 return r;
1768 }
1769
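/* Derive pixel format, surface addresses, plane size and tiling info for the
 * DC plane state from an amdgpu framebuffer. */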
1770 static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
1771 struct dc_plane_state *plane_state,
1772 const struct amdgpu_framebuffer *amdgpu_fb,
1773 bool addReq)
1774 {
1775 uint64_t tiling_flags;
1776 uint64_t fb_location = 0;
1777 uint64_t chroma_addr = 0;
1778 unsigned int awidth;
1779 const struct drm_framebuffer *fb = &amdgpu_fb->base;
1780 int ret = 0;
1781 struct drm_format_name_buf format_name;
1782
1783 ret = get_fb_info(
1784 amdgpu_fb,
1785 &tiling_flags,
1786 addReq == true ? &fb_location:NULL);
1787
1788 if (ret)
1789 return ret;
1790
1791 switch (fb->format->format) {
1792 case DRM_FORMAT_C8:
1793 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
1794 break;
1795 case DRM_FORMAT_RGB565:
1796 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
1797 break;
1798 case DRM_FORMAT_XRGB8888:
1799 case DRM_FORMAT_ARGB8888:
1800 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
1801 break;
1802 case DRM_FORMAT_XRGB2101010:
1803 case DRM_FORMAT_ARGB2101010:
1804 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
1805 break;
1806 case DRM_FORMAT_XBGR2101010:
1807 case DRM_FORMAT_ABGR2101010:
1808 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
1809 break;
1810 case DRM_FORMAT_NV21:
1811 plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
1812 break;
1813 case DRM_FORMAT_NV12:
1814 plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
1815 break;
1816 default:
1817 DRM_ERROR("Unsupported screen format %s\n",
1818 drm_get_format_name(fb->format->format, &format_name));
1819 return -EINVAL;
1820 }
1821
1822 if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
1823 plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
1824 plane_state->address.grph.addr.low_part = lower_32_bits(fb_location);
1825 plane_state->address.grph.addr.high_part = upper_32_bits(fb_location);
1826 plane_state->plane_size.grph.surface_size.x = 0;
1827 plane_state->plane_size.grph.surface_size.y = 0;
1828 plane_state->plane_size.grph.surface_size.width = fb->width;
1829 plane_state->plane_size.grph.surface_size.height = fb->height;
1830 plane_state->plane_size.grph.surface_pitch =
1831 fb->pitches[0] / fb->format->cpp[0];
1832 /* TODO: unhardcode */
1833 plane_state->color_space = COLOR_SPACE_SRGB;
1834
1835 } else {
1836 awidth = ALIGN(fb->width, 64);
1837 plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
1838 plane_state->address.video_progressive.luma_addr.low_part
1839 = lower_32_bits(fb_location);
1840 plane_state->address.video_progressive.luma_addr.high_part
1841 = upper_32_bits(fb_location);
1842 chroma_addr = fb_location + (u64)(awidth * fb->height);
1843 plane_state->address.video_progressive.chroma_addr.low_part
1844 = lower_32_bits(chroma_addr);
1845 plane_state->address.video_progressive.chroma_addr.high_part
1846 = upper_32_bits(chroma_addr);
1847 plane_state->plane_size.video.luma_size.x = 0;
1848 plane_state->plane_size.video.luma_size.y = 0;
1849 plane_state->plane_size.video.luma_size.width = awidth;
1850 plane_state->plane_size.video.luma_size.height = fb->height;
1851 /* TODO: unhardcode */
1852 plane_state->plane_size.video.luma_pitch = awidth;
1853
1854 plane_state->plane_size.video.chroma_size.x = 0;
1855 plane_state->plane_size.video.chroma_size.y = 0;
1856 plane_state->plane_size.video.chroma_size.width = awidth;
1857 plane_state->plane_size.video.chroma_size.height = fb->height;
1858 plane_state->plane_size.video.chroma_pitch = awidth / 2;
1859
1860 /* TODO: unhardcode */
1861 plane_state->color_space = COLOR_SPACE_YCBCR709;
1862 }
1863
1864 memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
1865
1866 /* Fill GFX8 params */
1867 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
1868 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
1869
1870 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1871 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1872 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1873 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1874 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1875
1876 /* XXX fix me for VI */
1877 plane_state->tiling_info.gfx8.num_banks = num_banks;
1878 plane_state->tiling_info.gfx8.array_mode =
1879 DC_ARRAY_2D_TILED_THIN1;
1880 plane_state->tiling_info.gfx8.tile_split = tile_split;
1881 plane_state->tiling_info.gfx8.bank_width = bankw;
1882 plane_state->tiling_info.gfx8.bank_height = bankh;
1883 plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
1884 plane_state->tiling_info.gfx8.tile_mode =
1885 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
1886 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
1887 == DC_ARRAY_1D_TILED_THIN1) {
1888 plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
1889 }
1890
1891 plane_state->tiling_info.gfx8.pipe_config =
1892 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1893
1894 if (adev->asic_type == CHIP_VEGA10 ||
1895 adev->asic_type == CHIP_RAVEN) {
1896 /* Fill GFX9 params */
1897 plane_state->tiling_info.gfx9.num_pipes =
1898 adev->gfx.config.gb_addr_config_fields.num_pipes;
1899 plane_state->tiling_info.gfx9.num_banks =
1900 adev->gfx.config.gb_addr_config_fields.num_banks;
1901 plane_state->tiling_info.gfx9.pipe_interleave =
1902 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
1903 plane_state->tiling_info.gfx9.num_shader_engines =
1904 adev->gfx.config.gb_addr_config_fields.num_se;
1905 plane_state->tiling_info.gfx9.max_compressed_frags =
1906 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
1907 plane_state->tiling_info.gfx9.num_rb_per_se =
1908 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
1909 plane_state->tiling_info.gfx9.swizzle =
1910 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
1911 plane_state->tiling_info.gfx9.shaderEnable = 1;
1912 }
1913
1914 plane_state->visible = true;
1915 plane_state->scaling_quality.h_taps_c = 0;
1916 plane_state->scaling_quality.v_taps_c = 0;
1917
1918 /* is this needed? is plane_state zeroed at allocation? */
1919 plane_state->scaling_quality.h_taps = 0;
1920 plane_state->scaling_quality.v_taps = 0;
1921 plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
1922
1923 return ret;
1924
1925 }
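
/*
 * Worked example (illustrative only, not driver code): for a DRM_FORMAT_NV12
 * framebuffer of 1366x768 placed at fb_location, the video branch above
 * computes
 *
 *   awidth      = ALIGN(1366, 64)          = 1408
 *   luma_addr   = fb_location
 *   chroma_addr = fb_location + 1408 * 768 = fb_location + 0x108000
 *
 * so the interleaved CbCr plane starts immediately after the 64-pixel-aligned
 * luma plane, with luma_pitch = 1408 and chroma_pitch = 1408 / 2 = 704.
 */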
1926
1927 static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state,
1928 struct dc_plane_state *plane_state)
1929 {
1930 int i;
1931 struct dc_gamma *gamma;
1932 struct drm_color_lut *lut =
1933 (struct drm_color_lut *) crtc_state->gamma_lut->data;
1934
1935 gamma = dc_create_gamma();
1936
1937 if (gamma == NULL) {
1938 WARN_ON(1);
1939 return;
1940 }
1941
1942 gamma->type = GAMMA_RGB_256;
1943 gamma->num_entries = GAMMA_RGB_256_ENTRIES;
1944 for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) {
1945 gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red);
1946 gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green);
1947 gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue);
1948 }
1949
1950 plane_state->gamma_correction = gamma;
1951 }
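
/*
 * A minimal user-space sketch (assumption: this is not part of the driver) of
 * how the 256-entry LUT consumed by fill_gamma_from_crtc_state() gets there:
 * the legacy gamma ioctl, exposed by libdrm as drmModeCrtcSetGamma(), is
 * routed through drm_atomic_helper_legacy_gamma_set() (see the crtc funcs
 * below) into crtc_state->gamma_lut. fd and crtc_id are assumed to be
 * obtained by the caller.
 */
#if 0 /* illustration only, never compiled */
	uint16_t r[256], g[256], b[256];
	int i;

	for (i = 0; i < 256; i++)
		r[i] = g[i] = b[i] = i << 8;	/* identity ramp */
	drmModeCrtcSetGamma(fd, crtc_id, 256, r, g, b);
#endif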
1952
1953 static int fill_plane_attributes(struct amdgpu_device *adev,
1954 struct dc_plane_state *dc_plane_state,
1955 struct drm_plane_state *plane_state,
1956 struct drm_crtc_state *crtc_state,
1957 bool addrReq)
1958 {
1959 const struct amdgpu_framebuffer *amdgpu_fb =
1960 to_amdgpu_framebuffer(plane_state->fb);
1961 const struct drm_crtc *crtc = plane_state->crtc;
1962 struct dc_transfer_func *input_tf;
1963 int ret = 0;
1964
1965 if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
1966 return -EINVAL;
1967
1968 ret = fill_plane_attributes_from_fb(
1969 crtc->dev->dev_private,
1970 dc_plane_state,
1971 amdgpu_fb,
1972 addrReq);
1973
1974 if (ret)
1975 return ret;
1976
1977 input_tf = dc_create_transfer_func();
1978
1979 if (input_tf == NULL)
1980 return -ENOMEM;
1981
1982 input_tf->type = TF_TYPE_PREDEFINED;
1983 input_tf->tf = TRANSFER_FUNCTION_SRGB;
1984
1985 dc_plane_state->in_transfer_func = input_tf;
1986
1987 /* In case of gamma set, update gamma value */
1988 if (crtc_state->gamma_lut)
1989 fill_gamma_from_crtc_state(crtc_state, dc_plane_state);
1990
1991 return ret;
1992 }
1993
1994 /*****************************************************************************/
1995
1996 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
1997 const struct dm_connector_state *dm_state,
1998 struct dc_stream_state *stream)
1999 {
2000 enum amdgpu_rmx_type rmx_type;
2001
2002 struct rect src = { 0 }; /* viewport in composition space */
2003 struct rect dst = { 0 }; /* stream addressable area */
2004
2005 /* no mode. nothing to be done */
2006 if (!mode)
2007 return;
2008
2009 /* Full screen scaling by default */
2010 src.width = mode->hdisplay;
2011 src.height = mode->vdisplay;
2012 dst.width = stream->timing.h_addressable;
2013 dst.height = stream->timing.v_addressable;
2014
2015 rmx_type = dm_state->scaling;
2016 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2017 if (src.width * dst.height <
2018 src.height * dst.width) {
2019 /* height needs less upscaling/more downscaling */
2020 dst.width = src.width *
2021 dst.height / src.height;
2022 } else {
2023 /* width needs less upscaling/more downscaling */
2024 dst.height = src.height *
2025 dst.width / src.width;
2026 }
2027 } else if (rmx_type == RMX_CENTER) {
2028 dst = src;
2029 }
2030
2031 dst.x = (stream->timing.h_addressable - dst.width) / 2;
2032 dst.y = (stream->timing.v_addressable - dst.height) / 2;
2033
2034 if (dm_state->underscan_enable) {
2035 dst.x += dm_state->underscan_hborder / 2;
2036 dst.y += dm_state->underscan_vborder / 2;
2037 dst.width -= dm_state->underscan_hborder;
2038 dst.height -= dm_state->underscan_vborder;
2039 }
2040
2041 stream->src = src;
2042 stream->dst = dst;
2043
2044 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
2045 dst.x, dst.y, dst.width, dst.height);
2046
2047 }
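
/*
 * Worked example (illustrative): scaling a 1280x1024 mode onto a 1920x1080
 * panel with RMX_ASPECT. src.width * dst.height = 1280 * 1080 = 1382400 is
 * less than src.height * dst.width = 1024 * 1920 = 1966080, so the height
 * needs less upscaling and the width is recomputed:
 *
 *   dst.width = 1280 * 1080 / 1024 = 1350
 *   dst.x     = (1920 - 1350) / 2  = 285, dst.y = 0
 *
 * i.e. the image is pillarboxed with 285-pixel bars on each side.
 */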
2048
2049 static enum dc_color_depth
2050 convert_color_depth_from_display_info(const struct drm_connector *connector)
2051 {
2052 uint32_t bpc = connector->display_info.bpc;
2053
2054 /* Limit color depth to 8 bpc for now
2055 * TODO: still need to handle deep color
2056 */
2057 if (bpc > 8)
2058 bpc = 8;
2059
2060 switch (bpc) {
2061 case 0:
2062 /* Temporary workaround: DRM doesn't parse color depth for
2063 * EDID revisions before 1.4.
2064 * TODO: fix EDID parsing
2065 */
2066 return COLOR_DEPTH_888;
2067 case 6:
2068 return COLOR_DEPTH_666;
2069 case 8:
2070 return COLOR_DEPTH_888;
2071 case 10:
2072 return COLOR_DEPTH_101010;
2073 case 12:
2074 return COLOR_DEPTH_121212;
2075 case 14:
2076 return COLOR_DEPTH_141414;
2077 case 16:
2078 return COLOR_DEPTH_161616;
2079 default:
2080 return COLOR_DEPTH_UNDEFINED;
2081 }
2082 }
2083
2084 static enum dc_aspect_ratio
2085 get_aspect_ratio(const struct drm_display_mode *mode_in)
2086 {
2087 int32_t width = mode_in->crtc_hdisplay * 9;
2088 int32_t height = mode_in->crtc_vdisplay * 16;
2089
2090 if ((width - height) < 10 && (width - height) > -10)
2091 return ASPECT_RATIO_16_9;
2092 else
2093 return ASPECT_RATIO_4_3;
2094 }
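
/*
 * Worked example (illustrative): the cross-multiplication avoids a division.
 * For 1920x1080, width = 1920 * 9 = 17280 and height = 1080 * 16 = 17280,
 * so the difference is 0 and ASPECT_RATIO_16_9 is returned. For 1024x768,
 * 9216 vs. 12288 differs by far more than the +/-10 tolerance, so the mode
 * falls back to ASPECT_RATIO_4_3.
 */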
2095
2096 static enum dc_color_space
2097 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2098 {
2099 enum dc_color_space color_space = COLOR_SPACE_SRGB;
2100
2101 switch (dc_crtc_timing->pixel_encoding) {
2102 case PIXEL_ENCODING_YCBCR422:
2103 case PIXEL_ENCODING_YCBCR444:
2104 case PIXEL_ENCODING_YCBCR420:
2105 {
2106 /*
2107 * 27030 kHz is the separation point between HDTV and SDTV;
2108 * according to the HDMI spec, we use YCbCr709 and YCbCr601
2109 * respectively.
2110 */
2111 if (dc_crtc_timing->pix_clk_khz > 27030) {
2112 if (dc_crtc_timing->flags.Y_ONLY)
2113 color_space =
2114 COLOR_SPACE_YCBCR709_LIMITED;
2115 else
2116 color_space = COLOR_SPACE_YCBCR709;
2117 } else {
2118 if (dc_crtc_timing->flags.Y_ONLY)
2119 color_space =
2120 COLOR_SPACE_YCBCR601_LIMITED;
2121 else
2122 color_space = COLOR_SPACE_YCBCR601;
2123 }
2124
2125 }
2126 break;
2127 case PIXEL_ENCODING_RGB:
2128 color_space = COLOR_SPACE_SRGB;
2129 break;
2130
2131 default:
2132 WARN_ON(1);
2133 break;
2134 }
2135
2136 return color_space;
2137 }
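
/*
 * Worked example (illustrative): a CEA 1080p60 YCbCr stream has
 * pix_clk_khz = 148500 > 27030, so COLOR_SPACE_YCBCR709 is selected, while
 * 480p60 at 27000 kHz lands on the SDTV side and gets COLOR_SPACE_YCBCR601.
 * RGB streams always report COLOR_SPACE_SRGB regardless of pixel clock.
 */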
2138
2139 /*****************************************************************************/
2140
2141 static void
2142 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2143 const struct drm_display_mode *mode_in,
2144 const struct drm_connector *connector)
2145 {
2146 struct dc_crtc_timing *timing_out = &stream->timing;
2147
2148 memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2149
2150 timing_out->h_border_left = 0;
2151 timing_out->h_border_right = 0;
2152 timing_out->v_border_top = 0;
2153 timing_out->v_border_bottom = 0;
2154 /* TODO: un-hardcode */
2155
2156 if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2157 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2158 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2159 else
2160 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
2161
2162 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
2163 timing_out->display_color_depth = convert_color_depth_from_display_info(
2164 connector);
2165 timing_out->scan_type = SCANNING_TYPE_NODATA;
2166 timing_out->hdmi_vic = 0;
2167 timing_out->vic = drm_match_cea_mode(mode_in);
2168
2169 timing_out->h_addressable = mode_in->crtc_hdisplay;
2170 timing_out->h_total = mode_in->crtc_htotal;
2171 timing_out->h_sync_width =
2172 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
2173 timing_out->h_front_porch =
2174 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
2175 timing_out->v_total = mode_in->crtc_vtotal;
2176 timing_out->v_addressable = mode_in->crtc_vdisplay;
2177 timing_out->v_front_porch =
2178 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
2179 timing_out->v_sync_width =
2180 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
2181 timing_out->pix_clk_khz = mode_in->crtc_clock;
2182 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
2183 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
2184 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
2185 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
2186 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
2187
2188 stream->output_color_space = get_output_color_space(timing_out);
2189
{
	struct dc_transfer_func *tf = dc_create_transfer_func();

	/* dc_create_transfer_func() may fail; skip rather than oops */
	if (tf) {
		tf->type = TF_TYPE_PREDEFINED;
		tf->tf = TRANSFER_FUNCTION_SRGB;
		stream->out_transfer_func = tf;
	}
}
2197 }
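
/*
 * Worked example (illustrative): for the CEA-861 1920x1080@60 timing
 * (hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200) the
 * conversions above yield
 *
 *   h_front_porch = 2008 - 1920 = 88
 *   h_sync_width  = 2052 - 2008 = 44
 *
 * leaving 2200 - 2052 = 148 pixels of back porch, which matches the
 * standard 88/44/148 split for that mode.
 */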
2198
2199 static void fill_audio_info(struct audio_info *audio_info,
2200 const struct drm_connector *drm_connector,
2201 const struct dc_sink *dc_sink)
2202 {
2203 int i = 0;
2204 int cea_revision = 0;
2205 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
2206
2207 audio_info->manufacture_id = edid_caps->manufacturer_id;
2208 audio_info->product_id = edid_caps->product_id;
2209
2210 cea_revision = drm_connector->display_info.cea_rev;
2211
2212 strncpy(audio_info->display_name,
2213 edid_caps->display_name,
2214 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
2215
2216 if (cea_revision >= 3) {
2217 audio_info->mode_count = edid_caps->audio_mode_count;
2218
2219 for (i = 0; i < audio_info->mode_count; ++i) {
2220 audio_info->modes[i].format_code =
2221 (enum audio_format_code)
2222 (edid_caps->audio_modes[i].format_code);
2223 audio_info->modes[i].channel_count =
2224 edid_caps->audio_modes[i].channel_count;
2225 audio_info->modes[i].sample_rates.all =
2226 edid_caps->audio_modes[i].sample_rate;
2227 audio_info->modes[i].sample_size =
2228 edid_caps->audio_modes[i].sample_size;
2229 }
2230 }
2231
2232 audio_info->flags.all = edid_caps->speaker_flags;
2233
2234 /* TODO: we only check the progressive mode; check the interlaced mode too */
2235 if (drm_connector->latency_present[0]) {
2236 audio_info->video_latency = drm_connector->video_latency[0];
2237 audio_info->audio_latency = drm_connector->audio_latency[0];
2238 }
2239
2240 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2241
2242 }
2243
2244 static void
2245 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
2246 struct drm_display_mode *dst_mode)
2247 {
2248 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
2249 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
2250 dst_mode->crtc_clock = src_mode->crtc_clock;
2251 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
2252 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
2253 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
2254 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
2255 dst_mode->crtc_htotal = src_mode->crtc_htotal;
2256 dst_mode->crtc_hskew = src_mode->crtc_hskew;
2257 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
2258 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
2259 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
2260 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
2261 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
2262 }
2263
2264 static void
2265 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
2266 const struct drm_display_mode *native_mode,
2267 bool scale_enabled)
2268 {
2269 if (scale_enabled) {
2270 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2271 } else if (native_mode->clock == drm_mode->clock &&
2272 native_mode->htotal == drm_mode->htotal &&
2273 native_mode->vtotal == drm_mode->vtotal) {
2274 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2275 } else {
2276 /* neither scaling nor an amdgpu-inserted mode; no need to patch */
2277 }
2278 }
2279
2280 static void create_fake_sink(struct amdgpu_dm_connector *aconnector)
2281 {
2282 struct dc_sink *sink = NULL;
2283 struct dc_sink_init_data sink_init_data = { 0 };
2284
2285 sink_init_data.link = aconnector->dc_link;
2286 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
2287
2288 sink = dc_sink_create(&sink_init_data);
if (!sink) {
	DRM_ERROR("Failed to create sink!\n");
	return;	/* don't dereference a NULL sink below */
}

2292 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
2293 aconnector->fake_enable = true;
2294
2295 aconnector->dc_sink = sink;
2296 aconnector->dc_link->local_sink = sink;
2297 }
2298
2299 static struct dc_stream_state *
2300 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2301 const struct drm_display_mode *drm_mode,
2302 const struct dm_connector_state *dm_state)
2303 {
2304 struct drm_display_mode *preferred_mode = NULL;
2305 const struct drm_connector *drm_connector;
2306 struct dc_stream_state *stream = NULL;
2307 struct drm_display_mode mode = *drm_mode;
2308 bool native_mode_found = false;
2309
2310 if (aconnector == NULL) {
2311 DRM_ERROR("aconnector is NULL!\n");
2312 goto drm_connector_null;
2313 }
2314
2315 if (dm_state == NULL) {
2316 DRM_ERROR("dm_state is NULL!\n");
2317 goto dm_state_null;
2318 }
2319
2320 drm_connector = &aconnector->base;
2321
2322 if (!aconnector->dc_sink) {
2323 /*
2324 * Exclude MST from creating fake_sink
2325 * TODO: need to enable MST into fake_sink feature
2326 */
2327 if (aconnector->mst_port)
2328 goto stream_create_fail;
2329
2330 create_fake_sink(aconnector);
2331 }
2332
2333 stream = dc_create_stream_for_sink(aconnector->dc_sink);
2334
2335 if (stream == NULL) {
2336 DRM_ERROR("Failed to create stream for sink!\n");
2337 goto stream_create_fail;
2338 }
2339
2340 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
2341 /* Search for preferred mode */
2342 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
2343 native_mode_found = true;
2344 break;
2345 }
2346 }
2347 if (!native_mode_found)
2348 preferred_mode = list_first_entry_or_null(
2349 &aconnector->base.modes,
2350 struct drm_display_mode,
2351 head);
2352
2353 if (preferred_mode == NULL) {
2354 /* This may not be an error: the use case is when we have no
2355 * usermode calls to reset and set mode upon hotplug. In this
2356 * case, we call set mode ourselves to restore the previous mode,
2357 * and the mode list may not be filled in yet.
2358 */
2359 DRM_DEBUG_DRIVER("No preferred mode found\n");
2360 } else {
2361 decide_crtc_timing_for_drm_display_mode(
2362 &mode, preferred_mode,
2363 dm_state->scaling != RMX_OFF);
2364 }
2365
2366 fill_stream_properties_from_drm_display_mode(stream,
2367 &mode, &aconnector->base);
2368 update_stream_scaling_settings(&mode, dm_state, stream);
2369
2370 fill_audio_info(
2371 &stream->audio_info,
2372 drm_connector,
2373 aconnector->dc_sink);
2374
2375 stream_create_fail:
2376 dm_state_null:
2377 drm_connector_null:
2378 return stream;
2379 }
2380
2381 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
2382 {
2383 drm_crtc_cleanup(crtc);
2384 kfree(crtc);
2385 }
2386
2387 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
2388 struct drm_crtc_state *state)
2389 {
2390 struct dm_crtc_state *cur = to_dm_crtc_state(state);
2391
2392 /* TODO: destroy dc_stream once the stream object is flattened */
2393 if (cur->stream)
2394 dc_stream_release(cur->stream);
2395
2396
2397 __drm_atomic_helper_crtc_destroy_state(state);
2398
2399
2400 kfree(state);
2401 }
2402
2403 static void dm_crtc_reset_state(struct drm_crtc *crtc)
2404 {
2405 struct dm_crtc_state *state;
2406
2407 if (crtc->state)
2408 dm_crtc_destroy_state(crtc, crtc->state);
2409
2410 state = kzalloc(sizeof(*state), GFP_KERNEL);
2411 if (WARN_ON(!state))
2412 return;
2413
2414 crtc->state = &state->base;
2415 crtc->state->crtc = crtc;
2416
2417 }
2418
2419 static struct drm_crtc_state *
2420 dm_crtc_duplicate_state(struct drm_crtc *crtc)
2421 {
2422 struct dm_crtc_state *state, *cur;
2423
if (WARN_ON(!crtc->state))
	return NULL;

cur = to_dm_crtc_state(crtc->state);

state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
	return NULL;

__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
2432
2433 if (cur->stream) {
2434 state->stream = cur->stream;
2435 dc_stream_retain(state->stream);
2436 }
2437
2438 /* TODO: duplicate dc_stream after the stream object is flattened */
2439
2440 return &state->base;
2441 }
2442
2443 /* Only the options currently available for the driver are implemented */
2444 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
2445 .reset = dm_crtc_reset_state,
2446 .destroy = amdgpu_dm_crtc_destroy,
2447 .gamma_set = drm_atomic_helper_legacy_gamma_set,
2448 .set_config = drm_atomic_helper_set_config,
2449 .page_flip = drm_atomic_helper_page_flip,
2450 .atomic_duplicate_state = dm_crtc_duplicate_state,
2451 .atomic_destroy_state = dm_crtc_destroy_state,
2452 };
2453
2454 static enum drm_connector_status
2455 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
2456 {
2457 bool connected;
2458 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2459
2460 /* Notes:
2461 * 1. This interface is NOT called in context of HPD irq.
2462 * 2. This interface *is* called in the context of a user-mode ioctl,
2463 * which makes it a bad place for *any* MST-related activity. */
2464
2465 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
2466 !aconnector->fake_enable)
2467 connected = (aconnector->dc_sink != NULL);
2468 else
2469 connected = (aconnector->base.force == DRM_FORCE_ON);
2470
2471 return (connected ? connector_status_connected :
2472 connector_status_disconnected);
2473 }
2474
2475 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
2476 struct drm_connector_state *connector_state,
2477 struct drm_property *property,
2478 uint64_t val)
2479 {
2480 struct drm_device *dev = connector->dev;
2481 struct amdgpu_device *adev = dev->dev_private;
2482 struct dm_connector_state *dm_old_state =
2483 to_dm_connector_state(connector->state);
2484 struct dm_connector_state *dm_new_state =
2485 to_dm_connector_state(connector_state);
2486
2487 int ret = -EINVAL;
2488
2489 if (property == dev->mode_config.scaling_mode_property) {
2490 enum amdgpu_rmx_type rmx_type;
2491
2492 switch (val) {
2493 case DRM_MODE_SCALE_CENTER:
2494 rmx_type = RMX_CENTER;
2495 break;
2496 case DRM_MODE_SCALE_ASPECT:
2497 rmx_type = RMX_ASPECT;
2498 break;
2499 case DRM_MODE_SCALE_FULLSCREEN:
2500 rmx_type = RMX_FULL;
2501 break;
2502 case DRM_MODE_SCALE_NONE:
2503 default:
2504 rmx_type = RMX_OFF;
2505 break;
2506 }
2507
2508 if (dm_old_state->scaling == rmx_type)
2509 return 0;
2510
2511 dm_new_state->scaling = rmx_type;
2512 ret = 0;
2513 } else if (property == adev->mode_info.underscan_hborder_property) {
2514 dm_new_state->underscan_hborder = val;
2515 ret = 0;
2516 } else if (property == adev->mode_info.underscan_vborder_property) {
2517 dm_new_state->underscan_vborder = val;
2518 ret = 0;
2519 } else if (property == adev->mode_info.underscan_property) {
2520 dm_new_state->underscan_enable = val;
2521 ret = 0;
2522 }
2523
2524 return ret;
2525 }
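
/*
 * A minimal user-space sketch (assumption: this is not part of the driver) of
 * how the properties handled above are driven. scaling_prop_id is assumed to
 * be the id of the scaling mode property discovered via
 * drmModeObjectGetProperties() on the connector.
 */
#if 0 /* illustration only, never compiled */
	drmModeObjectSetProperty(fd, connector_id, DRM_MODE_OBJECT_CONNECTOR,
				 scaling_prop_id, DRM_MODE_SCALE_ASPECT);
#endif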
2526
2527 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
2528 const struct drm_connector_state *state,
2529 struct drm_property *property,
2530 uint64_t *val)
2531 {
2532 struct drm_device *dev = connector->dev;
2533 struct amdgpu_device *adev = dev->dev_private;
2534 struct dm_connector_state *dm_state =
2535 to_dm_connector_state(state);
2536 int ret = -EINVAL;
2537
2538 if (property == dev->mode_config.scaling_mode_property) {
2539 switch (dm_state->scaling) {
2540 case RMX_CENTER:
2541 *val = DRM_MODE_SCALE_CENTER;
2542 break;
2543 case RMX_ASPECT:
2544 *val = DRM_MODE_SCALE_ASPECT;
2545 break;
2546 case RMX_FULL:
2547 *val = DRM_MODE_SCALE_FULLSCREEN;
2548 break;
2549 case RMX_OFF:
2550 default:
2551 *val = DRM_MODE_SCALE_NONE;
2552 break;
2553 }
2554 ret = 0;
2555 } else if (property == adev->mode_info.underscan_hborder_property) {
2556 *val = dm_state->underscan_hborder;
2557 ret = 0;
2558 } else if (property == adev->mode_info.underscan_vborder_property) {
2559 *val = dm_state->underscan_vborder;
2560 ret = 0;
2561 } else if (property == adev->mode_info.underscan_property) {
2562 *val = dm_state->underscan_enable;
2563 ret = 0;
2564 }
2565 return ret;
2566 }
2567
2568 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
2569 {
2570 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2571 const struct dc_link *link = aconnector->dc_link;
2572 struct amdgpu_device *adev = connector->dev->dev_private;
2573 struct amdgpu_display_manager *dm = &adev->dm;
2574 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2575 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2576
2577 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
2578 amdgpu_dm_register_backlight_device(dm);
2579
2580 if (dm->backlight_dev) {
2581 backlight_device_unregister(dm->backlight_dev);
2582 dm->backlight_dev = NULL;
2583 }
2584
2585 }
2586 #endif
2587 drm_connector_unregister(connector);
2588 drm_connector_cleanup(connector);
2589 kfree(connector);
2590 }
2591
2592 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
2593 {
2594 struct dm_connector_state *state =
2595 to_dm_connector_state(connector->state);
2596
2597 kfree(state);
2598
2599 state = kzalloc(sizeof(*state), GFP_KERNEL);
2600
2601 if (state) {
2602 state->scaling = RMX_OFF;
2603 state->underscan_enable = false;
2604 state->underscan_hborder = 0;
2605 state->underscan_vborder = 0;
2606
2607 connector->state = &state->base;
2608 connector->state->connector = connector;
2609 }
2610 }
2611
2612 struct drm_connector_state *
2613 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
2614 {
2615 struct dm_connector_state *state =
2616 to_dm_connector_state(connector->state);
2617
2618 struct dm_connector_state *new_state =
2619 kmemdup(state, sizeof(*state), GFP_KERNEL);
2620
2621 if (new_state) {
2622 __drm_atomic_helper_connector_duplicate_state(connector,
2623 &new_state->base);
2624 return &new_state->base;
2625 }
2626
2627 return NULL;
2628 }
2629
2630 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
2631 .reset = amdgpu_dm_connector_funcs_reset,
2632 .detect = amdgpu_dm_connector_detect,
2633 .fill_modes = drm_helper_probe_single_connector_modes,
2634 .destroy = amdgpu_dm_connector_destroy,
2635 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
2636 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
2637 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
2638 .atomic_get_property = amdgpu_dm_connector_atomic_get_property
2639 };
2640
2641 static struct drm_encoder *best_encoder(struct drm_connector *connector)
2642 {
2643 int enc_id = connector->encoder_ids[0];
2644 struct drm_mode_object *obj;
2645 struct drm_encoder *encoder;
2646
2647 DRM_DEBUG_DRIVER("Finding the best encoder\n");
2648
2649 /* pick the encoder ids */
2650 if (enc_id) {
2651 obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
2652 if (!obj) {
2653 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2654 return NULL;
2655 }
2656 encoder = obj_to_encoder(obj);
2657 return encoder;
2658 }
2659 DRM_ERROR("No encoder id\n");
2660 return NULL;
2661 }
2662
2663 static int get_modes(struct drm_connector *connector)
2664 {
2665 return amdgpu_dm_connector_get_modes(connector);
2666 }
2667
2668 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
2669 {
2670 struct dc_sink_init_data init_params = {
2671 .link = aconnector->dc_link,
2672 .sink_signal = SIGNAL_TYPE_VIRTUAL
2673 };
struct edid *edid;

if (!aconnector->base.edid_blob_ptr ||
    !aconnector->base.edid_blob_ptr->data) {
	DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
		  aconnector->base.name);

	aconnector->base.force = DRM_FORCE_OFF;
	aconnector->base.override_edid = false;
	return;
}

/* only dereference the EDID blob after the NULL checks above */
edid = (struct edid *)aconnector->base.edid_blob_ptr->data;
aconnector->edid = edid;
2687
2688 aconnector->dc_em_sink = dc_link_add_remote_sink(
2689 aconnector->dc_link,
2690 (uint8_t *)edid,
2691 (edid->extensions + 1) * EDID_LENGTH,
2692 &init_params);
2693
2694 if (aconnector->base.force == DRM_FORCE_ON)
2695 aconnector->dc_sink = aconnector->dc_link->local_sink ?
2696 aconnector->dc_link->local_sink :
2697 aconnector->dc_em_sink;
2698 }
2699
2700 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
2701 {
2702 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
2703
2704 /* In case of a headless boot with force-on for a DP managed connector,
2705 * these settings have to be != 0 to get an initial modeset
2706 */
2707 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
2708 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
2709 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
2710 }
2711
2712
2713 aconnector->base.override_edid = true;
2714 create_eml_sink(aconnector);
2715 }
2716
2717 int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
2718 struct drm_display_mode *mode)
2719 {
2720 int result = MODE_ERROR;
2721 struct dc_sink *dc_sink;
2722 struct amdgpu_device *adev = connector->dev->dev_private;
2723 /* TODO: Unhardcode stream count */
2724 struct dc_stream_state *stream;
2725 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2726
2727 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
2728 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
2729 return result;
2730
2731 /* Only run this the first time mode_valid is called to initialize
2732 * EDID mgmt
2733 */
2734 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
2735 !aconnector->dc_em_sink)
2736 handle_edid_mgmt(aconnector);
2737
2738 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
2739
2740 if (dc_sink == NULL) {
2741 DRM_ERROR("dc_sink is NULL!\n");
2742 goto fail;
2743 }
2744
2745 stream = dc_create_stream_for_sink(dc_sink);
2746 if (stream == NULL) {
2747 DRM_ERROR("Failed to create stream for sink!\n");
2748 goto fail;
2749 }
2750
2751 drm_mode_set_crtcinfo(mode, 0);
2752 fill_stream_properties_from_drm_display_mode(stream, mode, connector);
2753
2754 stream->src.width = mode->hdisplay;
2755 stream->src.height = mode->vdisplay;
2756 stream->dst = stream->src;
2757
2758 if (dc_validate_stream(adev->dm.dc, stream) == DC_OK)
2759 result = MODE_OK;
2760
2761 dc_stream_release(stream);
2762
2763 fail:
2764 /* TODO: error handling */
2765 return result;
2766 }
2767
2768 static const struct drm_connector_helper_funcs
2769 amdgpu_dm_connector_helper_funcs = {
2770 /*
2771 * If a second, larger display is hotplugged while in fbcon mode, the
2772 * larger modes are filtered out by drm_mode_validate_size() and are
2773 * missing once the user starts lightdm. So we need to renew the mode
2774 * list in the get_modes callback, not just return the mode count
2775 */
2776 .get_modes = get_modes,
2777 .mode_valid = amdgpu_dm_connector_mode_valid,
2778 .best_encoder = best_encoder
2779 };
2780
2781 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
2782 {
2783 }
2784
2785 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
2786 struct drm_crtc_state *state)
2787 {
2788 struct amdgpu_device *adev = crtc->dev->dev_private;
2789 struct dc *dc = adev->dm.dc;
2790 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
2791 int ret = -EINVAL;
2792
2793 if (unlikely(!dm_crtc_state->stream &&
2794 modeset_required(state, NULL, dm_crtc_state->stream))) {
2795 WARN_ON(1);
2796 return ret;
2797 }
2798
2799 /* In some use cases, like reset, no stream is attached */
2800 if (!dm_crtc_state->stream)
2801 return 0;
2802
2803 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
2804 return 0;
2805
2806 return ret;
2807 }
2808
2809 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
2810 const struct drm_display_mode *mode,
2811 struct drm_display_mode *adjusted_mode)
2812 {
2813 return true;
2814 }
2815
2816 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
2817 .disable = dm_crtc_helper_disable,
2818 .atomic_check = dm_crtc_helper_atomic_check,
2819 .mode_fixup = dm_crtc_helper_mode_fixup
2820 };
2821
2822 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
2823 {
2824
2825 }
2826
2827 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
2828 struct drm_crtc_state *crtc_state,
2829 struct drm_connector_state *conn_state)
2830 {
2831 return 0;
2832 }
2833
2834 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
2835 .disable = dm_encoder_helper_disable,
2836 .atomic_check = dm_encoder_helper_atomic_check
2837 };
2838
2839 static void dm_drm_plane_reset(struct drm_plane *plane)
2840 {
2841 struct dm_plane_state *amdgpu_state = NULL;
2842
2843 if (plane->state)
2844 plane->funcs->atomic_destroy_state(plane, plane->state);
2845
2846 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
2847 WARN_ON(amdgpu_state == NULL);
2848
2849 if (amdgpu_state) {
2850 plane->state = &amdgpu_state->base;
2851 plane->state->plane = plane;
2852 plane->state->rotation = DRM_MODE_ROTATE_0;
2853 }
2854 }
2855
2856 static struct drm_plane_state *
2857 dm_drm_plane_duplicate_state(struct drm_plane *plane)
2858 {
2859 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
2860
2861 old_dm_plane_state = to_dm_plane_state(plane->state);
2862 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
2863 if (!dm_plane_state)
2864 return NULL;
2865
2866 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
2867
2868 if (old_dm_plane_state->dc_state) {
2869 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
2870 dc_plane_state_retain(dm_plane_state->dc_state);
2871 }
2872
2873 return &dm_plane_state->base;
2874 }
2875
2876 void dm_drm_plane_destroy_state(struct drm_plane *plane,
2877 struct drm_plane_state *state)
2878 {
2879 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
2880
2881 if (dm_plane_state->dc_state)
2882 dc_plane_state_release(dm_plane_state->dc_state);
2883
2884 drm_atomic_helper_plane_destroy_state(plane, state);
2885 }
2886
2887 static const struct drm_plane_funcs dm_plane_funcs = {
2888 .update_plane = drm_atomic_helper_update_plane,
2889 .disable_plane = drm_atomic_helper_disable_plane,
2890 .destroy = drm_plane_cleanup,
2891 .reset = dm_drm_plane_reset,
2892 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
2893 .atomic_destroy_state = dm_drm_plane_destroy_state,
2894 };
2895
2896 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
2897 struct drm_plane_state *new_state)
2898 {
2899 struct amdgpu_framebuffer *afb;
2900 struct drm_gem_object *obj;
2901 struct amdgpu_bo *rbo;
2902 uint64_t chroma_addr = 0;
2903 int r;
2904 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
2905 unsigned int awidth;
2906
2907 dm_plane_state_old = to_dm_plane_state(plane->state);
2908 dm_plane_state_new = to_dm_plane_state(new_state);
2909
2910 if (!new_state->fb) {
2911 DRM_DEBUG_DRIVER("No FB bound\n");
2912 return 0;
2913 }
2914
2915 afb = to_amdgpu_framebuffer(new_state->fb);
2916
2917 obj = afb->obj;
2918 rbo = gem_to_amdgpu_bo(obj);
2919 r = amdgpu_bo_reserve(rbo, false);
2920 if (unlikely(r != 0))
2921 return r;
2922
2923 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);
2924
2925
2926 amdgpu_bo_unreserve(rbo);
2927
2928 if (unlikely(r != 0)) {
2929 if (r != -ERESTARTSYS)
2930 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
2931 return r;
2932 }
2933
2934 amdgpu_bo_ref(rbo);
2935
2936 if (dm_plane_state_new->dc_state &&
2937 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
2938 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
2939
2940 if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2941 plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
2942 plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
2943 } else {
2944 awidth = ALIGN(new_state->fb->width, 64);
2945 plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
2946 plane_state->address.video_progressive.luma_addr.low_part
2947 = lower_32_bits(afb->address);
2948 plane_state->address.video_progressive.luma_addr.high_part
2949 = upper_32_bits(afb->address);
2950 chroma_addr = afb->address + (u64)(awidth * new_state->fb->height);
2951 plane_state->address.video_progressive.chroma_addr.low_part
2952 = lower_32_bits(chroma_addr);
2953 plane_state->address.video_progressive.chroma_addr.high_part
2954 = upper_32_bits(chroma_addr);
2955 }
2956 }
2957
2958 /* This is a hack for S3: the 4.9 kernel filters out cursor buffer
2959 * prepare and cleanup in drm_atomic_helper_prepare_planes()
2960 * and drm_atomic_helper_cleanup_planes() because the fb doesn't exist in S3.
2961 * In the 4.10 kernel this code should be removed, and the amdgpu_device_suspend
2962 * code touching frame buffers should be avoided for DC.
2963 */
2964 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
2965 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);
2966
2967 acrtc->cursor_bo = obj;
2968 }
2969 return 0;
2970 }
2971
2972 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
2973 struct drm_plane_state *old_state)
2974 {
2975 struct amdgpu_bo *rbo;
2976 struct amdgpu_framebuffer *afb;
2977 int r;
2978
2979 if (!old_state->fb)
2980 return;
2981
2982 afb = to_amdgpu_framebuffer(old_state->fb);
2983 rbo = gem_to_amdgpu_bo(afb->obj);
2984 r = amdgpu_bo_reserve(rbo, false);
2985 if (unlikely(r)) {
2986 DRM_ERROR("failed to reserve rbo before unpin\n");
2987 return;
2988 }
2989
2990 amdgpu_bo_unpin(rbo);
2991 amdgpu_bo_unreserve(rbo);
2992 amdgpu_bo_unref(&rbo);
2993 }
2994
2995 static int dm_plane_atomic_check(struct drm_plane *plane,
2996 struct drm_plane_state *state)
2997 {
2998 struct amdgpu_device *adev = plane->dev->dev_private;
2999 struct dc *dc = adev->dm.dc;
3000 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3001
3002 if (!dm_plane_state->dc_state)
3003 return 0;
3004
3005 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3006 return 0;
3007
3008 return -EINVAL;
3009 }
3010
3011 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
3012 .prepare_fb = dm_plane_helper_prepare_fb,
3013 .cleanup_fb = dm_plane_helper_cleanup_fb,
3014 .atomic_check = dm_plane_atomic_check,
3015 };
3016
3017 /*
3018 * TODO: these are currently initialized to rgb formats only.
3019 * For future use cases we should either initialize them dynamically based on
3020 * plane capabilities, or initialize this array to all formats, so internal drm
3021 * check will succeed, and let DC to implement proper check
3022 */
3023 static const uint32_t rgb_formats[] = {
3024 DRM_FORMAT_RGB888,
3025 DRM_FORMAT_XRGB8888,
3026 DRM_FORMAT_ARGB8888,
3027 DRM_FORMAT_RGBA8888,
3028 DRM_FORMAT_XRGB2101010,
3029 DRM_FORMAT_XBGR2101010,
3030 DRM_FORMAT_ARGB2101010,
3031 DRM_FORMAT_ABGR2101010,
3032 };
3033
3034 static const uint32_t yuv_formats[] = {
3035 DRM_FORMAT_NV12,
3036 DRM_FORMAT_NV21,
3037 };
3038
3039 static const u32 cursor_formats[] = {
3040 DRM_FORMAT_ARGB8888
3041 };
3042
3043 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
3044 struct amdgpu_plane *aplane,
3045 unsigned long possible_crtcs)
3046 {
3047 int res = -EPERM;
3048
3049 switch (aplane->base.type) {
3050 case DRM_PLANE_TYPE_PRIMARY:
3051 aplane->base.format_default = true;
3052
3053 res = drm_universal_plane_init(
3054 dm->adev->ddev,
3055 &aplane->base,
3056 possible_crtcs,
3057 &dm_plane_funcs,
3058 rgb_formats,
3059 ARRAY_SIZE(rgb_formats),
3060 NULL, aplane->base.type, NULL);
3061 break;
3062 case DRM_PLANE_TYPE_OVERLAY:
3063 res = drm_universal_plane_init(
3064 dm->adev->ddev,
3065 &aplane->base,
3066 possible_crtcs,
3067 &dm_plane_funcs,
3068 yuv_formats,
3069 ARRAY_SIZE(yuv_formats),
3070 NULL, aplane->base.type, NULL);
3071 break;
3072 case DRM_PLANE_TYPE_CURSOR:
3073 res = drm_universal_plane_init(
3074 dm->adev->ddev,
3075 &aplane->base,
3076 possible_crtcs,
3077 &dm_plane_funcs,
3078 cursor_formats,
3079 ARRAY_SIZE(cursor_formats),
3080 NULL, aplane->base.type, NULL);
3081 break;
3082 }
3083
3084 drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
3085
3086 return res;
3087 }
3088
3089 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
3090 struct drm_plane *plane,
3091 uint32_t crtc_index)
3092 {
3093 struct amdgpu_crtc *acrtc = NULL;
3094 struct amdgpu_plane *cursor_plane;
3095
3096 int res = -ENOMEM;
3097
3098 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
3099 if (!cursor_plane)
3100 goto fail;
3101
3102 cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
3103 res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
3104
3105 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
3106 if (!acrtc)
3107 goto fail;
3108
3109 res = drm_crtc_init_with_planes(
3110 dm->ddev,
3111 &acrtc->base,
3112 plane,
3113 &cursor_plane->base,
3114 &amdgpu_dm_crtc_funcs, NULL);
3115
3116 if (res)
3117 goto fail;
3118
3119 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
3120
3121 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
3122 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
3123
3124 acrtc->crtc_id = crtc_index;
3125 acrtc->base.enabled = false;
3126
3127 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
3128 drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
3129
3130 return 0;
3131
3132 fail:
3133 kfree(acrtc);
3134 kfree(cursor_plane);
3135 return res;
3136 }
3137
3138
3139 static int to_drm_connector_type(enum signal_type st)
3140 {
3141 switch (st) {
3142 case SIGNAL_TYPE_HDMI_TYPE_A:
3143 return DRM_MODE_CONNECTOR_HDMIA;
3144 case SIGNAL_TYPE_EDP:
3145 return DRM_MODE_CONNECTOR_eDP;
3146 case SIGNAL_TYPE_RGB:
3147 return DRM_MODE_CONNECTOR_VGA;
3148 case SIGNAL_TYPE_DISPLAY_PORT:
3149 case SIGNAL_TYPE_DISPLAY_PORT_MST:
3150 return DRM_MODE_CONNECTOR_DisplayPort;
3151 case SIGNAL_TYPE_DVI_DUAL_LINK:
3152 case SIGNAL_TYPE_DVI_SINGLE_LINK:
3153 return DRM_MODE_CONNECTOR_DVID;
3154 case SIGNAL_TYPE_VIRTUAL:
3155 return DRM_MODE_CONNECTOR_VIRTUAL;
3156
3157 default:
3158 return DRM_MODE_CONNECTOR_Unknown;
3159 }
3160 }
3161
3162 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
3163 {
3164 const struct drm_connector_helper_funcs *helper =
3165 connector->helper_private;
3166 struct drm_encoder *encoder;
3167 struct amdgpu_encoder *amdgpu_encoder;
3168
3169 encoder = helper->best_encoder(connector);
3170
3171 if (encoder == NULL)
3172 return;
3173
3174 amdgpu_encoder = to_amdgpu_encoder(encoder);
3175
3176 amdgpu_encoder->native_mode.clock = 0;
3177
3178 if (!list_empty(&connector->probed_modes)) {
3179 struct drm_display_mode *preferred_mode = NULL;
3180
3181 list_for_each_entry(preferred_mode,
3182 &connector->probed_modes,
3183 head) {
3184 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
3185 amdgpu_encoder->native_mode = *preferred_mode;
3186
3187 break;
3188 }
3189
3190 }
3191 }
3192
3193 static struct drm_display_mode *
3194 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
3195 char *name,
3196 int hdisplay, int vdisplay)
3197 {
3198 struct drm_device *dev = encoder->dev;
3199 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3200 struct drm_display_mode *mode = NULL;
3201 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3202
3203 mode = drm_mode_duplicate(dev, native_mode);
3204
3205 if (mode == NULL)
3206 return NULL;
3207
3208 mode->hdisplay = hdisplay;
3209 mode->vdisplay = vdisplay;
3210 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
3211 strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
3212
3213 return mode;
3214
3215 }
3216
3217 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3218 struct drm_connector *connector)
3219 {
3220 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3221 struct drm_display_mode *mode = NULL;
3222 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3223 struct amdgpu_dm_connector *amdgpu_dm_connector =
3224 to_amdgpu_dm_connector(connector);
3225 int i;
3226 int n;
3227 struct mode_size {
3228 char name[DRM_DISPLAY_MODE_LEN];
3229 int w;
3230 int h;
3231 } common_modes[] = {
3232 { "640x480", 640, 480},
3233 { "800x600", 800, 600},
3234 { "1024x768", 1024, 768},
3235 { "1280x720", 1280, 720},
3236 { "1280x800", 1280, 800},
3237 {"1280x1024", 1280, 1024},
3238 { "1440x900", 1440, 900},
3239 {"1680x1050", 1680, 1050},
3240 {"1600x1200", 1600, 1200},
3241 {"1920x1080", 1920, 1080},
3242 {"1920x1200", 1920, 1200}
3243 };
3244
3245 n = ARRAY_SIZE(common_modes);
3246
3247 for (i = 0; i < n; i++) {
3248 struct drm_display_mode *curmode = NULL;
3249 bool mode_existed = false;
3250
3251 if (common_modes[i].w > native_mode->hdisplay ||
3252 common_modes[i].h > native_mode->vdisplay ||
3253 (common_modes[i].w == native_mode->hdisplay &&
3254 common_modes[i].h == native_mode->vdisplay))
3255 continue;
3256
3257 list_for_each_entry(curmode, &connector->probed_modes, head) {
3258 if (common_modes[i].w == curmode->hdisplay &&
3259 common_modes[i].h == curmode->vdisplay) {
3260 mode_existed = true;
3261 break;
3262 }
3263 }
3264
3265 if (mode_existed)
3266 continue;
3267
3268 mode = amdgpu_dm_create_common_mode(encoder,
3269 common_modes[i].name, common_modes[i].w,
3270 common_modes[i].h);
3271 drm_mode_probed_add(connector, mode);
3272 amdgpu_dm_connector->num_modes++;
3273 }
3274 }
3275
3276 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
3277 struct edid *edid)
3278 {
3279 struct amdgpu_dm_connector *amdgpu_dm_connector =
3280 to_amdgpu_dm_connector(connector);
3281
3282 if (edid) {
3283 /* empty probed_modes */
3284 INIT_LIST_HEAD(&connector->probed_modes);
3285 amdgpu_dm_connector->num_modes =
3286 drm_add_edid_modes(connector, edid);
3287
3288 drm_edid_to_eld(connector, edid);
3289
3290 amdgpu_dm_get_native_mode(connector);
3291 } else {
3292 amdgpu_dm_connector->num_modes = 0;
3293 }
3294 }
3295
3296 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
3297 {
3298 const struct drm_connector_helper_funcs *helper =
3299 connector->helper_private;
3300 struct amdgpu_dm_connector *amdgpu_dm_connector =
3301 to_amdgpu_dm_connector(connector);
3302 struct drm_encoder *encoder;
3303 struct edid *edid = amdgpu_dm_connector->edid;
3304
3305 encoder = helper->best_encoder(connector);
3306
3307 amdgpu_dm_connector_ddc_get_modes(connector, edid);
3308 amdgpu_dm_connector_add_common_modes(encoder, connector);
3309 return amdgpu_dm_connector->num_modes;
3310 }
3311
3312 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
3313 struct amdgpu_dm_connector *aconnector,
3314 int connector_type,
3315 struct dc_link *link,
3316 int link_index)
3317 {
3318 struct amdgpu_device *adev = dm->ddev->dev_private;
3319
3320 aconnector->connector_id = link_index;
3321 aconnector->dc_link = link;
3322 aconnector->base.interlace_allowed = false;
3323 aconnector->base.doublescan_allowed = false;
3324 aconnector->base.stereo_allowed = false;
3325 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
3326 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
3327
3328 mutex_init(&aconnector->hpd_lock);
3329
3330 /* Configure HPD hot-plug support. The connector->polled default value
3331 * is 0, which means HPD hot plug is not supported
3332 */
3333 switch (connector_type) {
3334 case DRM_MODE_CONNECTOR_HDMIA:
3335 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3336 break;
3337 case DRM_MODE_CONNECTOR_DisplayPort:
3338 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3339 break;
3340 case DRM_MODE_CONNECTOR_DVID:
3341 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3342 break;
3343 default:
3344 break;
3345 }
3346
3347 drm_object_attach_property(&aconnector->base.base,
3348 dm->ddev->mode_config.scaling_mode_property,
3349 DRM_MODE_SCALE_NONE);
3350
3351 drm_object_attach_property(&aconnector->base.base,
3352 adev->mode_info.underscan_property,
3353 UNDERSCAN_OFF);
3354 drm_object_attach_property(&aconnector->base.base,
3355 adev->mode_info.underscan_hborder_property,
3356 0);
3357 drm_object_attach_property(&aconnector->base.base,
3358 adev->mode_info.underscan_vborder_property,
3359 0);
3360
3361 }
3362
3363 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
3364 struct i2c_msg *msgs, int num)
3365 {
3366 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
3367 struct ddc_service *ddc_service = i2c->ddc_service;
3368 struct i2c_command cmd;
3369 int i;
3370 int result = -EIO;
3371
3372 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
3373
3374 if (!cmd.payloads)
3375 return result;
3376
3377 cmd.number_of_payloads = num;
3378 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
3379 cmd.speed = 100;
3380
3381 for (i = 0; i < num; i++) {
3382 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
3383 cmd.payloads[i].address = msgs[i].addr;
3384 cmd.payloads[i].length = msgs[i].len;
3385 cmd.payloads[i].data = msgs[i].buf;
3386 }
3387
3388 if (dal_i2caux_submit_i2c_command(
3389 ddc_service->ctx->i2caux,
3390 ddc_service->ddc_pin,
3391 &cmd))
3392 result = num;
3393
3394 kfree(cmd.payloads);
3395 return result;
3396 }
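
/*
 * Worked example (illustrative): a typical EDID block read reaches this
 * transfer hook as two i2c_msg entries for slave address 0x50 -- a one-byte
 * write of the EDID offset, then a 128-byte read. The loop above maps them
 * to two i2c_payloads (write = true, then write = false) submitted as a
 * single 100 kHz i2c_command through the DAL i2c/aux layer.
 */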
3397
3398 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
3399 {
3400 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
3401 }
3402
3403 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
3404 .master_xfer = amdgpu_dm_i2c_xfer,
3405 .functionality = amdgpu_dm_i2c_func,
3406 };
3407
3408 static struct amdgpu_i2c_adapter *
3409 create_i2c(struct ddc_service *ddc_service,
3410 int link_index,
3411 int *res)
3412 {
3413 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
3414 struct amdgpu_i2c_adapter *i2c;
3415
i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
if (!i2c)
	return NULL;
3417 i2c->base.owner = THIS_MODULE;
3418 i2c->base.class = I2C_CLASS_DDC;
3419 i2c->base.dev.parent = &adev->pdev->dev;
3420 i2c->base.algo = &amdgpu_dm_i2c_algo;
3421 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
3422 i2c_set_adapdata(&i2c->base, i2c);
3423 i2c->ddc_service = ddc_service;
3424
3425 return i2c;
3426 }
3427
3428 /* Note: this function assumes that dc_link_detect() was called for the
3429 * dc_link which will be represented by this aconnector.
3430 */
3431 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
3432 struct amdgpu_dm_connector *aconnector,
3433 uint32_t link_index,
3434 struct amdgpu_encoder *aencoder)
3435 {
3436 int res = 0;
3437 int connector_type;
3438 struct dc *dc = dm->dc;
3439 struct dc_link *link = dc_get_link_at_index(dc, link_index);
3440 struct amdgpu_i2c_adapter *i2c;
3441
3442 link->priv = aconnector;
3443
3444 DRM_DEBUG_DRIVER("%s()\n", __func__);
3445
i2c = create_i2c(link->ddc, link->link_index, &res);
if (!i2c) {
	DRM_ERROR("Failed to create i2c adapter data\n");
	return -ENOMEM;
}
aconnector->i2c = i2c;
res = i2c_add_adapter(&i2c->base);
3449
3450 if (res) {
3451 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
3452 goto out_free;
3453 }
3454
3455 connector_type = to_drm_connector_type(link->connector_signal);
3456
3457 res = drm_connector_init(
3458 dm->ddev,
3459 &aconnector->base,
3460 &amdgpu_dm_connector_funcs,
3461 connector_type);
3462
3463 if (res) {
3464 DRM_ERROR("connector_init failed\n");
3465 aconnector->connector_id = -1;
3466 goto out_free;
3467 }
3468
3469 drm_connector_helper_add(
3470 &aconnector->base,
3471 &amdgpu_dm_connector_helper_funcs);
3472
3473 amdgpu_dm_connector_init_helper(
3474 dm,
3475 aconnector,
3476 connector_type,
3477 link,
3478 link_index);
3479
3480 drm_mode_connector_attach_encoder(
3481 &aconnector->base, &aencoder->base);
3482
3483 drm_connector_register(&aconnector->base);
3484
3485 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
3486 || connector_type == DRM_MODE_CONNECTOR_eDP)
3487 amdgpu_dm_initialize_dp_connector(dm, aconnector);
3488
3489 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3490 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3491
3492 /* NOTE: this currently creates a backlight device even if a panel
3493 * is not connected to the eDP/LVDS connector.
3494 *
3495 * This is less than ideal but we don't have sink information at this
3496 * stage since detection happens after. We can't do detection earlier
3497 * since MST detection needs connectors to be created first.
3498 */
3499 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
3500 /* Even if registration failed, we should continue with
3501 * DM initialization because not having a backlight control
3502 * is better than a black screen.
3503 */
3504 amdgpu_dm_register_backlight_device(dm);
3505
3506 if (dm->backlight_dev)
3507 dm->backlight_link = link;
3508 }
3509 #endif
3510
3511 out_free:
3512 if (res) {
3513 kfree(i2c);
3514 aconnector->i2c = NULL;
3515 }
3516 return res;
3517 }
3518
3519 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
3520 {
3521 switch (adev->mode_info.num_crtc) {
3522 case 1:
3523 return 0x1;
3524 case 2:
3525 return 0x3;
3526 case 3:
3527 return 0x7;
3528 case 4:
3529 return 0xf;
3530 case 5:
3531 return 0x1f;
3532 case 6:
3533 default:
3534 return 0x3f;
3535 }
3536 }
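
/*
 * Equivalently (a minimal sketch, assuming num_crtc stays within the 1..6
 * range the switch above handles): the mask is just the low num_crtc bits.
 */
#if 0 /* illustration only, never compiled */
	return (1u << adev->mode_info.num_crtc) - 1;
#endif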
3537
3538 static int amdgpu_dm_encoder_init(struct drm_device *dev,
3539 struct amdgpu_encoder *aencoder,
3540 uint32_t link_index)
3541 {
3542 struct amdgpu_device *adev = dev->dev_private;
3543
3544 int res = drm_encoder_init(dev,
3545 &aencoder->base,
3546 &amdgpu_dm_encoder_funcs,
3547 DRM_MODE_ENCODER_TMDS,
3548 NULL);
3549
3550 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
3551
3552 if (!res)
3553 aencoder->encoder_id = link_index;
3554 else
3555 aencoder->encoder_id = -1;
3556
3557 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
3558
3559 return res;
3560 }
3561
3562 static void manage_dm_interrupts(struct amdgpu_device *adev,
3563 struct amdgpu_crtc *acrtc,
3564 bool enable)
3565 {
3566 /*
3567 * this is not a correct translation, but it works as long as the
3568 * VBLANK constant is the same as PFLIP
3569 */
3570 int irq_type =
3571 amdgpu_crtc_idx_to_irq_type(
3572 adev,
3573 acrtc->crtc_id);
3574
3575 if (enable) {
3576 drm_crtc_vblank_on(&acrtc->base);
3577 amdgpu_irq_get(
3578 adev,
3579 &adev->pageflip_irq,
3580 irq_type);
3581 } else {
3582
3583 amdgpu_irq_put(
3584 adev,
3585 &adev->pageflip_irq,
3586 irq_type);
3587 drm_crtc_vblank_off(&acrtc->base);
3588 }
3589 }
3590
3591 static bool
3592 is_scaling_state_different(const struct dm_connector_state *dm_state,
3593 const struct dm_connector_state *old_dm_state)
3594 {
3595 if (dm_state->scaling != old_dm_state->scaling)
3596 return true;
3597 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
3598 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
3599 return true;
3600 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
3601 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
3602 return true;
3603 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
3604 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
3605 return true;
3606 return false;
3607 }
3608
3609 static void remove_stream(struct amdgpu_device *adev,
3610 struct amdgpu_crtc *acrtc,
3611 struct dc_stream_state *stream)
3612 {
3613 /* this is the update mode case */
3614 if (adev->dm.freesync_module)
3615 mod_freesync_remove_stream(adev->dm.freesync_module, stream);
3616
3617 acrtc->otg_inst = -1;
3618 acrtc->enabled = false;
3619 }
3620
3621 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
3622 struct dc_cursor_position *position)
3623 {
3624 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3625 int x, y;
3626 int xorigin = 0, yorigin = 0;
3627
3628 if (!crtc || !plane->state->fb) {
3629 position->enable = false;
3630 position->x = 0;
3631 position->y = 0;
3632 return 0;
3633 }
3634
3635 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
3636 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
3637 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3638 __func__,
3639 plane->state->crtc_w,
3640 plane->state->crtc_h);
3641 return -EINVAL;
3642 }
3643
3644 x = plane->state->crtc_x;
3645 y = plane->state->crtc_y;
3646 /* avivo cursors are offset into the total surface */
3647 x += crtc->primary->state->src_x >> 16;
3648 y += crtc->primary->state->src_y >> 16;
3649 if (x < 0) {
3650 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
3651 x = 0;
3652 }
3653 if (y < 0) {
3654 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
3655 y = 0;
3656 }
3657 position->enable = true;
3658 position->x = x;
3659 position->y = y;
3660 position->x_hotspot = xorigin;
3661 position->y_hotspot = yorigin;
3662
3663 return 0;
3664 }
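
/*
 * Worked example (illustrative): a 64x64 cursor dragged to x = -10 takes
 * the x < 0 branch above: xorigin = min(10, 63) = 10, x becomes 0, and the
 * position is reported with x_hotspot = 10. DC then starts scanning the
 * cursor surface 10 texels in, so the cursor slides smoothly off the left
 * edge instead of sticking at x = 0.
 */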
3665
3666 static void handle_cursor_update(struct drm_plane *plane,
3667 struct drm_plane_state *old_plane_state)
3668 {
3669 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
3670 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
3671 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
3672 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3673 uint64_t address = afb ? afb->address : 0;
3674 struct dc_cursor_position position;
3675 struct dc_cursor_attributes attributes;
3676 int ret;
3677
3678 if (!plane->state->fb && !old_plane_state->fb)
3679 return;
3680
3681 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
3682 __func__,
3683 amdgpu_crtc->crtc_id,
3684 plane->state->crtc_w,
3685 plane->state->crtc_h);
3686
3687 ret = get_cursor_position(plane, crtc, &position);
3688 if (ret)
3689 return;
3690
3691 if (!position.enable) {
3692 /* turn off cursor */
3693 if (crtc_state && crtc_state->stream)
3694 dc_stream_set_cursor_position(crtc_state->stream,
3695 &position);
3696 return;
3697 }
3698
3699 amdgpu_crtc->cursor_width = plane->state->crtc_w;
3700 amdgpu_crtc->cursor_height = plane->state->crtc_h;
3701
3702 attributes.address.high_part = upper_32_bits(address);
3703 attributes.address.low_part = lower_32_bits(address);
3704 attributes.width = plane->state->crtc_w;
3705 attributes.height = plane->state->crtc_h;
3706 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
3707 attributes.rotation_angle = 0;
3708 attributes.attribute_flags.value = 0;
3709
3710 attributes.pitch = attributes.width;
3711
3712 if (crtc_state->stream) {
3713 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
3714 &attributes))
3715 DRM_ERROR("DC failed to set cursor attributes\n");
3716
3717 if (!dc_stream_set_cursor_position(crtc_state->stream,
3718 &position))
3719 DRM_ERROR("DC failed to set cursor position\n");
3720 }
3721 }
3722
3723 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
3724 {
3725
3726 assert_spin_locked(&acrtc->base.dev->event_lock);
3727 WARN_ON(acrtc->event);
3728
3729 acrtc->event = acrtc->base.state->event;
3730
3731 /* Set the flip status */
3732 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
3733
3734 /* Mark this event as consumed */
3735 acrtc->base.state->event = NULL;
3736
3737 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
3738 acrtc->crtc_id);
3739 }
3740
3741 /*
3742 * Executes flip
3743 *
3744 * Waits on all BO's fences and for proper vblank count
3745 */
3746 static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
3747 struct drm_framebuffer *fb,
3748 uint32_t target,
3749 struct dc_state *state)
3750 {
3751 unsigned long flags;
3752 uint32_t target_vblank;
3753 int r, vpos, hpos;
3754 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3755 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
3756 struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
3757 struct amdgpu_device *adev = crtc->dev->dev_private;
3758 bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
3759 struct dc_flip_addrs addr = { {0} };
3760 /* TODO eliminate or rename surface_update */
3761 struct dc_surface_update surface_updates[1] = { {0} };
3762 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
3763
3764
3765 /* Prepare wait for target vblank early - before the fence-waits */
3766 target_vblank = target - drm_crtc_vblank_count(crtc) +
3767 amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
3768
3769 /* TODO: This might fail and hence is better not used; wait
3770 * explicitly on the fences instead. In general this should
3771 * only be called for blocking commits, as per the framework
3772 * helpers.
3773 */
3774 r = amdgpu_bo_reserve(abo, true);
3775 if (unlikely(r != 0)) {
3776 DRM_ERROR("failed to reserve buffer before flip\n");
3777 WARN_ON(1);
3778 }
3779
3780 /* Wait for all fences on this FB */
3781 WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
3782 MAX_SCHEDULE_TIMEOUT) < 0);
3783
3784 amdgpu_bo_unreserve(abo);
3785
3786 /* Wait until we're out of the vertical blank period before the one
3787 * targeted by the flip
3788 */
3789 while ((acrtc->enabled &&
3790 (amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
3791 &vpos, &hpos, NULL, NULL,
3792 &crtc->hwmode)
3793 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
3794 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
3795 (int)(target_vblank -
3796 amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
3797 usleep_range(1000, 1100);
3798 }
3799
3800 /* Flip */
3801 spin_lock_irqsave(&crtc->dev->event_lock, flags);
3802 /* update crtc fb */
3803 crtc->primary->fb = fb;
3804
3805 WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
3806 WARN_ON(!acrtc_state->stream);
3807
3808 addr.address.grph.addr.low_part = lower_32_bits(afb->address);
3809 addr.address.grph.addr.high_part = upper_32_bits(afb->address);
3810 addr.flip_immediate = async_flip;
3811
3812
3813 if (acrtc->base.state->event)
3814 prepare_flip_isr(acrtc);
3815
3816 surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
3817 surface_updates->flip_addr = &addr;
3818
3819
3820 dc_commit_updates_for_stream(adev->dm.dc,
3821 surface_updates,
3822 1,
3823 acrtc_state->stream,
3824 NULL,
3825 &surface_updates->surface,
3826 state);
3827
3828 DRM_DEBUG_DRIVER("%s: Flipping to hi: 0x%x, low: 0x%x\n",
3829 __func__,
3830 addr.address.grph.addr.high_part,
3831 addr.address.grph.addr.low_part);
3832
3833
3834 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
3835 }
3836
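/*
 * Walks all planes in the atomic state that belong to @pcrtc: cursor
 * planes are handled immediately, page flips are dispatched through
 * amdgpu_dm_do_flip(), and full (non-flip) updates are collected and
 * committed to the stream in a single dc_commit_planes_to_stream() call.
 */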
3837 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
3838 struct drm_device *dev,
3839 struct amdgpu_display_manager *dm,
3840 struct drm_crtc *pcrtc,
3841 bool *wait_for_vblank)
3842 {
3843 uint32_t i;
3844 struct drm_plane *plane;
3845 struct drm_plane_state *old_plane_state, *new_plane_state;
3846 struct dc_stream_state *dc_stream_attach;
3847 struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
3848 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
3849 struct drm_crtc_state *new_pcrtc_state =
3850 drm_atomic_get_new_crtc_state(state, pcrtc);
3851 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
3852 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3853 int planes_count = 0;
3854 unsigned long flags;
3855
3856 /* update planes when needed */
3857 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
3858 struct drm_crtc *crtc = new_plane_state->crtc;
3859 struct drm_crtc_state *new_crtc_state;
3860 struct drm_framebuffer *fb = new_plane_state->fb;
3861 bool pflip_needed;
3862 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
3863
3864 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
3865 handle_cursor_update(plane, old_plane_state);
3866 continue;
3867 }
3868
3869 if (!fb || !crtc || pcrtc != crtc)
3870 continue;
3871
3872 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
3873 if (!new_crtc_state->active)
3874 continue;
3875
3876 pflip_needed = !state->allow_modeset;
3877
3878 spin_lock_irqsave(&crtc->dev->event_lock, flags);
3879 if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
3880 DRM_ERROR("%s: acrtc %d, already busy\n",
3881 __func__,
3882 acrtc_attach->crtc_id);
3883 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
3884 /* In commit tail framework this cannot happen */
3885 WARN_ON(1);
3886 }
3887 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
3888
3889 if (!pflip_needed) {
3890 WARN_ON(!dm_new_plane_state->dc_state);
3891
3892 plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
3893
3894 dc_stream_attach = acrtc_state->stream;
3895 planes_count++;
3896
3897 } else if (new_crtc_state->planes_changed) {
3898 /* Assume that even ONE crtc with an immediate flip means
3899 * the entire commit can't wait for VBLANK.
3900 * TODO: Check whether this is correct.
3901 */
3902 *wait_for_vblank =
3903 !(new_pcrtc_state->pageflip_flags &
3904 DRM_MODE_PAGE_FLIP_ASYNC);
3905
3906 /* TODO: Needs rework for multiplane flip */
3907 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
3908 drm_crtc_vblank_get(crtc);
3909
3910 amdgpu_dm_do_flip(
3911 crtc,
3912 fb,
3913 drm_crtc_vblank_count(crtc) + *wait_for_vblank,
3914 dm_state->context);
3915 }
3916
3917 }
3918
3919 if (planes_count) {
3920 unsigned long flags;
3921
3922 if (new_pcrtc_state->event) {
3923
3924 drm_crtc_vblank_get(pcrtc);
3925
3926 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
3927 prepare_flip_isr(acrtc_attach);
3928 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
3929 }
3930
3931 if (!dc_commit_planes_to_stream(dm->dc,
3932 plane_states_constructed,
3933 planes_count,
3934 dc_stream_attach,
3935 dm_state->context))
3936 dm_error("%s: Failed to attach plane!\n", __func__);
3937 } else {
3938 /* TODO: BUG: plane disable on this CRTC should go here. */
3939 }
3940 }
3941
3942
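/*
 * Thin wrapper around drm_atomic_helper_commit() that first disables DM
 * interrupts on every crtc undergoing a modeset, so the ISRs never
 * observe a half-updated dm_crtc_state->stream pointer.
 */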
3943 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
3944 struct drm_atomic_state *state,
3945 bool nonblock)
3946 {
3947 struct drm_crtc *crtc;
3948 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
3949 struct amdgpu_device *adev = dev->dev_private;
3950 int i;
3951
3952 /*
3953 * Disable vblanks and pflips on any crtc that is about to
3954 * be changed. We do it here to flush & disable the
3955 * interrupts before drm_swap_state is called in drm_atomic_helper_commit,
3956 * which updates the crtc->dm_crtc_state->stream pointer used in
3957 * the ISRs.
3958 */
3959 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
3960 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
3961 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3962
3963 if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
3964 manage_dm_interrupts(adev, acrtc, false);
3965 }
3966 /* TODO: Add a check here for SoCs that support a hardware cursor
3967 * plane, to unset legacy_cursor_update */
3968
3969 return drm_atomic_helper_commit(dev, state, nonblock);
3970
3971 /*TODO Handle EINTR, reenable IRQ*/
3972 }
3973
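/*
 * Commit-tail implementation: commits the new DC state, re-registers
 * streams with the freesync module, applies scaling/underscan changes,
 * re-enables interrupts on newly set crtcs, commits planes per crtc and
 * finally signals and cleans up the DRM commit machinery.
 */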
3974 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
3975 {
3976 struct drm_device *dev = state->dev;
3977 struct amdgpu_device *adev = dev->dev_private;
3978 struct amdgpu_display_manager *dm = &adev->dm;
3979 struct dm_atomic_state *dm_state;
3980 uint32_t i, j;
3981 uint32_t new_crtcs_count = 0;
3982 struct drm_crtc *crtc;
3983 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
3984 struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
3985 struct dc_stream_state *new_stream = NULL;
3986 unsigned long flags;
3987 bool wait_for_vblank = true;
3988 struct drm_connector *connector;
3989 struct drm_connector_state *old_con_state, *new_con_state;
3990 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
3991
3992 drm_atomic_helper_update_legacy_modeset_state(dev, state);
3993
3994 dm_state = to_dm_atomic_state(state);
3995
3996 /* update changed items */
3997 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
3998 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3999
4000 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4001 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4002
4003 DRM_DEBUG_DRIVER(
4004 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4005 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4006 "connectors_changed:%d\n",
4007 acrtc->crtc_id,
4008 new_crtc_state->enable,
4009 new_crtc_state->active,
4010 new_crtc_state->planes_changed,
4011 new_crtc_state->mode_changed,
4012 new_crtc_state->active_changed,
4013 new_crtc_state->connectors_changed);
4014
4015 /* Handles the headless hotplug case, updating new_state and
4016 * aconnector as needed.
4017 */
4018
4019 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
4020
4021 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
4022
4023 if (!dm_new_crtc_state->stream) {
4024 /*
4025 * This could happen because of issues with
4026 * userspace notification delivery.
4027 * In this case userspace tries to set a mode on
4028 * a display which is in fact disconnected.
4029 * dc_sink is NULL on the aconnector in this case.
4030 * We expect a mode reset to come soon.
4031 *
4032 * This can also happen when an unplug is done
4033 * during the resume sequence.
4034 *
4035 * In this case, we want to pretend we still
4036 * have a sink to keep the pipe running so that
4037 * hw state is consistent with the sw state.
4038 */
4039 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4040 __func__, acrtc->base.base.id);
4041 continue;
4042 }
4043
4044
4045 if (dm_old_crtc_state->stream)
4046 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4047
4048
4049 /*
4050 * This loop saves the crtcs that had their mode set;
4051 * we need to enable vblanks on them once all resources
4052 * are acquired in dc after dc_commit_streams.
4053 */
4054
4055 /* TODO: Move all this into dm_crtc_state, get rid of the
4056 * new_crtcs array and use the old and new atomic states
4057 * instead.
4058 */
4059 new_crtcs[new_crtcs_count] = acrtc;
4060 new_crtcs_count++;
4061
4062 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
4063 acrtc->enabled = true;
4064 acrtc->hw_mode = new_crtc_state->mode;
4065 crtc->hwmode = new_crtc_state->mode;
4066 } else if (modereset_required(new_crtc_state)) {
4067 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
4068
4069 /* i.e. reset mode */
4070 if (dm_old_crtc_state->stream)
4071 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4072 }
4073 } /* for_each_crtc_in_state() */
4074
4075 /*
4076 * Add streams after required streams from new and replaced streams
4077 * are removed from freesync module
4078 */
4079 if (adev->dm.freesync_module) {
4080 for (i = 0; i < new_crtcs_count; i++) {
4081 struct amdgpu_dm_connector *aconnector = NULL;
4082
4083 new_crtc_state = drm_atomic_get_new_crtc_state(state,
4084 &new_crtcs[i]->base);
4085 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4086
4087 new_stream = dm_new_crtc_state->stream;
4088 aconnector = amdgpu_dm_find_first_crtc_matching_connector(
4089 state,
4090 &new_crtcs[i]->base);
4091 if (!aconnector) {
4092 DRM_DEBUG_DRIVER("Atomic commit: Failed to find connector for acrtc id:%d "
4093 "skipping freesync init\n",
4094 new_crtcs[i]->crtc_id);
4095 continue;
4096 }
4097
4098 mod_freesync_add_stream(adev->dm.freesync_module,
4099 new_stream, &aconnector->caps);
4100 }
4101 }
4102
4103 if (dm_state->context)
4104 WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
4105
4106 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4107 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4108
4109 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4110
4111 if (dm_new_crtc_state->stream != NULL) {
4112 const struct dc_stream_status *status =
4113 dc_stream_get_status(dm_new_crtc_state->stream);
4114
4115 if (!status)
4116 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
4117 else
4118 acrtc->otg_inst = status->primary_otg_inst;
4119 }
4120 }
4121
4122 /* Handle scaling and underscan changes */
4123 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
4124 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
4125 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
4126 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4127 struct dc_stream_status *status = NULL;
4128
4129 if (acrtc)
4130 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
4131
4132 /* Skip any modesets/resets */
4133 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
4134 continue;
4135
4136 /* Skip anything that isn't a scaling or underscan change */
4137 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4138 continue;
4139
4140 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4141
4142 if (!dm_new_crtc_state->stream)
4143 continue;
4144
4145 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
4146 dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
4147
4148 status = dc_stream_get_status(dm_new_crtc_state->stream);
4149 WARN_ON(!status);
4150 WARN_ON(!status->plane_count);
4151
4152 /* TODO: How does this work with MPO? */
4153 if (!dc_commit_planes_to_stream(
4154 dm->dc,
4155 status->plane_states,
4156 status->plane_count,
4157 dm_new_crtc_state->stream,
4158 dm_state->context))
4159 dm_error("%s: Failed to update stream scaling!\n", __func__);
4160 }
4161
4162 for (i = 0; i < new_crtcs_count; i++) {
4163 /*
4164 * Loop to enable interrupts on the newly arrived crtcs.
4165 */
4166 struct amdgpu_crtc *acrtc = new_crtcs[i];
4167
4168 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
4169 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4170
4171 if (adev->dm.freesync_module)
4172 mod_freesync_notify_mode_change(
4173 adev->dm.freesync_module, &dm_new_crtc_state->stream, 1);
4174
4175 manage_dm_interrupts(adev, acrtc, true);
4176 }
4177
4178 /* update planes when needed per crtc */
4179 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
4180 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4181
4182 if (dm_new_crtc_state->stream)
4183 amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
4184 }
4185
4186
4187 /*
4188 * send vblank event on all events not handled in flip and
4189 * mark consumed event for drm_atomic_helper_commit_hw_done
4190 */
4191 spin_lock_irqsave(&adev->ddev->event_lock, flags);
4192 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4193
4194 if (new_crtc_state->event)
4195 drm_send_event_locked(dev, &new_crtc_state->event->base);
4196
4197 new_crtc_state->event = NULL;
4198 }
4199 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
4200
4201 /* Signal HW programming completion */
4202 drm_atomic_helper_commit_hw_done(state);
4203
4204 if (wait_for_vblank)
4205 drm_atomic_helper_wait_for_vblanks(dev, state);
4206
4207 drm_atomic_helper_cleanup_planes(dev, state);
4208 }
4209
4210
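/*
 * Builds a minimal atomic state (connector, crtc and primary plane) with
 * mode_changed forced on, then commits it to restore the previous
 * display configuration without involving userspace.
 */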
4211 static int dm_force_atomic_commit(struct drm_connector *connector)
4212 {
4213 int ret = 0;
4214 struct drm_device *ddev = connector->dev;
4215 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
4216 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4217 struct drm_plane *plane = disconnected_acrtc->base.primary;
4218 struct drm_connector_state *conn_state;
4219 struct drm_crtc_state *crtc_state;
4220 struct drm_plane_state *plane_state;
4221
4222 if (!state)
4223 return -ENOMEM;
4224
4225 state->acquire_ctx = ddev->mode_config.acquire_ctx;
4226
4227 /* Construct an atomic state to restore the previous display settings */
4228
4229 /*
4230 * Attach connectors to drm_atomic_state
4231 */
4232 conn_state = drm_atomic_get_connector_state(state, connector);
4233
4234 ret = PTR_ERR_OR_ZERO(conn_state);
4235 if (ret)
4236 goto err;
4237
4238 /* Attach crtc to drm_atomic_state*/
4239 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
4240
4241 ret = PTR_ERR_OR_ZERO(crtc_state);
4242 if (ret)
4243 goto err;
4244
4245 /* force a restore */
4246 crtc_state->mode_changed = true;
4247
4248 /* Attach plane to drm_atomic_state */
4249 plane_state = drm_atomic_get_plane_state(state, plane);
4250
4251 ret = PTR_ERR_OR_ZERO(plane_state);
4252 if (ret)
4253 goto err;
4254
4255
4256 /* Call commit internally with the state we just constructed */
4257 ret = drm_atomic_commit(state);
4258 if (!ret)
4259 return 0;
4260
4261 err:
4262 DRM_ERROR("Restoring old state failed with %i\n", ret);
4263 drm_atomic_state_put(state);
4264
4265 return ret;
4266 }
4267
4268 /*
4269 * This function handles all cases when a set mode does not come upon hotplug.
4270 * This includes when the same display is unplugged then plugged back into the
4271 * same port and when we are running without usermode desktop manager support.
4272 */
4273 void dm_restore_drm_connector_state(struct drm_device *dev,
4274 struct drm_connector *connector)
4275 {
4276 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4277 struct amdgpu_crtc *disconnected_acrtc;
4278 struct dm_crtc_state *acrtc_state;
4279
4280 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
4281 return;
4282
4283 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4284 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
4285
4286 if (!disconnected_acrtc || !acrtc_state->stream)
4287 return;
4288
4289 /*
4290 * If the previous sink is not released and is different from the current
4291 * one, we deduce we are in a state where we cannot rely on a usermode call
4292 * to turn on the display, so we do it here.
4293 */
4294 if (acrtc_state->stream->sink != aconnector->dc_sink)
4295 dm_force_atomic_commit(&aconnector->base);
4296 }
4297
4298 /*
4299 * Grabs all modesetting locks to serialize against any blocking commits,
4300 * and waits for completion of all non-blocking commits.
4301 */
4302 static int do_aquire_global_lock(struct drm_device *dev,
4303 struct drm_atomic_state *state)
4304 {
4305 struct drm_crtc *crtc;
4306 struct drm_crtc_commit *commit;
4307 long ret;
4308
4309 /* Adding all modeset locks to acquire_ctx will
4310 * ensure that when the framework releases it, the
4311 * extra locks we are locking here will get released too.
4312 */
4313 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
4314 if (ret)
4315 return ret;
4316
4317 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4318 spin_lock(&crtc->commit_lock);
4319 commit = list_first_entry_or_null(&crtc->commit_list,
4320 struct drm_crtc_commit, commit_entry);
4321 if (commit)
4322 drm_crtc_commit_get(commit);
4323 spin_unlock(&crtc->commit_lock);
4324
4325 if (!commit)
4326 continue;
4327
4328 /* Make sure all pending HW programming has completed and
4329 * all page flips are done.
4330 */
4331 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
4332
4333 if (ret > 0)
4334 ret = wait_for_completion_interruptible_timeout(
4335 &commit->flip_done, 10*HZ);
4336
4337 if (ret == 0)
4338 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
4339 "timed out\n", crtc->base.id, crtc->name);
4340
4341 drm_crtc_commit_put(commit);
4342 }
4343
4344 return ret < 0 ? ret : 0;
4345 }
4346
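/*
 * Atomic-check helper: with enable == false it removes the stream of
 * every changed/disabled crtc from dm_state->context; with enable == true
 * it creates the new streams and adds them. Sets
 * *lock_and_validation_needed whenever the DC context was changed.
 */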
4347 static int dm_update_crtcs_state(struct dc *dc,
4348 struct drm_atomic_state *state,
4349 bool enable,
4350 bool *lock_and_validation_needed)
4351 {
4352 struct drm_crtc *crtc;
4353 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4354 int i;
4355 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
4356 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4357 struct dc_stream_state *new_stream;
4358 int ret = 0;
4359
4360 /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
4361 /* update changed items */
4362 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4363 struct amdgpu_crtc *acrtc = NULL;
4364 struct amdgpu_dm_connector *aconnector = NULL;
4365 struct drm_connector_state *new_con_state = NULL;
4366 struct dm_connector_state *dm_conn_state = NULL;
4367
4368 new_stream = NULL;
4369
4370 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4371 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4372 acrtc = to_amdgpu_crtc(crtc);
4373
4374 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
4375
4376 /* TODO This hack should go away */
4377 if (aconnector && enable) {
4378 /* Make sure a fake sink is created in the plug-in scenario */
4379 new_con_state = drm_atomic_get_connector_state(state,
4380 &aconnector->base);
4381
4382 if (IS_ERR(new_con_state)) {
4383 ret = PTR_ERR_OR_ZERO(new_con_state);
4384 break;
4385 }
4386
4387 dm_conn_state = to_dm_connector_state(new_con_state);
4388
4389 new_stream = create_stream_for_sink(aconnector,
4390 &new_crtc_state->mode,
4391 dm_conn_state);
4392
4393 /*
4394 * We can have no stream on ACTION_SET if a display
4395 * was disconnected during S3; in this case it is not an
4396 * error, the OS will be updated after detection and
4397 * will do the right thing on the next atomic commit.
4398 */
4399
4400 if (!new_stream) {
4401 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4402 __func__, acrtc->base.base.id);
4403 break;
4404 }
4405 }
4406
4407 if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4408 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
4409
4410 new_crtc_state->mode_changed = false;
4411
4412 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
4413 new_crtc_state->mode_changed);
4414 }
4415
4416
4417 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
4418 goto next_crtc;
4419
4420 DRM_DEBUG_DRIVER(
4421 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4422 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4423 "connectors_changed:%d\n",
4424 acrtc->crtc_id,
4425 new_crtc_state->enable,
4426 new_crtc_state->active,
4427 new_crtc_state->planes_changed,
4428 new_crtc_state->mode_changed,
4429 new_crtc_state->active_changed,
4430 new_crtc_state->connectors_changed);
4431
4432 /* Remove stream for any changed/disabled CRTC */
4433 if (!enable) {
4434
4435 if (!dm_old_crtc_state->stream)
4436 goto next_crtc;
4437
4438 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
4439 crtc->base.id);
4440
4441 /* i.e. reset mode */
4442 if (dc_remove_stream_from_ctx(
4443 dc,
4444 dm_state->context,
4445 dm_old_crtc_state->stream) != DC_OK) {
4446 ret = -EINVAL;
4447 goto fail;
4448 }
4449
4450 dc_stream_release(dm_old_crtc_state->stream);
4451 dm_new_crtc_state->stream = NULL;
4452
4453 *lock_and_validation_needed = true;
4454
4455 } else {/* Add stream for any updated/enabled CRTC */
4456 /*
4457 * Quick fix to prevent a NULL pointer on new_stream when newly
4458 * added MST connectors are not found in the existing crtc_state in chained mode.
4459 * TODO: need to dig out the root cause of this.
4460 */
4461 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
4462 goto next_crtc;
4463
4464 if (modereset_required(new_crtc_state))
4465 goto next_crtc;
4466
4467 if (modeset_required(new_crtc_state, new_stream,
4468 dm_old_crtc_state->stream)) {
4469
4470 WARN_ON(dm_new_crtc_state->stream);
4471
4472 dm_new_crtc_state->stream = new_stream;
4473 dc_stream_retain(new_stream);
4474
4475 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
4476 crtc->base.id);
4477
4478 if (dc_add_stream_to_ctx(
4479 dc,
4480 dm_state->context,
4481 dm_new_crtc_state->stream) != DC_OK) {
4482 ret = -EINVAL;
4483 goto fail;
4484 }
4485
4486 *lock_and_validation_needed = true;
4487 }
4488 }
4489
4490 next_crtc:
4491 /* Release extra reference */
4492 if (new_stream)
4493 dc_stream_release(new_stream);
4494 }
4495
4496 return ret;
4497
4498 fail:
4499 if (new_stream)
4500 dc_stream_release(new_stream);
4501 return ret;
4502 }
4503
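/*
 * Plane-side counterpart of dm_update_crtcs_state(): removes old
 * dc_plane_states from the context (enable == false) or creates, fills
 * and adds new ones (enable == true). Skipped entirely for pure page
 * flips, which do not require a context rebuild.
 */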
4504 static int dm_update_planes_state(struct dc *dc,
4505 struct drm_atomic_state *state,
4506 bool enable,
4507 bool *lock_and_validation_needed)
4508 {
4509 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
4510 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4511 struct drm_plane *plane;
4512 struct drm_plane_state *old_plane_state, *new_plane_state;
4513 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
4514 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4515 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
4516 int i;
4517 /* TODO return page_flip_needed() function */
4518 bool pflip_needed = !state->allow_modeset;
4519 int ret = 0;
4520
4521 if (pflip_needed)
4522 return ret;
4523
4524 /* Add new planes */
4525 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4526 new_plane_crtc = new_plane_state->crtc;
4527 old_plane_crtc = old_plane_state->crtc;
4528 dm_new_plane_state = to_dm_plane_state(new_plane_state);
4529 dm_old_plane_state = to_dm_plane_state(old_plane_state);
4530
4531 /*TODO Implement atomic check for cursor plane */
4532 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4533 continue;
4534
4535 /* Remove any changed/removed planes */
4536 if (!enable) {
4537
4538 if (!old_plane_crtc)
4539 continue;
4540
4541 old_crtc_state = drm_atomic_get_old_crtc_state(
4542 state, old_plane_crtc);
4543 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4544
4545 if (!dm_old_crtc_state->stream)
4546 continue;
4547
4548 DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
4549 plane->base.id, old_plane_crtc->base.id);
4550
4551 if (!dc_remove_plane_from_context(
4552 dc,
4553 dm_old_crtc_state->stream,
4554 dm_old_plane_state->dc_state,
4555 dm_state->context)) {
4556
4557 ret = -EINVAL;
4558 return ret;
4559 }
4560
4561
4562 dc_plane_state_release(dm_old_plane_state->dc_state);
4563 dm_new_plane_state->dc_state = NULL;
4564
4565 *lock_and_validation_needed = true;
4566
4567 } else { /* Add new planes */
4568
4569 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
4570 continue;
4571
4572 if (!new_plane_crtc)
4573 continue;
4574
4575 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
4576 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4577
4578 if (!dm_new_crtc_state->stream)
4579 continue;
4580
4581
4582 WARN_ON(dm_new_plane_state->dc_state);
4583
4584 dm_new_plane_state->dc_state = dc_create_plane_state(dc);
4585
4586 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4587 plane->base.id, new_plane_crtc->base.id);
4588
4589 if (!dm_new_plane_state->dc_state) {
4590 ret = -EINVAL;
4591 return ret;
4592 }
4593
4594 ret = fill_plane_attributes(
4595 new_plane_crtc->dev->dev_private,
4596 dm_new_plane_state->dc_state,
4597 new_plane_state,
4598 new_crtc_state,
4599 false);
4600 if (ret)
4601 return ret;
4602
4603
4604 if (!dc_add_plane_to_context(
4605 dc,
4606 dm_new_crtc_state->stream,
4607 dm_new_plane_state->dc_state,
4608 dm_state->context)) {
4609
4610 ret = -EINVAL;
4611 return ret;
4612 }
4613
4614 *lock_and_validation_needed = true;
4615 }
4616 }
4617
4618
4619 return ret;
4620 }
4621
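/*
 * Top-level atomic check: runs the DRM helper checks, builds a scratch
 * DC context from the current one, replays the requested plane/crtc
 * removals and additions into it and, if anything non-trivial changed,
 * takes the global lock and has DC validate the resulting state.
 */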
4622 static int amdgpu_dm_atomic_check(struct drm_device *dev,
4623 struct drm_atomic_state *state)
4624 {
4625 int i;
4626 int ret;
4627 struct amdgpu_device *adev = dev->dev_private;
4628 struct dc *dc = adev->dm.dc;
4629 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4630 struct drm_connector *connector;
4631 struct drm_connector_state *old_con_state, *new_con_state;
4632 struct drm_crtc *crtc;
4633 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4634
4635 /*
4636 * This bool will be set to true for any modeset/reset
4637 * or plane update which implies a non-fast surface update.
4638 */
4639 bool lock_and_validation_needed = false;
4640
4641 ret = drm_atomic_helper_check_modeset(dev, state);
4642 if (ret) {
4643 DRM_ERROR("Atomic state validation failed with error: %d\n", ret);
4644 return ret;
4645 }
4646
4647 /*
4648 * legacy_cursor_update should be made false for SoC's having
4649 * a dedicated hardware plane for cursor in amdgpu_dm_atomic_commit(),
4650 * otherwise for software cursor plane,
4651 * we should not add it to list of affected planes.
4652 */
4653 if (state->legacy_cursor_update) {
4654 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4655 if (new_crtc_state->color_mgmt_changed) {
4656 ret = drm_atomic_add_affected_planes(state, crtc);
4657 if (ret)
4658 goto fail;
4659 }
4660 }
4661 } else {
4662 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4663 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
4664 continue;
4665
4666 if (!new_crtc_state->enable)
4667 continue;
4668
4669 ret = drm_atomic_add_affected_connectors(state, crtc);
4670 if (ret)
4671 return ret;
4672
4673 ret = drm_atomic_add_affected_planes(state, crtc);
4674 if (ret)
4675 goto fail;
4676 }
4677 }
4678
4679 dm_state->context = dc_create_state();
4680 ASSERT(dm_state->context);
4681 dc_resource_state_copy_construct_current(dc, dm_state->context);
4682
4683 /* Remove existing planes if they are modified */
4684 ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
4685 if (ret) {
4686 goto fail;
4687 }
4688
4689 /* Disable all crtcs which require disable */
4690 ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
4691 if (ret) {
4692 goto fail;
4693 }
4694
4695 /* Enable all crtcs which require enable */
4696 ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
4697 if (ret) {
4698 goto fail;
4699 }
4700
4701 /* Add new/modified planes */
4702 ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
4703 if (ret) {
4704 goto fail;
4705 }
4706
4707 /* Run this here since we want to validate the streams we created */
4708 ret = drm_atomic_helper_check_planes(dev, state);
4709 if (ret)
4710 goto fail;
4711
4712 /* Check scaling and underscan changes */
4713 /* TODO: Removed scaling changes validation due to the inability to commit
4714 * a new stream into the context w/o causing a full reset. Need to
4715 * decide how to handle this.
4716 */
4717 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
4718 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
4719 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
4720 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4721
4722 /* Skip any modesets/resets */
4723 if (!acrtc || drm_atomic_crtc_needs_modeset(
4724 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
4725 continue;
4726
4727 /* Skip anything that isn't a scaling or underscan change */
4728 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4729 continue;
4730
4731 lock_and_validation_needed = true;
4732 }
4733
4734 /*
4735 * For the full-update case, when
4736 * removing/adding/updating streams on one CRTC while flipping
4737 * on another CRTC,
4738 * acquiring the global lock will guarantee that any such full
4739 * update commit
4740 * will wait for completion of any outstanding flip using DRM's
4741 * synchronization events.
4742 */
4743
4744 if (lock_and_validation_needed) {
4745
4746 ret = do_aquire_global_lock(dev, state);
4747 if (ret)
4748 goto fail;
4749
4750 if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
4751 ret = -EINVAL;
4752 goto fail;
4753 }
4754 }
4755
4756 /* Must be a success at this point */
4757 WARN_ON(ret);
4758 return ret;
4759
4760 fail:
4761 if (ret == -EDEADLK)
4762 DRM_DEBUG_DRIVER("Atomic check stopped due to deadlock.\n");
4763 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
4764 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
4765 else
4766 DRM_ERROR("Atomic check failed with err: %d\n", ret);
4767
4768 return ret;
4769 }
4770
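/*
 * Reads DP_DOWN_STREAM_PORT_COUNT over DPCD and reports whether the sink
 * can ignore the MSA timing parameters; used below to decide whether the
 * freesync EDID check is worth performing.
 */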
4771 static bool is_dp_capable_without_timing_msa(struct dc *dc,
4772 struct amdgpu_dm_connector *amdgpu_dm_connector)
4773 {
4774 uint8_t dpcd_data;
4775 bool capable = false;
4776
4777 if (amdgpu_dm_connector->dc_link &&
4778 dm_helpers_dp_read_dpcd(
4779 NULL,
4780 amdgpu_dm_connector->dc_link,
4781 DP_DOWN_STREAM_PORT_COUNT,
4782 &dpcd_data,
4783 sizeof(dpcd_data))) {
4784 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
4785 }
4786
4787 return capable;
4788 }
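
/*
 * Parses the EDID detailed timings for a continuous-frequency range
 * descriptor and, when the refresh range is wide enough (more than
 * 10 Hz), records the sink's freesync capabilities on the connector.
 */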
4789 void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
4790 struct edid *edid)
4791 {
4792 int i;
4793 uint64_t val_capable;
4794 bool edid_check_required;
4795 struct detailed_timing *timing;
4796 struct detailed_non_pixel *data;
4797 struct detailed_data_monitor_range *range;
4798 struct amdgpu_dm_connector *amdgpu_dm_connector =
4799 to_amdgpu_dm_connector(connector);
4800
4801 struct drm_device *dev = connector->dev;
4802 struct amdgpu_device *adev = dev->dev_private;
4803
4804 edid_check_required = false;
4805 if (!amdgpu_dm_connector->dc_sink) {
4806 DRM_ERROR("dc_sink NULL, could not add freesync module.\n");
4807 return;
4808 }
4809 if (!adev->dm.freesync_module)
4810 return;
4811 /*
4812 * If the edid is non-zero, restrict freesync to DP and eDP only.
4813 */
4814 if (edid) {
4815 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
4816 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
4817 edid_check_required = is_dp_capable_without_timing_msa(
4818 adev->dm.dc,
4819 amdgpu_dm_connector);
4820 }
4821 }
4822 val_capable = 0;
4823 if (edid_check_required && (edid->version > 1 ||
4824 (edid->version == 1 && edid->revision > 1))) {
4825 for (i = 0; i < 4; i++) {
4826
4827 timing = &edid->detailed_timings[i];
4828 data = &timing->data.other_data;
4829 range = &data->data.range;
4830 /*
4831 * Check if the monitor has a continuous frequency mode.
4832 */
4833 if (data->type != EDID_DETAIL_MONITOR_RANGE)
4834 continue;
4835 /*
4836 * Check for flag range limits only. If flag == 1 then
4837 * no additional timing information is provided.
4838 * Default GTF, GTF secondary curve and CVT are not
4839 * supported.
4840 */
4841 if (range->flags != 1)
4842 continue;
4843
4844 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
4845 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
4846 amdgpu_dm_connector->pixel_clock_mhz =
4847 range->pixel_clock_mhz * 10;
4848 break;
4849 }
4850
4851 if (amdgpu_dm_connector->max_vfreq -
4852 amdgpu_dm_connector->min_vfreq > 10) {
4853 amdgpu_dm_connector->caps.supported = true;
4854 amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
4855 amdgpu_dm_connector->min_vfreq * 1000000;
4856 amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
4857 amdgpu_dm_connector->max_vfreq * 1000000;
4858 val_capable = 1;
4859 }
4860 }
4861
4862 /*
4863 * TODO figure out how to notify user-mode or DRM of freesync caps
4864 * once we figure out how to deal with freesync in an upstreamable
4865 * fashion
4866 */
4867
4868 }
4869
4870 void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
4871 {
4872 /*
4873 * TODO fill in once we figure out how to deal with freesync in
4874 * an upstreamable fashion
4875 */
4876 }