1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include "dm_services_types.h"
27 #include "dc.h"
28 #include "dc/inc/core_types.h"
29
30 #include "vid.h"
31 #include "amdgpu.h"
32 #include "amdgpu_display.h"
33 #include "atom.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
36
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
40 #include "dm_services_types.h"
41 #include "amdgpu_dm_mst_types.h"
42
43 #include "ivsrcid/ivsrcid_vislands30.h"
44
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/version.h>
48 #include <linux/types.h>
49
50 #include <drm/drmP.h>
51 #include <drm/drm_atomic.h>
52 #include <drm/drm_atomic_helper.h>
53 #include <drm/drm_dp_mst_helper.h>
54 #include <drm/drm_fb_helper.h>
55 #include <drm/drm_edid.h>
56
57 #include "modules/inc/mod_freesync.h"
58
59 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
60 #include "ivsrcid/irqsrcs_dcn_1_0.h"
61
62 #include "raven1/DCN/dcn_1_0_offset.h"
63 #include "raven1/DCN/dcn_1_0_sh_mask.h"
64 #include "vega10/soc15ip.h"
65
66 #include "soc15_common.h"
67 #endif
68
69 #include "modules/inc/mod_freesync.h"
70
71 #include "i2caux_interface.h"
72
73 /* basic init/fini API */
74 static int amdgpu_dm_init(struct amdgpu_device *adev);
75 static void amdgpu_dm_fini(struct amdgpu_device *adev);
76
77 /* initializes drm_device display related structures, based on the information
78 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
79 * drm_encoder, drm_mode_config
80 *
81 * Returns 0 on success
82 */
83 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
84 /* removes and deallocates the drm structures, created by the above function */
85 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
86
87 static void
88 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
89
90 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
91 struct amdgpu_plane *aplane,
92 unsigned long possible_crtcs);
93 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
94 struct drm_plane *plane,
95 uint32_t link_index);
96 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
97 struct amdgpu_dm_connector *amdgpu_dm_connector,
98 uint32_t link_index,
99 struct amdgpu_encoder *amdgpu_encoder);
100 static int amdgpu_dm_encoder_init(struct drm_device *dev,
101 struct amdgpu_encoder *aencoder,
102 uint32_t link_index);
103
104 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
105
106 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
107 struct drm_atomic_state *state,
108 bool nonblock);
109
110 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
111
112 static int amdgpu_dm_atomic_check(struct drm_device *dev,
113 struct drm_atomic_state *state);
114
115
116
117
118 static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
119 DRM_PLANE_TYPE_PRIMARY,
120 DRM_PLANE_TYPE_PRIMARY,
121 DRM_PLANE_TYPE_PRIMARY,
122 DRM_PLANE_TYPE_PRIMARY,
123 DRM_PLANE_TYPE_PRIMARY,
124 DRM_PLANE_TYPE_PRIMARY,
125 };
126
127 static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
128 DRM_PLANE_TYPE_PRIMARY,
129 DRM_PLANE_TYPE_PRIMARY,
130 DRM_PLANE_TYPE_PRIMARY,
131 DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
132 };
133
134 static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
135 DRM_PLANE_TYPE_PRIMARY,
136 DRM_PLANE_TYPE_PRIMARY,
137 DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
138 };
139
140 /*
141 * dm_vblank_get_counter
142 *
143 * @brief
144 * Get counter for number of vertical blanks
145 *
146 * @param
147 * struct amdgpu_device *adev - [in] desired amdgpu device
148 * int crtc - [in] which CRTC to get the counter from
149 *
150 * @return
151 * Counter for vertical blanks
152 */
153 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
154 {
155 if (crtc >= adev->mode_info.num_crtc)
156 return 0;
157 else {
158 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
159 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
160 acrtc->base.state);
161
162
163 if (acrtc_state->stream == NULL) {
164 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
165 crtc);
166 return 0;
167 }
168
169 return dc_stream_get_vblank_counter(acrtc_state->stream);
170 }
171 }
172
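/*
 * Get the current scanout position and vblank window for a CRTC.
 * The values from DC are packed back into the legacy register-style
 * layout: *position holds v_position | (h_position << 16) and
 * *vbl holds v_blank_start | (v_blank_end << 16).
 */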
173 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
174 u32 *vbl, u32 *position)
175 {
176 uint32_t v_blank_start, v_blank_end, h_position, v_position;
177
178 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
179 return -EINVAL;
180 else {
181 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
182 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
183 acrtc->base.state);
184
185 if (acrtc_state->stream == NULL) {
186 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
187 crtc);
188 return 0;
189 }
190
191 /*
192 * TODO rework base driver to use values directly.
193 * for now parse it back into reg-format
194 */
195 dc_stream_get_scanoutpos(acrtc_state->stream,
196 &v_blank_start,
197 &v_blank_end,
198 &h_position,
199 &v_position);
200
201 *position = v_position | (h_position << 16);
202 *vbl = v_blank_start | (v_blank_end << 16);
203 }
204
205 return 0;
206 }
207
208 static bool dm_is_idle(void *handle)
209 {
210 /* XXX todo */
211 return true;
212 }
213
214 static int dm_wait_for_idle(void *handle)
215 {
216 /* XXX todo */
217 return 0;
218 }
219
220 static bool dm_check_soft_reset(void *handle)
221 {
222 return false;
223 }
224
225 static int dm_soft_reset(void *handle)
226 {
227 /* XXX todo */
228 return 0;
229 }
230
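/*
 * Walk the CRTC list and return the amdgpu_crtc whose OTG instance
 * matches otg_inst. An otg_inst of -1 is unexpected (see the comment
 * below) and falls back to the first CRTC with a warning.
 */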
231 static struct amdgpu_crtc *
232 get_crtc_by_otg_inst(struct amdgpu_device *adev,
233 int otg_inst)
234 {
235 struct drm_device *dev = adev->ddev;
236 struct drm_crtc *crtc;
237 struct amdgpu_crtc *amdgpu_crtc;
238
239 /*
240 * The following check is inherited from both functions where this one is
241 * now used. It still needs to be determined why it can happen.
242 */
243 if (otg_inst == -1) {
244 WARN_ON(1);
245 return adev->mode_info.crtcs[0];
246 }
247
248 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
249 amdgpu_crtc = to_amdgpu_crtc(crtc);
250
251 if (amdgpu_crtc->otg_inst == otg_inst)
252 return amdgpu_crtc;
253 }
254
255 return NULL;
256 }
257
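/*
 * Page-flip completion interrupt handler. Looks up the CRTC for the
 * OTG instance, sends the pending vblank event to userspace under
 * event_lock, resets pflip_status to AMDGPU_FLIP_NONE and drops the
 * vblank reference taken when the flip was submitted.
 */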
258 static void dm_pflip_high_irq(void *interrupt_params)
259 {
260 struct amdgpu_crtc *amdgpu_crtc;
261 struct common_irq_params *irq_params = interrupt_params;
262 struct amdgpu_device *adev = irq_params->adev;
263 unsigned long flags;
264
265 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
266
267 /* IRQ could occur during the initial stage */
268 /* TODO: work and BO cleanup */
269 if (amdgpu_crtc == NULL) {
270 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
271 return;
272 }
273
274 spin_lock_irqsave(&adev->ddev->event_lock, flags);
275
276 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
277 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
278 amdgpu_crtc->pflip_status,
279 AMDGPU_FLIP_SUBMITTED,
280 amdgpu_crtc->crtc_id,
281 amdgpu_crtc);
282 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
283 return;
284 }
285
286
287 /* wake up userspace */
288 if (amdgpu_crtc->event) {
289 /* Update to correct count/ts if racing with vblank irq */
290 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
291
292 drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
293
294 /* page flip completed. clean up */
295 amdgpu_crtc->event = NULL;
296
297 } else
298 WARN_ON(1);
299
300 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
301 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
302
303 DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
304 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
305
306 drm_crtc_vblank_put(&amdgpu_crtc->base);
307 }
308
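/*
 * Vertical blank interrupt handler: map the OTG instance back to a
 * CRTC index and forward the event to drm_handle_vblank().
 */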
309 static void dm_crtc_high_irq(void *interrupt_params)
310 {
311 struct common_irq_params *irq_params = interrupt_params;
312 struct amdgpu_device *adev = irq_params->adev;
313 uint8_t crtc_index = 0;
314 struct amdgpu_crtc *acrtc;
315
316 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
317
318 if (acrtc)
319 crtc_index = acrtc->crtc_id;
320
321 drm_handle_vblank(adev->ddev, crtc_index);
322 }
323
324 static int dm_set_clockgating_state(void *handle,
325 enum amd_clockgating_state state)
326 {
327 return 0;
328 }
329
330 static int dm_set_powergating_state(void *handle,
331 enum amd_powergating_state state)
332 {
333 return 0;
334 }
335
336 /* Prototypes of private functions */
337 static int dm_early_init(void* handle);
338
339 static void hotplug_notify_work_func(struct work_struct *work)
340 {
341 struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
342 struct drm_device *dev = dm->ddev;
343
344 drm_kms_helper_hotplug_event(dev);
345 }
346
347 #ifdef ENABLE_FBC
348 #include "dal_asic_id.h"
349 /* Allocate memory for FBC compressed data */
350 /* TODO: Dynamic allocation */
351 #define AMDGPU_FBC_SIZE (3840 * 2160 * 4)
352
353 static void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev)
354 {
355 int r;
356 struct dm_comressor_info *compressor = &adev->dm.compressor;
357
358 if (!compressor->bo_ptr) {
359 r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE,
360 AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
361 &compressor->gpu_addr, &compressor->cpu_addr);
362
363 if (r)
364 DRM_ERROR("DM: Failed to initialize fbc\n");
365 }
366
367 }
368 #endif
369
370
371 /* Init display KMS
372 *
373 * Returns 0 on success
374 */
375 static int amdgpu_dm_init(struct amdgpu_device *adev)
376 {
377 struct dc_init_data init_data;
378 adev->dm.ddev = adev->ddev;
379 adev->dm.adev = adev;
380
381 /* Zero all the fields */
382 memset(&init_data, 0, sizeof(init_data));
383
384 /* initialize DAL's lock (for SYNC context use) */
385 spin_lock_init(&adev->dm.dal_lock);
386
387 /* initialize DAL's mutex */
388 mutex_init(&adev->dm.dal_mutex);
389
390 if(amdgpu_dm_irq_init(adev)) {
391 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
392 goto error;
393 }
394
395 init_data.asic_id.chip_family = adev->family;
396
397 init_data.asic_id.pci_revision_id = adev->rev_id;
398 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
399
400 init_data.asic_id.vram_width = adev->mc.vram_width;
401 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
402 init_data.asic_id.atombios_base_address =
403 adev->mode_info.atom_context->bios;
404
405 init_data.driver = adev;
406
407 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
408
409 if (!adev->dm.cgs_device) {
410 DRM_ERROR("amdgpu: failed to create cgs device.\n");
411 goto error;
412 }
413
414 init_data.cgs_device = adev->dm.cgs_device;
415
416 adev->dm.dal = NULL;
417
418 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
419
420 if (amdgpu_dc_log)
421 init_data.log_mask = DC_DEFAULT_LOG_MASK;
422 else
423 init_data.log_mask = DC_MIN_LOG_MASK;
424
425 #ifdef ENABLE_FBC
426 if (adev->family == FAMILY_CZ)
427 amdgpu_dm_initialize_fbc(adev);
428 init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr;
429 #endif
430 /* Display Core create. */
431 adev->dm.dc = dc_create(&init_data);
432
433 if (adev->dm.dc)
434 DRM_INFO("Display Core initialized!\n");
435 else
436 DRM_INFO("Display Core failed to initialize!\n");
437
438 INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
439
440 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
441 if (!adev->dm.freesync_module) {
442 DRM_ERROR(
443 "amdgpu: failed to initialize freesync_module.\n");
444 } else
445 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
446 adev->dm.freesync_module);
447
448 if (amdgpu_dm_initialize_drm_device(adev)) {
449 DRM_ERROR(
450 "amdgpu: failed to initialize sw for display support.\n");
451 goto error;
452 }
453
454 /* Update the number of CRTCs actually used */
455 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
456
457 /* TODO: Add_display_info? */
458
459 /* TODO use dynamic cursor width */
460 adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
461 adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
462
463 if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
464 DRM_ERROR(
465 "amdgpu: failed to initialize sw for display support.\n");
466 goto error;
467 }
468
469 DRM_DEBUG_DRIVER("KMS initialized.\n");
470
471 return 0;
472 error:
473 amdgpu_dm_fini(adev);
474
475 return -1;
476 }
477
478 static void amdgpu_dm_fini(struct amdgpu_device *adev)
479 {
480 amdgpu_dm_destroy_drm_device(&adev->dm);
481 /*
482 * TODO: pageflip, vblank interrupt
483 *
484 * amdgpu_dm_irq_fini(adev);
485 */
486
487 if (adev->dm.cgs_device) {
488 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
489 adev->dm.cgs_device = NULL;
490 }
491 if (adev->dm.freesync_module) {
492 mod_freesync_destroy(adev->dm.freesync_module);
493 adev->dm.freesync_module = NULL;
494 }
495 /* DC Destroy TODO: Replace destroy DAL */
496 if (adev->dm.dc)
497 dc_destroy(&adev->dm.dc);
498 return;
499 }
500
501 static int dm_sw_init(void *handle)
502 {
503 return 0;
504 }
505
506 static int dm_sw_fini(void *handle)
507 {
508 return 0;
509 }
510
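/*
 * Enable MST topology management on every connector whose DC link was
 * detected as an MST branch device. Called from late init, once the
 * links have been detected.
 */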
511 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
512 {
513 struct amdgpu_dm_connector *aconnector;
514 struct drm_connector *connector;
515 int ret = 0;
516
517 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
518
519 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
520 aconnector = to_amdgpu_dm_connector(connector);
521 if (aconnector->dc_link->type == dc_connection_mst_branch) {
522 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
523 aconnector, aconnector->base.base.id);
524
525 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
526 if (ret < 0) {
527 DRM_ERROR("DM_MST: Failed to start MST\n");
528 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
529 return ret;
530 }
531 }
532 }
533
534 drm_modeset_unlock(&dev->mode_config.connection_mutex);
535 return ret;
536 }
537
538 static int dm_late_init(void *handle)
539 {
540 struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
541
542 return detect_mst_link_for_all_connectors(dev);
543 }
544
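/*
 * Suspend or resume the MST topology manager of every root MST
 * connector (connectors that are not MST ports themselves) around S3.
 */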
545 static void s3_handle_mst(struct drm_device *dev, bool suspend)
546 {
547 struct amdgpu_dm_connector *aconnector;
548 struct drm_connector *connector;
549
550 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
551
552 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
553 aconnector = to_amdgpu_dm_connector(connector);
554 if (aconnector->dc_link->type == dc_connection_mst_branch &&
555 !aconnector->mst_port) {
556
557 if (suspend)
558 drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
559 else
560 drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
561 }
562 }
563
564 drm_modeset_unlock(&dev->mode_config.connection_mutex);
565 }
566
567 static int dm_hw_init(void *handle)
568 {
569 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
570 /* Create DAL display manager */
571 amdgpu_dm_init(adev);
572 amdgpu_dm_hpd_init(adev);
573
574 return 0;
575 }
576
577 static int dm_hw_fini(void *handle)
578 {
579 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
580
581 amdgpu_dm_hpd_fini(adev);
582
583 amdgpu_dm_irq_fini(adev);
584 amdgpu_dm_fini(adev);
585 return 0;
586 }
587
588 static int dm_suspend(void *handle)
589 {
590 struct amdgpu_device *adev = handle;
591 struct amdgpu_display_manager *dm = &adev->dm;
592 int ret = 0;
593
594 s3_handle_mst(adev->ddev, true);
595
596 amdgpu_dm_irq_suspend(adev);
597
598 WARN_ON(adev->dm.cached_state);
599 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
600
601 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
602
603 return ret;
604 }
605
606 static struct amdgpu_dm_connector *
607 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
608 struct drm_crtc *crtc)
609 {
610 uint32_t i;
611 struct drm_connector_state *new_con_state;
612 struct drm_connector *connector;
613 struct drm_crtc *crtc_from_state;
614
615 for_each_new_connector_in_state(state, connector, new_con_state, i) {
616 crtc_from_state = new_con_state->crtc;
617
618 if (crtc_from_state == crtc)
619 return to_amdgpu_dm_connector(connector);
620 }
621
622 return NULL;
623 }
624
625 static int dm_resume(void *handle)
626 {
627 struct amdgpu_device *adev = handle;
628 struct amdgpu_display_manager *dm = &adev->dm;
629
630 /* power on hardware */
631 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
632
633 return 0;
634 }
635
636 int amdgpu_dm_display_resume(struct amdgpu_device *adev)
637 {
638 struct drm_device *ddev = adev->ddev;
639 struct amdgpu_display_manager *dm = &adev->dm;
640 struct amdgpu_dm_connector *aconnector;
641 struct drm_connector *connector;
642 struct drm_crtc *crtc;
643 struct drm_crtc_state *new_crtc_state;
644 int ret = 0;
645 int i;
646
647 /* program HPD filter */
648 dc_resume(dm->dc);
649
650 /* On resume we need to rewrite the MSTM control bits to enable MST */
651 s3_handle_mst(ddev, false);
652
653 /*
654 * Enable HPD Rx IRQ early; this should be done before the mode set, since
655 * short-pulse interrupts are used for MST
656 */
657 amdgpu_dm_irq_resume_early(adev);
658
659 /* Do detection */
660 list_for_each_entry(connector,
661 &ddev->mode_config.connector_list, head) {
662 aconnector = to_amdgpu_dm_connector(connector);
663
664 /*
665 * this is the case when traversing through already created
666 * MST connectors; they should be skipped
667 */
668 if (aconnector->mst_port)
669 continue;
670
671 mutex_lock(&aconnector->hpd_lock);
672 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
673 aconnector->dc_sink = NULL;
674 amdgpu_dm_update_connector_after_detect(aconnector);
675 mutex_unlock(&aconnector->hpd_lock);
676 }
677
678 /* Force mode set in atomic commit */
679 for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
680 new_crtc_state->active_changed = true;
681
682 ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
683
684 drm_atomic_state_put(adev->dm.cached_state);
685 adev->dm.cached_state = NULL;
686
687 amdgpu_dm_irq_resume_late(adev);
688
689 return ret;
690 }
691
692 static const struct amd_ip_funcs amdgpu_dm_funcs = {
693 .name = "dm",
694 .early_init = dm_early_init,
695 .late_init = dm_late_init,
696 .sw_init = dm_sw_init,
697 .sw_fini = dm_sw_fini,
698 .hw_init = dm_hw_init,
699 .hw_fini = dm_hw_fini,
700 .suspend = dm_suspend,
701 .resume = dm_resume,
702 .is_idle = dm_is_idle,
703 .wait_for_idle = dm_wait_for_idle,
704 .check_soft_reset = dm_check_soft_reset,
705 .soft_reset = dm_soft_reset,
706 .set_clockgating_state = dm_set_clockgating_state,
707 .set_powergating_state = dm_set_powergating_state,
708 };
709
710 const struct amdgpu_ip_block_version dm_ip_block =
711 {
712 .type = AMD_IP_BLOCK_TYPE_DCE,
713 .major = 1,
714 .minor = 0,
715 .rev = 0,
716 .funcs = &amdgpu_dm_funcs,
717 };
718
719
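/*
 * DM wraps drm_atomic_state in dm_atomic_state so a dc_state context
 * can ride along with each atomic transaction. The alloc/clear/free
 * hooks below manage that wrapper and release the DC context when the
 * state is cleared.
 */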
720 static struct drm_atomic_state *
721 dm_atomic_state_alloc(struct drm_device *dev)
722 {
723 struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
724
725 if (!state)
726 return NULL;
727
728 if (drm_atomic_state_init(dev, &state->base) < 0)
729 goto fail;
730
731 return &state->base;
732
733 fail:
734 kfree(state);
735 return NULL;
736 }
737
738 static void
739 dm_atomic_state_clear(struct drm_atomic_state *state)
740 {
741 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
742
743 if (dm_state->context) {
744 dc_release_state(dm_state->context);
745 dm_state->context = NULL;
746 }
747
748 drm_atomic_state_default_clear(state);
749 }
750
751 static void
752 dm_atomic_state_alloc_free(struct drm_atomic_state *state)
753 {
754 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
755 drm_atomic_state_default_release(state);
756 kfree(dm_state);
757 }
758
759 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
760 .fb_create = amdgpu_user_framebuffer_create,
761 .output_poll_changed = amdgpu_output_poll_changed,
762 .atomic_check = amdgpu_dm_atomic_check,
763 .atomic_commit = amdgpu_dm_atomic_commit,
764 .atomic_state_alloc = dm_atomic_state_alloc,
765 .atomic_state_clear = dm_atomic_state_clear,
766 .atomic_state_free = dm_atomic_state_alloc_free
767 };
768
769 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
770 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
771 };
772
773 static void
774 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
775 {
776 struct drm_connector *connector = &aconnector->base;
777 struct drm_device *dev = connector->dev;
778 struct dc_sink *sink;
779
780 /* MST handled by drm_mst framework */
781 if (aconnector->mst_mgr.mst_state == true)
782 return;
783
784
785 sink = aconnector->dc_link->local_sink;
786
787 /* Edid mgmt connector gets its first update only in the mode_valid hook, and then
788 * the connector sink is set to either the fake or the physical sink, depending on link status.
789 * Don't do it here during boot.
790 */
791 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
792 && aconnector->dc_em_sink) {
793
794 /* For S3 resume with headless, use the emulated sink (dc_em_sink) to fake the stream,
795 * because on resume connector->sink is set to NULL
796 */
797 mutex_lock(&dev->mode_config.mutex);
798
799 if (sink) {
800 if (aconnector->dc_sink) {
801 amdgpu_dm_remove_sink_from_freesync_module(
802 connector);
803 /* retain and release below are used to
804 * bump up the refcount for the sink because the link doesn't point
805 * to it anymore after disconnect, so on the next crtc-to-connector
806 * reshuffle by UMD we would get an unwanted dc_sink release
807 */
808 if (aconnector->dc_sink != aconnector->dc_em_sink)
809 dc_sink_release(aconnector->dc_sink);
810 }
811 aconnector->dc_sink = sink;
812 amdgpu_dm_add_sink_to_freesync_module(
813 connector, aconnector->edid);
814 } else {
815 amdgpu_dm_remove_sink_from_freesync_module(connector);
816 if (!aconnector->dc_sink)
817 aconnector->dc_sink = aconnector->dc_em_sink;
818 else if (aconnector->dc_sink != aconnector->dc_em_sink)
819 dc_sink_retain(aconnector->dc_sink);
820 }
821
822 mutex_unlock(&dev->mode_config.mutex);
823 return;
824 }
825
826 /*
827 * TODO: temporary guard until a proper fix is found.
828 * If this sink is an MST sink, we should not do anything.
829 */
830 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
831 return;
832
833 if (aconnector->dc_sink == sink) {
834 /* We got a DP short pulse (Link Loss, DP CTS, etc...).
835 * Do nothing!! */
836 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
837 aconnector->connector_id);
838 return;
839 }
840
841 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
842 aconnector->connector_id, aconnector->dc_sink, sink);
843
844 mutex_lock(&dev->mode_config.mutex);
845
846 /* 1. Update status of the drm connector
847 * 2. Send an event and let userspace tell us what to do */
848 if (sink) {
849 /* TODO: check if we still need the S3 mode update workaround.
850 * If yes, put it here. */
851 if (aconnector->dc_sink)
852 amdgpu_dm_remove_sink_from_freesync_module(
853 connector);
854
855 aconnector->dc_sink = sink;
856 if (sink->dc_edid.length == 0) {
857 aconnector->edid = NULL;
858 } else {
859 aconnector->edid =
860 (struct edid *) sink->dc_edid.raw_edid;
861
862
863 drm_mode_connector_update_edid_property(connector,
864 aconnector->edid);
865 }
866 amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
867
868 } else {
869 amdgpu_dm_remove_sink_from_freesync_module(connector);
870 drm_mode_connector_update_edid_property(connector, NULL);
871 aconnector->num_modes = 0;
872 aconnector->dc_sink = NULL;
873 }
874
875 mutex_unlock(&dev->mode_config.mutex);
876 }
877
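/*
 * HPD (long pulse) interrupt handler: re-run link detection, update
 * the connector/sink state and, unless the connector state is forced,
 * send a hotplug event so userspace can react.
 */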
878 static void handle_hpd_irq(void *param)
879 {
880 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
881 struct drm_connector *connector = &aconnector->base;
882 struct drm_device *dev = connector->dev;
883
884 /* In case of failure or MST there is no need to update the connector status or notify the OS
885 * since (for the MST case) MST does this in its own context.
886 */
887 mutex_lock(&aconnector->hpd_lock);
888
889 if (aconnector->fake_enable)
890 aconnector->fake_enable = false;
891
892 if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
893 amdgpu_dm_update_connector_after_detect(aconnector);
894
895
896 drm_modeset_lock_all(dev);
897 dm_restore_drm_connector_state(dev, connector);
898 drm_modeset_unlock_all(dev);
899
900 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
901 drm_kms_helper_hotplug_event(dev);
902 }
903 mutex_unlock(&aconnector->hpd_lock);
904
905 }
906
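/*
 * Service DP short-pulse interrupts for MST: read the sink count /
 * ESI DPCD registers, let the MST topology manager handle the IRQ,
 * write back the ACK and re-read, looping (up to max_process_count
 * iterations) while new IRQs keep being reported.
 */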
907 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
908 {
909 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
910 uint8_t dret;
911 bool new_irq_handled = false;
912 int dpcd_addr;
913 int dpcd_bytes_to_read;
914
915 const int max_process_count = 30;
916 int process_count = 0;
917
918 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
919
920 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
921 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
922 /* DPCD 0x200 - 0x201 for downstream IRQ */
923 dpcd_addr = DP_SINK_COUNT;
924 } else {
925 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
926 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
927 dpcd_addr = DP_SINK_COUNT_ESI;
928 }
929
930 dret = drm_dp_dpcd_read(
931 &aconnector->dm_dp_aux.aux,
932 dpcd_addr,
933 esi,
934 dpcd_bytes_to_read);
935
936 while (dret == dpcd_bytes_to_read &&
937 process_count < max_process_count) {
938 uint8_t retry;
939 dret = 0;
940
941 process_count++;
942
943 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
944 /* handle HPD short pulse irq */
945 if (aconnector->mst_mgr.mst_state)
946 drm_dp_mst_hpd_irq(
947 &aconnector->mst_mgr,
948 esi,
949 &new_irq_handled);
950
951 if (new_irq_handled) {
952 /* ACK at DPCD to notify down stream */
953 const int ack_dpcd_bytes_to_write =
954 dpcd_bytes_to_read - 1;
955
956 for (retry = 0; retry < 3; retry++) {
957 uint8_t wret;
958
959 wret = drm_dp_dpcd_write(
960 &aconnector->dm_dp_aux.aux,
961 dpcd_addr + 1,
962 &esi[1],
963 ack_dpcd_bytes_to_write);
964 if (wret == ack_dpcd_bytes_to_write)
965 break;
966 }
967
968 /* check if there is a new irq to be handled */
969 dret = drm_dp_dpcd_read(
970 &aconnector->dm_dp_aux.aux,
971 dpcd_addr,
972 esi,
973 dpcd_bytes_to_read);
974
975 new_irq_handled = false;
976 } else {
977 break;
978 }
979 }
980
981 if (process_count == max_process_count)
982 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
983 }
984
985 static void handle_hpd_rx_irq(void *param)
986 {
987 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
988 struct drm_connector *connector = &aconnector->base;
989 struct drm_device *dev = connector->dev;
990 struct dc_link *dc_link = aconnector->dc_link;
991 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
992
993 /* TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
994 * conflict; after the i2c helper is implemented, this mutex should be
995 * retired.
996 */
997 if (dc_link->type != dc_connection_mst_branch)
998 mutex_lock(&aconnector->hpd_lock);
999
1000 if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
1001 !is_mst_root_connector) {
1002 /* Downstream Port status changed. */
1003 if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
1004 amdgpu_dm_update_connector_after_detect(aconnector);
1005
1006
1007 drm_modeset_lock_all(dev);
1008 dm_restore_drm_connector_state(dev, connector);
1009 drm_modeset_unlock_all(dev);
1010
1011 drm_kms_helper_hotplug_event(dev);
1012 }
1013 }
1014 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
1015 (dc_link->type == dc_connection_mst_branch))
1016 dm_handle_hpd_rx_irq(aconnector);
1017
1018 if (dc_link->type != dc_connection_mst_branch)
1019 mutex_unlock(&aconnector->hpd_lock);
1020 }
1021
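/*
 * Register low-IRQ-context handlers for the HPD and HPD RX interrupt
 * sources of every connector that reports a valid source from DC.
 */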
1022 static void register_hpd_handlers(struct amdgpu_device *adev)
1023 {
1024 struct drm_device *dev = adev->ddev;
1025 struct drm_connector *connector;
1026 struct amdgpu_dm_connector *aconnector;
1027 const struct dc_link *dc_link;
1028 struct dc_interrupt_params int_params = {0};
1029
1030 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1031 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1032
1033 list_for_each_entry(connector,
1034 &dev->mode_config.connector_list, head) {
1035
1036 aconnector = to_amdgpu_dm_connector(connector);
1037 dc_link = aconnector->dc_link;
1038
1039 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
1040 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1041 int_params.irq_source = dc_link->irq_source_hpd;
1042
1043 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1044 handle_hpd_irq,
1045 (void *) aconnector);
1046 }
1047
1048 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
1049
1050 /* Also register for DP short pulse (hpd_rx). */
1051 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1052 int_params.irq_source = dc_link->irq_source_hpd_rx;
1053
1054 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1055 handle_hpd_rx_irq,
1056 (void *) aconnector);
1057 }
1058 }
1059 }
1060
1061 /* Register IRQ sources and initialize IRQ callbacks */
1062 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
1063 {
1064 struct dc *dc = adev->dm.dc;
1065 struct common_irq_params *c_irq_params;
1066 struct dc_interrupt_params int_params = {0};
1067 int r;
1068 int i;
1069 unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
1070
1071 if (adev->asic_type == CHIP_VEGA10 ||
1072 adev->asic_type == CHIP_RAVEN)
1073 client_id = AMDGPU_IH_CLIENTID_DCE;
1074
1075 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1076 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1077
1078 /* Actions of amdgpu_irq_add_id():
1079 * 1. Register a set() function with base driver.
1080 * Base driver will call set() function to enable/disable an
1081 * interrupt in DC hardware.
1082 * 2. Register amdgpu_dm_irq_handler().
1083 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1084 * coming from DC hardware.
1085 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1086 * for acknowledging and handling. */
1087
1088 /* Use VBLANK interrupt */
1089 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
1090 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
1091 if (r) {
1092 DRM_ERROR("Failed to add crtc irq id!\n");
1093 return r;
1094 }
1095
1096 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1097 int_params.irq_source =
1098 dc_interrupt_to_irq_source(dc, i, 0);
1099
1100 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1101
1102 c_irq_params->adev = adev;
1103 c_irq_params->irq_src = int_params.irq_source;
1104
1105 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1106 dm_crtc_high_irq, c_irq_params);
1107 }
1108
1109 /* Use GRPH_PFLIP interrupt */
1110 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
1111 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
1112 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
1113 if (r) {
1114 DRM_ERROR("Failed to add page flip irq id!\n");
1115 return r;
1116 }
1117
1118 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1119 int_params.irq_source =
1120 dc_interrupt_to_irq_source(dc, i, 0);
1121
1122 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1123
1124 c_irq_params->adev = adev;
1125 c_irq_params->irq_src = int_params.irq_source;
1126
1127 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1128 dm_pflip_high_irq, c_irq_params);
1129
1130 }
1131
1132 /* HPD */
1133 r = amdgpu_irq_add_id(adev, client_id,
1134 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
1135 if (r) {
1136 DRM_ERROR("Failed to add hpd irq id!\n");
1137 return r;
1138 }
1139
1140 register_hpd_handlers(adev);
1141
1142 return 0;
1143 }
1144
1145 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1146 /* Register IRQ sources and initialize IRQ callbacks */
1147 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1148 {
1149 struct dc *dc = adev->dm.dc;
1150 struct common_irq_params *c_irq_params;
1151 struct dc_interrupt_params int_params = {0};
1152 int r;
1153 int i;
1154
1155 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1156 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1157
1158 /* Actions of amdgpu_irq_add_id():
1159 * 1. Register a set() function with base driver.
1160 * Base driver will call set() function to enable/disable an
1161 * interrupt in DC hardware.
1162 * 2. Register amdgpu_dm_irq_handler().
1163 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1164 * coming from DC hardware.
1165 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1166 * for acknowledging and handling.
1167 * */
1168
1169 /* Use VSTARTUP interrupt */
1170 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
1171 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
1172 i++) {
1173 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);
1174
1175 if (r) {
1176 DRM_ERROR("Failed to add crtc irq id!\n");
1177 return r;
1178 }
1179
1180 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1181 int_params.irq_source =
1182 dc_interrupt_to_irq_source(dc, i, 0);
1183
1184 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1185
1186 c_irq_params->adev = adev;
1187 c_irq_params->irq_src = int_params.irq_source;
1188
1189 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1190 dm_crtc_high_irq, c_irq_params);
1191 }
1192
1193 /* Use GRPH_PFLIP interrupt */
1194 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
1195 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
1196 i++) {
1197 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
1198 if (r) {
1199 DRM_ERROR("Failed to add page flip irq id!\n");
1200 return r;
1201 }
1202
1203 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1204 int_params.irq_source =
1205 dc_interrupt_to_irq_source(dc, i, 0);
1206
1207 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1208
1209 c_irq_params->adev = adev;
1210 c_irq_params->irq_src = int_params.irq_source;
1211
1212 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1213 dm_pflip_high_irq, c_irq_params);
1214
1215 }
1216
1217 /* HPD */
1218 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
1219 &adev->hpd_irq);
1220 if (r) {
1221 DRM_ERROR("Failed to add hpd irq id!\n");
1222 return r;
1223 }
1224
1225 register_hpd_handlers(adev);
1226
1227 return 0;
1228 }
1229 #endif
1230
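/*
 * Basic drm mode_config setup: hook up DM's atomic funcs and commit
 * helpers, set the 16384x16384 resolution limits, preferred depth,
 * async page flip support and fb aperture base, then create the
 * amdgpu modeset properties.
 */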
1231 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1232 {
1233 int r;
1234
1235 adev->mode_info.mode_config_initialized = true;
1236
1237 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1238 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
1239
1240 adev->ddev->mode_config.max_width = 16384;
1241 adev->ddev->mode_config.max_height = 16384;
1242
1243 adev->ddev->mode_config.preferred_depth = 24;
1244 adev->ddev->mode_config.prefer_shadow = 1;
1245 /* indicate support of immediate flip */
1246 adev->ddev->mode_config.async_page_flip = true;
1247
1248 adev->ddev->mode_config.fb_base = adev->mc.aper_base;
1249
1250 r = amdgpu_modeset_create_props(adev);
1251 if (r)
1252 return r;
1253
1254 return 0;
1255 }
1256
1257 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1258 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1259
1260 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1261 {
1262 struct amdgpu_display_manager *dm = bl_get_data(bd);
1263
1264 if (dc_link_set_backlight_level(dm->backlight_link,
1265 bd->props.brightness, 0, 0))
1266 return 0;
1267 else
1268 return 1;
1269 }
1270
1271 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1272 {
1273 return bd->props.brightness;
1274 }
1275
1276 static const struct backlight_ops amdgpu_dm_backlight_ops = {
1277 .get_brightness = amdgpu_dm_backlight_get_brightness,
1278 .update_status = amdgpu_dm_backlight_update_status,
1279 };
1280
1281 static void
1282 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1283 {
1284 char bl_name[16];
1285 struct backlight_properties props = { 0 };
1286
1287 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1288 props.type = BACKLIGHT_RAW;
1289
1290 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1291 dm->adev->ddev->primary->index);
1292
1293 dm->backlight_dev = backlight_device_register(bl_name,
1294 dm->adev->ddev->dev,
1295 dm,
1296 &amdgpu_dm_backlight_ops,
1297 &props);
1298
1299 if (NULL == dm->backlight_dev)
1300 DRM_ERROR("DM: Backlight registration failed!\n");
1301 else
1302 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
1303 }
1304
1305 #endif
1306
1307 /* In this architecture, the association
1308 * connector -> encoder -> crtc
1309 * is not really required. The crtc and connector will hold the
1310 * display_index as an abstraction to use with the DAL component
1311 *
1312 * Returns 0 on success
1313 */
1314 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1315 {
1316 struct amdgpu_display_manager *dm = &adev->dm;
1317 uint32_t i;
1318 struct amdgpu_dm_connector *aconnector = NULL;
1319 struct amdgpu_encoder *aencoder = NULL;
1320 struct amdgpu_mode_info *mode_info = &adev->mode_info;
1321 uint32_t link_cnt;
1322 unsigned long possible_crtcs;
1323
1324 link_cnt = dm->dc->caps.max_links;
1325 if (amdgpu_dm_mode_config_init(dm->adev)) {
1326 DRM_ERROR("DM: Failed to initialize mode config\n");
1327 return -1;
1328 }
1329
1330 for (i = 0; i < dm->dc->caps.max_planes; i++) {
1331 mode_info->planes[i] = kzalloc(sizeof(struct amdgpu_plane),
1332 GFP_KERNEL);
1333 if (!mode_info->planes[i]) {
1334 DRM_ERROR("KMS: Failed to allocate plane\n");
1335 goto fail_free_planes;
1336 }
1337 mode_info->planes[i]->base.type = mode_info->plane_type[i];
1338
1339 /*
1340 * HACK: IGT tests expect that each plane can only have
1341 * one possible CRTC. For now, set one CRTC for each
1342 * plane that is not an underlay, but still allow multiple
1343 * CRTCs for underlay planes.
1344 */
1345 possible_crtcs = 1 << i;
1346 if (i >= dm->dc->caps.max_streams)
1347 possible_crtcs = 0xff;
1348
1349 if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
1350 DRM_ERROR("KMS: Failed to initialize plane\n");
1351 goto fail_free_planes;
1352 }
1353 }
1354
1355 for (i = 0; i < dm->dc->caps.max_streams; i++)
1356 if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
1357 DRM_ERROR("KMS: Failed to initialize crtc\n");
1358 goto fail_free_planes;
1359 }
1360
1361 dm->display_indexes_num = dm->dc->caps.max_streams;
1362
1363 /* loops over all connectors on the board */
1364 for (i = 0; i < link_cnt; i++) {
1365
1366 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
1367 DRM_ERROR(
1368 "KMS: Cannot support more than %d display indexes\n",
1369 AMDGPU_DM_MAX_DISPLAY_INDEX);
1370 continue;
1371 }
1372
1373 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
1374 if (!aconnector)
1375 goto fail_free_planes;
1376
1377 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
1378 if (!aencoder)
1379 goto fail_free_connector;
1380
1381 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
1382 DRM_ERROR("KMS: Failed to initialize encoder\n");
1383 goto fail_free_encoder;
1384 }
1385
1386 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
1387 DRM_ERROR("KMS: Failed to initialize connector\n");
1388 goto fail_free_encoder;
1389 }
1390
1391 if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
1392 DETECT_REASON_BOOT))
1393 amdgpu_dm_update_connector_after_detect(aconnector);
1394 }
1395
1396 /* Software is initialized. Now we can register interrupt handlers. */
1397 switch (adev->asic_type) {
1398 case CHIP_BONAIRE:
1399 case CHIP_HAWAII:
1400 case CHIP_KAVERI:
1401 case CHIP_KABINI:
1402 case CHIP_MULLINS:
1403 case CHIP_TONGA:
1404 case CHIP_FIJI:
1405 case CHIP_CARRIZO:
1406 case CHIP_STONEY:
1407 case CHIP_POLARIS11:
1408 case CHIP_POLARIS10:
1409 case CHIP_POLARIS12:
1410 case CHIP_VEGA10:
1411 if (dce110_register_irq_handlers(dm->adev)) {
1412 DRM_ERROR("DM: Failed to initialize IRQ\n");
1413 goto fail_free_encoder;
1414 }
1415 break;
1416 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1417 case CHIP_RAVEN:
1418 if (dcn10_register_irq_handlers(dm->adev)) {
1419 DRM_ERROR("DM: Failed to initialize IRQ\n");
1420 goto fail_free_encoder;
1421 }
1422 /*
1423 * Temporary disable until pplib/smu interaction is implemented
1424 */
1425 dm->dc->debug.disable_stutter = true;
1426 break;
1427 #endif
1428 default:
1429 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1430 goto fail_free_encoder;
1431 }
1432
1433 drm_mode_config_reset(dm->ddev);
1434
1435 return 0;
1436 fail_free_encoder:
1437 kfree(aencoder);
1438 fail_free_connector:
1439 kfree(aconnector);
1440 fail_free_planes:
1441 for (i = 0; i < dm->dc->caps.max_planes; i++)
1442 kfree(mode_info->planes[i]);
1443 return -1;
1444 }
1445
1446 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
1447 {
1448 drm_mode_config_cleanup(dm->ddev);
1449 return;
1450 }
1451
1452 /******************************************************************************
1453 * amdgpu_display_funcs functions
1454 *****************************************************************************/
1455
1456 /**
1457 * dm_bandwidth_update - program display watermarks
1458 *
1459 * @adev: amdgpu_device pointer
1460 *
1461 * Calculate and program the display watermarks and line buffer allocation.
1462 */
1463 static void dm_bandwidth_update(struct amdgpu_device *adev)
1464 {
1465 /* TODO: implement later */
1466 }
1467
1468 static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
1469 u8 level)
1470 {
1471 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1472 }
1473
1474 static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
1475 {
1476 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1477 return 0;
1478 }
1479
1480 static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1481 struct drm_file *filp)
1482 {
1483 struct mod_freesync_params freesync_params;
1484 uint8_t num_streams;
1485 uint8_t i;
1486
1487 struct amdgpu_device *adev = dev->dev_private;
1488 int r = 0;
1489
1490 /* Get freesync enable flag from DRM */
1491
1492 num_streams = dc_get_current_stream_count(adev->dm.dc);
1493
1494 for (i = 0; i < num_streams; i++) {
1495 struct dc_stream_state *stream;
1496 stream = dc_get_stream_at_index(adev->dm.dc, i);
1497
1498 mod_freesync_update_state(adev->dm.freesync_module,
1499 &stream, 1, &freesync_params);
1500 }
1501
1502 return r;
1503 }
1504
1505 static const struct amdgpu_display_funcs dm_display_funcs = {
1506 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
1507 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
1508 .vblank_wait = NULL,
1509 .backlight_set_level =
1510 dm_set_backlight_level,/* called unconditionally */
1511 .backlight_get_level =
1512 dm_get_backlight_level,/* called unconditionally */
1513 .hpd_sense = NULL,/* called unconditionally */
1514 .hpd_set_polarity = NULL, /* called unconditionally */
1515 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
1516 .page_flip_get_scanoutpos =
1517 dm_crtc_get_scanoutpos,/* called unconditionally */
1518 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
1519 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
1520 .notify_freesync = amdgpu_notify_freesync,
1521
1522 };
1523
1524 #if defined(CONFIG_DEBUG_KERNEL_DC)
1525
1526 static ssize_t s3_debug_store(struct device *device,
1527 struct device_attribute *attr,
1528 const char *buf,
1529 size_t count)
1530 {
1531 int ret;
1532 int s3_state;
1533 struct pci_dev *pdev = to_pci_dev(device);
1534 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1535 struct amdgpu_device *adev = drm_dev->dev_private;
1536
1537 ret = kstrtoint(buf, 0, &s3_state);
1538
1539 if (ret == 0) {
1540 if (s3_state) {
1541 dm_resume(adev);
1542 amdgpu_dm_display_resume(adev);
1543 drm_kms_helper_hotplug_event(adev->ddev);
1544 } else
1545 dm_suspend(adev);
1546 }
1547
1548 return ret == 0 ? count : 0;
1549 }
1550
1551 DEVICE_ATTR_WO(s3_debug);
1552
1553 #endif
1554
1555 static int dm_early_init(void *handle)
1556 {
1557 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1558
1559 adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
1560 amdgpu_dm_set_irq_funcs(adev);
1561
1562 switch (adev->asic_type) {
1563 case CHIP_BONAIRE:
1564 case CHIP_HAWAII:
1565 adev->mode_info.num_crtc = 6;
1566 adev->mode_info.num_hpd = 6;
1567 adev->mode_info.num_dig = 6;
1568 adev->mode_info.plane_type = dm_plane_type_default;
1569 break;
1570 case CHIP_KAVERI:
1571 adev->mode_info.num_crtc = 4;
1572 adev->mode_info.num_hpd = 6;
1573 adev->mode_info.num_dig = 7;
1574 adev->mode_info.plane_type = dm_plane_type_default;
1575 break;
1576 case CHIP_KABINI:
1577 case CHIP_MULLINS:
1578 adev->mode_info.num_crtc = 2;
1579 adev->mode_info.num_hpd = 6;
1580 adev->mode_info.num_dig = 6;
1581 adev->mode_info.plane_type = dm_plane_type_default;
1582 break;
1583 case CHIP_FIJI:
1584 case CHIP_TONGA:
1585 adev->mode_info.num_crtc = 6;
1586 adev->mode_info.num_hpd = 6;
1587 adev->mode_info.num_dig = 7;
1588 adev->mode_info.plane_type = dm_plane_type_default;
1589 break;
1590 case CHIP_CARRIZO:
1591 adev->mode_info.num_crtc = 3;
1592 adev->mode_info.num_hpd = 6;
1593 adev->mode_info.num_dig = 9;
1594 adev->mode_info.plane_type = dm_plane_type_carizzo;
1595 break;
1596 case CHIP_STONEY:
1597 adev->mode_info.num_crtc = 2;
1598 adev->mode_info.num_hpd = 6;
1599 adev->mode_info.num_dig = 9;
1600 adev->mode_info.plane_type = dm_plane_type_stoney;
1601 break;
1602 case CHIP_POLARIS11:
1603 case CHIP_POLARIS12:
1604 adev->mode_info.num_crtc = 5;
1605 adev->mode_info.num_hpd = 5;
1606 adev->mode_info.num_dig = 5;
1607 adev->mode_info.plane_type = dm_plane_type_default;
1608 break;
1609 case CHIP_POLARIS10:
1610 adev->mode_info.num_crtc = 6;
1611 adev->mode_info.num_hpd = 6;
1612 adev->mode_info.num_dig = 6;
1613 adev->mode_info.plane_type = dm_plane_type_default;
1614 break;
1615 case CHIP_VEGA10:
1616 adev->mode_info.num_crtc = 6;
1617 adev->mode_info.num_hpd = 6;
1618 adev->mode_info.num_dig = 6;
1619 adev->mode_info.plane_type = dm_plane_type_default;
1620 break;
1621 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1622 case CHIP_RAVEN:
1623 adev->mode_info.num_crtc = 4;
1624 adev->mode_info.num_hpd = 4;
1625 adev->mode_info.num_dig = 4;
1626 adev->mode_info.plane_type = dm_plane_type_default;
1627 break;
1628 #endif
1629 default:
1630 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1631 return -EINVAL;
1632 }
1633
1634 if (adev->mode_info.funcs == NULL)
1635 adev->mode_info.funcs = &dm_display_funcs;
1636
1637 /* Note: Do NOT change adev->audio_endpt_rreg and
1638 * adev->audio_endpt_wreg because they are initialised in
1639 * amdgpu_device_init() */
1640 #if defined(CONFIG_DEBUG_KERNEL_DC)
1641 device_create_file(
1642 adev->ddev->dev,
1643 &dev_attr_s3_debug);
1644 #endif
1645
1646 return 0;
1647 }
1648
1649 struct dm_connector_state {
1650 struct drm_connector_state base;
1651
1652 enum amdgpu_rmx_type scaling;
1653 uint8_t underscan_vborder;
1654 uint8_t underscan_hborder;
1655 bool underscan_enable;
1656 };
1657
1658 #define to_dm_connector_state(x)\
1659 container_of((x), struct dm_connector_state, base)
1660
1661 static bool modeset_required(struct drm_crtc_state *crtc_state,
1662 struct dc_stream_state *new_stream,
1663 struct dc_stream_state *old_stream)
1664 {
1665 if (!drm_atomic_crtc_needs_modeset(crtc_state))
1666 return false;
1667
1668 if (!crtc_state->enable)
1669 return false;
1670
1671 return crtc_state->active;
1672 }
1673
1674 static bool modereset_required(struct drm_crtc_state *crtc_state)
1675 {
1676 if (!drm_atomic_crtc_needs_modeset(crtc_state))
1677 return false;
1678
1679 return !crtc_state->enable || !crtc_state->active;
1680 }
1681
1682 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
1683 {
1684 drm_encoder_cleanup(encoder);
1685 kfree(encoder);
1686 }
1687
1688 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
1689 .destroy = amdgpu_dm_encoder_destroy,
1690 };
1691
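/*
 * Translate the DRM plane state rectangles into DC rects: the source
 * rect comes from the 16.16 fixed-point src_* values, the destination
 * and clip rects from the crtc_* values, and the DRM rotation property
 * is mapped onto the DC rotation angle. Returns false for zero-sized
 * rectangles.
 */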
1692 static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
1693 struct dc_plane_state *plane_state)
1694 {
1695 plane_state->src_rect.x = state->src_x >> 16;
1696 plane_state->src_rect.y = state->src_y >> 16;
1697 /* we ignore the mantissa for now and do not deal with fractional pixels :( */
1698 plane_state->src_rect.width = state->src_w >> 16;
1699
1700 if (plane_state->src_rect.width == 0)
1701 return false;
1702
1703 plane_state->src_rect.height = state->src_h >> 16;
1704 if (plane_state->src_rect.height == 0)
1705 return false;
1706
1707 plane_state->dst_rect.x = state->crtc_x;
1708 plane_state->dst_rect.y = state->crtc_y;
1709
1710 if (state->crtc_w == 0)
1711 return false;
1712
1713 plane_state->dst_rect.width = state->crtc_w;
1714
1715 if (state->crtc_h == 0)
1716 return false;
1717
1718 plane_state->dst_rect.height = state->crtc_h;
1719
1720 plane_state->clip_rect = plane_state->dst_rect;
1721
1722 switch (state->rotation & DRM_MODE_ROTATE_MASK) {
1723 case DRM_MODE_ROTATE_0:
1724 plane_state->rotation = ROTATION_ANGLE_0;
1725 break;
1726 case DRM_MODE_ROTATE_90:
1727 plane_state->rotation = ROTATION_ANGLE_90;
1728 break;
1729 case DRM_MODE_ROTATE_180:
1730 plane_state->rotation = ROTATION_ANGLE_180;
1731 break;
1732 case DRM_MODE_ROTATE_270:
1733 plane_state->rotation = ROTATION_ANGLE_270;
1734 break;
1735 default:
1736 plane_state->rotation = ROTATION_ANGLE_0;
1737 break;
1738 }
1739
1740 return true;
1741 }
1742 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
1743 uint64_t *tiling_flags,
1744 uint64_t *fb_location)
1745 {
1746 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1747 int r = amdgpu_bo_reserve(rbo, false);
1748
1749 if (unlikely(r)) {
1750 // Don't show an error message when returning -ERESTARTSYS
1751 if (r != -ERESTARTSYS)
1752 DRM_ERROR("Unable to reserve buffer: %d\n", r);
1753 return r;
1754 }
1755
1756 if (fb_location)
1757 *fb_location = amdgpu_bo_gpu_offset(rbo);
1758
1759 if (tiling_flags)
1760 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
1761
1762 amdgpu_bo_unreserve(rbo);
1763
1764 return r;
1765 }
1766
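/*
 * Fill DC plane state from a DRM framebuffer: map the pixel format,
 * program either the graphics address or the separate luma/chroma
 * addresses for NV12/NV21 video surfaces, set plane sizes and pitches,
 * and derive GFX8/GFX9 tiling parameters from the BO tiling flags.
 */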
1767 static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
1768 struct dc_plane_state *plane_state,
1769 const struct amdgpu_framebuffer *amdgpu_fb,
1770 bool addReq)
1771 {
1772 uint64_t tiling_flags;
1773 uint64_t fb_location = 0;
1774 uint64_t chroma_addr = 0;
1775 unsigned int awidth;
1776 const struct drm_framebuffer *fb = &amdgpu_fb->base;
1777 int ret = 0;
1778 struct drm_format_name_buf format_name;
1779
1780 ret = get_fb_info(
1781 amdgpu_fb,
1782 &tiling_flags,
1783 addReq == true ? &fb_location:NULL);
1784
1785 if (ret)
1786 return ret;
1787
1788 switch (fb->format->format) {
1789 case DRM_FORMAT_C8:
1790 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
1791 break;
1792 case DRM_FORMAT_RGB565:
1793 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
1794 break;
1795 case DRM_FORMAT_XRGB8888:
1796 case DRM_FORMAT_ARGB8888:
1797 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
1798 break;
1799 case DRM_FORMAT_XRGB2101010:
1800 case DRM_FORMAT_ARGB2101010:
1801 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
1802 break;
1803 case DRM_FORMAT_XBGR2101010:
1804 case DRM_FORMAT_ABGR2101010:
1805 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
1806 break;
1807 case DRM_FORMAT_NV21:
1808 plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
1809 break;
1810 case DRM_FORMAT_NV12:
1811 plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
1812 break;
1813 default:
1814 DRM_ERROR("Unsupported screen format %s\n",
1815 drm_get_format_name(fb->format->format, &format_name));
1816 return -EINVAL;
1817 }
1818
1819 if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
1820 plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
1821 plane_state->address.grph.addr.low_part = lower_32_bits(fb_location);
1822 plane_state->address.grph.addr.high_part = upper_32_bits(fb_location);
1823 plane_state->plane_size.grph.surface_size.x = 0;
1824 plane_state->plane_size.grph.surface_size.y = 0;
1825 plane_state->plane_size.grph.surface_size.width = fb->width;
1826 plane_state->plane_size.grph.surface_size.height = fb->height;
1827 plane_state->plane_size.grph.surface_pitch =
1828 fb->pitches[0] / fb->format->cpp[0];
1829 /* TODO: unhardcode */
1830 plane_state->color_space = COLOR_SPACE_SRGB;
1831
1832 } else {
1833 awidth = ALIGN(fb->width, 64);
1834 plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
1835 plane_state->address.video_progressive.luma_addr.low_part
1836 = lower_32_bits(fb_location);
1837 plane_state->address.video_progressive.luma_addr.high_part
1838 = upper_32_bits(fb_location);
1839 chroma_addr = fb_location + (u64)(awidth * fb->height);
1840 plane_state->address.video_progressive.chroma_addr.low_part
1841 = lower_32_bits(chroma_addr);
1842 plane_state->address.video_progressive.chroma_addr.high_part
1843 = upper_32_bits(chroma_addr);
1844 plane_state->plane_size.video.luma_size.x = 0;
1845 plane_state->plane_size.video.luma_size.y = 0;
1846 plane_state->plane_size.video.luma_size.width = awidth;
1847 plane_state->plane_size.video.luma_size.height = fb->height;
1848 /* TODO: unhardcode */
1849 plane_state->plane_size.video.luma_pitch = awidth;
1850
1851 plane_state->plane_size.video.chroma_size.x = 0;
1852 plane_state->plane_size.video.chroma_size.y = 0;
1853 plane_state->plane_size.video.chroma_size.width = awidth;
1854 plane_state->plane_size.video.chroma_size.height = fb->height;
1855 plane_state->plane_size.video.chroma_pitch = awidth / 2;
1856
1857 /* TODO: unhardcode */
1858 plane_state->color_space = COLOR_SPACE_YCBCR709;
1859 }
1860
1861 memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
1862
1863 /* Fill GFX8 params */
1864 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
1865 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
1866
1867 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1868 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1869 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1870 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1871 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1872
1873 /* XXX fix me for VI */
1874 plane_state->tiling_info.gfx8.num_banks = num_banks;
1875 plane_state->tiling_info.gfx8.array_mode =
1876 DC_ARRAY_2D_TILED_THIN1;
1877 plane_state->tiling_info.gfx8.tile_split = tile_split;
1878 plane_state->tiling_info.gfx8.bank_width = bankw;
1879 plane_state->tiling_info.gfx8.bank_height = bankh;
1880 plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
1881 plane_state->tiling_info.gfx8.tile_mode =
1882 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
1883 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
1884 == DC_ARRAY_1D_TILED_THIN1) {
1885 plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
1886 }
1887
1888 plane_state->tiling_info.gfx8.pipe_config =
1889 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1890
1891 if (adev->asic_type == CHIP_VEGA10 ||
1892 adev->asic_type == CHIP_RAVEN) {
1893 /* Fill GFX9 params */
1894 plane_state->tiling_info.gfx9.num_pipes =
1895 adev->gfx.config.gb_addr_config_fields.num_pipes;
1896 plane_state->tiling_info.gfx9.num_banks =
1897 adev->gfx.config.gb_addr_config_fields.num_banks;
1898 plane_state->tiling_info.gfx9.pipe_interleave =
1899 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
1900 plane_state->tiling_info.gfx9.num_shader_engines =
1901 adev->gfx.config.gb_addr_config_fields.num_se;
1902 plane_state->tiling_info.gfx9.max_compressed_frags =
1903 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
1904 plane_state->tiling_info.gfx9.num_rb_per_se =
1905 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
1906 plane_state->tiling_info.gfx9.swizzle =
1907 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
1908 plane_state->tiling_info.gfx9.shaderEnable = 1;
1909 }
1910
1911 plane_state->visible = true;
1912 plane_state->scaling_quality.h_taps_c = 0;
1913 plane_state->scaling_quality.v_taps_c = 0;
1914
1915 /* is this needed? is plane_state zeroed at allocation? */
1916 plane_state->scaling_quality.h_taps = 0;
1917 plane_state->scaling_quality.v_taps = 0;
1918 plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
1919
1920 return ret;
1921
1922 }
1923
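/* Translate the legacy 256-entry gamma LUT from the CRTC state into a DC
 * gamma object and attach it to the plane as its gamma correction.
 */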
1924 static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state,
1925 struct dc_plane_state *plane_state)
1926 {
1927 int i;
1928 struct dc_gamma *gamma;
1929 struct drm_color_lut *lut =
1930 (struct drm_color_lut *) crtc_state->gamma_lut->data;
1931
1932 gamma = dc_create_gamma();
1933
1934 if (gamma == NULL) {
1935 WARN_ON(1);
1936 return;
1937 }
1938
1939 gamma->type = GAMMA_RGB_256;
1940 gamma->num_entries = GAMMA_RGB_256_ENTRIES;
1941 for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) {
1942 gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red);
1943 gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green);
1944 gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue);
1945 }
1946
1947 plane_state->gamma_correction = gamma;
1948 }
1949
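/* Build the DC plane state from the DRM plane/CRTC state: source and
 * destination rectangles, framebuffer-derived attributes, an sRGB input
 * transfer function, and, if a gamma LUT is set, the gamma correction.
 */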
1950 static int fill_plane_attributes(struct amdgpu_device *adev,
1951 struct dc_plane_state *dc_plane_state,
1952 struct drm_plane_state *plane_state,
1953 struct drm_crtc_state *crtc_state,
1954 bool addrReq)
1955 {
1956 const struct amdgpu_framebuffer *amdgpu_fb =
1957 to_amdgpu_framebuffer(plane_state->fb);
1958 const struct drm_crtc *crtc = plane_state->crtc;
1959 struct dc_transfer_func *input_tf;
1960 int ret = 0;
1961
1962 if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
1963 return -EINVAL;
1964
1965 ret = fill_plane_attributes_from_fb(
1966 crtc->dev->dev_private,
1967 dc_plane_state,
1968 amdgpu_fb,
1969 addrReq);
1970
1971 if (ret)
1972 return ret;
1973
1974 input_tf = dc_create_transfer_func();
1975
1976 if (input_tf == NULL)
1977 return -ENOMEM;
1978
1979 input_tf->type = TF_TYPE_PREDEFINED;
1980 input_tf->tf = TRANSFER_FUNCTION_SRGB;
1981
1982 dc_plane_state->in_transfer_func = input_tf;
1983
1984 /* In case of gamma set, update gamma value */
1985 if (crtc_state->gamma_lut)
1986 fill_gamma_from_crtc_state(crtc_state, dc_plane_state);
1987
1988 return ret;
1989 }
1990
1991 /*****************************************************************************/
1992
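/* Compute the stream source (viewport) and destination rectangles from the
 * mode and the connector scaling/underscan settings (full, aspect, center).
 */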
1993 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
1994 const struct dm_connector_state *dm_state,
1995 struct dc_stream_state *stream)
1996 {
1997 enum amdgpu_rmx_type rmx_type;
1998
1999 struct rect src = { 0 }; /* viewport in composition space*/
2000 struct rect dst = { 0 }; /* stream addressable area */
2001
2002 /* no mode. nothing to be done */
2003 if (!mode)
2004 return;
2005
2006 /* Full screen scaling by default */
2007 src.width = mode->hdisplay;
2008 src.height = mode->vdisplay;
2009 dst.width = stream->timing.h_addressable;
2010 dst.height = stream->timing.v_addressable;
2011
2012 rmx_type = dm_state->scaling;
2013 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2014 if (src.width * dst.height <
2015 src.height * dst.width) {
2016 /* height needs less upscaling/more downscaling */
2017 dst.width = src.width *
2018 dst.height / src.height;
2019 } else {
2020 /* width needs less upscaling/more downscaling */
2021 dst.height = src.height *
2022 dst.width / src.width;
2023 }
2024 } else if (rmx_type == RMX_CENTER) {
2025 dst = src;
2026 }
2027
2028 dst.x = (stream->timing.h_addressable - dst.width) / 2;
2029 dst.y = (stream->timing.v_addressable - dst.height) / 2;
2030
2031 if (dm_state->underscan_enable) {
2032 dst.x += dm_state->underscan_hborder / 2;
2033 dst.y += dm_state->underscan_vborder / 2;
2034 dst.width -= dm_state->underscan_hborder;
2035 dst.height -= dm_state->underscan_vborder;
2036 }
2037
2038 stream->src = src;
2039 stream->dst = dst;
2040
2041 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
2042 dst.x, dst.y, dst.width, dst.height);
2043
2044 }
2045
2046 static enum dc_color_depth
2047 convert_color_depth_from_display_info(const struct drm_connector *connector)
2048 {
2049 uint32_t bpc = connector->display_info.bpc;
2050
2051 /* Limit color depth to 8 bpc
2052 * TODO: Still need to handle deep color
2053 */
2054 if (bpc > 8)
2055 bpc = 8;
2056
2057 switch (bpc) {
2058 case 0:
2059 /* Temporary workaround: DRM doesn't parse color depth for
2060 * EDID revisions before 1.4
2061 * TODO: Fix edid parsing
2062 */
2063 return COLOR_DEPTH_888;
2064 case 6:
2065 return COLOR_DEPTH_666;
2066 case 8:
2067 return COLOR_DEPTH_888;
2068 case 10:
2069 return COLOR_DEPTH_101010;
2070 case 12:
2071 return COLOR_DEPTH_121212;
2072 case 14:
2073 return COLOR_DEPTH_141414;
2074 case 16:
2075 return COLOR_DEPTH_161616;
2076 default:
2077 return COLOR_DEPTH_UNDEFINED;
2078 }
2079 }
2080
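/* Classify the mode as 16:9 or 4:3 by cross-multiplying: compare
 * crtc_hdisplay * 9 against crtc_vdisplay * 16 within a tolerance of 10.
 */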
2081 static enum dc_aspect_ratio
2082 get_aspect_ratio(const struct drm_display_mode *mode_in)
2083 {
2084 int32_t width = mode_in->crtc_hdisplay * 9;
2085 int32_t height = mode_in->crtc_vdisplay * 16;
2086
2087 if ((width - height) < 10 && (width - height) > -10)
2088 return ASPECT_RATIO_16_9;
2089 else
2090 return ASPECT_RATIO_4_3;
2091 }
2092
2093 static enum dc_color_space
2094 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2095 {
2096 enum dc_color_space color_space = COLOR_SPACE_SRGB;
2097
2098 switch (dc_crtc_timing->pixel_encoding) {
2099 case PIXEL_ENCODING_YCBCR422:
2100 case PIXEL_ENCODING_YCBCR444:
2101 case PIXEL_ENCODING_YCBCR420:
2102 {
2103 /*
2104 * 27030 kHz is the separation point between HDTV and SDTV
2105 * according to the HDMI spec; use YCbCr709 above it and
2106 * YCbCr601 below it
2107 */
2108 if (dc_crtc_timing->pix_clk_khz > 27030) {
2109 if (dc_crtc_timing->flags.Y_ONLY)
2110 color_space =
2111 COLOR_SPACE_YCBCR709_LIMITED;
2112 else
2113 color_space = COLOR_SPACE_YCBCR709;
2114 } else {
2115 if (dc_crtc_timing->flags.Y_ONLY)
2116 color_space =
2117 COLOR_SPACE_YCBCR601_LIMITED;
2118 else
2119 color_space = COLOR_SPACE_YCBCR601;
2120 }
2121
2122 }
2123 break;
2124 case PIXEL_ENCODING_RGB:
2125 color_space = COLOR_SPACE_SRGB;
2126 break;
2127
2128 default:
2129 WARN_ON(1);
2130 break;
2131 }
2132
2133 return color_space;
2134 }
2135
2136 /*****************************************************************************/
2137
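/* Translate a drm_display_mode and the connector capabilities into the DC
 * CRTC timing: borders, pixel encoding, color depth, addressable/total
 * sizes, sync widths, front porches, pixel clock and sync polarities.
 */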
2138 static void
2139 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2140 const struct drm_display_mode *mode_in,
2141 const struct drm_connector *connector)
2142 {
2143 struct dc_crtc_timing *timing_out = &stream->timing;
2144
2145 memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2146
2147 timing_out->h_border_left = 0;
2148 timing_out->h_border_right = 0;
2149 timing_out->v_border_top = 0;
2150 timing_out->v_border_bottom = 0;
2151 /* TODO: un-hardcode */
2152
2153 if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2154 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2155 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2156 else
2157 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
2158
2159 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
2160 timing_out->display_color_depth = convert_color_depth_from_display_info(
2161 connector);
2162 timing_out->scan_type = SCANNING_TYPE_NODATA;
2163 timing_out->hdmi_vic = 0;
2164 timing_out->vic = drm_match_cea_mode(mode_in);
2165
2166 timing_out->h_addressable = mode_in->crtc_hdisplay;
2167 timing_out->h_total = mode_in->crtc_htotal;
2168 timing_out->h_sync_width =
2169 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
2170 timing_out->h_front_porch =
2171 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
2172 timing_out->v_total = mode_in->crtc_vtotal;
2173 timing_out->v_addressable = mode_in->crtc_vdisplay;
2174 timing_out->v_front_porch =
2175 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
2176 timing_out->v_sync_width =
2177 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
2178 timing_out->pix_clk_khz = mode_in->crtc_clock;
2179 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
2180 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
2181 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
2182 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
2183 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
2184
2185 stream->output_color_space = get_output_color_space(timing_out);
2186
2187 {
2188 struct dc_transfer_func *tf = dc_create_transfer_func();
2189
2190 tf->type = TF_TYPE_PREDEFINED;
2191 tf->tf = TRANSFER_FUNCTION_SRGB;
2192 stream->out_transfer_func = tf;
2193 }
2194 }
2195
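/* Copy the audio capabilities parsed from the sink's EDID (audio modes,
 * speaker allocation, latencies) into the stream's audio_info.
 */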
2196 static void fill_audio_info(struct audio_info *audio_info,
2197 const struct drm_connector *drm_connector,
2198 const struct dc_sink *dc_sink)
2199 {
2200 int i = 0;
2201 int cea_revision = 0;
2202 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
2203
2204 audio_info->manufacture_id = edid_caps->manufacturer_id;
2205 audio_info->product_id = edid_caps->product_id;
2206
2207 cea_revision = drm_connector->display_info.cea_rev;
2208
2209 strncpy(audio_info->display_name,
2210 edid_caps->display_name,
2211 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
2212
2213 if (cea_revision >= 3) {
2214 audio_info->mode_count = edid_caps->audio_mode_count;
2215
2216 for (i = 0; i < audio_info->mode_count; ++i) {
2217 audio_info->modes[i].format_code =
2218 (enum audio_format_code)
2219 (edid_caps->audio_modes[i].format_code);
2220 audio_info->modes[i].channel_count =
2221 edid_caps->audio_modes[i].channel_count;
2222 audio_info->modes[i].sample_rates.all =
2223 edid_caps->audio_modes[i].sample_rate;
2224 audio_info->modes[i].sample_size =
2225 edid_caps->audio_modes[i].sample_size;
2226 }
2227 }
2228
2229 audio_info->flags.all = edid_caps->speaker_flags;
2230
2231 /* TODO: We only check for the progressive mode, check for interlace mode too */
2232 if (drm_connector->latency_present[0]) {
2233 audio_info->video_latency = drm_connector->video_latency[0];
2234 audio_info->audio_latency = drm_connector->audio_latency[0];
2235 }
2236
2237 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2238
2239 }
2240
2241 static void
2242 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
2243 struct drm_display_mode *dst_mode)
2244 {
2245 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
2246 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
2247 dst_mode->crtc_clock = src_mode->crtc_clock;
2248 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
2249 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
2250 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
2251 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
2252 dst_mode->crtc_htotal = src_mode->crtc_htotal;
2253 dst_mode->crtc_hskew = src_mode->crtc_hskew;
2254 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
2255 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
2256 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
2257 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
2258 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
2259 }
2260
2261 static void
2262 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
2263 const struct drm_display_mode *native_mode,
2264 bool scale_enabled)
2265 {
2266 if (scale_enabled) {
2267 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2268 } else if (native_mode->clock == drm_mode->clock &&
2269 native_mode->htotal == drm_mode->htotal &&
2270 native_mode->vtotal == drm_mode->vtotal) {
2271 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2272 } else {
2273 /* neither scaling nor an amdgpu-inserted common mode, no need to patch */
2274 }
2275 }
2276
2277 static void create_fake_sink(struct amdgpu_dm_connector *aconnector)
2278 {
2279 struct dc_sink *sink = NULL;
2280 struct dc_sink_init_data sink_init_data = { 0 };
2281
2282 sink_init_data.link = aconnector->dc_link;
2283 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
2284
2285 sink = dc_sink_create(&sink_init_data);
2286 if (!sink) {
2287 DRM_ERROR("Failed to create sink!\n");
2288 return;
}

2289 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
2290 aconnector->fake_enable = true;
2291
2292 aconnector->dc_sink = sink;
2293 aconnector->dc_link->local_sink = sink;
2294 }
2295
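/* Create a DC stream for the connector's sink (creating a fake sink first if
 * none is present and the connector is not MST) and fill it from the
 * requested mode, patched against the preferred/native mode when scaling is
 * enabled, plus the connector's scaling settings and audio capabilities.
 */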
2296 static struct dc_stream_state *
2297 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2298 const struct drm_display_mode *drm_mode,
2299 const struct dm_connector_state *dm_state)
2300 {
2301 struct drm_display_mode *preferred_mode = NULL;
2302 const struct drm_connector *drm_connector;
2303 struct dc_stream_state *stream = NULL;
2304 struct drm_display_mode mode = *drm_mode;
2305 bool native_mode_found = false;
2306
2307 if (aconnector == NULL) {
2308 DRM_ERROR("aconnector is NULL!\n");
2309 goto drm_connector_null;
2310 }
2311
2312 if (dm_state == NULL) {
2313 DRM_ERROR("dm_state is NULL!\n");
2314 goto dm_state_null;
2315 }
2316
2317 drm_connector = &aconnector->base;
2318
2319 if (!aconnector->dc_sink) {
2320 /*
2321 * Exclude MST from creating fake_sink
2322 * TODO: enable the fake_sink feature for MST as well
2323 */
2324 if (aconnector->mst_port)
2325 goto stream_create_fail;
2326
2327 create_fake_sink(aconnector);
2328 }
2329
2330 stream = dc_create_stream_for_sink(aconnector->dc_sink);
2331
2332 if (stream == NULL) {
2333 DRM_ERROR("Failed to create stream for sink!\n");
2334 goto stream_create_fail;
2335 }
2336
2337 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
2338 /* Search for preferred mode */
2339 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
2340 native_mode_found = true;
2341 break;
2342 }
2343 }
2344 if (!native_mode_found)
2345 preferred_mode = list_first_entry_or_null(
2346 &aconnector->base.modes,
2347 struct drm_display_mode,
2348 head);
2349
2350 if (preferred_mode == NULL) {
2351 /* This may not be an error, the use case is when we have no
2352 * usermode calls to reset and set mode upon hotplug. In this
2353 * case, we call set mode ourselves to restore the previous mode
2354 * and the mode list may not be filled in yet.
2355 */
2356 DRM_DEBUG_DRIVER("No preferred mode found\n");
2357 } else {
2358 decide_crtc_timing_for_drm_display_mode(
2359 &mode, preferred_mode,
2360 dm_state->scaling != RMX_OFF);
2361 }
2362
2363 fill_stream_properties_from_drm_display_mode(stream,
2364 &mode, &aconnector->base);
2365 update_stream_scaling_settings(&mode, dm_state, stream);
2366
2367 fill_audio_info(
2368 &stream->audio_info,
2369 drm_connector,
2370 aconnector->dc_sink);
2371
2372 stream_create_fail:
2373 dm_state_null:
2374 drm_connector_null:
2375 return stream;
2376 }
2377
2378 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
2379 {
2380 drm_crtc_cleanup(crtc);
2381 kfree(crtc);
2382 }
2383
2384 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
2385 struct drm_crtc_state *state)
2386 {
2387 struct dm_crtc_state *cur = to_dm_crtc_state(state);
2388
2389 /* TODO Destroy dc_stream objects once the stream object is flattened */
2390 if (cur->stream)
2391 dc_stream_release(cur->stream);
2392
2393
2394 __drm_atomic_helper_crtc_destroy_state(state);
2395
2396
2397 kfree(state);
2398 }
2399
2400 static void dm_crtc_reset_state(struct drm_crtc *crtc)
2401 {
2402 struct dm_crtc_state *state;
2403
2404 if (crtc->state)
2405 dm_crtc_destroy_state(crtc, crtc->state);
2406
2407 state = kzalloc(sizeof(*state), GFP_KERNEL);
2408 if (WARN_ON(!state))
2409 return;
2410
2411 crtc->state = &state->base;
2412 crtc->state->crtc = crtc;
2413
2414 }
2415
2416 static struct drm_crtc_state *
2417 dm_crtc_duplicate_state(struct drm_crtc *crtc)
2418 {
2419 struct dm_crtc_state *state, *cur;
2420
2421 cur = to_dm_crtc_state(crtc->state);
2422
2423 if (WARN_ON(!crtc->state))
2424 return NULL;
2425
2426 state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
2427
2428 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
2429
2430 if (cur->stream) {
2431 state->stream = cur->stream;
2432 dc_stream_retain(state->stream);
2433 }
2434
2435 /* TODO Duplicate dc_stream once the stream object is flattened */
2436
2437 return &state->base;
2438 }
2439
2440 /* Implement only the options currently available for the driver */
2441 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
2442 .reset = dm_crtc_reset_state,
2443 .destroy = amdgpu_dm_crtc_destroy,
2444 .gamma_set = drm_atomic_helper_legacy_gamma_set,
2445 .set_config = drm_atomic_helper_set_config,
2446 .page_flip = drm_atomic_helper_page_flip,
2447 .atomic_duplicate_state = dm_crtc_duplicate_state,
2448 .atomic_destroy_state = dm_crtc_destroy_state,
2449 };
2450
2451 static enum drm_connector_status
2452 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
2453 {
2454 bool connected;
2455 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2456
2457 /* Notes:
2458 * 1. This interface is NOT called in context of HPD irq.
2459 * 2. This interface *is called* in the context of a user-mode ioctl, which
2460 * makes it a bad place for *any* MST-related activity. */
2461
2462 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
2463 !aconnector->fake_enable)
2464 connected = (aconnector->dc_sink != NULL);
2465 else
2466 connected = (aconnector->base.force == DRM_FORCE_ON);
2467
2468 return (connected ? connector_status_connected :
2469 connector_status_disconnected);
2470 }
2471
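/* Map the DRM scaling and underscan properties onto the dm_connector_state
 * that is later used to program stream scaling and underscan borders.
 */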
2472 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
2473 struct drm_connector_state *connector_state,
2474 struct drm_property *property,
2475 uint64_t val)
2476 {
2477 struct drm_device *dev = connector->dev;
2478 struct amdgpu_device *adev = dev->dev_private;
2479 struct dm_connector_state *dm_old_state =
2480 to_dm_connector_state(connector->state);
2481 struct dm_connector_state *dm_new_state =
2482 to_dm_connector_state(connector_state);
2483
2484 int ret = -EINVAL;
2485
2486 if (property == dev->mode_config.scaling_mode_property) {
2487 enum amdgpu_rmx_type rmx_type;
2488
2489 switch (val) {
2490 case DRM_MODE_SCALE_CENTER:
2491 rmx_type = RMX_CENTER;
2492 break;
2493 case DRM_MODE_SCALE_ASPECT:
2494 rmx_type = RMX_ASPECT;
2495 break;
2496 case DRM_MODE_SCALE_FULLSCREEN:
2497 rmx_type = RMX_FULL;
2498 break;
2499 case DRM_MODE_SCALE_NONE:
2500 default:
2501 rmx_type = RMX_OFF;
2502 break;
2503 }
2504
2505 if (dm_old_state->scaling == rmx_type)
2506 return 0;
2507
2508 dm_new_state->scaling = rmx_type;
2509 ret = 0;
2510 } else if (property == adev->mode_info.underscan_hborder_property) {
2511 dm_new_state->underscan_hborder = val;
2512 ret = 0;
2513 } else if (property == adev->mode_info.underscan_vborder_property) {
2514 dm_new_state->underscan_vborder = val;
2515 ret = 0;
2516 } else if (property == adev->mode_info.underscan_property) {
2517 dm_new_state->underscan_enable = val;
2518 ret = 0;
2519 }
2520
2521 return ret;
2522 }
2523
2524 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
2525 const struct drm_connector_state *state,
2526 struct drm_property *property,
2527 uint64_t *val)
2528 {
2529 struct drm_device *dev = connector->dev;
2530 struct amdgpu_device *adev = dev->dev_private;
2531 struct dm_connector_state *dm_state =
2532 to_dm_connector_state(state);
2533 int ret = -EINVAL;
2534
2535 if (property == dev->mode_config.scaling_mode_property) {
2536 switch (dm_state->scaling) {
2537 case RMX_CENTER:
2538 *val = DRM_MODE_SCALE_CENTER;
2539 break;
2540 case RMX_ASPECT:
2541 *val = DRM_MODE_SCALE_ASPECT;
2542 break;
2543 case RMX_FULL:
2544 *val = DRM_MODE_SCALE_FULLSCREEN;
2545 break;
2546 case RMX_OFF:
2547 default:
2548 *val = DRM_MODE_SCALE_NONE;
2549 break;
2550 }
2551 ret = 0;
2552 } else if (property == adev->mode_info.underscan_hborder_property) {
2553 *val = dm_state->underscan_hborder;
2554 ret = 0;
2555 } else if (property == adev->mode_info.underscan_vborder_property) {
2556 *val = dm_state->underscan_vborder;
2557 ret = 0;
2558 } else if (property == adev->mode_info.underscan_property) {
2559 *val = dm_state->underscan_enable;
2560 ret = 0;
2561 }
2562 return ret;
2563 }
2564
2565 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
2566 {
2567 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2568 const struct dc_link *link = aconnector->dc_link;
2569 struct amdgpu_device *adev = connector->dev->dev_private;
2570 struct amdgpu_display_manager *dm = &adev->dm;
2571 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2572 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2573
2574 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
2575 amdgpu_dm_register_backlight_device(dm);
2576
2577 if (dm->backlight_dev) {
2578 backlight_device_unregister(dm->backlight_dev);
2579 dm->backlight_dev = NULL;
2580 }
2581
2582 }
2583 #endif
2584 drm_connector_unregister(connector);
2585 drm_connector_cleanup(connector);
2586 kfree(connector);
2587 }
2588
2589 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
2590 {
2591 struct dm_connector_state *state =
2592 to_dm_connector_state(connector->state);
2593
2594 kfree(state);
2595
2596 state = kzalloc(sizeof(*state), GFP_KERNEL);
2597
2598 if (state) {
2599 state->scaling = RMX_OFF;
2600 state->underscan_enable = false;
2601 state->underscan_hborder = 0;
2602 state->underscan_vborder = 0;
2603
2604 connector->state = &state->base;
2605 connector->state->connector = connector;
2606 }
2607 }
2608
2609 struct drm_connector_state *
2610 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
2611 {
2612 struct dm_connector_state *state =
2613 to_dm_connector_state(connector->state);
2614
2615 struct dm_connector_state *new_state =
2616 kmemdup(state, sizeof(*state), GFP_KERNEL);
2617
2618 if (new_state) {
2619 __drm_atomic_helper_connector_duplicate_state(connector,
2620 &new_state->base);
2621 return &new_state->base;
2622 }
2623
2624 return NULL;
2625 }
2626
2627 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
2628 .reset = amdgpu_dm_connector_funcs_reset,
2629 .detect = amdgpu_dm_connector_detect,
2630 .fill_modes = drm_helper_probe_single_connector_modes,
2631 .destroy = amdgpu_dm_connector_destroy,
2632 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
2633 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
2634 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
2635 .atomic_get_property = amdgpu_dm_connector_atomic_get_property
2636 };
2637
2638 static struct drm_encoder *best_encoder(struct drm_connector *connector)
2639 {
2640 int enc_id = connector->encoder_ids[0];
2641 struct drm_mode_object *obj;
2642 struct drm_encoder *encoder;
2643
2644 DRM_DEBUG_DRIVER("Finding the best encoder\n");
2645
2646 /* pick the encoder ids */
2647 if (enc_id) {
2648 obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
2649 if (!obj) {
2650 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2651 return NULL;
2652 }
2653 encoder = obj_to_encoder(obj);
2654 return encoder;
2655 }
2656 DRM_ERROR("No encoder id\n");
2657 return NULL;
2658 }
2659
2660 static int get_modes(struct drm_connector *connector)
2661 {
2662 return amdgpu_dm_connector_get_modes(connector);
2663 }
2664
2665 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
2666 {
2667 struct dc_sink_init_data init_params = {
2668 .link = aconnector->dc_link,
2669 .sink_signal = SIGNAL_TYPE_VIRTUAL
2670 };
2671 struct edid *edid;
2672
2673 if (!aconnector->base.edid_blob_ptr ||
2674 !aconnector->base.edid_blob_ptr->data) {
2675 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
2676 aconnector->base.name);
2677
2678 aconnector->base.force = DRM_FORCE_OFF;
2679 aconnector->base.override_edid = false;
2680 return;
2681 }
2682
2683 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
aconnector->edid = edid;
2684
2685 aconnector->dc_em_sink = dc_link_add_remote_sink(
2686 aconnector->dc_link,
2687 (uint8_t *)edid,
2688 (edid->extensions + 1) * EDID_LENGTH,
2689 &init_params);
2690
2691 if (aconnector->base.force == DRM_FORCE_ON)
2692 aconnector->dc_sink = aconnector->dc_link->local_sink ?
2693 aconnector->dc_link->local_sink :
2694 aconnector->dc_em_sink;
2695 }
2696
2697 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
2698 {
2699 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
2700
2701 /* In case of a headless boot with force on for a DP managed connector,
2702 * those settings have to be != 0 to get an initial modeset
2703 */
2704 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
2705 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
2706 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
2707 }
2708
2709
2710 aconnector->base.override_edid = true;
2711 create_eml_sink(aconnector);
2712 }
2713
2714 int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
2715 struct drm_display_mode *mode)
2716 {
2717 int result = MODE_ERROR;
2718 struct dc_sink *dc_sink;
2719 struct amdgpu_device *adev = connector->dev->dev_private;
2720 /* TODO: Unhardcode stream count */
2721 struct dc_stream_state *stream;
2722 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2723
2724 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
2725 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
2726 return result;
2727
2728 /* Only run this the first time mode_valid is called to initialize
2729 * EDID mgmt
2730 */
2731 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
2732 !aconnector->dc_em_sink)
2733 handle_edid_mgmt(aconnector);
2734
2735 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
2736
2737 if (dc_sink == NULL) {
2738 DRM_ERROR("dc_sink is NULL!\n");
2739 goto fail;
2740 }
2741
2742 stream = dc_create_stream_for_sink(dc_sink);
2743 if (stream == NULL) {
2744 DRM_ERROR("Failed to create stream for sink!\n");
2745 goto fail;
2746 }
2747
2748 drm_mode_set_crtcinfo(mode, 0);
2749 fill_stream_properties_from_drm_display_mode(stream, mode, connector);
2750
2751 stream->src.width = mode->hdisplay;
2752 stream->src.height = mode->vdisplay;
2753 stream->dst = stream->src;
2754
2755 if (dc_validate_stream(adev->dm.dc, stream) == DC_OK)
2756 result = MODE_OK;
2757
2758 dc_stream_release(stream);
2759
2760 fail:
2761 /* TODO: error handling*/
2762 return result;
2763 }
2764
2765 static const struct drm_connector_helper_funcs
2766 amdgpu_dm_connector_helper_funcs = {
2767 /*
2768 * If a second, bigger display is hotplugged in fbcon mode, the bigger
2769 * resolution modes are filtered out by drm_mode_validate_size() and are
2770 * missing after the user starts lightdm. So we need to renew the modes
2771 * list in the get_modes callback, not just return the modes count
2772 */
2773 .get_modes = get_modes,
2774 .mode_valid = amdgpu_dm_connector_mode_valid,
2775 .best_encoder = best_encoder
2776 };
2777
2778 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
2779 {
2780 }
2781
2782 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
2783 struct drm_crtc_state *state)
2784 {
2785 struct amdgpu_device *adev = crtc->dev->dev_private;
2786 struct dc *dc = adev->dm.dc;
2787 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
2788 int ret = -EINVAL;
2789
2790 if (unlikely(!dm_crtc_state->stream &&
2791 modeset_required(state, NULL, dm_crtc_state->stream))) {
2792 WARN_ON(1);
2793 return ret;
2794 }
2795
2796 /* In some use cases, like reset, no stream is attached */
2797 if (!dm_crtc_state->stream)
2798 return 0;
2799
2800 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
2801 return 0;
2802
2803 return ret;
2804 }
2805
2806 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
2807 const struct drm_display_mode *mode,
2808 struct drm_display_mode *adjusted_mode)
2809 {
2810 return true;
2811 }
2812
2813 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
2814 .disable = dm_crtc_helper_disable,
2815 .atomic_check = dm_crtc_helper_atomic_check,
2816 .mode_fixup = dm_crtc_helper_mode_fixup
2817 };
2818
2819 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
2820 {
2821
2822 }
2823
2824 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
2825 struct drm_crtc_state *crtc_state,
2826 struct drm_connector_state *conn_state)
2827 {
2828 return 0;
2829 }
2830
2831 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
2832 .disable = dm_encoder_helper_disable,
2833 .atomic_check = dm_encoder_helper_atomic_check
2834 };
2835
2836 static void dm_drm_plane_reset(struct drm_plane *plane)
2837 {
2838 struct dm_plane_state *amdgpu_state = NULL;
2839
2840 if (plane->state)
2841 plane->funcs->atomic_destroy_state(plane, plane->state);
2842
2843 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
2844 WARN_ON(amdgpu_state == NULL);
2845
2846 if (amdgpu_state) {
2847 plane->state = &amdgpu_state->base;
2848 plane->state->plane = plane;
2849 plane->state->rotation = DRM_MODE_ROTATE_0;
2850 }
2851 }
2852
2853 static struct drm_plane_state *
2854 dm_drm_plane_duplicate_state(struct drm_plane *plane)
2855 {
2856 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
2857
2858 old_dm_plane_state = to_dm_plane_state(plane->state);
2859 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
2860 if (!dm_plane_state)
2861 return NULL;
2862
2863 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
2864
2865 if (old_dm_plane_state->dc_state) {
2866 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
2867 dc_plane_state_retain(dm_plane_state->dc_state);
2868 }
2869
2870 return &dm_plane_state->base;
2871 }
2872
2873 void dm_drm_plane_destroy_state(struct drm_plane *plane,
2874 struct drm_plane_state *state)
2875 {
2876 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
2877
2878 if (dm_plane_state->dc_state)
2879 dc_plane_state_release(dm_plane_state->dc_state);
2880
2881 drm_atomic_helper_plane_destroy_state(plane, state);
2882 }
2883
2884 static const struct drm_plane_funcs dm_plane_funcs = {
2885 .update_plane = drm_atomic_helper_update_plane,
2886 .disable_plane = drm_atomic_helper_disable_plane,
2887 .destroy = drm_plane_cleanup,
2888 .reset = dm_drm_plane_reset,
2889 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
2890 .atomic_destroy_state = dm_drm_plane_destroy_state,
2891 };
2892
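/* Pin the framebuffer BO into VRAM and, if the DC plane state changed,
 * program the new surface address: a single graphics address, or separate
 * luma/chroma addresses for NV12-style video surfaces.
 */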
2893 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
2894 struct drm_plane_state *new_state)
2895 {
2896 struct amdgpu_framebuffer *afb;
2897 struct drm_gem_object *obj;
2898 struct amdgpu_bo *rbo;
2899 uint64_t chroma_addr = 0;
2900 int r;
2901 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
2902 unsigned int awidth;
2903
2904 dm_plane_state_old = to_dm_plane_state(plane->state);
2905 dm_plane_state_new = to_dm_plane_state(new_state);
2906
2907 if (!new_state->fb) {
2908 DRM_DEBUG_DRIVER("No FB bound\n");
2909 return 0;
2910 }
2911
2912 afb = to_amdgpu_framebuffer(new_state->fb);
2913
2914 obj = afb->obj;
2915 rbo = gem_to_amdgpu_bo(obj);
2916 r = amdgpu_bo_reserve(rbo, false);
2917 if (unlikely(r != 0))
2918 return r;
2919
2920 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);
2921
2922
2923 amdgpu_bo_unreserve(rbo);
2924
2925 if (unlikely(r != 0)) {
2926 if (r != -ERESTARTSYS)
2927 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
2928 return r;
2929 }
2930
2931 amdgpu_bo_ref(rbo);
2932
2933 if (dm_plane_state_new->dc_state &&
2934 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
2935 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
2936
2937 if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2938 plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
2939 plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
2940 } else {
2941 awidth = ALIGN(new_state->fb->width, 64);
2942 plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
2943 plane_state->address.video_progressive.luma_addr.low_part
2944 = lower_32_bits(afb->address);
2945 plane_state->address.video_progressive.luma_addr.high_part
2946 = upper_32_bits(afb->address);
2947 chroma_addr = afb->address + (u64)(awidth * new_state->fb->height);
2948 plane_state->address.video_progressive.chroma_addr.low_part
2949 = lower_32_bits(chroma_addr);
2950 plane_state->address.video_progressive.chroma_addr.high_part
2951 = upper_32_bits(chroma_addr);
2952 }
2953 }
2954
2955 /* It's a hack for S3: the 4.9 kernel filters the cursor buffer out of
2956 * prepare and cleanup in drm_atomic_helper_prepare_planes and
2957 * drm_atomic_helper_cleanup_planes because the fb doesn't exist in S3.
2958 * In the 4.10 kernel this code should be removed, and amdgpu_device_suspend
2959 * code touching frame buffers should be avoided for DC.
2960 */
2961 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
2962 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);
2963
2964 acrtc->cursor_bo = obj;
2965 }
2966 return 0;
2967 }
2968
2969 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
2970 struct drm_plane_state *old_state)
2971 {
2972 struct amdgpu_bo *rbo;
2973 struct amdgpu_framebuffer *afb;
2974 int r;
2975
2976 if (!old_state->fb)
2977 return;
2978
2979 afb = to_amdgpu_framebuffer(old_state->fb);
2980 rbo = gem_to_amdgpu_bo(afb->obj);
2981 r = amdgpu_bo_reserve(rbo, false);
2982 if (unlikely(r)) {
2983 DRM_ERROR("failed to reserve rbo before unpin\n");
2984 return;
2985 }
2986
2987 amdgpu_bo_unpin(rbo);
2988 amdgpu_bo_unreserve(rbo);
2989 amdgpu_bo_unref(&rbo);
2990 }
2991
2992 static int dm_plane_atomic_check(struct drm_plane *plane,
2993 struct drm_plane_state *state)
2994 {
2995 struct amdgpu_device *adev = plane->dev->dev_private;
2996 struct dc *dc = adev->dm.dc;
2997 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
2998
2999 if (!dm_plane_state->dc_state)
3000 return 0;
3001
3002 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3003 return 0;
3004
3005 return -EINVAL;
3006 }
3007
3008 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
3009 .prepare_fb = dm_plane_helper_prepare_fb,
3010 .cleanup_fb = dm_plane_helper_cleanup_fb,
3011 .atomic_check = dm_plane_atomic_check,
3012 };
3013
3014 /*
3015 * TODO: these are currently initialized to rgb formats only.
3016 * For future use cases we should either initialize them dynamically based on
3017 * plane capabilities, or initialize this array to all formats, so internal drm
3018 * check will succeed, and let DC to implement proper check
3019 */
3020 static const uint32_t rgb_formats[] = {
3021 DRM_FORMAT_RGB888,
3022 DRM_FORMAT_XRGB8888,
3023 DRM_FORMAT_ARGB8888,
3024 DRM_FORMAT_RGBA8888,
3025 DRM_FORMAT_XRGB2101010,
3026 DRM_FORMAT_XBGR2101010,
3027 DRM_FORMAT_ARGB2101010,
3028 DRM_FORMAT_ABGR2101010,
3029 };
3030
3031 static const uint32_t yuv_formats[] = {
3032 DRM_FORMAT_NV12,
3033 DRM_FORMAT_NV21,
3034 };
3035
3036 static const u32 cursor_formats[] = {
3037 DRM_FORMAT_ARGB8888
3038 };
3039
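/* Register the plane with DRM using a format list that matches its type:
 * RGB formats for primary planes, YUV for overlays, ARGB8888 for cursors.
 */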
3040 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
3041 struct amdgpu_plane *aplane,
3042 unsigned long possible_crtcs)
3043 {
3044 int res = -EPERM;
3045
3046 switch (aplane->base.type) {
3047 case DRM_PLANE_TYPE_PRIMARY:
3048 aplane->base.format_default = true;
3049
3050 res = drm_universal_plane_init(
3051 dm->adev->ddev,
3052 &aplane->base,
3053 possible_crtcs,
3054 &dm_plane_funcs,
3055 rgb_formats,
3056 ARRAY_SIZE(rgb_formats),
3057 NULL, aplane->base.type, NULL);
3058 break;
3059 case DRM_PLANE_TYPE_OVERLAY:
3060 res = drm_universal_plane_init(
3061 dm->adev->ddev,
3062 &aplane->base,
3063 possible_crtcs,
3064 &dm_plane_funcs,
3065 yuv_formats,
3066 ARRAY_SIZE(yuv_formats),
3067 NULL, aplane->base.type, NULL);
3068 break;
3069 case DRM_PLANE_TYPE_CURSOR:
3070 res = drm_universal_plane_init(
3071 dm->adev->ddev,
3072 &aplane->base,
3073 possible_crtcs,
3074 &dm_plane_funcs,
3075 cursor_formats,
3076 ARRAY_SIZE(cursor_formats),
3077 NULL, aplane->base.type, NULL);
3078 break;
3079 }
3080
3081 drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
3082
3083 return res;
3084 }
3085
3086 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
3087 struct drm_plane *plane,
3088 uint32_t crtc_index)
3089 {
3090 struct amdgpu_crtc *acrtc = NULL;
3091 struct amdgpu_plane *cursor_plane;
3092
3093 int res = -ENOMEM;
3094
3095 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
3096 if (!cursor_plane)
3097 goto fail;
3098
3099 cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
3100 res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
3101
3102 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
3103 if (!acrtc)
3104 goto fail;
3105
3106 res = drm_crtc_init_with_planes(
3107 dm->ddev,
3108 &acrtc->base,
3109 plane,
3110 &cursor_plane->base,
3111 &amdgpu_dm_crtc_funcs, NULL);
3112
3113 if (res)
3114 goto fail;
3115
3116 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
3117
3118 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
3119 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
3120
3121 acrtc->crtc_id = crtc_index;
3122 acrtc->base.enabled = false;
3123
3124 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
3125 drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
3126
3127 return 0;
3128
3129 fail:
3130 kfree(acrtc);
3131 kfree(cursor_plane);
3132 return res;
3133 }
3134
3135
3136 static int to_drm_connector_type(enum signal_type st)
3137 {
3138 switch (st) {
3139 case SIGNAL_TYPE_HDMI_TYPE_A:
3140 return DRM_MODE_CONNECTOR_HDMIA;
3141 case SIGNAL_TYPE_EDP:
3142 return DRM_MODE_CONNECTOR_eDP;
3143 case SIGNAL_TYPE_RGB:
3144 return DRM_MODE_CONNECTOR_VGA;
3145 case SIGNAL_TYPE_DISPLAY_PORT:
3146 case SIGNAL_TYPE_DISPLAY_PORT_MST:
3147 return DRM_MODE_CONNECTOR_DisplayPort;
3148 case SIGNAL_TYPE_DVI_DUAL_LINK:
3149 case SIGNAL_TYPE_DVI_SINGLE_LINK:
3150 return DRM_MODE_CONNECTOR_DVID;
3151 case SIGNAL_TYPE_VIRTUAL:
3152 return DRM_MODE_CONNECTOR_VIRTUAL;
3153
3154 default:
3155 return DRM_MODE_CONNECTOR_Unknown;
3156 }
3157 }
3158
3159 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
3160 {
3161 const struct drm_connector_helper_funcs *helper =
3162 connector->helper_private;
3163 struct drm_encoder *encoder;
3164 struct amdgpu_encoder *amdgpu_encoder;
3165
3166 encoder = helper->best_encoder(connector);
3167
3168 if (encoder == NULL)
3169 return;
3170
3171 amdgpu_encoder = to_amdgpu_encoder(encoder);
3172
3173 amdgpu_encoder->native_mode.clock = 0;
3174
3175 if (!list_empty(&connector->probed_modes)) {
3176 struct drm_display_mode *preferred_mode = NULL;
3177
3178 list_for_each_entry(preferred_mode,
3179 &connector->probed_modes,
3180 head) {
3181 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
3182 amdgpu_encoder->native_mode = *preferred_mode;
3183
3184 break;
3185 }
3186
3187 }
3188 }
3189
3190 static struct drm_display_mode *
3191 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
3192 char *name,
3193 int hdisplay, int vdisplay)
3194 {
3195 struct drm_device *dev = encoder->dev;
3196 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3197 struct drm_display_mode *mode = NULL;
3198 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3199
3200 mode = drm_mode_duplicate(dev, native_mode);
3201
3202 if (mode == NULL)
3203 return NULL;
3204
3205 mode->hdisplay = hdisplay;
3206 mode->vdisplay = vdisplay;
3207 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
3208 strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
3209
3210 return mode;
3211
3212 }
3213
3214 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3215 struct drm_connector *connector)
3216 {
3217 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3218 struct drm_display_mode *mode = NULL;
3219 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3220 struct amdgpu_dm_connector *amdgpu_dm_connector =
3221 to_amdgpu_dm_connector(connector);
3222 int i;
3223 int n;
3224 struct mode_size {
3225 char name[DRM_DISPLAY_MODE_LEN];
3226 int w;
3227 int h;
3228 } common_modes[] = {
3229 { "640x480", 640, 480},
3230 { "800x600", 800, 600},
3231 { "1024x768", 1024, 768},
3232 { "1280x720", 1280, 720},
3233 { "1280x800", 1280, 800},
3234 {"1280x1024", 1280, 1024},
3235 { "1440x900", 1440, 900},
3236 {"1680x1050", 1680, 1050},
3237 {"1600x1200", 1600, 1200},
3238 {"1920x1080", 1920, 1080},
3239 {"1920x1200", 1920, 1200}
3240 };
3241
3242 n = ARRAY_SIZE(common_modes);
3243
3244 for (i = 0; i < n; i++) {
3245 struct drm_display_mode *curmode = NULL;
3246 bool mode_existed = false;
3247
3248 if (common_modes[i].w > native_mode->hdisplay ||
3249 common_modes[i].h > native_mode->vdisplay ||
3250 (common_modes[i].w == native_mode->hdisplay &&
3251 common_modes[i].h == native_mode->vdisplay))
3252 continue;
3253
3254 list_for_each_entry(curmode, &connector->probed_modes, head) {
3255 if (common_modes[i].w == curmode->hdisplay &&
3256 common_modes[i].h == curmode->vdisplay) {
3257 mode_existed = true;
3258 break;
3259 }
3260 }
3261
3262 if (mode_existed)
3263 continue;
3264
3265 mode = amdgpu_dm_create_common_mode(encoder,
3266 common_modes[i].name, common_modes[i].w,
3267 common_modes[i].h);
3268 drm_mode_probed_add(connector, mode);
3269 amdgpu_dm_connector->num_modes++;
3270 }
3271 }
3272
3273 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
3274 struct edid *edid)
3275 {
3276 struct amdgpu_dm_connector *amdgpu_dm_connector =
3277 to_amdgpu_dm_connector(connector);
3278
3279 if (edid) {
3280 /* empty probed_modes */
3281 INIT_LIST_HEAD(&connector->probed_modes);
3282 amdgpu_dm_connector->num_modes =
3283 drm_add_edid_modes(connector, edid);
3284
3285 drm_edid_to_eld(connector, edid);
3286
3287 amdgpu_dm_get_native_mode(connector);
3288 } else {
3289 amdgpu_dm_connector->num_modes = 0;
3290 }
3291 }
3292
3293 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
3294 {
3295 const struct drm_connector_helper_funcs *helper =
3296 connector->helper_private;
3297 struct amdgpu_dm_connector *amdgpu_dm_connector =
3298 to_amdgpu_dm_connector(connector);
3299 struct drm_encoder *encoder;
3300 struct edid *edid = amdgpu_dm_connector->edid;
3301
3302 encoder = helper->best_encoder(connector);
3303
3304 amdgpu_dm_connector_ddc_get_modes(connector, edid);
3305 amdgpu_dm_connector_add_common_modes(encoder, connector);
3306 return amdgpu_dm_connector->num_modes;
3307 }
3308
3309 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
3310 struct amdgpu_dm_connector *aconnector,
3311 int connector_type,
3312 struct dc_link *link,
3313 int link_index)
3314 {
3315 struct amdgpu_device *adev = dm->ddev->dev_private;
3316
3317 aconnector->connector_id = link_index;
3318 aconnector->dc_link = link;
3319 aconnector->base.interlace_allowed = false;
3320 aconnector->base.doublescan_allowed = false;
3321 aconnector->base.stereo_allowed = false;
3322 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
3323 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
3324
3325 mutex_init(&aconnector->hpd_lock);
3326
3327 /* Configure HPD hot plug support: connector->polled defaults to 0,
3328 * which means HPD hot plug is not supported
3329 */
3330 switch (connector_type) {
3331 case DRM_MODE_CONNECTOR_HDMIA:
3332 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3333 break;
3334 case DRM_MODE_CONNECTOR_DisplayPort:
3335 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3336 break;
3337 case DRM_MODE_CONNECTOR_DVID:
3338 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3339 break;
3340 default:
3341 break;
3342 }
3343
3344 drm_object_attach_property(&aconnector->base.base,
3345 dm->ddev->mode_config.scaling_mode_property,
3346 DRM_MODE_SCALE_NONE);
3347
3348 drm_object_attach_property(&aconnector->base.base,
3349 adev->mode_info.underscan_property,
3350 UNDERSCAN_OFF);
3351 drm_object_attach_property(&aconnector->base.base,
3352 adev->mode_info.underscan_hborder_property,
3353 0);
3354 drm_object_attach_property(&aconnector->base.base,
3355 adev->mode_info.underscan_vborder_property,
3356 0);
3357
3358 }
3359
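/* I2C transfer callback: repackage the i2c_msg array as a DC i2c_command
 * and submit it over the link's DDC pin through the i2caux layer.
 */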
3360 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
3361 struct i2c_msg *msgs, int num)
3362 {
3363 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
3364 struct ddc_service *ddc_service = i2c->ddc_service;
3365 struct i2c_command cmd;
3366 int i;
3367 int result = -EIO;
3368
3369 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
3370
3371 if (!cmd.payloads)
3372 return result;
3373
3374 cmd.number_of_payloads = num;
3375 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
3376 cmd.speed = 100;
3377
3378 for (i = 0; i < num; i++) {
3379 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
3380 cmd.payloads[i].address = msgs[i].addr;
3381 cmd.payloads[i].length = msgs[i].len;
3382 cmd.payloads[i].data = msgs[i].buf;
3383 }
3384
3385 if (dal_i2caux_submit_i2c_command(
3386 ddc_service->ctx->i2caux,
3387 ddc_service->ddc_pin,
3388 &cmd))
3389 result = num;
3390
3391 kfree(cmd.payloads);
3392 return result;
3393 }
3394
3395 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
3396 {
3397 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
3398 }
3399
3400 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
3401 .master_xfer = amdgpu_dm_i2c_xfer,
3402 .functionality = amdgpu_dm_i2c_func,
3403 };
3404
3405 static struct amdgpu_i2c_adapter *
3406 create_i2c(struct ddc_service *ddc_service,
3407 int link_index,
3408 int *res)
3409 {
3410 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
3411 struct amdgpu_i2c_adapter *i2c;
3412
3413 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
if (!i2c)
return NULL;
3414 i2c->base.owner = THIS_MODULE;
3415 i2c->base.class = I2C_CLASS_DDC;
3416 i2c->base.dev.parent = &adev->pdev->dev;
3417 i2c->base.algo = &amdgpu_dm_i2c_algo;
3418 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
3419 i2c_set_adapdata(&i2c->base, i2c);
3420 i2c->ddc_service = ddc_service;
3421
3422 return i2c;
3423 }
3424
3425 /* Note: this function assumes that dc_link_detect() was called for the
3426 * dc_link which will be represented by this aconnector.
3427 */
3428 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
3429 struct amdgpu_dm_connector *aconnector,
3430 uint32_t link_index,
3431 struct amdgpu_encoder *aencoder)
3432 {
3433 int res = 0;
3434 int connector_type;
3435 struct dc *dc = dm->dc;
3436 struct dc_link *link = dc_get_link_at_index(dc, link_index);
3437 struct amdgpu_i2c_adapter *i2c;
3438
3439 link->priv = aconnector;
3440
3441 DRM_DEBUG_DRIVER("%s()\n", __func__);
3442
3443 i2c = create_i2c(link->ddc, link->link_index, &res);
if (!i2c) {
DRM_ERROR("Failed to create i2c adapter data\n");
return -ENOMEM;
}
3444 aconnector->i2c = i2c;
3445 res = i2c_add_adapter(&i2c->base);
3446
3447 if (res) {
3448 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
3449 goto out_free;
3450 }
3451
3452 connector_type = to_drm_connector_type(link->connector_signal);
3453
3454 res = drm_connector_init(
3455 dm->ddev,
3456 &aconnector->base,
3457 &amdgpu_dm_connector_funcs,
3458 connector_type);
3459
3460 if (res) {
3461 DRM_ERROR("connector_init failed\n");
3462 aconnector->connector_id = -1;
3463 goto out_free;
3464 }
3465
3466 drm_connector_helper_add(
3467 &aconnector->base,
3468 &amdgpu_dm_connector_helper_funcs);
3469
3470 amdgpu_dm_connector_init_helper(
3471 dm,
3472 aconnector,
3473 connector_type,
3474 link,
3475 link_index);
3476
3477 drm_mode_connector_attach_encoder(
3478 &aconnector->base, &aencoder->base);
3479
3480 drm_connector_register(&aconnector->base);
3481
3482 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
3483 || connector_type == DRM_MODE_CONNECTOR_eDP)
3484 amdgpu_dm_initialize_dp_connector(dm, aconnector);
3485
3486 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3487 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3488
3489 /* NOTE: this currently will create backlight device even if a panel
3490 * is not connected to the eDP/LVDS connector.
3491 *
3492 * This is less than ideal but we don't have sink information at this
3493 * stage since detection happens after. We can't do detection earlier
3494 * since MST detection needs connectors to be created first.
3495 */
3496 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
3497 /* Even if registration fails, we should continue with
3498 * DM initialization because not having a backlight control
3499 * is better than a black screen.
3500 */
3501 amdgpu_dm_register_backlight_device(dm);
3502
3503 if (dm->backlight_dev)
3504 dm->backlight_link = link;
3505 }
3506 #endif
3507
3508 out_free:
3509 if (res) {
3510 kfree(i2c);
3511 aconnector->i2c = NULL;
3512 }
3513 return res;
3514 }
3515
3516 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
3517 {
3518 switch (adev->mode_info.num_crtc) {
3519 case 1:
3520 return 0x1;
3521 case 2:
3522 return 0x3;
3523 case 3:
3524 return 0x7;
3525 case 4:
3526 return 0xf;
3527 case 5:
3528 return 0x1f;
3529 case 6:
3530 default:
3531 return 0x3f;
3532 }
3533 }
3534
3535 static int amdgpu_dm_encoder_init(struct drm_device *dev,
3536 struct amdgpu_encoder *aencoder,
3537 uint32_t link_index)
3538 {
3539 struct amdgpu_device *adev = dev->dev_private;
3540
3541 int res = drm_encoder_init(dev,
3542 &aencoder->base,
3543 &amdgpu_dm_encoder_funcs,
3544 DRM_MODE_ENCODER_TMDS,
3545 NULL);
3546
3547 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
3548
3549 if (!res)
3550 aencoder->encoder_id = link_index;
3551 else
3552 aencoder->encoder_id = -1;
3553
3554 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
3555
3556 return res;
3557 }
3558
3559 static void manage_dm_interrupts(struct amdgpu_device *adev,
3560 struct amdgpu_crtc *acrtc,
3561 bool enable)
3562 {
3563 /*
3564 * this is not a correct translation, but it works as long as the VBLANK
3565 * constant is the same as PFLIP
3566 */
3567 int irq_type =
3568 amdgpu_crtc_idx_to_irq_type(
3569 adev,
3570 acrtc->crtc_id);
3571
3572 if (enable) {
3573 drm_crtc_vblank_on(&acrtc->base);
3574 amdgpu_irq_get(
3575 adev,
3576 &adev->pageflip_irq,
3577 irq_type);
3578 } else {
3579
3580 amdgpu_irq_put(
3581 adev,
3582 &adev->pageflip_irq,
3583 irq_type);
3584 drm_crtc_vblank_off(&acrtc->base);
3585 }
3586 }
3587
3588 static bool
3589 is_scaling_state_different(const struct dm_connector_state *dm_state,
3590 const struct dm_connector_state *old_dm_state)
3591 {
3592 if (dm_state->scaling != old_dm_state->scaling)
3593 return true;
3594 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
3595 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
3596 return true;
3597 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
3598 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
3599 return true;
3600 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
3601 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
3602 return true;
3603 return false;
3604 }
3605
3606 static void remove_stream(struct amdgpu_device *adev,
3607 struct amdgpu_crtc *acrtc,
3608 struct dc_stream_state *stream)
3609 {
3610 /* this is the update mode case */
3611 if (adev->dm.freesync_module)
3612 mod_freesync_remove_stream(adev->dm.freesync_module, stream);
3613
3614 acrtc->otg_inst = -1;
3615 acrtc->enabled = false;
3616 }
3617
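/* Compute the DC cursor position from the cursor plane state, clamping
 * negative coordinates into the hotspot offsets.
 */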
3618 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
3619 struct dc_cursor_position *position)
3620 {
3621 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3622 int x, y;
3623 int xorigin = 0, yorigin = 0;
3624
3625 if (!crtc || !plane->state->fb) {
3626 position->enable = false;
3627 position->x = 0;
3628 position->y = 0;
3629 return 0;
3630 }
3631
3632 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
3633 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
3634 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3635 __func__,
3636 plane->state->crtc_w,
3637 plane->state->crtc_h);
3638 return -EINVAL;
3639 }
3640
3641 x = plane->state->crtc_x;
3642 y = plane->state->crtc_y;
3643 /* avivo cursors are offset into the total surface */
3644 x += crtc->primary->state->src_x >> 16;
3645 y += crtc->primary->state->src_y >> 16;
3646 if (x < 0) {
3647 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
3648 x = 0;
3649 }
3650 if (y < 0) {
3651 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
3652 y = 0;
3653 }
3654 position->enable = true;
3655 position->x = x;
3656 position->y = y;
3657 position->x_hotspot = xorigin;
3658 position->y_hotspot = yorigin;
3659
3660 return 0;
3661 }
3662
3663 static void handle_cursor_update(struct drm_plane *plane,
3664 struct drm_plane_state *old_plane_state)
3665 {
3666 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
3667 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
3668 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
3669 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3670 uint64_t address = afb ? afb->address : 0;
3671 struct dc_cursor_position position;
3672 struct dc_cursor_attributes attributes;
3673 int ret;
3674
3675 if (!plane->state->fb && !old_plane_state->fb)
3676 return;
3677
3678 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
3679 __func__,
3680 amdgpu_crtc->crtc_id,
3681 plane->state->crtc_w,
3682 plane->state->crtc_h);
3683
3684 ret = get_cursor_position(plane, crtc, &position);
3685 if (ret)
3686 return;
3687
3688 if (!position.enable) {
3689 /* turn off cursor */
3690 if (crtc_state && crtc_state->stream)
3691 dc_stream_set_cursor_position(crtc_state->stream,
3692 &position);
3693 return;
3694 }
3695
3696 amdgpu_crtc->cursor_width = plane->state->crtc_w;
3697 amdgpu_crtc->cursor_height = plane->state->crtc_h;
3698
3699 attributes.address.high_part = upper_32_bits(address);
3700 attributes.address.low_part = lower_32_bits(address);
3701 attributes.width = plane->state->crtc_w;
3702 attributes.height = plane->state->crtc_h;
3703 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
3704 attributes.rotation_angle = 0;
3705 attributes.attribute_flags.value = 0;
3706
3707 attributes.pitch = attributes.width;
3708
3709 if (crtc_state->stream) {
3710 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
3711 &attributes))
3712 DRM_ERROR("DC failed to set cursor attributes\n");
3713
3714 if (!dc_stream_set_cursor_position(crtc_state->stream,
3715 &position))
3716 DRM_ERROR("DC failed to set cursor position\n");
3717 }
3718 }
3719
3720 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
3721 {
3722
3723 assert_spin_locked(&acrtc->base.dev->event_lock);
3724 WARN_ON(acrtc->event);
3725
3726 acrtc->event = acrtc->base.state->event;
3727
3728 /* Set the flip status */
3729 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
3730
3731 /* Mark this event as consumed */
3732 acrtc->base.state->event = NULL;
3733
3734 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
3735 acrtc->crtc_id);
3736 }
3737
3738 /*
3739 * Executes flip
3740 *
3741 * Waits on all BO's fences and for proper vblank count
3742 */
3743 static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
3744 struct drm_framebuffer *fb,
3745 uint32_t target,
3746 struct dc_state *state)
3747 {
3748 unsigned long flags;
3749 uint32_t target_vblank;
3750 int r, vpos, hpos;
3751 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3752 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
3753 struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
3754 struct amdgpu_device *adev = crtc->dev->dev_private;
3755 bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
3756 struct dc_flip_addrs addr = { {0} };
3757 /* TODO eliminate or rename surface_update */
3758 struct dc_surface_update surface_updates[1] = { {0} };
3759 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
3760
3761
3762 /* Prepare wait for target vblank early - before the fence-waits */
3763 target_vblank = target - drm_crtc_vblank_count(crtc) +
3764 amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
3765
3766 /* TODO This might fail and hence is better not used; wait
3767 * explicitly on fences instead,
3768 * and in general this should be called for
3769 * blocking commits, as per the framework helpers
3770 */
3771 r = amdgpu_bo_reserve(abo, true);
3772 if (unlikely(r != 0)) {
3773 DRM_ERROR("failed to reserve buffer before flip\n");
3774 WARN_ON(1);
3775 }
3776
3777 /* Wait for all fences on this FB */
3778 WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
3779 MAX_SCHEDULE_TIMEOUT) < 0);
3780
3781 amdgpu_bo_unreserve(abo);
3782
3783 /* Wait until we're out of the vertical blank period before the one
3784 * targeted by the flip
3785 */
3786 while ((acrtc->enabled &&
3787 (amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
3788 &vpos, &hpos, NULL, NULL,
3789 &crtc->hwmode)
3790 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
3791 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
3792 (int)(target_vblank -
3793 amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
3794 usleep_range(1000, 1100);
3795 }
3796
3797 /* Flip */
3798 spin_lock_irqsave(&crtc->dev->event_lock, flags);
3799 /* update crtc fb */
3800 crtc->primary->fb = fb;
3801
3802 WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
3803 WARN_ON(!acrtc_state->stream);
3804
3805 addr.address.grph.addr.low_part = lower_32_bits(afb->address);
3806 addr.address.grph.addr.high_part = upper_32_bits(afb->address);
3807 addr.flip_immediate = async_flip;
3808
3809
3810 if (acrtc->base.state->event)
3811 prepare_flip_isr(acrtc);
3812
3813 surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
3814 surface_updates->flip_addr = &addr;
3815
3816
3817 dc_commit_updates_for_stream(adev->dm.dc,
3818 surface_updates,
3819 1,
3820 acrtc_state->stream,
3821 NULL,
3822 &surface_updates->surface,
3823 state);
3824
3825 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
3826 __func__,
3827 addr.address.grph.addr.high_part,
3828 addr.address.grph.addr.low_part);
3829
3830
3831 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
3832 }
3833
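/*
 * Commit the plane updates for one CRTC. Cursor planes are handled through
 * handle_cursor_update(). When a modeset is allowed, the constructed
 * dc_plane_states are committed to the stream in a single
 * dc_commit_planes_to_stream() call; otherwise each changed plane is
 * pageflipped via amdgpu_dm_do_flip().
 */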
3834 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
3835 struct drm_device *dev,
3836 struct amdgpu_display_manager *dm,
3837 struct drm_crtc *pcrtc,
3838 bool *wait_for_vblank)
3839 {
3840 uint32_t i;
3841 struct drm_plane *plane;
3842 struct drm_plane_state *old_plane_state, *new_plane_state;
3843 struct dc_stream_state *dc_stream_attach;
3844 struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
3845 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
3846 struct drm_crtc_state *new_pcrtc_state =
3847 drm_atomic_get_new_crtc_state(state, pcrtc);
3848 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
3849 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3850 int planes_count = 0;
3851 unsigned long flags;
3852
3853 /* update planes when needed */
3854 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
3855 struct drm_crtc *crtc = new_plane_state->crtc;
3856 struct drm_crtc_state *new_crtc_state;
3857 struct drm_framebuffer *fb = new_plane_state->fb;
3858 bool pflip_needed;
3859 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
3860
3861 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
3862 handle_cursor_update(plane, old_plane_state);
3863 continue;
3864 }
3865
3866 if (!fb || !crtc || pcrtc != crtc)
3867 continue;
3868
3869 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
3870 if (!new_crtc_state->active)
3871 continue;
3872
3873 pflip_needed = !state->allow_modeset;
3874
3875 spin_lock_irqsave(&crtc->dev->event_lock, flags);
3876 if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
3877 DRM_ERROR("%s: acrtc %d, already busy\n",
3878 __func__,
3879 acrtc_attach->crtc_id);
3880 /* In commit tail framework this cannot happen */
3881 WARN_ON(1);
3883 }
3884 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
3885
3886 if (!pflip_needed) {
3887 WARN_ON(!dm_new_plane_state->dc_state);
3888
3889 plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
3890
3891 dc_stream_attach = acrtc_state->stream;
3892 planes_count++;
3893
3894 } else if (new_crtc_state->planes_changed) {
3895 /* Assume that even ONE crtc with an immediate flip means
3896 * the entire commit can't wait for VBLANK
3897 * TODO Check if this is correct
3898 */
3899 *wait_for_vblank =
3900 new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
3901 false : true;
3902
3903 /* TODO: Needs rework for multiplane flip */
3904 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
3905 drm_crtc_vblank_get(crtc);
3906
3907 amdgpu_dm_do_flip(
3908 crtc,
3909 fb,
3910 drm_crtc_vblank_count(crtc) + *wait_for_vblank,
3911 dm_state->context);
3912 }
3913
3914 }
3915
3916 if (planes_count) {
3917 unsigned long flags;
3918
3919 if (new_pcrtc_state->event) {
3920
3921 drm_crtc_vblank_get(pcrtc);
3922
3923 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
3924 prepare_flip_isr(acrtc_attach);
3925 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
3926 }
3927
3928 if (false == dc_commit_planes_to_stream(dm->dc,
3929 plane_states_constructed,
3930 planes_count,
3931 dc_stream_attach,
3932 dm_state->context))
3933 dm_error("%s: Failed to attach plane!\n", __func__);
3934 } else {
3935 /* TODO BUG: disabling planes on the CRTC should go here. */
3936 }
3937 }
3938
3939
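/*
 * DRM atomic_commit entry point: disable DM interrupts on every CRTC that is
 * about to undergo a modeset, so the ISRs never observe a stale stream
 * pointer, and then defer to drm_atomic_helper_commit(), which eventually
 * calls amdgpu_dm_atomic_commit_tail().
 */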
3940 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
3941 struct drm_atomic_state *state,
3942 bool nonblock)
3943 {
3944 struct drm_crtc *crtc;
3945 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
3946 struct amdgpu_device *adev = dev->dev_private;
3947 int i;
3948
3949 /*
3950 * We disable vblank and pflip interrupts on CRTCs that are
3951 * about to change. We do it here to flush & disable the
3952 * interrupts before drm_swap_state is called in drm_atomic_helper_commit,
3953 * since it will update the crtc->dm_crtc_state->stream pointer which is
3954 * used in the ISRs.
3955 */
3956 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
3957 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
3958 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3959
3960 if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
3961 manage_dm_interrupts(adev, acrtc, false);
3962 }
3963 /* Add a check here for SoCs that support a hardware cursor plane, to
3964 * unset legacy_cursor_update */
3965
3966 return drm_atomic_helper_commit(dev, state, nonblock);
3967
3968 /* TODO Handle EINTR, re-enable IRQ */
3969 }
3970
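/*
 * Commit-tail implementation: remove streams for CRTCs that are reset or get
 * a new mode, commit the global DC state, handle scaling/underscan-only
 * changes, re-enable interrupts on newly enabled CRTCs, commit the per-CRTC
 * plane updates and finally signal the pending events and clean up.
 */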
3971 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
3972 {
3973 struct drm_device *dev = state->dev;
3974 struct amdgpu_device *adev = dev->dev_private;
3975 struct amdgpu_display_manager *dm = &adev->dm;
3976 struct dm_atomic_state *dm_state;
3977 uint32_t i, j;
3978 uint32_t new_crtcs_count = 0;
3979 struct drm_crtc *crtc;
3980 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
3981 struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
3982 struct dc_stream_state *new_stream = NULL;
3983 unsigned long flags;
3984 bool wait_for_vblank = true;
3985 struct drm_connector *connector;
3986 struct drm_connector_state *old_con_state, *new_con_state;
3987 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
3988
3989 drm_atomic_helper_update_legacy_modeset_state(dev, state);
3990
3991 dm_state = to_dm_atomic_state(state);
3992
3993 /* update changed items */
3994 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
3995 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3996
3997 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
3998 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
3999
4000 DRM_DEBUG_DRIVER(
4001 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4002 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4003 "connectors_changed:%d\n",
4004 acrtc->crtc_id,
4005 new_crtc_state->enable,
4006 new_crtc_state->active,
4007 new_crtc_state->planes_changed,
4008 new_crtc_state->mode_changed,
4009 new_crtc_state->active_changed,
4010 new_crtc_state->connectors_changed);
4011
4012 /* handles headless hotplug case, updating new_state and
4013 * aconnector as needed
4014 */
4015
4016 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
4017
4018 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
4019
4020 if (!dm_new_crtc_state->stream) {
4021 /*
4022 * this could happen because of issues with
4023 * userspace notification delivery.
4024 * In this case userspace tries to set a mode on a
4025 * display which is in fact disconnected.
4026 * dc_sink is NULL in this case on aconnector.
4027 * We expect a mode-reset to come soon.
4028 *
4029 * This can also happen when an unplug is done
4030 * during the resume sequence
4031 *
4032 * In this case, we want to pretend we still
4033 * have a sink to keep the pipe running so that
4034 * hw state is consistent with the sw state
4035 */
4036 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4037 __func__, acrtc->base.base.id);
4038 continue;
4039 }
4040
4041
4042 if (dm_old_crtc_state->stream)
4043 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4044
4045
4046 /*
4047 * this loop saves the CRTCs that got a new mode set;
4048 * we need to enable vblanks on them once all
4049 * resources are acquired in dc after dc_commit_streams
4050 */
4051
4052 /*TODO move all this into dm_crtc_state, get rid of
4053 * new_crtcs array and use old and new atomic states
4054 * instead
4055 */
4056 new_crtcs[new_crtcs_count] = acrtc;
4057 new_crtcs_count++;
4058
4059 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
4060 acrtc->enabled = true;
4061 acrtc->hw_mode = new_crtc_state->mode;
4062 crtc->hwmode = new_crtc_state->mode;
4063 } else if (modereset_required(new_crtc_state)) {
4064 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
4065
4066 /* i.e. reset mode */
4067 if (dm_old_crtc_state->stream)
4068 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4069 }
4070 } /* for_each_crtc_in_state() */
4071
4072 /*
4073 * Add the new streams to the freesync module only after the replaced
4074 * streams have been removed from it
4075 */
4076 if (adev->dm.freesync_module) {
4077 for (i = 0; i < new_crtcs_count; i++) {
4078 struct amdgpu_dm_connector *aconnector = NULL;
4079
4080 new_crtc_state = drm_atomic_get_new_crtc_state(state,
4081 &new_crtcs[i]->base);
4082 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4083
4084 new_stream = dm_new_crtc_state->stream;
4085 aconnector = amdgpu_dm_find_first_crtc_matching_connector(
4086 state,
4087 &new_crtcs[i]->base);
4088 if (!aconnector) {
4089 DRM_DEBUG_DRIVER("Atomic commit: Failed to find connector for acrtc id:%d "
4090 "skipping freesync init\n",
4091 new_crtcs[i]->crtc_id);
4092 continue;
4093 }
4094
4095 mod_freesync_add_stream(adev->dm.freesync_module,
4096 new_stream, &aconnector->caps);
4097 }
4098 }
4099
4100 if (dm_state->context)
4101 WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
4102
4103 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4104 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4105
4106 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4107
4108 if (dm_new_crtc_state->stream != NULL) {
4109 const struct dc_stream_status *status =
4110 dc_stream_get_status(dm_new_crtc_state->stream);
4111
4112 if (!status)
4113 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
4114 else
4115 acrtc->otg_inst = status->primary_otg_inst;
4116 }
4117 }
4118
4119 /* Handle scaling and underscan changes */
4120 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
4121 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
4122 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
4123 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4124 struct dc_stream_status *status = NULL;
4125
4126 if (acrtc)
4127 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
4128
4129 /* Skip any modesets/resets */
4130 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
4131 continue;
4132
4133 /* Skip anything that is not a scaling or underscan change */
4134 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4135 continue;
4136
4137 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4138
4139 if (!dm_new_crtc_state->stream)
4140 continue;
4141
4142 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
4143 dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
4144
4145 status = dc_stream_get_status(dm_new_crtc_state->stream);
4146 if (WARN_ON(!status))
4147 continue;
4148 WARN_ON(!status->plane_count);
4149 /* TODO How does this work with MPO? */
4150 if (!dc_commit_planes_to_stream(
4151 dm->dc,
4152 status->plane_states,
4153 status->plane_count,
4154 dm_new_crtc_state->stream,
4155 dm_state->context))
4156 dm_error("%s: Failed to update stream scaling!\n", __func__);
4157 }
4158
4159 for (i = 0; i < new_crtcs_count; i++) {
4160 /*
4161 * loop to enable interrupts on newly arrived crtc
4162 */
4163 struct amdgpu_crtc *acrtc = new_crtcs[i];
4164
4165 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
4166 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4167
4168 if (adev->dm.freesync_module)
4169 mod_freesync_notify_mode_change(
4170 adev->dm.freesync_module, &dm_new_crtc_state->stream, 1);
4171
4172 manage_dm_interrupts(adev, acrtc, true);
4173 }
4174
4175 /* update planes when needed per crtc*/
4176 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
4177 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4178
4179 if (dm_new_crtc_state->stream)
4180 amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
4181 }
4182
4183
4184 /*
4185 * send vblank event on all events not handled in flip and
4186 * mark consumed event for drm_atomic_helper_commit_hw_done
4187 */
4188 spin_lock_irqsave(&adev->ddev->event_lock, flags);
4189 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4190
4191 if (new_crtc_state->event)
4192 drm_send_event_locked(dev, &new_crtc_state->event->base);
4193
4194 new_crtc_state->event = NULL;
4195 }
4196 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
4197
4198 /* Signal HW programming completion */
4199 drm_atomic_helper_commit_hw_done(state);
4200
4201 if (wait_for_vblank)
4202 drm_atomic_helper_wait_for_vblanks(dev, state);
4203
4204 drm_atomic_helper_cleanup_planes(dev, state);
4205 }
4206
4207
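/*
 * Build a minimal atomic state (the connector, its CRTC and the CRTC's
 * primary plane) with mode_changed forced to true and commit it, so the
 * previous display configuration is restored without userspace involvement.
 */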
4208 static int dm_force_atomic_commit(struct drm_connector *connector)
4209 {
4210 int ret = 0;
4211 struct drm_device *ddev = connector->dev;
4212 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
4213 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4214 struct drm_plane *plane = disconnected_acrtc->base.primary;
4215 struct drm_connector_state *conn_state;
4216 struct drm_crtc_state *crtc_state;
4217 struct drm_plane_state *plane_state;
4218
4219 if (!state)
4220 return -ENOMEM;
4221
4222 state->acquire_ctx = ddev->mode_config.acquire_ctx;
4223
4224 /* Construct an atomic state to restore previous display setting */
4225
4226 /*
4227 * Attach connectors to drm_atomic_state
4228 */
4229 conn_state = drm_atomic_get_connector_state(state, connector);
4230
4231 ret = PTR_ERR_OR_ZERO(conn_state);
4232 if (ret)
4233 goto err;
4234
4235 /* Attach crtc to drm_atomic_state*/
4236 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
4237
4238 ret = PTR_ERR_OR_ZERO(crtc_state);
4239 if (ret)
4240 goto err;
4241
4242 /* force a restore */
4243 crtc_state->mode_changed = true;
4244
4245 /* Attach plane to drm_atomic_state */
4246 plane_state = drm_atomic_get_plane_state(state, plane);
4247
4248 ret = PTR_ERR_OR_ZERO(plane_state);
4249 if (ret)
4250 goto err;
4251
4252
4253 /* Call commit internally with the state we just constructed */
4254 ret = drm_atomic_commit(state);
4255 if (!ret)
4256 return 0;
4257
4258 err:
4259 DRM_ERROR("Restoring old state failed with %i\n", ret);
4260 drm_atomic_state_put(state);
4261
4262 return ret;
4263 }
4264
4265 /*
4266 * This function handles all cases when a set mode does not come upon hotplug.
4267 * This includes when the same display is unplugged and then plugged back into the
4268 * same port, and when we are running without usermode desktop manager support
4269 */
4270 void dm_restore_drm_connector_state(struct drm_device *dev,
4271 struct drm_connector *connector)
4272 {
4273 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4274 struct amdgpu_crtc *disconnected_acrtc;
4275 struct dm_crtc_state *acrtc_state;
4276
4277 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
4278 return;
4279
4280 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4281 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
4282
4283 if (!disconnected_acrtc || !acrtc_state->stream)
4284 return;
4285
4286 /*
4287 * If the previous sink is not released and different from the current,
4288 * we deduce we are in a state where we can not rely on usermode call
4289 * to turn on the display, so we do it here
4290 */
4291 if (acrtc_state->stream->sink != aconnector->dc_sink)
4292 dm_force_atomic_commit(&aconnector->base);
4293 }
4294
4295 /*
4296 * Grabs all modesetting locks to serialize against any blocking commits,
4297 * and waits for completion of all non-blocking commits.
4298 */
4299 static int do_aquire_global_lock(struct drm_device *dev,
4300 struct drm_atomic_state *state)
4301 {
4302 struct drm_crtc *crtc;
4303 struct drm_crtc_commit *commit;
4304 long ret;
4305
4306 /* Adding all modeset locks to acquire_ctx will
4307 * ensure that when the framework releases it, the
4308 * extra locks we are locking here will get released too
4309 */
4310 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
4311 if (ret)
4312 return ret;
4313
4314 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4315 spin_lock(&crtc->commit_lock);
4316 commit = list_first_entry_or_null(&crtc->commit_list,
4317 struct drm_crtc_commit, commit_entry);
4318 if (commit)
4319 drm_crtc_commit_get(commit);
4320 spin_unlock(&crtc->commit_lock);
4321
4322 if (!commit)
4323 continue;
4324
4325 /* Make sure all pending HW programming has completed and
4326 * page flips are done
4327 */
4328 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
4329
4330 if (ret > 0)
4331 ret = wait_for_completion_interruptible_timeout(
4332 &commit->flip_done, 10*HZ);
4333
4334 if (ret == 0)
4335 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
4336 "timed out\n", crtc->base.id, crtc->name);
4337
4338 drm_crtc_commit_put(commit);
4339 }
4340
4341 return ret < 0 ? ret : 0;
4342 }
4343
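/*
 * Walk the CRTCs in the atomic state and update the DC context accordingly.
 * With enable == false, streams belonging to disabled or modeset CRTCs are
 * removed from the context; with enable == true, new streams are created
 * from the connector state and added to it. *lock_and_validation_needed is
 * set whenever the DC context was changed.
 */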
4344 static int dm_update_crtcs_state(struct dc *dc,
4345 struct drm_atomic_state *state,
4346 bool enable,
4347 bool *lock_and_validation_needed)
4348 {
4349 struct drm_crtc *crtc;
4350 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4351 int i;
4352 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
4353 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4354 struct dc_stream_state *new_stream;
4355 int ret = 0;
4356
4357 /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
4358 /* update changed items */
4359 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4360 struct amdgpu_crtc *acrtc = NULL;
4361 struct amdgpu_dm_connector *aconnector = NULL;
4362 struct drm_connector_state *new_con_state = NULL;
4363 struct dm_connector_state *dm_conn_state = NULL;
4364
4365 new_stream = NULL;
4366
4367 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4368 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4369 acrtc = to_amdgpu_crtc(crtc);
4370
4371 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
4372
4373 /* TODO This hack should go away */
4374 if (aconnector && enable) {
4375 /* Make sure a fake sink is created in the plug-in scenario */
4376 new_con_state = drm_atomic_get_connector_state(state,
4377 &aconnector->base);
4378
4379 if (IS_ERR(new_con_state)) {
4380 ret = PTR_ERR_OR_ZERO(new_con_state);
4381 break;
4382 }
4383
4384 dm_conn_state = to_dm_connector_state(new_con_state);
4385
4386 new_stream = create_stream_for_sink(aconnector,
4387 &new_crtc_state->mode,
4388 dm_conn_state);
4389
4390 /*
4391 * we can have no stream on ACTION_SET if a display
4392 * was disconnected during S3; in this case it is not an
4393 * error, the OS will be updated after detection and will
4394 * do the right thing on the next atomic commit
4395 */
4396
4397 if (!new_stream) {
4398 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4399 __func__, acrtc->base.base.id);
4400 break;
4401 }
4402 }
4403
4404 if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4405 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
4406
4407 new_crtc_state->mode_changed = false;
4408
4409 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
4410 new_crtc_state->mode_changed);
4411 }
4412
4413
4414 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
4415 goto next_crtc;
4416
4417 DRM_DEBUG_DRIVER(
4418 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4419 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4420 "connectors_changed:%d\n",
4421 acrtc->crtc_id,
4422 new_crtc_state->enable,
4423 new_crtc_state->active,
4424 new_crtc_state->planes_changed,
4425 new_crtc_state->mode_changed,
4426 new_crtc_state->active_changed,
4427 new_crtc_state->connectors_changed);
4428
4429 /* Remove stream for any changed/disabled CRTC */
4430 if (!enable) {
4431
4432 if (!dm_old_crtc_state->stream)
4433 goto next_crtc;
4434
4435 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
4436 crtc->base.id);
4437
4438 /* i.e. reset mode */
4439 if (dc_remove_stream_from_ctx(
4440 dc,
4441 dm_state->context,
4442 dm_old_crtc_state->stream) != DC_OK) {
4443 ret = -EINVAL;
4444 goto fail;
4445 }
4446
4447 dc_stream_release(dm_old_crtc_state->stream);
4448 dm_new_crtc_state->stream = NULL;
4449
4450 *lock_and_validation_needed = true;
4451
4452 } else {/* Add stream for any updated/enabled CRTC */
4453 /*
4454 * Quick fix to prevent a NULL pointer dereference on new_stream when
4455 * added MST connectors are not found in the existing crtc_state in chained mode
4456 * TODO: dig out the root cause of this
4457 */
4458 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
4459 goto next_crtc;
4460
4461 if (modereset_required(new_crtc_state))
4462 goto next_crtc;
4463
4464 if (modeset_required(new_crtc_state, new_stream,
4465 dm_old_crtc_state->stream)) {
4466
4467 WARN_ON(dm_new_crtc_state->stream);
4468
4469 dm_new_crtc_state->stream = new_stream;
4470 dc_stream_retain(new_stream);
4471
4472 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
4473 crtc->base.id);
4474
4475 if (dc_add_stream_to_ctx(
4476 dc,
4477 dm_state->context,
4478 dm_new_crtc_state->stream) != DC_OK) {
4479 ret = -EINVAL;
4480 goto fail;
4481 }
4482
4483 *lock_and_validation_needed = true;
4484 }
4485 }
4486
4487 next_crtc:
4488 /* Release extra reference */
4489 if (new_stream)
4490 dc_stream_release(new_stream);
4491 }
4492
4493 return ret;
4494
4495 fail:
4496 if (new_stream)
4497 dc_stream_release(new_stream);
4498 return ret;
4499 }
4500
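/*
 * Plane counterpart of dm_update_crtcs_state(): for full modesets, remove
 * the dc_plane_state of disabled planes from the DC context
 * (enable == false), or create, fill and add the dc_plane_state of newly
 * enabled planes (enable == true). Pure pageflips are skipped entirely.
 */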
4501 static int dm_update_planes_state(struct dc *dc,
4502 struct drm_atomic_state *state,
4503 bool enable,
4504 bool *lock_and_validation_needed)
4505 {
4506 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
4507 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4508 struct drm_plane *plane;
4509 struct drm_plane_state *old_plane_state, *new_plane_state;
4510 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
4511 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4512 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
4513 int i;
4514 /* TODO return page_flip_needed() function */
4515 bool pflip_needed = !state->allow_modeset;
4516 int ret = 0;
4517
4518 if (pflip_needed)
4519 return ret;
4520
4521 /* Add new planes */
4522 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4523 new_plane_crtc = new_plane_state->crtc;
4524 old_plane_crtc = old_plane_state->crtc;
4525 dm_new_plane_state = to_dm_plane_state(new_plane_state);
4526 dm_old_plane_state = to_dm_plane_state(old_plane_state);
4527
4528 /*TODO Implement atomic check for cursor plane */
4529 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4530 continue;
4531
4532 /* Remove any changed/removed planes */
4533 if (!enable) {
4534
4535 if (!old_plane_crtc)
4536 continue;
4537
4538 old_crtc_state = drm_atomic_get_old_crtc_state(
4539 state, old_plane_crtc);
4540 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4541
4542 if (!dm_old_crtc_state->stream)
4543 continue;
4544
4545 DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
4546 plane->base.id, old_plane_crtc->base.id);
4547
4548 if (!dc_remove_plane_from_context(
4549 dc,
4550 dm_old_crtc_state->stream,
4551 dm_old_plane_state->dc_state,
4552 dm_state->context)) {
4553
4554 ret = -EINVAL;
4555 return ret;
4556 }
4557
4558
4559 dc_plane_state_release(dm_old_plane_state->dc_state);
4560 dm_new_plane_state->dc_state = NULL;
4561
4562 *lock_and_validation_needed = true;
4563
4564 } else { /* Add new planes */
4565
4566 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
4567 continue;
4568
4569 if (!new_plane_crtc)
4570 continue;
4571
4572 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
4573 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4574
4575 if (!dm_new_crtc_state->stream)
4576 continue;
4577
4578
4579 WARN_ON(dm_new_plane_state->dc_state);
4580
4581 dm_new_plane_state->dc_state = dc_create_plane_state(dc);
4582
4583 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4584 plane->base.id, new_plane_crtc->base.id);
4585
4586 if (!dm_new_plane_state->dc_state) {
4587 ret = -EINVAL;
4588 return ret;
4589 }
4590
4591 ret = fill_plane_attributes(
4592 new_plane_crtc->dev->dev_private,
4593 dm_new_plane_state->dc_state,
4594 new_plane_state,
4595 new_crtc_state,
4596 false);
4597 if (ret)
4598 return ret;
4599
4600
4601 if (!dc_add_plane_to_context(
4602 dc,
4603 dm_new_crtc_state->stream,
4604 dm_new_plane_state->dc_state,
4605 dm_state->context)) {
4606
4607 ret = -EINVAL;
4608 return ret;
4609 }
4610
4611 *lock_and_validation_needed = true;
4612 }
4613 }
4614
4615
4616 return ret;
4617 }
4618
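/*
 * DRM atomic_check hook: run the DRM helper checks, build a trial DC state
 * by removing and re-adding the affected streams and planes, validate the
 * planes, and, when a full update is required, take the global modeset locks
 * and have DC validate the resulting global state.
 */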
4619 static int amdgpu_dm_atomic_check(struct drm_device *dev,
4620 struct drm_atomic_state *state)
4621 {
4622 int i;
4623 int ret;
4624 struct amdgpu_device *adev = dev->dev_private;
4625 struct dc *dc = adev->dm.dc;
4626 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4627 struct drm_connector *connector;
4628 struct drm_connector_state *old_con_state, *new_con_state;
4629 struct drm_crtc *crtc;
4630 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4631
4632 /*
4633 * This bool will be set to true for any modeset/reset
4634 * or plane update which implies a non-fast surface update.
4635 */
4636 bool lock_and_validation_needed = false;
4637
4638 ret = drm_atomic_helper_check_modeset(dev, state);
4639 if (ret) {
4640 DRM_ERROR("Atomic state validation failed with error :%d !\n", ret);
4641 return ret;
4642 }
4643
4644 /*
4645 * legacy_cursor_update should be made false for SoCs having
4646 * a dedicated hardware cursor plane in amdgpu_dm_atomic_commit();
4647 * otherwise, for a software cursor plane,
4648 * we should not add it to the list of affected planes.
4649 */
4650 if (state->legacy_cursor_update) {
4651 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4652 if (new_crtc_state->color_mgmt_changed) {
4653 ret = drm_atomic_add_affected_planes(state, crtc);
4654 if (ret)
4655 goto fail;
4656 }
4657 }
4658 } else {
4659 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4660 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
4661 continue;
4662
4663 if (!new_crtc_state->enable)
4664 continue;
4665
4666 ret = drm_atomic_add_affected_connectors(state, crtc);
4667 if (ret)
4668 return ret;
4669
4670 ret = drm_atomic_add_affected_planes(state, crtc);
4671 if (ret)
4672 goto fail;
4673 }
4674 }
4675
4676 dm_state->context = dc_create_state();
4677 ASSERT(dm_state->context);
4678 dc_resource_state_copy_construct_current(dc, dm_state->context);
4679
4680 /* Remove existing planes if they are modified */
4681 ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
4682 if (ret) {
4683 goto fail;
4684 }
4685
4686 /* Disable all crtcs which require disable */
4687 ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
4688 if (ret) {
4689 goto fail;
4690 }
4691
4692 /* Enable all crtcs which require enable */
4693 ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
4694 if (ret) {
4695 goto fail;
4696 }
4697
4698 /* Add new/modified planes */
4699 ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
4700 if (ret) {
4701 goto fail;
4702 }
4703
4704 /* Run this here since we want to validate the streams we created */
4705 ret = drm_atomic_helper_check_planes(dev, state);
4706 if (ret)
4707 goto fail;
4708
4709 /* Check scaling and underscan changes*/
4710 /* TODO Scaling changes validation was removed due to the inability to commit
4711 * a new stream into the context w/o causing a full reset. Need to
4712 * decide how to handle this.
4713 */
4714 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
4715 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
4716 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
4717 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4718
4719 /* Skip any modesets/resets */
4720 if (!acrtc || drm_atomic_crtc_needs_modeset(
4721 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
4722 continue;
4723
4724 /* Skip anything that is not a scaling or underscan change */
4725 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4726 continue;
4727
4728 lock_and_validation_needed = true;
4729 }
4730
4731 /*
4732 * For the full-update case, when
4733 * removing/adding/updating streams on one CRTC while flipping
4734 * on another CRTC,
4735 * acquiring the global lock will guarantee that any such full
4736 * update commit
4737 * will wait for completion of any outstanding flip using DRM's
4738 * synchronization events.
4739 */
4740
4741 if (lock_and_validation_needed) {
4742
4743 ret = do_aquire_global_lock(dev, state);
4744 if (ret)
4745 goto fail;
4746
4747 if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
4748 ret = -EINVAL;
4749 goto fail;
4750 }
4751 }
4752
4753 /* Must be success */
4754 WARN_ON(ret);
4755 return ret;
4756
4757 fail:
4758 if (ret == -EDEADLK)
4759 DRM_DEBUG_DRIVER("Atomic check stopped due to to deadlock.\n");
4760 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
4761 DRM_DEBUG_DRIVER("Atomic check stopped due to to signal.\n");
4762 else
4763 DRM_ERROR("Atomic check failed with err: %d \n", ret);
4764
4765 return ret;
4766 }
4767
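/*
 * Read the DOWN_STREAM_PORT_COUNT DPCD register and check the
 * MSA_TIMING_PAR_IGNORED bit, i.e. whether the sink can ignore the MSA
 * timing parameters, which is a precondition for the freesync support
 * checked below.
 */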
4768 static bool is_dp_capable_without_timing_msa(struct dc *dc,
4769 struct amdgpu_dm_connector *amdgpu_dm_connector)
4770 {
4771 uint8_t dpcd_data;
4772 bool capable = false;
4773
4774 if (amdgpu_dm_connector->dc_link &&
4775 dm_helpers_dp_read_dpcd(
4776 NULL,
4777 amdgpu_dm_connector->dc_link,
4778 DP_DOWN_STREAM_PORT_COUNT,
4779 &dpcd_data,
4780 sizeof(dpcd_data))) {
4781 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
4782 }
4783
4784 return capable;
4785 }
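
/*
 * Parse the EDID detailed timing descriptors for a monitor range block and,
 * for DP/eDP sinks that can ignore MSA timing, record the supported
 * min/max refresh rates as the connector's freesync capabilities.
 */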
4786 void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
4787 struct edid *edid)
4788 {
4789 int i;
4790 uint64_t val_capable;
4791 bool edid_check_required;
4792 struct detailed_timing *timing;
4793 struct detailed_non_pixel *data;
4794 struct detailed_data_monitor_range *range;
4795 struct amdgpu_dm_connector *amdgpu_dm_connector =
4796 to_amdgpu_dm_connector(connector);
4797
4798 struct drm_device *dev = connector->dev;
4799 struct amdgpu_device *adev = dev->dev_private;
4800
4801 edid_check_required = false;
4802 if (!amdgpu_dm_connector->dc_sink) {
4803 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
4804 return;
4805 }
4806 if (!adev->dm.freesync_module)
4807 return;
4808 /*
4809 * if edid is non-NULL, restrict freesync only to DP and eDP
4810 */
4811 if (edid) {
4812 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
4813 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
4814 edid_check_required = is_dp_capable_without_timing_msa(
4815 adev->dm.dc,
4816 amdgpu_dm_connector);
4817 }
4818 }
4819 val_capable = 0;
4820 if (edid_check_required && (edid->version > 1 ||
4821 (edid->version == 1 && edid->revision > 1))) {
4822 for (i = 0; i < 4; i++) {
4823
4824 timing = &edid->detailed_timings[i];
4825 data = &timing->data.other_data;
4826 range = &data->data.range;
4827 /*
4828 * Check if monitor has continuous frequency mode
4829 */
4830 if (data->type != EDID_DETAIL_MONITOR_RANGE)
4831 continue;
4832 /*
4833 * Check for flag range limits only. If flag == 1 then
4834 * no additional timing information provided.
4835 * Default GTF, GTF Secondary curve and CVT are not
4836 * supported
4837 */
4838 if (range->flags != 1)
4839 continue;
4840
4841 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
4842 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
4843 amdgpu_dm_connector->pixel_clock_mhz =
4844 range->pixel_clock_mhz * 10;
4845 break;
4846 }
4847
4848 if (amdgpu_dm_connector->max_vfreq -
4849 amdgpu_dm_connector->min_vfreq > 10) {
4850 amdgpu_dm_connector->caps.supported = true;
4851 amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
4852 amdgpu_dm_connector->min_vfreq * 1000000;
4853 amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
4854 amdgpu_dm_connector->max_vfreq * 1000000;
4855 val_capable = 1;
4856 }
4857 }
4858
4859 /*
4860 * TODO figure out how to notify user-mode or DRM of freesync caps
4861 * once we figure out how to deal with freesync in an upstreamable
4862 * fashion
4863 */
4864
4865 }
4866
4867 void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
4868 {
4869 /*
4870 * TODO fill in once we figure out how to deal with freesync in
4871 * an upstreamable fashion
4872 */
4873 }