/*
 * Copyright 2012-13 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/types.h>
#include <linux/version.h>

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_edid.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "dm_services_types.h"

/* We need to #undef FRAME_SIZE and DEPRECATED because they conflict
 * with ptrace-abi.h's #define's of them.
 */
#undef FRAME_SIZE
#undef DEPRECATED

#include "dc.h"

#include "amdgpu_dm_types.h"
#include "amdgpu_dm_mst_types.h"

#include "modules/inc/mod_freesync.h"

struct dm_connector_state {
	struct drm_connector_state base;

	enum amdgpu_rmx_type scaling;
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
};

#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)


void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

static void dm_set_cursor(
	struct amdgpu_crtc *amdgpu_crtc,
	uint64_t gpu_addr,
	uint32_t width,
	uint32_t height)
{
	struct dc_cursor_attributes attributes;

	amdgpu_crtc->cursor_width = width;
	amdgpu_crtc->cursor_height = height;

	attributes.address.high_part = upper_32_bits(gpu_addr);
	attributes.address.low_part = lower_32_bits(gpu_addr);
	attributes.width = width;
	attributes.height = height;
	attributes.x_hot = 0;
	attributes.y_hot = 0;
	attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle = 0;
	attributes.attribute_flags.value = 0;

	if (!dc_target_set_cursor_attributes(
			amdgpu_crtc->target,
			&attributes)) {
		DRM_ERROR("DC failed to set cursor attributes\n");
	}
}

static int dm_crtc_unpin_cursor_bo_old(
	struct amdgpu_crtc *amdgpu_crtc)
{
	struct amdgpu_bo *robj;
	int ret = 0;

	if (NULL != amdgpu_crtc && NULL != amdgpu_crtc->cursor_bo) {
		robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

		ret = amdgpu_bo_reserve(robj, false);

		if (likely(ret == 0)) {
			ret = amdgpu_bo_unpin(robj);

			if (unlikely(ret != 0)) {
				DRM_ERROR(
					"%s: unpin failed (ret=%d), bo %p\n",
					__func__,
					ret,
					amdgpu_crtc->cursor_bo);
			}

			amdgpu_bo_unreserve(robj);
		} else {
			DRM_ERROR(
				"%s: reserve failed (ret=%d), bo %p\n",
				__func__,
				ret,
				amdgpu_crtc->cursor_bo);
		}

		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
		amdgpu_crtc->cursor_bo = NULL;
	}

	return ret;
}

static int dm_crtc_pin_cursor_bo_new(
	struct drm_crtc *crtc,
	struct drm_file *file_priv,
	uint32_t handle,
	struct amdgpu_bo **ret_obj)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_bo *robj;
	struct drm_gem_object *obj;
	int ret = -EINVAL;

	if (NULL != crtc) {
		struct drm_device *dev = crtc->dev;
		struct amdgpu_device *adev = dev->dev_private;
		uint64_t gpu_addr;

		amdgpu_crtc = to_amdgpu_crtc(crtc);

		obj = drm_gem_object_lookup(file_priv, handle);

		if (!obj) {
			DRM_ERROR(
				"Cannot find cursor object %x for crtc %d\n",
				handle,
				amdgpu_crtc->crtc_id);
			goto release;
		}
		robj = gem_to_amdgpu_bo(obj);

		ret = amdgpu_bo_reserve(robj, false);

		if (unlikely(ret != 0)) {
			drm_gem_object_unreference_unlocked(obj);
			DRM_ERROR("%s: failed to reserve bo (ret=%d, handle=%x)\n",
				__func__, ret, handle);
			goto release;
		}

		ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, 0,
						adev->mc.visible_vram_size,
						&gpu_addr);

		if (ret == 0) {
			amdgpu_crtc->cursor_addr = gpu_addr;
			*ret_obj = robj;
		}
		amdgpu_bo_unreserve(robj);
		if (ret)
			drm_gem_object_unreference_unlocked(obj);

	}
release:

	return ret;
}

static int dm_crtc_cursor_set(
	struct drm_crtc *crtc,
	struct drm_file *file_priv,
	uint32_t handle,
	uint32_t width,
	uint32_t height)
{
	struct amdgpu_bo *new_cursor_bo;
	struct dc_cursor_position position;

	int ret;

	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	/* error codes returned to the DRM core must be negative */
	ret = -EINVAL;
	new_cursor_bo = NULL;

	DRM_DEBUG_KMS(
		"%s: crtc_id=%d with handle %d and size %d to %d, bo_object %p\n",
		__func__,
		amdgpu_crtc->crtc_id,
		handle,
		width,
		height,
		amdgpu_crtc->cursor_bo);

	if (!handle) {
		/* turn off cursor */
		position.enable = false;
		position.x = 0;
		position.y = 0;
		position.hot_spot_enable = false;

		if (amdgpu_crtc->target) {
			/* set cursor visible false */
			dc_target_set_cursor_position(
				amdgpu_crtc->target,
				&position);
		}
		/* unpin old cursor buffer and update cache */
		ret = dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
		goto release;

	}

	if ((width > amdgpu_crtc->max_cursor_width) ||
		(height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR(
			"%s: bad cursor width or height %d x %d\n",
			__func__,
			width,
			height);
		goto release;
	}
	/* try to pin the new cursor bo */
	ret = dm_crtc_pin_cursor_bo_new(crtc, file_priv, handle, &new_cursor_bo);
	/* if the map was not successful then return an error */
	if (ret)
		goto release;

	/* program the new cursor bo to hardware */
	dm_set_cursor(amdgpu_crtc, amdgpu_crtc->cursor_addr, width, height);

	/* unmap the old, no longer used cursor bo,
	 * return memory and mapping back */
	dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);

	/* assign the new cursor bo to our internal cache */
	amdgpu_crtc->cursor_bo = &new_cursor_bo->gem_base;

release:
	return ret;

}

static int dm_crtc_cursor_move(struct drm_crtc *crtc,
				int x, int y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int xorigin = 0, yorigin = 0;
	struct dc_cursor_position position;

	/* avivo cursors are offset into the total surface */
	x += crtc->primary->state->src_x >> 16;
	y += crtc->primary->state->src_y >> 16;

	/*
	 * TODO: for cursor debugging unguard the following
	 */
#if 0
	DRM_DEBUG_KMS(
		"%s: x %d y %d c->x %d c->y %d\n",
		__func__,
		x,
		y,
		crtc->x,
		crtc->y);
#endif

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}

	position.enable = true;
	position.x = x;
	position.y = y;

	position.hot_spot_enable = true;
	position.x_hotspot = xorigin;
	position.y_hotspot = yorigin;

	if (amdgpu_crtc->target) {
		if (!dc_target_set_cursor_position(
			amdgpu_crtc->target,
			&position)) {
			DRM_ERROR("DC failed to set cursor position\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void dm_crtc_cursor_reset(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	DRM_DEBUG_KMS(
		"%s: with cursor_bo %p\n",
		__func__,
		amdgpu_crtc->cursor_bo);

	if (amdgpu_crtc->cursor_bo && amdgpu_crtc->target) {
		dm_set_cursor(
			amdgpu_crtc,
			amdgpu_crtc->cursor_addr,
			amdgpu_crtc->cursor_width,
			amdgpu_crtc->cursor_height);
	}
}
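
/*
 * Translate the DRM plane state into DC surface rectangles. The
 * src_x/src_y/src_w/src_h fields are 16.16 fixed point, so the integer
 * part is recovered by shifting right 16 bits and the fractional part is
 * dropped. Returns false if the resulting source or destination size is
 * degenerate.
 */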
static bool fill_rects_from_plane_state(
	const struct drm_plane_state *state,
	struct dc_surface *surface)
{
	surface->src_rect.x = state->src_x >> 16;
	surface->src_rect.y = state->src_y >> 16;
	/* for now we ignore the mantissa and do not deal with fractional pixels :( */
	surface->src_rect.width = state->src_w >> 16;

	if (surface->src_rect.width == 0)
		return false;

	surface->src_rect.height = state->src_h >> 16;
	if (surface->src_rect.height == 0)
		return false;

	surface->dst_rect.x = state->crtc_x;
	surface->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return false;

	surface->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return false;

	surface->dst_rect.height = state->crtc_h;

	surface->clip_rect = surface->dst_rect;

	switch (state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		surface->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		surface->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		surface->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		surface->rotation = ROTATION_ANGLE_270;
		break;
	default:
		surface->rotation = ROTATION_ANGLE_0;
		break;
	}

	return true;
}

static bool get_fb_info(
	const struct amdgpu_framebuffer *amdgpu_fb,
	uint64_t *tiling_flags,
	uint64_t *fb_location)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
	int r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r != 0)) {
		DRM_ERROR("Unable to reserve buffer\n");
		return false;
	}

	if (fb_location)
		*fb_location = amdgpu_bo_gpu_offset(rbo);

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	amdgpu_bo_unreserve(rbo);

	return true;
}

static void fill_plane_attributes_from_fb(
	struct amdgpu_device *adev,
	struct dc_surface *surface,
	const struct amdgpu_framebuffer *amdgpu_fb, bool addReq)
{
	uint64_t tiling_flags;
	uint64_t fb_location = 0;
	const struct drm_framebuffer *fb = &amdgpu_fb->base;
	struct drm_format_name_buf format_name;

	get_fb_info(
		amdgpu_fb,
		&tiling_flags,
		addReq ? &fb_location : NULL);

	surface->address.type = PLN_ADDR_TYPE_GRAPHICS;
	surface->address.grph.addr.low_part = lower_32_bits(fb_location);
	surface->address.grph.addr.high_part = upper_32_bits(fb_location);

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		surface->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		surface->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		surface->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return;
	}

	memset(&surface->tiling_info, 0, sizeof(surface->tiling_info));

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		surface->tiling_info.gfx8.num_banks = num_banks;
		surface->tiling_info.gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		surface->tiling_info.gfx8.tile_split = tile_split;
		surface->tiling_info.gfx8.bank_width = bankw;
		surface->tiling_info.gfx8.bank_height = bankh;
		surface->tiling_info.gfx8.tile_aspect = mtaspect;
		surface->tiling_info.gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		surface->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	surface->tiling_info.gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	surface->plane_size.grph.surface_size.x = 0;
	surface->plane_size.grph.surface_size.y = 0;
	surface->plane_size.grph.surface_size.width = fb->width;
	surface->plane_size.grph.surface_size.height = fb->height;
	surface->plane_size.grph.surface_pitch =
		fb->pitches[0] / fb->format->cpp[0];

	surface->visible = true;
	surface->scaling_quality.h_taps_c = 0;
	surface->scaling_quality.v_taps_c = 0;

	/* TODO: unhardcode */
	surface->color_space = COLOR_SPACE_SRGB;
	/* is this needed? is surface zeroed at allocation? */
	surface->scaling_quality.h_taps = 0;
	surface->scaling_quality.v_taps = 0;
	surface->stereo_format = PLANE_STEREO_FORMAT_NONE;

}

#define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256

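/*
 * Copy the 256-entry legacy gamma LUT attached to the CRTC state into a
 * freshly allocated dc_gamma object and hand ownership of it to the
 * surface. Callers are expected to have checked that state->gamma_lut is
 * non-NULL before calling this.
 */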
static void fill_gamma_from_crtc(
	const struct drm_crtc *crtc,
	struct dc_surface *dc_surface)
{
	int i;
	struct dc_gamma *gamma;
	struct drm_crtc_state *state = crtc->state;
	struct drm_color_lut *lut = (struct drm_color_lut *) state->gamma_lut->data;

	gamma = dc_create_gamma();

	if (gamma == NULL)
		return;

	for (i = 0; i < NUM_OF_RAW_GAMMA_RAMP_RGB_256; i++) {
		gamma->red[i] = lut[i].red;
		gamma->green[i] = lut[i].green;
		gamma->blue[i] = lut[i].blue;
	}

	dc_surface->gamma_correction = gamma;
}

static void fill_plane_attributes(
	struct amdgpu_device *adev,
	struct dc_surface *surface,
	struct drm_plane_state *state, bool addrReq)
{
	const struct amdgpu_framebuffer *amdgpu_fb =
		to_amdgpu_framebuffer(state->fb);
	const struct drm_crtc *crtc = state->crtc;
	struct dc_transfer_func *input_tf;

	fill_rects_from_plane_state(state, surface);
	fill_plane_attributes_from_fb(
		crtc->dev->dev_private,
		surface,
		amdgpu_fb,
		addrReq);

	input_tf = dc_create_transfer_func();

	if (input_tf == NULL)
		return;

	input_tf->type = TF_TYPE_PREDEFINED;
	input_tf->tf = TRANSFER_FUNCTION_SRGB;

	surface->in_transfer_func = input_tf;

	/* In case of gamma set, update gamma value */
	if (state->crtc->state->gamma_lut) {
		fill_gamma_from_crtc(crtc, surface);
	}
}

/*****************************************************************************/

struct amdgpu_connector *aconnector_from_drm_crtc_id(
	const struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector *connector;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_connector *aconnector;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_connector(connector);

		if (aconnector->base.state->crtc != &acrtc->base)
			continue;

		/* Found the connector */
		return aconnector;
	}

	/* If we get here, not found. */
	return NULL;
}

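/*
 * Compute the stream destination rectangle from the requested scaling
 * mode. For RMX_ASPECT (and RMX_OFF) the integer cross-product test
 * src.width * dst.height < src.height * dst.width decides which axis
 * limits the scale, preserving the aspect ratio without floating point;
 * RMX_CENTER keeps the source size unscaled. The result is then centered
 * on the addressable area and optionally shrunk by the underscan borders.
 */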
static void update_stream_scaling_settings(
	const struct drm_display_mode *mode,
	const struct dm_connector_state *dm_state,
	const struct dc_stream *stream)
{
	struct amdgpu_device *adev = dm_state->base.crtc->dev->dev_private;
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in target space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	rmx_type = dm_state->scaling;
	if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
		if (src.width * dst.height <
				src.height * dst.width) {
			/* height needs less upscaling/more downscaling */
			dst.width = src.width *
					dst.height / src.height;
		} else {
			/* width needs less upscaling/more downscaling */
			dst.height = src.height *
					dst.width / src.width;
		}
	} else if (rmx_type == RMX_CENTER) {
		dst = src;
	}

	dst.x = (stream->timing.h_addressable - dst.width) / 2;
	dst.y = (stream->timing.v_addressable - dst.height) / 2;

	if (dm_state->underscan_enable) {
		dst.x += dm_state->underscan_hborder / 2;
		dst.y += dm_state->underscan_vborder / 2;
		dst.width -= dm_state->underscan_hborder;
		dst.height -= dm_state->underscan_vborder;
	}

	adev->dm.dc->stream_funcs.stream_update_scaling(adev->dm.dc, stream, &src, &dst);

	DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
			dst.x, dst.y, dst.width, dst.height);

}

static void dm_dc_surface_commit(
	struct dc *dc,
	struct drm_crtc *crtc)
{
	struct dc_surface *dc_surface;
	const struct dc_surface *dc_surfaces[1];
	const struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct dc_target *dc_target = acrtc->target;

	if (!dc_target) {
		dm_error(
			"%s: Failed to obtain target on crtc (%d)!\n",
			__func__,
			acrtc->crtc_id);
		goto fail;
	}

	dc_surface = dc_create_surface(dc);

	if (!dc_surface) {
		dm_error(
			"%s: Failed to create a surface!\n",
			__func__);
		goto fail;
	}

	/* Surface programming */
	fill_plane_attributes(
		crtc->dev->dev_private,
		dc_surface,
		crtc->primary->state,
		true);

	dc_surfaces[0] = dc_surface;

	if (false == dc_commit_surfaces_to_target(
			dc,
			dc_surfaces,
			1,
			dc_target)) {
		dm_error(
			"%s: Failed to attach surface!\n",
			__func__);
	}

	dc_surface_release(dc_surface);
fail:
	return;
}

static enum dc_color_depth convert_color_depth_from_display_info(
	const struct drm_connector *connector)
{
	uint32_t bpc = connector->display_info.bpc;

	/* Limit color depth to 8 bpc.
	 * TODO: Still need to handle deep color. */
	if (bpc > 8)
		bpc = 8;

	switch (bpc) {
	case 0:
		/* Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing.
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

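/*
 * Classify the mode as 16:9 or 4:3 by cross-multiplying: for an exact
 * 16:9 mode, hdisplay * 9 equals vdisplay * 16, so the two products are
 * compared with a small tolerance (|difference| < 10) to absorb rounding
 * in the timings.
 */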
static enum dc_aspect_ratio get_aspect_ratio(
	const struct drm_display_mode *mode_in)
{
	int32_t width = mode_in->crtc_hdisplay * 9;
	int32_t height = mode_in->crtc_vdisplay * 16;

	if ((width - height) < 10 && (width - height) > -10)
		return ASPECT_RATIO_16_9;
	else
		return ASPECT_RATIO_4_3;
}

static enum dc_color_space get_output_color_space(
	const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030 kHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
		 * respectively.
		 */
		if (dc_crtc_timing->pix_clk_khz > 27030) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

/*****************************************************************************/

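/*
 * Build the DC CRTC timing from a DRM display mode. Porches and sync
 * widths are derived from the crtc_* timing fields, e.g.
 * h_front_porch = crtc_hsync_start - crtc_hdisplay and
 * h_sync_width = crtc_hsync_end - crtc_hsync_start. The output transfer
 * function is currently hardcoded to sRGB.
 */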
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector)
{
	struct dc_crtc_timing *timing_out = &stream->timing;

	memset(timing_out, 0, sizeof(struct dc_crtc_timing));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */

	if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
			connector);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;
	timing_out->vic = drm_match_cea_mode(mode_in);

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_khz = mode_in->crtc_clock;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
	if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
		timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
	if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
		timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;

	stream->output_color_space = get_output_color_space(timing_out);

	{
		struct dc_transfer_func *tf = dc_create_transfer_func();

		/* allocation can fail; only set up the transfer func if it succeeded */
		if (tf != NULL) {
			tf->type = TF_TYPE_PREDEFINED;
			tf->tf = TRANSFER_FUNCTION_SRGB;
			stream->out_transfer_func = tf;
		}
	}
}

static void fill_audio_info(
	struct audio_info *audio_info,
	const struct drm_connector *drm_connector,
	const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	while (i < AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS &&
		edid_caps->display_name[i]) {
		audio_info->display_name[i] = edid_caps->display_name[i];
		i++;
	}

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
				(enum audio_format_code)
				(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
				edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
				edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
				edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}

static void copy_crtc_timing_for_drm_display_mode(
	const struct drm_display_mode *src_mode,
	struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void decide_crtc_timing_for_drm_display_mode(
	struct drm_display_mode *drm_mode,
	const struct drm_display_mode *native_mode,
	bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
			native_mode->htotal == drm_mode->htotal &&
			native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling and no amdgpu-inserted common mode: no need to patch */
	}
}

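/*
 * Create a single-stream DC target for the sink behind this connector.
 * If a preferred (native) mode is found in the probed mode list it is
 * used to patch the CRTC timing before the stream properties are filled
 * in; the local stream reference is dropped once the target owns it.
 */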
static struct dc_target *create_target_for_sink(
	const struct amdgpu_connector *aconnector,
	const struct drm_display_mode *drm_mode,
	const struct dm_connector_state *dm_state)
{
	struct drm_display_mode *preferred_mode = NULL;
	const struct drm_connector *drm_connector;
	struct dc_target *target = NULL;
	struct dc_stream *stream;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;

	if (NULL == aconnector) {
		DRM_ERROR("aconnector is NULL!\n");
		goto drm_connector_null;
	}

	if (NULL == dm_state) {
		DRM_ERROR("dm_state is NULL!\n");
		goto dm_state_null;
	}

	drm_connector = &aconnector->base;
	stream = dc_create_stream_for_sink(aconnector->dc_sink);

	if (NULL == stream) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto stream_create_fail;
	}

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	if (NULL == preferred_mode) {
		/* This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_INFO("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
			&mode, preferred_mode,
			dm_state->scaling != RMX_OFF);
	}

	fill_stream_properties_from_drm_display_mode(stream,
		&mode, &aconnector->base);
	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		aconnector->dc_sink);

	target = dc_create_target_for_streams(&stream, 1);
	dc_stream_release(stream);

	if (NULL == target) {
		DRM_ERROR("Failed to create target with streams!\n");
		goto target_create_fail;
	}

dm_state_null:
drm_connector_null:
target_create_fail:
stream_create_fail:
	return target;
}

void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

/* Implements only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = drm_atomic_helper_crtc_reset,
	.cursor_set = dm_crtc_cursor_set,
	.cursor_move = dm_crtc_cursor_move,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);

	/* Notes:
	 * 1. This interface is NOT called in the context of the HPD irq.
	 * 2. This interface *is called* in the context of a user-mode ioctl,
	 *    which makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(
	struct drm_connector *connector,
	struct drm_connector_state *connector_state,
	struct drm_property *property,
	uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	int i;
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	}

	for_each_crtc_in_state(
		connector_state->state,
		crtc,
		new_crtc_state,
		i) {

		if (crtc == connector_state->crtc) {
			struct drm_plane_state *plane_state;

			/*
			 * Bit of magic done here. We need to ensure
			 * that planes get updated after the mode is set.
			 * So, we need to add the primary plane to the state,
			 * and this way atomic_update will be called
			 * for it.
			 */
			plane_state =
				drm_atomic_get_plane_state(
					connector_state->state,
					crtc->primary);

			/* drm_atomic_get_plane_state() returns ERR_PTR on failure */
			if (IS_ERR(plane_state))
				return PTR_ERR(plane_state);
		}
	}

	return ret;
}

void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev) {
			backlight_device_unregister(dm->backlight_dev);
			dm->backlight_dev = NULL;
		}

	}
#endif
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;

		connector->state = &state->base;
		connector->state->connector = connector;
	}
}

struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
	struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
		kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (new_state) {
		__drm_atomic_helper_connector_duplicate_state(connector,
							      &new_state->base);
		return &new_state->base;
	}

	return NULL;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property
};

static struct drm_encoder *best_encoder(struct drm_connector *connector)
{
	int enc_id = connector->encoder_ids[0];
	struct drm_mode_object *obj;
	struct drm_encoder *encoder;

	DRM_DEBUG_KMS("Finding the best encoder\n");

	/* pick the first encoder id */
	if (enc_id) {
		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
		if (!obj) {
			DRM_ERROR("Couldn't find a matching encoder for our connector\n");
			return NULL;
		}
		encoder = obj_to_encoder(obj);
		return encoder;
	}
	DRM_ERROR("No encoder id\n");
	return NULL;
}

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	/* check the blob pointer before dereferencing it for the EDID data */
	if (!aconnector->base.edid_blob_ptr ||
		!aconnector->base.edid_blob_ptr->data) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force
		== DRM_FORCE_ON)
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
			aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
}

static void handle_edid_mgmt(struct amdgpu_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/* In case of a headless boot with force on for a DP managed connector,
	 * those settings have to be != 0 to get an initial modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}

int amdgpu_dm_connector_mode_valid(
	struct drm_connector *connector,
	struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	const struct dc_sink *dc_sink;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct dc_validation_set val_set = { 0 };
	/* TODO: Unhardcode stream count */
	struct dc_stream *streams[1];
	struct dc_target *target;
	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
		(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/* Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
		!aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_connector(connector)->dc_sink;

	if (NULL == dc_sink) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto stream_create_fail;
	}

	streams[0] = dc_create_stream_for_sink(dc_sink);

	if (NULL == streams[0]) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto stream_create_fail;
	}

	drm_mode_set_crtcinfo(mode, 0);
	fill_stream_properties_from_drm_display_mode(streams[0], mode, connector);

	target = dc_create_target_for_streams(streams, 1);
	val_set.target = target;

	if (NULL == val_set.target) {
		DRM_ERROR("Failed to create target with stream!\n");
		goto target_create_fail;
	}

	val_set.surface_count = 0;
	streams[0]->src.width = mode->hdisplay;
	streams[0]->src.height = mode->vdisplay;
	streams[0]->dst = streams[0]->src;

	if (dc_validate_resources(adev->dm.dc, &val_set, 1))
		result = MODE_OK;

	dc_target_release(target);
target_create_fail:
	dc_stream_release(streams[0]);
stream_create_fail:
	/* TODO: error handling */
	return result;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If we hotplug a second, bigger display in FB Con mode, the bigger
	 * resolution modes will be filtered by drm_mode_validate_size(), and
	 * those modes are missing after the user starts lightdm. So we need
	 * to renew the modes list in the get_modes callback, not just return
	 * the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.best_encoder = best_encoder
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static int dm_crtc_helper_atomic_check(
	struct drm_crtc *crtc,
	struct drm_crtc_state *state)
{
	return 0;
}

static bool dm_crtc_helper_mode_fixup(
	struct drm_crtc *crtc,
	const struct drm_display_mode *mode,
	struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}

static int dm_encoder_helper_atomic_check(
	struct drm_encoder *encoder,
	struct drm_crtc_state *crtc_state,
	struct drm_connector_state *conn_state)
{
	return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};

static const struct drm_plane_funcs dm_plane_funcs = {
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state
};

static void clear_unrelated_fields(struct drm_plane_state *state)
{
	state->crtc = NULL;
	state->fb = NULL;
	state->state = NULL;
	state->fence = NULL;
}

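/*
 * Decide whether the old->new plane state transition can be handled as a
 * page flip rather than a full surface commit: both states need a
 * framebuffer and an event, and after zeroing the fields that always
 * differ (crtc, fb, state, fence) the two states must compare bitwise
 * equal. A tiling change forces the full commit path when a surface
 * commit is required, while an async-flip request forces the flip path.
 */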
static bool page_flip_needed(
	const struct drm_plane_state *new_state,
	const struct drm_plane_state *old_state,
	struct drm_pending_vblank_event *event,
	bool commit_surface_required)
{
	struct drm_plane_state old_state_tmp;
	struct drm_plane_state new_state_tmp;

	struct amdgpu_framebuffer *amdgpu_fb_old;
	struct amdgpu_framebuffer *amdgpu_fb_new;
	struct amdgpu_crtc *acrtc_new;

	uint64_t old_tiling_flags;
	uint64_t new_tiling_flags;

	bool page_flip_required;

	if (!old_state)
		return false;

	if (!old_state->fb)
		return false;

	if (!new_state)
		return false;

	if (!new_state->fb)
		return false;

	old_state_tmp = *old_state;
	new_state_tmp = *new_state;

	if (!event)
		return false;

	amdgpu_fb_old = to_amdgpu_framebuffer(old_state->fb);
	amdgpu_fb_new = to_amdgpu_framebuffer(new_state->fb);

	if (!get_fb_info(amdgpu_fb_old, &old_tiling_flags, NULL))
		return false;

	if (!get_fb_info(amdgpu_fb_new, &new_tiling_flags, NULL))
		return false;

	if (commit_surface_required == true &&
		old_tiling_flags != new_tiling_flags)
		return false;

	clear_unrelated_fields(&old_state_tmp);
	clear_unrelated_fields(&new_state_tmp);

	page_flip_required = memcmp(&old_state_tmp,
				    &new_state_tmp,
				    sizeof(old_state_tmp)) == 0;
	if (new_state->crtc && page_flip_required == false) {
		acrtc_new = to_amdgpu_crtc(new_state->crtc);
		if (acrtc_new->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
			page_flip_required = true;
	}
	return page_flip_required;
}

static int dm_plane_helper_prepare_fb(
	struct drm_plane *plane,
	struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *rbo;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);

	obj = afb->obj;
	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL);

	amdgpu_bo_unreserve(rbo);

	if (unlikely(r != 0)) {
		DRM_ERROR("Failed to pin framebuffer\n");
		return r;
	}

	return 0;
}

static void dm_plane_helper_cleanup_fb(
	struct drm_plane *plane,
	struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	struct amdgpu_framebuffer *afb;
	int r;

	if (!old_state->fb)
		return;

	afb = to_amdgpu_framebuffer(old_state->fb);
	rbo = gem_to_amdgpu_bo(afb->obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
}

int dm_create_validation_set_for_target(struct drm_connector *connector,
		struct drm_display_mode *mode, struct dc_validation_set *val_set)
{
	int result = MODE_ERROR;
	const struct dc_sink *dc_sink =
		to_amdgpu_connector(connector)->dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream *streams[1];
	struct dc_target *target;

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
		(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	if (NULL == dc_sink) {
		DRM_ERROR("dc_sink is NULL!\n");
		return result;
	}

	streams[0] = dc_create_stream_for_sink(dc_sink);

	if (NULL == streams[0]) {
		DRM_ERROR("Failed to create stream for sink!\n");
		return result;
	}

	drm_mode_set_crtcinfo(mode, 0);

	fill_stream_properties_from_drm_display_mode(streams[0], mode, connector);

	target = dc_create_target_for_streams(streams, 1);
	val_set->target = target;

	if (NULL == val_set->target) {
		DRM_ERROR("Failed to create target with stream!\n");
		goto fail;
	}

	streams[0]->src.width = mode->hdisplay;
	streams[0]->src.height = mode->vdisplay;
	streams[0]->dst = streams[0]->src;

	return MODE_OK;

fail:
	dc_stream_release(streams[0]);
	return result;

}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
};

/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * drm check will succeed, and let DC implement proper checks.
 */
static uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB4444,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_RGBA4444,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
};

int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			struct amdgpu_crtc *acrtc,
			uint32_t crtc_index)
{
	int res = -ENOMEM;

	struct drm_plane *primary_plane =
		kzalloc(sizeof(*primary_plane), GFP_KERNEL);

	if (!primary_plane)
		goto fail_plane;

	primary_plane->format_default = true;

	res = drm_universal_plane_init(
		dm->adev->ddev,
		primary_plane,
		0,
		&dm_plane_funcs,
		rgb_formats,
		ARRAY_SIZE(rgb_formats),
		NULL,
		DRM_PLANE_TYPE_PRIMARY, NULL);

	if (res)
		goto fail;

	primary_plane->crtc = &acrtc->base;

	drm_plane_helper_add(primary_plane, &dm_plane_helper_funcs);

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			primary_plane,
			NULL,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	acrtc->max_cursor_width = 128;
	acrtc->max_cursor_height = 128;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_mode_crtc_set_gamma_size(&acrtc->base, 256);

	return 0;
fail:
	kfree(primary_plane);
fail_plane:
	acrtc->crtc_id = -1;
	return res;
}

static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	const struct drm_connector_helper_funcs *helper =
		connector->helper_private;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = helper->best_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				&connector->probed_modes,
				head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
				amdgpu_encoder->native_mode = *preferred_mode;
			}
			break;
		}

	}
}

static struct drm_display_mode *amdgpu_dm_create_common_mode(
		struct drm_encoder *encoder, char *name,
		int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;

}

static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
		struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_connector *amdgpu_connector =
			to_amdgpu_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
			common_modes[i].h > native_mode->vdisplay ||
			(common_modes[i].w == native_mode->hdisplay &&
			common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
				common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		/* duplication can fail; skip this size rather than add a NULL mode */
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
		amdgpu_connector->num_modes++;
	}
}

static void amdgpu_dm_connector_ddc_get_modes(
	struct drm_connector *connector,
	struct edid *edid)
{
	struct amdgpu_connector *amdgpu_connector =
			to_amdgpu_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		drm_edid_to_eld(connector, edid);

		amdgpu_dm_get_native_mode(connector);
	} else
		amdgpu_connector->num_modes = 0;
}

int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	const struct drm_connector_helper_funcs *helper =
			connector->helper_private;
	struct amdgpu_connector *amdgpu_connector =
			to_amdgpu_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_connector->edid;

	encoder = helper->best_encoder(connector);

	amdgpu_dm_connector_ddc_get_modes(connector, edid);
	amdgpu_dm_connector_add_common_modes(encoder, connector);
	return amdgpu_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(
	struct amdgpu_display_manager *dm,
	struct amdgpu_connector *aconnector,
	int connector_type,
	const struct dc_link *link,
	int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = true;
	aconnector->base.doublescan_allowed = true;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */

	mutex_init(&aconnector->hpd_lock);

	/* Configure HPD hot plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
			dm->ddev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
			adev->mode_info.underscan_property,
			UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
			adev->mode_info.underscan_hborder_property,
			0);
	drm_object_attach_property(&aconnector->base.base,
			adev->mode_info.underscan_vborder_property,
			0);

}

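/*
 * I2C transfer hook: translate the Linux i2c_msg array into a single DC
 * i2c_command with one payload per message. I2C_M_RD marks a message as
 * a read, everything else is a write. Returns the number of messages on
 * success, -EIO otherwise.
 */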
int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kzalloc(num * sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		/* I2C_M_RD means a read; the payload is a write otherwise */
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(i2c->dm->dc, i2c->link_index, &cmd))
		result = num;

	kfree(cmd.payloads);

	return result;
}

u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

struct amdgpu_i2c_adapter *create_i2c(unsigned int link_index, struct amdgpu_display_manager *dm, int *res)
{
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;

	i2c->dm = dm;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &dm->adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c->link_index = link_index;
	i2c_set_adapdata(&i2c->base, i2c);

	return i2c;
}
1940
1941 /* Note: this function assumes that dc_link_detect() was called for the
1942 * dc_link which will be represented by this aconnector. */
1943 int amdgpu_dm_connector_init(
1944 struct amdgpu_display_manager *dm,
1945 struct amdgpu_connector *aconnector,
1946 uint32_t link_index,
1947 struct amdgpu_encoder *aencoder)
1948 {
1949 int res = 0;
1950 int connector_type;
1951 struct dc *dc = dm->dc;
1952 const struct dc_link *link = dc_get_link_at_index(dc, link_index);
1953 struct amdgpu_i2c_adapter *i2c;
1954
1955 DRM_DEBUG_KMS("%s()\n", __func__);
1956
1957 i2c = create_i2c(link->link_index, dm, &res);
1958 aconnector->i2c = i2c;
1959 res = i2c_add_adapter(&i2c->base);
1960
1961 if (res) {
1962 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
1963 goto out_free;
1964 }
1965
1966 connector_type = to_drm_connector_type(link->connector_signal);
1967
1968 res = drm_connector_init(
1969 dm->ddev,
1970 &aconnector->base,
1971 &amdgpu_dm_connector_funcs,
1972 connector_type);
1973
1974 if (res) {
1975 DRM_ERROR("connector_init failed\n");
1976 aconnector->connector_id = -1;
1977 goto out_free;
1978 }
1979
1980 drm_connector_helper_add(
1981 &aconnector->base,
1982 &amdgpu_dm_connector_helper_funcs);
1983
1984 amdgpu_dm_connector_init_helper(
1985 dm,
1986 aconnector,
1987 connector_type,
1988 link,
1989 link_index);
1990
1991 drm_mode_connector_attach_encoder(
1992 &aconnector->base, &aencoder->base);
1993
1994 drm_connector_register(&aconnector->base);
1995
1996 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
1997 || connector_type == DRM_MODE_CONNECTOR_eDP)
1998 amdgpu_dm_initialize_mst_connector(dm, aconnector);
1999
2000 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2001 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2002
2003 /* NOTE: this will currently create a backlight device even if a panel
2004 * is not connected to the eDP/LVDS connector.
2005 *
2006 * This is less than ideal, but we don't have sink information at this
2007 * stage, since detection happens afterwards. We can't do detection
2008 * earlier, since MST detection needs the connectors to be created first.
2009 */
2010 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
2011 /* Even if registration failed, we should continue with
2012 * DM initialization, because not having a backlight control
2013 * is better than a black screen. */
2014 amdgpu_dm_register_backlight_device(dm);
2015
2016 if (dm->backlight_dev)
2017 dm->backlight_link = link;
2018 }
2019 #endif
2020
2021 out_free:
2022 if (res) {
2023 kfree(i2c);
2024 aconnector->i2c = NULL;
2025 }
2026 return res;
2027 }
2028
2029 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
2030 {
2031 switch (adev->mode_info.num_crtc) {
2032 case 1:
2033 return 0x1;
2034 case 2:
2035 return 0x3;
2036 case 3:
2037 return 0x7;
2038 case 4:
2039 return 0xf;
2040 case 5:
2041 return 0x1f;
2042 case 6:
2043 default:
2044 return 0x3f;
2045 }
2046 }
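/*
 * Equivalent formulation (illustrative only): for the 1..6 CRTCs the
 * driver actually programs, the switch above is a saturating bitmask:
 *
 *	mask = (1 << min(adev->mode_info.num_crtc, 6)) - 1;
 *
 * e.g. num_crtc = 4 gives (1 << 4) - 1 = 0xf, matching the table.
 */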
2047
2048 int amdgpu_dm_encoder_init(
2049 struct drm_device *dev,
2050 struct amdgpu_encoder *aencoder,
2051 uint32_t link_index)
2052 {
2053 struct amdgpu_device *adev = dev->dev_private;
2054
2055 int res = drm_encoder_init(dev,
2056 &aencoder->base,
2057 &amdgpu_dm_encoder_funcs,
2058 DRM_MODE_ENCODER_TMDS,
2059 NULL);
2060
2061 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
2062
2063 if (!res)
2064 aencoder->encoder_id = link_index;
2065 else
2066 aencoder->encoder_id = -1;
2067
2068 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
2069
2070 return res;
2071 }
2072
2073 enum dm_commit_action {
2074 DM_COMMIT_ACTION_NOTHING,
2075 DM_COMMIT_ACTION_RESET,
2076 DM_COMMIT_ACTION_DPMS_ON,
2077 DM_COMMIT_ACTION_DPMS_OFF,
2078 DM_COMMIT_ACTION_SET
2079 };
2080
2081 static enum dm_commit_action get_dm_commit_action(struct drm_crtc_state *state)
2082 {
2083 /* mode_changed means either the mode itself or the enabled state changed */
2084 /* active_changed means the dpms state changed */
2085
2086 DRM_DEBUG_KMS("crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
2087 state->enable,
2088 state->active,
2089 state->planes_changed,
2090 state->mode_changed,
2091 state->active_changed,
2092 state->connectors_changed);
2093
2094 if (state->mode_changed) {
2095 /* if it got disabled - call reset mode */
2096 if (!state->enable)
2097 return DM_COMMIT_ACTION_RESET;
2098
2099 if (state->active)
2100 return DM_COMMIT_ACTION_SET;
2101 else
2102 return DM_COMMIT_ACTION_RESET;
2103 } else {
2104 /* ! mode_changed */
2105
2106 /* if it remains disabled - skip it */
2107 if (!state->enable)
2108 return DM_COMMIT_ACTION_NOTHING;
2109
2110 if (state->active && state->connectors_changed)
2111 return DM_COMMIT_ACTION_SET;
2112
2113 if (state->active_changed) {
2114 if (state->active) {
2115 return DM_COMMIT_ACTION_DPMS_ON;
2116 } else {
2117 return DM_COMMIT_ACTION_DPMS_OFF;
2118 }
2119 } else {
2120 /* ! active_changed */
2121 return DM_COMMIT_ACTION_NOTHING;
2122 }
2123 }
2124 }
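/*
 * Summary of the mapping above (derived from the code, for reference):
 *
 *	mode_changed  enable  active  other                      action
 *	yes           no      -       -                          RESET
 *	yes           yes     yes     -                          SET
 *	yes           yes     no      -                          RESET
 *	no            no      -       -                          NOTHING
 *	no            yes     yes     connectors_changed         SET
 *	no            yes     yes     active_changed             DPMS_ON
 *	no            yes     no      active_changed             DPMS_OFF
 *	no            yes     -       otherwise                  NOTHING
 */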
2125
2126
2127 typedef bool (*predicate)(struct amdgpu_crtc *acrtc);
2128
2129 static void wait_while_pflip_status(struct amdgpu_device *adev,
2130 struct amdgpu_crtc *acrtc, predicate f)
{
2131 int count = 0;
2132 while (f(acrtc)) {
2133 /* poll, sleeping 1 ms per iteration */
2134 msleep(1);
2135 count++;
2136 if (count == 1000) {
2137 DRM_ERROR("%s - crtc:%d[%p], pflip_stat:%d, probable hang!\n",
2138 __func__, acrtc->crtc_id,
2139 acrtc,
2140 acrtc->pflip_status);
2141
2142 /* We do not expect to hit this case except on Polaris with PHY PLL:
2143 * 1. DP to HDMI passive dongle connected
2144 * 2. unplug (headless)
2145 * 3. plug in DP
2146 * 3a. on plug-in, DP will try to verify the link by training;
2147 * training disables the PHY PLL which HDMI relies on to drive the TG
2148 * 3b. this prevents the flip interrupt from being generated, so we
2149 * exit when the timeout expires. However, we have no code to clean
2150 * up the flip; flip cleanup happens when the address is written
2151 * with the restore mode change
2152 */
2153 WARN_ON(1);
2154 break;
2155 }
2156 }
2157
2158 DRM_DEBUG_DRIVER("%s - Finished waiting for:%d msec, crtc:%d[%p], pflip_stat:%d\n",
2159 __func__,
2160 count,
2161 acrtc->crtc_id,
2162 acrtc,
2163 acrtc->pflip_status);
2164 }
2165
2166 static bool pflip_in_progress_predicate(struct amdgpu_crtc *acrtc)
2167 {
2168 return acrtc->pflip_status != AMDGPU_FLIP_NONE;
2169 }
2170
2171 static void manage_dm_interrupts(
2172 struct amdgpu_device *adev,
2173 struct amdgpu_crtc *acrtc,
2174 bool enable)
2175 {
2176 /*
2177 * this is not a correct translation, but it works as long as the
2178 * VBLANK irq constant is the same as the PFLIP one
2179 */
2180 int irq_type =
2181 amdgpu_crtc_idx_to_irq_type(
2182 adev,
2183 acrtc->crtc_id);
2184
2185 if (enable) {
2186 drm_crtc_vblank_on(&acrtc->base);
2187 amdgpu_irq_get(
2188 adev,
2189 &adev->pageflip_irq,
2190 irq_type);
2191 } else {
2192 wait_while_pflip_status(adev, acrtc,
2193 pflip_in_progress_predicate);
2194
2195 amdgpu_irq_put(
2196 adev,
2197 &adev->pageflip_irq,
2198 irq_type);
2199 drm_crtc_vblank_off(&acrtc->base);
2200 }
2201 }
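/*
 * Ordering note (informational): on enable, vblank is turned on before the
 * pageflip irq reference is taken; on disable the sequence is reversed,
 * and we additionally wait for any in-progress flip first, so the irq is
 * never released while a flip still depends on it.
 */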
2202
2203
2204 static bool pflip_pending_predicate(struct amdgpu_crtc *acrtc)
2205 {
2206 return acrtc->pflip_status == AMDGPU_FLIP_PENDING;
2207 }
2208
2209 static bool is_scaling_state_different(
2210 const struct dm_connector_state *dm_state,
2211 const struct dm_connector_state *old_dm_state)
2212 {
2213 if (dm_state->scaling != old_dm_state->scaling)
2214 return true;
2215 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
2216 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
2217 return true;
2218 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
2219 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
2220 return true;
2221 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder
2222 || dm_state->underscan_vborder != old_dm_state->underscan_vborder)
2223 return true;
2224 return false;
2225 }
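/*
 * Example (informational): enabling underscan with a non-zero border,
 * e.g. hborder = vborder = 16, or changing the border values while
 * underscan stays enabled, both return true here and force a new target
 * to be built during atomic check.
 */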
2226
2227 static void remove_target(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc)
2228 {
2229 int i;
2230
2231 /*
2232 * disable vblank and pflip interrupts on the crtc
2233 * that is about to change
2234 */
2235 manage_dm_interrupts(adev, acrtc, false);
2236 /* this is the update mode case */
2237 if (adev->dm.freesync_module)
2238 for (i = 0; i < acrtc->target->stream_count; i++)
2239 mod_freesync_remove_stream(
2240 adev->dm.freesync_module,
2241 acrtc->target->streams[i]);
2242 dc_target_release(acrtc->target);
2243 acrtc->target = NULL;
2244 acrtc->otg_inst = -1;
2245 acrtc->enabled = false;
2246 }
2247
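/*
 * High-level flow of amdgpu_dm_atomic_commit() below (for reference):
 *
 *	1. prepare/pin all new framebuffers (synchronous path only)
 *	2. prepare page flips, cleaning up and bailing out on failure
 *	3. swap the atomic state (the point of no return)
 *	4. per crtc, create/remove dc targets according to the commit action
 *	5. commit the full target list to dc
 *	6. commit surfaces for the planes that need them
 *	7. enable interrupts on newly set crtcs, then submit the flips
 *	8. clean up planes and drop the state reference
 */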
2248 int amdgpu_dm_atomic_commit(
2249 struct drm_device *dev,
2250 struct drm_atomic_state *state,
2251 bool async)
2252 {
2253 struct amdgpu_device *adev = dev->dev_private;
2254 struct amdgpu_display_manager *dm = &adev->dm;
2255 struct drm_plane *plane;
2256 struct drm_plane_state *new_plane_state;
2257 struct drm_plane_state *old_plane_state;
2258 uint32_t i, j;
2259 int32_t ret = 0;
2260 uint32_t commit_targets_count = 0;
2261 uint32_t new_crtcs_count = 0;
2262 uint32_t flip_crtcs_count = 0;
2263 struct drm_crtc *crtc;
2264 struct drm_crtc_state *old_crtc_state;
2265
2266 struct dc_target *commit_targets[MAX_TARGETS];
2267 struct amdgpu_crtc *new_crtcs[MAX_TARGETS];
2268 struct dc_target *new_target;
2269 struct drm_crtc *flip_crtcs[MAX_TARGETS];
2270 struct amdgpu_flip_work *work[MAX_TARGETS] = {0};
2271 struct amdgpu_bo *new_abo[MAX_TARGETS] = {0};
2272
2273 /* In this step all new framebuffers get pinned */
2274
2275 /*
2276 * TODO: Revisit when we support true asynchronous commit.
2277 * Right now we receive async commit only from pageflip, in which case
2278 * we should not pin/unpin the fb here, it should be done in
2279 * amdgpu_crtc_flip and from the vblank irq handler.
2280 */
2281 if (!async) {
2282 ret = drm_atomic_helper_prepare_planes(dev, state);
2283 if (ret)
2284 return ret;
2285 }
2286
2287 /* Page flip if needed */
2288 for_each_plane_in_state(state, plane, new_plane_state, i) {
2289 struct drm_plane_state *old_plane_state = plane->state;
2290 struct drm_crtc *crtc = new_plane_state->crtc;
2291 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2292 struct drm_framebuffer *fb = new_plane_state->fb;
2293 struct drm_crtc_state *crtc_state;
2294
2295 if (!fb || !crtc)
2296 continue;
2297
2298 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2299
2300 if (!crtc_state->planes_changed || !crtc_state->active)
2301 continue;
2302
2303 if (page_flip_needed(
2304 new_plane_state,
2305 old_plane_state,
2306 crtc_state->event,
2307 false)) {
2308 ret = amdgpu_crtc_prepare_flip(crtc,
2309 fb,
2310 crtc_state->event,
2311 acrtc->flip_flags,
2312 drm_crtc_vblank_count(crtc),
2313 &work[flip_crtcs_count],
2314 &new_abo[flip_crtcs_count]);
2315
2316 if (ret) {
2317 /* According to atomic_commit hook API, EINVAL is not allowed */
2318 if (unlikely(ret == -EINVAL))
2319 ret = -ENOMEM;
2320
2321 DRM_ERROR("Atomic commit: Flip for crtc id %d: [%p], "
2322 "failed, errno = %d\n",
2323 acrtc->crtc_id,
2324 acrtc,
2325 ret);
2326 /* cleanup all flip configurations which
2327 * succeeded in this commit
2328 */
2329 for (i = 0; i < flip_crtcs_count; i++)
2330 amdgpu_crtc_cleanup_flip_ctx(
2331 work[i],
2332 new_abo[i]);
2333
2334 return ret;
2335 }
2336
2337 flip_crtcs[flip_crtcs_count] = crtc;
2338 flip_crtcs_count++;
2339 }
2340 }
2341
2342 /*
2343 * This is the point of no return - everything below never fails except
2344 * when the hardware misbehaves. Which means we can commit the new state
2345 * on the software side now.
2346 */
2347
2348 drm_atomic_helper_swap_state(state, true);
2349
2350 /*
2351 * From this point on, 'state' really holds the old state. The new state
2352 * has been swapped into the drm objects and can be accessed from there.
2353 */
2354
2355 /*
2356 * Fences are not used in the state yet, so the following call can be
2357 * skipped: wait_for_fences(dev, state);
2358 */
2359
2360 drm_atomic_helper_update_legacy_modeset_state(dev, state);
2361
2362 /* update changed items */
2363 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
2364 struct amdgpu_crtc *acrtc;
2365 struct amdgpu_connector *aconnector = NULL;
2366 enum dm_commit_action action;
2367 struct drm_crtc_state *new_state = crtc->state;
2368
2369 acrtc = to_amdgpu_crtc(crtc);
2370
2371 aconnector =
2372 amdgpu_dm_find_first_crct_matching_connector(
2373 state,
2374 crtc,
2375 false);
2376
2377 /* handles headless hotplug case, updating new_state and
2378 * aconnector as needed
2379 */
2380
2381 action = get_dm_commit_action(new_state);
2382
2383 switch (action) {
2384 case DM_COMMIT_ACTION_DPMS_ON:
2385 case DM_COMMIT_ACTION_SET: {
2386 struct dm_connector_state *dm_state = NULL;
2387 new_target = NULL;
2388
2389 if (aconnector)
2390 dm_state = to_dm_connector_state(aconnector->base.state);
2391
2392 new_target = create_target_for_sink(
2393 aconnector,
2394 &crtc->state->mode,
2395 dm_state);
2396
2397 DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
2398
2399 if (!new_target) {
2400 /*
2401 * This could happen because of issues with
2402 * userspace notification delivery. In this
2403 * case userspace tries to set a mode on a
2404 * display which is in fact disconnected, and
2405 * dc_sink on the aconnector is NULL. We
2406 * expect a reset mode to come soon.
2407 *
2408 * This can also happen when an unplug is done
2409 * during the resume sequence.
2410 *
2411 * In this case we want to pretend we still
2412 * have a sink to keep the pipe running, so
2413 * that hw state is consistent with sw state.
2414 */
2415 DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n",
2416 __func__, acrtc->base.base.id);
2417 break;
2418 }
2419
2420 if (acrtc->target)
2421 remove_target(adev, acrtc);
2422
2423 /*
2424 * This saves the set-mode crtcs; we need to enable vblanks
2425 * for them once all resources are acquired in dc, after
2426 * dc_commit_targets.
2427 */
2428 new_crtcs[new_crtcs_count] = acrtc;
2429 new_crtcs_count++;
2430
2431 acrtc->target = new_target;
2432 acrtc->enabled = true;
2433 acrtc->hw_mode = crtc->state->mode;
2434 crtc->hwmode = crtc->state->mode;
2435
2436 break;
2437 }
2438
2439 case DM_COMMIT_ACTION_NOTHING: {
2440 struct dm_connector_state *dm_state = NULL;
2441
2442 if (!aconnector)
2443 break;
2444
2445 dm_state = to_dm_connector_state(aconnector->base.state);
2446
2447 /* Scaling update */
2448 update_stream_scaling_settings(
2449 &crtc->state->mode,
2450 dm_state,
2451 acrtc->target->streams[0]);
2452
2453 break;
2454 }
2455 case DM_COMMIT_ACTION_DPMS_OFF:
2456 case DM_COMMIT_ACTION_RESET:
2457 DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
2458 /* i.e. reset mode */
2459 if (acrtc->target)
2460 remove_target(adev, acrtc);
2461 break;
2462 } /* switch() */
2463 } /* for_each_crtc_in_state() */
2464
2465 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2466
2467 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2468
2469 if (acrtc->target) {
2470 commit_targets[commit_targets_count] = acrtc->target;
2471 ++commit_targets_count;
2472 }
2473 }
2474
2475 /*
2476 * Add streams only after the streams required by the new and the
2477 * replaced targets have been removed from the freesync module.
2478 */
2479 if (adev->dm.freesync_module) {
2480 for (i = 0; i < new_crtcs_count; i++) {
2481 struct amdgpu_connector *aconnector = NULL;
2482 new_target = new_crtcs[i]->target;
2483 aconnector =
2484 amdgpu_dm_find_first_crct_matching_connector(
2485 state,
2486 &new_crtcs[i]->base,
2487 false);
2488 if (!aconnector) {
2489 DRM_INFO(
2490 "Atomic commit: Failed to find connector for acrtc id:%d "
2491 "skipping freesync init\n",
2492 new_crtcs[i]->crtc_id);
2493 continue;
2494 }
2495
2496 for (j = 0; j < new_target->stream_count; j++)
2497 mod_freesync_add_stream(
2498 adev->dm.freesync_module,
2499 new_target->streams[j], &aconnector->caps);
2500 }
2501 }
2502
2503 /* DC is optimized not to do anything if 'targets' didn't change. */
2504 dc_commit_targets(dm->dc, commit_targets, commit_targets_count);
2505
2506 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2507 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2508
2509 if (acrtc->target != NULL)
2510 acrtc->otg_inst =
2511 dc_target_get_status(acrtc->target)->primary_otg_inst;
2512 }
2513
2514 /* update planes when needed */
2515 for_each_plane_in_state(state, plane, old_plane_state, i) {
2516 struct drm_plane_state *plane_state = plane->state;
2517 struct drm_crtc *crtc = plane_state->crtc;
2518 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2519 struct drm_framebuffer *fb = plane_state->fb;
2520 struct drm_connector *connector;
2521 struct dm_connector_state *dm_state = NULL;
2522 enum dm_commit_action action;
2523
2524 if (!fb || !crtc || !crtc->state->active)
2525 continue;
2526
2527 action = get_dm_commit_action(crtc->state);
2528
2529 /* Surfaces are created under two scenarios:
2530 * 1. This commit is not a page flip.
2531 * 2. This commit is a page flip, and targets are created.
2532 */
2533 if (!page_flip_needed(
2534 plane_state,
2535 old_plane_state,
2536 crtc->state->event, true) ||
2537 action == DM_COMMIT_ACTION_DPMS_ON ||
2538 action == DM_COMMIT_ACTION_SET) {
2539 list_for_each_entry(connector,
2540 &dev->mode_config.connector_list, head) {
2541 if (connector->state->crtc == crtc) {
2542 dm_state = to_dm_connector_state(
2543 connector->state);
2544 break;
2545 }
2546 }
2547
2548 /*
2549 * This situation happens in the following case:
2550 * we are about to set a mode for a connector whose only
2551 * possible crtc (in the encoder crtc mask) is used by
2552 * another connector; the helper will then try to
2553 * re-assign crtcs in order to make the configuration
2554 * supported. For our implementation we need to make all
2555 * encoders support all crtcs; then this issue will
2556 * never arise again. But to guard the code against this
2557 * issue, the check is left in place.
2558 *
2559 * It should also be needed when used with the actual
2560 * drm_atomic_commit ioctl in the future.
2561 */
2562 if (!dm_state)
2563 continue;
2564
2565 /*
2566 * If a flip is pending (i.e. still waiting for the fence to return
2567 * before the address is submitted), we cannot commit_surface here, as
2568 * it would prematurely write out the future address. Wait until the
2569 * flip is submitted before proceeding.
2570 */
2571 wait_while_pflip_status(adev, acrtc, pflip_pending_predicate);
2572
2573 dm_dc_surface_commit(dm->dc, crtc);
2574 }
2575 }
2576
2577 for (i = 0; i < new_crtcs_count; i++) {
2578 /*
2579 * loop to enable interrupts on newly arrived crtc
2580 */
2581 struct amdgpu_crtc *acrtc = new_crtcs[i];
2582
2583 if (adev->dm.freesync_module) {
2584 /* notify once for the whole stream array; looping
2585 * per stream here was redundant, as the loop index
2586 * was never used */
2587 mod_freesync_notify_mode_change(
2588 adev->dm.freesync_module,
2589 acrtc->target->streams,
acrtc->target->stream_count);
2590 }
2590
2591 manage_dm_interrupts(adev, acrtc, true);
2592 dm_crtc_cursor_reset(&acrtc->base);
2593
2594 }
2595
2596 /* Do actual flip */
2597 flip_crtcs_count = 0;
2598 for_each_plane_in_state(state, plane, old_plane_state, i) {
2599 struct drm_plane_state *plane_state = plane->state;
2600 struct drm_crtc *crtc = plane_state->crtc;
2601 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2602 struct drm_framebuffer *fb = plane_state->fb;
2603
2604 if (!fb || !crtc || !crtc->state->planes_changed ||
2605 !crtc->state->active)
2606 continue;
2607
2608 if (page_flip_needed(
2609 plane_state,
2610 old_plane_state,
2611 crtc->state->event,
2612 false)) {
2613 amdgpu_crtc_submit_flip(
2614 crtc,
2615 fb,
2616 work[flip_crtcs_count],
2617 new_abo[flip_crtcs_count]);
2618 flip_crtcs_count++;
2619 /* clean up the flags for next usage */
2620 acrtc->flip_flags = 0;
2621 }
2622 }
2623
2624 /* In this state all old framebuffers would be unpinned */
2625
2626 /* TODO: Revisit when we support true asynchronous commit.*/
2627 if (!async)
2628 drm_atomic_helper_cleanup_planes(dev, state);
2629
2630 drm_atomic_state_put(state);
2631
2632 return ret;
2633 }
2634 /*
2635 * This function handles all cases when a set mode does not come upon hotplug.
2636 * This includes when the same display is unplugged and then plugged back into
2637 * the same port, and when we run without usermode desktop manager support.
2638 */
2639 void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector)
2640 {
2641 struct drm_crtc *crtc;
2642 struct amdgpu_device *adev = dev->dev_private;
2643 struct dc *dc = adev->dm.dc;
2644 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
2645 struct amdgpu_crtc *disconnected_acrtc;
2646 const struct dc_sink *sink;
2647 struct dc_target *commit_targets[MAX_TARGETS];
2648 struct dc_target *current_target;
2649 uint32_t commit_targets_count = 0;
2650 int i;
2651
2652 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
2653 return;
2654
2655 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
2656
2657 if (!disconnected_acrtc || !disconnected_acrtc->target)
2658 return;
2659
2660 sink = disconnected_acrtc->target->streams[0]->sink;
2661
2662 /*
2663 * If the previous sink is not released and is different from the
2664 * current one, we deduce we are in a state where we cannot rely on a
2665 * usermode call to turn on the display, so we do it here.
2666 */
2667 if (sink != aconnector->dc_sink) {
2668 struct dm_connector_state *dm_state =
2669 to_dm_connector_state(aconnector->base.state);
2670
2671 struct dc_target *new_target =
2672 create_target_for_sink(
2673 aconnector,
2674 &disconnected_acrtc->base.state->mode,
2675 dm_state);
2676
2677 DRM_INFO("Headless hotplug, restoring connector state\n");
2678 /*
2679 * disable vblank and pflip interrupts on the crtc
2680 * that is about to change
2681 */
2682 manage_dm_interrupts(adev, disconnected_acrtc, false);
2683 /* this is the update mode case */
2684
2685 current_target = disconnected_acrtc->target;
2686
2687 disconnected_acrtc->target = new_target;
2688 disconnected_acrtc->enabled = true;
2689 disconnected_acrtc->hw_mode = disconnected_acrtc->base.state->mode;
2690
2691 commit_targets_count = 0;
2692
2693 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2694 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2695
2696 if (acrtc->target) {
2697 commit_targets[commit_targets_count] = acrtc->target;
2698 ++commit_targets_count;
2699 }
2700 }
2701
2702 /* DC is optimized not to do anything if 'targets' didn't change. */
2703 if (!dc_commit_targets(dc, commit_targets,
2704 commit_targets_count)) {
2705 DRM_INFO("Failed to restore connector state!\n");
2706 dc_target_release(disconnected_acrtc->target);
2707 disconnected_acrtc->target = current_target;
2708 manage_dm_interrupts(adev, disconnected_acrtc, true);
2709 return;
2710 }
2711
2712 if (adev->dm.freesync_module) {
2713
2714 for (i = 0; i < current_target->stream_count; i++)
2715 mod_freesync_remove_stream(
2716 adev->dm.freesync_module,
2717 current_target->streams[i]);
2718
2719 for (i = 0; i < new_target->stream_count; i++)
2720 mod_freesync_add_stream(
2721 adev->dm.freesync_module,
2722 new_target->streams[i],
2723 &aconnector->caps);
2724 }
2725 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2726 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2727
2728 if (acrtc->target != NULL) {
2729 acrtc->otg_inst =
2730 dc_target_get_status(acrtc->target)->primary_otg_inst;
2731 }
2732 }
2733
2734 dc_target_release(current_target);
2735
2736 dm_dc_surface_commit(dc, &disconnected_acrtc->base);
2737
2738 manage_dm_interrupts(adev, disconnected_acrtc, true);
2739 dm_crtc_cursor_reset(&disconnected_acrtc->base);
2740
2741 }
2742 }
2743
2744 static uint32_t add_val_sets_surface(
2745 struct dc_validation_set *val_sets,
2746 uint32_t set_count,
2747 const struct dc_target *target,
2748 const struct dc_surface *surface)
2749 {
2750 uint32_t i = 0;
2751
2752 while (i < set_count) {
2753 if (val_sets[i].target == target)
2754 break;
2755 ++i;
2756 }
2757 
if (i == set_count)
return 0; /* target not found; avoid writing past the set array */

2758 val_sets[i].surfaces[val_sets[i].surface_count] = surface;
2759 val_sets[i].surface_count++;
2760
2761 return val_sets[i].surface_count;
2762 }
2763
2764 static uint32_t update_in_val_sets_target(
2765 struct dc_validation_set *val_sets,
2766 struct drm_crtc **crtcs,
2767 uint32_t set_count,
2768 const struct dc_target *old_target,
2769 const struct dc_target *new_target,
2770 struct drm_crtc *crtc)
2771 {
2772 uint32_t i = 0;
2773
2774 while (i < set_count) {
2775 if (val_sets[i].target == old_target)
2776 break;
2777 ++i;
2778 }
2779
2780 val_sets[i].target = new_target;
2781 crtcs[i] = crtc;
2782
2783 if (i == set_count) {
2784 /* nothing found. add new one to the end */
2785 return set_count + 1;
2786 }
2787
2788 return set_count;
2789 }
2790
2791 static uint32_t remove_from_val_sets(
2792 struct dc_validation_set *val_sets,
2793 uint32_t set_count,
2794 const struct dc_target *target)
2795 {
2796 int i;
2797
2798 for (i = 0; i < set_count; i++)
2799 if (val_sets[i].target == target)
2800 break;
2801
2802 if (i == set_count) {
2803 /* nothing found */
2804 return set_count;
2805 }
2806
2807 set_count--;
2808
2809 for (; i < set_count; i++) {
2810 val_sets[i] = val_sets[i + 1];
2811 }
2812
2813 return set_count;
2814 }
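/*
 * Example (informational): removing the middle entry of a three-entry set
 * shifts the tail down, so {A, B, C} minus B becomes {A, C} and the
 * returned set_count drops from 3 to 2.
 */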
2815
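/*
 * High-level flow of amdgpu_dm_atomic_check() below (for reference):
 *
 *	1. run the generic drm_atomic_helper_check()
 *	2. seed the validation set with the currently committed targets
 *	3. per crtc, apply the commit action: replace targets for
 *	   SET/DPMS_ON, rebuild on scaling changes, drop on RESET/DPMS_OFF
 *	4. attach a dc_surface for every plane that will need one
 *	5. ask dc_validate_resources() whether the whole set is realizable
 *	6. release all temporary surfaces and targets
 */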
2816 int amdgpu_dm_atomic_check(struct drm_device *dev,
2817 struct drm_atomic_state *state)
2818 {
2819 struct drm_crtc *crtc;
2820 struct drm_crtc_state *crtc_state;
2821 struct drm_plane *plane;
2822 struct drm_plane_state *plane_state;
2823 int i, j;
2824 int ret;
2825 int set_count;
2826 int new_target_count;
2827 struct dc_validation_set set[MAX_TARGETS] = {{ 0 }};
2828 struct dc_target *new_targets[MAX_TARGETS] = { 0 };
2829 struct drm_crtc *crtc_set[MAX_TARGETS] = { 0 };
2830 struct amdgpu_device *adev = dev->dev_private;
2831 struct dc *dc = adev->dm.dc;
2832 bool need_to_validate = false;
2833
2834 ret = drm_atomic_helper_check(dev, state);
2835
2836 if (ret) {
2837 DRM_ERROR("Atomic state validation failed with error :%d !\n",
2838 ret);
2839 return ret;
2840 }
2841
2842 ret = -EINVAL;
2843
2844 /* copy existing configuration */
2845 new_target_count = 0;
2846 set_count = 0;
2847 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2848
2849 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2850
2851 if (acrtc->target) {
2852 set[set_count].target = acrtc->target;
2853 crtc_set[set_count] = crtc;
2854 ++set_count;
2855 }
2856 }
2857
2858 /* update changed items */
2859 for_each_crtc_in_state(state, crtc, crtc_state, i) {
2860 struct amdgpu_crtc *acrtc = NULL;
2861 struct amdgpu_connector *aconnector = NULL;
2862 enum dm_commit_action action;
2863
2864 acrtc = to_amdgpu_crtc(crtc);
2865
2866 aconnector = amdgpu_dm_find_first_crct_matching_connector(state, crtc, true);
2867
2868 action = get_dm_commit_action(crtc_state);
2869
2870 switch (action) {
2871 case DM_COMMIT_ACTION_DPMS_ON:
2872 case DM_COMMIT_ACTION_SET: {
2873 struct dc_target *new_target = NULL;
2874 struct drm_connector_state *conn_state = NULL;
2875 struct dm_connector_state *dm_state = NULL;
2876
2877 if (aconnector) {
2878 conn_state = drm_atomic_get_connector_state(state, &aconnector->base);
2879 if (IS_ERR(conn_state))
2880 return PTR_ERR(conn_state);
2881 dm_state = to_dm_connector_state(conn_state);
2882 }
2883
2884 new_target = create_target_for_sink(aconnector, &crtc_state->mode, dm_state);
2885
2886 /*
2887 * We can have no target on ACTION_SET if a display
2888 * was disconnected during S3; in this case it is not an
2889 * error, the OS will be updated after detection and will
2890 * do the right thing on the next atomic commit.
2891 */
2892 if (!new_target) {
2893 DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n",
2894 __func__, acrtc->base.base.id);
2895 break;
2896 }
2897
2898 new_targets[new_target_count] = new_target;
2899 set_count = update_in_val_sets_target(
2900 set,
2901 crtc_set,
2902 set_count,
2903 acrtc->target,
2904 new_target,
2905 crtc);
2906
2907 new_target_count++;
2908 need_to_validate = true;
2909 break;
2910 }
2911
2912 case DM_COMMIT_ACTION_NOTHING: {
2913 const struct drm_connector *drm_connector = NULL;
2914 struct drm_connector_state *conn_state = NULL;
2915 struct dm_connector_state *dm_state = NULL;
2916 struct dm_connector_state *old_dm_state = NULL;
2917 struct dc_target *new_target;
2918
2919 if (!aconnector)
2920 break;
2921
2922 for_each_connector_in_state(
2923 state, drm_connector, conn_state, j) {
2924 if (&aconnector->base == drm_connector)
2925 break;
2926 }
2927
2928 old_dm_state = to_dm_connector_state(drm_connector->state);
2929 dm_state = to_dm_connector_state(conn_state);
2930
2931 /* Support underscan adjustment */
2932 if (!is_scaling_state_different(dm_state, old_dm_state))
2933 break;
2934
2935 new_target = create_target_for_sink(aconnector, &crtc_state->mode, dm_state);
2936
2937 if (!new_target) {
2938 DRM_ERROR("%s: Failed to create new target for crtc %d\n",
2939 __func__, acrtc->base.base.id);
2940 break;
2941 }
2942
2943 new_targets[new_target_count] = new_target;
2944 set_count = update_in_val_sets_target(
2945 set,
2946 crtc_set,
2947 set_count,
2948 acrtc->target,
2949 new_target,
2950 crtc);
2951
2952 new_target_count++;
2953 need_to_validate = true;
2954
2955 break;
2956 }
2957 case DM_COMMIT_ACTION_DPMS_OFF:
2958 case DM_COMMIT_ACTION_RESET:
2959 /* i.e. reset mode */
2960 if (acrtc->target) {
2961 set_count = remove_from_val_sets(
2962 set,
2963 set_count,
2964 acrtc->target);
2965 }
2966 break;
2967 }
2968
2969 /*
2970 * TODO revisit when removing commit action
2971 * and looking at atomic flags directly
2972 */
2973
2974 /* commit needs planes right now (e.g. for gamma) */
2975 /* TODO rework commit to check crtc for gamma change */
2976 ret = drm_atomic_add_affected_planes(state, crtc);
2977 if (ret)
2978 return ret;
2979 }
2980
2981 for (i = 0; i < set_count; i++) {
2982 for_each_plane_in_state(state, plane, plane_state, j) {
2983 struct drm_plane_state *old_plane_state = plane->state;
2984 struct drm_crtc *crtc = plane_state->crtc;
2985 struct drm_framebuffer *fb = plane_state->fb;
2986 struct drm_connector *connector;
2987 struct dm_connector_state *dm_state = NULL;
2988 enum dm_commit_action action;
2989 struct drm_crtc_state *crtc_state;
2990
2991
2992 if (!fb || !crtc || crtc_set[i] != crtc ||
2993 !crtc->state->planes_changed || !crtc->state->active)
2994 continue;
2995
2996 action = get_dm_commit_action(crtc->state);
2997
2998 /* Surfaces are created under two scenarios:
2999 * 1. This commit is not a page flip.
3000 * 2. This commit is a page flip, and targets are created.
3001 */
3002 crtc_state = drm_atomic_get_crtc_state(state, crtc);
3003 if (!page_flip_needed(plane_state, old_plane_state,
3004 crtc_state->event, true) ||
3005 action == DM_COMMIT_ACTION_DPMS_ON ||
3006 action == DM_COMMIT_ACTION_SET) {
3007 struct dc_surface *surface;
3008
3009 list_for_each_entry(connector,
3010 &dev->mode_config.connector_list, head) {
3011 if (connector->state->crtc == crtc) {
3012 dm_state = to_dm_connector_state(
3013 connector->state);
3014 break;
3015 }
3016 }
3017
3018 /*
3019 * This situation happens in the following case:
3020 * we are about to set a mode for a connector whose only
3021 * possible crtc (in the encoder crtc mask) is used by
3022 * another connector; the helper will then try to
3023 * re-assign crtcs in order to make the configuration
3024 * supported. For our implementation we need to make all
3025 * encoders support all crtcs; then this issue will
3026 * never arise again. But to guard the code against this
3027 * issue, the check is left in place.
3028 *
3029 * It should also be needed when used with the actual
3030 * drm_atomic_commit ioctl in the future.
3031 */
3032 if (!dm_state)
3033 continue;
3034
3035 surface = dc_create_surface(dc);
if (!surface)
continue; /* allocation failed; validate without this surface */
3036 fill_plane_attributes(
3037 crtc->dev->dev_private,
3038 surface,
3039 plane_state,
3040 false);
3041
3042 add_val_sets_surface(
3043 set,
3044 set_count,
3045 set[i].target,
3046 surface);
3047
3048 need_to_validate = true;
3049 }
3050 }
3051 }
3052
3053 if (!need_to_validate || set_count == 0 ||
3054 dc_validate_resources(dc, set, set_count))
3055 ret = 0;
3056
3057 for (i = 0; i < set_count; i++) {
3058 for (j = 0; j < set[i].surface_count; j++) {
3059 dc_surface_release(set[i].surfaces[j]);
3060 }
3061 }
3062 for (i = 0; i < new_target_count; i++)
3063 dc_target_release(new_targets[i]);
3064
3065 if (ret != 0)
3066 DRM_ERROR("Atomic check failed.\n");
3067
3068 return ret;
3069 }
3070
3071 static bool is_dp_capable_without_timing_msa(
3072 struct dc *dc,
3073 struct amdgpu_connector *amdgpu_connector)
3074 {
3075 uint8_t dpcd_data;
3076 bool capable = false;
3077 if (amdgpu_connector->dc_link &&
3078 dc_read_dpcd(dc, amdgpu_connector->dc_link->link_index,
3079 DP_DOWN_STREAM_PORT_COUNT,
3080 &dpcd_data, sizeof(dpcd_data)))
3081 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
3082
3083 return capable;
3084 }
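/*
 * Background (informational): DP_DOWN_STREAM_PORT_COUNT is DPCD register
 * 0x007; besides the downstream port count it carries the
 * MSA_TIMING_PAR_IGNORED bit (bit 6), which advertises that the sink can
 * ignore the MSA timing parameters - a prerequisite for driving it with
 * the variable timings freesync needs.
 */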
3085 void amdgpu_dm_add_sink_to_freesync_module(
3086 struct drm_connector *connector,
3087 struct edid *edid)
3088 {
3089 int i;
3090 uint64_t val_capable;
3091 bool edid_check_required;
3092 struct detailed_timing *timing;
3093 struct detailed_non_pixel *data;
3094 struct detailed_data_monitor_range *range;
3095 struct amdgpu_connector *amdgpu_connector =
3096 to_amdgpu_connector(connector);
3097
3098 struct drm_device *dev = connector->dev;
3099 struct amdgpu_device *adev = dev->dev_private;
3100 edid_check_required = false;
3101 if (!amdgpu_connector->dc_sink) {
3102 DRM_ERROR("dc_sink is NULL, cannot add sink to the freesync module.\n");
3103 return;
3104 }
3105 if (!adev->dm.freesync_module)
3106 return;
3107 /*
3108 * If the EDID is present, restrict freesync to DP and eDP only.
3109 */
3110 if (edid) {
3111 if (amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
3112 || amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
3113 edid_check_required = is_dp_capable_without_timing_msa(
3114 adev->dm.dc,
3115 amdgpu_connector);
3116 }
3117 }
3118 val_capable = 0;
3119 if (edid_check_required && (edid->version > 1 ||
3120 (edid->version == 1 && edid->revision > 1))) {
3121 for (i = 0; i < 4; i++) {
3122
3123 timing = &edid->detailed_timings[i];
3124 data = &timing->data.other_data;
3125 range = &data->data.range;
3126 /*
3127 * Check if monitor has continuous frequency mode
3128 */
3129 if (data->type != EDID_DETAIL_MONITOR_RANGE)
3130 continue;
3131 /*
3132 * Check for flag range limits only. If flag == 1 then
3133 * no additional timing information provided.
3134 * Default GTF, GTF Secondary curve and CVT are not
3135 * supported
3136 */
3137 if (range->flags != 1)
3138 continue;
3139
3140 amdgpu_connector->min_vfreq = range->min_vfreq;
3141 amdgpu_connector->max_vfreq = range->max_vfreq;
3142 amdgpu_connector->pixel_clock_mhz =
3143 range->pixel_clock_mhz * 10;
3144 break;
3145 }
3146
3147 if (amdgpu_connector->max_vfreq -
3148 amdgpu_connector->min_vfreq > 10) {
3149 amdgpu_connector->caps.supported = true;
3150 amdgpu_connector->caps.min_refresh_in_micro_hz =
3151 amdgpu_connector->min_vfreq * 1000000;
3152 amdgpu_connector->caps.max_refresh_in_micro_hz =
3153 amdgpu_connector->max_vfreq * 1000000;
3154 val_capable = 1;
3155 }
3156 }
3157
3158 /*
3159 * TODO figure out how to notify user-mode or DRM of freesync caps
3160 * once we figure out how to deal with freesync in an upstreamable
3161 * fashion
3162 */
3163
3164 }
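/*
 * Worked example (informational): an EDID range-limits descriptor with
 * min_vfreq = 40 and max_vfreq = 144 passes the "> 10" span check above
 * and yields caps of 40,000,000 to 144,000,000 micro-hertz, while a
 * fixed 60-60 Hz panel fails the check and val_capable stays 0.
 */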
3165
3166 void amdgpu_dm_remove_sink_from_freesync_module(
3167 struct drm_connector *connector)
3168 {
3169 /*
3170 * TODO fill in once we figure out how to deal with freesync in
3171 * an upstreamable fashion
3172 */
3173 }