drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c
[mirror_ubuntu-bionic-kernel.git, at commit "drm/amd/display: We don't support interlace and doublescan"]
/*
 * Copyright 2012-13 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/types.h>
#include <linux/version.h>

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_edid.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "dm_services_types.h"

/* We need to #undef FRAME_SIZE and DEPRECATED because they conflict
 * with ptrace-abi.h's #define's of them.
 */
#undef FRAME_SIZE
#undef DEPRECATED

#include "dc.h"

#include "amdgpu_dm_types.h"
#include "amdgpu_dm_mst_types.h"

#include "modules/inc/mod_freesync.h"

struct dm_connector_state {
        struct drm_connector_state base;

        enum amdgpu_rmx_type scaling;
        uint8_t underscan_vborder;
        uint8_t underscan_hborder;
        bool underscan_enable;
};

#define to_dm_connector_state(x)\
        container_of((x), struct dm_connector_state, base)


void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
        kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
        .destroy = amdgpu_dm_encoder_destroy,
};

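/*
 * Program cursor attributes and position through DC. Negative screen
 * coordinates are clamped to zero and compensated for by shifting the
 * cursor hotspot, so the cursor image can partially scroll off the
 * top-left corner of the surface.
 */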
static void dm_set_cursor(
        struct amdgpu_crtc *amdgpu_crtc,
        uint64_t gpu_addr,
        uint32_t width,
        uint32_t height)
{
        struct dc_cursor_attributes attributes;
        struct dc_cursor_position position;
        struct drm_crtc *crtc = &amdgpu_crtc->base;
        int x, y;
        int xorigin = 0, yorigin = 0;

        amdgpu_crtc->cursor_width = width;
        amdgpu_crtc->cursor_height = height;

        attributes.address.high_part = upper_32_bits(gpu_addr);
        attributes.address.low_part = lower_32_bits(gpu_addr);
        attributes.width = width;
        attributes.height = height;
        attributes.x_hot = 0;
        attributes.y_hot = 0;
        attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
        attributes.rotation_angle = 0;
        attributes.attribute_flags.value = 0;

        x = amdgpu_crtc->cursor_x;
        y = amdgpu_crtc->cursor_y;

        /* avivo cursors are offset into the total surface */
        x += crtc->primary->state->src_x >> 16;
        y += crtc->primary->state->src_y >> 16;

        if (x < 0) {
                xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
                x = 0;
        }
        if (y < 0) {
                yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
                y = 0;
        }

        position.enable = true;
        position.x = x;
        position.y = y;

        position.hot_spot_enable = true;
        position.x_hotspot = xorigin;
        position.y_hotspot = yorigin;

        if (!dc_stream_set_cursor_attributes(
                        amdgpu_crtc->stream,
                        &attributes)) {
                DRM_ERROR("DC failed to set cursor attributes\n");
        }

        if (!dc_stream_set_cursor_position(
                        amdgpu_crtc->stream,
                        &position)) {
                DRM_ERROR("DC failed to set cursor position\n");
        }
}

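/*
 * Unpin and drop the reference on the cursor BO currently cached on the
 * CRTC, if any, and clear the cache.
 */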
static int dm_crtc_unpin_cursor_bo_old(
        struct amdgpu_crtc *amdgpu_crtc)
{
        struct amdgpu_bo *robj;
        int ret = 0;

        if (amdgpu_crtc && amdgpu_crtc->cursor_bo) {
                robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

                ret = amdgpu_bo_reserve(robj, false);

                if (likely(ret == 0)) {
                        ret = amdgpu_bo_unpin(robj);

                        if (unlikely(ret != 0)) {
                                DRM_ERROR(
                                        "%s: unpin failed (ret=%d), bo %p\n",
                                        __func__,
                                        ret,
                                        amdgpu_crtc->cursor_bo);
                        }

                        amdgpu_bo_unreserve(robj);
                } else {
                        DRM_ERROR(
                                "%s: reserve failed (ret=%d), bo %p\n",
                                __func__,
                                ret,
                                amdgpu_crtc->cursor_bo);
                }

                drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
                amdgpu_crtc->cursor_bo = NULL;
        }

        return ret;
}

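/*
 * Look up the GEM object for the given handle and pin it into visible
 * VRAM. On success the pinned GPU address is cached in
 * amdgpu_crtc->cursor_addr and the BO is returned through ret_obj.
 */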
static int dm_crtc_pin_cursor_bo_new(
        struct drm_crtc *crtc,
        struct drm_file *file_priv,
        uint32_t handle,
        struct amdgpu_bo **ret_obj)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct amdgpu_bo *robj;
        struct drm_gem_object *obj;
        int ret = -EINVAL;

        if (crtc) {
                struct drm_device *dev = crtc->dev;
                struct amdgpu_device *adev = dev->dev_private;
                uint64_t gpu_addr;

                amdgpu_crtc = to_amdgpu_crtc(crtc);

                obj = drm_gem_object_lookup(file_priv, handle);

                if (!obj) {
                        DRM_ERROR(
                                "Cannot find cursor object %x for crtc %d\n",
                                handle,
                                amdgpu_crtc->crtc_id);
                        goto release;
                }
                robj = gem_to_amdgpu_bo(obj);

                ret = amdgpu_bo_reserve(robj, false);

                if (unlikely(ret != 0)) {
                        drm_gem_object_unreference_unlocked(obj);
                        DRM_ERROR("%s: reserve failed ret %x, handle %x\n",
                                  __func__, ret, handle);
                        goto release;
                }

                ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, 0,
                                               adev->mc.visible_vram_size,
                                               &gpu_addr);

                if (ret == 0) {
                        amdgpu_crtc->cursor_addr = gpu_addr;
                        *ret_obj = robj;
                }
                amdgpu_bo_unreserve(robj);
                if (ret)
                        drm_gem_object_unreference_unlocked(obj);
        }
release:
        return ret;
}

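/*
 * Legacy cursor_set entry point: a handle of 0 hides the cursor and
 * unpins the old BO; otherwise the new BO is pinned, programmed to the
 * hardware, and the previous BO is unpinned and released.
 */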
static int dm_crtc_cursor_set(
        struct drm_crtc *crtc,
        struct drm_file *file_priv,
        uint32_t handle,
        uint32_t width,
        uint32_t height)
{
        struct amdgpu_bo *new_cursor_bo;
        struct dc_cursor_position position;

        int ret;

        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

        ret = -EINVAL;
        new_cursor_bo = NULL;

        DRM_DEBUG_KMS(
                "%s: crtc_id=%d with handle %d and size %d x %d, bo_object %p\n",
                __func__,
                amdgpu_crtc->crtc_id,
                handle,
                width,
                height,
                amdgpu_crtc->cursor_bo);

        if (!handle) {
                /* turn off cursor */
                position.enable = false;
                position.x = 0;
                position.y = 0;
                position.hot_spot_enable = false;

                if (amdgpu_crtc->stream) {
                        /* set cursor visibility to false */
                        dc_stream_set_cursor_position(
                                amdgpu_crtc->stream,
                                &position);
                }
                /* unpin the old cursor buffer and update the cache */
                ret = dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
                goto release;
        }

        if ((width > amdgpu_crtc->max_cursor_width) ||
                (height > amdgpu_crtc->max_cursor_height)) {
                DRM_ERROR(
                        "%s: bad cursor width or height %d x %d\n",
                        __func__,
                        width,
                        height);
                goto release;
        }
        /* try to pin the new cursor BO */
        ret = dm_crtc_pin_cursor_bo_new(crtc, file_priv, handle, &new_cursor_bo);
        /* if the mapping was not successful, return an error */
        if (ret)
                goto release;

        /* program the new cursor BO to the hardware */
        dm_set_cursor(amdgpu_crtc, amdgpu_crtc->cursor_addr, width, height);

        /* unmap the old cursor BO, which is no longer used;
         * return its memory and mapping back */
        dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);

        /* assign the new cursor BO to our internal cache */
        amdgpu_crtc->cursor_bo = &new_cursor_bo->gem_base;

release:
        return ret;
}

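/*
 * Legacy cursor_move hook: cache the new position on the CRTC and
 * reprogram it through DC, using the same clamp-and-hotspot handling
 * as dm_set_cursor() for coordinates above or left of the surface.
 */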
static int dm_crtc_cursor_move(struct drm_crtc *crtc,
                               int x, int y)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        int xorigin = 0, yorigin = 0;
        struct dc_cursor_position position;

        amdgpu_crtc->cursor_x = x;
        amdgpu_crtc->cursor_y = y;

        /* avivo cursors are offset into the total surface */
        x += crtc->primary->state->src_x >> 16;
        y += crtc->primary->state->src_y >> 16;

        /*
         * TODO: for cursor debugging unguard the following
         */
#if 0
        DRM_DEBUG_KMS(
                "%s: x %d y %d c->x %d c->y %d\n",
                __func__,
                x,
                y,
                crtc->x,
                crtc->y);
#endif

        if (x < 0) {
                xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
                x = 0;
        }
        if (y < 0) {
                yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
                y = 0;
        }

        position.enable = true;
        position.x = x;
        position.y = y;

        position.hot_spot_enable = true;
        position.x_hotspot = xorigin;
        position.y_hotspot = yorigin;

        if (amdgpu_crtc->stream) {
                if (!dc_stream_set_cursor_position(
                                amdgpu_crtc->stream,
                                &position)) {
                        DRM_ERROR("DC failed to set cursor position\n");
                        return -EINVAL;
                }
        }

        return 0;
}

static void dm_crtc_cursor_reset(struct drm_crtc *crtc)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

        DRM_DEBUG_KMS(
                "%s: with cursor_bo %p\n",
                __func__,
                amdgpu_crtc->cursor_bo);

        if (amdgpu_crtc->cursor_bo && amdgpu_crtc->stream) {
                dm_set_cursor(
                        amdgpu_crtc,
                        amdgpu_crtc->cursor_addr,
                        amdgpu_crtc->cursor_width,
                        amdgpu_crtc->cursor_height);
        }
}

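/*
 * Translate DRM plane state into the DC surface rectangles. The src_*
 * values are 16.16 fixed point, so the fractional part is dropped.
 * Returns false if any resulting dimension degenerates to zero.
 */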
static bool fill_rects_from_plane_state(
        const struct drm_plane_state *state,
        struct dc_surface *surface)
{
        surface->src_rect.x = state->src_x >> 16;
        surface->src_rect.y = state->src_y >> 16;
        /* for now we ignore the fractional part and do not deal with
         * sub-pixel (floating point) positions */
        surface->src_rect.width = state->src_w >> 16;

        if (surface->src_rect.width == 0)
                return false;

        surface->src_rect.height = state->src_h >> 16;
        if (surface->src_rect.height == 0)
                return false;

        surface->dst_rect.x = state->crtc_x;
        surface->dst_rect.y = state->crtc_y;

        if (state->crtc_w == 0)
                return false;

        surface->dst_rect.width = state->crtc_w;

        if (state->crtc_h == 0)
                return false;

        surface->dst_rect.height = state->crtc_h;

        surface->clip_rect = surface->dst_rect;

        switch (state->rotation & DRM_MODE_ROTATE_MASK) {
        case DRM_MODE_ROTATE_0:
                surface->rotation = ROTATION_ANGLE_0;
                break;
        case DRM_MODE_ROTATE_90:
                surface->rotation = ROTATION_ANGLE_90;
                break;
        case DRM_MODE_ROTATE_180:
                surface->rotation = ROTATION_ANGLE_180;
                break;
        case DRM_MODE_ROTATE_270:
                surface->rotation = ROTATION_ANGLE_270;
                break;
        default:
                surface->rotation = ROTATION_ANGLE_0;
                break;
        }

        return true;
}
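/*
 * Reserve the framebuffer BO just long enough to read back its tiling
 * flags and, optionally, its pinned GPU address.
 */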
static bool get_fb_info(
        const struct amdgpu_framebuffer *amdgpu_fb,
        uint64_t *tiling_flags,
        uint64_t *fb_location)
{
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
        int r = amdgpu_bo_reserve(rbo, false);

        if (unlikely(r != 0)) {
                DRM_ERROR("Unable to reserve buffer\n");
                return false;
        }

        if (fb_location)
                *fb_location = amdgpu_bo_gpu_offset(rbo);

        if (tiling_flags)
                amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

        amdgpu_bo_unreserve(rbo);

        return true;
}
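/*
 * Convert DRM framebuffer properties (GPU address, pixel format, GFX8
 * tiling flags, pitch) into DC surface attributes. The GPU address is
 * only queried when addReq is true.
 */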
static void fill_plane_attributes_from_fb(
        struct amdgpu_device *adev,
        struct dc_surface *surface,
        const struct amdgpu_framebuffer *amdgpu_fb, bool addReq)
{
        uint64_t tiling_flags;
        uint64_t fb_location = 0;
        const struct drm_framebuffer *fb = &amdgpu_fb->base;
        struct drm_format_name_buf format_name;

        get_fb_info(
                amdgpu_fb,
                &tiling_flags,
                addReq ? &fb_location : NULL);

        surface->address.type = PLN_ADDR_TYPE_GRAPHICS;
        surface->address.grph.addr.low_part = lower_32_bits(fb_location);
        surface->address.grph.addr.high_part = upper_32_bits(fb_location);

        switch (fb->format->format) {
        case DRM_FORMAT_C8:
                surface->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
                break;
        case DRM_FORMAT_RGB565:
                surface->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
                break;
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
                surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
                break;
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_ARGB2101010:
                surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
                break;
        case DRM_FORMAT_XBGR2101010:
        case DRM_FORMAT_ABGR2101010:
                surface->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
                break;
        default:
                DRM_ERROR("Unsupported screen format %s\n",
                          drm_get_format_name(fb->format->format, &format_name));
                return;
        }

        memset(&surface->tiling_info, 0, sizeof(surface->tiling_info));

        /* Fill GFX8 params */
        if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
                unsigned bankw, bankh, mtaspect, tile_split, num_banks;

                bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
                bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
                mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
                tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
                num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

                /* XXX fix me for VI */
                surface->tiling_info.gfx8.num_banks = num_banks;
                surface->tiling_info.gfx8.array_mode =
                                DC_ARRAY_2D_TILED_THIN1;
                surface->tiling_info.gfx8.tile_split = tile_split;
                surface->tiling_info.gfx8.bank_width = bankw;
                surface->tiling_info.gfx8.bank_height = bankh;
                surface->tiling_info.gfx8.tile_aspect = mtaspect;
                surface->tiling_info.gfx8.tile_mode =
                                DC_ADDR_SURF_MICRO_TILING_DISPLAY;
        } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
                        == DC_ARRAY_1D_TILED_THIN1) {
                surface->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
        }

        surface->tiling_info.gfx8.pipe_config =
                        AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

        surface->plane_size.grph.surface_size.x = 0;
        surface->plane_size.grph.surface_size.y = 0;
        surface->plane_size.grph.surface_size.width = fb->width;
        surface->plane_size.grph.surface_size.height = fb->height;
        surface->plane_size.grph.surface_pitch =
                fb->pitches[0] / fb->format->cpp[0];

        surface->visible = true;
        surface->scaling_quality.h_taps_c = 0;
        surface->scaling_quality.v_taps_c = 0;

        /* TODO: unhardcode */
        surface->color_space = COLOR_SPACE_SRGB;
        /* is this needed? is the surface zeroed at allocation? */
        surface->scaling_quality.h_taps = 0;
        surface->scaling_quality.v_taps = 0;
        surface->stereo_format = PLANE_STEREO_FORMAT_NONE;
}

#define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256

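/*
 * Copy the 256-entry legacy gamma LUT out of the CRTC state into a
 * freshly allocated dc_gamma and attach it to the surface. The caller
 * is expected to have checked that a gamma_lut blob is present.
 */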
static void fill_gamma_from_crtc(
        const struct drm_crtc *crtc,
        struct dc_surface *dc_surface)
{
        int i;
        struct dc_gamma *gamma;
        struct drm_crtc_state *state = crtc->state;
        struct drm_color_lut *lut = (struct drm_color_lut *) state->gamma_lut->data;

        gamma = dc_create_gamma();

        if (gamma == NULL)
                return;

        for (i = 0; i < NUM_OF_RAW_GAMMA_RAMP_RGB_256; i++) {
                gamma->red[i] = lut[i].red;
                gamma->green[i] = lut[i].green;
                gamma->blue[i] = lut[i].blue;
        }

        dc_surface->gamma_correction = gamma;
}

static void fill_plane_attributes(
        struct amdgpu_device *adev,
        struct dc_surface *surface,
        struct drm_plane_state *state, bool addrReq)
{
        const struct amdgpu_framebuffer *amdgpu_fb =
                to_amdgpu_framebuffer(state->fb);
        const struct drm_crtc *crtc = state->crtc;
        struct dc_transfer_func *input_tf;

        fill_rects_from_plane_state(state, surface);
        fill_plane_attributes_from_fb(
                crtc->dev->dev_private,
                surface,
                amdgpu_fb,
                addrReq);

        input_tf = dc_create_transfer_func();

        if (input_tf == NULL)
                return;

        input_tf->type = TF_TYPE_PREDEFINED;
        input_tf->tf = TRANSFER_FUNCTION_SRGB;

        surface->in_transfer_func = input_tf;

        /* In case a gamma LUT is set, update the gamma value */
        if (state->crtc->state->gamma_lut) {
                fill_gamma_from_crtc(crtc, surface);
        }
}

/*****************************************************************************/

struct amdgpu_connector *aconnector_from_drm_crtc_id(
        const struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_connector *connector;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
        struct amdgpu_connector *aconnector;

        list_for_each_entry(connector,
                            &dev->mode_config.connector_list, head) {

                aconnector = to_amdgpu_connector(connector);

                if (aconnector->base.state->crtc != &acrtc->base)
                        continue;

                /* Found the connector */
                return aconnector;
        }

        /* If we get here, not found. */
        return NULL;
}

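/*
 * Build the stream's source (viewport) and destination rectangles from
 * the requested mode, the RMX scaling type, and the underscan borders,
 * then hand them to DC. For RMX_ASPECT the cross-multiplied comparison
 * of src.width * dst.height against src.height * dst.width picks the
 * axis that needs the least scaling without resorting to division.
 */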
static void update_stream_scaling_settings(
        const struct drm_display_mode *mode,
        const struct dm_connector_state *dm_state,
        const struct dc_stream *stream)
{
        struct amdgpu_device *adev = dm_state->base.crtc->dev->dev_private;
        enum amdgpu_rmx_type rmx_type;

        struct rect src = { 0 }; /* viewport in composition space */
        struct rect dst = { 0 }; /* stream addressable area */

        /* no mode. nothing to be done */
        if (!mode)
                return;

        /* Full screen scaling by default */
        src.width = mode->hdisplay;
        src.height = mode->vdisplay;
        dst.width = stream->timing.h_addressable;
        dst.height = stream->timing.v_addressable;

        rmx_type = dm_state->scaling;
        if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
                if (src.width * dst.height <
                                src.height * dst.width) {
                        /* height needs less upscaling/more downscaling */
                        dst.width = src.width *
                                        dst.height / src.height;
                } else {
                        /* width needs less upscaling/more downscaling */
                        dst.height = src.height *
                                        dst.width / src.width;
                }
        } else if (rmx_type == RMX_CENTER) {
                dst = src;
        }

        dst.x = (stream->timing.h_addressable - dst.width) / 2;
        dst.y = (stream->timing.v_addressable - dst.height) / 2;

        if (dm_state->underscan_enable) {
                dst.x += dm_state->underscan_hborder / 2;
                dst.y += dm_state->underscan_vborder / 2;
                dst.width -= dm_state->underscan_hborder;
                dst.height -= dm_state->underscan_vborder;
        }

        adev->dm.dc->stream_funcs.stream_update_scaling(adev->dm.dc, stream, &src, &dst);

        DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
                      dst.x, dst.y, dst.width, dst.height);
}

static void dm_dc_surface_commit(
        struct dc *dc,
        struct drm_crtc *crtc)
{
        struct dc_surface *dc_surface;
        const struct dc_surface *dc_surfaces[1];
        const struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
        const struct dc_stream *dc_stream = acrtc->stream;

        if (!dc_stream) {
                dm_error(
                        "%s: Failed to obtain stream on crtc (%d)!\n",
                        __func__,
                        acrtc->crtc_id);
                goto fail;
        }

        dc_surface = dc_create_surface(dc);

        if (!dc_surface) {
                dm_error(
                        "%s: Failed to create a surface!\n",
                        __func__);
                goto fail;
        }

        /* Surface programming */
        fill_plane_attributes(
                crtc->dev->dev_private,
                dc_surface,
                crtc->primary->state,
                true);

        dc_surfaces[0] = dc_surface;

        if (!dc_commit_surfaces_to_stream(
                        dc,
                        dc_surfaces,
                        1,
                        dc_stream)) {
                dm_error(
                        "%s: Failed to attach surface!\n",
                        __func__);
        }

        dc_surface_release(dc_surface);
fail:
        return;
}

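/*
 * Map the connector's reported bits-per-color to a DC color depth.
 * Depth is currently capped at 8 bpc, and bpc == 0 (EDIDs older than
 * revision 1.4, whose color depth DRM does not parse) also falls back
 * to 8 bpc.
 */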
static enum dc_color_depth convert_color_depth_from_display_info(
        const struct drm_connector *connector)
{
        uint32_t bpc = connector->display_info.bpc;

        /* Limit the color depth to 8 bit
         * TODO: Still need to handle deep color */
        if (bpc > 8)
                bpc = 8;

        switch (bpc) {
        case 0:
                /* Temporary workaround: DRM doesn't parse the color depth
                 * for EDID revisions before 1.4
                 * TODO: Fix EDID parsing
                 */
                return COLOR_DEPTH_888;
        case 6:
                return COLOR_DEPTH_666;
        case 8:
                return COLOR_DEPTH_888;
        case 10:
                return COLOR_DEPTH_101010;
        case 12:
                return COLOR_DEPTH_121212;
        case 14:
                return COLOR_DEPTH_141414;
        case 16:
                return COLOR_DEPTH_161616;
        default:
                return COLOR_DEPTH_UNDEFINED;
        }
}

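/*
 * Classify the mode as 16:9 or 4:3 by cross-multiplication: for an
 * exact 16:9 mode, crtc_hdisplay * 9 equals crtc_vdisplay * 16, so the
 * difference is compared against a small tolerance instead of dividing.
 */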
static enum dc_aspect_ratio get_aspect_ratio(
        const struct drm_display_mode *mode_in)
{
        int32_t width = mode_in->crtc_hdisplay * 9;
        int32_t height = mode_in->crtc_vdisplay * 16;

        if ((width - height) < 10 && (width - height) > -10)
                return ASPECT_RATIO_16_9;
        else
                return ASPECT_RATIO_4_3;
}

static enum dc_color_space get_output_color_space(
        const struct dc_crtc_timing *dc_crtc_timing)
{
        enum dc_color_space color_space = COLOR_SPACE_SRGB;

        switch (dc_crtc_timing->pixel_encoding) {
        case PIXEL_ENCODING_YCBCR422:
        case PIXEL_ENCODING_YCBCR444:
        case PIXEL_ENCODING_YCBCR420:
        {
                /*
                 * 27030 kHz is the separation point between HDTV and SDTV
                 * according to the HDMI spec; we use YCbCr709 and YCbCr601
                 * respectively.
                 */
                if (dc_crtc_timing->pix_clk_khz > 27030) {
                        if (dc_crtc_timing->flags.Y_ONLY)
                                color_space =
                                        COLOR_SPACE_YCBCR709_LIMITED;
                        else
                                color_space = COLOR_SPACE_YCBCR709;
                } else {
                        if (dc_crtc_timing->flags.Y_ONLY)
                                color_space =
                                        COLOR_SPACE_YCBCR601_LIMITED;
                        else
                                color_space = COLOR_SPACE_YCBCR601;
                }
        }
        break;
        case PIXEL_ENCODING_RGB:
                color_space = COLOR_SPACE_SRGB;
                break;

        default:
                WARN_ON(1);
                break;
        }

        return color_space;
}

/*****************************************************************************/

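/*
 * Translate a drm_display_mode into the dc_crtc_timing embedded in the
 * stream: borders, pixel encoding (YCbCr 4:4:4 only for HDMI sinks that
 * advertise it), color depth, CEA VIC, the addressable/total/sync/porch
 * values (e.g. h_front_porch = crtc_hsync_start - crtc_hdisplay) and
 * the sync polarities, plus an sRGB output transfer function.
 */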
static void fill_stream_properties_from_drm_display_mode(
        struct dc_stream *stream,
        const struct drm_display_mode *mode_in,
        const struct drm_connector *connector)
{
        struct dc_crtc_timing *timing_out = &stream->timing;

        memset(timing_out, 0, sizeof(struct dc_crtc_timing));

        timing_out->h_border_left = 0;
        timing_out->h_border_right = 0;
        timing_out->v_border_top = 0;
        timing_out->v_border_bottom = 0;
        /* TODO: un-hardcode */

        if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
                        && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
        else
                timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

        timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
        timing_out->display_color_depth = convert_color_depth_from_display_info(
                        connector);
        timing_out->scan_type = SCANNING_TYPE_NODATA;
        timing_out->hdmi_vic = 0;
        timing_out->vic = drm_match_cea_mode(mode_in);

        timing_out->h_addressable = mode_in->crtc_hdisplay;
        timing_out->h_total = mode_in->crtc_htotal;
        timing_out->h_sync_width =
                mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
        timing_out->h_front_porch =
                mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
        timing_out->v_total = mode_in->crtc_vtotal;
        timing_out->v_addressable = mode_in->crtc_vdisplay;
        timing_out->v_front_porch =
                mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
        timing_out->v_sync_width =
                mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
        timing_out->pix_clk_khz = mode_in->crtc_clock;
        timing_out->aspect_ratio = get_aspect_ratio(mode_in);
        if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
                timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
        if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
                timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;

        stream->output_color_space = get_output_color_space(timing_out);

        {
                struct dc_transfer_func *tf = dc_create_transfer_func();

                tf->type = TF_TYPE_PREDEFINED;
                tf->tf = TRANSFER_FUNCTION_SRGB;
                stream->out_transfer_func = tf;
        }
}

static void fill_audio_info(
        struct audio_info *audio_info,
        const struct drm_connector *drm_connector,
        const struct dc_sink *dc_sink)
{
        int i = 0;
        int cea_revision = 0;
        const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

        audio_info->manufacture_id = edid_caps->manufacturer_id;
        audio_info->product_id = edid_caps->product_id;

        cea_revision = drm_connector->display_info.cea_rev;

        while (i < AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS &&
               edid_caps->display_name[i]) {
                audio_info->display_name[i] = edid_caps->display_name[i];
                i++;
        }

        if (cea_revision >= 3) {
                audio_info->mode_count = edid_caps->audio_mode_count;

                for (i = 0; i < audio_info->mode_count; ++i) {
                        audio_info->modes[i].format_code =
                                (enum audio_format_code)
                                (edid_caps->audio_modes[i].format_code);
                        audio_info->modes[i].channel_count =
                                edid_caps->audio_modes[i].channel_count;
                        audio_info->modes[i].sample_rates.all =
                                edid_caps->audio_modes[i].sample_rate;
                        audio_info->modes[i].sample_size =
                                edid_caps->audio_modes[i].sample_size;
                }
        }

        audio_info->flags.all = edid_caps->speaker_flags;

        /* TODO: We only check for progressive mode; check for interlaced mode too */
        if (drm_connector->latency_present[0]) {
                audio_info->video_latency = drm_connector->video_latency[0];
                audio_info->audio_latency = drm_connector->audio_latency[0];
        }

        /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}

static void copy_crtc_timing_for_drm_display_mode(
        const struct drm_display_mode *src_mode,
        struct drm_display_mode *dst_mode)
{
        dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
        dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
        dst_mode->crtc_clock = src_mode->crtc_clock;
        dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
        dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
        dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
        dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
        dst_mode->crtc_htotal = src_mode->crtc_htotal;
        dst_mode->crtc_hskew = src_mode->crtc_hskew;
        dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
        dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
        dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
        dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
        dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void decide_crtc_timing_for_drm_display_mode(
        struct drm_display_mode *drm_mode,
        const struct drm_display_mode *native_mode,
        bool scale_enabled)
{
        if (scale_enabled) {
                copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
        } else if (native_mode->clock == drm_mode->clock &&
                        native_mode->htotal == drm_mode->htotal &&
                        native_mode->vtotal == drm_mode->vtotal) {
                copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
        } else {
                /* neither scaling nor an amdgpu-inserted mode; no need to patch */
        }
}

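/*
 * Create a DC stream for the connector's sink and derive the timing
 * from drm_mode. When the connector advertises a preferred (native)
 * mode and scaling is active, the CRTC timing of the native mode is
 * substituted so the hardware keeps scanning out the native timing
 * while the requested mode is scaled into it.
 */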
static struct dc_stream *create_stream_for_sink(
        struct amdgpu_connector *aconnector,
        const struct drm_display_mode *drm_mode,
        const struct dm_connector_state *dm_state)
{
        struct drm_display_mode *preferred_mode = NULL;
        const struct drm_connector *drm_connector;
        struct dc_stream *stream = NULL;
        struct drm_display_mode mode = *drm_mode;
        bool native_mode_found = false;

        if (NULL == aconnector) {
                DRM_ERROR("aconnector is NULL!\n");
                goto drm_connector_null;
        }

        if (NULL == dm_state) {
                DRM_ERROR("dm_state is NULL!\n");
                goto dm_state_null;
        }

        drm_connector = &aconnector->base;
        stream = dc_create_stream_for_sink(aconnector->dc_sink);

        if (NULL == stream) {
                DRM_ERROR("Failed to create stream for sink!\n");
                goto stream_create_fail;
        }

        list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
                /* Search for the preferred mode */
                if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
                        native_mode_found = true;
                        break;
                }
        }
        if (!native_mode_found)
                preferred_mode = list_first_entry_or_null(
                                &aconnector->base.modes,
                                struct drm_display_mode,
                                head);

        if (NULL == preferred_mode) {
                /* This may not be an error; the use case is when we have no
                 * usermode calls to reset and set the mode upon hotplug. In
                 * this case, we call set mode ourselves to restore the
                 * previous mode, and the mode list may not be filled in yet.
                 */
                DRM_INFO("No preferred mode found\n");
        } else {
                decide_crtc_timing_for_drm_display_mode(
                        &mode, preferred_mode,
                        dm_state->scaling != RMX_OFF);
        }

        fill_stream_properties_from_drm_display_mode(stream,
                        &mode, &aconnector->base);
        update_stream_scaling_settings(&mode, dm_state, stream);

        fill_audio_info(
                &stream->audio_info,
                drm_connector,
                aconnector->dc_sink);

stream_create_fail:
dm_state_null:
drm_connector_null:
        return stream;
}

void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
        drm_crtc_cleanup(crtc);
        kfree(crtc);
}

/* Implements only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
        .reset = drm_atomic_helper_crtc_reset,
        .cursor_set = dm_crtc_cursor_set,
        .cursor_move = dm_crtc_cursor_move,
        .destroy = amdgpu_dm_crtc_destroy,
        .gamma_set = drm_atomic_helper_legacy_gamma_set,
        .set_config = drm_atomic_helper_set_config,
        .page_flip = drm_atomic_helper_page_flip,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
        bool connected;
        struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);

        /* Notes:
         * 1. This interface is NOT called in context of HPD irq.
         * 2. This interface *is called* in context of user-mode ioctl, which
         * makes it a bad place for *any* MST-related activity.
         */

        if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
                connected = (aconnector->dc_sink != NULL);
        else
                connected = (aconnector->base.force == DRM_FORCE_ON);

        return (connected ? connector_status_connected :
                        connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(
        struct drm_connector *connector,
        struct drm_connector_state *connector_state,
        struct drm_property *property,
        uint64_t val)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct dm_connector_state *dm_old_state =
                to_dm_connector_state(connector->state);
        struct dm_connector_state *dm_new_state =
                to_dm_connector_state(connector_state);

        struct drm_crtc_state *new_crtc_state;
        struct drm_crtc *crtc;
        int i;
        int ret = -EINVAL;

        if (property == dev->mode_config.scaling_mode_property) {
                enum amdgpu_rmx_type rmx_type;

                switch (val) {
                case DRM_MODE_SCALE_CENTER:
                        rmx_type = RMX_CENTER;
                        break;
                case DRM_MODE_SCALE_ASPECT:
                        rmx_type = RMX_ASPECT;
                        break;
                case DRM_MODE_SCALE_FULLSCREEN:
                        rmx_type = RMX_FULL;
                        break;
                case DRM_MODE_SCALE_NONE:
                default:
                        rmx_type = RMX_OFF;
                        break;
                }

                if (dm_old_state->scaling == rmx_type)
                        return 0;

                dm_new_state->scaling = rmx_type;
                ret = 0;
        } else if (property == adev->mode_info.underscan_hborder_property) {
                dm_new_state->underscan_hborder = val;
                ret = 0;
        } else if (property == adev->mode_info.underscan_vborder_property) {
                dm_new_state->underscan_vborder = val;
                ret = 0;
        } else if (property == adev->mode_info.underscan_property) {
                dm_new_state->underscan_enable = val;
                ret = 0;
        }

        for_each_crtc_in_state(
                        connector_state->state,
                        crtc,
                        new_crtc_state,
                        i) {

                if (crtc == connector_state->crtc) {
                        struct drm_plane_state *plane_state;

                        /*
                         * A bit of magic is done here. We need to ensure
                         * that planes get updated after the mode is set.
                         * So we add the primary plane to the state; this
                         * way atomic_update will be called for it.
                         */
                        plane_state =
                                drm_atomic_get_plane_state(
                                        connector_state->state,
                                        crtc->primary);

                        if (!plane_state)
                                return -EINVAL;
                }
        }

        return ret;
}

void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
        struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
        const struct dc_link *link = aconnector->dc_link;
        struct amdgpu_device *adev = connector->dev->dev_private;
        struct amdgpu_display_manager *dm = &adev->dm;
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

        if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
                amdgpu_dm_register_backlight_device(dm);

                if (dm->backlight_dev) {
                        backlight_device_unregister(dm->backlight_dev);
                        dm->backlight_dev = NULL;
                }
        }
#endif
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
        struct dm_connector_state *state =
                to_dm_connector_state(connector->state);

        kfree(state);

        state = kzalloc(sizeof(*state), GFP_KERNEL);

        if (state) {
                state->scaling = RMX_OFF;
                state->underscan_enable = false;
                state->underscan_hborder = 0;
                state->underscan_vborder = 0;

                connector->state = &state->base;
                connector->state->connector = connector;
        }
}

struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
        struct drm_connector *connector)
{
        struct dm_connector_state *state =
                to_dm_connector_state(connector->state);

        struct dm_connector_state *new_state =
                kmemdup(state, sizeof(*state), GFP_KERNEL);

        if (new_state) {
                __drm_atomic_helper_connector_duplicate_state(connector,
                                                              &new_state->base);
                return &new_state->base;
        }

        return NULL;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
        .reset = amdgpu_dm_connector_funcs_reset,
        .detect = amdgpu_dm_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = amdgpu_dm_connector_destroy,
        .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_set_property = amdgpu_dm_connector_atomic_set_property
};

static struct drm_encoder *best_encoder(struct drm_connector *connector)
{
        int enc_id = connector->encoder_ids[0];
        struct drm_mode_object *obj;
        struct drm_encoder *encoder;

        DRM_DEBUG_KMS("Finding the best encoder\n");

        /* pick the first encoder id */
        if (enc_id) {
                obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
                if (!obj) {
                        DRM_ERROR("Couldn't find a matching encoder for our connector\n");
                        return NULL;
                }
                encoder = obj_to_encoder(obj);
                return encoder;
        }
        DRM_ERROR("No encoder id\n");
        return NULL;
}

static int get_modes(struct drm_connector *connector)
{
        return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_connector *aconnector)
{
        struct dc_sink_init_data init_params = {
                        .link = aconnector->dc_link,
                        .sink_signal = SIGNAL_TYPE_VIRTUAL
        };
        struct edid *edid;

        if (!aconnector->base.edid_blob_ptr ||
                !aconnector->base.edid_blob_ptr->data) {
                DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
                          aconnector->base.name);

                aconnector->base.force = DRM_FORCE_OFF;
                aconnector->base.override_edid = false;
                return;
        }

        /* only dereference the EDID blob after the NULL check above */
        edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

        aconnector->edid = edid;

        aconnector->dc_em_sink = dc_link_add_remote_sink(
                aconnector->dc_link,
                (uint8_t *)edid,
                (edid->extensions + 1) * EDID_LENGTH,
                &init_params);

        if (aconnector->base.force == DRM_FORCE_ON)
                aconnector->dc_sink = aconnector->dc_link->local_sink ?
                        aconnector->dc_link->local_sink :
                        aconnector->dc_em_sink;
}

static void handle_edid_mgmt(struct amdgpu_connector *aconnector)
{
        struct dc_link *link = (struct dc_link *)aconnector->dc_link;

        /* In case of headless boot with force on for a DP-managed connector,
         * those settings have to be != 0 to get an initial modeset
         */
        if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
                link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
                link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
        }

        aconnector->base.override_edid = true;
        create_eml_sink(aconnector);
}

int amdgpu_dm_connector_mode_valid(
        struct drm_connector *connector,
        struct drm_display_mode *mode)
{
        int result = MODE_ERROR;
        const struct dc_sink *dc_sink;
        struct amdgpu_device *adev = connector->dev->dev_private;
        struct dc_validation_set val_set = { 0 };
        /* TODO: Unhardcode stream count */
        struct dc_stream *stream;
        struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);

        if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
                        (mode->flags & DRM_MODE_FLAG_DBLSCAN))
                return result;

        /* Only run this the first time mode_valid is called to initialize
         * EDID mgmt
         */
        if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
                        !aconnector->dc_em_sink)
                handle_edid_mgmt(aconnector);

        dc_sink = to_amdgpu_connector(connector)->dc_sink;

        if (NULL == dc_sink) {
                DRM_ERROR("dc_sink is NULL!\n");
                goto null_sink;
        }

        stream = dc_create_stream_for_sink(dc_sink);
        if (NULL == stream) {
                DRM_ERROR("Failed to create stream for sink!\n");
                goto stream_create_fail;
        }

        drm_mode_set_crtcinfo(mode, 0);
        fill_stream_properties_from_drm_display_mode(stream, mode, connector);

        val_set.stream = stream;
        val_set.surface_count = 0;
        stream->src.width = mode->hdisplay;
        stream->src.height = mode->vdisplay;
        stream->dst = stream->src;

        if (dc_validate_resources(adev->dm.dc, &val_set, 1))
                result = MODE_OK;

        dc_stream_release(stream);

stream_create_fail:
null_sink:
        /* TODO: error handling */
        return result;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
        /*
         * If a second, larger display is hotplugged in fbcon mode, its
         * higher-resolution modes are filtered out by
         * drm_mode_validate_size() and end up missing once the user starts
         * lightdm. So renew the mode list in the get_modes callback rather
         * than just returning the mode count.
         */
        .get_modes = get_modes,
        .mode_valid = amdgpu_dm_connector_mode_valid,
        .best_encoder = best_encoder
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static int dm_crtc_helper_atomic_check(
        struct drm_crtc *crtc,
        struct drm_crtc_state *state)
{
        return 0;
}

static bool dm_crtc_helper_mode_fixup(
        struct drm_crtc *crtc,
        const struct drm_display_mode *mode,
        struct drm_display_mode *adjusted_mode)
{
        return true;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
        .disable = dm_crtc_helper_disable,
        .atomic_check = dm_crtc_helper_atomic_check,
        .mode_fixup = dm_crtc_helper_mode_fixup
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}

static int dm_encoder_helper_atomic_check(
        struct drm_encoder *encoder,
        struct drm_crtc_state *crtc_state,
        struct drm_connector_state *conn_state)
{
        return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
        .disable = dm_encoder_helper_disable,
        .atomic_check = dm_encoder_helper_atomic_check
};

static const struct drm_plane_funcs dm_plane_funcs = {
        .reset = drm_atomic_helper_plane_reset,
        .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_plane_destroy_state
};

static void clear_unrelated_fields(struct drm_plane_state *state)
{
        state->crtc = NULL;
        state->fb = NULL;
        state->state = NULL;
        state->fence = NULL;
}

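/*
 * Decide whether the old_state -> new_state transition can be handled
 * as a page flip: both states must carry a framebuffer with identical
 * tiling, an event must be pending, and once crtc/fb/state/fence are
 * masked out the remaining plane state must be bit-identical. A pending
 * DRM_MODE_PAGE_FLIP_ASYNC request forces the flip path.
 */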
static bool page_flip_needed(
        const struct drm_plane_state *new_state,
        const struct drm_plane_state *old_state,
        struct drm_pending_vblank_event *event,
        bool commit_surface_required)
{
        struct drm_plane_state old_state_tmp;
        struct drm_plane_state new_state_tmp;

        struct amdgpu_framebuffer *amdgpu_fb_old;
        struct amdgpu_framebuffer *amdgpu_fb_new;
        struct amdgpu_crtc *acrtc_new;

        uint64_t old_tiling_flags;
        uint64_t new_tiling_flags;

        bool page_flip_required;

        if (!old_state)
                return false;

        if (!old_state->fb)
                return false;

        if (!new_state)
                return false;

        if (!new_state->fb)
                return false;

        old_state_tmp = *old_state;
        new_state_tmp = *new_state;

        if (!event)
                return false;

        amdgpu_fb_old = to_amdgpu_framebuffer(old_state->fb);
        amdgpu_fb_new = to_amdgpu_framebuffer(new_state->fb);

        if (!get_fb_info(amdgpu_fb_old, &old_tiling_flags, NULL))
                return false;

        if (!get_fb_info(amdgpu_fb_new, &new_tiling_flags, NULL))
                return false;

        if (commit_surface_required == true &&
                        old_tiling_flags != new_tiling_flags)
                return false;

        clear_unrelated_fields(&old_state_tmp);
        clear_unrelated_fields(&new_state_tmp);

        page_flip_required = memcmp(&old_state_tmp,
                                    &new_state_tmp,
                                    sizeof(old_state_tmp)) == 0;
        if (new_state->crtc && page_flip_required == false) {
                acrtc_new = to_amdgpu_crtc(new_state->crtc);
                if (acrtc_new->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
                        page_flip_required = true;
        }
        return page_flip_required;
}

static int dm_plane_helper_prepare_fb(
        struct drm_plane *plane,
        struct drm_plane_state *new_state)
{
        struct amdgpu_framebuffer *afb;
        struct drm_gem_object *obj;
        struct amdgpu_bo *rbo;
        int r;

        if (!new_state->fb) {
                DRM_DEBUG_KMS("No FB bound\n");
                return 0;
        }

        afb = to_amdgpu_framebuffer(new_state->fb);

        obj = afb->obj;
        rbo = gem_to_amdgpu_bo(obj);
        r = amdgpu_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                return r;

        r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL);

        amdgpu_bo_unreserve(rbo);

        if (unlikely(r != 0)) {
                DRM_ERROR("Failed to pin framebuffer\n");
                return r;
        }

        return 0;
}

static void dm_plane_helper_cleanup_fb(
        struct drm_plane *plane,
        struct drm_plane_state *old_state)
{
        struct amdgpu_bo *rbo;
        struct amdgpu_framebuffer *afb;
        int r;

        if (!old_state->fb)
                return;

        afb = to_amdgpu_framebuffer(old_state->fb);
        rbo = gem_to_amdgpu_bo(afb->obj);
        r = amdgpu_bo_reserve(rbo, false);
        if (unlikely(r)) {
                DRM_ERROR("failed to reserve rbo before unpin\n");
                return;
        }

        amdgpu_bo_unpin(rbo);
        amdgpu_bo_unreserve(rbo);
}

int dm_create_validation_set_for_connector(struct drm_connector *connector,
                struct drm_display_mode *mode, struct dc_validation_set *val_set)
{
        int result = MODE_ERROR;
        const struct dc_sink *dc_sink =
                to_amdgpu_connector(connector)->dc_sink;
        /* TODO: Unhardcode stream count */
        struct dc_stream *stream;

        if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
                        (mode->flags & DRM_MODE_FLAG_DBLSCAN))
                return result;

        if (NULL == dc_sink) {
                DRM_ERROR("dc_sink is NULL!\n");
                return result;
        }

        stream = dc_create_stream_for_sink(dc_sink);

        if (NULL == stream) {
                DRM_ERROR("Failed to create stream for sink!\n");
                return result;
        }

        drm_mode_set_crtcinfo(mode, 0);

        fill_stream_properties_from_drm_display_mode(stream, mode, connector);

        val_set->stream = stream;

        stream->src.width = mode->hdisplay;
        stream->src.height = mode->vdisplay;
        stream->dst = stream->src;

        return MODE_OK;
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
        .prepare_fb = dm_plane_helper_prepare_fb,
        .cleanup_fb = dm_plane_helper_cleanup_fb,
};

/*
 * TODO: these are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * drm check will succeed, and let DC implement the proper check
 */
static uint32_t rgb_formats[] = {
        DRM_FORMAT_XRGB4444,
        DRM_FORMAT_ARGB4444,
        DRM_FORMAT_RGBA4444,
        DRM_FORMAT_ARGB1555,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_RGB888,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_RGBA8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_ARGB2101010,
        DRM_FORMAT_ABGR2101010,
};

int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                        struct amdgpu_crtc *acrtc,
                        uint32_t crtc_index)
{
        int res = -ENOMEM;

        struct drm_plane *primary_plane =
                kzalloc(sizeof(*primary_plane), GFP_KERNEL);

        if (!primary_plane)
                goto fail_plane;

        primary_plane->format_default = true;

        res = drm_universal_plane_init(
                dm->adev->ddev,
                primary_plane,
                0,
                &dm_plane_funcs,
                rgb_formats,
                ARRAY_SIZE(rgb_formats),
                NULL,
                DRM_PLANE_TYPE_PRIMARY, NULL);

        /* bail out instead of continuing with an uninitialized plane */
        if (res)
                goto fail;

        primary_plane->crtc = &acrtc->base;

        drm_plane_helper_add(primary_plane, &dm_plane_helper_funcs);

        res = drm_crtc_init_with_planes(
                dm->ddev,
                &acrtc->base,
                primary_plane,
                NULL,
                &amdgpu_dm_crtc_funcs, NULL);

        if (res)
                goto fail;

        drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

        acrtc->max_cursor_width = 128;
        acrtc->max_cursor_height = 128;

        acrtc->crtc_id = crtc_index;
        acrtc->base.enabled = false;

        dm->adev->mode_info.crtcs[crtc_index] = acrtc;
        drm_mode_crtc_set_gamma_size(&acrtc->base, 256);

        return 0;
fail:
        kfree(primary_plane);
fail_plane:
        acrtc->crtc_id = -1;
        return res;
}

static int to_drm_connector_type(enum signal_type st)
{
        switch (st) {
        case SIGNAL_TYPE_HDMI_TYPE_A:
                return DRM_MODE_CONNECTOR_HDMIA;
        case SIGNAL_TYPE_EDP:
                return DRM_MODE_CONNECTOR_eDP;
        case SIGNAL_TYPE_RGB:
                return DRM_MODE_CONNECTOR_VGA;
        case SIGNAL_TYPE_DISPLAY_PORT:
        case SIGNAL_TYPE_DISPLAY_PORT_MST:
                return DRM_MODE_CONNECTOR_DisplayPort;
        case SIGNAL_TYPE_DVI_DUAL_LINK:
        case SIGNAL_TYPE_DVI_SINGLE_LINK:
                return DRM_MODE_CONNECTOR_DVID;
        case SIGNAL_TYPE_VIRTUAL:
                return DRM_MODE_CONNECTOR_VIRTUAL;

        default:
                return DRM_MODE_CONNECTOR_Unknown;
        }
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
        const struct drm_connector_helper_funcs *helper =
                connector->helper_private;
        struct drm_encoder *encoder;
        struct amdgpu_encoder *amdgpu_encoder;

        encoder = helper->best_encoder(connector);

        if (encoder == NULL)
                return;

        amdgpu_encoder = to_amdgpu_encoder(encoder);

        amdgpu_encoder->native_mode.clock = 0;

        if (!list_empty(&connector->probed_modes)) {
                struct drm_display_mode *preferred_mode = NULL;

                /* note: the unconditional break means only the first probed
                 * mode is examined */
                list_for_each_entry(preferred_mode,
                                    &connector->probed_modes,
                                    head) {
                        if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
                                amdgpu_encoder->native_mode = *preferred_mode;
                        }
                        break;
                }
        }
}

static struct drm_display_mode *amdgpu_dm_create_common_mode(
        struct drm_encoder *encoder, char *name,
        int hdisplay, int vdisplay)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct drm_display_mode *mode = NULL;
        struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

        mode = drm_mode_duplicate(dev, native_mode);

        if (mode == NULL)
                return NULL;

        mode->hdisplay = hdisplay;
        mode->vdisplay = vdisplay;
        mode->type &= ~DRM_MODE_TYPE_PREFERRED;
        strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

        return mode;
}

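/*
 * Seed the probe list with a set of common resolutions smaller than the
 * native mode (for scaled output) that the EDID did not already
 * provide; each entry is derived from the encoder's cached native mode.
 */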
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
                                                 struct drm_connector *connector)
{
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct drm_display_mode *mode = NULL;
        struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
        struct amdgpu_connector *amdgpu_connector =
                to_amdgpu_connector(connector);
        int i;
        int n;
        struct mode_size {
                char name[DRM_DISPLAY_MODE_LEN];
                int w;
                int h;
        } common_modes[] = {
                {  "640x480",  640,  480},
                {  "800x600",  800,  600},
                { "1024x768", 1024,  768},
                { "1280x720", 1280,  720},
                { "1280x800", 1280,  800},
                {"1280x1024", 1280, 1024},
                { "1440x900", 1440,  900},
                {"1680x1050", 1680, 1050},
                {"1600x1200", 1600, 1200},
                {"1920x1080", 1920, 1080},
                {"1920x1200", 1920, 1200}
        };

        n = ARRAY_SIZE(common_modes);

        for (i = 0; i < n; i++) {
                struct drm_display_mode *curmode = NULL;
                bool mode_existed = false;

                if (common_modes[i].w > native_mode->hdisplay ||
                        common_modes[i].h > native_mode->vdisplay ||
                        (common_modes[i].w == native_mode->hdisplay &&
                         common_modes[i].h == native_mode->vdisplay))
                        continue;

                list_for_each_entry(curmode, &connector->probed_modes, head) {
                        if (common_modes[i].w == curmode->hdisplay &&
                                common_modes[i].h == curmode->vdisplay) {
                                mode_existed = true;
                                break;
                        }
                }

                if (mode_existed)
                        continue;

                mode = amdgpu_dm_create_common_mode(encoder,
                                common_modes[i].name, common_modes[i].w,
                                common_modes[i].h);
                drm_mode_probed_add(connector, mode);
                amdgpu_connector->num_modes++;
        }
}

static void amdgpu_dm_connector_ddc_get_modes(
        struct drm_connector *connector,
        struct edid *edid)
{
        struct amdgpu_connector *amdgpu_connector =
                to_amdgpu_connector(connector);

        if (edid) {
                /* empty probed_modes */
                INIT_LIST_HEAD(&connector->probed_modes);
                amdgpu_connector->num_modes =
                        drm_add_edid_modes(connector, edid);

                drm_edid_to_eld(connector, edid);

                amdgpu_dm_get_native_mode(connector);
        } else
                amdgpu_connector->num_modes = 0;
}

int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
        const struct drm_connector_helper_funcs *helper =
                connector->helper_private;
        struct amdgpu_connector *amdgpu_connector =
                to_amdgpu_connector(connector);
        struct drm_encoder *encoder;
        struct edid *edid = amdgpu_connector->edid;

        encoder = helper->best_encoder(connector);

        amdgpu_dm_connector_ddc_get_modes(connector, edid);
        amdgpu_dm_connector_add_common_modes(encoder, connector);
        return amdgpu_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(
        struct amdgpu_display_manager *dm,
        struct amdgpu_connector *aconnector,
        int connector_type,
        const struct dc_link *link,
        int link_index)
{
        struct amdgpu_device *adev = dm->ddev->dev_private;

        aconnector->connector_id = link_index;
        aconnector->dc_link = link;
        aconnector->base.interlace_allowed = false;
        aconnector->base.doublescan_allowed = false;
        aconnector->base.stereo_allowed = false;
        aconnector->base.dpms = DRM_MODE_DPMS_OFF;
        aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */

        mutex_init(&aconnector->hpd_lock);

        /* configure HPD hot plug support; connector->polled defaults to 0,
         * which means HPD hot plug is not supported
         */
        switch (connector_type) {
        case DRM_MODE_CONNECTOR_HDMIA:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                break;
        case DRM_MODE_CONNECTOR_DisplayPort:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                break;
        case DRM_MODE_CONNECTOR_DVID:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                break;
        default:
                break;
        }

        drm_object_attach_property(&aconnector->base.base,
                                   dm->ddev->mode_config.scaling_mode_property,
                                   DRM_MODE_SCALE_NONE);

        drm_object_attach_property(&aconnector->base.base,
                                   adev->mode_info.underscan_property,
                                   UNDERSCAN_OFF);
        drm_object_attach_property(&aconnector->base.base,
                                   adev->mode_info.underscan_hborder_property,
                                   0);
        drm_object_attach_property(&aconnector->base.base,
                                   adev->mode_info.underscan_vborder_property,
                                   0);
}

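/*
 * i2c algorithm hook: translate the i2c_msg array into a DC i2c_command
 * and submit it over the link's DDC line. Returns the number of
 * messages on success, -EIO otherwise.
 */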
int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
                       struct i2c_msg *msgs, int num)
{
        struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
        struct i2c_command cmd;
        int i;
        int result = -EIO;

        cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

        if (!cmd.payloads)
                return result;

        cmd.number_of_payloads = num;
        cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
        cmd.speed = 100;

        for (i = 0; i < num; i++) {
                /* a payload is a write unless the read flag is set */
                cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
                cmd.payloads[i].address = msgs[i].addr;
                cmd.payloads[i].length = msgs[i].len;
                cmd.payloads[i].data = msgs[i].buf;
        }

        if (dc_submit_i2c(i2c->dm->dc, i2c->link_index, &cmd))
                result = num;

        kfree(cmd.payloads);

        return result;
}

u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
        .master_xfer = amdgpu_dm_i2c_xfer,
        .functionality = amdgpu_dm_i2c_func,
};

struct amdgpu_i2c_adapter *create_i2c(unsigned int link_index, struct amdgpu_display_manager *dm, int *res)
{
        struct amdgpu_i2c_adapter *i2c;

        i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
        if (!i2c)
                return NULL;
        i2c->dm = dm;
        i2c->base.owner = THIS_MODULE;
        i2c->base.class = I2C_CLASS_DDC;
        i2c->base.dev.parent = &dm->adev->pdev->dev;
        i2c->base.algo = &amdgpu_dm_i2c_algo;
        snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
        i2c->link_index = link_index;
        i2c_set_adapdata(&i2c->base, i2c);

        return i2c;
}

1953 /* Note: this function assumes that dc_link_detect() was called for the
1954 * dc_link which will be represented by this aconnector. */
1955 int amdgpu_dm_connector_init(
1956 struct amdgpu_display_manager *dm,
1957 struct amdgpu_connector *aconnector,
1958 uint32_t link_index,
1959 struct amdgpu_encoder *aencoder)
1960 {
1961 int res = 0;
1962 int connector_type;
1963 struct dc *dc = dm->dc;
1964 const struct dc_link *link = dc_get_link_at_index(dc, link_index);
1965 struct amdgpu_i2c_adapter *i2c;
1966
1967 DRM_DEBUG_KMS("%s()\n", __func__);
1968
	i2c = create_i2c(link->link_index, dm, &res);
	if (!i2c) {
		DRM_ERROR("Failed to allocate i2c adapter for link %d\n",
			  link->link_index);
		return -ENOMEM;
	}
	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);
1972
1973 if (res) {
1974 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
1975 goto out_free;
1976 }
1977
1978 connector_type = to_drm_connector_type(link->connector_signal);
1979
1980 res = drm_connector_init(
1981 dm->ddev,
1982 &aconnector->base,
1983 &amdgpu_dm_connector_funcs,
1984 connector_type);
1985
1986 if (res) {
1987 DRM_ERROR("connector_init failed\n");
1988 aconnector->connector_id = -1;
1989 goto out_free;
1990 }
1991
1992 drm_connector_helper_add(
1993 &aconnector->base,
1994 &amdgpu_dm_connector_helper_funcs);
1995
1996 amdgpu_dm_connector_init_helper(
1997 dm,
1998 aconnector,
1999 connector_type,
2000 link,
2001 link_index);
2002
2003 drm_mode_connector_attach_encoder(
2004 &aconnector->base, &aencoder->base);
2005
2006 drm_connector_register(&aconnector->base);
2007
2008 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
2009 || connector_type == DRM_MODE_CONNECTOR_eDP)
2010 amdgpu_dm_initialize_mst_connector(dm, aconnector);
2011
2012 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2013 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2014
	/* NOTE: this currently will create a backlight device even if a panel
	 * is not connected to the eDP/LVDS connector.
	 *
	 * This is less than ideal, but we don't have sink information at this
	 * stage, since detection happens afterwards. We can't do detection
	 * earlier, since MST detection needs the connectors to be created
	 * first.
	 */
2022 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
		/* Even if registration failed, we should continue with
		 * DM initialization, because not having a backlight control
		 * is better than a black screen. */
2026 amdgpu_dm_register_backlight_device(dm);
2027
2028 if (dm->backlight_dev)
2029 dm->backlight_link = link;
2030 }
2031 #endif
2032
2033 out_free:
2034 if (res) {
2035 kfree(i2c);
2036 aconnector->i2c = NULL;
2037 }
2038 return res;
2039 }
2040
2041 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
2042 {
2043 switch (adev->mode_info.num_crtc) {
2044 case 1:
2045 return 0x1;
2046 case 2:
2047 return 0x3;
2048 case 3:
2049 return 0x7;
2050 case 4:
2051 return 0xf;
2052 case 5:
2053 return 0x1f;
2054 case 6:
2055 default:
2056 return 0x3f;
2057 }
2058 }
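
/*
 * For num_crtc in the supported 1..6 range the switch above is just a mask
 * of the low num_crtc bits. An equivalent helper (illustrative only, not
 * used by the driver):
 */
static inline int example_encoder_crtc_mask(int num_crtc)
{
	/* clamp to the 6 CRTCs the display hardware exposes */
	return (1 << min(num_crtc, 6)) - 1;
}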
2059
2060 int amdgpu_dm_encoder_init(
2061 struct drm_device *dev,
2062 struct amdgpu_encoder *aencoder,
2063 uint32_t link_index)
2064 {
2065 struct amdgpu_device *adev = dev->dev_private;
2066
2067 int res = drm_encoder_init(dev,
2068 &aencoder->base,
2069 &amdgpu_dm_encoder_funcs,
2070 DRM_MODE_ENCODER_TMDS,
2071 NULL);
2072
2073 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
2074
2075 if (!res)
2076 aencoder->encoder_id = link_index;
2077 else
2078 aencoder->encoder_id = -1;
2079
2080 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
2081
2082 return res;
2083 }
2084
2085 enum dm_commit_action {
2086 DM_COMMIT_ACTION_NOTHING,
2087 DM_COMMIT_ACTION_RESET,
2088 DM_COMMIT_ACTION_DPMS_ON,
2089 DM_COMMIT_ACTION_DPMS_OFF,
2090 DM_COMMIT_ACTION_SET
2091 };
2092
2093 static enum dm_commit_action get_dm_commit_action(struct drm_crtc_state *state)
2094 {
	/* mode_changed means either the actual mode or the enable state changed */
	/* active_changed means the dpms state changed */
2097
	DRM_DEBUG_KMS("crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d, active_changed:%d, connectors_changed:%d\n",
2099 state->enable,
2100 state->active,
2101 state->planes_changed,
2102 state->mode_changed,
2103 state->active_changed,
2104 state->connectors_changed);
2105
2106 if (state->mode_changed) {
		/* if it got disabled, reset the mode */
2108 if (!state->enable)
2109 return DM_COMMIT_ACTION_RESET;
2110
2111 if (state->active)
2112 return DM_COMMIT_ACTION_SET;
2113 else
2114 return DM_COMMIT_ACTION_RESET;
2115 } else {
2116 /* ! mode_changed */
2117
		/* if it remains disabled, skip it */
2119 if (!state->enable)
2120 return DM_COMMIT_ACTION_NOTHING;
2121
2122 if (state->active && state->connectors_changed)
2123 return DM_COMMIT_ACTION_SET;
2124
2125 if (state->active_changed) {
2126 if (state->active) {
2127 return DM_COMMIT_ACTION_DPMS_ON;
2128 } else {
2129 return DM_COMMIT_ACTION_DPMS_OFF;
2130 }
2131 } else {
2132 /* ! active_changed */
2133 return DM_COMMIT_ACTION_NOTHING;
2134 }
2135 }
2136 }
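
/*
 * Summary of the decision tree above (illustrative; conditions are tested
 * in this order):
 *
 *   mode_changed && !enable                        => RESET
 *   mode_changed && active                         => SET
 *   mode_changed && !active                        => RESET
 *   !mode_changed && !enable                       => NOTHING
 *   !mode_changed && active && connectors_changed  => SET
 *   !mode_changed && active_changed && active      => DPMS_ON
 *   !mode_changed && active_changed && !active     => DPMS_OFF
 *   otherwise                                      => NOTHING
 */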
2137
2138
2139 typedef bool (*predicate)(struct amdgpu_crtc *acrtc);
2140
static void wait_while_pflip_status(struct amdgpu_device *adev,
		struct amdgpu_crtc *acrtc, predicate f)
{
2143 int count = 0;
2144 while (f(acrtc)) {
		/* sleep-wait, polling roughly once per millisecond */
		msleep(1);
2147 count++;
2148 if (count == 1000) {
2149 DRM_ERROR("%s - crtc:%d[%p], pflip_stat:%d, probable hang!\n",
2150 __func__, acrtc->crtc_id,
2151 acrtc,
2152 acrtc->pflip_status);
2153
			/* We do not expect to hit this case except on Polaris with PHY PLL:
			 * 1. DP to HDMI passive dongle connected
			 * 2. unplug (headless)
			 * 3. plug in DP
			 * 3a. on plug-in, DP will try to verify the link by
			 *     training, and training disables the PHY PLL
			 *     which HDMI relies on to drive the TG
			 * 3b. as a result the flip interrupt cannot be
			 *     generated, and we exit when the timeout expires.
			 *     However we have no code here to clean up the
			 *     flip; flip cleanup happens when the address is
			 *     written with the restore mode change
			 */
2165 WARN_ON(1);
2166 break;
2167 }
2168 }
2169
	DRM_DEBUG_DRIVER("%s - Finished waiting for %d msec, crtc:%d[%p], pflip_stat:%d\n",
2171 __func__,
2172 count,
2173 acrtc->crtc_id,
2174 acrtc,
2175 acrtc->pflip_status);
2176 }
2177
2178 static bool pflip_in_progress_predicate(struct amdgpu_crtc *acrtc)
2179 {
2180 return acrtc->pflip_status != AMDGPU_FLIP_NONE;
2181 }
2182
2183 static void manage_dm_interrupts(
2184 struct amdgpu_device *adev,
2185 struct amdgpu_crtc *acrtc,
2186 bool enable)
2187 {
	/*
	 * This is not a correct translation, but it works as long as the
	 * VBLANK irq type constant stays the same as the PFLIP one.
	 */
2192 int irq_type =
2193 amdgpu_crtc_idx_to_irq_type(
2194 adev,
2195 acrtc->crtc_id);
2196
2197 if (enable) {
2198 drm_crtc_vblank_on(&acrtc->base);
2199 amdgpu_irq_get(
2200 adev,
2201 &adev->pageflip_irq,
2202 irq_type);
2203 } else {
2204 wait_while_pflip_status(adev, acrtc,
2205 pflip_in_progress_predicate);
2206
2207 amdgpu_irq_put(
2208 adev,
2209 &adev->pageflip_irq,
2210 irq_type);
2211 drm_crtc_vblank_off(&acrtc->base);
2212 }
2213 }
2214
2215
2216 static bool pflip_pending_predicate(struct amdgpu_crtc *acrtc)
2217 {
2218 return acrtc->pflip_status == AMDGPU_FLIP_PENDING;
2219 }
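
/*
 * Illustrative sketch (not part of the driver): the predicate typedef lets
 * wait_while_pflip_status() poll arbitrary per-CRTC conditions. A
 * hypothetical predicate that waits only while a flip is already submitted
 * (i.e. past the pending stage) would look like this:
 */
static bool example_pflip_submitted_predicate(struct amdgpu_crtc *acrtc)
{
	return acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED;
}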
2220
2221 static bool is_scaling_state_different(
2222 const struct dm_connector_state *dm_state,
2223 const struct dm_connector_state *old_dm_state)
2224 {
2225 if (dm_state->scaling != old_dm_state->scaling)
2226 return true;
2227 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
2228 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
2229 return true;
2230 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
2231 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
2232 return true;
2233 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder
2234 || dm_state->underscan_vborder != old_dm_state->underscan_vborder)
2235 return true;
2236 return false;
2237 }
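
/*
 * Concrete example of the rule above (hypothetical values): toggling
 * underscan on with hborder = 32 and vborder = 16 counts as different and
 * forces a new stream, while toggling it on with both borders still 0 does
 * not. Changing a border value while the enable state is unchanged also
 * counts as different.
 */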
2238
2239 static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc)
2240 {
	/*
	 * Disable vblank and pflip interrupts on the CRTC that is about to
	 * be changed.
	 */
2245 manage_dm_interrupts(adev, acrtc, false);
2246
2247 /* this is the update mode case */
2248 if (adev->dm.freesync_module)
2249 mod_freesync_remove_stream(adev->dm.freesync_module,
2250 acrtc->stream);
2251
2252 dc_stream_release(acrtc->stream);
2253 acrtc->stream = NULL;
2254 acrtc->otg_inst = -1;
2255 acrtc->enabled = false;
2256 }
2257
2258 int amdgpu_dm_atomic_commit(
2259 struct drm_device *dev,
2260 struct drm_atomic_state *state,
2261 bool nonblock)
2262 {
2263 struct amdgpu_device *adev = dev->dev_private;
2264 struct amdgpu_display_manager *dm = &adev->dm;
2265 struct drm_plane *plane;
2266 struct drm_plane_state *new_plane_state;
2267 struct drm_plane_state *old_plane_state;
2268 uint32_t i;
2269 int32_t ret = 0;
2270 uint32_t commit_streams_count = 0;
2271 uint32_t new_crtcs_count = 0;
2272 uint32_t flip_crtcs_count = 0;
2273 struct drm_crtc *crtc;
2274 struct drm_crtc_state *old_crtc_state;
2275 const struct dc_stream *commit_streams[MAX_STREAMS];
2276 struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
2277 const struct dc_stream *new_stream;
2278 struct drm_crtc *flip_crtcs[MAX_STREAMS];
2279 struct amdgpu_flip_work *work[MAX_STREAMS] = {0};
2280 struct amdgpu_bo *new_abo[MAX_STREAMS] = {0};
2281
	/* In this step all new framebuffers get pinned */
2283
2284 /*
2285 * TODO: Revisit when we support true asynchronous commit.
2286 * Right now we receive async commit only from pageflip, in which case
2287 * we should not pin/unpin the fb here, it should be done in
2288 * amdgpu_crtc_flip and from the vblank irq handler.
2289 */
2290 if (!nonblock) {
2291 ret = drm_atomic_helper_prepare_planes(dev, state);
2292 if (ret)
2293 return ret;
2294 }
2295
2296 /* Page flip if needed */
2297 for_each_plane_in_state(state, plane, new_plane_state, i) {
2298 struct drm_plane_state *old_plane_state = plane->state;
2299 struct drm_crtc *crtc = new_plane_state->crtc;
2300 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2301 struct drm_framebuffer *fb = new_plane_state->fb;
2302 struct drm_crtc_state *crtc_state;
2303
2304 if (!fb || !crtc)
2305 continue;
2306
2307 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2308
2309 if (!crtc_state->planes_changed || !crtc_state->active)
2310 continue;
2311
2312 if (page_flip_needed(
2313 new_plane_state,
2314 old_plane_state,
2315 crtc_state->event,
2316 false)) {
2317 ret = amdgpu_crtc_prepare_flip(crtc,
2318 fb,
2319 crtc_state->event,
2320 acrtc->flip_flags,
2321 drm_crtc_vblank_count(crtc),
2322 &work[flip_crtcs_count],
2323 &new_abo[flip_crtcs_count]);
2324
2325 if (ret) {
2326 /* According to atomic_commit hook API, EINVAL is not allowed */
2327 if (unlikely(ret == -EINVAL))
2328 ret = -ENOMEM;
2329
2330 DRM_ERROR("Atomic commit: Flip for crtc id %d: [%p], "
2331 "failed, errno = %d\n",
2332 acrtc->crtc_id,
2333 acrtc,
2334 ret);
2335 /* cleanup all flip configurations which
2336 * succeeded in this commit
2337 */
2338 for (i = 0; i < flip_crtcs_count; i++)
2339 amdgpu_crtc_cleanup_flip_ctx(
2340 work[i],
2341 new_abo[i]);
2342
2343 return ret;
2344 }
2345
2346 flip_crtcs[flip_crtcs_count] = crtc;
2347 flip_crtcs_count++;
2348 }
2349 }
2350
2351 /*
2352 * This is the point of no return - everything below never fails except
2353 * when the hw goes bonghits. Which means we can commit the new state on
2354 * the software side now.
2355 */
2356
2357 drm_atomic_helper_swap_state(state, true);
2358
	/*
	 * From this point on, 'state' holds the old state; the new state has
	 * been swapped into the drm objects and can be accessed from them.
	 */
2363
	/*
	 * Fences are not used in the state yet, so the following call can be
	 * skipped: wait_for_fences(dev, state);
	 */
2368
2369 drm_atomic_helper_update_legacy_modeset_state(dev, state);
2370
2371 /* update changed items */
2372 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
2373 struct amdgpu_crtc *acrtc;
2374 struct amdgpu_connector *aconnector = NULL;
2375 enum dm_commit_action action;
2376 struct drm_crtc_state *new_state = crtc->state;
2377
2378 acrtc = to_amdgpu_crtc(crtc);
2379
2380 aconnector =
2381 amdgpu_dm_find_first_crct_matching_connector(
2382 state,
2383 crtc,
2384 false);
2385
2386 /* handles headless hotplug case, updating new_state and
2387 * aconnector as needed
2388 */
2389
2390 action = get_dm_commit_action(new_state);
2391
2392 switch (action) {
2393 case DM_COMMIT_ACTION_DPMS_ON:
2394 case DM_COMMIT_ACTION_SET: {
2395 struct dm_connector_state *dm_state = NULL;
2396 new_stream = NULL;
2397
2398 if (aconnector)
2399 dm_state = to_dm_connector_state(aconnector->base.state);
2400
2401 new_stream = create_stream_for_sink(
2402 aconnector,
2403 &crtc->state->mode,
2404 dm_state);
2405
2406 DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
2407
2408 if (!new_stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery: userspace
				 * tries to set a mode on a display which is
				 * in fact disconnected, so dc_sink is NULL on
				 * the aconnector. We expect a reset mode to
				 * come soon.
				 *
				 * This can also happen when an unplug occurs
				 * during the resume sequence.
				 *
				 * In this case we want to pretend we still
				 * have a sink to keep the pipe running, so
				 * that the hw state stays consistent with the
				 * sw state.
				 */
2424 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
2425 __func__, acrtc->base.base.id);
2426 break;
2427 }
2428
2429 if (acrtc->stream)
2430 remove_stream(adev, acrtc);
2431
			/*
			 * Save the CRTCs that got a new mode; vblank can be
			 * enabled only once all resources are acquired in dc,
			 * i.e. after dc_commit_streams().
			 */
2437 new_crtcs[new_crtcs_count] = acrtc;
2438 new_crtcs_count++;
2439
2440 acrtc->stream = new_stream;
2441 acrtc->enabled = true;
2442 acrtc->hw_mode = crtc->state->mode;
2443 crtc->hwmode = crtc->state->mode;
2444
2445 break;
2446 }
2447
2448 case DM_COMMIT_ACTION_NOTHING: {
2449 struct dm_connector_state *dm_state = NULL;
2450
2451 if (!aconnector)
2452 break;
2453
2454 dm_state = to_dm_connector_state(aconnector->base.state);
2455
2456 /* Scaling update */
2457 update_stream_scaling_settings(&crtc->state->mode,
2458 dm_state, acrtc->stream);
2459
2460 break;
2461 }
2462 case DM_COMMIT_ACTION_DPMS_OFF:
2463 case DM_COMMIT_ACTION_RESET:
2464 DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
2465 /* i.e. reset mode */
2466 if (acrtc->stream)
2467 remove_stream(adev, acrtc);
2468 break;
2469 } /* switch() */
2470 } /* for_each_crtc_in_state() */
2471
2472 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2473
2474 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2475
2476 if (acrtc->stream) {
2477 commit_streams[commit_streams_count] = acrtc->stream;
2478 ++commit_streams_count;
2479 }
2480 }
2481
	/*
	 * Add streams to the freesync module only after the new and the
	 * replaced streams have been removed from it.
	 */
2486 if (adev->dm.freesync_module) {
2487 for (i = 0; i < new_crtcs_count; i++) {
2488 struct amdgpu_connector *aconnector = NULL;
2489 new_stream = new_crtcs[i]->stream;
2490 aconnector =
2491 amdgpu_dm_find_first_crct_matching_connector(
2492 state,
2493 &new_crtcs[i]->base,
2494 false);
2495 if (!aconnector) {
2496 DRM_INFO(
2497 "Atomic commit: Failed to find connector for acrtc id:%d "
2498 "skipping freesync init\n",
2499 new_crtcs[i]->crtc_id);
2500 continue;
2501 }
2502
2503 mod_freesync_add_stream(adev->dm.freesync_module,
2504 new_stream, &aconnector->caps);
2505 }
2506 }
2507
2508 /* DC is optimized not to do anything if 'streams' didn't change. */
2509 dc_commit_streams(dm->dc, commit_streams, commit_streams_count);
2510
2511 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2512 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2513
2514 if (acrtc->stream != NULL)
2515 acrtc->otg_inst =
2516 dc_stream_get_status(acrtc->stream)->primary_otg_inst;
2517 }
2518
2519 /* update planes when needed */
2520 for_each_plane_in_state(state, plane, old_plane_state, i) {
2521 struct drm_plane_state *plane_state = plane->state;
2522 struct drm_crtc *crtc = plane_state->crtc;
2523 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2524 struct drm_framebuffer *fb = plane_state->fb;
2525 struct drm_connector *connector;
2526 struct dm_connector_state *dm_state = NULL;
2527 enum dm_commit_action action;
2528
2529 if (!fb || !crtc || !crtc->state->active)
2530 continue;
2531
2532 action = get_dm_commit_action(crtc->state);
2533
2534 /* Surfaces are created under two scenarios:
2535 * 1. This commit is not a page flip.
2536 * 2. This commit is a page flip, and streams are created.
2537 */
2538 if (!page_flip_needed(
2539 plane_state,
2540 old_plane_state,
2541 crtc->state->event, true) ||
2542 action == DM_COMMIT_ACTION_DPMS_ON ||
2543 action == DM_COMMIT_ACTION_SET) {
2544 list_for_each_entry(connector,
2545 &dev->mode_config.connector_list, head) {
2546 if (connector->state->crtc == crtc) {
2547 dm_state = to_dm_connector_state(
2548 connector->state);
2549 break;
2550 }
2551 }
2552
			/*
			 * This situation happens in the following case: we
			 * are about to set a mode on a connector whose only
			 * possible crtc (in the encoder crtc mask) is already
			 * used by another connector, so DRM tries to
			 * re-assign crtcs to make the configuration
			 * supported. For our implementation we need to make
			 * all encoders support all crtcs, then this issue
			 * will never arise again; the check is left to guard
			 * the code against it.
			 *
			 * It will also be needed once the actual
			 * drm_atomic_commit ioctl is used in the future.
			 */
2567 if (!dm_state)
2568 continue;
2569
			/*
			 * If a flip is pending here (i.e. still waiting for a
			 * fence to return before the address is submitted),
			 * we cannot commit the surface, as commit_surface
			 * would prematurely write out the future address.
			 * Wait until the flip is submitted before proceeding.
			 */
2576 wait_while_pflip_status(adev, acrtc, pflip_pending_predicate);
2577
2578 dm_dc_surface_commit(dm->dc, crtc);
2579 }
2580 }
2581
2582 for (i = 0; i < new_crtcs_count; i++) {
		/* enable interrupts on the newly enabled crtc */
2586 struct amdgpu_crtc *acrtc = new_crtcs[i];
2587
2588 if (adev->dm.freesync_module)
2589 mod_freesync_notify_mode_change(
2590 adev->dm.freesync_module, &acrtc->stream, 1);
2591
2592 manage_dm_interrupts(adev, acrtc, true);
2593 dm_crtc_cursor_reset(&acrtc->base);
2594
2595 }
2596
2597 /* Do actual flip */
2598 flip_crtcs_count = 0;
2599 for_each_plane_in_state(state, plane, old_plane_state, i) {
2600 struct drm_plane_state *plane_state = plane->state;
2601 struct drm_crtc *crtc = plane_state->crtc;
2602 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2603 struct drm_framebuffer *fb = plane_state->fb;
2604
2605 if (!fb || !crtc || !crtc->state->planes_changed ||
2606 !crtc->state->active)
2607 continue;
2608
2609 if (page_flip_needed(
2610 plane_state,
2611 old_plane_state,
2612 crtc->state->event,
2613 false)) {
			/* work[] and new_abo[] were filled at the same
			 * per-flip index in the prepare loop above
			 */
			amdgpu_crtc_submit_flip(
				crtc,
				fb,
				work[flip_crtcs_count],
				new_abo[flip_crtcs_count]);
			flip_crtcs_count++;
			/* clean up the flags for next usage */
			acrtc->flip_flags = 0;
2622 }
2623 }
2624
	/* At this point all old framebuffers get unpinned */
2626
2627 /* TODO: Revisit when we support true asynchronous commit.*/
2628 if (!nonblock)
2629 drm_atomic_helper_cleanup_planes(dev, state);
2630
2631 drm_atomic_state_put(state);
2632
2633 return ret;
2634 }
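
/*
 * Summary of the commit sequence implemented above (illustrative):
 *   1. prepare/pin the new framebuffers (blocking commits only)
 *   2. prepare page-flip work for planes that only flip
 *   3. swap the atomic state; failures are not allowed past this point
 *   4. per CRTC, create or remove dc streams according to the commit action
 *   5. dc_commit_streams() applies the resulting stream set to the hardware
 *   6. commit surfaces for the non-flip plane updates
 *   7. enable interrupts on newly enabled CRTCs, then submit the flips
 *   8. clean up planes and release the atomic state
 */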
/*
 * This function handles all cases when a set mode does not come upon
 * hotplug. This includes the case where the same display is unplugged and
 * then plugged back into the same port, and the case where we run without
 * usermode desktop manager support.
 */
2640 void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector)
2641 {
2642 struct drm_crtc *crtc;
2643 struct amdgpu_device *adev = dev->dev_private;
2644 struct dc *dc = adev->dm.dc;
2645 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
2646 struct amdgpu_crtc *disconnected_acrtc;
2647 const struct dc_sink *sink;
2648 const struct dc_stream *commit_streams[MAX_STREAMS];
2649 const struct dc_stream *current_stream;
2650 uint32_t commit_streams_count = 0;
2651
2652 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
2653 return;
2654
2655 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
2656
2657 if (!disconnected_acrtc || !disconnected_acrtc->stream)
2658 return;
2659
2660 sink = disconnected_acrtc->stream->sink;
2661
	/*
	 * If the previous sink was not released and differs from the current
	 * one, we deduce that we are in a state where we cannot rely on a
	 * usermode call to turn on the display, so we do it here.
	 */
2667 if (sink != aconnector->dc_sink) {
2668 struct dm_connector_state *dm_state =
2669 to_dm_connector_state(aconnector->base.state);
2670
2671 struct dc_stream *new_stream =
2672 create_stream_for_sink(
2673 aconnector,
2674 &disconnected_acrtc->base.state->mode,
2675 dm_state);
2676
2677 DRM_INFO("Headless hotplug, restoring connector state\n");
		/*
		 * Disable vblank and pflip interrupts on the CRTC that is
		 * about to be changed.
		 */
2682 manage_dm_interrupts(adev, disconnected_acrtc, false);
2683 /* this is the update mode case */
2684
2685 current_stream = disconnected_acrtc->stream;
2686
2687 disconnected_acrtc->stream = new_stream;
2688 disconnected_acrtc->enabled = true;
2689 disconnected_acrtc->hw_mode = disconnected_acrtc->base.state->mode;
2690
2691 commit_streams_count = 0;
2692
2693 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2694 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2695
2696 if (acrtc->stream) {
2697 commit_streams[commit_streams_count] = acrtc->stream;
2698 ++commit_streams_count;
2699 }
2700 }
2701
2702 /* DC is optimized not to do anything if 'streams' didn't change. */
2703 if (!dc_commit_streams(dc, commit_streams,
2704 commit_streams_count)) {
2705 DRM_INFO("Failed to restore connector state!\n");
2706 dc_stream_release(disconnected_acrtc->stream);
2707 disconnected_acrtc->stream = current_stream;
2708 manage_dm_interrupts(adev, disconnected_acrtc, true);
2709 return;
2710 }
2711
2712 if (adev->dm.freesync_module) {
2713 mod_freesync_remove_stream(adev->dm.freesync_module,
2714 current_stream);
2715
2716 mod_freesync_add_stream(adev->dm.freesync_module,
2717 new_stream, &aconnector->caps);
2718 }
2719
2720 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2721 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2722
2723 if (acrtc->stream != NULL) {
2724 acrtc->otg_inst =
2725 dc_stream_get_status(acrtc->stream)->primary_otg_inst;
2726 }
2727 }
2728
2729 dc_stream_release(current_stream);
2730
2731 dm_dc_surface_commit(dc, &disconnected_acrtc->base);
2732
2733 manage_dm_interrupts(adev, disconnected_acrtc, true);
2734 dm_crtc_cursor_reset(&disconnected_acrtc->base);
2735
2736 }
2737 }
2738
2739 static uint32_t add_val_sets_surface(
2740 struct dc_validation_set *val_sets,
2741 uint32_t set_count,
2742 const struct dc_stream *stream,
2743 const struct dc_surface *surface)
2744 {
2745 uint32_t i = 0;
2746
2747 while (i < set_count) {
2748 if (val_sets[i].stream == stream)
2749 break;
2750 ++i;
2751 }
2752
2753 val_sets[i].surfaces[val_sets[i].surface_count] = surface;
2754 val_sets[i].surface_count++;
2755
2756 return val_sets[i].surface_count;
2757 }
2758
2759 static uint32_t update_in_val_sets_stream(
2760 struct dc_validation_set *val_sets,
2761 struct drm_crtc **crtcs,
2762 uint32_t set_count,
2763 const struct dc_stream *old_stream,
2764 const struct dc_stream *new_stream,
2765 struct drm_crtc *crtc)
2766 {
2767 uint32_t i = 0;
2768
2769 while (i < set_count) {
2770 if (val_sets[i].stream == old_stream)
2771 break;
2772 ++i;
2773 }
2774
2775 val_sets[i].stream = new_stream;
2776 crtcs[i] = crtc;
2777
2778 if (i == set_count) {
2779 /* nothing found. add new one to the end */
2780 return set_count + 1;
2781 }
2782
2783 return set_count;
2784 }
2785
2786 static uint32_t remove_from_val_sets(
2787 struct dc_validation_set *val_sets,
2788 uint32_t set_count,
2789 const struct dc_stream *stream)
2790 {
2791 int i;
2792
2793 for (i = 0; i < set_count; i++)
2794 if (val_sets[i].stream == stream)
2795 break;
2796
2797 if (i == set_count) {
2798 /* nothing found */
2799 return set_count;
2800 }
2801
2802 set_count--;
2803
2804 for (; i < set_count; i++) {
2805 val_sets[i] = val_sets[i + 1];
2806 }
2807
2808 return set_count;
2809 }
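
/*
 * Illustrative lifecycle of the dc_validation_set array managed by the three
 * helpers above (hypothetical two-CRTC system with committed streams A, B):
 *
 *   initial copy from the CRTC list:  set = { {A}, {B} }, set_count = 2
 *   modeset on A's crtc:              update_in_val_sets_stream(A -> A')
 *   disable of B's crtc:              remove_from_val_sets(B), set_count = 1
 *   plane attached to A's crtc:       add_val_sets_surface(A', surface)
 *                                     appends to the entry whose stream == A'
 */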
2810
2811 int amdgpu_dm_atomic_check(struct drm_device *dev,
2812 struct drm_atomic_state *state)
2813 {
2814 struct drm_crtc *crtc;
2815 struct drm_crtc_state *crtc_state;
2816 struct drm_plane *plane;
2817 struct drm_plane_state *plane_state;
2818 int i, j;
2819 int ret;
2820 int set_count;
2821 int new_stream_count;
2822 struct dc_validation_set set[MAX_STREAMS] = {{ 0 }};
2823 struct dc_stream *new_streams[MAX_STREAMS] = { 0 };
2824 struct drm_crtc *crtc_set[MAX_STREAMS] = { 0 };
2825 struct amdgpu_device *adev = dev->dev_private;
2826 struct dc *dc = adev->dm.dc;
2827 bool need_to_validate = false;
2828
2829 ret = drm_atomic_helper_check(dev, state);
2830
2831 if (ret) {
		DRM_ERROR("Atomic state validation failed with error %d!\n",
2833 ret);
2834 return ret;
2835 }
2836
2837 ret = -EINVAL;
2838
2839 /* copy existing configuration */
2840 new_stream_count = 0;
2841 set_count = 0;
2842 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2843
2844 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2845
2846 if (acrtc->stream) {
2847 set[set_count].stream = acrtc->stream;
2848 crtc_set[set_count] = crtc;
2849 ++set_count;
2850 }
2851 }
2852
2853 /* update changed items */
2854 for_each_crtc_in_state(state, crtc, crtc_state, i) {
2855 struct amdgpu_crtc *acrtc = NULL;
2856 struct amdgpu_connector *aconnector = NULL;
2857 enum dm_commit_action action;
2858
2859 acrtc = to_amdgpu_crtc(crtc);
2860
2861 aconnector = amdgpu_dm_find_first_crct_matching_connector(state, crtc, true);
2862
2863 action = get_dm_commit_action(crtc_state);
2864
2865 switch (action) {
2866 case DM_COMMIT_ACTION_DPMS_ON:
2867 case DM_COMMIT_ACTION_SET: {
2868 struct dc_stream *new_stream = NULL;
2869 struct drm_connector_state *conn_state = NULL;
2870 struct dm_connector_state *dm_state = NULL;
2871
2872 if (aconnector) {
2873 conn_state = drm_atomic_get_connector_state(state, &aconnector->base);
2874 if (IS_ERR(conn_state))
2875 return ret;
2876 dm_state = to_dm_connector_state(conn_state);
2877 }
2878
2879 new_stream = create_stream_for_sink(aconnector, &crtc_state->mode, dm_state);
2880
			/*
			 * We can have no stream on ACTION_SET if a display
			 * was disconnected during S3; in this case it is not
			 * an error, the OS will be updated after detection
			 * and will do the right thing on the next atomic
			 * commit.
			 */
2887 if (!new_stream) {
2888 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
2889 __func__, acrtc->base.base.id);
2890 break;
2891 }
2892
2893 new_streams[new_stream_count] = new_stream;
2894 set_count = update_in_val_sets_stream(
2895 set,
2896 crtc_set,
2897 set_count,
2898 acrtc->stream,
2899 new_stream,
2900 crtc);
2901
2902 new_stream_count++;
2903 need_to_validate = true;
2904 break;
2905 }
2906
2907 case DM_COMMIT_ACTION_NOTHING: {
2908 const struct drm_connector *drm_connector = NULL;
2909 struct drm_connector_state *conn_state = NULL;
2910 struct dm_connector_state *dm_state = NULL;
2911 struct dm_connector_state *old_dm_state = NULL;
2912 struct dc_stream *new_stream;
2913
2914 if (!aconnector)
2915 break;
2916
2917 for_each_connector_in_state(
2918 state, drm_connector, conn_state, j) {
2919 if (&aconnector->base == drm_connector)
2920 break;
2921 }
2922
2923 old_dm_state = to_dm_connector_state(drm_connector->state);
2924 dm_state = to_dm_connector_state(conn_state);
2925
			/* Support underscan adjustment */
2927 if (!is_scaling_state_different(dm_state, old_dm_state))
2928 break;
2929
2930 new_stream = create_stream_for_sink(aconnector, &crtc_state->mode, dm_state);
2931
2932 if (!new_stream) {
2933 DRM_ERROR("%s: Failed to create new stream for crtc %d\n",
2934 __func__, acrtc->base.base.id);
2935 break;
2936 }
2937
2938 new_streams[new_stream_count] = new_stream;
2939 set_count = update_in_val_sets_stream(
2940 set,
2941 crtc_set,
2942 set_count,
2943 acrtc->stream,
2944 new_stream,
2945 crtc);
2946
2947 new_stream_count++;
2948 need_to_validate = true;
2949
2950 break;
2951 }
2952 case DM_COMMIT_ACTION_DPMS_OFF:
2953 case DM_COMMIT_ACTION_RESET:
2954 /* i.e. reset mode */
2955 if (acrtc->stream) {
2956 set_count = remove_from_val_sets(
2957 set,
2958 set_count,
2959 acrtc->stream);
2960 }
2961 break;
2962 }
2963
2964 /*
2965 * TODO revisit when removing commit action
2966 * and looking at atomic flags directly
2967 */
2968
		/* commit needs planes right now (e.g. for gamma) */
		/* TODO rework commit to check crtc for gamma change */
2971 ret = drm_atomic_add_affected_planes(state, crtc);
2972 if (ret)
2973 return ret;
2974 }
2975
2976 for (i = 0; i < set_count; i++) {
2977 for_each_plane_in_state(state, plane, plane_state, j) {
2978 struct drm_plane_state *old_plane_state = plane->state;
2979 struct drm_crtc *crtc = plane_state->crtc;
2980 struct drm_framebuffer *fb = plane_state->fb;
2981 struct drm_connector *connector;
2982 struct dm_connector_state *dm_state = NULL;
2983 enum dm_commit_action action;
2984 struct drm_crtc_state *crtc_state;
2985
2986
2987 if (!fb || !crtc || crtc_set[i] != crtc ||
2988 !crtc->state->planes_changed || !crtc->state->active)
2989 continue;
2990
2991 action = get_dm_commit_action(crtc->state);
2992
2993 /* Surfaces are created under two scenarios:
2994 * 1. This commit is not a page flip.
2995 * 2. This commit is a page flip, and streams are created.
2996 */
2997 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2998 if (!page_flip_needed(plane_state, old_plane_state,
2999 crtc_state->event, true) ||
3000 action == DM_COMMIT_ACTION_DPMS_ON ||
3001 action == DM_COMMIT_ACTION_SET) {
3002 struct dc_surface *surface;
3003
3004 list_for_each_entry(connector,
3005 &dev->mode_config.connector_list, head) {
3006 if (connector->state->crtc == crtc) {
3007 dm_state = to_dm_connector_state(
3008 connector->state);
3009 break;
3010 }
3011 }
3012
				/*
				 * This situation happens in the following
				 * case: we are about to set a mode on a
				 * connector whose only possible crtc (in the
				 * encoder crtc mask) is already used by
				 * another connector, so DRM tries to
				 * re-assign crtcs to make the configuration
				 * supported. For our implementation we need
				 * to make all encoders support all crtcs,
				 * then this issue will never arise again; the
				 * check is left to guard the code against it.
				 *
				 * It will also be needed once the actual
				 * drm_atomic_commit ioctl is used in the
				 * future.
				 */
3027 if (!dm_state)
3028 continue;
3029
3030 surface = dc_create_surface(dc);
3031 fill_plane_attributes(
3032 crtc->dev->dev_private,
3033 surface,
3034 plane_state,
3035 false);
3036
3037 add_val_sets_surface(
3038 set,
3039 set_count,
3040 set[i].stream,
3041 surface);
3042
3043 need_to_validate = true;
3044 }
3045 }
3046 }
3047
	if (!need_to_validate || set_count == 0 ||
	    dc_validate_resources(dc, set, set_count))
		ret = 0;
3051
3052 for (i = 0; i < set_count; i++) {
3053 for (j = 0; j < set[i].surface_count; j++) {
3054 dc_surface_release(set[i].surfaces[j]);
3055 }
3056 }
3057 for (i = 0; i < new_stream_count; i++)
3058 dc_stream_release(new_streams[i]);
3059
3060 if (ret != 0)
3061 DRM_ERROR("Atomic check failed.\n");
3062
3063 return ret;
3064 }
3065
static bool is_dp_capable_without_timing_msa(
		struct dc *dc,
		struct amdgpu_connector *amdgpu_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	/*
	 * MSA_TIMING_PAR_IGNORED in the DOWN_STREAM_PORT_COUNT register means
	 * the sink can render the incoming stream without the MSA timing
	 * parameters, a prerequisite for variable refresh.
	 */
	if (amdgpu_connector->dc_link &&
	    dc_read_dpcd(dc, amdgpu_connector->dc_link->link_index,
			 DP_DOWN_STREAM_PORT_COUNT,
			 &dpcd_data, sizeof(dpcd_data)))
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) != 0;

	return capable;
}

void amdgpu_dm_add_sink_to_freesync_module(
3081 struct drm_connector *connector,
3082 struct edid *edid)
3083 {
3084 int i;
3085 uint64_t val_capable;
3086 bool edid_check_required;
3087 struct detailed_timing *timing;
3088 struct detailed_non_pixel *data;
3089 struct detailed_data_monitor_range *range;
3090 struct amdgpu_connector *amdgpu_connector =
3091 to_amdgpu_connector(connector);
3092
3093 struct drm_device *dev = connector->dev;
3094 struct amdgpu_device *adev = dev->dev_private;
3095 edid_check_required = false;
3096 if (!amdgpu_connector->dc_sink) {
3097 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
3098 return;
3099 }
3100 if (!adev->dm.freesync_module)
3101 return;
	/*
	 * If the EDID is non-NULL, restrict freesync to DP and eDP sinks.
	 */
3105 if (edid) {
3106 if (amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
3107 || amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
3108 edid_check_required = is_dp_capable_without_timing_msa(
3109 adev->dm.dc,
3110 amdgpu_connector);
3111 }
3112 }
3113 val_capable = 0;
	if (edid_check_required && (edid->version > 1 ||
			(edid->version == 1 && edid->revision > 1))) {
3116 for (i = 0; i < 4; i++) {
3117
3118 timing = &edid->detailed_timings[i];
3119 data = &timing->data.other_data;
3120 range = &data->data.range;
3121 /*
3122 * Check if monitor has continuous frequency mode
3123 */
3124 if (data->type != EDID_DETAIL_MONITOR_RANGE)
3125 continue;
3126 /*
3127 * Check for flag range limits only. If flag == 1 then
3128 * no additional timing information provided.
3129 * Default GTF, GTF Secondary curve and CVT are not
3130 * supported
3131 */
3132 if (range->flags != 1)
3133 continue;
3134
3135 amdgpu_connector->min_vfreq = range->min_vfreq;
3136 amdgpu_connector->max_vfreq = range->max_vfreq;
3137 amdgpu_connector->pixel_clock_mhz =
3138 range->pixel_clock_mhz * 10;
3139 break;
3140 }
3141
3142 if (amdgpu_connector->max_vfreq -
3143 amdgpu_connector->min_vfreq > 10) {
3144 amdgpu_connector->caps.supported = true;
3145 amdgpu_connector->caps.min_refresh_in_micro_hz =
3146 amdgpu_connector->min_vfreq * 1000000;
3147 amdgpu_connector->caps.max_refresh_in_micro_hz =
3148 amdgpu_connector->max_vfreq * 1000000;
3149 val_capable = 1;
3150 }
3151 }
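
	/*
	 * Worked example with hypothetical panel values: an EDID range
	 * descriptor of 40-75 Hz passes the "window wider than 10 Hz" test
	 * above and is published as min_refresh_in_micro_hz = 40,000,000 and
	 * max_refresh_in_micro_hz = 75,000,000.
	 */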
3152
3153 /*
3154 * TODO figure out how to notify user-mode or DRM of freesync caps
3155 * once we figure out how to deal with freesync in an upstreamable
3156 * fashion
3157 */
3158
3159 }
3160
3161 void amdgpu_dm_remove_sink_from_freesync_module(
3162 struct drm_connector *connector)
3163 {
3164 /*
3165 * TODO fill in once we figure out how to deal with freesync in
3166 * an upstreamable fashion
3167 */
3168 }