]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c
drm/amd/display: Remove DCE12 guards
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm_types.c
1 /*
2 * Copyright 2012-13 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include <linux/types.h>
27 #include <linux/version.h>
28
29 #include <drm/drmP.h>
30 #include <drm/drm_atomic_helper.h>
31 #include <drm/drm_fb_helper.h>
32 #include <drm/drm_atomic.h>
33 #include <drm/drm_edid.h>
34
35 #include "amdgpu.h"
36 #include "amdgpu_pm.h"
37 #include "dm_services_types.h"
38
39 // We need to #undef FRAME_SIZE and DEPRECATED because they conflict
40 // with ptrace-abi.h's #define's of them.
41 #undef FRAME_SIZE
42 #undef DEPRECATED
43
44 #include "dc.h"
45
46 #include "amdgpu_dm_types.h"
47 #include "amdgpu_dm_mst_types.h"
48
49 #include "modules/inc/mod_freesync.h"
50
51 struct dm_connector_state {
52 struct drm_connector_state base;
53
54 enum amdgpu_rmx_type scaling;
55 uint8_t underscan_vborder;
56 uint8_t underscan_hborder;
57 bool underscan_enable;
58 };
59
60 #define to_dm_connector_state(x)\
61 container_of((x), struct dm_connector_state, base)
62
63
64 void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
65 {
66 drm_encoder_cleanup(encoder);
67 kfree(encoder);
68 }
69
70 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
71 .destroy = amdgpu_dm_encoder_destroy,
72 };
73
74 static void dm_set_cursor(
75 struct amdgpu_crtc *amdgpu_crtc,
76 uint64_t gpu_addr,
77 uint32_t width,
78 uint32_t height)
79 {
80 struct dc_cursor_attributes attributes;
81 struct dc_cursor_position position;
82 struct drm_crtc *crtc = &amdgpu_crtc->base;
83 int x, y;
84 int xorigin = 0, yorigin = 0;
85
86 amdgpu_crtc->cursor_width = width;
87 amdgpu_crtc->cursor_height = height;
88
89 attributes.address.high_part = upper_32_bits(gpu_addr);
90 attributes.address.low_part = lower_32_bits(gpu_addr);
91 attributes.width = width;
92 attributes.height = height;
93 attributes.x_hot = 0;
94 attributes.y_hot = 0;
95 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
96 attributes.rotation_angle = 0;
97 attributes.attribute_flags.value = 0;
98
99 x = amdgpu_crtc->cursor_x;
100 y = amdgpu_crtc->cursor_y;
101
102 /* avivo cursor are offset into the total surface */
103 x += crtc->primary->state->src_x >> 16;
104 y += crtc->primary->state->src_y >> 16;
105
106 if (x < 0) {
107 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
108 x = 0;
109 }
110 if (y < 0) {
111 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
112 y = 0;
113 }
114
115 position.enable = true;
116 position.x = x;
117 position.y = y;
118
119 position.hot_spot_enable = true;
120 position.x_hotspot = xorigin;
121 position.y_hotspot = yorigin;
122
123 if (!dc_stream_set_cursor_attributes(
124 amdgpu_crtc->stream,
125 &attributes)) {
126 DRM_ERROR("DC failed to set cursor attributes\n");
127 }
128
129 if (!dc_stream_set_cursor_position(
130 amdgpu_crtc->stream,
131 &position)) {
132 DRM_ERROR("DC failed to set cursor position\n");
133 }
134 }
135
136 static int dm_crtc_unpin_cursor_bo_old(
137 struct amdgpu_crtc *amdgpu_crtc)
138 {
139 struct amdgpu_bo *robj;
140 int ret = 0;
141
142 if (NULL != amdgpu_crtc && NULL != amdgpu_crtc->cursor_bo) {
143 robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
144
145 ret = amdgpu_bo_reserve(robj, false);
146
147 if (likely(ret == 0)) {
148 ret = amdgpu_bo_unpin(robj);
149
150 if (unlikely(ret != 0)) {
151 DRM_ERROR(
152 "%s: unpin failed (ret=%d), bo %p\n",
153 __func__,
154 ret,
155 amdgpu_crtc->cursor_bo);
156 }
157
158 amdgpu_bo_unreserve(robj);
159 } else {
160 DRM_ERROR(
161 "%s: reserve failed (ret=%d), bo %p\n",
162 __func__,
163 ret,
164 amdgpu_crtc->cursor_bo);
165 }
166
167 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
168 amdgpu_crtc->cursor_bo = NULL;
169 }
170
171 return ret;
172 }
173
174 static int dm_crtc_pin_cursor_bo_new(
175 struct drm_crtc *crtc,
176 struct drm_file *file_priv,
177 uint32_t handle,
178 struct amdgpu_bo **ret_obj)
179 {
180 struct amdgpu_crtc *amdgpu_crtc;
181 struct amdgpu_bo *robj;
182 struct drm_gem_object *obj;
183 int ret = -EINVAL;
184
185 if (NULL != crtc) {
186 struct drm_device *dev = crtc->dev;
187 struct amdgpu_device *adev = dev->dev_private;
188 uint64_t gpu_addr;
189
190 amdgpu_crtc = to_amdgpu_crtc(crtc);
191
192 obj = drm_gem_object_lookup(file_priv, handle);
193
194 if (!obj) {
195 DRM_ERROR(
196 "Cannot find cursor object %x for crtc %d\n",
197 handle,
198 amdgpu_crtc->crtc_id);
199 goto release;
200 }
201 robj = gem_to_amdgpu_bo(obj);
202
203 ret = amdgpu_bo_reserve(robj, false);
204
205 if (unlikely(ret != 0)) {
206 drm_gem_object_unreference_unlocked(obj);
207 DRM_ERROR("dm_crtc_pin_cursor_bo_new ret %x, handle %x\n",
208 ret, handle);
209 goto release;
210 }
211
212 ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, 0,
213 adev->mc.visible_vram_size,
214 &gpu_addr);
215
216 if (ret == 0) {
217 amdgpu_crtc->cursor_addr = gpu_addr;
218 *ret_obj = robj;
219 }
220 amdgpu_bo_unreserve(robj);
221 if (ret)
222 drm_gem_object_unreference_unlocked(obj);
223
224 }
225 release:
226
227 return ret;
228 }
229
230 static int dm_crtc_cursor_set(
231 struct drm_crtc *crtc,
232 struct drm_file *file_priv,
233 uint32_t handle,
234 uint32_t width,
235 uint32_t height)
236 {
237 struct amdgpu_bo *new_cursor_bo;
238 struct dc_cursor_position position;
239
240 int ret;
241
242 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
243
244 ret = EINVAL;
245 new_cursor_bo = NULL;
246
247 DRM_DEBUG_KMS(
248 "%s: crtc_id=%d with handle %d and size %d to %d, bo_object %p\n",
249 __func__,
250 amdgpu_crtc->crtc_id,
251 handle,
252 width,
253 height,
254 amdgpu_crtc->cursor_bo);
255
256 if (!handle) {
257 /* turn off cursor */
258 position.enable = false;
259 position.x = 0;
260 position.y = 0;
261 position.hot_spot_enable = false;
262
263 if (amdgpu_crtc->stream) {
264 /*set cursor visible false*/
265 dc_stream_set_cursor_position(
266 amdgpu_crtc->stream,
267 &position);
268 }
269 /*unpin old cursor buffer and update cache*/
270 ret = dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
271 goto release;
272
273 }
274
275 if ((width > amdgpu_crtc->max_cursor_width) ||
276 (height > amdgpu_crtc->max_cursor_height)) {
277 DRM_ERROR(
278 "%s: bad cursor width or height %d x %d\n",
279 __func__,
280 width,
281 height);
282 goto release;
283 }
284 /*try to pin new cursor bo*/
285 ret = dm_crtc_pin_cursor_bo_new(crtc, file_priv, handle, &new_cursor_bo);
286 /*if map not successful then return an error*/
287 if (ret)
288 goto release;
289
290 /*program new cursor bo to hardware*/
291 dm_set_cursor(amdgpu_crtc, amdgpu_crtc->cursor_addr, width, height);
292
293 /*un map old, not used anymore cursor bo ,
294 * return memory and mapping back */
295 dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
296
297 /*assign new cursor bo to our internal cache*/
298 amdgpu_crtc->cursor_bo = &new_cursor_bo->gem_base;
299
300 release:
301 return ret;
302
303 }
304
305 static int dm_crtc_cursor_move(struct drm_crtc *crtc,
306 int x, int y)
307 {
308 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
309 int xorigin = 0, yorigin = 0;
310 struct dc_cursor_position position;
311
312 amdgpu_crtc->cursor_x = x;
313 amdgpu_crtc->cursor_y = y;
314
315 /* avivo cursor are offset into the total surface */
316 x += crtc->primary->state->src_x >> 16;
317 y += crtc->primary->state->src_y >> 16;
318
319 /*
320 * TODO: for cursor debugging unguard the following
321 */
322 #if 0
323 DRM_DEBUG_KMS(
324 "%s: x %d y %d c->x %d c->y %d\n",
325 __func__,
326 x,
327 y,
328 crtc->x,
329 crtc->y);
330 #endif
331
332 if (x < 0) {
333 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
334 x = 0;
335 }
336 if (y < 0) {
337 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
338 y = 0;
339 }
340
341 position.enable = true;
342 position.x = x;
343 position.y = y;
344
345 position.hot_spot_enable = true;
346 position.x_hotspot = xorigin;
347 position.y_hotspot = yorigin;
348
349 if (amdgpu_crtc->stream) {
350 if (!dc_stream_set_cursor_position(
351 amdgpu_crtc->stream,
352 &position)) {
353 DRM_ERROR("DC failed to set cursor position\n");
354 return -EINVAL;
355 }
356 }
357
358 return 0;
359 }
360
361 static void dm_crtc_cursor_reset(struct drm_crtc *crtc)
362 {
363 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
364
365 DRM_DEBUG_KMS(
366 "%s: with cursor_bo %p\n",
367 __func__,
368 amdgpu_crtc->cursor_bo);
369
370 if (amdgpu_crtc->cursor_bo && amdgpu_crtc->stream) {
371 dm_set_cursor(
372 amdgpu_crtc,
373 amdgpu_crtc->cursor_addr,
374 amdgpu_crtc->cursor_width,
375 amdgpu_crtc->cursor_height);
376 }
377 }
378 static bool fill_rects_from_plane_state(
379 const struct drm_plane_state *state,
380 struct dc_surface *surface)
381 {
382 surface->src_rect.x = state->src_x >> 16;
383 surface->src_rect.y = state->src_y >> 16;
384 /*we ignore for now mantissa and do not to deal with floating pixels :(*/
385 surface->src_rect.width = state->src_w >> 16;
386
387 if (surface->src_rect.width == 0)
388 return false;
389
390 surface->src_rect.height = state->src_h >> 16;
391 if (surface->src_rect.height == 0)
392 return false;
393
394 surface->dst_rect.x = state->crtc_x;
395 surface->dst_rect.y = state->crtc_y;
396
397 if (state->crtc_w == 0)
398 return false;
399
400 surface->dst_rect.width = state->crtc_w;
401
402 if (state->crtc_h == 0)
403 return false;
404
405 surface->dst_rect.height = state->crtc_h;
406
407 surface->clip_rect = surface->dst_rect;
408
409 switch (state->rotation & DRM_MODE_ROTATE_MASK) {
410 case DRM_MODE_ROTATE_0:
411 surface->rotation = ROTATION_ANGLE_0;
412 break;
413 case DRM_MODE_ROTATE_90:
414 surface->rotation = ROTATION_ANGLE_90;
415 break;
416 case DRM_MODE_ROTATE_180:
417 surface->rotation = ROTATION_ANGLE_180;
418 break;
419 case DRM_MODE_ROTATE_270:
420 surface->rotation = ROTATION_ANGLE_270;
421 break;
422 default:
423 surface->rotation = ROTATION_ANGLE_0;
424 break;
425 }
426
427 return true;
428 }
429 static bool get_fb_info(
430 const struct amdgpu_framebuffer *amdgpu_fb,
431 uint64_t *tiling_flags,
432 uint64_t *fb_location)
433 {
434 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
435 int r = amdgpu_bo_reserve(rbo, false);
436 if (unlikely(r != 0)){
437 DRM_ERROR("Unable to reserve buffer\n");
438 return false;
439 }
440
441 if (fb_location)
442 *fb_location = amdgpu_bo_gpu_offset(rbo);
443
444 if (tiling_flags)
445 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
446
447 amdgpu_bo_unreserve(rbo);
448
449 return true;
450 }
451 static void fill_plane_attributes_from_fb(
452 struct amdgpu_device *adev,
453 struct dc_surface *surface,
454 const struct amdgpu_framebuffer *amdgpu_fb, bool addReq)
455 {
456 uint64_t tiling_flags;
457 uint64_t fb_location = 0;
458 const struct drm_framebuffer *fb = &amdgpu_fb->base;
459 struct drm_format_name_buf format_name;
460
461 get_fb_info(
462 amdgpu_fb,
463 &tiling_flags,
464 addReq == true ? &fb_location:NULL);
465
466 surface->address.type = PLN_ADDR_TYPE_GRAPHICS;
467 surface->address.grph.addr.low_part = lower_32_bits(fb_location);
468 surface->address.grph.addr.high_part = upper_32_bits(fb_location);
469
470 switch (fb->format->format) {
471 case DRM_FORMAT_C8:
472 surface->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
473 break;
474 case DRM_FORMAT_RGB565:
475 surface->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
476 break;
477 case DRM_FORMAT_XRGB8888:
478 case DRM_FORMAT_ARGB8888:
479 surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
480 break;
481 case DRM_FORMAT_XRGB2101010:
482 case DRM_FORMAT_ARGB2101010:
483 surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
484 break;
485 case DRM_FORMAT_XBGR2101010:
486 case DRM_FORMAT_ABGR2101010:
487 surface->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
488 break;
489 default:
490 DRM_ERROR("Unsupported screen format %s\n",
491 drm_get_format_name(fb->format->format, &format_name));
492 return;
493 }
494
495 memset(&surface->tiling_info, 0, sizeof(surface->tiling_info));
496
497 /* Fill GFX params */
498 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1)
499 {
500 unsigned bankw, bankh, mtaspect, tile_split, num_banks;
501
502 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
503 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
504 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
505 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
506 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
507
508 /* XXX fix me for VI */
509 surface->tiling_info.gfx8.num_banks = num_banks;
510 surface->tiling_info.gfx8.array_mode =
511 DC_ARRAY_2D_TILED_THIN1;
512 surface->tiling_info.gfx8.tile_split = tile_split;
513 surface->tiling_info.gfx8.bank_width = bankw;
514 surface->tiling_info.gfx8.bank_height = bankh;
515 surface->tiling_info.gfx8.tile_aspect = mtaspect;
516 surface->tiling_info.gfx8.tile_mode =
517 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
518 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
519 == DC_ARRAY_1D_TILED_THIN1) {
520 surface->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
521 }
522
523 surface->tiling_info.gfx8.pipe_config =
524 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
525
526 if (adev->asic_type == CHIP_VEGA10) {
527 /* Fill GFX9 params */
528 surface->tiling_info.gfx9.num_pipes =
529 adev->gfx.config.gb_addr_config_fields.num_pipes;
530 surface->tiling_info.gfx9.num_banks =
531 adev->gfx.config.gb_addr_config_fields.num_banks;
532 surface->tiling_info.gfx9.pipe_interleave =
533 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
534 surface->tiling_info.gfx9.num_shader_engines =
535 adev->gfx.config.gb_addr_config_fields.num_se;
536 surface->tiling_info.gfx9.max_compressed_frags =
537 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
538 surface->tiling_info.gfx9.swizzle =
539 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
540 surface->tiling_info.gfx9.shaderEnable = 1;
541 }
542
543
544 surface->plane_size.grph.surface_size.x = 0;
545 surface->plane_size.grph.surface_size.y = 0;
546 surface->plane_size.grph.surface_size.width = fb->width;
547 surface->plane_size.grph.surface_size.height = fb->height;
548 surface->plane_size.grph.surface_pitch =
549 fb->pitches[0] / fb->format->cpp[0];
550
551 surface->visible = true;
552 surface->scaling_quality.h_taps_c = 0;
553 surface->scaling_quality.v_taps_c = 0;
554
555 /* TODO: unhardcode */
556 surface->color_space = COLOR_SPACE_SRGB;
557 /* is this needed? is surface zeroed at allocation? */
558 surface->scaling_quality.h_taps = 0;
559 surface->scaling_quality.v_taps = 0;
560 surface->stereo_format = PLANE_STEREO_FORMAT_NONE;
561
562 }
563
564 #define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256
565
566 static void fill_gamma_from_crtc(
567 const struct drm_crtc *crtc,
568 struct dc_surface *dc_surface)
569 {
570 int i;
571 struct dc_gamma *gamma;
572 struct drm_crtc_state *state = crtc->state;
573 struct drm_color_lut *lut = (struct drm_color_lut *) state->gamma_lut->data;
574
575 gamma = dc_create_gamma();
576
577 if (gamma == NULL)
578 return;
579
580 for (i = 0; i < NUM_OF_RAW_GAMMA_RAMP_RGB_256; i++) {
581 gamma->red[i] = lut[i].red;
582 gamma->green[i] = lut[i].green;
583 gamma->blue[i] = lut[i].blue;
584 }
585
586 dc_surface->gamma_correction = gamma;
587 }
588
589 static void fill_plane_attributes(
590 struct amdgpu_device *adev,
591 struct dc_surface *surface,
592 struct drm_plane_state *state, bool addrReq)
593 {
594 const struct amdgpu_framebuffer *amdgpu_fb =
595 to_amdgpu_framebuffer(state->fb);
596 const struct drm_crtc *crtc = state->crtc;
597 struct dc_transfer_func *input_tf;
598
599 fill_rects_from_plane_state(state, surface);
600 fill_plane_attributes_from_fb(
601 crtc->dev->dev_private,
602 surface,
603 amdgpu_fb,
604 addrReq);
605
606 input_tf = dc_create_transfer_func();
607
608 if (input_tf == NULL)
609 return;
610
611 input_tf->type = TF_TYPE_PREDEFINED;
612 input_tf->tf = TRANSFER_FUNCTION_SRGB;
613
614 surface->in_transfer_func = input_tf;
615
616 /* In case of gamma set, update gamma value */
617 if (state->crtc->state->gamma_lut) {
618 fill_gamma_from_crtc(crtc, surface);
619 }
620 }
621
622 /*****************************************************************************/
623
624 struct amdgpu_connector *aconnector_from_drm_crtc_id(
625 const struct drm_crtc *crtc)
626 {
627 struct drm_device *dev = crtc->dev;
628 struct drm_connector *connector;
629 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
630 struct amdgpu_connector *aconnector;
631
632 list_for_each_entry(connector,
633 &dev->mode_config.connector_list, head) {
634
635 aconnector = to_amdgpu_connector(connector);
636
637 if (aconnector->base.state->crtc != &acrtc->base)
638 continue;
639
640 /* Found the connector */
641 return aconnector;
642 }
643
644 /* If we get here, not found. */
645 return NULL;
646 }
647
648 static void update_stream_scaling_settings(
649 const struct drm_display_mode *mode,
650 const struct dm_connector_state *dm_state,
651 const struct dc_stream *stream)
652 {
653 struct amdgpu_device *adev = dm_state->base.crtc->dev->dev_private;
654 enum amdgpu_rmx_type rmx_type;
655
656 struct rect src = { 0 }; /* viewport in composition space*/
657 struct rect dst = { 0 }; /* stream addressable area */
658
659 /* no mode. nothing to be done */
660 if (!mode)
661 return;
662
663 /* Full screen scaling by default */
664 src.width = mode->hdisplay;
665 src.height = mode->vdisplay;
666 dst.width = stream->timing.h_addressable;
667 dst.height = stream->timing.v_addressable;
668
669 rmx_type = dm_state->scaling;
670 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
671 if (src.width * dst.height <
672 src.height * dst.width) {
673 /* height needs less upscaling/more downscaling */
674 dst.width = src.width *
675 dst.height / src.height;
676 } else {
677 /* width needs less upscaling/more downscaling */
678 dst.height = src.height *
679 dst.width / src.width;
680 }
681 } else if (rmx_type == RMX_CENTER) {
682 dst = src;
683 }
684
685 dst.x = (stream->timing.h_addressable - dst.width) / 2;
686 dst.y = (stream->timing.v_addressable - dst.height) / 2;
687
688 if (dm_state->underscan_enable) {
689 dst.x += dm_state->underscan_hborder / 2;
690 dst.y += dm_state->underscan_vborder / 2;
691 dst.width -= dm_state->underscan_hborder;
692 dst.height -= dm_state->underscan_vborder;
693 }
694
695 adev->dm.dc->stream_funcs.stream_update_scaling(adev->dm.dc, stream, &src, &dst);
696
697 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
698 dst.x, dst.y, dst.width, dst.height);
699
700 }
701
702 static void dm_dc_surface_commit(
703 struct dc *dc,
704 struct drm_crtc *crtc)
705 {
706 struct dc_surface *dc_surface;
707 const struct dc_surface *dc_surfaces[1];
708 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
709 const struct dc_stream *dc_stream = acrtc->stream;
710 unsigned long flags;
711
712 spin_lock_irqsave(&crtc->dev->event_lock, flags);
713 if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
714 DRM_ERROR("dm_dc_surface_commit: acrtc %d, already busy\n", acrtc->crtc_id);
715 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
716 /* In comit tail framework this cannot happen */
717 BUG_ON(0);
718 }
719 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
720
721 if (!dc_stream) {
722 dm_error(
723 "%s: Failed to obtain stream on crtc (%d)!\n",
724 __func__,
725 acrtc->crtc_id);
726 goto fail;
727 }
728
729 dc_surface = dc_create_surface(dc);
730
731 if (!dc_surface) {
732 dm_error(
733 "%s: Failed to create a surface!\n",
734 __func__);
735 goto fail;
736 }
737
738 /* Surface programming */
739 fill_plane_attributes(
740 crtc->dev->dev_private,
741 dc_surface,
742 crtc->primary->state,
743 true);
744
745 dc_surfaces[0] = dc_surface;
746
747 if (false == dc_commit_surfaces_to_stream(
748 dc,
749 dc_surfaces,
750 1,
751 dc_stream)) {
752 dm_error(
753 "%s: Failed to attach surface!\n",
754 __func__);
755 }
756
757 dc_surface_release(dc_surface);
758 fail:
759 return;
760 }
761
762 static enum dc_color_depth convert_color_depth_from_display_info(
763 const struct drm_connector *connector)
764 {
765 uint32_t bpc = connector->display_info.bpc;
766
767 /* Limited color depth to 8bit
768 * TODO: Still need to handle deep color*/
769 if (bpc > 8)
770 bpc = 8;
771
772 switch (bpc) {
773 case 0:
774 /* Temporary Work around, DRM don't parse color depth for
775 * EDID revision before 1.4
776 * TODO: Fix edid parsing
777 */
778 return COLOR_DEPTH_888;
779 case 6:
780 return COLOR_DEPTH_666;
781 case 8:
782 return COLOR_DEPTH_888;
783 case 10:
784 return COLOR_DEPTH_101010;
785 case 12:
786 return COLOR_DEPTH_121212;
787 case 14:
788 return COLOR_DEPTH_141414;
789 case 16:
790 return COLOR_DEPTH_161616;
791 default:
792 return COLOR_DEPTH_UNDEFINED;
793 }
794 }
795
796 static enum dc_aspect_ratio get_aspect_ratio(
797 const struct drm_display_mode *mode_in)
798 {
799 int32_t width = mode_in->crtc_hdisplay * 9;
800 int32_t height = mode_in->crtc_vdisplay * 16;
801 if ((width - height) < 10 && (width - height) > -10)
802 return ASPECT_RATIO_16_9;
803 else
804 return ASPECT_RATIO_4_3;
805 }
806
807 static enum dc_color_space get_output_color_space(
808 const struct dc_crtc_timing *dc_crtc_timing)
809 {
810 enum dc_color_space color_space = COLOR_SPACE_SRGB;
811
812 switch (dc_crtc_timing->pixel_encoding) {
813 case PIXEL_ENCODING_YCBCR422:
814 case PIXEL_ENCODING_YCBCR444:
815 case PIXEL_ENCODING_YCBCR420:
816 {
817 /*
818 * 27030khz is the separation point between HDTV and SDTV
819 * according to HDMI spec, we use YCbCr709 and YCbCr601
820 * respectively
821 */
822 if (dc_crtc_timing->pix_clk_khz > 27030) {
823 if (dc_crtc_timing->flags.Y_ONLY)
824 color_space =
825 COLOR_SPACE_YCBCR709_LIMITED;
826 else
827 color_space = COLOR_SPACE_YCBCR709;
828 } else {
829 if (dc_crtc_timing->flags.Y_ONLY)
830 color_space =
831 COLOR_SPACE_YCBCR601_LIMITED;
832 else
833 color_space = COLOR_SPACE_YCBCR601;
834 }
835
836 }
837 break;
838 case PIXEL_ENCODING_RGB:
839 color_space = COLOR_SPACE_SRGB;
840 break;
841
842 default:
843 WARN_ON(1);
844 break;
845 }
846
847 return color_space;
848 }
849
850 /*****************************************************************************/
851
852 static void fill_stream_properties_from_drm_display_mode(
853 struct dc_stream *stream,
854 const struct drm_display_mode *mode_in,
855 const struct drm_connector *connector)
856 {
857 struct dc_crtc_timing *timing_out = &stream->timing;
858 memset(timing_out, 0, sizeof(struct dc_crtc_timing));
859
860 timing_out->h_border_left = 0;
861 timing_out->h_border_right = 0;
862 timing_out->v_border_top = 0;
863 timing_out->v_border_bottom = 0;
864 /* TODO: un-hardcode */
865
866 if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
867 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
868 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
869 else
870 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
871
872 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
873 timing_out->display_color_depth = convert_color_depth_from_display_info(
874 connector);
875 timing_out->scan_type = SCANNING_TYPE_NODATA;
876 timing_out->hdmi_vic = 0;
877 timing_out->vic = drm_match_cea_mode(mode_in);
878
879 timing_out->h_addressable = mode_in->crtc_hdisplay;
880 timing_out->h_total = mode_in->crtc_htotal;
881 timing_out->h_sync_width =
882 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
883 timing_out->h_front_porch =
884 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
885 timing_out->v_total = mode_in->crtc_vtotal;
886 timing_out->v_addressable = mode_in->crtc_vdisplay;
887 timing_out->v_front_porch =
888 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
889 timing_out->v_sync_width =
890 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
891 timing_out->pix_clk_khz = mode_in->crtc_clock;
892 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
893 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
894 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
895 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
896 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
897
898 stream->output_color_space = get_output_color_space(timing_out);
899
900 {
901 struct dc_transfer_func *tf = dc_create_transfer_func();
902 tf->type = TF_TYPE_PREDEFINED;
903 tf->tf = TRANSFER_FUNCTION_SRGB;
904 stream->out_transfer_func = tf;
905 }
906 }
907
908 static void fill_audio_info(
909 struct audio_info *audio_info,
910 const struct drm_connector *drm_connector,
911 const struct dc_sink *dc_sink)
912 {
913 int i = 0;
914 int cea_revision = 0;
915 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
916
917 audio_info->manufacture_id = edid_caps->manufacturer_id;
918 audio_info->product_id = edid_caps->product_id;
919
920 cea_revision = drm_connector->display_info.cea_rev;
921
922 while (i < AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS &&
923 edid_caps->display_name[i]) {
924 audio_info->display_name[i] = edid_caps->display_name[i];
925 i++;
926 }
927
928 if(cea_revision >= 3) {
929 audio_info->mode_count = edid_caps->audio_mode_count;
930
931 for (i = 0; i < audio_info->mode_count; ++i) {
932 audio_info->modes[i].format_code =
933 (enum audio_format_code)
934 (edid_caps->audio_modes[i].format_code);
935 audio_info->modes[i].channel_count =
936 edid_caps->audio_modes[i].channel_count;
937 audio_info->modes[i].sample_rates.all =
938 edid_caps->audio_modes[i].sample_rate;
939 audio_info->modes[i].sample_size =
940 edid_caps->audio_modes[i].sample_size;
941 }
942 }
943
944 audio_info->flags.all = edid_caps->speaker_flags;
945
946 /* TODO: We only check for the progressive mode, check for interlace mode too */
947 if(drm_connector->latency_present[0]) {
948 audio_info->video_latency = drm_connector->video_latency[0];
949 audio_info->audio_latency = drm_connector->audio_latency[0];
950 }
951
952 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
953
954 }
955
956 static void copy_crtc_timing_for_drm_display_mode(
957 const struct drm_display_mode *src_mode,
958 struct drm_display_mode *dst_mode)
959 {
960 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
961 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
962 dst_mode->crtc_clock = src_mode->crtc_clock;
963 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
964 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
965 dst_mode->crtc_hsync_start= src_mode->crtc_hsync_start;
966 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
967 dst_mode->crtc_htotal = src_mode->crtc_htotal;
968 dst_mode->crtc_hskew = src_mode->crtc_hskew;
969 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
970 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
971 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
972 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
973 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
974 }
975
976 static void decide_crtc_timing_for_drm_display_mode(
977 struct drm_display_mode *drm_mode,
978 const struct drm_display_mode *native_mode,
979 bool scale_enabled)
980 {
981 if (scale_enabled) {
982 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
983 } else if (native_mode->clock == drm_mode->clock &&
984 native_mode->htotal == drm_mode->htotal &&
985 native_mode->vtotal == drm_mode->vtotal) {
986 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
987 } else {
988 /* no scaling nor amdgpu inserted, no need to patch */
989 }
990 }
991
992 static struct dc_stream *create_stream_for_sink(
993 struct amdgpu_connector *aconnector,
994 const struct drm_display_mode *drm_mode,
995 const struct dm_connector_state *dm_state)
996 {
997 struct drm_display_mode *preferred_mode = NULL;
998 const struct drm_connector *drm_connector;
999 struct dc_stream *stream = NULL;
1000 struct drm_display_mode mode = *drm_mode;
1001 bool native_mode_found = false;
1002
1003 if (NULL == aconnector) {
1004 DRM_ERROR("aconnector is NULL!\n");
1005 goto drm_connector_null;
1006 }
1007
1008 if (NULL == dm_state) {
1009 DRM_ERROR("dm_state is NULL!\n");
1010 goto dm_state_null;
1011 }
1012
1013 drm_connector = &aconnector->base;
1014 stream = dc_create_stream_for_sink(aconnector->dc_sink);
1015
1016 if (NULL == stream) {
1017 DRM_ERROR("Failed to create stream for sink!\n");
1018 goto stream_create_fail;
1019 }
1020
1021 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
1022 /* Search for preferred mode */
1023 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
1024 native_mode_found = true;
1025 break;
1026 }
1027 }
1028 if (!native_mode_found)
1029 preferred_mode = list_first_entry_or_null(
1030 &aconnector->base.modes,
1031 struct drm_display_mode,
1032 head);
1033
1034 if (NULL == preferred_mode) {
1035 /* This may not be an error, the use case is when we we have no
1036 * usermode calls to reset and set mode upon hotplug. In this
1037 * case, we call set mode ourselves to restore the previous mode
1038 * and the modelist may not be filled in in time.
1039 */
1040 DRM_INFO("No preferred mode found\n");
1041 } else {
1042 decide_crtc_timing_for_drm_display_mode(
1043 &mode, preferred_mode,
1044 dm_state->scaling != RMX_OFF);
1045 }
1046
1047 fill_stream_properties_from_drm_display_mode(stream,
1048 &mode, &aconnector->base);
1049 update_stream_scaling_settings(&mode, dm_state, stream);
1050
1051 fill_audio_info(
1052 &stream->audio_info,
1053 drm_connector,
1054 aconnector->dc_sink);
1055
1056 stream_create_fail:
1057 dm_state_null:
1058 drm_connector_null:
1059 return stream;
1060 }
1061
1062 void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
1063 {
1064 drm_crtc_cleanup(crtc);
1065 kfree(crtc);
1066 }
1067
1068 /* Implemented only the options currently availible for the driver */
1069 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
1070 .reset = drm_atomic_helper_crtc_reset,
1071 .cursor_set = dm_crtc_cursor_set,
1072 .cursor_move = dm_crtc_cursor_move,
1073 .destroy = amdgpu_dm_crtc_destroy,
1074 .gamma_set = drm_atomic_helper_legacy_gamma_set,
1075 .set_config = drm_atomic_helper_set_config,
1076 .page_flip = drm_atomic_helper_page_flip,
1077 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
1078 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
1079 };
1080
1081 static enum drm_connector_status
1082 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
1083 {
1084 bool connected;
1085 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
1086
1087 /* Notes:
1088 * 1. This interface is NOT called in context of HPD irq.
1089 * 2. This interface *is called* in context of user-mode ioctl. Which
1090 * makes it a bad place for *any* MST-related activit. */
1091
1092 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
1093 connected = (aconnector->dc_sink != NULL);
1094 else
1095 connected = (aconnector->base.force == DRM_FORCE_ON);
1096
1097 return (connected ? connector_status_connected :
1098 connector_status_disconnected);
1099 }
1100
1101 int amdgpu_dm_connector_atomic_set_property(
1102 struct drm_connector *connector,
1103 struct drm_connector_state *connector_state,
1104 struct drm_property *property,
1105 uint64_t val)
1106 {
1107 struct drm_device *dev = connector->dev;
1108 struct amdgpu_device *adev = dev->dev_private;
1109 struct dm_connector_state *dm_old_state =
1110 to_dm_connector_state(connector->state);
1111 struct dm_connector_state *dm_new_state =
1112 to_dm_connector_state(connector_state);
1113
1114 struct drm_crtc_state *new_crtc_state;
1115 struct drm_crtc *crtc;
1116 int i;
1117 int ret = -EINVAL;
1118
1119 if (property == dev->mode_config.scaling_mode_property) {
1120 enum amdgpu_rmx_type rmx_type;
1121
1122 switch (val) {
1123 case DRM_MODE_SCALE_CENTER:
1124 rmx_type = RMX_CENTER;
1125 break;
1126 case DRM_MODE_SCALE_ASPECT:
1127 rmx_type = RMX_ASPECT;
1128 break;
1129 case DRM_MODE_SCALE_FULLSCREEN:
1130 rmx_type = RMX_FULL;
1131 break;
1132 case DRM_MODE_SCALE_NONE:
1133 default:
1134 rmx_type = RMX_OFF;
1135 break;
1136 }
1137
1138 if (dm_old_state->scaling == rmx_type)
1139 return 0;
1140
1141 dm_new_state->scaling = rmx_type;
1142 ret = 0;
1143 } else if (property == adev->mode_info.underscan_hborder_property) {
1144 dm_new_state->underscan_hborder = val;
1145 ret = 0;
1146 } else if (property == adev->mode_info.underscan_vborder_property) {
1147 dm_new_state->underscan_vborder = val;
1148 ret = 0;
1149 } else if (property == adev->mode_info.underscan_property) {
1150 dm_new_state->underscan_enable = val;
1151 ret = 0;
1152 }
1153
1154 for_each_crtc_in_state(
1155 connector_state->state,
1156 crtc,
1157 new_crtc_state,
1158 i) {
1159
1160 if (crtc == connector_state->crtc) {
1161 struct drm_plane_state *plane_state;
1162
1163 /*
1164 * Bit of magic done here. We need to ensure
1165 * that planes get update after mode is set.
1166 * So, we need to add primary plane to state,
1167 * and this way atomic_update would be called
1168 * for it
1169 */
1170 plane_state =
1171 drm_atomic_get_plane_state(
1172 connector_state->state,
1173 crtc->primary);
1174
1175 if (!plane_state)
1176 return -EINVAL;
1177 }
1178 }
1179
1180 return ret;
1181 }
1182
1183 void amdgpu_dm_connector_destroy(struct drm_connector *connector)
1184 {
1185 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
1186 const struct dc_link *link = aconnector->dc_link;
1187 struct amdgpu_device *adev = connector->dev->dev_private;
1188 struct amdgpu_display_manager *dm = &adev->dm;
1189 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1190 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1191
1192 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
1193 amdgpu_dm_register_backlight_device(dm);
1194
1195 if (dm->backlight_dev) {
1196 backlight_device_unregister(dm->backlight_dev);
1197 dm->backlight_dev = NULL;
1198 }
1199
1200 }
1201 #endif
1202 drm_connector_unregister(connector);
1203 drm_connector_cleanup(connector);
1204 kfree(connector);
1205 }
1206
1207 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
1208 {
1209 struct dm_connector_state *state =
1210 to_dm_connector_state(connector->state);
1211
1212 kfree(state);
1213
1214 state = kzalloc(sizeof(*state), GFP_KERNEL);
1215
1216 if (state) {
1217 state->scaling = RMX_OFF;
1218 state->underscan_enable = false;
1219 state->underscan_hborder = 0;
1220 state->underscan_vborder = 0;
1221
1222 connector->state = &state->base;
1223 connector->state->connector = connector;
1224 }
1225 }
1226
1227 struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
1228 struct drm_connector *connector)
1229 {
1230 struct dm_connector_state *state =
1231 to_dm_connector_state(connector->state);
1232
1233 struct dm_connector_state *new_state =
1234 kmemdup(state, sizeof(*state), GFP_KERNEL);
1235
1236 if (new_state) {
1237 __drm_atomic_helper_connector_duplicate_state(connector,
1238 &new_state->base);
1239 return &new_state->base;
1240 }
1241
1242 return NULL;
1243 }
1244
1245 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
1246 .reset = amdgpu_dm_connector_funcs_reset,
1247 .detect = amdgpu_dm_connector_detect,
1248 .fill_modes = drm_helper_probe_single_connector_modes,
1249 .destroy = amdgpu_dm_connector_destroy,
1250 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
1251 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1252 .atomic_set_property = amdgpu_dm_connector_atomic_set_property
1253 };
1254
1255 static struct drm_encoder *best_encoder(struct drm_connector *connector)
1256 {
1257 int enc_id = connector->encoder_ids[0];
1258 struct drm_mode_object *obj;
1259 struct drm_encoder *encoder;
1260
1261 DRM_DEBUG_KMS("Finding the best encoder\n");
1262
1263 /* pick the encoder ids */
1264 if (enc_id) {
1265 obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
1266 if (!obj) {
1267 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
1268 return NULL;
1269 }
1270 encoder = obj_to_encoder(obj);
1271 return encoder;
1272 }
1273 DRM_ERROR("No encoder id\n");
1274 return NULL;
1275 }
1276
1277 static int get_modes(struct drm_connector *connector)
1278 {
1279 return amdgpu_dm_connector_get_modes(connector);
1280 }
1281
1282 static void create_eml_sink(struct amdgpu_connector *aconnector)
1283 {
1284 struct dc_sink_init_data init_params = {
1285 .link = aconnector->dc_link,
1286 .sink_signal = SIGNAL_TYPE_VIRTUAL
1287 };
1288 struct edid *edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
1289
1290 if (!aconnector->base.edid_blob_ptr ||
1291 !aconnector->base.edid_blob_ptr->data) {
1292 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
1293 aconnector->base.name);
1294
1295 aconnector->base.force = DRM_FORCE_OFF;
1296 aconnector->base.override_edid = false;
1297 return;
1298 }
1299
1300 aconnector->edid = edid;
1301
1302 aconnector->dc_em_sink = dc_link_add_remote_sink(
1303 aconnector->dc_link,
1304 (uint8_t *)edid,
1305 (edid->extensions + 1) * EDID_LENGTH,
1306 &init_params);
1307
1308 if (aconnector->base.force
1309 == DRM_FORCE_ON)
1310 aconnector->dc_sink = aconnector->dc_link->local_sink ?
1311 aconnector->dc_link->local_sink :
1312 aconnector->dc_em_sink;
1313 }
1314
1315 static void handle_edid_mgmt(struct amdgpu_connector *aconnector)
1316 {
1317 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
1318
1319 /* In case of headless boot with force on for DP managed connector
1320 * Those settings have to be != 0 to get initial modeset
1321 */
1322 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
1323 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
1324 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
1325 }
1326
1327
1328 aconnector->base.override_edid = true;
1329 create_eml_sink(aconnector);
1330 }
1331
1332 int amdgpu_dm_connector_mode_valid(
1333 struct drm_connector *connector,
1334 struct drm_display_mode *mode)
1335 {
1336 int result = MODE_ERROR;
1337 const struct dc_sink *dc_sink;
1338 struct amdgpu_device *adev = connector->dev->dev_private;
1339 struct dc_validation_set val_set = { 0 };
1340 /* TODO: Unhardcode stream count */
1341 struct dc_stream *stream;
1342 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
1343
1344 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
1345 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
1346 return result;
1347
1348 /* Only run this the first time mode_valid is called to initilialize
1349 * EDID mgmt
1350 */
1351 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
1352 !aconnector->dc_em_sink)
1353 handle_edid_mgmt(aconnector);
1354
1355 dc_sink = to_amdgpu_connector(connector)->dc_sink;
1356
1357 if (NULL == dc_sink) {
1358 DRM_ERROR("dc_sink is NULL!\n");
1359 goto null_sink;
1360 }
1361
1362 stream = dc_create_stream_for_sink(dc_sink);
1363 if (NULL == stream) {
1364 DRM_ERROR("Failed to create stream for sink!\n");
1365 goto stream_create_fail;
1366 }
1367
1368 drm_mode_set_crtcinfo(mode, 0);
1369 fill_stream_properties_from_drm_display_mode(stream, mode, connector);
1370
1371 val_set.stream = stream;
1372 val_set.surface_count = 0;
1373 stream->src.width = mode->hdisplay;
1374 stream->src.height = mode->vdisplay;
1375 stream->dst = stream->src;
1376
1377 if (dc_validate_resources(adev->dm.dc, &val_set, 1))
1378 result = MODE_OK;
1379
1380 dc_stream_release(stream);
1381
1382 stream_create_fail:
1383 null_sink:
1384 /* TODO: error handling*/
1385 return result;
1386 }
1387
1388 static const struct drm_connector_helper_funcs
1389 amdgpu_dm_connector_helper_funcs = {
1390 /*
1391 * If hotplug a second bigger display in FB Con mode, bigger resolution
1392 * modes will be filtered by drm_mode_validate_size(), and those modes
1393 * is missing after user start lightdm. So we need to renew modes list.
1394 * in get_modes call back, not just return the modes count
1395 */
1396 .get_modes = get_modes,
1397 .mode_valid = amdgpu_dm_connector_mode_valid,
1398 .best_encoder = best_encoder
1399 };
1400
1401 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
1402 {
1403 }
1404
1405 static int dm_crtc_helper_atomic_check(
1406 struct drm_crtc *crtc,
1407 struct drm_crtc_state *state)
1408 {
1409 return 0;
1410 }
1411
1412 static bool dm_crtc_helper_mode_fixup(
1413 struct drm_crtc *crtc,
1414 const struct drm_display_mode *mode,
1415 struct drm_display_mode *adjusted_mode)
1416 {
1417 return true;
1418 }
1419
1420 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
1421 .disable = dm_crtc_helper_disable,
1422 .atomic_check = dm_crtc_helper_atomic_check,
1423 .mode_fixup = dm_crtc_helper_mode_fixup
1424 };
1425
1426 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
1427 {
1428
1429 }
1430
1431 static int dm_encoder_helper_atomic_check(
1432 struct drm_encoder *encoder,
1433 struct drm_crtc_state *crtc_state,
1434 struct drm_connector_state *conn_state)
1435 {
1436 return 0;
1437 }
1438
1439 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
1440 .disable = dm_encoder_helper_disable,
1441 .atomic_check = dm_encoder_helper_atomic_check
1442 };
1443
1444 static const struct drm_plane_funcs dm_plane_funcs = {
1445 .reset = drm_atomic_helper_plane_reset,
1446 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
1447 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state
1448 };
1449
1450 static int dm_plane_helper_prepare_fb(
1451 struct drm_plane *plane,
1452 struct drm_plane_state *new_state)
1453 {
1454 struct amdgpu_framebuffer *afb;
1455 struct drm_gem_object *obj;
1456 struct amdgpu_bo *rbo;
1457 int r;
1458
1459 if (!new_state->fb) {
1460 DRM_DEBUG_KMS("No FB bound\n");
1461 return 0;
1462 }
1463
1464 afb = to_amdgpu_framebuffer(new_state->fb);
1465
1466 obj = afb->obj;
1467 rbo = gem_to_amdgpu_bo(obj);
1468 r = amdgpu_bo_reserve(rbo, false);
1469 if (unlikely(r != 0))
1470 return r;
1471
1472 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);
1473
1474 amdgpu_bo_unreserve(rbo);
1475
1476 if (unlikely(r != 0)) {
1477 DRM_ERROR("Failed to pin framebuffer\n");
1478 return r;
1479 }
1480
1481 amdgpu_bo_ref(rbo);
1482 return 0;
1483 }
1484
1485 static void dm_plane_helper_cleanup_fb(
1486 struct drm_plane *plane,
1487 struct drm_plane_state *old_state)
1488 {
1489 struct amdgpu_bo *rbo;
1490 struct amdgpu_framebuffer *afb;
1491 int r;
1492
1493 if (!old_state->fb)
1494 return;
1495
1496 afb = to_amdgpu_framebuffer(old_state->fb);
1497 rbo = gem_to_amdgpu_bo(afb->obj);
1498 r = amdgpu_bo_reserve(rbo, false);
1499 if (unlikely(r)) {
1500 DRM_ERROR("failed to reserve rbo before unpin\n");
1501 return;
1502 } else {
1503 amdgpu_bo_unpin(rbo);
1504 amdgpu_bo_unreserve(rbo);
1505 amdgpu_bo_unref(&rbo);
1506 }
1507
1508 afb->address = 0;
1509 }
1510
1511 int dm_create_validation_set_for_connector(struct drm_connector *connector,
1512 struct drm_display_mode *mode, struct dc_validation_set *val_set)
1513 {
1514 int result = MODE_ERROR;
1515 const struct dc_sink *dc_sink =
1516 to_amdgpu_connector(connector)->dc_sink;
1517 /* TODO: Unhardcode stream count */
1518 struct dc_stream *stream;
1519
1520 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
1521 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
1522 return result;
1523
1524 if (NULL == dc_sink) {
1525 DRM_ERROR("dc_sink is NULL!\n");
1526 return result;
1527 }
1528
1529 stream = dc_create_stream_for_sink(dc_sink);
1530
1531 if (NULL == stream) {
1532 DRM_ERROR("Failed to create stream for sink!\n");
1533 return result;
1534 }
1535
1536 drm_mode_set_crtcinfo(mode, 0);
1537
1538 fill_stream_properties_from_drm_display_mode(stream, mode, connector);
1539
1540 val_set->stream = stream;
1541
1542 stream->src.width = mode->hdisplay;
1543 stream->src.height = mode->vdisplay;
1544 stream->dst = stream->src;
1545
1546 return MODE_OK;
1547 }
1548
1549 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
1550 .prepare_fb = dm_plane_helper_prepare_fb,
1551 .cleanup_fb = dm_plane_helper_cleanup_fb,
1552 };
1553
1554 /*
1555 * TODO: these are currently initialized to rgb formats only.
1556 * For future use cases we should either initialize them dynamically based on
1557 * plane capabilities, or initialize this array to all formats, so internal drm
1558 * check will succeed, and let DC to implement proper check
1559 */
1560 static uint32_t rgb_formats[] = {
1561 DRM_FORMAT_XRGB4444,
1562 DRM_FORMAT_ARGB4444,
1563 DRM_FORMAT_RGBA4444,
1564 DRM_FORMAT_ARGB1555,
1565 DRM_FORMAT_RGB565,
1566 DRM_FORMAT_RGB888,
1567 DRM_FORMAT_XRGB8888,
1568 DRM_FORMAT_ARGB8888,
1569 DRM_FORMAT_RGBA8888,
1570 DRM_FORMAT_XRGB2101010,
1571 DRM_FORMAT_XBGR2101010,
1572 DRM_FORMAT_ARGB2101010,
1573 DRM_FORMAT_ABGR2101010,
1574 };
1575
1576 int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
1577 struct amdgpu_crtc *acrtc,
1578 uint32_t crtc_index)
1579 {
1580 int res = -ENOMEM;
1581
1582 struct drm_plane *primary_plane =
1583 kzalloc(sizeof(*primary_plane), GFP_KERNEL);
1584
1585 if (!primary_plane)
1586 goto fail_plane;
1587
1588 primary_plane->format_default = true;
1589
1590 res = drm_universal_plane_init(
1591 dm->adev->ddev,
1592 primary_plane,
1593 0,
1594 &dm_plane_funcs,
1595 rgb_formats,
1596 ARRAY_SIZE(rgb_formats),
1597 NULL,
1598 DRM_PLANE_TYPE_PRIMARY, NULL);
1599
1600 primary_plane->crtc = &acrtc->base;
1601
1602 drm_plane_helper_add(primary_plane, &dm_plane_helper_funcs);
1603
1604 res = drm_crtc_init_with_planes(
1605 dm->ddev,
1606 &acrtc->base,
1607 primary_plane,
1608 NULL,
1609 &amdgpu_dm_crtc_funcs, NULL);
1610
1611 if (res)
1612 goto fail;
1613
1614 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
1615
1616 acrtc->max_cursor_width = 128;
1617 acrtc->max_cursor_height = 128;
1618
1619 acrtc->crtc_id = crtc_index;
1620 acrtc->base.enabled = false;
1621
1622 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
1623 drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
1624
1625 return 0;
1626 fail:
1627 kfree(primary_plane);
1628 fail_plane:
1629 acrtc->crtc_id = -1;
1630 return res;
1631 }
1632
1633 static int to_drm_connector_type(enum signal_type st)
1634 {
1635 switch (st) {
1636 case SIGNAL_TYPE_HDMI_TYPE_A:
1637 return DRM_MODE_CONNECTOR_HDMIA;
1638 case SIGNAL_TYPE_EDP:
1639 return DRM_MODE_CONNECTOR_eDP;
1640 case SIGNAL_TYPE_RGB:
1641 return DRM_MODE_CONNECTOR_VGA;
1642 case SIGNAL_TYPE_DISPLAY_PORT:
1643 case SIGNAL_TYPE_DISPLAY_PORT_MST:
1644 return DRM_MODE_CONNECTOR_DisplayPort;
1645 case SIGNAL_TYPE_DVI_DUAL_LINK:
1646 case SIGNAL_TYPE_DVI_SINGLE_LINK:
1647 return DRM_MODE_CONNECTOR_DVID;
1648 case SIGNAL_TYPE_VIRTUAL:
1649 return DRM_MODE_CONNECTOR_VIRTUAL;
1650
1651 default:
1652 return DRM_MODE_CONNECTOR_Unknown;
1653 }
1654 }
1655
1656 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
1657 {
1658 const struct drm_connector_helper_funcs *helper =
1659 connector->helper_private;
1660 struct drm_encoder *encoder;
1661 struct amdgpu_encoder *amdgpu_encoder;
1662
1663 encoder = helper->best_encoder(connector);
1664
1665 if (encoder == NULL)
1666 return;
1667
1668 amdgpu_encoder = to_amdgpu_encoder(encoder);
1669
1670 amdgpu_encoder->native_mode.clock = 0;
1671
1672 if (!list_empty(&connector->probed_modes)) {
1673 struct drm_display_mode *preferred_mode = NULL;
1674 list_for_each_entry(preferred_mode,
1675 &connector->probed_modes,
1676 head) {
1677 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
1678 amdgpu_encoder->native_mode = *preferred_mode;
1679 }
1680 break;
1681 }
1682
1683 }
1684 }
1685
1686 static struct drm_display_mode *amdgpu_dm_create_common_mode(
1687 struct drm_encoder *encoder, char *name,
1688 int hdisplay, int vdisplay)
1689 {
1690 struct drm_device *dev = encoder->dev;
1691 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1692 struct drm_display_mode *mode = NULL;
1693 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
1694
1695 mode = drm_mode_duplicate(dev, native_mode);
1696
1697 if(mode == NULL)
1698 return NULL;
1699
1700 mode->hdisplay = hdisplay;
1701 mode->vdisplay = vdisplay;
1702 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
1703 strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
1704
1705 return mode;
1706
1707 }
1708
1709 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
1710 struct drm_connector *connector)
1711 {
1712 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1713 struct drm_display_mode *mode = NULL;
1714 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
1715 struct amdgpu_connector *amdgpu_connector =
1716 to_amdgpu_connector(connector);
1717 int i;
1718 int n;
1719 struct mode_size {
1720 char name[DRM_DISPLAY_MODE_LEN];
1721 int w;
1722 int h;
1723 }common_modes[] = {
1724 { "640x480", 640, 480},
1725 { "800x600", 800, 600},
1726 { "1024x768", 1024, 768},
1727 { "1280x720", 1280, 720},
1728 { "1280x800", 1280, 800},
1729 {"1280x1024", 1280, 1024},
1730 { "1440x900", 1440, 900},
1731 {"1680x1050", 1680, 1050},
1732 {"1600x1200", 1600, 1200},
1733 {"1920x1080", 1920, 1080},
1734 {"1920x1200", 1920, 1200}
1735 };
1736
1737 n = sizeof(common_modes) / sizeof(common_modes[0]);
1738
1739 for (i = 0; i < n; i++) {
1740 struct drm_display_mode *curmode = NULL;
1741 bool mode_existed = false;
1742
1743 if (common_modes[i].w > native_mode->hdisplay ||
1744 common_modes[i].h > native_mode->vdisplay ||
1745 (common_modes[i].w == native_mode->hdisplay &&
1746 common_modes[i].h == native_mode->vdisplay))
1747 continue;
1748
1749 list_for_each_entry(curmode, &connector->probed_modes, head) {
1750 if (common_modes[i].w == curmode->hdisplay &&
1751 common_modes[i].h == curmode->vdisplay) {
1752 mode_existed = true;
1753 break;
1754 }
1755 }
1756
1757 if (mode_existed)
1758 continue;
1759
1760 mode = amdgpu_dm_create_common_mode(encoder,
1761 common_modes[i].name, common_modes[i].w,
1762 common_modes[i].h);
1763 drm_mode_probed_add(connector, mode);
1764 amdgpu_connector->num_modes++;
1765 }
1766 }
1767
1768 static void amdgpu_dm_connector_ddc_get_modes(
1769 struct drm_connector *connector,
1770 struct edid *edid)
1771 {
1772 struct amdgpu_connector *amdgpu_connector =
1773 to_amdgpu_connector(connector);
1774
1775 if (edid) {
1776 /* empty probed_modes */
1777 INIT_LIST_HEAD(&connector->probed_modes);
1778 amdgpu_connector->num_modes =
1779 drm_add_edid_modes(connector, edid);
1780
1781 drm_edid_to_eld(connector, edid);
1782
1783 amdgpu_dm_get_native_mode(connector);
1784 } else
1785 amdgpu_connector->num_modes = 0;
1786 }
1787
1788 int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
1789 {
1790 const struct drm_connector_helper_funcs *helper =
1791 connector->helper_private;
1792 struct amdgpu_connector *amdgpu_connector =
1793 to_amdgpu_connector(connector);
1794 struct drm_encoder *encoder;
1795 struct edid *edid = amdgpu_connector->edid;
1796
1797 encoder = helper->best_encoder(connector);
1798
1799 amdgpu_dm_connector_ddc_get_modes(connector, edid);
1800 amdgpu_dm_connector_add_common_modes(encoder, connector);
1801 return amdgpu_connector->num_modes;
1802 }
1803
1804 void amdgpu_dm_connector_init_helper(
1805 struct amdgpu_display_manager *dm,
1806 struct amdgpu_connector *aconnector,
1807 int connector_type,
1808 const struct dc_link *link,
1809 int link_index)
1810 {
1811 struct amdgpu_device *adev = dm->ddev->dev_private;
1812
1813 aconnector->connector_id = link_index;
1814 aconnector->dc_link = link;
1815 aconnector->base.interlace_allowed = false;
1816 aconnector->base.doublescan_allowed = false;
1817 aconnector->base.stereo_allowed = false;
1818 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
1819 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
1820
1821 mutex_init(&aconnector->hpd_lock);
1822
1823 /*configure suport HPD hot plug connector_>polled default value is 0
1824 * which means HPD hot plug not supported*/
1825 switch (connector_type) {
1826 case DRM_MODE_CONNECTOR_HDMIA:
1827 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
1828 break;
1829 case DRM_MODE_CONNECTOR_DisplayPort:
1830 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
1831 break;
1832 case DRM_MODE_CONNECTOR_DVID:
1833 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
1834 break;
1835 default:
1836 break;
1837 }
1838
1839 drm_object_attach_property(&aconnector->base.base,
1840 dm->ddev->mode_config.scaling_mode_property,
1841 DRM_MODE_SCALE_NONE);
1842
1843 drm_object_attach_property(&aconnector->base.base,
1844 adev->mode_info.underscan_property,
1845 UNDERSCAN_OFF);
1846 drm_object_attach_property(&aconnector->base.base,
1847 adev->mode_info.underscan_hborder_property,
1848 0);
1849 drm_object_attach_property(&aconnector->base.base,
1850 adev->mode_info.underscan_vborder_property,
1851 0);
1852
1853 }
1854
1855 int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
1856 struct i2c_msg *msgs, int num)
1857 {
1858 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
1859 struct i2c_command cmd;
1860 int i;
1861 int result = -EIO;
1862
1863 cmd.payloads = kzalloc(num * sizeof(struct i2c_payload), GFP_KERNEL);
1864
1865 if (!cmd.payloads)
1866 return result;
1867
1868 cmd.number_of_payloads = num;
1869 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
1870 cmd.speed = 100;
1871
1872 for (i = 0; i < num; i++) {
1873 cmd.payloads[i].write = (msgs[i].flags & I2C_M_RD);
1874 cmd.payloads[i].address = msgs[i].addr;
1875 cmd.payloads[i].length = msgs[i].len;
1876 cmd.payloads[i].data = msgs[i].buf;
1877 }
1878
1879 if (dc_submit_i2c(i2c->dm->dc, i2c->link_index, &cmd))
1880 result = num;
1881
1882 kfree(cmd.payloads);
1883
1884 return result;
1885 }
1886
1887 u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
1888 {
1889 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1890 }
1891
1892 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
1893 .master_xfer = amdgpu_dm_i2c_xfer,
1894 .functionality = amdgpu_dm_i2c_func,
1895 };
1896
1897 struct amdgpu_i2c_adapter *create_i2c(unsigned int link_index, struct amdgpu_display_manager *dm, int *res)
1898 {
1899 struct amdgpu_i2c_adapter *i2c;
1900
1901 i2c = kzalloc(sizeof (struct amdgpu_i2c_adapter), GFP_KERNEL);
1902 i2c->dm = dm;
1903 i2c->base.owner = THIS_MODULE;
1904 i2c->base.class = I2C_CLASS_DDC;
1905 i2c->base.dev.parent = &dm->adev->pdev->dev;
1906 i2c->base.algo = &amdgpu_dm_i2c_algo;
1907 snprintf(i2c->base.name, sizeof (i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
1908 i2c->link_index = link_index;
1909 i2c_set_adapdata(&i2c->base, i2c);
1910
1911 return i2c;
1912 }
1913
1914 /* Note: this function assumes that dc_link_detect() was called for the
1915 * dc_link which will be represented by this aconnector. */
1916 int amdgpu_dm_connector_init(
1917 struct amdgpu_display_manager *dm,
1918 struct amdgpu_connector *aconnector,
1919 uint32_t link_index,
1920 struct amdgpu_encoder *aencoder)
1921 {
1922 int res = 0;
1923 int connector_type;
1924 struct dc *dc = dm->dc;
1925 const struct dc_link *link = dc_get_link_at_index(dc, link_index);
1926 struct amdgpu_i2c_adapter *i2c;
1927
1928 DRM_DEBUG_KMS("%s()\n", __func__);
1929
1930 i2c = create_i2c(link->link_index, dm, &res);
1931 aconnector->i2c = i2c;
1932 res = i2c_add_adapter(&i2c->base);
1933
1934 if (res) {
1935 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
1936 goto out_free;
1937 }
1938
1939 connector_type = to_drm_connector_type(link->connector_signal);
1940
1941 res = drm_connector_init(
1942 dm->ddev,
1943 &aconnector->base,
1944 &amdgpu_dm_connector_funcs,
1945 connector_type);
1946
1947 if (res) {
1948 DRM_ERROR("connector_init failed\n");
1949 aconnector->connector_id = -1;
1950 goto out_free;
1951 }
1952
1953 drm_connector_helper_add(
1954 &aconnector->base,
1955 &amdgpu_dm_connector_helper_funcs);
1956
1957 amdgpu_dm_connector_init_helper(
1958 dm,
1959 aconnector,
1960 connector_type,
1961 link,
1962 link_index);
1963
1964 drm_mode_connector_attach_encoder(
1965 &aconnector->base, &aencoder->base);
1966
1967 drm_connector_register(&aconnector->base);
1968
1969 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
1970 || connector_type == DRM_MODE_CONNECTOR_eDP)
1971 amdgpu_dm_initialize_mst_connector(dm, aconnector);
1972
1973 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1974 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1975
	/* NOTE: this currently will create a backlight device even if a panel
	 * is not connected to the eDP/LVDS connector.
	 *
	 * This is less than ideal, but we don't have sink information at this
	 * stage since detection only happens later. We can't do detection
	 * earlier since MST detection needs the connectors to be created first.
	 */
1983 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
		/* Even if registration failed, we should continue with
		 * DM initialization, because not having backlight control
		 * is better than a black screen. */
1987 amdgpu_dm_register_backlight_device(dm);
1988
1989 if (dm->backlight_dev)
1990 dm->backlight_link = link;
1991 }
1992 #endif
1993
1994 out_free:
1995 if (res) {
1996 kfree(i2c);
1997 aconnector->i2c = NULL;
1998 }
1999 return res;
2000 }
2001
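/*
 * Builds the possible_crtcs bitmask for an encoder: one bit per crtc, which
 * makes the switch below equivalent to (1 << num_crtc) - 1 capped at the six
 * crtcs supported here (e.g. num_crtc == 4 yields 0xf).
 */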
2002 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
2003 {
2004 switch (adev->mode_info.num_crtc) {
2005 case 1:
2006 return 0x1;
2007 case 2:
2008 return 0x3;
2009 case 3:
2010 return 0x7;
2011 case 4:
2012 return 0xf;
2013 case 5:
2014 return 0x1f;
2015 case 6:
2016 default:
2017 return 0x3f;
2018 }
2019 }
2020
2021 int amdgpu_dm_encoder_init(
2022 struct drm_device *dev,
2023 struct amdgpu_encoder *aencoder,
2024 uint32_t link_index)
2025 {
2026 struct amdgpu_device *adev = dev->dev_private;
2027
2028 int res = drm_encoder_init(dev,
2029 &aencoder->base,
2030 &amdgpu_dm_encoder_funcs,
2031 DRM_MODE_ENCODER_TMDS,
2032 NULL);
2033
2034 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
2035
2036 if (!res)
2037 aencoder->encoder_id = link_index;
2038 else
2039 aencoder->encoder_id = -1;
2040
2041 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
2042
2043 return res;
2044 }
2045
2046 enum dm_commit_action {
2047 DM_COMMIT_ACTION_NOTHING,
2048 DM_COMMIT_ACTION_RESET,
2049 DM_COMMIT_ACTION_DPMS_ON,
2050 DM_COMMIT_ACTION_DPMS_OFF,
2051 DM_COMMIT_ACTION_SET
2052 };
2053
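/*
 * Derives the DM commit action from a crtc state: a mode or enable change
 * maps to SET (when the crtc ends up active) or RESET, a connectors change
 * on an active crtc maps to SET, a bare active change maps to
 * DPMS_ON/DPMS_OFF, and anything else to NOTHING.
 */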
2054 static enum dm_commit_action get_dm_commit_action(struct drm_crtc_state *state)
2055 {
2056 /* mode changed means either actually mode changed or enabled changed */
2057 /* active changed means dpms changed */
2058
	DRM_DEBUG_KMS("crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d, active_changed:%d, connectors_changed:%d\n",
2060 state->enable,
2061 state->active,
2062 state->planes_changed,
2063 state->mode_changed,
2064 state->active_changed,
2065 state->connectors_changed);
2066
2067 if (state->mode_changed) {
		/* if it got disabled - call reset mode */
2069 if (!state->enable)
2070 return DM_COMMIT_ACTION_RESET;
2071
2072 if (state->active)
2073 return DM_COMMIT_ACTION_SET;
2074 else
2075 return DM_COMMIT_ACTION_RESET;
2076 } else {
2077 /* ! mode_changed */
2078
		/* if it remains disabled - skip it */
2080 if (!state->enable)
2081 return DM_COMMIT_ACTION_NOTHING;
2082
2083 if (state->active && state->connectors_changed)
2084 return DM_COMMIT_ACTION_SET;
2085
2086 if (state->active_changed) {
2087 if (state->active) {
2088 return DM_COMMIT_ACTION_DPMS_ON;
2089 } else {
2090 return DM_COMMIT_ACTION_DPMS_OFF;
2091 }
2092 } else {
2093 /* ! active_changed */
2094 return DM_COMMIT_ACTION_NOTHING;
2095 }
2096 }
2097 }
2098
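/*
 * Enables or disables vblank and pageflip interrupt handling for a crtc:
 * called with enable == false before a crtc is torn down and with
 * enable == true once its new stream has been committed to DC.
 */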
2099 static void manage_dm_interrupts(
2100 struct amdgpu_device *adev,
2101 struct amdgpu_crtc *acrtc,
2102 bool enable)
2103 {
	/*
	 * this is not a correct translation, but it will work as long as the
	 * VBLANK constant is the same as PFLIP
	 */
2108 int irq_type =
2109 amdgpu_crtc_idx_to_irq_type(
2110 adev,
2111 acrtc->crtc_id);
2112
2113 if (enable) {
2114 drm_crtc_vblank_on(&acrtc->base);
2115 amdgpu_irq_get(
2116 adev,
2117 &adev->pageflip_irq,
2118 irq_type);
2119 } else {
2120
2121 amdgpu_irq_put(
2122 adev,
2123 &adev->pageflip_irq,
2124 irq_type);
2125 drm_crtc_vblank_off(&acrtc->base);
2126 }
2127 }
2128
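/*
 * Returns true if the scaling mode or the underscan configuration changed
 * between the old and new connector state, i.e. if the stream's scaling
 * settings need to be reprogrammed.
 */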
2129 static bool is_scaling_state_different(
2130 const struct dm_connector_state *dm_state,
2131 const struct dm_connector_state *old_dm_state)
2132 {
2133 if (dm_state->scaling != old_dm_state->scaling)
2134 return true;
2135 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
2136 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
2137 return true;
2138 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
2139 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
2140 return true;
2141 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder
2142 || dm_state->underscan_vborder != old_dm_state->underscan_vborder)
2143 return true;
2144 return false;
2145 }
2146
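/*
 * Tears down the stream currently bound to a crtc: interrupts are disabled
 * first, the stream is removed from the freesync module, and the dc_stream
 * reference is released.
 */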
2147 static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc)
2148 {
	/*
	 * avoid vblank and pflip interrupts on a crtc that
	 * is about to be changed
	 */
2153 manage_dm_interrupts(adev, acrtc, false);
2154
2155 /* this is the update mode case */
2156 if (adev->dm.freesync_module)
2157 mod_freesync_remove_stream(adev->dm.freesync_module,
2158 acrtc->stream);
2159
2160 dc_stream_release(acrtc->stream);
2161 acrtc->stream = NULL;
2162 acrtc->otg_inst = -1;
2163 acrtc->enabled = false;
2164 }
2165
2166
2167 /*
2168 * Executes flip
2169 *
2170 * Waits on all BO's fences and for proper vblank count
2171 */
2172 static void amdgpu_dm_do_flip(
2173 struct drm_crtc *crtc,
2174 struct drm_framebuffer *fb,
2175 uint32_t target)
2176 {
2177 unsigned long flags;
2178 uint32_t target_vblank;
2179 int r, vpos, hpos;
2180 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2181 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
2182 struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
2183 struct amdgpu_device *adev = crtc->dev->dev_private;
2184 bool async_flip = (acrtc->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
2185
2186
	/* TODO: This might fail and hence is better not relied upon; wait
	 * explicitly on the fences instead. In general this should be
	 * called for blocking commits too, as per the framework helpers.
	 */
2192 r = amdgpu_bo_reserve(abo, true);
2193 if (unlikely(r != 0)) {
2194 DRM_ERROR("failed to reserve buffer before flip\n");
		WARN_ON(1);
2196 }
2197
2198 /* Wait for all fences on this FB */
2199 WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
2200 MAX_SCHEDULE_TIMEOUT) < 0);
2201
2202 amdgpu_bo_unreserve(abo);
2203
2204 /* Wait for target vblank */
2205 /* Wait until we're out of the vertical blank period before the one
2206 * targeted by the flip
2207 */
2208 target_vblank = target - drm_crtc_vblank_count(crtc) +
2209 amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
2210
2211 while ((acrtc->enabled &&
2212 (amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
2213 &vpos, &hpos, NULL, NULL,
2214 &crtc->hwmode)
2215 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
2216 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
2217 (int)(target_vblank -
2218 amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
2219 usleep_range(1000, 1100);
2220 }
2221
2222 /* Flip */
2223 spin_lock_irqsave(&crtc->dev->event_lock, flags);
2224 /* update crtc fb */
2225 crtc->primary->fb = fb;
2226
2227 /* Do the flip (mmio) */
2228 adev->mode_info.funcs->page_flip(adev, acrtc->crtc_id, afb->address, async_flip);
2229
2230 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
2231 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
2232 acrtc->crtc_id);
2233 }
2234
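/*
 * Commit tail: walks the crtcs and creates or removes dc streams according
 * to the commit action derived from each crtc state, registers the new
 * streams with the freesync module, commits the resulting stream set via
 * dc_commit_streams(), then commits surfaces for the modeset crtcs,
 * programs page flips for the flip-only planes, and finally signals
 * completion and delivers the pending crtc events.
 */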
2235 void amdgpu_dm_atomic_commit_tail(
2236 struct drm_atomic_state *state)
2237 {
2238 struct drm_device *dev = state->dev;
2239 struct amdgpu_device *adev = dev->dev_private;
2240 struct amdgpu_display_manager *dm = &adev->dm;
2241 struct drm_plane *plane;
2242 struct drm_plane_state *old_plane_state;
2243 uint32_t i;
2244 uint32_t commit_streams_count = 0;
2245 uint32_t new_crtcs_count = 0;
2246 struct drm_crtc *crtc;
2247 struct drm_crtc_state *old_crtc_state;
2248 const struct dc_stream *commit_streams[MAX_STREAMS];
2249 struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
2250 const struct dc_stream *new_stream;
2251 unsigned long flags;
2252 bool wait_for_vblank = true;
2253
2254
2255 drm_atomic_helper_update_legacy_modeset_state(dev, state);
2256
2257 /* update changed items */
2258 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
2259 struct amdgpu_crtc *acrtc;
2260 struct amdgpu_connector *aconnector = NULL;
2261 enum dm_commit_action action;
2262 struct drm_crtc_state *new_state = crtc->state;
2263
2264 acrtc = to_amdgpu_crtc(crtc);
2265
2266 aconnector =
2267 amdgpu_dm_find_first_crct_matching_connector(
2268 state,
2269 crtc,
2270 false);
2271
2272 /* handles headless hotplug case, updating new_state and
2273 * aconnector as needed
2274 */
2275
2276 action = get_dm_commit_action(new_state);
2277
2278 switch (action) {
2279 case DM_COMMIT_ACTION_DPMS_ON:
2280 case DM_COMMIT_ACTION_SET: {
2281 struct dm_connector_state *dm_state = NULL;
2282 new_stream = NULL;
2283
2284 if (aconnector)
2285 dm_state = to_dm_connector_state(aconnector->base.state);
2286
2287 new_stream = create_stream_for_sink(
2288 aconnector,
2289 &crtc->state->mode,
2290 dm_state);
2291
2292 DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
2293
2294 if (!new_stream) {
				/*
				 * this could happen because of issues with
				 * userspace notification delivery.
				 * In this case userspace tries to set a mode on
				 * a display which is in fact disconnected.
				 * dc_sink is NULL on the aconnector in this case.
				 * We expect a reset mode to come soon.
				 *
				 * This can also happen when an unplug is done
				 * during the resume sequence
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state
				 */
2310 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
2311 __func__, acrtc->base.base.id);
2312 break;
2313 }
2314
2315 if (acrtc->stream)
2316 remove_stream(adev, acrtc);
2317
			/*
			 * this loop saves the set-mode crtcs:
			 * we need to enable vblanks once all
			 * resources are acquired in dc, after dc_commit_streams
			 */
2323 new_crtcs[new_crtcs_count] = acrtc;
2324 new_crtcs_count++;
2325
2326 acrtc->stream = new_stream;
2327 acrtc->enabled = true;
2328 acrtc->hw_mode = crtc->state->mode;
2329 crtc->hwmode = crtc->state->mode;
2330
2331 break;
2332 }
2333
2334 case DM_COMMIT_ACTION_NOTHING: {
2335 struct dm_connector_state *dm_state = NULL;
2336
2337 if (!aconnector)
2338 break;
2339
2340 dm_state = to_dm_connector_state(aconnector->base.state);
2341
2342 /* Scaling update */
2343 update_stream_scaling_settings(&crtc->state->mode,
2344 dm_state, acrtc->stream);
2345
2346 break;
2347 }
2348 case DM_COMMIT_ACTION_DPMS_OFF:
2349 case DM_COMMIT_ACTION_RESET:
2350 DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
2351 /* i.e. reset mode */
2352 if (acrtc->stream)
2353 remove_stream(adev, acrtc);
2354 break;
2355 } /* switch() */
2356 } /* for_each_crtc_in_state() */
2357
2358 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2359
2360 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2361
2362 if (acrtc->stream) {
2363 commit_streams[commit_streams_count] = acrtc->stream;
2364 ++commit_streams_count;
2365 }
2366 }
2367
	/*
	 * Add streams to the freesync module only after the required new
	 * and replaced streams have been removed from it
	 */
2372 if (adev->dm.freesync_module) {
2373 for (i = 0; i < new_crtcs_count; i++) {
2374 struct amdgpu_connector *aconnector = NULL;
2375 new_stream = new_crtcs[i]->stream;
2376 aconnector =
2377 amdgpu_dm_find_first_crct_matching_connector(
2378 state,
2379 &new_crtcs[i]->base,
2380 false);
2381 if (!aconnector) {
2382 DRM_INFO(
2383 "Atomic commit: Failed to find connector for acrtc id:%d "
2384 "skipping freesync init\n",
2385 new_crtcs[i]->crtc_id);
2386 continue;
2387 }
2388
2389 mod_freesync_add_stream(adev->dm.freesync_module,
2390 new_stream, &aconnector->caps);
2391 }
2392 }
2393
2394 /* DC is optimized not to do anything if 'streams' didn't change. */
2395 dc_commit_streams(dm->dc, commit_streams, commit_streams_count);
2396
2397 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2398 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2399
2400 if (acrtc->stream != NULL)
2401 acrtc->otg_inst =
2402 dc_stream_get_status(acrtc->stream)->primary_otg_inst;
2403 }
2404
2405 /* update planes when needed */
2406 for_each_plane_in_state(state, plane, old_plane_state, i) {
2407 struct drm_plane_state *plane_state = plane->state;
2408 struct drm_crtc *crtc = plane_state->crtc;
2409 struct drm_framebuffer *fb = plane_state->fb;
2410 struct drm_connector *connector;
2411 struct dm_connector_state *dm_state = NULL;
2412 enum dm_commit_action action;
2413 bool pflip_needed;
2414
2415 if (!fb || !crtc || !crtc->state->active)
2416 continue;
2417
2418 action = get_dm_commit_action(crtc->state);
2419
2420 /* Surfaces are created under two scenarios:
2421 * 1. This commit is not a page flip.
2422 * 2. This commit is a page flip, and streams are created.
2423 */
2424 pflip_needed = !state->allow_modeset;
2425 if (!pflip_needed ||
2426 action == DM_COMMIT_ACTION_DPMS_ON ||
2427 action == DM_COMMIT_ACTION_SET) {
2428 list_for_each_entry(connector,
2429 &dev->mode_config.connector_list, head) {
2430 if (connector->state->crtc == crtc) {
2431 dm_state = to_dm_connector_state(
2432 connector->state);
2433 break;
2434 }
2435 }
2436
			/*
			 * This situation happens in the following case:
			 * we are about to get a set mode for a connector whose only
			 * possible crtc (in the encoder crtc mask) is used by
			 * another connector, which is why the core will try to
			 * re-assign crtcs in order to make the configuration
			 * supported. For our implementation we need to make all
			 * encoders support all crtcs, then this issue will
			 * never arise again. But to guard the code from this
			 * issue, the check is left in.
			 *
			 * It should also be needed when used with an actual
			 * drm_atomic_commit ioctl in the future
			 */
2451 if (!dm_state)
2452 continue;
2453
2454 dm_dc_surface_commit(dm->dc, crtc);
2455 }
2456 }
2457
2458 for (i = 0; i < new_crtcs_count; i++) {
2459 /*
2460 * loop to enable interrupts on newly arrived crtc
2461 */
2462 struct amdgpu_crtc *acrtc = new_crtcs[i];
2463
2464 if (adev->dm.freesync_module)
2465 mod_freesync_notify_mode_change(
2466 adev->dm.freesync_module, &acrtc->stream, 1);
2467
2468 manage_dm_interrupts(adev, acrtc, true);
2469 dm_crtc_cursor_reset(&acrtc->base);
2470
2471 }
2472
2473 for_each_plane_in_state(state, plane, old_plane_state, i) {
2474 struct drm_plane_state *plane_state = plane->state;
2475 struct drm_crtc *crtc = plane_state->crtc;
2476 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2477 struct drm_framebuffer *fb = plane_state->fb;
2478 bool pflip_needed;
2479
2480 if (!fb || !crtc || !crtc->state->planes_changed ||
2481 !crtc->state->active)
2482 continue;
2483 pflip_needed = !state->allow_modeset;
2484
2485 if (pflip_needed) {
2486 amdgpu_dm_do_flip(
2487 crtc,
2488 fb,
2489 drm_crtc_vblank_count(crtc));
2490
2491 wait_for_vblank =
2492 acrtc->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
2493 false : true;
2494 /*clean up the flags for next usage*/
2495 acrtc->flip_flags = 0;
2496 }
2497 }
2498
2499
	/* TODO: mark the consumed event on every crtc that was assigned one
	 * in drm_atomic_helper_setup_commit, just to signal completion
	 */
2503 spin_lock_irqsave(&adev->ddev->event_lock, flags);
2504 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
2505 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2506
2507 if (acrtc->base.state->event &&
2508 acrtc->base.state->event->event.base.type != DRM_EVENT_FLIP_COMPLETE) {
2509 acrtc->event = acrtc->base.state->event;
2510 acrtc->base.state->event = NULL;
2511 }
2512 }
2513 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2514
2515 /* Signal HW programming completion */
2516 drm_atomic_helper_commit_hw_done(state);
2517
2518 if (wait_for_vblank)
2519 drm_atomic_helper_wait_for_vblanks(dev, state);
2520
	/* TODO: send the vblank event on every crtc that was assigned one
	 * in drm_atomic_helper_setup_commit, just to signal completion
	 */
2524 spin_lock_irqsave(&adev->ddev->event_lock, flags);
2525 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
2526 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2527
2528 if (acrtc->event &&
2529 acrtc->event->event.base.type != DRM_EVENT_FLIP_COMPLETE) {
2530 drm_send_event_locked(dev, &acrtc->event->base);
2531 acrtc->event = NULL;
2532 }
2533 }
2534 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2535
	/* TODO: Is it too early if the actual flip hasn't happened yet? */
2537 /* Release old FB */
2538 drm_atomic_helper_cleanup_planes(dev, state);
2539 }
2540
2541
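/*
 * Builds a minimal atomic state (connector, crtc and primary plane) with
 * mode_changed forced to true and commits it, so the previous display
 * configuration is reprogrammed without any involvement from usermode.
 */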
2542 static int dm_force_atomic_commit(struct drm_connector *connector)
2543 {
2544 int ret = 0;
2545 struct drm_device *ddev = connector->dev;
2546 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
2547 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
2548 struct drm_plane *plane = disconnected_acrtc->base.primary;
2549 struct drm_connector_state *conn_state;
2550 struct drm_crtc_state *crtc_state;
2551 struct drm_plane_state *plane_state;
2552
2553 if (!state)
2554 return -ENOMEM;
2555
2556 state->acquire_ctx = ddev->mode_config.acquire_ctx;
2557
2558 /* Construct an atomic state to restore previous display setting */
2559
2560 /*
2561 * Attach connectors to drm_atomic_state
2562 */
2563 conn_state = drm_atomic_get_connector_state(state, connector);
2564
2565 ret = PTR_ERR_OR_ZERO(conn_state);
2566 if (ret)
2567 goto err;
2568
2569 /* Attach crtc to drm_atomic_state*/
2570 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
2571
2572 ret = PTR_ERR_OR_ZERO(crtc_state);
2573 if (ret)
2574 goto err;
2575
2576 /* force a restore */
2577 crtc_state->mode_changed = true;
2578
2579 /* Attach plane to drm_atomic_state */
2580 plane_state = drm_atomic_get_plane_state(state, plane);
2581
2582 ret = PTR_ERR_OR_ZERO(plane_state);
2583 if (ret)
2584 goto err;
2585
2586
2587 /* Call commit internally with the state we just constructed */
2588 ret = drm_atomic_commit(state);
2589 if (!ret)
2590 return 0;
2591
2592 err:
2593 DRM_ERROR("Restoring old state failed with %i\n", ret);
2594 drm_atomic_state_put(state);
2595
2596 return ret;
2597 }
2598
/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes the case when the same display is unplugged and then plugged
 * back into the same port, and the case when we are running without usermode
 * desktop manager support
 */
2604 void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector)
2605 {
2606 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
2607 struct amdgpu_crtc *disconnected_acrtc;
2608
2609 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
2610 return;
2611
2612 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
2613
2614 if (!disconnected_acrtc || !disconnected_acrtc->stream)
2615 return;
2616
	/*
	 * If the previous sink is not released and is different from the
	 * current one, we deduce we are in a state where we cannot rely on a
	 * usermode call to turn the display on, so we do it here
	 */
2622 if (disconnected_acrtc->stream->sink != aconnector->dc_sink)
2623 dm_force_atomic_commit(&aconnector->base);
2624 }
2625
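/*
 * Helpers for maintaining the dc_validation_set array used by atomic_check:
 * attach a surface to the entry of an existing stream, replace an old stream
 * with a new one (appending a new entry if the old one is not found), or
 * remove a stream entry altogether.
 */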
2626 static uint32_t add_val_sets_surface(
2627 struct dc_validation_set *val_sets,
2628 uint32_t set_count,
2629 const struct dc_stream *stream,
2630 const struct dc_surface *surface)
2631 {
2632 uint32_t i = 0;
2633
2634 while (i < set_count) {
2635 if (val_sets[i].stream == stream)
2636 break;
2637 ++i;
2638 }

	if (i == set_count)
		return 0; /* stream not found; avoid writing past the end of val_sets */

	val_sets[i].surfaces[val_sets[i].surface_count] = surface;
	val_sets[i].surface_count++;
2642
2643 return val_sets[i].surface_count;
2644 }
2645
2646 static uint32_t update_in_val_sets_stream(
2647 struct dc_validation_set *val_sets,
2648 struct drm_crtc **crtcs,
2649 uint32_t set_count,
2650 const struct dc_stream *old_stream,
2651 const struct dc_stream *new_stream,
2652 struct drm_crtc *crtc)
2653 {
2654 uint32_t i = 0;
2655
2656 while (i < set_count) {
2657 if (val_sets[i].stream == old_stream)
2658 break;
2659 ++i;
2660 }
2661
2662 val_sets[i].stream = new_stream;
2663 crtcs[i] = crtc;
2664
2665 if (i == set_count) {
2666 /* nothing found. add new one to the end */
2667 return set_count + 1;
2668 }
2669
2670 return set_count;
2671 }
2672
2673 static uint32_t remove_from_val_sets(
2674 struct dc_validation_set *val_sets,
2675 uint32_t set_count,
2676 const struct dc_stream *stream)
2677 {
2678 int i;
2679
2680 for (i = 0; i < set_count; i++)
2681 if (val_sets[i].stream == stream)
2682 break;
2683
2684 if (i == set_count) {
2685 /* nothing found */
2686 return set_count;
2687 }
2688
2689 set_count--;
2690
2691 for (; i < set_count; i++) {
2692 val_sets[i] = val_sets[i + 1];
2693 }
2694
2695 return set_count;
2696 }
2697
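/*
 * Atomic check: once the generic drm helper checks pass, mirror the
 * requested state into a dc_validation_set array (streams plus their
 * surfaces) and ask DC through dc_validate_resources() whether the
 * configuration can be supported. All temporary streams and surfaces are
 * released again before returning.
 */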
2698 int amdgpu_dm_atomic_check(struct drm_device *dev,
2699 struct drm_atomic_state *state)
2700 {
2701 struct drm_crtc *crtc;
2702 struct drm_crtc_state *crtc_state;
2703 struct drm_plane *plane;
2704 struct drm_plane_state *plane_state;
2705 int i, j;
2706 int ret;
2707 int set_count;
2708 int new_stream_count;
2709 struct dc_validation_set set[MAX_STREAMS] = {{ 0 }};
2710 struct dc_stream *new_streams[MAX_STREAMS] = { 0 };
2711 struct drm_crtc *crtc_set[MAX_STREAMS] = { 0 };
2712 struct amdgpu_device *adev = dev->dev_private;
2713 struct dc *dc = adev->dm.dc;
2714 bool need_to_validate = false;
2715
2716 ret = drm_atomic_helper_check(dev, state);
2717
2718 if (ret) {
2719 DRM_ERROR("Atomic state validation failed with error :%d !\n",
2720 ret);
2721 return ret;
2722 }
2723
2724 ret = -EINVAL;
2725
2726 /* copy existing configuration */
2727 new_stream_count = 0;
2728 set_count = 0;
2729 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2730
2731 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2732
2733 if (acrtc->stream) {
2734 set[set_count].stream = acrtc->stream;
2735 crtc_set[set_count] = crtc;
2736 ++set_count;
2737 }
2738 }
2739
2740 /* update changed items */
2741 for_each_crtc_in_state(state, crtc, crtc_state, i) {
2742 struct amdgpu_crtc *acrtc = NULL;
2743 struct amdgpu_connector *aconnector = NULL;
2744 enum dm_commit_action action;
2745
2746 acrtc = to_amdgpu_crtc(crtc);
2747
2748 aconnector = amdgpu_dm_find_first_crct_matching_connector(state, crtc, true);
2749
2750 action = get_dm_commit_action(crtc_state);
2751
2752 switch (action) {
2753 case DM_COMMIT_ACTION_DPMS_ON:
2754 case DM_COMMIT_ACTION_SET: {
2755 struct dc_stream *new_stream = NULL;
2756 struct drm_connector_state *conn_state = NULL;
2757 struct dm_connector_state *dm_state = NULL;
2758
2759 if (aconnector) {
2760 conn_state = drm_atomic_get_connector_state(state, &aconnector->base);
2761 if (IS_ERR(conn_state))
2762 return ret;
2763 dm_state = to_dm_connector_state(conn_state);
2764 }
2765
2766 new_stream = create_stream_for_sink(aconnector, &crtc_state->mode, dm_state);
2767
			/*
			 * we can have no stream on ACTION_SET if a display
			 * was disconnected during S3; in this case it is not
			 * an error, the OS will be updated after detection
			 * and will do the right thing on the next atomic commit
			 */
2774 if (!new_stream) {
2775 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
2776 __func__, acrtc->base.base.id);
2777 break;
2778 }
2779
2780 new_streams[new_stream_count] = new_stream;
2781 set_count = update_in_val_sets_stream(
2782 set,
2783 crtc_set,
2784 set_count,
2785 acrtc->stream,
2786 new_stream,
2787 crtc);
2788
2789 new_stream_count++;
2790 need_to_validate = true;
2791 break;
2792 }
2793
2794 case DM_COMMIT_ACTION_NOTHING: {
2795 const struct drm_connector *drm_connector = NULL;
2796 struct drm_connector_state *conn_state = NULL;
2797 struct dm_connector_state *dm_state = NULL;
2798 struct dm_connector_state *old_dm_state = NULL;
2799 struct dc_stream *new_stream;
2800
2801 if (!aconnector)
2802 break;
2803
2804 for_each_connector_in_state(
2805 state, drm_connector, conn_state, j) {
2806 if (&aconnector->base == drm_connector)
2807 break;
2808 }
2809
2810 old_dm_state = to_dm_connector_state(drm_connector->state);
2811 dm_state = to_dm_connector_state(conn_state);
2812
2813 /* Support underscan adjustment*/
2814 if (!is_scaling_state_different(dm_state, old_dm_state))
2815 break;
2816
2817 new_stream = create_stream_for_sink(aconnector, &crtc_state->mode, dm_state);
2818
2819 if (!new_stream) {
2820 DRM_ERROR("%s: Failed to create new stream for crtc %d\n",
2821 __func__, acrtc->base.base.id);
2822 break;
2823 }
2824
2825 new_streams[new_stream_count] = new_stream;
2826 set_count = update_in_val_sets_stream(
2827 set,
2828 crtc_set,
2829 set_count,
2830 acrtc->stream,
2831 new_stream,
2832 crtc);
2833
2834 new_stream_count++;
2835 need_to_validate = true;
2836
2837 break;
2838 }
2839 case DM_COMMIT_ACTION_DPMS_OFF:
2840 case DM_COMMIT_ACTION_RESET:
2841 /* i.e. reset mode */
2842 if (acrtc->stream) {
2843 set_count = remove_from_val_sets(
2844 set,
2845 set_count,
2846 acrtc->stream);
2847 }
2848 break;
2849 }
2850
2851 /*
2852 * TODO revisit when removing commit action
2853 * and looking at atomic flags directly
2854 */
2855
		/* commit needs planes right now (e.g. for gamma) */
		/* TODO: rework commit to check the crtc for gamma changes */
2858 ret = drm_atomic_add_affected_planes(state, crtc);
2859 if (ret)
2860 return ret;
2861 }
2862
2863 for (i = 0; i < set_count; i++) {
2864 for_each_plane_in_state(state, plane, plane_state, j) {
2865 struct drm_crtc *crtc = plane_state->crtc;
2866 struct drm_framebuffer *fb = plane_state->fb;
2867 struct drm_connector *connector;
2868 struct dm_connector_state *dm_state = NULL;
2869 enum dm_commit_action action;
2870 struct drm_crtc_state *crtc_state;
2871 bool pflip_needed;
2872
2873
2874 if (!fb || !crtc || crtc_set[i] != crtc ||
2875 !crtc->state->planes_changed || !crtc->state->active)
2876 continue;
2877
2878 action = get_dm_commit_action(crtc->state);
2879
2880 /* Surfaces are created under two scenarios:
2881 * 1. This commit is not a page flip.
2882 * 2. This commit is a page flip, and streams are created.
2883 */
2884 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2885 pflip_needed = !state->allow_modeset;
2886 if (!pflip_needed ||
2887 action == DM_COMMIT_ACTION_DPMS_ON ||
2888 action == DM_COMMIT_ACTION_SET) {
2889 struct dc_surface *surface;
2890
2891 list_for_each_entry(connector,
2892 &dev->mode_config.connector_list, head) {
2893 if (connector->state->crtc == crtc) {
2894 dm_state = to_dm_connector_state(
2895 connector->state);
2896 break;
2897 }
2898 }
2899
				/*
				 * This situation happens in the following case:
				 * we are about to get a set mode for a connector whose only
				 * possible crtc (in the encoder crtc mask) is used by
				 * another connector, which is why the core will try to
				 * re-assign crtcs in order to make the configuration
				 * supported. For our implementation we need to make all
				 * encoders support all crtcs, then this issue will
				 * never arise again. But to guard the code from this
				 * issue, the check is left in.
				 *
				 * It should also be needed when used with an actual
				 * drm_atomic_commit ioctl in the future
				 */
2914 if (!dm_state)
2915 continue;
2916
2917 surface = dc_create_surface(dc);
2918 fill_plane_attributes(
2919 crtc->dev->dev_private,
2920 surface,
2921 plane_state,
2922 false);
2923
2924 add_val_sets_surface(
2925 set,
2926 set_count,
2927 set[i].stream,
2928 surface);
2929
2930 need_to_validate = true;
2931 }
2932 }
2933 }
2934
	if (!need_to_validate || set_count == 0 ||
2936 dc_validate_resources(dc, set, set_count))
2937 ret = 0;
2938
2939 for (i = 0; i < set_count; i++) {
2940 for (j = 0; j < set[i].surface_count; j++) {
2941 dc_surface_release(set[i].surfaces[j]);
2942 }
2943 }
2944 for (i = 0; i < new_stream_count; i++)
2945 dc_stream_release(new_streams[i]);
2946
2947 if (ret != 0)
2948 DRM_ERROR("Atomic check failed.\n");
2949
2950 return ret;
2951 }
2952
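/*
 * Reads the DPCD to check whether the DP sink can ignore the MSA timing
 * parameters (the MSA_TIMING_PAR_IGNORED capability bit), which is what
 * variable refresh relies on; used to decide whether the EDID has to be
 * parsed for a freesync range descriptor.
 */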
static bool is_dp_capable_without_timing_msa(
		struct dc *dc,
		struct amdgpu_connector *amdgpu_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_connector->dc_link &&
		dc_read_dpcd(dc, amdgpu_connector->dc_link->link_index,
				DP_DOWN_STREAM_PORT_COUNT,
				&dpcd_data, sizeof(dpcd_data)))
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) != 0;

	return capable;
2966 }
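
/*
 * Parses the monitor range descriptor out of the EDID detailed timings (only
 * on EDIDs newer than 1.1) and, when the sink exposes a refresh range wider
 * than 10 Hz, records the freesync caps on the connector.
 */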
2967 void amdgpu_dm_add_sink_to_freesync_module(
2968 struct drm_connector *connector,
2969 struct edid *edid)
2970 {
2971 int i;
2972 uint64_t val_capable;
2973 bool edid_check_required;
2974 struct detailed_timing *timing;
2975 struct detailed_non_pixel *data;
2976 struct detailed_data_monitor_range *range;
2977 struct amdgpu_connector *amdgpu_connector =
2978 to_amdgpu_connector(connector);
2979
2980 struct drm_device *dev = connector->dev;
2981 struct amdgpu_device *adev = dev->dev_private;
2982 edid_check_required = false;
2983 if (!amdgpu_connector->dc_sink) {
2984 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
2985 return;
2986 }
2987 if (!adev->dm.freesync_module)
2988 return;
	/*
	 * if the EDID is non-NULL, restrict freesync only to DP and eDP
	 */
2992 if (edid) {
2993 if (amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
2994 || amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
2995 edid_check_required = is_dp_capable_without_timing_msa(
2996 adev->dm.dc,
2997 amdgpu_connector);
2998 }
2999 }
3000 val_capable = 0;
	if (edid_check_required && (edid->version > 1 ||
3002 (edid->version == 1 && edid->revision > 1))) {
3003 for (i = 0; i < 4; i++) {
3004
3005 timing = &edid->detailed_timings[i];
3006 data = &timing->data.other_data;
3007 range = &data->data.range;
3008 /*
3009 * Check if monitor has continuous frequency mode
3010 */
3011 if (data->type != EDID_DETAIL_MONITOR_RANGE)
3012 continue;
3013 /*
3014 * Check for flag range limits only. If flag == 1 then
3015 * no additional timing information provided.
3016 * Default GTF, GTF Secondary curve and CVT are not
3017 * supported
3018 */
3019 if (range->flags != 1)
3020 continue;
3021
3022 amdgpu_connector->min_vfreq = range->min_vfreq;
3023 amdgpu_connector->max_vfreq = range->max_vfreq;
3024 amdgpu_connector->pixel_clock_mhz =
3025 range->pixel_clock_mhz * 10;
3026 break;
3027 }
3028
3029 if (amdgpu_connector->max_vfreq -
3030 amdgpu_connector->min_vfreq > 10) {
3031 amdgpu_connector->caps.supported = true;
3032 amdgpu_connector->caps.min_refresh_in_micro_hz =
3033 amdgpu_connector->min_vfreq * 1000000;
3034 amdgpu_connector->caps.max_refresh_in_micro_hz =
3035 amdgpu_connector->max_vfreq * 1000000;
3036 val_capable = 1;
3037 }
3038 }
3039
3040 /*
3041 * TODO figure out how to notify user-mode or DRM of freesync caps
3042 * once we figure out how to deal with freesync in an upstreamable
3043 * fashion
3044 */
3045
3046 }
3047
3048 void amdgpu_dm_remove_sink_from_freesync_module(
3049 struct drm_connector *connector)
3050 {
3051 /*
3052 * TODO fill in once we figure out how to deal with freesync in
3053 * an upstreamable fashion
3054 */
3055 }