/*
 * Copyright 2012-13 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#include <linux/types.h>
#include <linux/version.h>

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_edid.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "dm_services_types.h"

/* We need to #undef FRAME_SIZE and DEPRECATED because they conflict
 * with the #defines of the same names in ptrace-abi.h.
 */
#undef FRAME_SIZE
#undef DEPRECATED

#include "dc.h"

#include "amdgpu_dm_types.h"
#include "amdgpu_dm_mst_types.h"

#include "modules/inc/mod_freesync.h"

struct dm_connector_state {
	struct drm_connector_state base;

	enum amdgpu_rmx_type scaling;
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
};

#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)


void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

static void dm_set_cursor(
	struct amdgpu_crtc *amdgpu_crtc,
	uint64_t gpu_addr,
	uint32_t width,
	uint32_t height)
{
	struct dc_cursor_attributes attributes;

	amdgpu_crtc->cursor_width = width;
	amdgpu_crtc->cursor_height = height;

	attributes.address.high_part = upper_32_bits(gpu_addr);
	attributes.address.low_part = lower_32_bits(gpu_addr);
	attributes.width = width;
	attributes.height = height;
	attributes.x_hot = 0;
	attributes.y_hot = 0;
	attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle = 0;
	attributes.attribute_flags.value = 0;

	if (!dc_target_set_cursor_attributes(
			amdgpu_crtc->target,
			&attributes)) {
		DRM_ERROR("DC failed to set cursor attributes\n");
	}
}

static int dm_crtc_unpin_cursor_bo_old(
	struct amdgpu_crtc *amdgpu_crtc)
{
	struct amdgpu_bo *robj;
	int ret = 0;

	if (NULL != amdgpu_crtc && NULL != amdgpu_crtc->cursor_bo) {
		robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

		ret = amdgpu_bo_reserve(robj, false);

		if (likely(ret == 0)) {
			ret = amdgpu_bo_unpin(robj);

			if (unlikely(ret != 0)) {
				DRM_ERROR(
					"%s: unpin failed (ret=%d), bo %p\n",
					__func__,
					ret,
					amdgpu_crtc->cursor_bo);
			}

			amdgpu_bo_unreserve(robj);
		} else {
			DRM_ERROR(
				"%s: reserve failed (ret=%d), bo %p\n",
				__func__,
				ret,
				amdgpu_crtc->cursor_bo);
		}

		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
		amdgpu_crtc->cursor_bo = NULL;
	}

	return ret;
}

static int dm_crtc_pin_cursor_bo_new(
	struct drm_crtc *crtc,
	struct drm_file *file_priv,
	uint32_t handle,
	struct amdgpu_bo **ret_obj)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_bo *robj;
	struct drm_gem_object *obj;
	int ret = -EINVAL;

	if (NULL != crtc) {
		struct drm_device *dev = crtc->dev;
		struct amdgpu_device *adev = dev->dev_private;
		uint64_t gpu_addr;

		amdgpu_crtc = to_amdgpu_crtc(crtc);

		obj = drm_gem_object_lookup(file_priv, handle);

		if (!obj) {
			DRM_ERROR(
				"Cannot find cursor object %x for crtc %d\n",
				handle,
				amdgpu_crtc->crtc_id);
			goto release;
		}
		robj = gem_to_amdgpu_bo(obj);

		ret = amdgpu_bo_reserve(robj, false);

		if (unlikely(ret != 0)) {
			drm_gem_object_unreference_unlocked(obj);
			DRM_ERROR("%s: reserve failed (ret=%d), handle %x\n",
				__func__, ret, handle);
			goto release;
		}

		ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, 0,
						adev->mc.visible_vram_size,
						&gpu_addr);

		if (ret == 0) {
			amdgpu_crtc->cursor_addr = gpu_addr;
			*ret_obj = robj;
		}
		amdgpu_bo_unreserve(robj);
		if (ret)
			drm_gem_object_unreference_unlocked(obj);

	}
release:

	return ret;
}

static int dm_crtc_cursor_set(
	struct drm_crtc *crtc,
	struct drm_file *file_priv,
	uint32_t handle,
	uint32_t width,
	uint32_t height)
{
	struct amdgpu_bo *new_cursor_bo;
	struct dc_cursor_position position;

	int ret;

	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	ret = -EINVAL;
	new_cursor_bo = NULL;

	DRM_DEBUG_KMS(
		"%s: crtc_id=%d with handle %d and size %d to %d, bo_object %p\n",
		__func__,
		amdgpu_crtc->crtc_id,
		handle,
		width,
		height,
		amdgpu_crtc->cursor_bo);

	if (!handle) {
		/* turn off cursor */
		position.enable = false;
		position.x = 0;
		position.y = 0;
		position.hot_spot_enable = false;

		if (amdgpu_crtc->target) {
			/* set cursor visible false */
			dc_target_set_cursor_position(
				amdgpu_crtc->target,
				&position);
		}
		/* unpin the old cursor buffer and update the cache */
		ret = dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
		goto release;

	}

	if ((width > amdgpu_crtc->max_cursor_width) ||
		(height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR(
			"%s: bad cursor width or height %d x %d\n",
			__func__,
			width,
			height);
		goto release;
	}
	/* try to pin the new cursor bo */
	ret = dm_crtc_pin_cursor_bo_new(crtc, file_priv, handle, &new_cursor_bo);
	/* if the pin was not successful, return an error */
	if (ret)
		goto release;

	/* program the new cursor bo to hardware */
	dm_set_cursor(amdgpu_crtc, amdgpu_crtc->cursor_addr, width, height);

	/* unpin the old, no longer used cursor bo and
	 * return its memory and mapping */
	dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);

	/* cache the new cursor bo */
	amdgpu_crtc->cursor_bo = &new_cursor_bo->gem_base;

release:
	return ret;

}

static int dm_crtc_cursor_move(struct drm_crtc *crtc,
				int x, int y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int xorigin = 0, yorigin = 0;
	struct dc_cursor_position position;

	/* avivo cursors are offset into the total surface */
	x += crtc->primary->state->src_x >> 16;
	y += crtc->primary->state->src_y >> 16;

	/*
	 * TODO: for cursor debugging unguard the following
	 */
#if 0
	DRM_DEBUG_KMS(
		"%s: x %d y %d c->x %d c->y %d\n",
		__func__,
		x,
		y,
		crtc->x,
		crtc->y);
#endif

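	/*
	 * The hardware cannot take negative cursor coordinates. When the
	 * cursor hangs off the top or left edge, clamp the position to 0
	 * and shift the hotspot by the clipped amount instead, so the
	 * visible part of the cursor stays where the user put it.
	 */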
	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}

	position.enable = true;
	position.x = x;
	position.y = y;

	position.hot_spot_enable = true;
	position.x_hotspot = xorigin;
	position.y_hotspot = yorigin;

	if (amdgpu_crtc->target) {
		if (!dc_target_set_cursor_position(
			amdgpu_crtc->target,
			&position)) {
			DRM_ERROR("DC failed to set cursor position\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void dm_crtc_cursor_reset(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	DRM_DEBUG_KMS(
		"%s: with cursor_bo %p\n",
		__func__,
		amdgpu_crtc->cursor_bo);

	if (amdgpu_crtc->cursor_bo && amdgpu_crtc->target) {
		dm_set_cursor(
			amdgpu_crtc,
			amdgpu_crtc->cursor_addr,
			amdgpu_crtc->cursor_width,
			amdgpu_crtc->cursor_height);
	}
}

static bool fill_rects_from_plane_state(
	const struct drm_plane_state *state,
	struct dc_surface *surface)
{
	/* DRM plane src coordinates are 16.16 fixed point; we ignore the
	 * fractional part for now rather than deal with sub-pixel positions. */
	surface->src_rect.x = state->src_x >> 16;
	surface->src_rect.y = state->src_y >> 16;
	surface->src_rect.width = state->src_w >> 16;

	if (surface->src_rect.width == 0)
		return false;

	surface->src_rect.height = state->src_h >> 16;
	if (surface->src_rect.height == 0)
		return false;

	surface->dst_rect.x = state->crtc_x;
	surface->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return false;

	surface->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return false;

	surface->dst_rect.height = state->crtc_h;

	surface->clip_rect = surface->dst_rect;

	switch (state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		surface->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		surface->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		surface->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		surface->rotation = ROTATION_ANGLE_270;
		break;
	default:
		surface->rotation = ROTATION_ANGLE_0;
		break;
	}

	return true;
}

static bool get_fb_info(
	const struct amdgpu_framebuffer *amdgpu_fb,
	uint64_t *tiling_flags,
	uint64_t *fb_location)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
	int r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r != 0)) {
		DRM_ERROR("Unable to reserve buffer\n");
		return false;
	}

	if (fb_location)
		*fb_location = amdgpu_bo_gpu_offset(rbo);

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	amdgpu_bo_unreserve(rbo);

	return true;
}

static void fill_plane_attributes_from_fb(
	struct dc_surface *surface,
	const struct amdgpu_framebuffer *amdgpu_fb, bool addReq)
{
	uint64_t tiling_flags;
	uint64_t fb_location = 0;
	const struct drm_framebuffer *fb = &amdgpu_fb->base;
	struct drm_format_name_buf format_name;

	get_fb_info(
		amdgpu_fb,
		&tiling_flags,
		addReq ? &fb_location : NULL);

	surface->address.type = PLN_ADDR_TYPE_GRAPHICS;
	surface->address.grph.addr.low_part = lower_32_bits(fb_location);
	surface->address.grph.addr.high_part = upper_32_bits(fb_location);

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		surface->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		surface->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		surface->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return;
	}

	memset(&surface->tiling_info, 0, sizeof(surface->tiling_info));

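	/*
	 * Unpack the GFX8 macro-tiling parameters that amdgpu packs into the
	 * BO's tiling flags and mirror them into DC's tiling_info, so the
	 * display pipe reads the surface with the same layout the GFX engine
	 * wrote it in.
	 */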
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		surface->tiling_info.gfx8.num_banks = num_banks;
		surface->tiling_info.gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		surface->tiling_info.gfx8.tile_split = tile_split;
		surface->tiling_info.gfx8.bank_width = bankw;
		surface->tiling_info.gfx8.bank_height = bankh;
		surface->tiling_info.gfx8.tile_aspect = mtaspect;
		surface->tiling_info.gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		surface->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	surface->tiling_info.gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	surface->plane_size.grph.surface_size.x = 0;
	surface->plane_size.grph.surface_size.y = 0;
	surface->plane_size.grph.surface_size.width = fb->width;
	surface->plane_size.grph.surface_size.height = fb->height;
	surface->plane_size.grph.surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

	surface->visible = true;
	surface->scaling_quality.h_taps_c = 0;
	surface->scaling_quality.v_taps_c = 0;

	/* TODO: unhardcode */
	surface->color_space = COLOR_SPACE_SRGB;
	/* is this needed? is the surface zeroed at allocation? */
	surface->scaling_quality.h_taps = 0;
	surface->scaling_quality.v_taps = 0;
	surface->stereo_format = PLANE_STEREO_FORMAT_NONE;

}

#define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256

static void fill_gamma_from_crtc(
	const struct drm_crtc *crtc,
	struct dc_surface *dc_surface)
{
	int i;
	struct dc_gamma *gamma;
	struct drm_crtc_state *state = crtc->state;
	struct drm_color_lut *lut = (struct drm_color_lut *) state->gamma_lut->data;
	struct dc_transfer_func *input_tf;

	gamma = dc_create_gamma();

	if (gamma == NULL)
		return;

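	/*
	 * The legacy DRM gamma property hands us a 256-entry LUT with 16-bit
	 * red/green/blue channels; copy it verbatim into DC's RGB256x3x16
	 * ramp format.
	 */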
	for (i = 0; i < NUM_OF_RAW_GAMMA_RAMP_RGB_256; i++) {
		gamma->gamma_ramp_rgb256x3x16.red[i] = lut[i].red;
		gamma->gamma_ramp_rgb256x3x16.green[i] = lut[i].green;
		gamma->gamma_ramp_rgb256x3x16.blue[i] = lut[i].blue;
	}

	gamma->type = GAMMA_RAMP_RBG256X3X16;
	gamma->size = sizeof(gamma->gamma_ramp_rgb256x3x16);

	dc_surface->gamma_correction = gamma;

	input_tf = dc_create_transfer_func();

	if (input_tf == NULL)
		return;

	input_tf->type = TF_TYPE_PREDEFINED;
	input_tf->tf = TRANSFER_FUNCTION_SRGB;

	dc_surface->in_transfer_func = input_tf;
}

static void fill_plane_attributes(
	struct dc_surface *surface,
	struct drm_plane_state *state, bool addrReq)
{
	const struct amdgpu_framebuffer *amdgpu_fb =
		to_amdgpu_framebuffer(state->fb);
	const struct drm_crtc *crtc = state->crtc;

	fill_rects_from_plane_state(state, surface);
	fill_plane_attributes_from_fb(
		surface,
		amdgpu_fb,
		addrReq);

	/* In case a gamma LUT is set, update the gamma value */
	if (state->crtc->state->gamma_lut) {
		fill_gamma_from_crtc(crtc, surface);
	}
}

/*****************************************************************************/

struct amdgpu_connector *aconnector_from_drm_crtc_id(
	const struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector *connector;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_connector *aconnector;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_connector(connector);

		if (aconnector->base.state->crtc != &acrtc->base)
			continue;

		/* Found the connector */
		return aconnector;
	}

	/* If we get here, it was not found. */
	return NULL;
}

static void update_stream_scaling_settings(
	const struct drm_display_mode *mode,
	const struct dm_connector_state *dm_state,
	const struct dc_stream *stream)
{
	struct amdgpu_device *adev = dm_state->base.crtc->dev->dev_private;
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in target space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	rmx_type = dm_state->scaling;
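
	/*
	 * Compare the source and destination aspect ratios by
	 * cross-multiplying (src.w * dst.h vs. src.h * dst.w), which avoids
	 * integer division; then shrink whichever destination dimension
	 * would otherwise be over-stretched, preserving the source aspect
	 * ratio.
	 */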
	if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
		if (src.width * dst.height <
				src.height * dst.width) {
			/* height needs less upscaling/more downscaling */
			dst.width = src.width *
					dst.height / src.height;
		} else {
			/* width needs less upscaling/more downscaling */
			dst.height = src.height *
					dst.width / src.width;
		}
	} else if (rmx_type == RMX_CENTER) {
		dst = src;
	}

	dst.x = (stream->timing.h_addressable - dst.width) / 2;
	dst.y = (stream->timing.v_addressable - dst.height) / 2;

	if (dm_state->underscan_enable) {
		dst.x += dm_state->underscan_hborder / 2;
		dst.y += dm_state->underscan_vborder / 2;
		dst.width -= dm_state->underscan_hborder;
		dst.height -= dm_state->underscan_vborder;
	}

	adev->dm.dc->stream_funcs.stream_update_scaling(adev->dm.dc, stream, &src, &dst);

	DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
			dst.x, dst.y, dst.width, dst.height);

}

static void dm_dc_surface_commit(
	struct dc *dc,
	struct drm_crtc *crtc)
{
	struct dc_surface *dc_surface;
	const struct dc_surface *dc_surfaces[1];
	const struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct dc_target *dc_target = acrtc->target;

	if (!dc_target) {
		dm_error(
			"%s: Failed to obtain target on crtc (%d)!\n",
			__func__,
			acrtc->crtc_id);
		goto fail;
	}

	dc_surface = dc_create_surface(dc);

	if (!dc_surface) {
		dm_error(
			"%s: Failed to create a surface!\n",
			__func__);
		goto fail;
	}

	/* Surface programming */
	fill_plane_attributes(dc_surface, crtc->primary->state, true);

	dc_surfaces[0] = dc_surface;

	if (false == dc_commit_surfaces_to_target(
			dc,
			dc_surfaces,
			1,
			dc_target)) {
		dm_error(
			"%s: Failed to attach surface!\n",
			__func__);
	}

	dc_surface_release(dc_surface);
fail:
	return;
}

static enum dc_color_depth convert_color_depth_from_display_info(
	const struct drm_connector *connector)
{
	uint32_t bpc = connector->display_info.bpc;

	/* Limit color depth to 8 bpc for now.
	 * TODO: Still need to handle deep color */
	if (bpc > 8)
		bpc = 8;

	switch (bpc) {
	case 0:
		/* Temporary workaround: DRM doesn't parse the color depth
		 * from EDID revisions before 1.4.
		 * TODO: Fix EDID parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

static enum dc_aspect_ratio get_aspect_ratio(
	const struct drm_display_mode *mode_in)
{
	int32_t width = mode_in->crtc_hdisplay * 9;
	int32_t height = mode_in->crtc_vdisplay * 16;
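
	/*
	 * Cross-multiplication check: for an exact 16:9 mode,
	 * hdisplay * 9 == vdisplay * 16. The +/-10 window absorbs small
	 * rounding differences in the timing; anything else is treated
	 * as 4:3.
	 */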
	if ((width - height) < 10 && (width - height) > -10)
		return ASPECT_RATIO_16_9;
	else
		return ASPECT_RATIO_4_3;
}

static enum dc_color_space get_output_color_space(
	const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030 kHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; use YCbCr709 above it and
		 * YCbCr601 below it.
		 */
		if (dc_crtc_timing->pix_clk_khz > 27030) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

/*****************************************************************************/

static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector)
{
	struct dc_crtc_timing *timing_out = &stream->timing;

	memset(timing_out, 0, sizeof(struct dc_crtc_timing));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */

	if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
			connector);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;
	timing_out->vic = drm_match_cea_mode(mode_in);

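	/*
	 * Translate the DRM crtc_* timing into DC's notion of a timing:
	 * the front porch is the gap between the end of the active region
	 * and the start of sync, and the sync width is sync_end - sync_start.
	 */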
	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_khz = mode_in->crtc_clock;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
	if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
		timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
	if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
		timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;

	stream->output_color_space = get_output_color_space(timing_out);

}

static void fill_audio_info(
	struct audio_info *audio_info,
	const struct drm_connector *drm_connector,
	const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	while (i < AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS &&
		edid_caps->display_name[i]) {
		audio_info->display_name[i] = edid_caps->display_name[i];
		i++;
	}

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}

static void copy_crtc_timing_for_drm_display_mode(
	const struct drm_display_mode *src_mode,
	struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void decide_crtc_timing_for_drm_display_mode(
	struct drm_display_mode *drm_mode,
	const struct drm_display_mode *native_mode,
	bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
			native_mode->htotal == drm_mode->htotal &&
			native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling and no amdgpu-inserted mode: nothing to patch */
	}
}

static struct dc_target *create_target_for_sink(
	const struct amdgpu_connector *aconnector,
	const struct drm_display_mode *drm_mode,
	const struct dm_connector_state *dm_state)
{
	struct drm_display_mode *preferred_mode = NULL;
	const struct drm_connector *drm_connector;
	struct dc_target *target = NULL;
	struct dc_stream *stream;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;

	if (NULL == aconnector) {
		DRM_ERROR("aconnector is NULL!\n");
		goto drm_connector_null;
	}

	if (NULL == dm_state) {
		DRM_ERROR("dm_state is NULL!\n");
		goto dm_state_null;
	}

	drm_connector = &aconnector->base;
	stream = dc_create_stream_for_sink(aconnector->dc_sink);

	if (NULL == stream) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto stream_create_fail;
	}

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for the preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	if (NULL == preferred_mode) {
		/* This may not be an error: the use case is when we have no
		 * usermode calls to reset and set the mode upon hotplug. In
		 * this case, we call set mode ourselves to restore the
		 * previous mode, and the modelist may not be filled in time.
		 */
		DRM_INFO("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
			&mode, preferred_mode,
			dm_state->scaling != RMX_OFF);
	}

	fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base);
	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		aconnector->dc_sink);

	target = dc_create_target_for_streams(&stream, 1);
	dc_stream_release(stream);

	if (NULL == target) {
		DRM_ERROR("Failed to create target with streams!\n");
		goto target_create_fail;
	}

dm_state_null:
drm_connector_null:
target_create_fail:
stream_create_fail:
	return target;
}

void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

/* Implements only the options currently available to the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = drm_atomic_helper_crtc_reset,
	.cursor_set = dm_crtc_cursor_set,
	.cursor_move = dm_crtc_cursor_move,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);

	/* Notes:
	 * 1. This interface is NOT called in the context of the HPD irq.
	 * 2. This interface *is* called in the context of a user-mode ioctl,
	 *    which makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(
	struct drm_connector *connector,
	struct drm_connector_state *connector_state,
	struct drm_property *property,
	uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	int i;
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	}

	for_each_crtc_in_state(
		connector_state->state,
		crtc,
		new_crtc_state,
		i) {

		if (crtc == connector_state->crtc) {
			struct drm_plane_state *plane_state;

			/*
			 * Bit of magic done here. We need to ensure
			 * that planes get updated after the mode is set.
			 * So, we add the primary plane to the state,
			 * and this way atomic_update will be called
			 * for it.
			 */
			plane_state =
				drm_atomic_get_plane_state(
					connector_state->state,
					crtc->primary);

			if (!plane_state)
				return -EINVAL;
		}
	}

	return ret;
}

void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev) {
			backlight_device_unregister(dm->backlight_dev);
			dm->backlight_dev = NULL;
		}

	}
#endif
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;

		connector->state = &state->base;
		connector->state->connector = connector;
	}
}

struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
	struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
		kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (new_state) {
		__drm_atomic_helper_connector_duplicate_state(connector,
							      &new_state->base);
		return &new_state->base;
	}

	return NULL;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property
};

static struct drm_encoder *best_encoder(struct drm_connector *connector)
{
	int enc_id = connector->encoder_ids[0];
	struct drm_mode_object *obj;
	struct drm_encoder *encoder;

	DRM_DEBUG_KMS("Finding the best encoder\n");

	/* pick the first encoder id */
	if (enc_id) {
		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
		if (!obj) {
			DRM_ERROR("Couldn't find a matching encoder for our connector\n");
			return NULL;
		}
		encoder = obj_to_encoder(obj);
		return encoder;
	}
	DRM_ERROR("No encoder id\n");
	return NULL;
}

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr ||
		!aconnector->base.edid_blob_ptr->data) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	/* Only dereference the EDID blob after the NULL check above */
	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force
		== DRM_FORCE_ON)
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
			aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
}

static void handle_edid_mgmt(struct amdgpu_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/* In case of a headless boot with force on for a DP managed connector,
	 * those settings have to be != 0 to get an initial modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}


	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}

int amdgpu_dm_connector_mode_valid(
	struct drm_connector *connector,
	struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	const struct dc_sink *dc_sink;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct dc_validation_set val_set = { 0 };
	/* TODO: Unhardcode stream count */
	struct dc_stream *streams[1];
	struct dc_target *target;
	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
		(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/* Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
		!aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_connector(connector)->dc_sink;

	if (NULL == dc_sink) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto stream_create_fail;
	}

	streams[0] = dc_create_stream_for_sink(dc_sink);

	if (NULL == streams[0]) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto stream_create_fail;
	}

	drm_mode_set_crtcinfo(mode, 0);
	fill_stream_properties_from_drm_display_mode(streams[0], mode, connector);

	target = dc_create_target_for_streams(streams, 1);
	val_set.target = target;

	if (NULL == val_set.target) {
		DRM_ERROR("Failed to create target with stream!\n");
		goto target_create_fail;
	}

	val_set.surface_count = 0;
	streams[0]->src.width = mode->hdisplay;
	streams[0]->src.height = mode->vdisplay;
	streams[0]->dst = streams[0]->src;

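	/*
	 * Ask DC to validate the target with zero surfaces attached; this
	 * checks whether the display pipes can drive the requested timing
	 * at all, independent of any framebuffer.
	 */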
	if (dc_validate_resources(adev->dm.dc, &val_set, 1))
		result = MODE_OK;

	dc_target_release(target);
target_create_fail:
	dc_stream_release(streams[0]);
stream_create_fail:
	/* TODO: error handling */
	return result;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If we hotplug a second, bigger display in FB Con mode, the bigger
	 * resolution modes will be filtered by drm_mode_validate_size() and
	 * will be missing after the user starts lightdm. So we need to renew
	 * the modes list in the get_modes callback, not just return the
	 * modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.best_encoder = best_encoder
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static int dm_crtc_helper_atomic_check(
	struct drm_crtc *crtc,
	struct drm_crtc_state *state)
{
	return 0;
}

static bool dm_crtc_helper_mode_fixup(
	struct drm_crtc *crtc,
	const struct drm_display_mode *mode,
	struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}

static int dm_encoder_helper_atomic_check(
	struct drm_encoder *encoder,
	struct drm_crtc_state *crtc_state,
	struct drm_connector_state *conn_state)
{
	return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};

static const struct drm_plane_funcs dm_plane_funcs = {
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state
};

static void clear_unrelated_fields(struct drm_plane_state *state)
{
	state->crtc = NULL;
	state->fb = NULL;
	state->state = NULL;
	state->fence = NULL;
}

static bool page_flip_needed(
	const struct drm_plane_state *new_state,
	const struct drm_plane_state *old_state,
	struct drm_pending_vblank_event *event,
	bool commit_surface_required)
{
	struct drm_plane_state old_state_tmp;
	struct drm_plane_state new_state_tmp;

	struct amdgpu_framebuffer *amdgpu_fb_old;
	struct amdgpu_framebuffer *amdgpu_fb_new;
	struct amdgpu_crtc *acrtc_new;

	uint64_t old_tiling_flags;
	uint64_t new_tiling_flags;

	bool page_flip_required;

	if (!old_state)
		return false;

	if (!old_state->fb)
		return false;

	if (!new_state)
		return false;

	if (!new_state->fb)
		return false;

	old_state_tmp = *old_state;
	new_state_tmp = *new_state;

	if (!event)
		return false;

	amdgpu_fb_old = to_amdgpu_framebuffer(old_state->fb);
	amdgpu_fb_new = to_amdgpu_framebuffer(new_state->fb);

	if (!get_fb_info(amdgpu_fb_old, &old_tiling_flags, NULL))
		return false;

	if (!get_fb_info(amdgpu_fb_new, &new_tiling_flags, NULL))
		return false;

	if (commit_surface_required == true &&
		old_tiling_flags != new_tiling_flags)
		return false;

	clear_unrelated_fields(&old_state_tmp);
	clear_unrelated_fields(&new_state_tmp);

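	/*
	 * With the per-commit fields (crtc, fb, state, fence) zeroed out in
	 * both copies, a byte-wise memcmp tells us whether anything other
	 * than those fields differs between the two states; if they are
	 * otherwise equal, a simple page flip is sufficient.
	 */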
	page_flip_required = memcmp(&old_state_tmp,
				    &new_state_tmp,
				    sizeof(old_state_tmp)) == 0;
	if (new_state->crtc && page_flip_required == false) {
		acrtc_new = to_amdgpu_crtc(new_state->crtc);
		if (acrtc_new->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
			page_flip_required = true;
	}
	return page_flip_required;
}

static int dm_plane_helper_prepare_fb(
	struct drm_plane *plane,
	struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *rbo;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);

	obj = afb->obj;
	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL);

	amdgpu_bo_unreserve(rbo);

	if (unlikely(r != 0)) {
		DRM_ERROR("Failed to pin framebuffer\n");
		return r;
	}

	return 0;
}

static void dm_plane_helper_cleanup_fb(
	struct drm_plane *plane,
	struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	struct amdgpu_framebuffer *afb;
	int r;

	if (!old_state->fb)
		return;

	afb = to_amdgpu_framebuffer(old_state->fb);
	rbo = gem_to_amdgpu_bo(afb->obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
}

int dm_create_validation_set_for_target(struct drm_connector *connector,
		struct drm_display_mode *mode, struct dc_validation_set *val_set)
{
	int result = MODE_ERROR;
	const struct dc_sink *dc_sink =
			to_amdgpu_connector(connector)->dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream *streams[1];
	struct dc_target *target;

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
		(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	if (NULL == dc_sink) {
		DRM_ERROR("dc_sink is NULL!\n");
		return result;
	}

	streams[0] = dc_create_stream_for_sink(dc_sink);

	if (NULL == streams[0]) {
		DRM_ERROR("Failed to create stream for sink!\n");
		return result;
	}

	drm_mode_set_crtcinfo(mode, 0);

	fill_stream_properties_from_drm_display_mode(streams[0], mode, connector);

	target = dc_create_target_for_streams(streams, 1);
	val_set->target = target;

	if (NULL == val_set->target) {
		DRM_ERROR("Failed to create target with stream!\n");
		goto fail;
	}

	streams[0]->src.width = mode->hdisplay;
	streams[0]->src.height = mode->vdisplay;
	streams[0]->dst = streams[0]->src;

	return MODE_OK;

fail:
	dc_stream_release(streams[0]);
	return result;

}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
};

/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * drm check will succeed, and let DC implement proper checks
 */
static uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB4444,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_RGBA4444,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
};

int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			struct amdgpu_crtc *acrtc,
			uint32_t crtc_index)
{
	int res = -ENOMEM;

	struct drm_plane *primary_plane =
		kzalloc(sizeof(*primary_plane), GFP_KERNEL);

	if (!primary_plane)
		goto fail_plane;

	primary_plane->format_default = true;

	res = drm_universal_plane_init(
		dm->adev->ddev,
		primary_plane,
		0,
		&dm_plane_funcs,
		rgb_formats,
		ARRAY_SIZE(rgb_formats),
		NULL,
		DRM_PLANE_TYPE_PRIMARY, NULL);

	if (res)
		goto fail;

	primary_plane->crtc = &acrtc->base;

	drm_plane_helper_add(primary_plane, &dm_plane_helper_funcs);

	res = drm_crtc_init_with_planes(
		dm->ddev,
		&acrtc->base,
		primary_plane,
		NULL,
		&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	acrtc->max_cursor_width = 128;
	acrtc->max_cursor_height = 128;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_mode_crtc_set_gamma_size(&acrtc->base, 256);

	return 0;
fail:
	kfree(primary_plane);
fail_plane:
	acrtc->crtc_id = -1;
	return res;
}

static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	const struct drm_connector_helper_funcs *helper =
		connector->helper_private;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = helper->best_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				&connector->probed_modes,
				head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
				amdgpu_encoder->native_mode = *preferred_mode;
			}
			break;
		}

	}
}

static struct drm_display_mode *amdgpu_dm_create_common_mode(
		struct drm_encoder *encoder, char *name,
		int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;

}

static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
		struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_connector *amdgpu_connector =
			to_amdgpu_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		/* skip common modes that don't fit inside the native mode */
		if (common_modes[i].w > native_mode->hdisplay ||
			common_modes[i].h > native_mode->vdisplay ||
			(common_modes[i].w == native_mode->hdisplay &&
			common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
				common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
		amdgpu_connector->num_modes++;
	}
}

static void amdgpu_dm_connector_ddc_get_modes(
	struct drm_connector *connector,
	struct edid *edid)
{
	struct amdgpu_connector *amdgpu_connector =
			to_amdgpu_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		drm_edid_to_eld(connector, edid);

		amdgpu_dm_get_native_mode(connector);
	} else
		amdgpu_connector->num_modes = 0;
}

int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	const struct drm_connector_helper_funcs *helper =
			connector->helper_private;
	struct amdgpu_connector *amdgpu_connector =
			to_amdgpu_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_connector->edid;

	encoder = helper->best_encoder(connector);

	amdgpu_dm_connector_ddc_get_modes(connector, edid);
	amdgpu_dm_connector_add_common_modes(encoder, connector);
	return amdgpu_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(
	struct amdgpu_display_manager *dm,
	struct amdgpu_connector *aconnector,
	int connector_type,
	const struct dc_link *link,
	int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = true;
	aconnector->base.doublescan_allowed = true;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */

	mutex_init(&aconnector->hpd_lock);

	/* Configure connectors that support HPD hot plug. connector->polled
	 * defaults to 0, which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

}

int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
		struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

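	/*
	 * Translate each Linux i2c_msg into a DC i2c_payload. I2C_M_RD
	 * marks a read transfer, so the payload's write flag must be its
	 * negation.
	 */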
	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(i2c->dm->dc, i2c->link_index, &cmd))
		result = num;

	kfree(cmd.payloads);

	return result;
}

u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

struct amdgpu_i2c_adapter *create_i2c(unsigned int link_index, struct amdgpu_display_manager *dm, int *res)
{
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;

	i2c->dm = dm;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &dm->adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c->link_index = link_index;
	i2c_set_adapdata(&i2c->base, i2c);

	return i2c;
}
1929
1930/* Note: this function assumes that dc_link_detect() was called for the
1931 * dc_link which will be represented by this aconnector. */
1932int amdgpu_dm_connector_init(
1933 struct amdgpu_display_manager *dm,
1934 struct amdgpu_connector *aconnector,
1935 uint32_t link_index,
1936 struct amdgpu_encoder *aencoder)
1937{
1938 int res = 0;
1939 int connector_type;
1940 struct dc *dc = dm->dc;
1941 const struct dc_link *link = dc_get_link_at_index(dc, link_index);
1942 struct amdgpu_i2c_adapter *i2c;
1943
1944 DRM_DEBUG_KMS("%s()\n", __func__);
1945
1946 i2c = create_i2c(link->link_index, dm, &res);
1947 aconnector->i2c = i2c;
1948 res = i2c_add_adapter(&i2c->base);
1949
1950 if (res) {
1951 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
1952 goto out_free;
1953 }
1954
1955 connector_type = to_drm_connector_type(link->connector_signal);
1956
1957 res = drm_connector_init(
1958 dm->ddev,
1959 &aconnector->base,
1960 &amdgpu_dm_connector_funcs,
1961 connector_type);
1962
1963 if (res) {
1964 DRM_ERROR("connector_init failed\n");
1965 aconnector->connector_id = -1;
1966 goto out_free;
1967 }
1968
1969 drm_connector_helper_add(
1970 &aconnector->base,
1971 &amdgpu_dm_connector_helper_funcs);
1972
1973 amdgpu_dm_connector_init_helper(
1974 dm,
1975 aconnector,
1976 connector_type,
1977 link,
1978 link_index);
1979
1980 drm_mode_connector_attach_encoder(
1981 &aconnector->base, &aencoder->base);
1982
1983 drm_connector_register(&aconnector->base);
1984
1985 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
1986 || connector_type == DRM_MODE_CONNECTOR_eDP)
1987 amdgpu_dm_initialize_mst_connector(dm, aconnector);
1988
1989#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1990 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1991
1992	/* NOTE: this currently will create a backlight device even if a panel
1993	 * is not connected to the eDP/LVDS connector.
1994	 *
1995	 * This is less than ideal, but we don't have sink information at this
1996	 * stage, since detection happens afterwards. We can't do detection
1997	 * earlier, since MST detection needs connectors to be created first.
1998	 */
1999 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
2000		/* Even if registration fails, we should continue with
2001		 * DM initialization, because not having a backlight control
2002		 * is better than a black screen. */
2003 amdgpu_dm_register_backlight_device(dm);
2004
2005 if (dm->backlight_dev)
2006 dm->backlight_link = link;
2007 }
2008#endif
2009
2010out_free:
2011 if (res) {
2012 kfree(i2c);
2013 aconnector->i2c = NULL;
2014 }
2015 return res;
2016}
2017
2018int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
2019{
2020 switch (adev->mode_info.num_crtc) {
2021 case 1:
2022 return 0x1;
2023 case 2:
2024 return 0x3;
2025 case 3:
2026 return 0x7;
2027 case 4:
2028 return 0xf;
2029 case 5:
2030 return 0x1f;
2031 case 6:
2032 default:
2033 return 0x3f;
2034 }
2035}
2036
2037int amdgpu_dm_encoder_init(
2038 struct drm_device *dev,
2039 struct amdgpu_encoder *aencoder,
2040 uint32_t link_index)
2041{
2042 struct amdgpu_device *adev = dev->dev_private;
2043
2044 int res = drm_encoder_init(dev,
2045 &aencoder->base,
2046 &amdgpu_dm_encoder_funcs,
2047 DRM_MODE_ENCODER_TMDS,
2048 NULL);
2049
2050 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
2051
2052 if (!res)
2053 aencoder->encoder_id = link_index;
2054 else
2055 aencoder->encoder_id = -1;
2056
2057 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
2058
2059 return res;
2060}
2061
2062enum dm_commit_action {
2063 DM_COMMIT_ACTION_NOTHING,
2064 DM_COMMIT_ACTION_RESET,
2065 DM_COMMIT_ACTION_DPMS_ON,
2066 DM_COMMIT_ACTION_DPMS_OFF,
2067 DM_COMMIT_ACTION_SET
2068};
2069
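/*
 * Derive the DM commit action from a DRM crtc state: mode_changed together
 * with enable/active selects SET or RESET, while active_changed alone maps
 * to a DPMS on/off transition; everything else is a no-op for the crtc.
 */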
2070static enum dm_commit_action get_dm_commit_action(struct drm_crtc_state *state)
2071{
2072 /* mode changed means either actually mode changed or enabled changed */
2073 /* active changed means dpms changed */
2074
2075	DRM_DEBUG_KMS("crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d, active_changed:%d, connectors_changed:%d\n",
2076 state->enable,
2077 state->active,
2078 state->planes_changed,
2079 state->mode_changed,
2080 state->active_changed,
2081 state->connectors_changed);
2082
2083 if (state->mode_changed) {
2084		/* if it got disabled, reset the mode */
2085 if (!state->enable)
2086 return DM_COMMIT_ACTION_RESET;
2087
2088 if (state->active)
2089 return DM_COMMIT_ACTION_SET;
2090 else
2091 return DM_COMMIT_ACTION_RESET;
2092 } else {
2093 /* ! mode_changed */
2094
2095		/* if it stays disabled, there is nothing to do */
2096 if (!state->enable)
2097 return DM_COMMIT_ACTION_NOTHING;
2098
2099 if (state->active && state->connectors_changed)
2100 return DM_COMMIT_ACTION_SET;
2101
2102 if (state->active_changed) {
2103 if (state->active) {
2104 return DM_COMMIT_ACTION_DPMS_ON;
2105 } else {
2106 return DM_COMMIT_ACTION_DPMS_OFF;
2107 }
2108 } else {
2109 /* ! active_changed */
2110 return DM_COMMIT_ACTION_NOTHING;
2111 }
2112 }
2113}
2114
2115
2116typedef bool (*predicate)(struct amdgpu_crtc *acrtc);
2117
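/*
 * Sleep-poll the crtc's pageflip status until the predicate @f returns
 * false. Bail out with a warning after roughly one second, since a flip
 * interrupt that never arrives would otherwise hang the caller (see the
 * Polaris note below).
 */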
2118static void wait_while_pflip_status(struct amdgpu_device *adev,
2119		struct amdgpu_crtc *acrtc, predicate f)
{
2120 int count = 0;
2121 while (f(acrtc)) {
2122		/* poll, sleeping 1 ms between checks */
2123 msleep(1);
2124 count++;
2125 if (count == 1000) {
2126 DRM_ERROR("%s - crtc:%d[%p], pflip_stat:%d, probable hang!\n",
2127 __func__, acrtc->crtc_id,
2128 acrtc,
2129 acrtc->pflip_status);
2130
2131			/* We do not expect to hit this case except on Polaris with a PHY PLL:
2132			 * 1. DP to HDMI passive dongle connected
2133			 * 2. unplug (headless)
2134			 * 3. plug in DP
2135			 * 3a. on plug-in, DP will try to verify the link by training, and
2136			 * training disables the PHY PLL that HDMI relies on to drive the TG
2137			 * 3b. the flip interrupt can then no longer be generated, so we exit
2138			 * when the timeout expires. However, we have no code here to clean
2139			 * up the flip; cleanup happens when the address is written with the
2140			 * restore mode change
2141			 */
2142 WARN_ON(1);
2143 break;
2144 }
2145 }
2146
2147	DRM_DEBUG_DRIVER("%s - finished waiting %d ms, crtc:%d[%p], pflip_stat:%d\n",
2148 __func__,
2149 count,
2150 acrtc->crtc_id,
2151 acrtc,
2152 acrtc->pflip_status);
2153}
2154
2155static bool pflip_in_progress_predicate(struct amdgpu_crtc *acrtc)
2156{
2157 return acrtc->pflip_status != AMDGPU_FLIP_NONE;
2158}
2159
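/*
 * Enable or disable vblank/pageflip interrupts for @acrtc. The disable path
 * first waits for any in-flight flip to complete, so that no flip interrupt
 * is lost while the irq source is being turned off.
 */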
2160static void manage_dm_interrupts(
2161 struct amdgpu_device *adev,
2162 struct amdgpu_crtc *acrtc,
2163 bool enable)
2164{
2165	/*
2166	 * this is not a correct translation, but it works as long as the
2167	 * VBLANK irq constant is the same as PFLIP's
2168	 */
2169 int irq_type =
2170 amdgpu_crtc_idx_to_irq_type(
2171 adev,
2172 acrtc->crtc_id);
2173
2174 if (enable) {
2175 drm_crtc_vblank_on(&acrtc->base);
2176 amdgpu_irq_get(
2177 adev,
2178 &adev->pageflip_irq,
2179 irq_type);
2180 } else {
2181 wait_while_pflip_status(adev, acrtc,
2182 pflip_in_progress_predicate);
2183
2184 amdgpu_irq_put(
2185 adev,
2186 &adev->pageflip_irq,
2187 irq_type);
2188 drm_crtc_vblank_off(&acrtc->base);
2189 }
2190}
2191
2192
2193static bool pflip_pending_predicate(struct amdgpu_crtc *acrtc)
2194{
2195 return acrtc->pflip_status == AMDGPU_FLIP_PENDING;
2196}
2197
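/*
 * Compare the old and new connector scaling/underscan settings; any
 * difference means the stream's scaling has to be reprogrammed. Toggling
 * underscan on or off only matters if non-zero borders are involved.
 */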
2198static bool is_scaling_state_different(
2199 const struct dm_connector_state *dm_state,
2200 const struct dm_connector_state *old_dm_state)
2201{
2202 if (dm_state->scaling != old_dm_state->scaling)
2203 return true;
2204 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
2205 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
2206 return true;
2207 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
2208 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
2209 return true;
2210 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder
2211 || dm_state->underscan_vborder != old_dm_state->underscan_vborder)
2212 return true;
2213 return false;
2214}
2215
2216static void remove_target(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc)
2217{
2218 int i;
2219
2220	/*
2221	 * disable vblanks and pflips on the crtc that
2222	 * is about to be changed
2223	 */
2224 manage_dm_interrupts(adev, acrtc, false);
2225 /* this is the update mode case */
2226 if (adev->dm.freesync_module)
2227 for (i = 0; i < acrtc->target->stream_count; i++)
2228 mod_freesync_remove_stream(
2229 adev->dm.freesync_module,
2230 acrtc->target->streams[i]);
2231 dc_target_release(acrtc->target);
2232 acrtc->target = NULL;
2233 acrtc->otg_inst = -1;
2234 acrtc->enabled = false;
2235}
2236
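/*
 * Commit an atomic state to the hardware, in the following order:
 * 1. pin the new framebuffers and prepare the required page flips,
 * 2. swap in the new state (the point of no return),
 * 3. apply the per-crtc commit actions (set/reset mode, dpms on/off),
 * 4. commit the resulting targets to DC and commit the changed surfaces,
 * 5. enable interrupts on newly set crtcs and submit the prepared flips,
 * 6. clean up and unpin the old framebuffers.
 */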
2237int amdgpu_dm_atomic_commit(
2238 struct drm_device *dev,
2239 struct drm_atomic_state *state,
2240 bool async)
2241{
2242 struct amdgpu_device *adev = dev->dev_private;
2243 struct amdgpu_display_manager *dm = &adev->dm;
2244 struct drm_plane *plane;
2245 struct drm_plane_state *new_plane_state;
2246 struct drm_plane_state *old_plane_state;
2247 uint32_t i, j;
2248 int32_t ret = 0;
2249 uint32_t commit_targets_count = 0;
2250 uint32_t new_crtcs_count = 0;
2251 uint32_t flip_crtcs_count = 0;
2252 struct drm_crtc *crtc;
2253 struct drm_crtc_state *old_crtc_state;
2254
2255 struct dc_target *commit_targets[MAX_TARGETS];
2256 struct amdgpu_crtc *new_crtcs[MAX_TARGETS];
2257 struct dc_target *new_target;
2258 struct drm_crtc *flip_crtcs[MAX_TARGETS];
2259 struct amdgpu_flip_work *work[MAX_TARGETS] = {0};
2260 struct amdgpu_bo *new_abo[MAX_TARGETS] = {0};
2261
2262	/* In this step all new framebuffers get pinned */
2263
2264 /*
2265 * TODO: Revisit when we support true asynchronous commit.
2266 * Right now we receive async commit only from pageflip, in which case
2267 * we should not pin/unpin the fb here, it should be done in
2268 * amdgpu_crtc_flip and from the vblank irq handler.
2269 */
2270 if (!async) {
2271 ret = drm_atomic_helper_prepare_planes(dev, state);
2272 if (ret)
2273 return ret;
2274 }
2275
2276 /* Page flip if needed */
2277 for_each_plane_in_state(state, plane, new_plane_state, i) {
2278 struct drm_plane_state *old_plane_state = plane->state;
2279 struct drm_crtc *crtc = new_plane_state->crtc;
2280 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2281 struct drm_framebuffer *fb = new_plane_state->fb;
2282 struct drm_crtc_state *crtc_state;
2283
2284 if (!fb || !crtc)
2285 continue;
2286
2287 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2288
2289 if (!crtc_state->planes_changed || !crtc_state->active)
2290 continue;
2291
2292 if (page_flip_needed(
2293 new_plane_state,
2294 old_plane_state,
2295 crtc_state->event,
2296 false)) {
2297 ret = amdgpu_crtc_prepare_flip(crtc,
2298 fb,
2299 crtc_state->event,
2300 acrtc->flip_flags,
2301 drm_crtc_vblank_count(crtc),
2302 &work[flip_crtcs_count],
2303 &new_abo[flip_crtcs_count]);
2304
2305 if (ret) {
2306 /* According to atomic_commit hook API, EINVAL is not allowed */
2307 if (unlikely(ret == -EINVAL))
2308 ret = -ENOMEM;
2309
2310 DRM_ERROR("Atomic commit: Flip for crtc id %d: [%p], "
2311 "failed, errno = %d\n",
2312 acrtc->crtc_id,
2313 acrtc,
2314 ret);
2315 /* cleanup all flip configurations which
2316 * succeeded in this commit
2317 */
2318				for (j = 0; j < flip_crtcs_count; j++)
2319					amdgpu_crtc_cleanup_flip_ctx(
2320						work[j],
2321						new_abo[j]);
2322
2323 return ret;
2324 }
2325
2326 flip_crtcs[flip_crtcs_count] = crtc;
2327 flip_crtcs_count++;
2328 }
2329 }
2330
2331 /*
2332 * This is the point of no return - everything below never fails except
2333 * when the hw goes bonghits. Which means we can commit the new state on
2334 * the software side now.
2335 */
2336
2337 drm_atomic_helper_swap_state(state, true);
2338
2339	/*
2340	 * From this point on, 'state' really holds the old state. The new
2341	 * state has been swapped into the DRM objects and can be accessed there
2342	 */
2343
2344	/*
2345	 * the state carries no fences yet, so we can skip the call to
2346	 * wait_for_fences(dev, state);
2347	 */
2348
2349 drm_atomic_helper_update_legacy_modeset_state(dev, state);
2350
2351 /* update changed items */
2352 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
2353 struct amdgpu_crtc *acrtc;
2354 struct amdgpu_connector *aconnector = NULL;
2355 enum dm_commit_action action;
2356 struct drm_crtc_state *new_state = crtc->state;
2357
2358 acrtc = to_amdgpu_crtc(crtc);
2359
2360 aconnector =
2361 amdgpu_dm_find_first_crct_matching_connector(
2362 state,
2363 crtc,
2364 false);
2365
2366 /* handles headless hotplug case, updating new_state and
2367 * aconnector as needed
2368 */
2369
2370 action = get_dm_commit_action(new_state);
2371
2372 switch (action) {
2373 case DM_COMMIT_ACTION_DPMS_ON:
2374 case DM_COMMIT_ACTION_SET: {
2375 struct dm_connector_state *dm_state = NULL;
2376 new_target = NULL;
2377
2378 if (aconnector)
2379 dm_state = to_dm_connector_state(aconnector->base.state);
2380
2381 new_target = create_target_for_sink(
2382 aconnector,
2383 &crtc->state->mode,
2384 dm_state);
2385
2386 DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
2387
2388 if (!new_target) {
2389			/*
2390			 * this can happen because of problems with
2391			 * userspace notification delivery:
2392			 * userspace tries to set a mode on a display
2393			 * that is in fact already disconnected, so
2394			 * dc_sink on the aconnector is NULL.
2395			 * We expect a reset mode request to follow.
2396			 *
2397			 * It can also happen when an unplug occurs
2398			 * while the resume sequence is still running.
2399			 *
2400			 * In this case, we want to pretend we still
2401			 * have a sink to keep the pipe running, so that
2402			 * hw state stays consistent with the sw state
2403			 */
2404 DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n",
2405 __func__, acrtc->base.base.id);
2406 break;
2407 }
2408
2409 if (acrtc->target)
2410 remove_target(adev, acrtc);
2411
2412			/*
2413			 * remember the crtcs that got a new mode set; their
2414			 * vblanks are enabled only after dc has acquired all
2415			 * resources, i.e. after dc_commit_targets
2416			 */
2417 new_crtcs[new_crtcs_count] = acrtc;
2418 new_crtcs_count++;
2419
2420 acrtc->target = new_target;
2421 acrtc->enabled = true;
2422 acrtc->hw_mode = crtc->state->mode;
2423 crtc->hwmode = crtc->state->mode;
2424
2425 break;
2426 }
2427
2428 case DM_COMMIT_ACTION_NOTHING: {
2429 struct dm_connector_state *dm_state = NULL;
2430
2431 if (!aconnector)
2432 break;
2433
2434 dm_state = to_dm_connector_state(aconnector->base.state);
2435
2436 /* Scaling update */
2437 update_stream_scaling_settings(
2438 &crtc->state->mode,
2439 dm_state,
2440 acrtc->target->streams[0]);
2441
2442 break;
2443 }
2444 case DM_COMMIT_ACTION_DPMS_OFF:
2445 case DM_COMMIT_ACTION_RESET:
2446 DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
2447 /* i.e. reset mode */
2448 if (acrtc->target)
2449 remove_target(adev, acrtc);
2450 break;
2451 } /* switch() */
2452 } /* for_each_crtc_in_state() */
2453
2454 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2455
2456 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2457
2458 if (acrtc->target) {
2459 commit_targets[commit_targets_count] = acrtc->target;
2460 ++commit_targets_count;
2461 }
2462 }
2463
2464 /*
2465 * Add streams after required streams from new and replaced targets
2466 * are removed from freesync module
2467 */
2468 if (adev->dm.freesync_module) {
2469 for (i = 0; i < new_crtcs_count; i++) {
2470 struct amdgpu_connector *aconnector = NULL;
2471 new_target = new_crtcs[i]->target;
2472 aconnector =
2473 amdgpu_dm_find_first_crct_matching_connector(
2474 state,
2475 &new_crtcs[i]->base,
2476 false);
2477 if (!aconnector) {
2478 DRM_INFO(
2479 "Atomic commit: Failed to find connector for acrtc id:%d "
2480 "skipping freesync init\n",
2481 new_crtcs[i]->crtc_id);
2482 continue;
2483 }
2484
2485 for (j = 0; j < new_target->stream_count; j++)
2486 mod_freesync_add_stream(
2487 adev->dm.freesync_module,
2488 new_target->streams[j], &aconnector->caps);
2489 }
2490 }
2491
2492 /* DC is optimized not to do anything if 'targets' didn't change. */
2493 dc_commit_targets(dm->dc, commit_targets, commit_targets_count);
2494
2495 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2496 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2497
2498 if (acrtc->target != NULL)
2499 acrtc->otg_inst =
2500 dc_target_get_status(acrtc->target)->primary_otg_inst;
2501 }
2502
2503 /* update planes when needed */
2504 for_each_plane_in_state(state, plane, old_plane_state, i) {
2505 struct drm_plane_state *plane_state = plane->state;
2506 struct drm_crtc *crtc = plane_state->crtc;
2507 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2508 struct drm_framebuffer *fb = plane_state->fb;
2509 struct drm_connector *connector;
2510 struct dm_connector_state *dm_state = NULL;
2511 enum dm_commit_action action;
2512
2513 if (!fb || !crtc || !crtc->state->active)
2514 continue;
2515
2516 action = get_dm_commit_action(crtc->state);
2517
2518 /* Surfaces are created under two scenarios:
2519 * 1. This commit is not a page flip.
2520 * 2. This commit is a page flip, and targets are created.
2521 */
2522 if (!page_flip_needed(
2523 plane_state,
2524 old_plane_state,
2525 crtc->state->event, true) ||
2526 action == DM_COMMIT_ACTION_DPMS_ON ||
2527 action == DM_COMMIT_ACTION_SET) {
2528 list_for_each_entry(connector,
2529 &dev->mode_config.connector_list, head) {
2530 if (connector->state->crtc == crtc) {
2531 dm_state = to_dm_connector_state(
2532 connector->state);
2533 break;
2534 }
2535 }
2536
2537			/*
2538			 * This situation happens in the following case:
2539			 * we are about to set a mode on a connector whose only
2540			 * possible crtc (in the encoder crtc mask) is used by
2541			 * another connector, so the core will try to
2542			 * re-assign crtcs in order to make the configuration
2543			 * supported. Once our implementation makes all
2544			 * encoders support all crtcs, this issue will
2545			 * never arise again, but the check is left in
2546			 * to guard against it.
2547			 *
2548			 * It will also be needed when the actual
2549			 * drm_atomic_commit ioctl is used in the future
2550			 */
2551 if (!dm_state)
2552 continue;
2553
2554			/*
2555			 * if a flip is pending here (i.e. still waiting for the fence
2556			 * to return before the address is submitted), we cannot
2557			 * commit the surface, as that would prematurely write out the
2558			 * future address. Wait until the flip is submitted first.
2559			 */
2560 wait_while_pflip_status(adev, acrtc, pflip_pending_predicate);
2561
2562 dm_dc_surface_commit(dm->dc, crtc);
2563 }
2564 }
2565
2566 for (i = 0; i < new_crtcs_count; i++) {
2567 /*
2568 * loop to enable interrupts on newly arrived crtc
2569 */
2570 struct amdgpu_crtc *acrtc = new_crtcs[i];
2571
2572 if (adev->dm.freesync_module) {
2573 for (j = 0; j < acrtc->target->stream_count; j++)
2574 mod_freesync_notify_mode_change(
2575 adev->dm.freesync_module,
2576 acrtc->target->streams,
2577 acrtc->target->stream_count);
2578 }
2579
2580 manage_dm_interrupts(adev, acrtc, true);
2581 dm_crtc_cursor_reset(&acrtc->base);
2582
2583 }
2584
2585 /* Do actual flip */
2586 flip_crtcs_count = 0;
2587 for_each_plane_in_state(state, plane, old_plane_state, i) {
2588 struct drm_plane_state *plane_state = plane->state;
2589 struct drm_crtc *crtc = plane_state->crtc;
2590 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2591 struct drm_framebuffer *fb = plane_state->fb;
2592
2593 if (!fb || !crtc || !crtc->state->planes_changed ||
2594 !crtc->state->active)
2595 continue;
2596
2597 if (page_flip_needed(
2598 plane_state,
2599 old_plane_state,
2600 crtc->state->event,
2601 false)) {
2602			amdgpu_crtc_submit_flip(
2603				crtc,
2604				fb,
2605				work[flip_crtcs_count],
2606				new_abo[flip_crtcs_count]);
2607			flip_crtcs_count++;
2608			/* clean up the flags for next use */
2609			acrtc->flip_flags = 0;
2610 }
2611 }
2612
2613	/* At this point all old framebuffers are unpinned */
2614
2615 /* TODO: Revisit when we support true asynchronous commit.*/
2616 if (!async)
2617 drm_atomic_helper_cleanup_planes(dev, state);
2618
2619 drm_atomic_state_put(state);
2620
2621 return ret;
2622}
2623/*
2624 * This function handles all the cases where a set mode does not come upon
2625 * hotplug: e.g. the same display is unplugged and plugged back into the
2626 * same port, or we are running without usermode desktop manager support
2627 */
2628void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector)
2629{
2630 struct drm_crtc *crtc;
2631 struct amdgpu_device *adev = dev->dev_private;
2632 struct dc *dc = adev->dm.dc;
2633 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
2634 struct amdgpu_crtc *disconnected_acrtc;
2635 const struct dc_sink *sink;
2636	struct dc_target *commit_targets[MAX_TARGETS];
2637 struct dc_target *current_target;
2638 uint32_t commit_targets_count = 0;
2639 int i;
2640
2641 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
2642 return;
2643
2644 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
2645
2646 if (!disconnected_acrtc || !disconnected_acrtc->target)
2647 return;
2648
2649 sink = disconnected_acrtc->target->streams[0]->sink;
2650
2651	/*
2652	 * If the previous sink was not released and differs from the current
2653	 * one, we deduce that we cannot rely on a usermode call to turn the
2654	 * display on, so we do it here
2655	 */
2656 if (sink != aconnector->dc_sink) {
2657 struct dm_connector_state *dm_state =
2658 to_dm_connector_state(aconnector->base.state);
2659
2660 struct dc_target *new_target =
2661 create_target_for_sink(
2662 aconnector,
2663 &disconnected_acrtc->base.state->mode,
2664 dm_state);
2665
2666 DRM_INFO("Headless hotplug, restoring connector state\n");
2667		/*
2668		 * disable vblanks and pflips on the crtc that
2669		 * is about to be changed
2670		 */
2671 manage_dm_interrupts(adev, disconnected_acrtc, false);
2672 /* this is the update mode case */
2673
2674 current_target = disconnected_acrtc->target;
2675
2676 disconnected_acrtc->target = new_target;
2677 disconnected_acrtc->enabled = true;
2678 disconnected_acrtc->hw_mode = disconnected_acrtc->base.state->mode;
2679
2680 commit_targets_count = 0;
2681
2682 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2683 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2684
2685 if (acrtc->target) {
2686 commit_targets[commit_targets_count] = acrtc->target;
2687 ++commit_targets_count;
2688 }
2689 }
2690
2691 /* DC is optimized not to do anything if 'targets' didn't change. */
2692 if (!dc_commit_targets(dc, commit_targets,
2693 commit_targets_count)) {
2694 DRM_INFO("Failed to restore connector state!\n");
2695 dc_target_release(disconnected_acrtc->target);
2696 disconnected_acrtc->target = current_target;
2697 manage_dm_interrupts(adev, disconnected_acrtc, true);
2698 return;
2699 }
2700
2701 if (adev->dm.freesync_module) {
2702
2703 for (i = 0; i < current_target->stream_count; i++)
2704 mod_freesync_remove_stream(
2705 adev->dm.freesync_module,
2706 current_target->streams[i]);
2707
2708 for (i = 0; i < new_target->stream_count; i++)
2709 mod_freesync_add_stream(
2710 adev->dm.freesync_module,
2711 new_target->streams[i],
2712 &aconnector->caps);
2713 }
2714 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2715 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2716
2717 if (acrtc->target != NULL) {
2718 acrtc->otg_inst =
2719 dc_target_get_status(acrtc->target)->primary_otg_inst;
2720 }
2721 }
2722
2723 dc_target_release(current_target);
2724
2725 dm_dc_surface_commit(dc, &disconnected_acrtc->base);
2726
2727 manage_dm_interrupts(adev, disconnected_acrtc, true);
2728 dm_crtc_cursor_reset(&disconnected_acrtc->base);
2729
2730 }
2731}
2732
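/*
 * Attach @surface to the validation-set entry whose target matches @target.
 * The callers guarantee that the target is already present in @val_sets, so
 * the lookup always succeeds.
 */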
2733static uint32_t add_val_sets_surface(
2734 struct dc_validation_set *val_sets,
2735 uint32_t set_count,
2736 const struct dc_target *target,
2737 const struct dc_surface *surface)
2738{
2739 uint32_t i = 0;
2740
2741 while (i < set_count) {
2742 if (val_sets[i].target == target)
2743 break;
2744 ++i;
2745 }
2746
2747 val_sets[i].surfaces[val_sets[i].surface_count] = surface;
2748 val_sets[i].surface_count++;
2749
2750 return val_sets[i].surface_count;
2751}
2752
2753static uint32_t update_in_val_sets_target(
2754 struct dc_validation_set *val_sets,
2755 struct drm_crtc **crtcs,
2756 uint32_t set_count,
2757 const struct dc_target *old_target,
2758 const struct dc_target *new_target,
2759 struct drm_crtc *crtc)
2760{
2761 uint32_t i = 0;
2762
2763 while (i < set_count) {
2764 if (val_sets[i].target == old_target)
2765 break;
2766 ++i;
2767 }
2768
2769 val_sets[i].target = new_target;
2770 crtcs[i] = crtc;
2771
2772 if (i == set_count) {
2773 /* nothing found. add new one to the end */
2774 return set_count + 1;
2775 }
2776
2777 return set_count;
2778}
2779
2780static uint32_t remove_from_val_sets(
2781 struct dc_validation_set *val_sets,
2782 uint32_t set_count,
2783 const struct dc_target *target)
2784{
2785 int i;
2786
2787 for (i = 0; i < set_count; i++)
2788 if (val_sets[i].target == target)
2789 break;
2790
2791 if (i == set_count) {
2792 /* nothing found */
2793 return set_count;
2794 }
2795
2796 set_count--;
2797
2798 for (; i < set_count; i++) {
2799 val_sets[i] = val_sets[i + 1];
2800 }
2801
2802 return set_count;
2803}
2804
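/*
 * Check an atomic state against DC: start from the currently committed
 * targets, apply the targets this state would add, change or remove, attach
 * the affected surfaces, and ask dc_validate_resources() whether the
 * resulting configuration is supported.
 */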
2805int amdgpu_dm_atomic_check(struct drm_device *dev,
2806 struct drm_atomic_state *state)
2807{
2808 struct drm_crtc *crtc;
2809 struct drm_crtc_state *crtc_state;
2810 struct drm_plane *plane;
2811 struct drm_plane_state *plane_state;
2812 int i, j;
2813 int ret;
2814 int set_count;
2815 int new_target_count;
2816 struct dc_validation_set set[MAX_TARGETS] = {{ 0 }};
2817 struct dc_target *new_targets[MAX_TARGETS] = { 0 };
2818 struct drm_crtc *crtc_set[MAX_TARGETS] = { 0 };
2819 struct amdgpu_device *adev = dev->dev_private;
2820 struct dc *dc = adev->dm.dc;
2821 bool need_to_validate = false;
2822
2823 ret = drm_atomic_helper_check(dev, state);
2824
2825 if (ret) {
2826		DRM_ERROR("Atomic state validation failed with error %d!\n",
2827			ret);
2828 return ret;
2829 }
2830
2831 ret = -EINVAL;
2832
2833 /* copy existing configuration */
2834 new_target_count = 0;
2835 set_count = 0;
2836 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2837
2838 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2839
2840 if (acrtc->target) {
2841 set[set_count].target = acrtc->target;
2842 crtc_set[set_count] = crtc;
2843 ++set_count;
2844 }
2845 }
2846
2847 /* update changed items */
2848 for_each_crtc_in_state(state, crtc, crtc_state, i) {
2849 struct amdgpu_crtc *acrtc = NULL;
2850 struct amdgpu_connector *aconnector = NULL;
2851 enum dm_commit_action action;
2852
2853 acrtc = to_amdgpu_crtc(crtc);
2854
2855 aconnector = amdgpu_dm_find_first_crct_matching_connector(state, crtc, true);
2856
2857 action = get_dm_commit_action(crtc_state);
2858
2859 switch (action) {
2860 case DM_COMMIT_ACTION_DPMS_ON:
2861 case DM_COMMIT_ACTION_SET: {
2862 struct dc_target *new_target = NULL;
2863 struct drm_connector_state *conn_state = NULL;
2864 struct dm_connector_state *dm_state = NULL;
2865
2866 if (aconnector) {
2867 conn_state = drm_atomic_get_connector_state(state, &aconnector->base);
2868 if (IS_ERR(conn_state))
2869 return ret;
2870 dm_state = to_dm_connector_state(conn_state);
2871 }
2872
2873 new_target = create_target_for_sink(aconnector, &crtc_state->mode, dm_state);
2874
2875			/*
2876			 * we can have no target on ACTION_SET if a display
2877			 * was disconnected during S3; in this case it is not
2878			 * an error, the OS will be updated after detection
2879			 * and do the right thing on the next atomic commit
2880			 */
2881 if (!new_target) {
2882 DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n",
2883 __func__, acrtc->base.base.id);
2884 break;
2885 }
2886
2887 new_targets[new_target_count] = new_target;
2888 set_count = update_in_val_sets_target(
2889 set,
2890 crtc_set,
2891 set_count,
2892 acrtc->target,
2893 new_target,
2894 crtc);
2895
2896 new_target_count++;
2897 need_to_validate = true;
2898 break;
2899 }
2900
2901 case DM_COMMIT_ACTION_NOTHING: {
2902 const struct drm_connector *drm_connector = NULL;
2903 struct drm_connector_state *conn_state = NULL;
2904 struct dm_connector_state *dm_state = NULL;
2905 struct dm_connector_state *old_dm_state = NULL;
2906 struct dc_target *new_target;
2907
2908 if (!aconnector)
2909 break;
2910
2911 for_each_connector_in_state(
2912 state, drm_connector, conn_state, j) {
2913 if (&aconnector->base == drm_connector)
2914 break;
2915 }
2916
2917 old_dm_state = to_dm_connector_state(drm_connector->state);
2918 dm_state = to_dm_connector_state(conn_state);
2919
2920			/* Support underscan adjustment */
2921 if (!is_scaling_state_different(dm_state, old_dm_state))
2922 break;
2923
2924 new_target = create_target_for_sink(aconnector, &crtc_state->mode, dm_state);
2925
2926 if (!new_target) {
2927 DRM_ERROR("%s: Failed to create new target for crtc %d\n",
2928 __func__, acrtc->base.base.id);
2929 break;
2930 }
2931
2932 new_targets[new_target_count] = new_target;
2933 set_count = update_in_val_sets_target(
2934 set,
2935 crtc_set,
2936 set_count,
2937 acrtc->target,
2938 new_target,
2939 crtc);
2940
2941 new_target_count++;
2942 need_to_validate = true;
2943
2944 break;
2945 }
2946 case DM_COMMIT_ACTION_DPMS_OFF:
2947 case DM_COMMIT_ACTION_RESET:
2948 /* i.e. reset mode */
2949 if (acrtc->target) {
2950 set_count = remove_from_val_sets(
2951 set,
2952 set_count,
2953 acrtc->target);
2954 }
2955 break;
2956 }
2957
2958 /*
2959 * TODO revisit when removing commit action
2960 * and looking at atomic flags directly
2961 */
2962
2963		/* commit needs planes right now (e.g. for gamma) */
2964		/* TODO rework commit to check crtc for gamma change */
2965 ret = drm_atomic_add_affected_planes(state, crtc);
2966 if (ret)
2967 return ret;
2968 }
2969
2970 for (i = 0; i < set_count; i++) {
2971 for_each_plane_in_state(state, plane, plane_state, j) {
2972 struct drm_plane_state *old_plane_state = plane->state;
2973 struct drm_crtc *crtc = plane_state->crtc;
2974 struct drm_framebuffer *fb = plane_state->fb;
2975 struct drm_connector *connector;
2976 struct dm_connector_state *dm_state = NULL;
2977 enum dm_commit_action action;
2978 struct drm_crtc_state *crtc_state;
2979
2980
2981 if (!fb || !crtc || crtc_set[i] != crtc ||
2982 !crtc->state->planes_changed || !crtc->state->active)
2983 continue;
2984
2985 action = get_dm_commit_action(crtc->state);
2986
2987 /* Surfaces are created under two scenarios:
2988 * 1. This commit is not a page flip.
2989 * 2. This commit is a page flip, and targets are created.
2990 */
2991 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2992 if (!page_flip_needed(plane_state, old_plane_state,
2993 crtc_state->event, true) ||
2994 action == DM_COMMIT_ACTION_DPMS_ON ||
2995 action == DM_COMMIT_ACTION_SET) {
2996 struct dc_surface *surface;
2997
2998 list_for_each_entry(connector,
2999 &dev->mode_config.connector_list, head) {
3000 if (connector->state->crtc == crtc) {
3001 dm_state = to_dm_connector_state(
3002 connector->state);
3003 break;
3004 }
3005 }
3006
3007			/*
3008			 * This situation happens in the following case:
3009			 * we are about to set a mode on a connector whose only
3010			 * possible crtc (in the encoder crtc mask) is used by
3011			 * another connector, so the core will try to
3012			 * re-assign crtcs in order to make the configuration
3013			 * supported. Once our implementation makes all
3014			 * encoders support all crtcs, this issue will
3015			 * never arise again, but the check is left in
3016			 * to guard against it.
3017			 *
3018			 * It will also be needed when the actual
3019			 * drm_atomic_commit ioctl is used in the future
3020			 */
3021 if (!dm_state)
3022 continue;
3023
3024			surface = dc_create_surface(dc);
			if (!surface)
				continue; /* skip this surface on allocation failure */
3025 fill_plane_attributes(
3026 surface,
3027 plane_state,
3028 false);
3029
3030 add_val_sets_surface(
3031 set,
3032 set_count,
3033 set[i].target,
3034 surface);
3035
3036 need_to_validate = true;
3037 }
3038 }
3039 }
3040
3041	if (!need_to_validate || set_count == 0 ||
3042 dc_validate_resources(dc, set, set_count))
3043 ret = 0;
3044
3045 for (i = 0; i < set_count; i++) {
3046 for (j = 0; j < set[i].surface_count; j++) {
3047 dc_surface_release(set[i].surfaces[j]);
3048 }
3049 }
3050 for (i = 0; i < new_target_count; i++)
3051 dc_target_release(new_targets[i]);
3052
3053 if (ret != 0)
3054 DRM_ERROR("Atomic check failed.\n");
3055
3056 return ret;
3057}
3058
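/*
 * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and report whether
 * MSA_TIMING_PAR_IGNORED is set, i.e. whether the sink can operate without
 * MSA timing parameters; the freesync code below uses this as a gate.
 */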
3059static bool is_dp_capable_without_timing_msa(
3060 struct dc *dc,
3061 struct amdgpu_connector *amdgpu_connector)
3062{
3063 uint8_t dpcd_data;
3064 bool capable = false;
3065	if (amdgpu_connector->dc_link &&
3066		dc_read_dpcd(dc, amdgpu_connector->dc_link->link_index,
3067				DP_DOWN_STREAM_PORT_COUNT,
3068				&dpcd_data, sizeof(dpcd_data)))
3069		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
3070
3071 return capable;
3072}
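
/*
 * Parse the monitor range descriptor from @edid (DP/eDP sinks only) and,
 * if the sink supports a continuous refresh range wider than 10 Hz, record
 * its freesync capabilities on the connector.
 */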
3073void amdgpu_dm_add_sink_to_freesync_module(
3074 struct drm_connector *connector,
3075 struct edid *edid)
3076{
3077 int i;
3078 uint64_t val_capable;
3079 bool edid_check_required;
3080 struct detailed_timing *timing;
3081 struct detailed_non_pixel *data;
3082 struct detailed_data_monitor_range *range;
3083 struct amdgpu_connector *amdgpu_connector =
3084 to_amdgpu_connector(connector);
3085
3086 struct drm_device *dev = connector->dev;
3087 struct amdgpu_device *adev = dev->dev_private;
3088 edid_check_required = false;
3089 if (!amdgpu_connector->dc_sink) {
3090		DRM_ERROR("dc_sink NULL, could not add sink to freesync module.\n");
3091 return;
3092 }
3093 if (!adev->dm.freesync_module)
3094 return;
3095	/*
3096	 * if the EDID is non-NULL, restrict freesync support to DP and eDP only
3097	 */
3098 if (edid) {
3099 if (amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
3100 || amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
3101 edid_check_required = is_dp_capable_without_timing_msa(
3102 adev->dm.dc,
3103 amdgpu_connector);
3104 }
3105 }
3106 val_capable = 0;
3107	if (edid_check_required && (edid->version > 1 ||
3108 (edid->version == 1 && edid->revision > 1))) {
3109 for (i = 0; i < 4; i++) {
3110
3111 timing = &edid->detailed_timings[i];
3112 data = &timing->data.other_data;
3113 range = &data->data.range;
3114 /*
3115 * Check if monitor has continuous frequency mode
3116 */
3117 if (data->type != EDID_DETAIL_MONITOR_RANGE)
3118 continue;
3119			/*
3120			 * Check for flagged range-limits only. If flags == 1,
3121			 * no additional timing information is provided.
3122			 * Default GTF, GTF secondary curve and CVT are not
3123			 * supported
3124			 */
3125 if (range->flags != 1)
3126 continue;
3127
3128 amdgpu_connector->min_vfreq = range->min_vfreq;
3129 amdgpu_connector->max_vfreq = range->max_vfreq;
3130 amdgpu_connector->pixel_clock_mhz =
3131 range->pixel_clock_mhz * 10;
3132 break;
3133 }
3134
3135 if (amdgpu_connector->max_vfreq -
3136 amdgpu_connector->min_vfreq > 10) {
3137 amdgpu_connector->caps.supported = true;
3138 amdgpu_connector->caps.min_refresh_in_micro_hz =
3139 amdgpu_connector->min_vfreq * 1000000;
3140 amdgpu_connector->caps.max_refresh_in_micro_hz =
3141 amdgpu_connector->max_vfreq * 1000000;
3142 val_capable = 1;
3143 }
3144 }
3145
3146 /*
3147 * TODO figure out how to notify user-mode or DRM of freesync caps
3148 * once we figure out how to deal with freesync in an upstreamable
3149 * fashion
3150 */
3151
3152}
3153
3154void amdgpu_dm_remove_sink_from_freesync_module(
3155 struct drm_connector *connector)
3156{
3157 /*
3158 * TODO fill in once we figure out how to deal with freesync in
3159 * an upstreamable fashion
3160 */
3161}