/*
 * Copyright 2012-13 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/types.h>
#include <linux/version.h>

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_edid.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "dm_services_types.h"

// We need to #undef FRAME_SIZE and DEPRECATED because they conflict
// with ptrace-abi.h's #define's of them.
#undef FRAME_SIZE
#undef DEPRECATED

#include "dc.h"

#include "amdgpu_dm_types.h"
#include "amdgpu_dm_mst_types.h"

#include "modules/inc/mod_freesync.h"

struct dm_connector_state {
	struct drm_connector_state base;

	enum amdgpu_rmx_type scaling;
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
};

#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)


void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

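/* Program the hardware cursor for the given CRTC: the attributes (BO
 * address, size, pre-multiplied-alpha format) and the position are
 * handed to DC in two separate calls below. Negative on-screen
 * coordinates are clamped to zero and folded into the hotspot offsets
 * instead. */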
static void dm_set_cursor(
	struct amdgpu_crtc *amdgpu_crtc,
	uint64_t gpu_addr,
	uint32_t width,
	uint32_t height)
{
	struct dc_cursor_attributes attributes;
	struct dc_cursor_position position;
	struct drm_crtc *crtc = &amdgpu_crtc->base;
	int x, y;
	int xorigin = 0, yorigin = 0;

	amdgpu_crtc->cursor_width = width;
	amdgpu_crtc->cursor_height = height;

	attributes.address.high_part = upper_32_bits(gpu_addr);
	attributes.address.low_part = lower_32_bits(gpu_addr);
	attributes.width = width;
	attributes.height = height;
	attributes.x_hot = 0;
	attributes.y_hot = 0;
	attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	x = amdgpu_crtc->cursor_x;
	y = amdgpu_crtc->cursor_y;

	/* avivo cursors are offset into the total surface */
	x += crtc->primary->state->src_x >> 16;
	y += crtc->primary->state->src_y >> 16;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}

	position.enable = true;
	position.x = x;
	position.y = y;

	position.hot_spot_enable = true;
	position.x_hotspot = xorigin;
	position.y_hotspot = yorigin;

	if (!dc_stream_set_cursor_attributes(
			amdgpu_crtc->stream,
			&attributes)) {
		DRM_ERROR("DC failed to set cursor attributes\n");
	}

	if (!dc_stream_set_cursor_position(
			amdgpu_crtc->stream,
			&position)) {
		DRM_ERROR("DC failed to set cursor position\n");
	}
}

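/* The usual amdgpu buffer-object discipline applies to the cursor BO as
 * well: the object must be reserved before it can be pinned to or
 * unpinned from VRAM, and unreserved afterwards; the GEM reference is
 * dropped once the BO is no longer used as the cursor. */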
static int dm_crtc_unpin_cursor_bo_old(
	struct amdgpu_crtc *amdgpu_crtc)
{
	struct amdgpu_bo *robj;
	int ret = 0;

	if (NULL != amdgpu_crtc && NULL != amdgpu_crtc->cursor_bo) {
		robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

		ret = amdgpu_bo_reserve(robj, false);

		if (likely(ret == 0)) {
			ret = amdgpu_bo_unpin(robj);

			if (unlikely(ret != 0)) {
				DRM_ERROR(
					"%s: unpin failed (ret=%d), bo %p\n",
					__func__,
					ret,
					amdgpu_crtc->cursor_bo);
			}

			amdgpu_bo_unreserve(robj);
		} else {
			DRM_ERROR(
				"%s: reserve failed (ret=%d), bo %p\n",
				__func__,
				ret,
				amdgpu_crtc->cursor_bo);
		}

		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
		amdgpu_crtc->cursor_bo = NULL;
	}

	return ret;
}

static int dm_crtc_pin_cursor_bo_new(
	struct drm_crtc *crtc,
	struct drm_file *file_priv,
	uint32_t handle,
	struct amdgpu_bo **ret_obj)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_bo *robj;
	struct drm_gem_object *obj;
	int ret = -EINVAL;

	if (NULL != crtc) {
		struct drm_device *dev = crtc->dev;
		struct amdgpu_device *adev = dev->dev_private;
		uint64_t gpu_addr;

		amdgpu_crtc = to_amdgpu_crtc(crtc);

		obj = drm_gem_object_lookup(file_priv, handle);

		if (!obj) {
			DRM_ERROR(
				"Cannot find cursor object %x for crtc %d\n",
				handle,
				amdgpu_crtc->crtc_id);
			goto release;
		}
		robj = gem_to_amdgpu_bo(obj);

		ret = amdgpu_bo_reserve(robj, false);

		if (unlikely(ret != 0)) {
			drm_gem_object_unreference_unlocked(obj);
			DRM_ERROR("%s: reserve failed (ret=%d), handle %x\n",
				  __func__, ret, handle);
			goto release;
		}

		ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, 0,
					       adev->mc.visible_vram_size,
					       &gpu_addr);

		if (ret == 0) {
			amdgpu_crtc->cursor_addr = gpu_addr;
			*ret_obj = robj;
		}
		amdgpu_bo_unreserve(robj);
		if (ret)
			drm_gem_object_unreference_unlocked(obj);

	}
release:

	return ret;
}

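/* Legacy (non-atomic) cursor_set entry point: handle == 0 hides the
 * cursor; otherwise the new BO is validated against the maximum cursor
 * size, pinned, programmed to the hardware, and only then is the old
 * BO unpinned and replaced in the per-CRTC cache. */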
static int dm_crtc_cursor_set(
	struct drm_crtc *crtc,
	struct drm_file *file_priv,
	uint32_t handle,
	uint32_t width,
	uint32_t height)
{
	struct amdgpu_bo *new_cursor_bo;
	struct dc_cursor_position position;

	int ret;

	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	ret = -EINVAL;
	new_cursor_bo = NULL;

	DRM_DEBUG_KMS(
		"%s: crtc_id=%d with handle %d and size %d to %d, bo_object %p\n",
		__func__,
		amdgpu_crtc->crtc_id,
		handle,
		width,
		height,
		amdgpu_crtc->cursor_bo);

	if (!handle) {
		/* turn off cursor */
		position.enable = false;
		position.x = 0;
		position.y = 0;
		position.hot_spot_enable = false;

		if (amdgpu_crtc->stream) {
			/* set cursor visible false */
			dc_stream_set_cursor_position(
				amdgpu_crtc->stream,
				&position);
		}
		/* unpin old cursor buffer and update cache */
		ret = dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
		goto release;

	}

	if ((width > amdgpu_crtc->max_cursor_width) ||
		(height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR(
			"%s: bad cursor width or height %d x %d\n",
			__func__,
			width,
			height);
		goto release;
	}
	/* try to pin the new cursor bo */
	ret = dm_crtc_pin_cursor_bo_new(crtc, file_priv, handle, &new_cursor_bo);
	/* if the mapping was not successful then return an error */
	if (ret)
		goto release;

	/* program the new cursor bo to hardware */
	dm_set_cursor(amdgpu_crtc, amdgpu_crtc->cursor_addr, width, height);

	/* unmap the old, no longer used cursor bo,
	 * returning its memory and mapping */
	dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);

	/* assign the new cursor bo to our internal cache */
	amdgpu_crtc->cursor_bo = &new_cursor_bo->gem_base;

release:
	return ret;

}

static int dm_crtc_cursor_move(struct drm_crtc *crtc,
				     int x, int y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int xorigin = 0, yorigin = 0;
	struct dc_cursor_position position;

	amdgpu_crtc->cursor_x = x;
	amdgpu_crtc->cursor_y = y;

	/* avivo cursors are offset into the total surface */
	x += crtc->primary->state->src_x >> 16;
	y += crtc->primary->state->src_y >> 16;

	/*
	 * TODO: for cursor debugging unguard the following
	 */
#if 0
	DRM_DEBUG_KMS(
		"%s: x %d y %d c->x %d c->y %d\n",
		__func__,
		x,
		y,
		crtc->x,
		crtc->y);
#endif

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}

	position.enable = true;
	position.x = x;
	position.y = y;

	position.hot_spot_enable = true;
	position.x_hotspot = xorigin;
	position.y_hotspot = yorigin;

	if (amdgpu_crtc->stream) {
		if (!dc_stream_set_cursor_position(
				amdgpu_crtc->stream,
				&position)) {
			DRM_ERROR("DC failed to set cursor position\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void dm_crtc_cursor_reset(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	DRM_DEBUG_KMS(
		"%s: with cursor_bo %p\n",
		__func__,
		amdgpu_crtc->cursor_bo);

	if (amdgpu_crtc->cursor_bo && amdgpu_crtc->stream) {
		dm_set_cursor(
			amdgpu_crtc,
			amdgpu_crtc->cursor_addr,
			amdgpu_crtc->cursor_width,
			amdgpu_crtc->cursor_height);
	}
}
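
/* Plane source coordinates arrive from DRM in 16.16 fixed point (e.g. a
 * 1920-pixel-wide source is 1920 << 16), so they are shifted down by 16
 * below; destination/CRTC coordinates are plain integers. */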
static bool fill_rects_from_plane_state(
	const struct drm_plane_state *state,
	struct dc_surface *surface)
{
	surface->src_rect.x = state->src_x >> 16;
	surface->src_rect.y = state->src_y >> 16;
	/* for now we ignore the mantissa and do not deal with fractional pixels :( */
	surface->src_rect.width = state->src_w >> 16;

	if (surface->src_rect.width == 0)
		return false;

	surface->src_rect.height = state->src_h >> 16;
	if (surface->src_rect.height == 0)
		return false;

	surface->dst_rect.x = state->crtc_x;
	surface->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return false;

	surface->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return false;

	surface->dst_rect.height = state->crtc_h;

	surface->clip_rect = surface->dst_rect;

	switch (state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		surface->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		surface->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		surface->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		surface->rotation = ROTATION_ANGLE_270;
		break;
	default:
		surface->rotation = ROTATION_ANGLE_0;
		break;
	}

	return true;
}
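
/* Querying the GPU offset and tiling flags is only valid while the BO
 * is reserved, hence the reserve/unreserve pair around both queries. */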
static bool get_fb_info(
	const struct amdgpu_framebuffer *amdgpu_fb,
	uint64_t *tiling_flags,
	uint64_t *fb_location)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
	int r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r != 0)) {
		DRM_ERROR("Unable to reserve buffer\n");
		return false;
	}

	if (fb_location)
		*fb_location = amdgpu_bo_gpu_offset(rbo);

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	amdgpu_bo_unreserve(rbo);

	return true;
}

static void fill_plane_attributes_from_fb(
	struct amdgpu_device *adev,
	struct dc_surface *surface,
	const struct amdgpu_framebuffer *amdgpu_fb, bool addReq)
{
	uint64_t tiling_flags;
	uint64_t fb_location = 0;
	const struct drm_framebuffer *fb = &amdgpu_fb->base;
	struct drm_format_name_buf format_name;

	get_fb_info(
		amdgpu_fb,
		&tiling_flags,
		addReq == true ? &fb_location : NULL);

	surface->address.type = PLN_ADDR_TYPE_GRAPHICS;
	surface->address.grph.addr.low_part = lower_32_bits(fb_location);
	surface->address.grph.addr.high_part = upper_32_bits(fb_location);

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		surface->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		surface->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		surface->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(fb->format->format, &format_name));
		return;
	}

	memset(&surface->tiling_info, 0, sizeof(surface->tiling_info));

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		surface->tiling_info.gfx8.num_banks = num_banks;
		surface->tiling_info.gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		surface->tiling_info.gfx8.tile_split = tile_split;
		surface->tiling_info.gfx8.bank_width = bankw;
		surface->tiling_info.gfx8.bank_height = bankh;
		surface->tiling_info.gfx8.tile_aspect = mtaspect;
		surface->tiling_info.gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		surface->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	surface->tiling_info.gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	if (adev->asic_type == CHIP_VEGA10) {
		/* Fill GFX9 params */
		surface->tiling_info.gfx9.num_pipes =
			adev->gfx.config.gb_addr_config_fields.num_pipes;
		surface->tiling_info.gfx9.num_banks =
			adev->gfx.config.gb_addr_config_fields.num_banks;
		surface->tiling_info.gfx9.pipe_interleave =
			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
		surface->tiling_info.gfx9.num_shader_engines =
			adev->gfx.config.gb_addr_config_fields.num_se;
		surface->tiling_info.gfx9.max_compressed_frags =
			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
		surface->tiling_info.gfx9.num_rb_per_se =
			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
		surface->tiling_info.gfx9.swizzle =
			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
		surface->tiling_info.gfx9.shaderEnable = 1;
	}


	surface->plane_size.grph.surface_size.x = 0;
	surface->plane_size.grph.surface_size.y = 0;
	surface->plane_size.grph.surface_size.width = fb->width;
	surface->plane_size.grph.surface_size.height = fb->height;
	surface->plane_size.grph.surface_pitch =
		fb->pitches[0] / fb->format->cpp[0];

	surface->visible = true;
	surface->scaling_quality.h_taps_c = 0;
	surface->scaling_quality.v_taps_c = 0;

	/* TODO: unhardcode */
	surface->color_space = COLOR_SPACE_SRGB;
	/* is this needed? is the surface zeroed at allocation? */
	surface->scaling_quality.h_taps = 0;
	surface->scaling_quality.v_taps = 0;
	surface->stereo_format = PLANE_STEREO_FORMAT_NONE;

}

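/* The legacy DRM gamma LUT is a flat array of 256 drm_color_lut entries;
 * it is copied verbatim into a DC gamma object, which the surface then
 * owns as its gamma correction. */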
#define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256

static void fill_gamma_from_crtc(
	const struct drm_crtc *crtc,
	struct dc_surface *dc_surface)
{
	int i;
	struct dc_gamma *gamma;
	struct drm_crtc_state *state = crtc->state;
	struct drm_color_lut *lut = (struct drm_color_lut *) state->gamma_lut->data;

	gamma = dc_create_gamma();

	if (gamma == NULL)
		return;

	for (i = 0; i < NUM_OF_RAW_GAMMA_RAMP_RGB_256; i++) {
		gamma->red[i] = lut[i].red;
		gamma->green[i] = lut[i].green;
		gamma->blue[i] = lut[i].blue;
	}

	dc_surface->gamma_correction = gamma;
}

static void fill_plane_attributes(
	struct amdgpu_device *adev,
	struct dc_surface *surface,
	struct drm_plane_state *state, bool addrReq)
{
	const struct amdgpu_framebuffer *amdgpu_fb =
		to_amdgpu_framebuffer(state->fb);
	const struct drm_crtc *crtc = state->crtc;
	struct dc_transfer_func *input_tf;

	fill_rects_from_plane_state(state, surface);
	fill_plane_attributes_from_fb(
		crtc->dev->dev_private,
		surface,
		amdgpu_fb,
		addrReq);

	input_tf = dc_create_transfer_func();

	if (input_tf == NULL)
		return;

	input_tf->type = TF_TYPE_PREDEFINED;
	input_tf->tf = TRANSFER_FUNCTION_SRGB;

	surface->in_transfer_func = input_tf;

	/* In case of gamma set, update gamma value */
	if (state->crtc->state->gamma_lut)
		fill_gamma_from_crtc(crtc, surface);
}

627
628struct amdgpu_connector *aconnector_from_drm_crtc_id(
629 const struct drm_crtc *crtc)
630{
631 struct drm_device *dev = crtc->dev;
632 struct drm_connector *connector;
633 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
634 struct amdgpu_connector *aconnector;
635
636 list_for_each_entry(connector,
637 &dev->mode_config.connector_list, head) {
638
639 aconnector = to_amdgpu_connector(connector);
640
641 if (aconnector->base.state->crtc != &acrtc->base)
642 continue;
643
644 /* Found the connector */
645 return aconnector;
646 }
647
648 /* If we get here, not found. */
649 return NULL;
650}
651
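/* RMX_ASPECT scaling keeps the source aspect ratio by comparing the
 * cross products src.width * dst.height and src.height * dst.width,
 * which avoids a division: whichever destination dimension would
 * over-stretch is shrunk, and the result is centered. For example, a
 * 1280x1024 mode on a 1920x1080 panel gives 1280*1080 < 1024*1920, so
 * dst.width becomes 1280 * 1080 / 1024 = 1350, pillarboxed at
 * x = (1920 - 1350) / 2 = 285. */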
static void update_stream_scaling_settings(
	const struct drm_display_mode *mode,
	const struct dm_connector_state *dm_state,
	const struct dc_stream *stream)
{
	struct amdgpu_device *adev = dm_state->base.crtc->dev->dev_private;
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	rmx_type = dm_state->scaling;
	if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
		if (src.width * dst.height <
				src.height * dst.width) {
			/* height needs less upscaling/more downscaling */
			dst.width = src.width *
					dst.height / src.height;
		} else {
			/* width needs less upscaling/more downscaling */
			dst.height = src.height *
					dst.width / src.width;
		}
	} else if (rmx_type == RMX_CENTER) {
		dst = src;
	}

	dst.x = (stream->timing.h_addressable - dst.width) / 2;
	dst.y = (stream->timing.v_addressable - dst.height) / 2;

	if (dm_state->underscan_enable) {
		dst.x += dm_state->underscan_hborder / 2;
		dst.y += dm_state->underscan_vborder / 2;
		dst.width -= dm_state->underscan_hborder;
		dst.height -= dm_state->underscan_vborder;
	}

	adev->dm.dc->stream_funcs.stream_update_scaling(adev->dm.dc, stream, &src, &dst);

	DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
			dst.x, dst.y, dst.width, dst.height);

}

static void dm_dc_surface_commit(
	struct dc *dc,
	struct drm_crtc *crtc)
{
	struct dc_surface *dc_surface;
	const struct dc_surface *dc_surfaces[1];
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	const struct dc_stream *dc_stream = acrtc->stream;
	unsigned long flags;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_ERROR("dm_dc_surface_commit: acrtc %d, already busy\n", acrtc->crtc_id);
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		/* In the commit tail framework this cannot happen */
		BUG();
	}
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	if (!dc_stream) {
		dm_error(
			"%s: Failed to obtain stream on crtc (%d)!\n",
			__func__,
			acrtc->crtc_id);
		goto fail;
	}

	dc_surface = dc_create_surface(dc);

	if (!dc_surface) {
		dm_error(
			"%s: Failed to create a surface!\n",
			__func__);
		goto fail;
	}

	/* Surface programming */
	fill_plane_attributes(
		crtc->dev->dev_private,
		dc_surface,
		crtc->primary->state,
		true);

	dc_surfaces[0] = dc_surface;

	if (false == dc_commit_surfaces_to_stream(
			dc,
			dc_surfaces,
			1,
			dc_stream)) {
		dm_error(
			"%s: Failed to attach surface!\n",
			__func__);
	}

	dc_surface_release(dc_surface);
fail:
	return;
}

static enum dc_color_depth convert_color_depth_from_display_info(
	const struct drm_connector *connector)
{
	uint32_t bpc = connector->display_info.bpc;

	/* Limit color depth to 8 bpc
	 * TODO: Still need to handle deep color */
	if (bpc > 8)
		bpc = 8;

	switch (bpc) {
	case 0:
		/* Temporary work around: DRM doesn't parse color depth for
		 * EDID revisions before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

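/* Cross-multiplication again: a mode is treated as 16:9 when
 * hdisplay * 9 is within +/-10 of vdisplay * 16 (for 1920x1080,
 * 1920 * 9 == 1080 * 16 == 17280, difference 0); everything else
 * falls back to 4:3. */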
static enum dc_aspect_ratio get_aspect_ratio(
	const struct drm_display_mode *mode_in)
{
	int32_t width = mode_in->crtc_hdisplay * 9;
	int32_t height = mode_in->crtc_vdisplay * 16;

	if ((width - height) < 10 && (width - height) > -10)
		return ASPECT_RATIO_16_9;
	else
		return ASPECT_RATIO_4_3;
}

static enum dc_color_space get_output_color_space(
	const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030 kHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
		 * respectively.
		 */
		if (dc_crtc_timing->pix_clk_khz > 27030) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

/*****************************************************************************/

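/* DC timing fields are derived from the DRM CRTC timings: the front
 * porch is sync_start - display and the sync width is
 * sync_end - sync_start. For the CEA 1920x1080@60 mode
 * (hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200) that
 * yields a horizontal front porch of 88 and a sync width of 44. */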
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector)
{
	struct dc_crtc_timing *timing_out = &stream->timing;

	memset(timing_out, 0, sizeof(struct dc_crtc_timing));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */

	if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
			connector);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;
	timing_out->vic = drm_match_cea_mode(mode_in);

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_khz = mode_in->crtc_clock;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
	if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
		timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
	if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
		timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;

	stream->output_color_space = get_output_color_space(timing_out);

	{
		struct dc_transfer_func *tf = dc_create_transfer_func();

		tf->type = TF_TYPE_PREDEFINED;
		tf->tf = TRANSFER_FUNCTION_SRGB;
		stream->out_transfer_func = tf;
	}
}

static void fill_audio_info(
	struct audio_info *audio_info,
	const struct drm_connector *drm_connector,
	const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	while (i < AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS &&
		edid_caps->display_name[i]) {
		audio_info->display_name[i] = edid_caps->display_name[i];
		i++;
	}

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}

static void copy_crtc_timing_for_drm_display_mode(
	const struct drm_display_mode *src_mode,
	struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void decide_crtc_timing_for_drm_display_mode(
	struct drm_display_mode *drm_mode,
	const struct drm_display_mode *native_mode,
	bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
			native_mode->htotal == drm_mode->htotal &&
			native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}

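/* Builds a dc_stream for a connector: create the stream from the DC
 * sink, pick the preferred (native) mode from the probed mode list,
 * patch the CRTC timing if scaling is active, then fill in the timing,
 * scaling and audio properties derived above. */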
static struct dc_stream *create_stream_for_sink(
		struct amdgpu_connector *aconnector,
		const struct drm_display_mode *drm_mode,
		const struct dm_connector_state *dm_state)
{
	struct drm_display_mode *preferred_mode = NULL;
	const struct drm_connector *drm_connector;
	struct dc_stream *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;

	if (NULL == aconnector) {
		DRM_ERROR("aconnector is NULL!\n");
		goto drm_connector_null;
	}

	if (NULL == dm_state) {
		DRM_ERROR("dm_state is NULL!\n");
		goto dm_state_null;
	}

	drm_connector = &aconnector->base;
	stream = dc_create_stream_for_sink(aconnector->dc_sink);

	if (NULL == stream) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto stream_create_fail;
	}

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	if (NULL == preferred_mode) {
		/* This may not be an error; the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the mode list may not be filled in in time.
		 */
		DRM_INFO("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state->scaling != RMX_OFF);
	}

	fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base);
	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		aconnector->dc_sink);

stream_create_fail:
dm_state_null:
drm_connector_null:
	return stream;
}

void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = drm_atomic_helper_crtc_reset,
	.cursor_set = dm_crtc_cursor_set,
	.cursor_move = dm_crtc_cursor_move,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);

	/* Notes:
	 * 1. This interface is NOT called in the context of the HPD irq.
	 * 2. This interface *is called* in the context of a user-mode ioctl,
	 *    which makes it a bad place for *any* MST-related activity. */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(
	struct drm_connector *connector,
	struct drm_connector_state *connector_state,
	struct drm_property *property,
	uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	int i;
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	}

	for_each_crtc_in_state(
			connector_state->state,
			crtc,
			new_crtc_state,
			i) {

		if (crtc == connector_state->crtc) {
			struct drm_plane_state *plane_state;

			/*
			 * Bit of magic done here. We need to ensure
			 * that planes get updated after the mode is set.
			 * So, we add the primary plane to the state,
			 * and this way atomic_update will be called
			 * for it.
			 */
			plane_state =
				drm_atomic_get_plane_state(
					connector_state->state,
					crtc->primary);

			if (!plane_state)
				return -EINVAL;
		}
	}

	return ret;
}

void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev) {
			backlight_device_unregister(dm->backlight_dev);
			dm->backlight_dev = NULL;
		}

	}
#endif
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;

		connector->state = &state->base;
		connector->state->connector = connector;
	}
}

struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
	struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
		kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (new_state) {
		__drm_atomic_helper_connector_duplicate_state(connector,
							      &new_state->base);
		return &new_state->base;
	}

	return NULL;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property
};

static struct drm_encoder *best_encoder(struct drm_connector *connector)
{
	int enc_id = connector->encoder_ids[0];
	struct drm_mode_object *obj;
	struct drm_encoder *encoder;

	DRM_DEBUG_KMS("Finding the best encoder\n");

	/* pick the first encoder id */
	if (enc_id) {
		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
		if (!obj) {
			DRM_ERROR("Couldn't find a matching encoder for our connector\n");
			return NULL;
		}
		encoder = obj_to_encoder(obj);
		return encoder;
	}
	DRM_ERROR("No encoder id\n");
	return NULL;
}

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	/* check the blob pointer before dereferencing it for the EDID data */
	if (!aconnector->base.edid_blob_ptr ||
		!aconnector->base.edid_blob_ptr->data) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
				aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON)
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
			aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
}

static void handle_edid_mgmt(struct amdgpu_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/* In case of headless boot with force on for a DP managed connector,
	 * those settings have to be != 0 to get an initial modeset
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}


	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}

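/* Mode validation works by building a throwaway dc_stream for the
 * candidate mode with zero surfaces attached and asking DC whether the
 * resulting configuration is realizable via dc_validate_resources(). */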
int amdgpu_dm_connector_mode_valid(
	struct drm_connector *connector,
	struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	const struct dc_sink *dc_sink;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct dc_validation_set val_set = { 0 };
	/* TODO: Unhardcode stream count */
	struct dc_stream *stream;
	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
		(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/* Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
			!aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_connector(connector)->dc_sink;

	if (NULL == dc_sink) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto null_sink;
	}

	stream = dc_create_stream_for_sink(dc_sink);
	if (NULL == stream) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto stream_create_fail;
	}

	drm_mode_set_crtcinfo(mode, 0);
	fill_stream_properties_from_drm_display_mode(stream, mode, connector);

	val_set.stream = stream;
	val_set.surface_count = 0;
	stream->src.width = mode->hdisplay;
	stream->src.height = mode->vdisplay;
	stream->dst = stream->src;

	if (dc_validate_resources(adev->dm.dc, &val_set, 1))
		result = MODE_OK;

	dc_stream_release(stream);

stream_create_fail:
null_sink:
	/* TODO: error handling */
	return result;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If we hotplug a second, bigger display in FB Con mode, the bigger
	 * resolution modes will be filtered by drm_mode_validate_size(), and
	 * those modes are missing after the user starts lightdm. So we need
	 * to renew the modes list in the get_modes callback, not just return
	 * the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.best_encoder = best_encoder
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static int dm_crtc_helper_atomic_check(
	struct drm_crtc *crtc,
	struct drm_crtc_state *state)
{
	return 0;
}

static bool dm_crtc_helper_mode_fixup(
	struct drm_crtc *crtc,
	const struct drm_display_mode *mode,
	struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}

static int dm_encoder_helper_atomic_check(
	struct drm_encoder *encoder,
	struct drm_crtc_state *crtc_state,
	struct drm_connector_state *conn_state)
{
	return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};

static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct amdgpu_drm_plane_state *amdgpu_state;

	if (plane->state) {
		amdgpu_state = to_amdgpu_plane_state(plane->state);
		if (amdgpu_state->base.fb)
			drm_framebuffer_unreference(amdgpu_state->base.fb);
		kfree(amdgpu_state);
		plane->state = NULL;
	}

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	if (amdgpu_state) {
		plane->state = &amdgpu_state->base;
		plane->state->plane = plane;
	}
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct amdgpu_drm_plane_state *amdgpu_state;
	struct amdgpu_drm_plane_state *copy;

	amdgpu_state = to_amdgpu_plane_state(plane->state);
	copy = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	if (!copy)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
	return &copy->base;
}

static void dm_drm_plane_destroy_state(struct drm_plane *plane,
					struct drm_plane_state *old_state)
{
	struct amdgpu_drm_plane_state *old_amdgpu_state =
					to_amdgpu_plane_state(old_state);
	__drm_atomic_helper_plane_destroy_state(old_state);
	kfree(old_amdgpu_state);
}

static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};

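/* prepare_fb pins the framebuffer BO into VRAM and records its GPU
 * address in the amdgpu_framebuffer; an extra BO reference is held
 * until cleanup_fb unpins and releases it for the old state. */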
static int dm_plane_helper_prepare_fb(
	struct drm_plane *plane,
	struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *rbo;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);

	obj = afb->obj;
	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);

	amdgpu_bo_unreserve(rbo);

	if (unlikely(r != 0)) {
		DRM_ERROR("Failed to pin framebuffer\n");
		return r;
	}

	amdgpu_bo_ref(rbo);
	return 0;
}

static void dm_plane_helper_cleanup_fb(
	struct drm_plane *plane,
	struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	struct amdgpu_framebuffer *afb;
	int r;

	if (!old_state->fb)
		return;

	afb = to_amdgpu_framebuffer(old_state->fb);
	rbo = gem_to_amdgpu_bo(afb->obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	} else {
		amdgpu_bo_unpin(rbo);
		amdgpu_bo_unreserve(rbo);
		amdgpu_bo_unref(&rbo);
	}

	afb->address = 0;
}

int dm_create_validation_set_for_connector(struct drm_connector *connector,
		struct drm_display_mode *mode, struct dc_validation_set *val_set)
{
	int result = MODE_ERROR;
	const struct dc_sink *dc_sink =
			to_amdgpu_connector(connector)->dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream *stream;

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	if (NULL == dc_sink) {
		DRM_ERROR("dc_sink is NULL!\n");
		return result;
	}

	stream = dc_create_stream_for_sink(dc_sink);

	if (NULL == stream) {
		DRM_ERROR("Failed to create stream for sink!\n");
		return result;
	}

	drm_mode_set_crtcinfo(mode, 0);

	fill_stream_properties_from_drm_display_mode(stream, mode, connector);

	val_set->stream = stream;

	stream->src.width = mode->hdisplay;
	stream->src.height = mode->vdisplay;
	stream->dst = stream->src;

	return MODE_OK;
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
};

/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper checking
 */
static uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB4444,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_RGBA4444,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
};

static uint32_t yuv_formats[] = {
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

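/* Primary planes are initialized with the RGB format list, overlays
 * with the YUV list; a dedicated cursor plane is not implemented here
 * (the legacy cursor_set/cursor_move hooks above serve that purpose). */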
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
			struct amdgpu_plane *aplane,
			unsigned long possible_crtcs)
{
	int res = -EPERM;

	switch (aplane->plane_type) {
	case DRM_PLANE_TYPE_PRIMARY:
		aplane->base.format_default = true;

		res = drm_universal_plane_init(
				dm->adev->ddev,
				&aplane->base,
				possible_crtcs,
				&dm_plane_funcs,
				rgb_formats,
				ARRAY_SIZE(rgb_formats),
				NULL, aplane->plane_type, NULL);
		break;
	case DRM_PLANE_TYPE_OVERLAY:
		res = drm_universal_plane_init(
				dm->adev->ddev,
				&aplane->base,
				possible_crtcs,
				&dm_plane_funcs,
				yuv_formats,
				ARRAY_SIZE(yuv_formats),
				NULL, aplane->plane_type, NULL);
		break;
	case DRM_PLANE_TYPE_CURSOR:
		DRM_ERROR("KMS: Cursor plane not implemented.");
		break;
	}

	drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);

	return res;
}

int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			struct drm_plane *plane,
			uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc;
	int res = -ENOMEM;

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			NULL,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_mode_crtc_set_gamma_size(&acrtc->base, 256);

	return 0;
fail:
	/* writing acrtc->crtc_id after kfree() would be a use-after-free
	 * (and acrtc may be NULL here), so just free the CRTC */
	kfree(acrtc);
	return res;
}

static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	const struct drm_connector_helper_funcs *helper =
			connector->helper_private;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = helper->best_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				&connector->probed_modes,
				head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}

	}
}

static struct drm_display_mode *amdgpu_dm_create_common_mode(
		struct drm_encoder *encoder, char *name,
		int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;

}

static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
		struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_connector *amdgpu_connector =
			to_amdgpu_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = sizeof(common_modes) / sizeof(common_modes[0]);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
			common_modes[i].h > native_mode->vdisplay ||
			(common_modes[i].w == native_mode->hdisplay &&
			common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
				common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_connector->num_modes++;
	}
}

static void amdgpu_dm_connector_ddc_get_modes(
		struct drm_connector *connector,
		struct edid *edid)
{
	struct amdgpu_connector *amdgpu_connector =
			to_amdgpu_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		drm_edid_to_eld(connector, edid);

		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_connector->num_modes = 0;
	}
}

int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	const struct drm_connector_helper_funcs *helper =
			connector->helper_private;
	struct amdgpu_connector *amdgpu_connector =
			to_amdgpu_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_connector->edid;

	encoder = helper->best_encoder(connector);

	amdgpu_dm_connector_ddc_get_modes(connector, edid);
	amdgpu_dm_connector_add_common_modes(encoder, connector);
	return amdgpu_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(
	struct amdgpu_display_manager *dm,
	struct amdgpu_connector *aconnector,
	int connector_type,
	const struct dc_link *link,
	int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */

	mutex_init(&aconnector->hpd_lock);

	/* Configure HPD hot-plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported. */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

}

1933int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
1934 struct i2c_msg *msgs, int num)
1935{
1936 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
1937 struct i2c_command cmd;
1938 int i;
1939 int result = -EIO;
1940
1941 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
1942
1943 if (!cmd.payloads)
1944 return result;
1945
1946 cmd.number_of_payloads = num;
1947 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
1948 cmd.speed = 100;
1949
1950 for (i = 0; i < num; i++) {
     /* I2C_M_RD marks a read request; everything else is a write */
1951 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
1952 cmd.payloads[i].address = msgs[i].addr;
1953 cmd.payloads[i].length = msgs[i].len;
1954 cmd.payloads[i].data = msgs[i].buf;
1955 }
1956
1957 if (dc_submit_i2c(i2c->dm->dc, i2c->link_index, &cmd))
1958 result = num;
1959
1960 kfree(cmd.payloads);
1961
1962 return result;
1963}
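/*
 * Illustrative sketch, not part of the driver: how a caller on the i2c
 * core side might drive ->master_xfer above with a typical DDC/EDID
 * transaction, a one-byte offset write followed by a read from the
 * standard 0x50 EDID slave address. i2c_transfer() returns the number
 * of messages transferred on success:
 *
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = len, .buf = buf },
 *	};
 *
 *	ret = i2c_transfer(&i2c->base, msgs, ARRAY_SIZE(msgs));
 */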
1964
1965u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
1966{
1967 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1968}
1969
1970static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
1971 .master_xfer = amdgpu_dm_i2c_xfer,
1972 .functionality = amdgpu_dm_i2c_func,
1973};
1974
1975struct amdgpu_i2c_adapter *create_i2c(unsigned int link_index, struct amdgpu_display_manager *dm, int *res)
1976{
1977 struct amdgpu_i2c_adapter *i2c;
1978
1979 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
     if (!i2c)
     	return NULL;
1980 i2c->dm = dm;
1981 i2c->base.owner = THIS_MODULE;
1982 i2c->base.class = I2C_CLASS_DDC;
1983 i2c->base.dev.parent = &dm->adev->pdev->dev;
1984 i2c->base.algo = &amdgpu_dm_i2c_algo;
1985 snprintf(i2c->base.name, sizeof (i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
1986 i2c->link_index = link_index;
1987 i2c_set_adapdata(&i2c->base, i2c);
1988
1989 return i2c;
1990}
1991
1992/* Note: this function assumes that dc_link_detect() was called for the
1993 * dc_link which will be represented by this aconnector. */
1994int amdgpu_dm_connector_init(
1995 struct amdgpu_display_manager *dm,
1996 struct amdgpu_connector *aconnector,
1997 uint32_t link_index,
1998 struct amdgpu_encoder *aencoder)
1999{
2000 int res = 0;
2001 int connector_type;
2002 struct dc *dc = dm->dc;
2003 const struct dc_link *link = dc_get_link_at_index(dc, link_index);
2004 struct amdgpu_i2c_adapter *i2c;
2005
2006 DRM_DEBUG_KMS("%s()\n", __func__);
2007
2008 i2c = create_i2c(link->link_index, dm, &res);
     if (!i2c) {
     	DRM_ERROR("Failed to create i2c adapter data\n");
     	return -ENOMEM;
     }
2009 aconnector->i2c = i2c;
2010 res = i2c_add_adapter(&i2c->base);
2011
2012 if (res) {
2013 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
2014 goto out_free;
2015 }
2016
2017 connector_type = to_drm_connector_type(link->connector_signal);
2018
2019 res = drm_connector_init(
2020 dm->ddev,
2021 &aconnector->base,
2022 &amdgpu_dm_connector_funcs,
2023 connector_type);
2024
2025 if (res) {
2026 DRM_ERROR("connector_init failed\n");
2027 aconnector->connector_id = -1;
2028 goto out_free;
2029 }
2030
2031 drm_connector_helper_add(
2032 &aconnector->base,
2033 &amdgpu_dm_connector_helper_funcs);
2034
2035 amdgpu_dm_connector_init_helper(
2036 dm,
2037 aconnector,
2038 connector_type,
2039 link,
2040 link_index);
2041
2042 drm_mode_connector_attach_encoder(
2043 &aconnector->base, &aencoder->base);
2044
2045 drm_connector_register(&aconnector->base);
2046
2047 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
2048 || connector_type == DRM_MODE_CONNECTOR_eDP)
2049 amdgpu_dm_initialize_mst_connector(dm, aconnector);
2050
2051#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2052 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2053
2054 /* NOTE: this currently will create backlight device even if a panel
2055 * is not connected to the eDP/LVDS connector.
2056 *
2057 * This is less than ideal but we don't have sink information at this
2058 * stage since detection happens after. We can't do detection earlier
2059 * since MST detection needs connectors to be created first.
2060 */
2061 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
2062 /* Even if registration failed, we should continue with
2063  * DM initialization because not having a backlight control
2064  * is better than a black screen. */
2065 amdgpu_dm_register_backlight_device(dm);
2066
2067 if (dm->backlight_dev)
2068 dm->backlight_link = link;
2069 }
2070#endif
2071
2072out_free:
2073 if (res) {
2074 kfree(i2c);
2075 aconnector->i2c = NULL;
2076 }
2077 return res;
2078}
2079
2080int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
2081{
2082 switch (adev->mode_info.num_crtc) {
2083 case 1:
2084 return 0x1;
2085 case 2:
2086 return 0x3;
2087 case 3:
2088 return 0x7;
2089 case 4:
2090 return 0xf;
2091 case 5:
2092 return 0x1f;
2093 case 6:
2094 default:
2095 return 0x3f;
2096 }
2097}
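/*
 * Equivalently (a sketch, not the code above): one bit per CRTC,
 * capped at six, i.e. "(1u << min(adev->mode_info.num_crtc, 6)) - 1".
 */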
2098
2099int amdgpu_dm_encoder_init(
2100 struct drm_device *dev,
2101 struct amdgpu_encoder *aencoder,
2102 uint32_t link_index)
2103{
2104 struct amdgpu_device *adev = dev->dev_private;
2105
2106 int res = drm_encoder_init(dev,
2107 &aencoder->base,
2108 &amdgpu_dm_encoder_funcs,
2109 DRM_MODE_ENCODER_TMDS,
2110 NULL);
2111
2112 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
2113
2114 if (!res)
2115 aencoder->encoder_id = link_index;
2116 else
2117 aencoder->encoder_id = -1;
2118
2119 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
2120
2121 return res;
2122}
2123
2124enum dm_commit_action {
2125 DM_COMMIT_ACTION_NOTHING,
2126 DM_COMMIT_ACTION_RESET,
2127 DM_COMMIT_ACTION_DPMS_ON,
2128 DM_COMMIT_ACTION_DPMS_OFF,
2129 DM_COMMIT_ACTION_SET
2130};
2131
2132static enum dm_commit_action get_dm_commit_action(struct drm_crtc_state *state)
2133{
2134 /* mode changed means either actually mode changed or enabled changed */
2135 /* active changed means dpms changed */
2136
2137 DRM_DEBUG_KMS("crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d, active_changed:%d, connectors_changed:%d\n",
2138 state->enable,
2139 state->active,
2140 state->planes_changed,
2141 state->mode_changed,
2142 state->active_changed,
2143 state->connectors_changed);
2144
2145 if (state->mode_changed) {
2146 /* if it got disabled - call reset mode */
2147 if (!state->enable)
2148 return DM_COMMIT_ACTION_RESET;
2149
2150 if (state->active)
2151 return DM_COMMIT_ACTION_SET;
2152 else
2153 return DM_COMMIT_ACTION_RESET;
2154 } else {
2155 /* ! mode_changed */
2156
2157 /* if it remains disabled - skip it */
2158 if (!state->enable)
2159 return DM_COMMIT_ACTION_NOTHING;
2160
2161 if (state->active && state->connectors_changed)
2162 return DM_COMMIT_ACTION_SET;
2163
2164 if (state->active_changed) {
2165 if (state->active) {
2166 return DM_COMMIT_ACTION_DPMS_ON;
2167 } else {
2168 return DM_COMMIT_ACTION_DPMS_OFF;
2169 }
2170 } else {
2171 /* ! active_changed */
2172 return DM_COMMIT_ACTION_NOTHING;
2173 }
2174 }
2175}
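/*
 * Summary of the mapping above:
 *
 *	mode_changed && !enable                        -> RESET
 *	mode_changed && active                         -> SET
 *	mode_changed && !active                        -> RESET
 *	!mode_changed && !enable                       -> NOTHING
 *	!mode_changed && active && connectors_changed  -> SET
 *	!mode_changed && active_changed                -> DPMS_ON / DPMS_OFF
 *	anything else                                  -> NOTHING
 */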
2176
2177static void manage_dm_interrupts(
2178 struct amdgpu_device *adev,
2179 struct amdgpu_crtc *acrtc,
2180 bool enable)
2181{
2182 /*
2183  * this is not a correct translation, but it works as long as the
2184  * VBLANK irq constant is the same as the PFLIP one
2185 */
2186 int irq_type =
2187 amdgpu_crtc_idx_to_irq_type(
2188 adev,
2189 acrtc->crtc_id);
2190
2191 if (enable) {
2192 drm_crtc_vblank_on(&acrtc->base);
2193 amdgpu_irq_get(
2194 adev,
2195 &adev->pageflip_irq,
2196 irq_type);
2197 } else {
2198
2199 amdgpu_irq_put(
2200 adev,
2201 &adev->pageflip_irq,
2202 irq_type);
2203 drm_crtc_vblank_off(&acrtc->base);
2204 }
2205}
2206
2207static bool is_scaling_state_different(
2208 const struct dm_connector_state *dm_state,
2209 const struct dm_connector_state *old_dm_state)
2210{
2211 if (dm_state->scaling != old_dm_state->scaling)
2212 return true;
2213 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
2214 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
2215 return true;
2216 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
2217 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
2218 return true;
2219 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder
2220 || dm_state->underscan_vborder != old_dm_state->underscan_vborder)
2221 return true;
2222 return false;
2223}
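/*
 * In short, a scaling change is reported when the scaling mode differs,
 * when underscan is toggled while non-zero borders were (or are) set,
 * or when either underscan border value changes.
 */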
2224
2225static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc)
2226{
2227 /*
2228  * stop vblank and pflip interrupts on the crtc that is
2229  * about to be changed
2230 */
2231 manage_dm_interrupts(adev, acrtc, false);
2232
2233 /* this is the update mode case */
2234 if (adev->dm.freesync_module)
2235 mod_freesync_remove_stream(adev->dm.freesync_module,
2236 acrtc->stream);
2237
2238 dc_stream_release(acrtc->stream);
2239 acrtc->stream = NULL;
2240 acrtc->otg_inst = -1;
2241 acrtc->enabled = false;
2242}
2243
2244
2245/*
2246 * Executes flip
2247 *
2248 * Waits on all BO's fences and for proper vblank count
2249 */
2250static void amdgpu_dm_do_flip(
2251 struct drm_crtc *crtc,
2252 struct drm_framebuffer *fb,
2253 uint32_t target)
2254{
2255 unsigned long flags;
2256 uint32_t target_vblank;
2257 int r, vpos, hpos;
2258 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2259 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
2260 struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
2261 struct amdgpu_device *adev = crtc->dev->dev_private;
2262 bool async_flip = (acrtc->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
2263
2264
2265 /* TODO: this might fail and is hence better not used; wait
2266  * explicitly on the fences instead.
2267  * In general this should be called for
2268  * blocking commits, as per the framework helpers.
2269  */
2270 r = amdgpu_bo_reserve(abo, true);
2271 if (unlikely(r != 0)) {
2272 DRM_ERROR("failed to reserve buffer before flip\n");
2273 WARN_ON(1); /* BUG_ON(0) was a no-op; warn instead of silently continuing */
2274 }
2275
2276 /* Wait for all fences on this FB */
2277 WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
2278 MAX_SCHEDULE_TIMEOUT) < 0);
2279
2280 amdgpu_bo_unreserve(abo);
2281
2282 /* Wait for target vblank */
2283 /* Wait until we're out of the vertical blank period before the one
2284 * targeted by the flip
2285 */
2286 target_vblank = target - drm_crtc_vblank_count(crtc) +
2287 amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
2288
2289 while ((acrtc->enabled &&
2290 (amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
2291 &vpos, &hpos, NULL, NULL,
2292 &crtc->hwmode)
2293 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
2294 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
2295 (int)(target_vblank -
2296 amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
2297 usleep_range(1000, 1100);
2298 }
2299
2300 /* Flip */
2301 spin_lock_irqsave(&crtc->dev->event_lock, flags);
2302 /* update crtc fb */
2303 crtc->primary->fb = fb;
2304
2305 /* Do the flip (mmio) */
2306 adev->mode_info.funcs->page_flip(adev, acrtc->crtc_id, afb->address, async_flip);
2307
2308 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
2309 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
2310 acrtc->crtc_id);
2311}
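/*
 * Worked example for the wait above: if userspace asked for target
 * vblank N + 1 while drm_crtc_vblank_count() reads N and the hardware
 * counter reads H, then target_vblank = H + 1, and the loop spins in
 * ~1 ms steps while the CRTC is still inside a vblank period and the
 * hardware counter has not yet reached H + 1, so the mmio flip is not
 * programmed inside the vblank period preceding the target.
 */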
2312
2313void amdgpu_dm_atomic_commit_tail(
2314 struct drm_atomic_state *state)
2315{
2316 struct drm_device *dev = state->dev;
2317 struct amdgpu_device *adev = dev->dev_private;
2318 struct amdgpu_display_manager *dm = &adev->dm;
2319 struct drm_plane *plane;
2320 struct drm_plane_state *old_plane_state;
2321 uint32_t i;
2322 uint32_t commit_streams_count = 0;
2323 uint32_t new_crtcs_count = 0;
2324 struct drm_crtc *crtc;
2325 struct drm_crtc_state *old_crtc_state;
2326 const struct dc_stream *commit_streams[MAX_STREAMS];
2327 struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
2328 const struct dc_stream *new_stream;
2329 unsigned long flags;
2330 bool wait_for_vblank = true;
2331
2332
2333 drm_atomic_helper_update_legacy_modeset_state(dev, state);
2334
2335 /* update changed items */
2336 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
2337 struct amdgpu_crtc *acrtc;
2338 struct amdgpu_connector *aconnector = NULL;
2339 enum dm_commit_action action;
2340 struct drm_crtc_state *new_state = crtc->state;
2341
2342 acrtc = to_amdgpu_crtc(crtc);
2343
2344 aconnector =
2345 amdgpu_dm_find_first_crct_matching_connector(
2346 state,
2347 crtc,
2348 false);
2349
2350 /* handles headless hotplug case, updating new_state and
2351 * aconnector as needed
2352 */
2353
2354 action = get_dm_commit_action(new_state);
2355
2356 switch (action) {
2357 case DM_COMMIT_ACTION_DPMS_ON:
2358 case DM_COMMIT_ACTION_SET: {
2359 struct dm_connector_state *dm_state = NULL;
2360 new_stream = NULL;
2361
2362 if (aconnector)
2363 dm_state = to_dm_connector_state(aconnector->base.state);
2364
2365 new_stream = create_stream_for_sink(
2366 aconnector,
2367 &crtc->state->mode,
2368 dm_state);
2369
2370 DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
2371
2372 if (!new_stream) {
2373 /*
2374  * this could happen because of issues with
2375  * userspace notification delivery.
2376  * In this case userspace tries to set a mode on
2377  * a display which is in fact disconnected.
2378  * dc_sink is NULL on the aconnector in this case.
2379  * We expect a reset mode will come soon.
2380  *
2381  * This can also happen when an unplug is done
2382  * during the resume sequence
2383 *
2384 * In this case, we want to pretend we still
2385 * have a sink to keep the pipe running so that
2386 * hw state is consistent with the sw state
2387 */
2388 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
2389 __func__, acrtc->base.base.id);
2390 break;
2391 }
2392
2393 if (acrtc->stream)
2394 remove_stream(adev, acrtc);
2395
2396 /*
2397  * this loop saves the set-mode crtcs;
2398  * we need to enable vblanks once all
2399  * resources are acquired in dc, after dc_commit_streams
2400 */
2401 new_crtcs[new_crtcs_count] = acrtc;
2402 new_crtcs_count++;
2403
2404 acrtc->stream = new_stream;
2405 acrtc->enabled = true;
2406 acrtc->hw_mode = crtc->state->mode;
2407 crtc->hwmode = crtc->state->mode;
2408
2409 break;
2410 }
2411
2412 case DM_COMMIT_ACTION_NOTHING: {
2413 struct dm_connector_state *dm_state = NULL;
2414
2415 if (!aconnector)
2416 break;
2417
2418 dm_state = to_dm_connector_state(aconnector->base.state);
2419
2420 /* Scaling update */
2421 update_stream_scaling_settings(&crtc->state->mode,
2422 dm_state, acrtc->stream);
2423
2424 break;
2425 }
2426 case DM_COMMIT_ACTION_DPMS_OFF:
2427 case DM_COMMIT_ACTION_RESET:
2428 DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
2429 /* i.e. reset mode */
2430 if (acrtc->stream)
2431 remove_stream(adev, acrtc);
2432 break;
2433 } /* switch() */
2434 } /* for_each_crtc_in_state() */
2435
2436 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2437
2438 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2439
2440 if (acrtc->stream) {
2441 commit_streams[commit_streams_count] = acrtc->stream;
2442 ++commit_streams_count;
2443 }
2444 }
2445
2446 /*
2447  * Add streams after the required streams from new and replaced
2448  * streams are removed from the freesync module
2449 */
2450 if (adev->dm.freesync_module) {
2451 for (i = 0; i < new_crtcs_count; i++) {
2452 struct amdgpu_connector *aconnector = NULL;
2453 new_stream = new_crtcs[i]->stream;
2454 aconnector =
2455 amdgpu_dm_find_first_crct_matching_connector(
2456 state,
2457 &new_crtcs[i]->base,
2458 false);
2459 if (!aconnector) {
2460 DRM_INFO(
2461 "Atomic commit: Failed to find connector for acrtc id:%d "
2462 "skipping freesync init\n",
2463 new_crtcs[i]->crtc_id);
2464 continue;
2465 }
2466
2467 mod_freesync_add_stream(adev->dm.freesync_module,
2468 new_stream, &aconnector->caps);
2469 }
2470 }
2471
2472 /* DC is optimized not to do anything if 'streams' didn't change. */
2473 WARN_ON(!dc_commit_streams(dm->dc, commit_streams, commit_streams_count));
2474
2475 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2476 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2477
2478 if (acrtc->stream != NULL)
2479 	acrtc->otg_inst =
2480 		dc_stream_get_status(acrtc->stream)->primary_otg_inst;
2481 }
2482
2483 /* update planes when needed */
2484 for_each_plane_in_state(state, plane, old_plane_state, i) {
2485 struct drm_plane_state *plane_state = plane->state;
2486 struct drm_crtc *crtc = plane_state->crtc;
2487 struct drm_framebuffer *fb = plane_state->fb;
2488 struct drm_connector *connector;
2489 struct dm_connector_state *dm_state = NULL;
2490 enum dm_commit_action action;
2491 bool pflip_needed;
2492
2493 if (!fb || !crtc || !crtc->state->active)
2494 continue;
2495
2496 action = get_dm_commit_action(crtc->state);
2497
2498 /* Surfaces are created under two scenarios:
2499 * 1. This commit is not a page flip.
2500 * 2. This commit is a page flip, and streams are created.
2501 */
2502 pflip_needed = !state->allow_modeset;
2503 if (!pflip_needed ||
2504 action == DM_COMMIT_ACTION_DPMS_ON ||
2505 action == DM_COMMIT_ACTION_SET) {
2506 list_for_each_entry(connector,
2507 &dev->mode_config.connector_list, head) {
2508 if (connector->state->crtc == crtc) {
2509 dm_state = to_dm_connector_state(
2510 connector->state);
2511 break;
2512 }
2513 }
2514
2515 /*
2516 * This situation happens in the following case:
2517  * we are about to set a mode for a connector whose only
2518  * possible crtc (in the encoder crtc mask) is used by
2519  * another connector; that is why DRM will try to
2520  * re-assign crtcs in order to make the configuration
2521  * supported. For our implementation we need to make all
2522  * encoders support all crtcs; then this issue will
2523  * never arise again. But to guard the code from it,
2524  * the check is left in place.
2525 *
2526 * Also it should be needed when used with actual
2527 * drm_atomic_commit ioctl in future
2528 */
2529 if (!dm_state)
2530 continue;
2531
2532 dm_dc_surface_commit(dm->dc, crtc);
2533 }
2534 }
2535
2536 for (i = 0; i < new_crtcs_count; i++) {
2537 /*
2538 * loop to enable interrupts on newly arrived crtc
2539 */
2540 struct amdgpu_crtc *acrtc = new_crtcs[i];
2541
2542 if (adev->dm.freesync_module)
2543 mod_freesync_notify_mode_change(
2544 adev->dm.freesync_module, &acrtc->stream, 1);
2545
2546 manage_dm_interrupts(adev, acrtc, true);
2547 dm_crtc_cursor_reset(&acrtc->base);
2548
2549 }
2550
2551 for_each_plane_in_state(state, plane, old_plane_state, i) {
2552 struct drm_plane_state *plane_state = plane->state;
2553 struct drm_crtc *crtc = plane_state->crtc;
2554 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2555 struct drm_framebuffer *fb = plane_state->fb;
2556 bool pflip_needed;
2557
2558 if (!fb || !crtc || !crtc->state->planes_changed ||
2559 !crtc->state->active)
2560 continue;
2561 pflip_needed = !state->allow_modeset;
2562
2563 if (pflip_needed) {
2564 amdgpu_dm_do_flip(
2565 crtc,
2566 fb,
2567 drm_crtc_vblank_count(crtc));
2568
2569 wait_for_vblank =
2570 	!(acrtc->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC);
2572 /*clean up the flags for next usage*/
2573 acrtc->flip_flags = 0;
2574 }
2575 }
2576
2577
2578 /* TODO: mark the consumed event on every crtc an event was assigned
2579  * to in drm_atomic_helper_setup_commit, just to signal completion
2580  */
2581 spin_lock_irqsave(&adev->ddev->event_lock, flags);
2582 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
2583 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2584
2585 if (acrtc->base.state->event &&
2586 acrtc->base.state->event->event.base.type != DRM_EVENT_FLIP_COMPLETE) {
2587 acrtc->event = acrtc->base.state->event;
2588 acrtc->base.state->event = NULL;
2589 }
2590 }
2591 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2592
2593 /* Signal HW programming completion */
2594 drm_atomic_helper_commit_hw_done(state);
2595
2596 if (wait_for_vblank)
2597 drm_atomic_helper_wait_for_vblanks(dev, state);
2598
2599 /* TODO: send the vblank event for every crtc an event was assigned
2600  * to in drm_atomic_helper_setup_commit, just to signal completion
2601  */
2602 spin_lock_irqsave(&adev->ddev->event_lock, flags);
2603 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
2604 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2605
2606 if (acrtc->event &&
2607 acrtc->event->event.base.type != DRM_EVENT_FLIP_COMPLETE) {
2608 drm_send_event_locked(dev, &acrtc->event->base);
2609 acrtc->event = NULL;
2610 }
2611 }
2612 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2613
2614 /* TODO: is it too early if the actual flip hasn't happened yet? */
2615 /* Release old FB */
2616 drm_atomic_helper_cleanup_planes(dev, state);
2617}
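/*
 * Recap of the commit sequence above: (1) legacy modeset state is
 * updated and streams are created or removed per crtc action; (2) the
 * full stream set is committed to DC (a no-op if nothing changed);
 * (3) surfaces are committed and interrupts/cursors are re-enabled on
 * newly set crtcs; (4) page flips are programmed, pending events are
 * collected and signalled, vblanks are awaited, and old framebuffers
 * are released.
 */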
2618
2619
2620static int dm_force_atomic_commit(struct drm_connector *connector)
2621{
2622 int ret = 0;
2623 struct drm_device *ddev = connector->dev;
2624 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
2625 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
2626 struct drm_plane *plane = disconnected_acrtc->base.primary;
2627 struct drm_connector_state *conn_state;
2628 struct drm_crtc_state *crtc_state;
2629 struct drm_plane_state *plane_state;
2630
2631 if (!state)
2632 return -ENOMEM;
2633
2634 state->acquire_ctx = ddev->mode_config.acquire_ctx;
2635
2636 /* Construct an atomic state to restore previous display setting */
2637
2638 /*
2639 * Attach connectors to drm_atomic_state
2640 */
2641 conn_state = drm_atomic_get_connector_state(state, connector);
2642
2643 ret = PTR_ERR_OR_ZERO(conn_state);
2644 if (ret)
2645 goto err;
2646
2647 /* Attach crtc to drm_atomic_state*/
2648 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
2649
2650 ret = PTR_ERR_OR_ZERO(crtc_state);
2651 if (ret)
2652 goto err;
2653
2654 /* force a restore */
2655 crtc_state->mode_changed = true;
2656
2657 /* Attach plane to drm_atomic_state */
2658 plane_state = drm_atomic_get_plane_state(state, plane);
2659
2660 ret = PTR_ERR_OR_ZERO(plane_state);
2661 if (ret)
2662 goto err;
2663
2664
2665 /* Call commit internally with the state we just constructed */
2666 ret = drm_atomic_commit(state);
2667 if (!ret)
2668 return 0;
2669
2670err:
2671 DRM_ERROR("Restoring old state failed with %i\n", ret);
2672 drm_atomic_state_put(state);
2673
2674 return ret;
2675}
2676
2677/*
2678 * This function handles all cases when a set mode does not come upon hotplug.
2679 * This includes when the same display is unplugged and then plugged back into
2680 * the same port, and when we are running without usermode desktop manager support.
2681 */
2682void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector)
2683{
2684 struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
2685 struct amdgpu_crtc *disconnected_acrtc;
2686
2687 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
2688 return;
2689
2690 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
2691
2692 if (!disconnected_acrtc || !disconnected_acrtc->stream)
2693 return;
2694
2695 /*
2696 * If the previous sink is not released and different from the current,
2697 * we deduce we are in a state where we cannot rely on a usermode call
2698 * to turn on the display, so we do it here
2699 */
2700 if (disconnected_acrtc->stream->sink != aconnector->dc_sink)
2701 dm_force_atomic_commit(&aconnector->base);
2702}
2703
2704static uint32_t add_val_sets_surface(
2705 struct dc_validation_set *val_sets,
2706 uint32_t set_count,
2707 const struct dc_stream *stream,
2708 const struct dc_surface *surface)
2709{
2710 uint32_t i = 0;
2711
2712 while (i < set_count) {
2713 if (val_sets[i].stream == stream)
2714 break;
2715 ++i;
2716 }
2717
2718 val_sets[i].surfaces[val_sets[i].surface_count] = surface;
2719 val_sets[i].surface_count++;
2720
2721 return val_sets[i].surface_count;
2722}
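/*
 * Note: this helper assumes 'stream' is already present in val_sets.
 * If it is not, the loop falls through with i == set_count and the
 * surface is attached to the entry one past the last valid set, so
 * callers must only pass streams previously registered through
 * update_in_val_sets_stream().
 */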
2723
2724static uint32_t update_in_val_sets_stream(
2725 struct dc_validation_set *val_sets,
2726 struct drm_crtc **crtcs,
2727 uint32_t set_count,
2728 const struct dc_stream *old_stream,
2729 const struct dc_stream *new_stream,
2730 struct drm_crtc *crtc)
2731{
2732 uint32_t i = 0;
2733
2734 while (i < set_count) {
2735 if (val_sets[i].stream == old_stream)
2736 break;
2737 ++i;
2738 }
2739
2740 val_sets[i].stream = new_stream;
2741 crtcs[i] = crtc;
2742
2743 if (i == set_count) {
2744 /* nothing found. add new one to the end */
2745 return set_count + 1;
2746 }
2747
2748 return set_count;
2749}
2750
2751static uint32_t remove_from_val_sets(
2752 struct dc_validation_set *val_sets,
2753 uint32_t set_count,
2754 const struct dc_stream *stream)
2755{
2756 int i;
2757
2758 for (i = 0; i < set_count; i++)
2759 if (val_sets[i].stream == stream)
2760 break;
2761
2762 if (i == set_count) {
2763 /* nothing found */
2764 return set_count;
2765 }
2766
2767 set_count--;
2768
2769 for (; i < set_count; i++) {
2770 val_sets[i] = val_sets[i + 1];
2771 }
2772
2773 return set_count;
2774}
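/*
 * For illustration, assuming set = [S0, S1] with crtc_set = [C0, C1]:
 * update_in_val_sets_stream(set, crtc_set, 2, S1, S1_new, C1) rewrites
 * entry 1 in place and returns 2 (or appends and returns 3 if the old
 * stream is not found), while remove_from_val_sets(set, 2, S0) shifts
 * entry 1 down and returns 1.
 */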
2775
2776int amdgpu_dm_atomic_check(struct drm_device *dev,
2777 struct drm_atomic_state *state)
2778{
2779 struct drm_crtc *crtc;
2780 struct drm_crtc_state *crtc_state;
2781 struct drm_plane *plane;
2782 struct drm_plane_state *plane_state;
2783 int i, j;
2784 int ret;
2785 int set_count;
2786 int new_stream_count;
2787 struct dc_validation_set set[MAX_STREAMS] = {{ 0 }};
2788 struct dc_stream *new_streams[MAX_STREAMS] = { 0 };
2789 struct drm_crtc *crtc_set[MAX_STREAMS] = { 0 };
2790 struct amdgpu_device *adev = dev->dev_private;
2791 struct dc *dc = adev->dm.dc;
2792 bool need_to_validate = false;
2793
2794 ret = drm_atomic_helper_check(dev, state);
2795
2796 if (ret) {
2797 DRM_ERROR("Atomic state validation failed with error :%d !\n",
2798 ret);
2799 return ret;
2800 }
2801
2802 ret = -EINVAL;
2803
2804 /* copy existing configuration */
2805 new_stream_count = 0;
2806 set_count = 0;
2807 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2808
2809 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2810
2811 if (acrtc->stream) {
2812 set[set_count].stream = acrtc->stream;
2813 crtc_set[set_count] = crtc;
2814 ++set_count;
2815 }
2816 }
2817
2818 /* update changed items */
2819 for_each_crtc_in_state(state, crtc, crtc_state, i) {
2820 struct amdgpu_crtc *acrtc = NULL;
2821 struct amdgpu_connector *aconnector = NULL;
2822 enum dm_commit_action action;
2823
2824 acrtc = to_amdgpu_crtc(crtc);
2825
2826 aconnector = amdgpu_dm_find_first_crct_matching_connector(state, crtc, true);
2827
2828 action = get_dm_commit_action(crtc_state);
2829
2830 switch (action) {
2831 case DM_COMMIT_ACTION_DPMS_ON:
2832 case DM_COMMIT_ACTION_SET: {
2833 struct dc_stream *new_stream = NULL;
2834 struct drm_connector_state *conn_state = NULL;
2835 struct dm_connector_state *dm_state = NULL;
2836
2837 if (aconnector) {
2838 conn_state = drm_atomic_get_connector_state(state, &aconnector->base);
2839 if (IS_ERR(conn_state))
2840 return ret;
2841 dm_state = to_dm_connector_state(conn_state);
2842 }
2843
2844 new_stream = create_stream_for_sink(aconnector, &crtc_state->mode, dm_state);
2845
2846 /*
2847 * we can have no stream on ACTION_SET if a display
2848 * was disconnected during S3; in this case it is not an
2849 * error, the OS will be updated after detection and will
2850 * do the right thing on the next atomic commit
2851 */
2852 if (!new_stream) {
2853 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
2854 __func__, acrtc->base.base.id);
2855 break;
2856 }
2857
2858 new_streams[new_stream_count] = new_stream;
2859 set_count = update_in_val_sets_stream(
2860 set,
2861 crtc_set,
2862 set_count,
ab2541b6
AC
2863 acrtc->stream,
2864 new_stream,
2865 crtc);
2866
2867 new_stream_count++;
2868 need_to_validate = true;
2869 break;
2870 }
2871
2872 case DM_COMMIT_ACTION_NOTHING: {
2873 const struct drm_connector *drm_connector = NULL;
2874 struct drm_connector_state *conn_state = NULL;
2875 struct dm_connector_state *dm_state = NULL;
2876 struct dm_connector_state *old_dm_state = NULL;
2877 struct dc_stream *new_stream;
2878
2879 if (!aconnector)
2880 break;
2881
2882 for_each_connector_in_state(
2883 state, drm_connector, conn_state, j) {
2884 if (&aconnector->base == drm_connector)
2885 break;
2886 }
2887
2888 old_dm_state = to_dm_connector_state(drm_connector->state);
2889 dm_state = to_dm_connector_state(conn_state);
2890
2891 /* Support underscan adjustment*/
2892 if (!is_scaling_state_different(dm_state, old_dm_state))
2893 break;
2894
2895 new_stream = create_stream_for_sink(aconnector, &crtc_state->mode, dm_state);
2896
2897 if (!new_stream) {
2898 DRM_ERROR("%s: Failed to create new stream for crtc %d\n",
2899 __func__, acrtc->base.base.id);
2900 break;
2901 }
2902
2903 new_streams[new_stream_count] = new_stream;
2904 set_count = update_in_val_sets_stream(
2905 set,
2906 crtc_set,
2907 set_count,
2908 acrtc->stream,
2909 new_stream,
2910 crtc);
2911
2912 new_stream_count++;
2913 need_to_validate = true;
2914
2915 break;
2916 }
2917 case DM_COMMIT_ACTION_DPMS_OFF:
2918 case DM_COMMIT_ACTION_RESET:
2919 /* i.e. reset mode */
2920 if (acrtc->stream) {
2921 set_count = remove_from_val_sets(
2922 set,
2923 set_count,
2924 acrtc->stream);
2925 }
2926 break;
2927 }
2928
2929 /*
2930 * TODO revisit when removing commit action
2931 * and looking at atomic flags directly
2932 */
2933
2934 /* commit needs planes right now (e.g. for gamma) */
2935 /* TODO: rework commit to check crtc for gamma change */
2936 ret = drm_atomic_add_affected_planes(state, crtc);
2937 if (ret)
2938 return ret;
2939
2940 ret = -EINVAL;
2941 }
2942
2943 for (i = 0; i < set_count; i++) {
2944 for_each_plane_in_state(state, plane, plane_state, j) {
2945 struct drm_crtc *crtc = plane_state->crtc;
2946 struct drm_framebuffer *fb = plane_state->fb;
2947 struct drm_connector *connector;
2948 struct dm_connector_state *dm_state = NULL;
2949 enum dm_commit_action action;
2950 struct drm_crtc_state *crtc_state;
2951 bool pflip_needed;
2952
2953
2954 if (!fb || !crtc || crtc_set[i] != crtc ||
2955 !crtc->state->planes_changed || !crtc->state->active)
2956 continue;
2957
2958 action = get_dm_commit_action(crtc->state);
2959
2960 /* Surfaces are created under two scenarios:
2961 * 1. This commit is not a page flip.
2962 * 2. This commit is a page flip, and streams are created.
2963 */
2964 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2965 pflip_needed = !state->allow_modeset;
2966 if (!pflip_needed ||
2967 action == DM_COMMIT_ACTION_DPMS_ON ||
2968 action == DM_COMMIT_ACTION_SET) {
2969 struct dc_surface *surface;
2970
2971 list_for_each_entry(connector,
2972 &dev->mode_config.connector_list, head) {
2973 if (connector->state->crtc == crtc) {
2974 dm_state = to_dm_connector_state(
2975 connector->state);
2976 break;
2977 }
2978 }
2979
2980 /*
2981 * This situation happens in the following case:
2982 * we are about to set a mode for a connector whose only
2983 * possible crtc (in the encoder crtc mask) is used by
2984 * another connector; that is why DRM will try to
2985 * re-assign crtcs in order to make the configuration
2986 * supported. For our implementation we need to make all
2987 * encoders support all crtcs; then this issue will
2988 * never arise again. But to guard the code from it,
2989 * the check is left in place.
2990 *
2991 * Also it should be needed when used with actual
2992 * drm_atomic_commit ioctl in future
2993 */
2994 if (!dm_state)
2995 continue;
2996
2997 surface = dc_create_surface(dc);
2998 fill_plane_attributes(
2999 crtc->dev->dev_private,
3000 surface,
3001 plane_state,
3002 false);
3003
3004 add_val_sets_surface(
3005 set,
3006 set_count,
3007 set[i].stream,
3008 surface);
3009
3010 need_to_validate = true;
3011 }
3012 }
3013 }
3014
3015 if (!need_to_validate || set_count == 0 ||
3016 dc_validate_resources(dc, set, set_count))
3017 ret = 0;
3018
3019 for (i = 0; i < set_count; i++) {
3020 for (j = 0; j < set[i].surface_count; j++) {
3021 dc_surface_release(set[i].surfaces[j]);
3022 }
3023 }
3024 for (i = 0; i < new_stream_count; i++)
3025 dc_stream_release(new_streams[i]);
3026
3027 if (ret != 0)
3028 DRM_ERROR("Atomic check failed.\n");
3029
3030 return ret;
3031}
3032
3033static bool is_dp_capable_without_timing_msa(
3034 struct dc *dc,
3035 struct amdgpu_connector *amdgpu_connector)
3036{
3037 uint8_t dpcd_data;
3038 bool capable = false;
3039 if (amdgpu_connector->dc_link &&
3040 dc_read_dpcd(dc, amdgpu_connector->dc_link->link_index,
3041 DP_DOWN_STREAM_PORT_COUNT,
3042 &dpcd_data, sizeof(dpcd_data)) )
3043 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
3044
3045 return capable;
3046}
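/*
 * Note: DP_DOWN_STREAM_PORT_COUNT is DPCD register 0x007, and the
 * DP_MSA_TIMING_PAR_IGNORED bit in it advertises that the sink can
 * ignore the MSA timing parameters, which is a prerequisite for
 * variable-refresh operation.
 */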
3047void amdgpu_dm_add_sink_to_freesync_module(
3048 struct drm_connector *connector,
3049 struct edid *edid)
3050{
3051 int i;
3052 uint64_t val_capable;
3053 bool edid_check_required;
3054 struct detailed_timing *timing;
3055 struct detailed_non_pixel *data;
3056 struct detailed_data_monitor_range *range;
3057 struct amdgpu_connector *amdgpu_connector =
3058 to_amdgpu_connector(connector);
3059
3060 struct drm_device *dev = connector->dev;
3061 struct amdgpu_device *adev = dev->dev_private;
3062 edid_check_required = false;
3063 if (!amdgpu_connector->dc_sink) {
3064 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
3065 return;
3066 }
3067 if (!adev->dm.freesync_module)
3068 return;
3069 /*
3070 * if the EDID is non-NULL, restrict freesync only to DP and eDP
3071 */
3072 if (edid) {
3073 if (amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
3074 || amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
3075 edid_check_required = is_dp_capable_without_timing_msa(
3076 adev->dm.dc,
3077 amdgpu_connector);
3078 }
3079 }
3080 val_capable = 0;
3081 if (edid_check_required && (edid->version > 1 ||
3082 (edid->version == 1 && edid->revision > 1))) {
3083 for (i = 0; i < 4; i++) {
3084
3085 timing = &edid->detailed_timings[i];
3086 data = &timing->data.other_data;
3087 range = &data->data.range;
3088 /*
3089 * Check if monitor has continuous frequency mode
3090 */
3091 if (data->type != EDID_DETAIL_MONITOR_RANGE)
3092 continue;
3093 /*
3094 * Check for flag range limits only. If flag == 1 then
3095 * no additional timing information provided.
3096 * Default GTF, GTF Secondary curve and CVT are not
3097 * supported
3098 */
3099 if (range->flags != 1)
3100 continue;
3101
3102 amdgpu_connector->min_vfreq = range->min_vfreq;
3103 amdgpu_connector->max_vfreq = range->max_vfreq;
3104 amdgpu_connector->pixel_clock_mhz =
3105 range->pixel_clock_mhz * 10;
3106 break;
3107 }
3108
3109 if (amdgpu_connector->max_vfreq -
3110 amdgpu_connector->min_vfreq > 10) {
3111 amdgpu_connector->caps.supported = true;
3112 amdgpu_connector->caps.min_refresh_in_micro_hz =
3113 amdgpu_connector->min_vfreq * 1000000;
3114 amdgpu_connector->caps.max_refresh_in_micro_hz =
3115 amdgpu_connector->max_vfreq * 1000000;
3116 val_capable = 1;
3117 }
3118 }
3119
3120 /*
3121 * TODO figure out how to notify user-mode or DRM of freesync caps
3122 * once we figure out how to deal with freesync in an upstreamable
3123 * fashion
3124 */
3125
3126}
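/*
 * Example of the conversion above: a monitor range descriptor of
 * 40-60 Hz passes the "max - min > 10" gate and yields
 * min_refresh_in_micro_hz = 40 * 1000000 and
 * max_refresh_in_micro_hz = 60 * 1000000; the EDID pixel clock field
 * is stored in 10 MHz units, hence the multiply by 10.
 */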
3127
3128void amdgpu_dm_remove_sink_from_freesync_module(
3129 struct drm_connector *connector)
3130{
3131 /*
3132 * TODO fill in once we figure out how to deal with freesync in
3133 * an upstreamable fashion
3134 */
3135}