/*
 * Copyright 2012-13 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include <linux/types.h>
27 #include <linux/version.h>
30 #include <drm/drm_atomic_helper.h>
31 #include <drm/drm_fb_helper.h>
32 #include <drm/drm_atomic.h>
33 #include <drm/drm_edid.h>
36 #include "amdgpu_pm.h"
37 #include "dm_services_types.h"
39 // We need to #undef FRAME_SIZE and DEPRECATED because they conflict
40 // with ptrace-abi.h's #define's of them.
46 #include "amdgpu_dm_types.h"
47 #include "amdgpu_dm_mst_types.h"
49 #include "modules/inc/mod_freesync.h"
51 struct dm_connector_state
{
52 struct drm_connector_state base
;
54 enum amdgpu_rmx_type scaling
;
55 uint8_t underscan_vborder
;
56 uint8_t underscan_hborder
;
57 bool underscan_enable
;
/* Downcast an embedded drm_connector_state back to its dm_connector_state. */
#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)
/* drm_encoder_funcs.destroy: tear down and free a DM encoder. */
void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	/* NOTE(review): freeing here assumes the encoder was kzalloc'd by DM
	 * at init time — confirm against the allocation site. */
	kfree(encoder);
}
70 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
71 .destroy
= amdgpu_dm_encoder_destroy
,
74 static void dm_set_cursor(
75 struct amdgpu_crtc
*amdgpu_crtc
,
80 struct dc_cursor_attributes attributes
;
81 amdgpu_crtc
->cursor_width
= width
;
82 amdgpu_crtc
->cursor_height
= height
;
84 attributes
.address
.high_part
= upper_32_bits(gpu_addr
);
85 attributes
.address
.low_part
= lower_32_bits(gpu_addr
);
86 attributes
.width
= width
;
87 attributes
.height
= height
;
90 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
91 attributes
.rotation_angle
= 0;
92 attributes
.attribute_flags
.value
= 0;
94 if (!dc_target_set_cursor_attributes(
97 DRM_ERROR("DC failed to set cursor attributes\n");
101 static int dm_crtc_unpin_cursor_bo_old(
102 struct amdgpu_crtc
*amdgpu_crtc
)
104 struct amdgpu_bo
*robj
;
107 if (NULL
!= amdgpu_crtc
&& NULL
!= amdgpu_crtc
->cursor_bo
) {
108 robj
= gem_to_amdgpu_bo(amdgpu_crtc
->cursor_bo
);
110 ret
= amdgpu_bo_reserve(robj
, false);
112 if (likely(ret
== 0)) {
113 ret
= amdgpu_bo_unpin(robj
);
115 if (unlikely(ret
!= 0)) {
117 "%s: unpin failed (ret=%d), bo %p\n",
120 amdgpu_crtc
->cursor_bo
);
123 amdgpu_bo_unreserve(robj
);
126 "%s: reserve failed (ret=%d), bo %p\n",
129 amdgpu_crtc
->cursor_bo
);
132 drm_gem_object_unreference_unlocked(amdgpu_crtc
->cursor_bo
);
133 amdgpu_crtc
->cursor_bo
= NULL
;
139 static int dm_crtc_pin_cursor_bo_new(
140 struct drm_crtc
*crtc
,
141 struct drm_file
*file_priv
,
143 struct amdgpu_bo
**ret_obj
)
145 struct amdgpu_crtc
*amdgpu_crtc
;
146 struct amdgpu_bo
*robj
;
147 struct drm_gem_object
*obj
;
151 struct drm_device
*dev
= crtc
->dev
;
152 struct amdgpu_device
*adev
= dev
->dev_private
;
155 amdgpu_crtc
= to_amdgpu_crtc(crtc
);
157 obj
= drm_gem_object_lookup(file_priv
, handle
);
161 "Cannot find cursor object %x for crtc %d\n",
163 amdgpu_crtc
->crtc_id
);
166 robj
= gem_to_amdgpu_bo(obj
);
168 ret
= amdgpu_bo_reserve(robj
, false);
170 if (unlikely(ret
!= 0)) {
171 drm_gem_object_unreference_unlocked(obj
);
172 DRM_ERROR("dm_crtc_pin_cursor_bo_new ret %x, handle %x\n",
177 ret
= amdgpu_bo_pin_restricted(robj
, AMDGPU_GEM_DOMAIN_VRAM
, 0,
178 adev
->mc
.visible_vram_size
,
182 amdgpu_crtc
->cursor_addr
= gpu_addr
;
185 amdgpu_bo_unreserve(robj
);
187 drm_gem_object_unreference_unlocked(obj
);
195 static int dm_crtc_cursor_set(
196 struct drm_crtc
*crtc
,
197 struct drm_file
*file_priv
,
202 struct amdgpu_bo
*new_cursor_bo
;
203 struct dc_cursor_position position
;
207 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
210 new_cursor_bo
= NULL
;
213 "%s: crtc_id=%d with handle %d and size %d to %d, bo_object %p\n",
215 amdgpu_crtc
->crtc_id
,
219 amdgpu_crtc
->cursor_bo
);
222 /* turn off cursor */
223 position
.enable
= false;
226 position
.hot_spot_enable
= false;
228 if (amdgpu_crtc
->target
) {
229 /*set cursor visible false*/
230 dc_target_set_cursor_position(
234 /*unpin old cursor buffer and update cache*/
235 ret
= dm_crtc_unpin_cursor_bo_old(amdgpu_crtc
);
240 if ((width
> amdgpu_crtc
->max_cursor_width
) ||
241 (height
> amdgpu_crtc
->max_cursor_height
)) {
243 "%s: bad cursor width or height %d x %d\n",
249 /*try to pin new cursor bo*/
250 ret
= dm_crtc_pin_cursor_bo_new(crtc
, file_priv
, handle
, &new_cursor_bo
);
251 /*if map not successful then return an error*/
255 /*program new cursor bo to hardware*/
256 dm_set_cursor(amdgpu_crtc
, amdgpu_crtc
->cursor_addr
, width
, height
);
258 /*un map old, not used anymore cursor bo ,
259 * return memory and mapping back */
260 dm_crtc_unpin_cursor_bo_old(amdgpu_crtc
);
262 /*assign new cursor bo to our internal cache*/
263 amdgpu_crtc
->cursor_bo
= &new_cursor_bo
->gem_base
;
270 static int dm_crtc_cursor_move(struct drm_crtc
*crtc
,
273 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
274 int xorigin
= 0, yorigin
= 0;
275 struct dc_cursor_position position
;
277 /* avivo cursor are offset into the total surface */
278 x
+= crtc
->primary
->state
->src_x
>> 16;
279 y
+= crtc
->primary
->state
->src_y
>> 16;
282 * TODO: for cursor debugging unguard the following
286 "%s: x %d y %d c->x %d c->y %d\n",
295 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
299 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
303 position
.enable
= true;
307 position
.hot_spot_enable
= true;
308 position
.x_hotspot
= xorigin
;
309 position
.y_hotspot
= yorigin
;
311 if (amdgpu_crtc
->target
) {
312 if (!dc_target_set_cursor_position(
315 DRM_ERROR("DC failed to set cursor position\n");
323 static void dm_crtc_cursor_reset(struct drm_crtc
*crtc
)
325 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
328 "%s: with cursor_bo %p\n",
330 amdgpu_crtc
->cursor_bo
);
332 if (amdgpu_crtc
->cursor_bo
&& amdgpu_crtc
->target
) {
335 amdgpu_crtc
->cursor_addr
,
336 amdgpu_crtc
->cursor_width
,
337 amdgpu_crtc
->cursor_height
);
340 static bool fill_rects_from_plane_state(
341 const struct drm_plane_state
*state
,
342 struct dc_surface
*surface
)
344 surface
->src_rect
.x
= state
->src_x
>> 16;
345 surface
->src_rect
.y
= state
->src_y
>> 16;
346 /*we ignore for now mantissa and do not to deal with floating pixels :(*/
347 surface
->src_rect
.width
= state
->src_w
>> 16;
349 if (surface
->src_rect
.width
== 0)
352 surface
->src_rect
.height
= state
->src_h
>> 16;
353 if (surface
->src_rect
.height
== 0)
356 surface
->dst_rect
.x
= state
->crtc_x
;
357 surface
->dst_rect
.y
= state
->crtc_y
;
359 if (state
->crtc_w
== 0)
362 surface
->dst_rect
.width
= state
->crtc_w
;
364 if (state
->crtc_h
== 0)
367 surface
->dst_rect
.height
= state
->crtc_h
;
369 surface
->clip_rect
= surface
->dst_rect
;
371 switch (state
->rotation
& DRM_MODE_ROTATE_MASK
) {
372 case DRM_MODE_ROTATE_0
:
373 surface
->rotation
= ROTATION_ANGLE_0
;
375 case DRM_MODE_ROTATE_90
:
376 surface
->rotation
= ROTATION_ANGLE_90
;
378 case DRM_MODE_ROTATE_180
:
379 surface
->rotation
= ROTATION_ANGLE_180
;
381 case DRM_MODE_ROTATE_270
:
382 surface
->rotation
= ROTATION_ANGLE_270
;
385 surface
->rotation
= ROTATION_ANGLE_0
;
391 static bool get_fb_info(
392 const struct amdgpu_framebuffer
*amdgpu_fb
,
393 uint64_t *tiling_flags
,
394 uint64_t *fb_location
)
396 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->obj
);
397 int r
= amdgpu_bo_reserve(rbo
, false);
398 if (unlikely(r
!= 0)){
399 DRM_ERROR("Unable to reserve buffer\n");
404 *fb_location
= amdgpu_bo_gpu_offset(rbo
);
407 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
409 amdgpu_bo_unreserve(rbo
);
413 static void fill_plane_attributes_from_fb(
414 struct amdgpu_device
*adev
,
415 struct dc_surface
*surface
,
416 const struct amdgpu_framebuffer
*amdgpu_fb
, bool addReq
)
418 uint64_t tiling_flags
;
419 uint64_t fb_location
= 0;
420 const struct drm_framebuffer
*fb
= &amdgpu_fb
->base
;
421 struct drm_format_name_buf format_name
;
426 addReq
== true ? &fb_location
:NULL
);
428 surface
->address
.type
= PLN_ADDR_TYPE_GRAPHICS
;
429 surface
->address
.grph
.addr
.low_part
= lower_32_bits(fb_location
);
430 surface
->address
.grph
.addr
.high_part
= upper_32_bits(fb_location
);
432 switch (fb
->format
->format
) {
434 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
436 case DRM_FORMAT_RGB565
:
437 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
439 case DRM_FORMAT_XRGB8888
:
440 case DRM_FORMAT_ARGB8888
:
441 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
443 case DRM_FORMAT_XRGB2101010
:
444 case DRM_FORMAT_ARGB2101010
:
445 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
447 case DRM_FORMAT_XBGR2101010
:
448 case DRM_FORMAT_ABGR2101010
:
449 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
452 DRM_ERROR("Unsupported screen format %s\n",
453 drm_get_format_name(fb
->format
->format
, &format_name
));
457 memset(&surface
->tiling_info
, 0, sizeof(surface
->tiling_info
));
459 /* Fill GFX8 params */
460 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
)
462 unsigned bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
464 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
465 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
466 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
467 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
468 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
470 /* XXX fix me for VI */
471 surface
->tiling_info
.gfx8
.num_banks
= num_banks
;
472 surface
->tiling_info
.gfx8
.array_mode
=
473 DC_ARRAY_2D_TILED_THIN1
;
474 surface
->tiling_info
.gfx8
.tile_split
= tile_split
;
475 surface
->tiling_info
.gfx8
.bank_width
= bankw
;
476 surface
->tiling_info
.gfx8
.bank_height
= bankh
;
477 surface
->tiling_info
.gfx8
.tile_aspect
= mtaspect
;
478 surface
->tiling_info
.gfx8
.tile_mode
=
479 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
480 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
481 == DC_ARRAY_1D_TILED_THIN1
) {
482 surface
->tiling_info
.gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
485 surface
->tiling_info
.gfx8
.pipe_config
=
486 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
488 surface
->plane_size
.grph
.surface_size
.x
= 0;
489 surface
->plane_size
.grph
.surface_size
.y
= 0;
490 surface
->plane_size
.grph
.surface_size
.width
= fb
->width
;
491 surface
->plane_size
.grph
.surface_size
.height
= fb
->height
;
492 surface
->plane_size
.grph
.surface_pitch
=
493 fb
->pitches
[0] / fb
->format
->cpp
[0];
495 surface
->visible
= true;
496 surface
->scaling_quality
.h_taps_c
= 0;
497 surface
->scaling_quality
.v_taps_c
= 0;
499 /* TODO: unhardcode */
500 surface
->color_space
= COLOR_SPACE_SRGB
;
501 /* is this needed? is surface zeroed at allocation? */
502 surface
->scaling_quality
.h_taps
= 0;
503 surface
->scaling_quality
.v_taps
= 0;
504 surface
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
508 #define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256
510 static void fill_gamma_from_crtc(
511 const struct drm_crtc
*crtc
,
512 struct dc_surface
*dc_surface
)
515 struct dc_gamma
*gamma
;
516 struct drm_crtc_state
*state
= crtc
->state
;
517 struct drm_color_lut
*lut
= (struct drm_color_lut
*) state
->gamma_lut
->data
;
519 gamma
= dc_create_gamma();
524 for (i
= 0; i
< NUM_OF_RAW_GAMMA_RAMP_RGB_256
; i
++) {
525 gamma
->red
[i
] = lut
[i
].red
;
526 gamma
->green
[i
] = lut
[i
].green
;
527 gamma
->blue
[i
] = lut
[i
].blue
;
530 dc_surface
->gamma_correction
= gamma
;
533 static void fill_plane_attributes(
534 struct amdgpu_device
*adev
,
535 struct dc_surface
*surface
,
536 struct drm_plane_state
*state
, bool addrReq
)
538 const struct amdgpu_framebuffer
*amdgpu_fb
=
539 to_amdgpu_framebuffer(state
->fb
);
540 const struct drm_crtc
*crtc
= state
->crtc
;
541 struct dc_transfer_func
*input_tf
;
543 fill_rects_from_plane_state(state
, surface
);
544 fill_plane_attributes_from_fb(
545 crtc
->dev
->dev_private
,
550 input_tf
= dc_create_transfer_func();
552 if (input_tf
== NULL
)
555 input_tf
->type
= TF_TYPE_PREDEFINED
;
556 input_tf
->tf
= TRANSFER_FUNCTION_SRGB
;
558 surface
->in_transfer_func
= input_tf
;
560 /* In case of gamma set, update gamma value */
561 if (state
->crtc
->state
->gamma_lut
) {
562 fill_gamma_from_crtc(crtc
, surface
);
566 /*****************************************************************************/
568 struct amdgpu_connector
*aconnector_from_drm_crtc_id(
569 const struct drm_crtc
*crtc
)
571 struct drm_device
*dev
= crtc
->dev
;
572 struct drm_connector
*connector
;
573 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
574 struct amdgpu_connector
*aconnector
;
576 list_for_each_entry(connector
,
577 &dev
->mode_config
.connector_list
, head
) {
579 aconnector
= to_amdgpu_connector(connector
);
581 if (aconnector
->base
.state
->crtc
!= &acrtc
->base
)
584 /* Found the connector */
588 /* If we get here, not found. */
592 static void update_stream_scaling_settings(
593 const struct drm_display_mode
*mode
,
594 const struct dm_connector_state
*dm_state
,
595 const struct dc_stream
*stream
)
597 struct amdgpu_device
*adev
= dm_state
->base
.crtc
->dev
->dev_private
;
598 enum amdgpu_rmx_type rmx_type
;
600 struct rect src
= { 0 }; /* viewport in target space*/
601 struct rect dst
= { 0 }; /* stream addressable area */
603 /* Full screen scaling by default */
604 src
.width
= mode
->hdisplay
;
605 src
.height
= mode
->vdisplay
;
606 dst
.width
= stream
->timing
.h_addressable
;
607 dst
.height
= stream
->timing
.v_addressable
;
609 rmx_type
= dm_state
->scaling
;
610 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
611 if (src
.width
* dst
.height
<
612 src
.height
* dst
.width
) {
613 /* height needs less upscaling/more downscaling */
614 dst
.width
= src
.width
*
615 dst
.height
/ src
.height
;
617 /* width needs less upscaling/more downscaling */
618 dst
.height
= src
.height
*
619 dst
.width
/ src
.width
;
621 } else if (rmx_type
== RMX_CENTER
) {
625 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
626 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
628 if (dm_state
->underscan_enable
) {
629 dst
.x
+= dm_state
->underscan_hborder
/ 2;
630 dst
.y
+= dm_state
->underscan_vborder
/ 2;
631 dst
.width
-= dm_state
->underscan_hborder
;
632 dst
.height
-= dm_state
->underscan_vborder
;
635 adev
->dm
.dc
->stream_funcs
.stream_update_scaling(adev
->dm
.dc
, stream
, &src
, &dst
);
637 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
638 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
642 static void dm_dc_surface_commit(
644 struct drm_crtc
*crtc
)
646 struct dc_surface
*dc_surface
;
647 const struct dc_surface
*dc_surfaces
[1];
648 const struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
649 struct dc_target
*dc_target
= acrtc
->target
;
653 "%s: Failed to obtain target on crtc (%d)!\n",
659 dc_surface
= dc_create_surface(dc
);
663 "%s: Failed to create a surface!\n",
668 /* Surface programming */
669 fill_plane_attributes(
670 crtc
->dev
->dev_private
,
672 crtc
->primary
->state
,
675 dc_surfaces
[0] = dc_surface
;
677 if (false == dc_commit_surfaces_to_target(
683 "%s: Failed to attach surface!\n",
687 dc_surface_release(dc_surface
);
692 static enum dc_color_depth
convert_color_depth_from_display_info(
693 const struct drm_connector
*connector
)
695 uint32_t bpc
= connector
->display_info
.bpc
;
697 /* Limited color depth to 8bit
698 * TODO: Still need to handle deep color*/
704 /* Temporary Work around, DRM don't parse color depth for
705 * EDID revision before 1.4
706 * TODO: Fix edid parsing
708 return COLOR_DEPTH_888
;
710 return COLOR_DEPTH_666
;
712 return COLOR_DEPTH_888
;
714 return COLOR_DEPTH_101010
;
716 return COLOR_DEPTH_121212
;
718 return COLOR_DEPTH_141414
;
720 return COLOR_DEPTH_161616
;
722 return COLOR_DEPTH_UNDEFINED
;
726 static enum dc_aspect_ratio
get_aspect_ratio(
727 const struct drm_display_mode
*mode_in
)
729 int32_t width
= mode_in
->crtc_hdisplay
* 9;
730 int32_t height
= mode_in
->crtc_vdisplay
* 16;
731 if ((width
- height
) < 10 && (width
- height
) > -10)
732 return ASPECT_RATIO_16_9
;
734 return ASPECT_RATIO_4_3
;
737 static enum dc_color_space
get_output_color_space(
738 const struct dc_crtc_timing
*dc_crtc_timing
)
740 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
742 switch (dc_crtc_timing
->pixel_encoding
) {
743 case PIXEL_ENCODING_YCBCR422
:
744 case PIXEL_ENCODING_YCBCR444
:
745 case PIXEL_ENCODING_YCBCR420
:
748 * 27030khz is the separation point between HDTV and SDTV
749 * according to HDMI spec, we use YCbCr709 and YCbCr601
752 if (dc_crtc_timing
->pix_clk_khz
> 27030) {
753 if (dc_crtc_timing
->flags
.Y_ONLY
)
755 COLOR_SPACE_YCBCR709_LIMITED
;
757 color_space
= COLOR_SPACE_YCBCR709
;
759 if (dc_crtc_timing
->flags
.Y_ONLY
)
761 COLOR_SPACE_YCBCR601_LIMITED
;
763 color_space
= COLOR_SPACE_YCBCR601
;
768 case PIXEL_ENCODING_RGB
:
769 color_space
= COLOR_SPACE_SRGB
;
780 /*****************************************************************************/
782 static void fill_stream_properties_from_drm_display_mode(
783 struct dc_stream
*stream
,
784 const struct drm_display_mode
*mode_in
,
785 const struct drm_connector
*connector
)
787 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
788 memset(timing_out
, 0, sizeof(struct dc_crtc_timing
));
790 timing_out
->h_border_left
= 0;
791 timing_out
->h_border_right
= 0;
792 timing_out
->v_border_top
= 0;
793 timing_out
->v_border_bottom
= 0;
794 /* TODO: un-hardcode */
796 if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
797 && stream
->sink
->sink_signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
798 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
800 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
802 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
803 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
805 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
806 timing_out
->hdmi_vic
= 0;
807 timing_out
->vic
= drm_match_cea_mode(mode_in
);
809 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
810 timing_out
->h_total
= mode_in
->crtc_htotal
;
811 timing_out
->h_sync_width
=
812 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
813 timing_out
->h_front_porch
=
814 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
815 timing_out
->v_total
= mode_in
->crtc_vtotal
;
816 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
817 timing_out
->v_front_porch
=
818 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
819 timing_out
->v_sync_width
=
820 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
821 timing_out
->pix_clk_khz
= mode_in
->crtc_clock
;
822 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
823 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
824 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
825 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
826 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
828 stream
->output_color_space
= get_output_color_space(timing_out
);
831 struct dc_transfer_func
*tf
= dc_create_transfer_func();
832 tf
->type
= TF_TYPE_PREDEFINED
;
833 tf
->tf
= TRANSFER_FUNCTION_SRGB
;
834 stream
->out_transfer_func
= tf
;
838 static void fill_audio_info(
839 struct audio_info
*audio_info
,
840 const struct drm_connector
*drm_connector
,
841 const struct dc_sink
*dc_sink
)
844 int cea_revision
= 0;
845 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
847 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
848 audio_info
->product_id
= edid_caps
->product_id
;
850 cea_revision
= drm_connector
->display_info
.cea_rev
;
852 while (i
< AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
&&
853 edid_caps
->display_name
[i
]) {
854 audio_info
->display_name
[i
] = edid_caps
->display_name
[i
];
858 if(cea_revision
>= 3) {
859 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
861 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
862 audio_info
->modes
[i
].format_code
=
863 (enum audio_format_code
)
864 (edid_caps
->audio_modes
[i
].format_code
);
865 audio_info
->modes
[i
].channel_count
=
866 edid_caps
->audio_modes
[i
].channel_count
;
867 audio_info
->modes
[i
].sample_rates
.all
=
868 edid_caps
->audio_modes
[i
].sample_rate
;
869 audio_info
->modes
[i
].sample_size
=
870 edid_caps
->audio_modes
[i
].sample_size
;
874 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
876 /* TODO: We only check for the progressive mode, check for interlace mode too */
877 if(drm_connector
->latency_present
[0]) {
878 audio_info
->video_latency
= drm_connector
->video_latency
[0];
879 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
882 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
886 static void copy_crtc_timing_for_drm_display_mode(
887 const struct drm_display_mode
*src_mode
,
888 struct drm_display_mode
*dst_mode
)
890 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
891 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
892 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
893 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
894 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
895 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
896 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
897 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
898 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
899 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;;
900 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;;
901 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;;
902 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;;
903 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;;
906 static void decide_crtc_timing_for_drm_display_mode(
907 struct drm_display_mode
*drm_mode
,
908 const struct drm_display_mode
*native_mode
,
912 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
913 } else if (native_mode
->clock
== drm_mode
->clock
&&
914 native_mode
->htotal
== drm_mode
->htotal
&&
915 native_mode
->vtotal
== drm_mode
->vtotal
) {
916 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
918 /* no scaling nor amdgpu inserted, no need to patch */
922 static struct dc_target
*create_target_for_sink(
923 const struct amdgpu_connector
*aconnector
,
924 const struct drm_display_mode
*drm_mode
,
925 const struct dm_connector_state
*dm_state
)
927 struct drm_display_mode
*preferred_mode
= NULL
;
928 const struct drm_connector
*drm_connector
;
929 struct dc_target
*target
= NULL
;
930 struct dc_stream
*stream
;
931 struct drm_display_mode mode
= *drm_mode
;
932 bool native_mode_found
= false;
934 if (NULL
== aconnector
) {
935 DRM_ERROR("aconnector is NULL!\n");
936 goto drm_connector_null
;
939 if (NULL
== dm_state
) {
940 DRM_ERROR("dm_state is NULL!\n");
944 drm_connector
= &aconnector
->base
;
945 stream
= dc_create_stream_for_sink(aconnector
->dc_sink
);
947 if (NULL
== stream
) {
948 DRM_ERROR("Failed to create stream for sink!\n");
949 goto stream_create_fail
;
952 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
953 /* Search for preferred mode */
954 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
955 native_mode_found
= true;
959 if (!native_mode_found
)
960 preferred_mode
= list_first_entry_or_null(
961 &aconnector
->base
.modes
,
962 struct drm_display_mode
,
965 if (NULL
== preferred_mode
) {
966 /* This may not be an error, the use case is when we we have no
967 * usermode calls to reset and set mode upon hotplug. In this
968 * case, we call set mode ourselves to restore the previous mode
969 * and the modelist may not be filled in in time.
971 DRM_INFO("No preferred mode found\n");
973 decide_crtc_timing_for_drm_display_mode(
974 &mode
, preferred_mode
,
975 dm_state
->scaling
!= RMX_OFF
);
978 fill_stream_properties_from_drm_display_mode(stream
,
979 &mode
, &aconnector
->base
);
980 update_stream_scaling_settings(&mode
, dm_state
, stream
);
985 aconnector
->dc_sink
);
987 target
= dc_create_target_for_streams(&stream
, 1);
988 dc_stream_release(stream
);
990 if (NULL
== target
) {
991 DRM_ERROR("Failed to create target with streams!\n");
992 goto target_create_fail
;
/* drm_crtc_funcs.destroy: tear down and free a DM CRTC. */
void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	/* NOTE(review): assumes the crtc was kzalloc'd by DM at init time —
	 * confirm against the allocation site. */
	kfree(crtc);
}
1008 /* Implemented only the options currently availible for the driver */
1009 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
1010 .reset
= drm_atomic_helper_crtc_reset
,
1011 .cursor_set
= dm_crtc_cursor_set
,
1012 .cursor_move
= dm_crtc_cursor_move
,
1013 .destroy
= amdgpu_dm_crtc_destroy
,
1014 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
1015 .set_config
= drm_atomic_helper_set_config
,
1016 .page_flip
= drm_atomic_helper_page_flip
,
1017 .atomic_duplicate_state
= drm_atomic_helper_crtc_duplicate_state
,
1018 .atomic_destroy_state
= drm_atomic_helper_crtc_destroy_state
,
1021 static enum drm_connector_status
1022 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
1025 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
1028 * 1. This interface is NOT called in context of HPD irq.
1029 * 2. This interface *is called* in context of user-mode ioctl. Which
1030 * makes it a bad place for *any* MST-related activit. */
1032 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
1033 connected
= (aconnector
->dc_sink
!= NULL
);
1035 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
1037 return (connected
? connector_status_connected
:
1038 connector_status_disconnected
);
1041 int amdgpu_dm_connector_atomic_set_property(
1042 struct drm_connector
*connector
,
1043 struct drm_connector_state
*connector_state
,
1044 struct drm_property
*property
,
1047 struct drm_device
*dev
= connector
->dev
;
1048 struct amdgpu_device
*adev
= dev
->dev_private
;
1049 struct dm_connector_state
*dm_old_state
=
1050 to_dm_connector_state(connector
->state
);
1051 struct dm_connector_state
*dm_new_state
=
1052 to_dm_connector_state(connector_state
);
1054 struct drm_crtc_state
*new_crtc_state
;
1055 struct drm_crtc
*crtc
;
1059 if (property
== dev
->mode_config
.scaling_mode_property
) {
1060 enum amdgpu_rmx_type rmx_type
;
1063 case DRM_MODE_SCALE_CENTER
:
1064 rmx_type
= RMX_CENTER
;
1066 case DRM_MODE_SCALE_ASPECT
:
1067 rmx_type
= RMX_ASPECT
;
1069 case DRM_MODE_SCALE_FULLSCREEN
:
1070 rmx_type
= RMX_FULL
;
1072 case DRM_MODE_SCALE_NONE
:
1078 if (dm_old_state
->scaling
== rmx_type
)
1081 dm_new_state
->scaling
= rmx_type
;
1083 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
1084 dm_new_state
->underscan_hborder
= val
;
1086 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
1087 dm_new_state
->underscan_vborder
= val
;
1089 } else if (property
== adev
->mode_info
.underscan_property
) {
1090 dm_new_state
->underscan_enable
= val
;
1094 for_each_crtc_in_state(
1095 connector_state
->state
,
1100 if (crtc
== connector_state
->crtc
) {
1101 struct drm_plane_state
*plane_state
;
1104 * Bit of magic done here. We need to ensure
1105 * that planes get update after mode is set.
1106 * So, we need to add primary plane to state,
1107 * and this way atomic_update would be called
1111 drm_atomic_get_plane_state(
1112 connector_state
->state
,
1123 void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
1125 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
1126 const struct dc_link
*link
= aconnector
->dc_link
;
1127 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
1128 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1129 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1130 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1132 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
1133 amdgpu_dm_register_backlight_device(dm
);
1135 if (dm
->backlight_dev
) {
1136 backlight_device_unregister(dm
->backlight_dev
);
1137 dm
->backlight_dev
= NULL
;
1142 drm_connector_unregister(connector
);
1143 drm_connector_cleanup(connector
);
1147 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
1149 struct dm_connector_state
*state
=
1150 to_dm_connector_state(connector
->state
);
1154 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
1157 state
->scaling
= RMX_OFF
;
1158 state
->underscan_enable
= false;
1159 state
->underscan_hborder
= 0;
1160 state
->underscan_vborder
= 0;
1162 connector
->state
= &state
->base
;
1163 connector
->state
->connector
= connector
;
1167 struct drm_connector_state
*amdgpu_dm_connector_atomic_duplicate_state(
1168 struct drm_connector
*connector
)
1170 struct dm_connector_state
*state
=
1171 to_dm_connector_state(connector
->state
);
1173 struct dm_connector_state
*new_state
=
1174 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
1177 __drm_atomic_helper_connector_duplicate_state(connector
,
1179 return &new_state
->base
;
1185 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
1186 .reset
= amdgpu_dm_connector_funcs_reset
,
1187 .detect
= amdgpu_dm_connector_detect
,
1188 .fill_modes
= drm_helper_probe_single_connector_modes
,
1189 .destroy
= amdgpu_dm_connector_destroy
,
1190 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
1191 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
1192 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
1195 static struct drm_encoder
*best_encoder(struct drm_connector
*connector
)
1197 int enc_id
= connector
->encoder_ids
[0];
1198 struct drm_mode_object
*obj
;
1199 struct drm_encoder
*encoder
;
1201 DRM_DEBUG_KMS("Finding the best encoder\n");
1203 /* pick the encoder ids */
1205 obj
= drm_mode_object_find(connector
->dev
, enc_id
, DRM_MODE_OBJECT_ENCODER
);
1207 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
1210 encoder
= obj_to_encoder(obj
);
1213 DRM_ERROR("No encoder id\n");
/* drm_connector_helper_funcs.get_modes: delegate to the DM mode query. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
1222 static void create_eml_sink(struct amdgpu_connector
*aconnector
)
1224 struct dc_sink_init_data init_params
= {
1225 .link
= aconnector
->dc_link
,
1226 .sink_signal
= SIGNAL_TYPE_VIRTUAL
1228 struct edid
*edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
1230 if (!aconnector
->base
.edid_blob_ptr
||
1231 !aconnector
->base
.edid_blob_ptr
->data
) {
1232 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
1233 aconnector
->base
.name
);
1235 aconnector
->base
.force
= DRM_FORCE_OFF
;
1236 aconnector
->base
.override_edid
= false;
1240 aconnector
->edid
= edid
;
1242 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
1243 aconnector
->dc_link
,
1245 (edid
->extensions
+ 1) * EDID_LENGTH
,
1248 if (aconnector
->base
.force
1250 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
1251 aconnector
->dc_link
->local_sink
:
1252 aconnector
->dc_em_sink
;
1255 static void handle_edid_mgmt(struct amdgpu_connector
*aconnector
)
1257 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
1259 /* In case of headless boot with force on for DP managed connector
1260 * Those settings have to be != 0 to get initial modeset
1262 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
1263 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
1264 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
1268 aconnector
->base
.override_edid
= true;
1269 create_eml_sink(aconnector
);
1272 int amdgpu_dm_connector_mode_valid(
1273 struct drm_connector
*connector
,
1274 struct drm_display_mode
*mode
)
1276 int result
= MODE_ERROR
;
1277 const struct dc_sink
*dc_sink
;
1278 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
1279 struct dc_validation_set val_set
= { 0 };
1280 /* TODO: Unhardcode stream count */
1281 struct dc_stream
*streams
[1];
1282 struct dc_target
*target
;
1283 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
1285 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
1286 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
1289 /* Only run this the first time mode_valid is called to initilialize
1292 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
1293 !aconnector
->dc_em_sink
)
1294 handle_edid_mgmt(aconnector
);
1296 dc_sink
= to_amdgpu_connector(connector
)->dc_sink
;
1298 if (NULL
== dc_sink
) {
1299 DRM_ERROR("dc_sink is NULL!\n");
1300 goto stream_create_fail
;
1303 streams
[0] = dc_create_stream_for_sink(dc_sink
);
1305 if (NULL
== streams
[0]) {
1306 DRM_ERROR("Failed to create stream for sink!\n");
1307 goto stream_create_fail
;
1310 drm_mode_set_crtcinfo(mode
, 0);
1311 fill_stream_properties_from_drm_display_mode(streams
[0], mode
, connector
);
1313 target
= dc_create_target_for_streams(streams
, 1);
1314 val_set
.target
= target
;
1316 if (NULL
== val_set
.target
) {
1317 DRM_ERROR("Failed to create target with stream!\n");
1318 goto target_create_fail
;
1321 val_set
.surface_count
= 0;
1322 streams
[0]->src
.width
= mode
->hdisplay
;
1323 streams
[0]->src
.height
= mode
->vdisplay
;
1324 streams
[0]->dst
= streams
[0]->src
;
1326 if (dc_validate_resources(adev
->dm
.dc
, &val_set
, 1))
1329 dc_target_release(target
);
1331 dc_stream_release(streams
[0]);
1333 /* TODO: error handling*/
1337 static const struct drm_connector_helper_funcs
1338 amdgpu_dm_connector_helper_funcs
= {
1340 * If hotplug a second bigger display in FB Con mode, bigger resolution
1341 * modes will be filtered by drm_mode_validate_size(), and those modes
1342 * is missing after user start lightdm. So we need to renew modes list.
1343 * in get_modes call back, not just return the modes count
1345 .get_modes
= get_modes
,
1346 .mode_valid
= amdgpu_dm_connector_mode_valid
,
1347 .best_encoder
= best_encoder
1350 static void dm_crtc_helper_disable(struct drm_crtc
*crtc
)
1354 static int dm_crtc_helper_atomic_check(
1355 struct drm_crtc
*crtc
,
1356 struct drm_crtc_state
*state
)
1361 static bool dm_crtc_helper_mode_fixup(
1362 struct drm_crtc
*crtc
,
1363 const struct drm_display_mode
*mode
,
1364 struct drm_display_mode
*adjusted_mode
)
1369 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
1370 .disable
= dm_crtc_helper_disable
,
1371 .atomic_check
= dm_crtc_helper_atomic_check
,
1372 .mode_fixup
= dm_crtc_helper_mode_fixup
1375 static void dm_encoder_helper_disable(struct drm_encoder
*encoder
)
1380 static int dm_encoder_helper_atomic_check(
1381 struct drm_encoder
*encoder
,
1382 struct drm_crtc_state
*crtc_state
,
1383 struct drm_connector_state
*conn_state
)
1388 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
1389 .disable
= dm_encoder_helper_disable
,
1390 .atomic_check
= dm_encoder_helper_atomic_check
1393 static const struct drm_plane_funcs dm_plane_funcs
= {
1394 .reset
= drm_atomic_helper_plane_reset
,
1395 .atomic_duplicate_state
= drm_atomic_helper_plane_duplicate_state
,
1396 .atomic_destroy_state
= drm_atomic_helper_plane_destroy_state
1399 static void clear_unrelated_fields(struct drm_plane_state
*state
)
1403 state
->state
= NULL
;
1404 state
->fence
= NULL
;
1407 static bool page_flip_needed(
1408 const struct drm_plane_state
*new_state
,
1409 const struct drm_plane_state
*old_state
,
1410 struct drm_pending_vblank_event
*event
,
1411 bool commit_surface_required
)
1413 struct drm_plane_state old_state_tmp
;
1414 struct drm_plane_state new_state_tmp
;
1416 struct amdgpu_framebuffer
*amdgpu_fb_old
;
1417 struct amdgpu_framebuffer
*amdgpu_fb_new
;
1418 struct amdgpu_crtc
*acrtc_new
;
1420 uint64_t old_tiling_flags
;
1421 uint64_t new_tiling_flags
;
1423 bool page_flip_required
;
1437 old_state_tmp
= *old_state
;
1438 new_state_tmp
= *new_state
;
1443 amdgpu_fb_old
= to_amdgpu_framebuffer(old_state
->fb
);
1444 amdgpu_fb_new
= to_amdgpu_framebuffer(new_state
->fb
);
1446 if (!get_fb_info(amdgpu_fb_old
, &old_tiling_flags
, NULL
))
1449 if (!get_fb_info(amdgpu_fb_new
, &new_tiling_flags
, NULL
))
1452 if (commit_surface_required
== true &&
1453 old_tiling_flags
!= new_tiling_flags
)
1456 clear_unrelated_fields(&old_state_tmp
);
1457 clear_unrelated_fields(&new_state_tmp
);
1459 page_flip_required
= memcmp(&old_state_tmp
,
1461 sizeof(old_state_tmp
)) == 0 ? true:false;
1462 if (new_state
->crtc
&& page_flip_required
== false) {
1463 acrtc_new
= to_amdgpu_crtc(new_state
->crtc
);
1464 if (acrtc_new
->flip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
)
1465 page_flip_required
= true;
1467 return page_flip_required
;
1470 static int dm_plane_helper_prepare_fb(
1471 struct drm_plane
*plane
,
1472 struct drm_plane_state
*new_state
)
1474 struct amdgpu_framebuffer
*afb
;
1475 struct drm_gem_object
*obj
;
1476 struct amdgpu_bo
*rbo
;
1479 if (!new_state
->fb
) {
1480 DRM_DEBUG_KMS("No FB bound\n");
1484 afb
= to_amdgpu_framebuffer(new_state
->fb
);
1487 rbo
= gem_to_amdgpu_bo(obj
);
1488 r
= amdgpu_bo_reserve(rbo
, false);
1489 if (unlikely(r
!= 0))
1492 r
= amdgpu_bo_pin(rbo
, AMDGPU_GEM_DOMAIN_VRAM
, NULL
);
1494 amdgpu_bo_unreserve(rbo
);
1496 if (unlikely(r
!= 0)) {
1497 DRM_ERROR("Failed to pin framebuffer\n");
1504 static void dm_plane_helper_cleanup_fb(
1505 struct drm_plane
*plane
,
1506 struct drm_plane_state
*old_state
)
1508 struct amdgpu_bo
*rbo
;
1509 struct amdgpu_framebuffer
*afb
;
1515 afb
= to_amdgpu_framebuffer(old_state
->fb
);
1516 rbo
= gem_to_amdgpu_bo(afb
->obj
);
1517 r
= amdgpu_bo_reserve(rbo
, false);
1519 DRM_ERROR("failed to reserve rbo before unpin\n");
1522 amdgpu_bo_unpin(rbo
);
1523 amdgpu_bo_unreserve(rbo
);
1527 int dm_create_validation_set_for_target(struct drm_connector
*connector
,
1528 struct drm_display_mode
*mode
, struct dc_validation_set
*val_set
)
1530 int result
= MODE_ERROR
;
1531 const struct dc_sink
*dc_sink
=
1532 to_amdgpu_connector(connector
)->dc_sink
;
1533 /* TODO: Unhardcode stream count */
1534 struct dc_stream
*streams
[1];
1535 struct dc_target
*target
;
1537 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
1538 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
1541 if (NULL
== dc_sink
) {
1542 DRM_ERROR("dc_sink is NULL!\n");
1546 streams
[0] = dc_create_stream_for_sink(dc_sink
);
1548 if (NULL
== streams
[0]) {
1549 DRM_ERROR("Failed to create stream for sink!\n");
1553 drm_mode_set_crtcinfo(mode
, 0);
1555 fill_stream_properties_from_drm_display_mode(streams
[0], mode
, connector
);
1557 target
= dc_create_target_for_streams(streams
, 1);
1558 val_set
->target
= target
;
1560 if (NULL
== val_set
->target
) {
1561 DRM_ERROR("Failed to create target with stream!\n");
1565 streams
[0]->src
.width
= mode
->hdisplay
;
1566 streams
[0]->src
.height
= mode
->vdisplay
;
1567 streams
[0]->dst
= streams
[0]->src
;
1572 dc_stream_release(streams
[0]);
1577 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
1578 .prepare_fb
= dm_plane_helper_prepare_fb
,
1579 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
1583 * TODO: these are currently initialized to rgb formats only.
1584 * For future use cases we should either initialize them dynamically based on
1585 * plane capabilities, or initialize this array to all formats, so internal drm
1586 * check will succeed, and let DC to implement proper check
1588 static uint32_t rgb_formats
[] = {
1589 DRM_FORMAT_XRGB4444
,
1590 DRM_FORMAT_ARGB4444
,
1591 DRM_FORMAT_RGBA4444
,
1592 DRM_FORMAT_ARGB1555
,
1595 DRM_FORMAT_XRGB8888
,
1596 DRM_FORMAT_ARGB8888
,
1597 DRM_FORMAT_RGBA8888
,
1598 DRM_FORMAT_XRGB2101010
,
1599 DRM_FORMAT_XBGR2101010
,
1600 DRM_FORMAT_ARGB2101010
,
1601 DRM_FORMAT_ABGR2101010
,
1604 int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
1605 struct amdgpu_crtc
*acrtc
,
1606 uint32_t crtc_index
)
1610 struct drm_plane
*primary_plane
=
1611 kzalloc(sizeof(*primary_plane
), GFP_KERNEL
);
1616 primary_plane
->format_default
= true;
1618 res
= drm_universal_plane_init(
1624 ARRAY_SIZE(rgb_formats
),
1626 DRM_PLANE_TYPE_PRIMARY
, NULL
);
1628 primary_plane
->crtc
= &acrtc
->base
;
1630 drm_plane_helper_add(primary_plane
, &dm_plane_helper_funcs
);
1632 res
= drm_crtc_init_with_planes(
1637 &amdgpu_dm_crtc_funcs
, NULL
);
1642 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
1644 acrtc
->max_cursor_width
= 128;
1645 acrtc
->max_cursor_height
= 128;
1647 acrtc
->crtc_id
= crtc_index
;
1648 acrtc
->base
.enabled
= false;
1650 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
1651 drm_mode_crtc_set_gamma_size(&acrtc
->base
, 256);
1655 kfree(primary_plane
);
1657 acrtc
->crtc_id
= -1;
1661 static int to_drm_connector_type(enum signal_type st
)
1664 case SIGNAL_TYPE_HDMI_TYPE_A
:
1665 return DRM_MODE_CONNECTOR_HDMIA
;
1666 case SIGNAL_TYPE_EDP
:
1667 return DRM_MODE_CONNECTOR_eDP
;
1668 case SIGNAL_TYPE_RGB
:
1669 return DRM_MODE_CONNECTOR_VGA
;
1670 case SIGNAL_TYPE_DISPLAY_PORT
:
1671 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
1672 return DRM_MODE_CONNECTOR_DisplayPort
;
1673 case SIGNAL_TYPE_DVI_DUAL_LINK
:
1674 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
1675 return DRM_MODE_CONNECTOR_DVID
;
1676 case SIGNAL_TYPE_VIRTUAL
:
1677 return DRM_MODE_CONNECTOR_VIRTUAL
;
1680 return DRM_MODE_CONNECTOR_Unknown
;
1684 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
1686 const struct drm_connector_helper_funcs
*helper
=
1687 connector
->helper_private
;
1688 struct drm_encoder
*encoder
;
1689 struct amdgpu_encoder
*amdgpu_encoder
;
1691 encoder
= helper
->best_encoder(connector
);
1693 if (encoder
== NULL
)
1696 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
1698 amdgpu_encoder
->native_mode
.clock
= 0;
1700 if (!list_empty(&connector
->probed_modes
)) {
1701 struct drm_display_mode
*preferred_mode
= NULL
;
1702 list_for_each_entry(preferred_mode
,
1703 &connector
->probed_modes
,
1705 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
1706 amdgpu_encoder
->native_mode
= *preferred_mode
;
1714 static struct drm_display_mode
*amdgpu_dm_create_common_mode(
1715 struct drm_encoder
*encoder
, char *name
,
1716 int hdisplay
, int vdisplay
)
1718 struct drm_device
*dev
= encoder
->dev
;
1719 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
1720 struct drm_display_mode
*mode
= NULL
;
1721 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
1723 mode
= drm_mode_duplicate(dev
, native_mode
);
1728 mode
->hdisplay
= hdisplay
;
1729 mode
->vdisplay
= vdisplay
;
1730 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
1731 strncpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
1737 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
1738 struct drm_connector
*connector
)
1740 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
1741 struct drm_display_mode
*mode
= NULL
;
1742 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
1743 struct amdgpu_connector
*amdgpu_connector
=
1744 to_amdgpu_connector(connector
);
1748 char name
[DRM_DISPLAY_MODE_LEN
];
1752 { "640x480", 640, 480},
1753 { "800x600", 800, 600},
1754 { "1024x768", 1024, 768},
1755 { "1280x720", 1280, 720},
1756 { "1280x800", 1280, 800},
1757 {"1280x1024", 1280, 1024},
1758 { "1440x900", 1440, 900},
1759 {"1680x1050", 1680, 1050},
1760 {"1600x1200", 1600, 1200},
1761 {"1920x1080", 1920, 1080},
1762 {"1920x1200", 1920, 1200}
1765 n
= sizeof(common_modes
) / sizeof(common_modes
[0]);
1767 for (i
= 0; i
< n
; i
++) {
1768 struct drm_display_mode
*curmode
= NULL
;
1769 bool mode_existed
= false;
1771 if (common_modes
[i
].w
> native_mode
->hdisplay
||
1772 common_modes
[i
].h
> native_mode
->vdisplay
||
1773 (common_modes
[i
].w
== native_mode
->hdisplay
&&
1774 common_modes
[i
].h
== native_mode
->vdisplay
))
1777 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
1778 if (common_modes
[i
].w
== curmode
->hdisplay
&&
1779 common_modes
[i
].h
== curmode
->vdisplay
) {
1780 mode_existed
= true;
1788 mode
= amdgpu_dm_create_common_mode(encoder
,
1789 common_modes
[i
].name
, common_modes
[i
].w
,
1791 drm_mode_probed_add(connector
, mode
);
1792 amdgpu_connector
->num_modes
++;
1796 static void amdgpu_dm_connector_ddc_get_modes(
1797 struct drm_connector
*connector
,
1800 struct amdgpu_connector
*amdgpu_connector
=
1801 to_amdgpu_connector(connector
);
1804 /* empty probed_modes */
1805 INIT_LIST_HEAD(&connector
->probed_modes
);
1806 amdgpu_connector
->num_modes
=
1807 drm_add_edid_modes(connector
, edid
);
1809 drm_edid_to_eld(connector
, edid
);
1811 amdgpu_dm_get_native_mode(connector
);
1813 amdgpu_connector
->num_modes
= 0;
1816 int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
1818 const struct drm_connector_helper_funcs
*helper
=
1819 connector
->helper_private
;
1820 struct amdgpu_connector
*amdgpu_connector
=
1821 to_amdgpu_connector(connector
);
1822 struct drm_encoder
*encoder
;
1823 struct edid
*edid
= amdgpu_connector
->edid
;
1825 encoder
= helper
->best_encoder(connector
);
1827 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
1828 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
1829 return amdgpu_connector
->num_modes
;
1832 void amdgpu_dm_connector_init_helper(
1833 struct amdgpu_display_manager
*dm
,
1834 struct amdgpu_connector
*aconnector
,
1836 const struct dc_link
*link
,
1839 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
1841 aconnector
->connector_id
= link_index
;
1842 aconnector
->dc_link
= link
;
1843 aconnector
->base
.interlace_allowed
= true;
1844 aconnector
->base
.doublescan_allowed
= true;
1845 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
1846 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
1848 mutex_init(&aconnector
->hpd_lock
);
1850 /*configure suport HPD hot plug connector_>polled default value is 0
1851 * which means HPD hot plug not supported*/
1852 switch (connector_type
) {
1853 case DRM_MODE_CONNECTOR_HDMIA
:
1854 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
1856 case DRM_MODE_CONNECTOR_DisplayPort
:
1857 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
1859 case DRM_MODE_CONNECTOR_DVID
:
1860 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
1866 drm_object_attach_property(&aconnector
->base
.base
,
1867 dm
->ddev
->mode_config
.scaling_mode_property
,
1868 DRM_MODE_SCALE_NONE
);
1870 drm_object_attach_property(&aconnector
->base
.base
,
1871 adev
->mode_info
.underscan_property
,
1873 drm_object_attach_property(&aconnector
->base
.base
,
1874 adev
->mode_info
.underscan_hborder_property
,
1876 drm_object_attach_property(&aconnector
->base
.base
,
1877 adev
->mode_info
.underscan_vborder_property
,
1882 int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
1883 struct i2c_msg
*msgs
, int num
)
1885 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
1886 struct i2c_command cmd
;
1890 cmd
.payloads
= kzalloc(num
* sizeof(struct i2c_payload
), GFP_KERNEL
);
1895 cmd
.number_of_payloads
= num
;
1896 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
1899 for (i
= 0; i
< num
; i
++) {
1900 cmd
.payloads
[i
].write
= (msgs
[i
].flags
& I2C_M_RD
);
1901 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
1902 cmd
.payloads
[i
].length
= msgs
[i
].len
;
1903 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
1906 if (dc_submit_i2c(i2c
->dm
->dc
, i2c
->link_index
, &cmd
))
1909 kfree(cmd
.payloads
);
1914 u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
1916 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
1919 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
1920 .master_xfer
= amdgpu_dm_i2c_xfer
,
1921 .functionality
= amdgpu_dm_i2c_func
,
1924 struct amdgpu_i2c_adapter
*create_i2c(unsigned int link_index
, struct amdgpu_display_manager
*dm
, int *res
)
1926 struct amdgpu_i2c_adapter
*i2c
;
1928 i2c
= kzalloc(sizeof (struct amdgpu_i2c_adapter
), GFP_KERNEL
);
1930 i2c
->base
.owner
= THIS_MODULE
;
1931 i2c
->base
.class = I2C_CLASS_DDC
;
1932 i2c
->base
.dev
.parent
= &dm
->adev
->pdev
->dev
;
1933 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
1934 snprintf(i2c
->base
.name
, sizeof (i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
1935 i2c
->link_index
= link_index
;
1936 i2c_set_adapdata(&i2c
->base
, i2c
);
1941 /* Note: this function assumes that dc_link_detect() was called for the
1942 * dc_link which will be represented by this aconnector. */
1943 int amdgpu_dm_connector_init(
1944 struct amdgpu_display_manager
*dm
,
1945 struct amdgpu_connector
*aconnector
,
1946 uint32_t link_index
,
1947 struct amdgpu_encoder
*aencoder
)
1951 struct dc
*dc
= dm
->dc
;
1952 const struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
1953 struct amdgpu_i2c_adapter
*i2c
;
1955 DRM_DEBUG_KMS("%s()\n", __func__
);
1957 i2c
= create_i2c(link
->link_index
, dm
, &res
);
1958 aconnector
->i2c
= i2c
;
1959 res
= i2c_add_adapter(&i2c
->base
);
1962 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
1966 connector_type
= to_drm_connector_type(link
->connector_signal
);
1968 res
= drm_connector_init(
1971 &amdgpu_dm_connector_funcs
,
1975 DRM_ERROR("connector_init failed\n");
1976 aconnector
->connector_id
= -1;
1980 drm_connector_helper_add(
1982 &amdgpu_dm_connector_helper_funcs
);
1984 amdgpu_dm_connector_init_helper(
1991 drm_mode_connector_attach_encoder(
1992 &aconnector
->base
, &aencoder
->base
);
1994 drm_connector_register(&aconnector
->base
);
1996 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
1997 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
1998 amdgpu_dm_initialize_mst_connector(dm
, aconnector
);
2000 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2001 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2003 /* NOTE: this currently will create backlight device even if a panel
2004 * is not connected to the eDP/LVDS connector.
2006 * This is less than ideal but we don't have sink information at this
2007 * stage since detection happens after. We can't do detection earlier
2008 * since MST detection needs connectors to be created first.
2010 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
2011 /* Event if registration failed, we should continue with
2012 * DM initialization because not having a backlight control
2013 * is better then a black screen. */
2014 amdgpu_dm_register_backlight_device(dm
);
2016 if (dm
->backlight_dev
)
2017 dm
->backlight_link
= link
;
2024 aconnector
->i2c
= NULL
;
2029 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
2031 switch (adev
->mode_info
.num_crtc
) {
2048 int amdgpu_dm_encoder_init(
2049 struct drm_device
*dev
,
2050 struct amdgpu_encoder
*aencoder
,
2051 uint32_t link_index
)
2053 struct amdgpu_device
*adev
= dev
->dev_private
;
2055 int res
= drm_encoder_init(dev
,
2057 &amdgpu_dm_encoder_funcs
,
2058 DRM_MODE_ENCODER_TMDS
,
2061 aencoder
->base
.possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
2064 aencoder
->encoder_id
= link_index
;
2066 aencoder
->encoder_id
= -1;
2068 drm_encoder_helper_add(&aencoder
->base
, &amdgpu_dm_encoder_helper_funcs
);
/* Classification of what an atomic commit must do for one CRTC. */
enum dm_commit_action {
	DM_COMMIT_ACTION_NOTHING,
	DM_COMMIT_ACTION_RESET,
	DM_COMMIT_ACTION_DPMS_ON,
	DM_COMMIT_ACTION_DPMS_OFF,
	DM_COMMIT_ACTION_SET
};
2081 static enum dm_commit_action
get_dm_commit_action(struct drm_crtc_state
*state
)
2083 /* mode changed means either actually mode changed or enabled changed */
2084 /* active changed means dpms changed */
2086 DRM_DEBUG_KMS("crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
2089 state
->planes_changed
,
2090 state
->mode_changed
,
2091 state
->active_changed
,
2092 state
->connectors_changed
);
2094 if (state
->mode_changed
) {
2095 /* if it is got disabled - call reset mode */
2097 return DM_COMMIT_ACTION_RESET
;
2100 return DM_COMMIT_ACTION_SET
;
2102 return DM_COMMIT_ACTION_RESET
;
2104 /* ! mode_changed */
2106 /* if it is remain disable - skip it */
2108 return DM_COMMIT_ACTION_NOTHING
;
2110 if (state
->active
&& state
->connectors_changed
)
2111 return DM_COMMIT_ACTION_SET
;
2113 if (state
->active_changed
) {
2114 if (state
->active
) {
2115 return DM_COMMIT_ACTION_DPMS_ON
;
2117 return DM_COMMIT_ACTION_DPMS_OFF
;
2120 /* ! active_changed */
2121 return DM_COMMIT_ACTION_NOTHING
;
2127 typedef bool (*predicate
)(struct amdgpu_crtc
*acrtc
);
2129 static void wait_while_pflip_status(struct amdgpu_device
*adev
,
2130 struct amdgpu_crtc
*acrtc
, predicate f
) {
2136 if (count
== 1000) {
2137 DRM_ERROR("%s - crtc:%d[%p], pflip_stat:%d, probable hang!\n",
2138 __func__
, acrtc
->crtc_id
,
2140 acrtc
->pflip_status
);
2142 /* we do not expect to hit this case except on Polaris with PHY PLL
2143 * 1. DP to HDMI passive dongle connected
2144 * 2. unplug (headless)
2146 * 3a. on plug in, DP will try verify link by training, and training
2147 * would disable PHY PLL which HDMI rely on to drive TG
2148 * 3b. this will cause flip interrupt cannot be generated, and we
2149 * exit when timeout expired. however we do not have code to clean
2150 * up flip, flip clean up will happen when the address is written
2151 * with the restore mode change
2158 DRM_DEBUG_DRIVER("%s - Finished waiting for:%d msec, crtc:%d[%p], pflip_stat:%d \n",
2163 acrtc
->pflip_status
);
2166 static bool pflip_in_progress_predicate(struct amdgpu_crtc
*acrtc
)
2168 return acrtc
->pflip_status
!= AMDGPU_FLIP_NONE
;
2171 static void manage_dm_interrupts(
2172 struct amdgpu_device
*adev
,
2173 struct amdgpu_crtc
*acrtc
,
2177 * this is not correct translation but will work as soon as VBLANK
2178 * constant is the same as PFLIP
2181 amdgpu_crtc_idx_to_irq_type(
2186 drm_crtc_vblank_on(&acrtc
->base
);
2189 &adev
->pageflip_irq
,
2192 wait_while_pflip_status(adev
, acrtc
,
2193 pflip_in_progress_predicate
);
2197 &adev
->pageflip_irq
,
2199 drm_crtc_vblank_off(&acrtc
->base
);
2204 static bool pflip_pending_predicate(struct amdgpu_crtc
*acrtc
)
2206 return acrtc
->pflip_status
== AMDGPU_FLIP_PENDING
;
2209 static bool is_scaling_state_different(
2210 const struct dm_connector_state
*dm_state
,
2211 const struct dm_connector_state
*old_dm_state
)
2213 if (dm_state
->scaling
!= old_dm_state
->scaling
)
2215 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
2216 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
2218 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
2219 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
2221 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
2222 || dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
2227 static void remove_target(struct amdgpu_device
*adev
, struct amdgpu_crtc
*acrtc
)
2232 * we evade vblanks and pflips on crtc that
2235 manage_dm_interrupts(adev
, acrtc
, false);
2236 /* this is the update mode case */
2237 if (adev
->dm
.freesync_module
)
2238 for (i
= 0; i
< acrtc
->target
->stream_count
; i
++)
2239 mod_freesync_remove_stream(
2240 adev
->dm
.freesync_module
,
2241 acrtc
->target
->streams
[i
]);
2242 dc_target_release(acrtc
->target
);
2243 acrtc
->target
= NULL
;
2244 acrtc
->otg_inst
= -1;
2245 acrtc
->enabled
= false;
2248 int amdgpu_dm_atomic_commit(
2249 struct drm_device
*dev
,
2250 struct drm_atomic_state
*state
,
2253 struct amdgpu_device
*adev
= dev
->dev_private
;
2254 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2255 struct drm_plane
*plane
;
2256 struct drm_plane_state
*new_plane_state
;
2257 struct drm_plane_state
*old_plane_state
;
2260 uint32_t commit_targets_count
= 0;
2261 uint32_t new_crtcs_count
= 0;
2262 uint32_t flip_crtcs_count
= 0;
2263 struct drm_crtc
*crtc
;
2264 struct drm_crtc_state
*old_crtc_state
;
2266 struct dc_target
*commit_targets
[MAX_TARGETS
];
2267 struct amdgpu_crtc
*new_crtcs
[MAX_TARGETS
];
2268 struct dc_target
*new_target
;
2269 struct drm_crtc
*flip_crtcs
[MAX_TARGETS
];
2270 struct amdgpu_flip_work
*work
[MAX_TARGETS
] = {0};
2271 struct amdgpu_bo
*new_abo
[MAX_TARGETS
] = {0};
2273 /* In this step all new fb would be pinned */
2276 * TODO: Revisit when we support true asynchronous commit.
2277 * Right now we receive async commit only from pageflip, in which case
2278 * we should not pin/unpin the fb here, it should be done in
2279 * amdgpu_crtc_flip and from the vblank irq handler.
2282 ret
= drm_atomic_helper_prepare_planes(dev
, state
);
2287 /* Page flip if needed */
2288 for_each_plane_in_state(state
, plane
, new_plane_state
, i
) {
2289 struct drm_plane_state
*old_plane_state
= plane
->state
;
2290 struct drm_crtc
*crtc
= new_plane_state
->crtc
;
2291 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2292 struct drm_framebuffer
*fb
= new_plane_state
->fb
;
2293 struct drm_crtc_state
*crtc_state
;
2298 crtc_state
= drm_atomic_get_crtc_state(state
, crtc
);
2300 if (!crtc_state
->planes_changed
|| !crtc_state
->active
)
2303 if (page_flip_needed(
2308 ret
= amdgpu_crtc_prepare_flip(crtc
,
2312 drm_crtc_vblank_count(crtc
),
2313 &work
[flip_crtcs_count
],
2314 &new_abo
[flip_crtcs_count
]);
2317 /* According to atomic_commit hook API, EINVAL is not allowed */
2318 if (unlikely(ret
== -EINVAL
))
2321 DRM_ERROR("Atomic commit: Flip for crtc id %d: [%p], "
2322 "failed, errno = %d\n",
2326 /* cleanup all flip configurations which
2327 * succeeded in this commit
2329 for (i
= 0; i
< flip_crtcs_count
; i
++)
2330 amdgpu_crtc_cleanup_flip_ctx(
2337 flip_crtcs
[flip_crtcs_count
] = crtc
;
2343 * This is the point of no return - everything below never fails except
2344 * when the hw goes bonghits. Which means we can commit the new state on
2345 * the software side now.
2348 drm_atomic_helper_swap_state(state
, true);
2351 * From this point state become old state really. New state is
2352 * initialized to appropriate objects and could be accessed from there
2356 * there is no fences usage yet in state. We can skip the following line
2357 * wait_for_fences(dev, state);
2360 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
2362 /* update changed items */
2363 for_each_crtc_in_state(state
, crtc
, old_crtc_state
, i
) {
2364 struct amdgpu_crtc
*acrtc
;
2365 struct amdgpu_connector
*aconnector
= NULL
;
2366 enum dm_commit_action action
;
2367 struct drm_crtc_state
*new_state
= crtc
->state
;
2369 acrtc
= to_amdgpu_crtc(crtc
);
2372 amdgpu_dm_find_first_crct_matching_connector(
2377 /* handles headless hotplug case, updating new_state and
2378 * aconnector as needed
2381 action
= get_dm_commit_action(new_state
);
2384 case DM_COMMIT_ACTION_DPMS_ON
:
2385 case DM_COMMIT_ACTION_SET
: {
2386 struct dm_connector_state
*dm_state
= NULL
;
2390 dm_state
= to_dm_connector_state(aconnector
->base
.state
);
2392 new_target
= create_target_for_sink(
2397 DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc
->crtc_id
, acrtc
);
2401 * this could happen because of issues with
2402 * userspace notifications delivery.
2403 * In this case userspace tries to set mode on
2404 * display which is disconnect in fact.
2405 * dc_sink in NULL in this case on aconnector.
2406 * We expect reset mode will come soon.
2408 * This can also happen when unplug is done
2409 * during resume sequence ended
2411 * In this case, we want to pretend we still
2412 * have a sink to keep the pipe running so that
2413 * hw state is consistent with the sw state
2415 DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n",
2416 __func__
, acrtc
->base
.base
.id
);
2421 remove_target(adev
, acrtc
);
2424 * this loop saves set mode crtcs
2425 * we needed to enable vblanks once all
2426 * resources acquired in dc after dc_commit_targets
2428 new_crtcs
[new_crtcs_count
] = acrtc
;
2431 acrtc
->target
= new_target
;
2432 acrtc
->enabled
= true;
2433 acrtc
->hw_mode
= crtc
->state
->mode
;
2434 crtc
->hwmode
= crtc
->state
->mode
;
2439 case DM_COMMIT_ACTION_NOTHING
: {
2440 struct dm_connector_state
*dm_state
= NULL
;
2445 dm_state
= to_dm_connector_state(aconnector
->base
.state
);
2447 /* Scaling update */
2448 update_stream_scaling_settings(
2451 acrtc
->target
->streams
[0]);
2455 case DM_COMMIT_ACTION_DPMS_OFF
:
2456 case DM_COMMIT_ACTION_RESET
:
2457 DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc
->crtc_id
, acrtc
);
2458 /* i.e. reset mode */
2460 remove_target(adev
, acrtc
);
2463 } /* for_each_crtc_in_state() */
2465 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
2467 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2469 if (acrtc
->target
) {
2470 commit_targets
[commit_targets_count
] = acrtc
->target
;
2471 ++commit_targets_count
;
2476 * Add streams after required streams from new and replaced targets
2477 * are removed from freesync module
2479 if (adev
->dm
.freesync_module
) {
2480 for (i
= 0; i
< new_crtcs_count
; i
++) {
2481 struct amdgpu_connector
*aconnector
= NULL
;
2482 new_target
= new_crtcs
[i
]->target
;
2484 amdgpu_dm_find_first_crct_matching_connector(
2486 &new_crtcs
[i
]->base
,
2490 "Atomic commit: Failed to find connector for acrtc id:%d "
2491 "skipping freesync init\n",
2492 new_crtcs
[i
]->crtc_id
);
2496 for (j
= 0; j
< new_target
->stream_count
; j
++)
2497 mod_freesync_add_stream(
2498 adev
->dm
.freesync_module
,
2499 new_target
->streams
[j
], &aconnector
->caps
);
2503 /* DC is optimized not to do anything if 'targets' didn't change. */
2504 dc_commit_targets(dm
->dc
, commit_targets
, commit_targets_count
);
2506 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
2507 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2509 if (acrtc
->target
!= NULL
)
2511 dc_target_get_status(acrtc
->target
)->primary_otg_inst
;
2514 /* update planes when needed */
2515 for_each_plane_in_state(state
, plane
, old_plane_state
, i
) {
2516 struct drm_plane_state
*plane_state
= plane
->state
;
2517 struct drm_crtc
*crtc
= plane_state
->crtc
;
2518 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2519 struct drm_framebuffer
*fb
= plane_state
->fb
;
2520 struct drm_connector
*connector
;
2521 struct dm_connector_state
*dm_state
= NULL
;
2522 enum dm_commit_action action
;
2524 if (!fb
|| !crtc
|| !crtc
->state
->active
)
2527 action
= get_dm_commit_action(crtc
->state
);
2529 /* Surfaces are created under two scenarios:
2530 * 1. This commit is not a page flip.
2531 * 2. This commit is a page flip, and targets are created.
2533 if (!page_flip_needed(
2536 crtc
->state
->event
, true) ||
2537 action
== DM_COMMIT_ACTION_DPMS_ON
||
2538 action
== DM_COMMIT_ACTION_SET
) {
2539 list_for_each_entry(connector
,
2540 &dev
->mode_config
.connector_list
, head
) {
2541 if (connector
->state
->crtc
== crtc
) {
2542 dm_state
= to_dm_connector_state(
2549 * This situation happens in the following case:
2550 * we are about to get set mode for connector who's only
2551 * possible crtc (in encoder crtc mask) is used by
2552 * another connector, that is why it will try to
2553 * re-assing crtcs in order to make configuration
2554 * supported. For our implementation we need to make all
2555 * encoders support all crtcs, then this issue will
2556 * never arise again. But to guard code from this issue
2559 * Also it should be needed when used with actual
2560 * drm_atomic_commit ioctl in future
2566 * if flip is pending (ie, still waiting for fence to return
2567 * before address is submitted) here, we cannot commit_surface
2568 * as commit_surface will pre-maturely write out the future
2569 * address. wait until flip is submitted before proceeding.
2571 wait_while_pflip_status(adev
, acrtc
, pflip_pending_predicate
);
2573 dm_dc_surface_commit(dm
->dc
, crtc
);
2577 for (i
= 0; i
< new_crtcs_count
; i
++) {
2579 * loop to enable interrupts on newly arrived crtc
2581 struct amdgpu_crtc
*acrtc
= new_crtcs
[i
];
2583 if (adev
->dm
.freesync_module
) {
2584 for (j
= 0; j
< acrtc
->target
->stream_count
; j
++)
2585 mod_freesync_notify_mode_change(
2586 adev
->dm
.freesync_module
,
2587 acrtc
->target
->streams
,
2588 acrtc
->target
->stream_count
);
2591 manage_dm_interrupts(adev
, acrtc
, true);
2592 dm_crtc_cursor_reset(&acrtc
->base
);
2596 /* Do actual flip */
2597 flip_crtcs_count
= 0;
2598 for_each_plane_in_state(state
, plane
, old_plane_state
, i
) {
2599 struct drm_plane_state
*plane_state
= plane
->state
;
2600 struct drm_crtc
*crtc
= plane_state
->crtc
;
2601 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2602 struct drm_framebuffer
*fb
= plane_state
->fb
;
2604 if (!fb
|| !crtc
|| !crtc
->state
->planes_changed
||
2605 !crtc
->state
->active
)
2608 if (page_flip_needed(
2613 amdgpu_crtc_submit_flip(
2616 work
[flip_crtcs_count
],
2619 /*clean up the flags for next usage*/
2620 acrtc
->flip_flags
= 0;
2624 /* In this state all old framebuffers would be unpinned */
2626 /* TODO: Revisit when we support true asynchronous commit.*/
2628 drm_atomic_helper_cleanup_planes(dev
, state
);
2630 drm_atomic_state_put(state
);
2635 * This functions handle all cases when set mode does not come upon hotplug.
2636 * This include when the same display is unplugged then plugged back into the
2637 * same port and when we are running without usermode desktop manager supprot
2639 void dm_restore_drm_connector_state(struct drm_device
*dev
, struct drm_connector
*connector
)
2641 struct drm_crtc
*crtc
;
2642 struct amdgpu_device
*adev
= dev
->dev_private
;
2643 struct dc
*dc
= adev
->dm
.dc
;
2644 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
2645 struct amdgpu_crtc
*disconnected_acrtc
;
2646 const struct dc_sink
*sink
;
2647 struct dc_target
*commit_targets
[6];
2648 struct dc_target
*current_target
;
2649 uint32_t commit_targets_count
= 0;
2652 if (!aconnector
->dc_sink
|| !connector
->state
|| !connector
->encoder
)
2655 disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
2657 if (!disconnected_acrtc
|| !disconnected_acrtc
->target
)
2660 sink
= disconnected_acrtc
->target
->streams
[0]->sink
;
2663 * If the previous sink is not released and different from the current,
2664 * we deduce we are in a state where we can not rely on usermode call
2665 * to turn on the display, so we do it here
2667 if (sink
!= aconnector
->dc_sink
) {
2668 struct dm_connector_state
*dm_state
=
2669 to_dm_connector_state(aconnector
->base
.state
);
2671 struct dc_target
*new_target
=
2672 create_target_for_sink(
2674 &disconnected_acrtc
->base
.state
->mode
,
2677 DRM_INFO("Headless hotplug, restoring connector state\n");
2679 * we evade vblanks and pflips on crtc that
2682 manage_dm_interrupts(adev
, disconnected_acrtc
, false);
2683 /* this is the update mode case */
2685 current_target
= disconnected_acrtc
->target
;
2687 disconnected_acrtc
->target
= new_target
;
2688 disconnected_acrtc
->enabled
= true;
2689 disconnected_acrtc
->hw_mode
= disconnected_acrtc
->base
.state
->mode
;
2691 commit_targets_count
= 0;
2693 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
2694 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2696 if (acrtc
->target
) {
2697 commit_targets
[commit_targets_count
] = acrtc
->target
;
2698 ++commit_targets_count
;
2702 /* DC is optimized not to do anything if 'targets' didn't change. */
2703 if (!dc_commit_targets(dc
, commit_targets
,
2704 commit_targets_count
)) {
2705 DRM_INFO("Failed to restore connector state!\n");
2706 dc_target_release(disconnected_acrtc
->target
);
2707 disconnected_acrtc
->target
= current_target
;
2708 manage_dm_interrupts(adev
, disconnected_acrtc
, true);
2712 if (adev
->dm
.freesync_module
) {
2714 for (i
= 0; i
< current_target
->stream_count
; i
++)
2715 mod_freesync_remove_stream(
2716 adev
->dm
.freesync_module
,
2717 current_target
->streams
[i
]);
2719 for (i
= 0; i
< new_target
->stream_count
; i
++)
2720 mod_freesync_add_stream(
2721 adev
->dm
.freesync_module
,
2722 new_target
->streams
[i
],
2725 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
2726 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2728 if (acrtc
->target
!= NULL
) {
2730 dc_target_get_status(acrtc
->target
)->primary_otg_inst
;
2734 dc_target_release(current_target
);
2736 dm_dc_surface_commit(dc
, &disconnected_acrtc
->base
);
2738 manage_dm_interrupts(adev
, disconnected_acrtc
, true);
2739 dm_crtc_cursor_reset(&disconnected_acrtc
->base
);
2744 static uint32_t add_val_sets_surface(
2745 struct dc_validation_set
*val_sets
,
2747 const struct dc_target
*target
,
2748 const struct dc_surface
*surface
)
2752 while (i
< set_count
) {
2753 if (val_sets
[i
].target
== target
)
2758 val_sets
[i
].surfaces
[val_sets
[i
].surface_count
] = surface
;
2759 val_sets
[i
].surface_count
++;
2761 return val_sets
[i
].surface_count
;
2764 static uint32_t update_in_val_sets_target(
2765 struct dc_validation_set
*val_sets
,
2766 struct drm_crtc
**crtcs
,
2768 const struct dc_target
*old_target
,
2769 const struct dc_target
*new_target
,
2770 struct drm_crtc
*crtc
)
2774 while (i
< set_count
) {
2775 if (val_sets
[i
].target
== old_target
)
2780 val_sets
[i
].target
= new_target
;
2783 if (i
== set_count
) {
2784 /* nothing found. add new one to the end */
2785 return set_count
+ 1;
2791 static uint32_t remove_from_val_sets(
2792 struct dc_validation_set
*val_sets
,
2794 const struct dc_target
*target
)
2798 for (i
= 0; i
< set_count
; i
++)
2799 if (val_sets
[i
].target
== target
)
2802 if (i
== set_count
) {
2809 for (; i
< set_count
; i
++) {
2810 val_sets
[i
] = val_sets
[i
+ 1];
2816 int amdgpu_dm_atomic_check(struct drm_device
*dev
,
2817 struct drm_atomic_state
*state
)
2819 struct drm_crtc
*crtc
;
2820 struct drm_crtc_state
*crtc_state
;
2821 struct drm_plane
*plane
;
2822 struct drm_plane_state
*plane_state
;
2826 int new_target_count
;
2827 struct dc_validation_set set
[MAX_TARGETS
] = {{ 0 }};
2828 struct dc_target
*new_targets
[MAX_TARGETS
] = { 0 };
2829 struct drm_crtc
*crtc_set
[MAX_TARGETS
] = { 0 };
2830 struct amdgpu_device
*adev
= dev
->dev_private
;
2831 struct dc
*dc
= adev
->dm
.dc
;
2832 bool need_to_validate
= false;
2834 ret
= drm_atomic_helper_check(dev
, state
);
2837 DRM_ERROR("Atomic state validation failed with error :%d !\n",
2844 /* copy existing configuration */
2845 new_target_count
= 0;
2847 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
2849 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2851 if (acrtc
->target
) {
2852 set
[set_count
].target
= acrtc
->target
;
2853 crtc_set
[set_count
] = crtc
;
2858 /* update changed items */
2859 for_each_crtc_in_state(state
, crtc
, crtc_state
, i
) {
2860 struct amdgpu_crtc
*acrtc
= NULL
;
2861 struct amdgpu_connector
*aconnector
= NULL
;
2862 enum dm_commit_action action
;
2864 acrtc
= to_amdgpu_crtc(crtc
);
2866 aconnector
= amdgpu_dm_find_first_crct_matching_connector(state
, crtc
, true);
2868 action
= get_dm_commit_action(crtc_state
);
2871 case DM_COMMIT_ACTION_DPMS_ON
:
2872 case DM_COMMIT_ACTION_SET
: {
2873 struct dc_target
*new_target
= NULL
;
2874 struct drm_connector_state
*conn_state
= NULL
;
2875 struct dm_connector_state
*dm_state
= NULL
;
2878 conn_state
= drm_atomic_get_connector_state(state
, &aconnector
->base
);
2879 if (IS_ERR(conn_state
))
2881 dm_state
= to_dm_connector_state(conn_state
);
2884 new_target
= create_target_for_sink(aconnector
, &crtc_state
->mode
, dm_state
);
2887 * we can have no target on ACTION_SET if a display
2888 * was disconnected during S3, in this case it not and
2889 * error, the OS will be updated after detection, and
2890 * do the right thing on next atomic commit
2893 DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n",
2894 __func__
, acrtc
->base
.base
.id
);
2898 new_targets
[new_target_count
] = new_target
;
2899 set_count
= update_in_val_sets_target(
2908 need_to_validate
= true;
2912 case DM_COMMIT_ACTION_NOTHING
: {
2913 const struct drm_connector
*drm_connector
= NULL
;
2914 struct drm_connector_state
*conn_state
= NULL
;
2915 struct dm_connector_state
*dm_state
= NULL
;
2916 struct dm_connector_state
*old_dm_state
= NULL
;
2917 struct dc_target
*new_target
;
2922 for_each_connector_in_state(
2923 state
, drm_connector
, conn_state
, j
) {
2924 if (&aconnector
->base
== drm_connector
)
2928 old_dm_state
= to_dm_connector_state(drm_connector
->state
);
2929 dm_state
= to_dm_connector_state(conn_state
);
2931 /* Support underscan adjustment*/
2932 if (!is_scaling_state_different(dm_state
, old_dm_state
))
2935 new_target
= create_target_for_sink(aconnector
, &crtc_state
->mode
, dm_state
);
2938 DRM_ERROR("%s: Failed to create new target for crtc %d\n",
2939 __func__
, acrtc
->base
.base
.id
);
2943 new_targets
[new_target_count
] = new_target
;
2944 set_count
= update_in_val_sets_target(
2953 need_to_validate
= true;
2957 case DM_COMMIT_ACTION_DPMS_OFF
:
2958 case DM_COMMIT_ACTION_RESET
:
2959 /* i.e. reset mode */
2960 if (acrtc
->target
) {
2961 set_count
= remove_from_val_sets(
2970 * TODO revisit when removing commit action
2971 * and looking at atomic flags directly
2974 /* commit needs planes right now (for gamma, eg.) */
2975 /* TODO rework commit to check crtc for gamma change */
2976 ret
= drm_atomic_add_affected_planes(state
, crtc
);
2981 for (i
= 0; i
< set_count
; i
++) {
2982 for_each_plane_in_state(state
, plane
, plane_state
, j
) {
2983 struct drm_plane_state
*old_plane_state
= plane
->state
;
2984 struct drm_crtc
*crtc
= plane_state
->crtc
;
2985 struct drm_framebuffer
*fb
= plane_state
->fb
;
2986 struct drm_connector
*connector
;
2987 struct dm_connector_state
*dm_state
= NULL
;
2988 enum dm_commit_action action
;
2989 struct drm_crtc_state
*crtc_state
;
2992 if (!fb
|| !crtc
|| crtc_set
[i
] != crtc
||
2993 !crtc
->state
->planes_changed
|| !crtc
->state
->active
)
2996 action
= get_dm_commit_action(crtc
->state
);
2998 /* Surfaces are created under two scenarios:
2999 * 1. This commit is not a page flip.
3000 * 2. This commit is a page flip, and targets are created.
3002 crtc_state
= drm_atomic_get_crtc_state(state
, crtc
);
3003 if (!page_flip_needed(plane_state
, old_plane_state
,
3004 crtc_state
->event
, true) ||
3005 action
== DM_COMMIT_ACTION_DPMS_ON
||
3006 action
== DM_COMMIT_ACTION_SET
) {
3007 struct dc_surface
*surface
;
3009 list_for_each_entry(connector
,
3010 &dev
->mode_config
.connector_list
, head
) {
3011 if (connector
->state
->crtc
== crtc
) {
3012 dm_state
= to_dm_connector_state(
3019 * This situation happens in the following case:
3020 * we are about to get set mode for connector who's only
3021 * possible crtc (in encoder crtc mask) is used by
3022 * another connector, that is why it will try to
3023 * re-assign crtcs in order to make configuration
3024 * supported. For our implementation we need to make all
3025 * encoders support all crtcs, then this issue will
3026 * never arise again. But to guard code from this issue
3029 * Also it should be needed when used with actual
3030 * drm_atomic_commit ioctl in future
3035 surface
= dc_create_surface(dc
);
3036 fill_plane_attributes(
3037 crtc
->dev
->dev_private
,
3042 add_val_sets_surface(
3048 need_to_validate
= true;
3053 if (need_to_validate
== false || set_count
== 0 ||
3054 dc_validate_resources(dc
, set
, set_count
))
3057 for (i
= 0; i
< set_count
; i
++) {
3058 for (j
= 0; j
< set
[i
].surface_count
; j
++) {
3059 dc_surface_release(set
[i
].surfaces
[j
]);
3062 for (i
= 0; i
< new_target_count
; i
++)
3063 dc_target_release(new_targets
[i
]);
3066 DRM_ERROR("Atomic check failed.\n");
3071 static bool is_dp_capable_without_timing_msa(
3073 struct amdgpu_connector
*amdgpu_connector
)
3076 bool capable
= false;
3077 if (amdgpu_connector
->dc_link
&&
3078 dc_read_dpcd(dc
, amdgpu_connector
->dc_link
->link_index
,
3079 DP_DOWN_STREAM_PORT_COUNT
,
3080 &dpcd_data
, sizeof(dpcd_data
)) )
3081 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
3085 void amdgpu_dm_add_sink_to_freesync_module(
3086 struct drm_connector
*connector
,
3090 uint64_t val_capable
;
3091 bool edid_check_required
;
3092 struct detailed_timing
*timing
;
3093 struct detailed_non_pixel
*data
;
3094 struct detailed_data_monitor_range
*range
;
3095 struct amdgpu_connector
*amdgpu_connector
=
3096 to_amdgpu_connector(connector
);
3098 struct drm_device
*dev
= connector
->dev
;
3099 struct amdgpu_device
*adev
= dev
->dev_private
;
3100 edid_check_required
= false;
3101 if (!amdgpu_connector
->dc_sink
) {
3102 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
3105 if (!adev
->dm
.freesync_module
)
3108 * if edid non zero restrict freesync only for dp and edp
3111 if (amdgpu_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
3112 || amdgpu_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
3113 edid_check_required
= is_dp_capable_without_timing_msa(
3119 if (edid_check_required
== true && (edid
->version
> 1 ||
3120 (edid
->version
== 1 && edid
->revision
> 1))) {
3121 for (i
= 0; i
< 4; i
++) {
3123 timing
= &edid
->detailed_timings
[i
];
3124 data
= &timing
->data
.other_data
;
3125 range
= &data
->data
.range
;
3127 * Check if monitor has continuous frequency mode
3129 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
3132 * Check for flag range limits only. If flag == 1 then
3133 * no additional timing information provided.
3134 * Default GTF, GTF Secondary curve and CVT are not
3137 if (range
->flags
!= 1)
3140 amdgpu_connector
->min_vfreq
= range
->min_vfreq
;
3141 amdgpu_connector
->max_vfreq
= range
->max_vfreq
;
3142 amdgpu_connector
->pixel_clock_mhz
=
3143 range
->pixel_clock_mhz
* 10;
3147 if (amdgpu_connector
->max_vfreq
-
3148 amdgpu_connector
->min_vfreq
> 10) {
3149 amdgpu_connector
->caps
.supported
= true;
3150 amdgpu_connector
->caps
.min_refresh_in_micro_hz
=
3151 amdgpu_connector
->min_vfreq
* 1000000;
3152 amdgpu_connector
->caps
.max_refresh_in_micro_hz
=
3153 amdgpu_connector
->max_vfreq
* 1000000;
3159 * TODO figure out how to notify user-mode or DRM of freesync caps
3160 * once we figure out how to deal with freesync in an upstreamable
3166 void amdgpu_dm_remove_sink_from_freesync_module(
3167 struct drm_connector
*connector
)
3170 * TODO fill in once we figure out how to deal with freesync in
3171 * an upstreamable fashion