/*
 * Copyright 2012-13 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include <linux/types.h>
27 #include <linux/version.h>
30 #include <drm/drm_atomic_helper.h>
31 #include <drm/drm_fb_helper.h>
32 #include <drm/drm_atomic.h>
33 #include <drm/drm_edid.h>
37 #include "amdgpu_pm.h"
38 #include "dm_helpers.h"
39 #include "dm_services_types.h"
41 // We need to #undef FRAME_SIZE and DEPRECATED because they conflict
42 // with ptrace-abi.h's #define's of them.
48 #include "amdgpu_dm_types.h"
49 #include "amdgpu_dm_mst_types.h"
51 #include "modules/inc/mod_freesync.h"
53 #include "i2caux_interface.h"
55 struct dm_connector_state
{
56 struct drm_connector_state base
;
58 enum amdgpu_rmx_type scaling
;
59 uint8_t underscan_vborder
;
60 uint8_t underscan_hborder
;
61 bool underscan_enable
;
/* Upcast from the embedded base DRM connector state to the DM state. */
#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)
67 static bool modeset_required(struct drm_crtc_state
*crtc_state
)
69 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
72 if (!crtc_state
->enable
)
75 return crtc_state
->active
;
78 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
80 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
83 return !crtc_state
->enable
|| !crtc_state
->active
;
/* drm_encoder_funcs.destroy callback: tear down the encoder and free it. */
void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
92 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
93 .destroy
= amdgpu_dm_encoder_destroy
,
96 static void dm_set_cursor(
97 struct amdgpu_crtc
*amdgpu_crtc
,
102 struct dc_cursor_attributes attributes
;
103 struct dc_cursor_position position
;
104 struct drm_crtc
*crtc
= &amdgpu_crtc
->base
;
106 int xorigin
= 0, yorigin
= 0;
107 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
109 amdgpu_crtc
->cursor_width
= width
;
110 amdgpu_crtc
->cursor_height
= height
;
112 attributes
.address
.high_part
= upper_32_bits(gpu_addr
);
113 attributes
.address
.low_part
= lower_32_bits(gpu_addr
);
114 attributes
.width
= width
;
115 attributes
.height
= height
;
116 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
117 attributes
.rotation_angle
= 0;
118 attributes
.attribute_flags
.value
= 0;
120 attributes
.pitch
= attributes
.width
;
122 x
= amdgpu_crtc
->cursor_x
;
123 y
= amdgpu_crtc
->cursor_y
;
125 /* avivo cursor are offset into the total surface */
126 x
+= crtc
->primary
->state
->src_x
>> 16;
127 y
+= crtc
->primary
->state
->src_y
>> 16;
130 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
134 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
138 position
.enable
= true;
142 position
.x_hotspot
= xorigin
;
143 position
.y_hotspot
= yorigin
;
145 if (!dc_stream_set_cursor_attributes(
148 DRM_ERROR("DC failed to set cursor attributes\n");
151 if (!dc_stream_set_cursor_position(
154 DRM_ERROR("DC failed to set cursor position\n");
158 static int dm_crtc_cursor_set(
159 struct drm_crtc
*crtc
,
164 struct dc_cursor_position position
;
165 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
169 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
173 "%s: crtc_id=%d with size %d to %d \n",
175 amdgpu_crtc
->crtc_id
,
180 /* turn off cursor */
181 position
.enable
= false;
185 if (acrtc_state
->stream
) {
186 /*set cursor visible false*/
187 dc_stream_set_cursor_position(
195 if ((width
> amdgpu_crtc
->max_cursor_width
) ||
196 (height
> amdgpu_crtc
->max_cursor_height
)) {
198 "%s: bad cursor width or height %d x %d\n",
205 /*program new cursor bo to hardware*/
206 dm_set_cursor(amdgpu_crtc
, address
, width
, height
);
213 static int dm_crtc_cursor_move(struct drm_crtc
*crtc
,
216 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
217 int xorigin
= 0, yorigin
= 0;
218 struct dc_cursor_position position
;
219 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
221 amdgpu_crtc
->cursor_x
= x
;
222 amdgpu_crtc
->cursor_y
= y
;
224 /* avivo cursor are offset into the total surface */
225 x
+= crtc
->primary
->state
->src_x
>> 16;
226 y
+= crtc
->primary
->state
->src_y
>> 16;
229 * TODO: for cursor debugging unguard the following
233 "%s: x %d y %d c->x %d c->y %d\n",
242 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
246 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
250 position
.enable
= true;
254 position
.x_hotspot
= xorigin
;
255 position
.y_hotspot
= yorigin
;
257 if (acrtc_state
->stream
) {
258 if (!dc_stream_set_cursor_position(
261 DRM_ERROR("DC failed to set cursor position\n");
269 static bool fill_rects_from_plane_state(
270 const struct drm_plane_state
*state
,
271 struct dc_surface
*surface
)
273 surface
->src_rect
.x
= state
->src_x
>> 16;
274 surface
->src_rect
.y
= state
->src_y
>> 16;
275 /*we ignore for now mantissa and do not to deal with floating pixels :(*/
276 surface
->src_rect
.width
= state
->src_w
>> 16;
278 if (surface
->src_rect
.width
== 0)
281 surface
->src_rect
.height
= state
->src_h
>> 16;
282 if (surface
->src_rect
.height
== 0)
285 surface
->dst_rect
.x
= state
->crtc_x
;
286 surface
->dst_rect
.y
= state
->crtc_y
;
288 if (state
->crtc_w
== 0)
291 surface
->dst_rect
.width
= state
->crtc_w
;
293 if (state
->crtc_h
== 0)
296 surface
->dst_rect
.height
= state
->crtc_h
;
298 surface
->clip_rect
= surface
->dst_rect
;
300 switch (state
->rotation
& DRM_MODE_ROTATE_MASK
) {
301 case DRM_MODE_ROTATE_0
:
302 surface
->rotation
= ROTATION_ANGLE_0
;
304 case DRM_MODE_ROTATE_90
:
305 surface
->rotation
= ROTATION_ANGLE_90
;
307 case DRM_MODE_ROTATE_180
:
308 surface
->rotation
= ROTATION_ANGLE_180
;
310 case DRM_MODE_ROTATE_270
:
311 surface
->rotation
= ROTATION_ANGLE_270
;
314 surface
->rotation
= ROTATION_ANGLE_0
;
320 static int get_fb_info(
321 const struct amdgpu_framebuffer
*amdgpu_fb
,
322 uint64_t *tiling_flags
,
323 uint64_t *fb_location
)
325 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->obj
);
326 int r
= amdgpu_bo_reserve(rbo
, false);
328 DRM_ERROR("Unable to reserve buffer\n");
333 *fb_location
= amdgpu_bo_gpu_offset(rbo
);
336 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
338 amdgpu_bo_unreserve(rbo
);
343 static int fill_plane_attributes_from_fb(
344 struct amdgpu_device
*adev
,
345 struct dc_surface
*surface
,
346 const struct amdgpu_framebuffer
*amdgpu_fb
, bool addReq
)
348 uint64_t tiling_flags
;
349 uint64_t fb_location
= 0;
351 const struct drm_framebuffer
*fb
= &amdgpu_fb
->base
;
353 struct drm_format_name_buf format_name
;
358 addReq
== true ? &fb_location
:NULL
);
363 switch (fb
->format
->format
) {
365 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
367 case DRM_FORMAT_RGB565
:
368 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
370 case DRM_FORMAT_XRGB8888
:
371 case DRM_FORMAT_ARGB8888
:
372 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
374 case DRM_FORMAT_XRGB2101010
:
375 case DRM_FORMAT_ARGB2101010
:
376 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
378 case DRM_FORMAT_XBGR2101010
:
379 case DRM_FORMAT_ABGR2101010
:
380 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
382 case DRM_FORMAT_NV21
:
383 surface
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
385 case DRM_FORMAT_NV12
:
386 surface
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
389 DRM_ERROR("Unsupported screen format %s\n",
390 drm_get_format_name(fb
->format
->format
, &format_name
));
394 if (surface
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
395 surface
->address
.type
= PLN_ADDR_TYPE_GRAPHICS
;
396 surface
->address
.grph
.addr
.low_part
= lower_32_bits(fb_location
);
397 surface
->address
.grph
.addr
.high_part
= upper_32_bits(fb_location
);
398 surface
->plane_size
.grph
.surface_size
.x
= 0;
399 surface
->plane_size
.grph
.surface_size
.y
= 0;
400 surface
->plane_size
.grph
.surface_size
.width
= fb
->width
;
401 surface
->plane_size
.grph
.surface_size
.height
= fb
->height
;
402 surface
->plane_size
.grph
.surface_pitch
=
403 fb
->pitches
[0] / fb
->format
->cpp
[0];
404 /* TODO: unhardcode */
405 surface
->color_space
= COLOR_SPACE_SRGB
;
408 awidth
= ALIGN(fb
->width
, 64);
409 surface
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
410 surface
->address
.video_progressive
.luma_addr
.low_part
411 = lower_32_bits(fb_location
);
412 surface
->address
.video_progressive
.chroma_addr
.low_part
413 = lower_32_bits(fb_location
) +
414 (awidth
* fb
->height
);
415 surface
->plane_size
.video
.luma_size
.x
= 0;
416 surface
->plane_size
.video
.luma_size
.y
= 0;
417 surface
->plane_size
.video
.luma_size
.width
= awidth
;
418 surface
->plane_size
.video
.luma_size
.height
= fb
->height
;
419 /* TODO: unhardcode */
420 surface
->plane_size
.video
.luma_pitch
= awidth
;
422 surface
->plane_size
.video
.chroma_size
.x
= 0;
423 surface
->plane_size
.video
.chroma_size
.y
= 0;
424 surface
->plane_size
.video
.chroma_size
.width
= awidth
;
425 surface
->plane_size
.video
.chroma_size
.height
= fb
->height
;
426 surface
->plane_size
.video
.chroma_pitch
= awidth
/ 2;
428 /* TODO: unhardcode */
429 surface
->color_space
= COLOR_SPACE_YCBCR709
;
432 memset(&surface
->tiling_info
, 0, sizeof(surface
->tiling_info
));
434 /* Fill GFX params */
435 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
)
437 unsigned bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
439 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
440 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
441 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
442 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
443 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
445 /* XXX fix me for VI */
446 surface
->tiling_info
.gfx8
.num_banks
= num_banks
;
447 surface
->tiling_info
.gfx8
.array_mode
=
448 DC_ARRAY_2D_TILED_THIN1
;
449 surface
->tiling_info
.gfx8
.tile_split
= tile_split
;
450 surface
->tiling_info
.gfx8
.bank_width
= bankw
;
451 surface
->tiling_info
.gfx8
.bank_height
= bankh
;
452 surface
->tiling_info
.gfx8
.tile_aspect
= mtaspect
;
453 surface
->tiling_info
.gfx8
.tile_mode
=
454 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
455 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
456 == DC_ARRAY_1D_TILED_THIN1
) {
457 surface
->tiling_info
.gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
460 surface
->tiling_info
.gfx8
.pipe_config
=
461 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
463 if (adev
->asic_type
== CHIP_VEGA10
||
464 adev
->asic_type
== CHIP_RAVEN
) {
465 /* Fill GFX9 params */
466 surface
->tiling_info
.gfx9
.num_pipes
=
467 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
468 surface
->tiling_info
.gfx9
.num_banks
=
469 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
470 surface
->tiling_info
.gfx9
.pipe_interleave
=
471 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
472 surface
->tiling_info
.gfx9
.num_shader_engines
=
473 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
474 surface
->tiling_info
.gfx9
.max_compressed_frags
=
475 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
476 surface
->tiling_info
.gfx9
.num_rb_per_se
=
477 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
478 surface
->tiling_info
.gfx9
.swizzle
=
479 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
480 surface
->tiling_info
.gfx9
.shaderEnable
= 1;
483 surface
->visible
= true;
484 surface
->scaling_quality
.h_taps_c
= 0;
485 surface
->scaling_quality
.v_taps_c
= 0;
487 /* is this needed? is surface zeroed at allocation? */
488 surface
->scaling_quality
.h_taps
= 0;
489 surface
->scaling_quality
.v_taps
= 0;
490 surface
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
496 #define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256
498 static void fill_gamma_from_crtc_state(
499 const struct drm_crtc_state
*crtc_state
,
500 struct dc_surface
*dc_surface
)
503 struct dc_gamma
*gamma
;
504 struct drm_color_lut
*lut
= (struct drm_color_lut
*) crtc_state
->gamma_lut
->data
;
506 gamma
= dc_create_gamma();
513 for (i
= 0; i
< NUM_OF_RAW_GAMMA_RAMP_RGB_256
; i
++) {
514 gamma
->red
[i
] = lut
[i
].red
;
515 gamma
->green
[i
] = lut
[i
].green
;
516 gamma
->blue
[i
] = lut
[i
].blue
;
519 dc_surface
->gamma_correction
= gamma
;
522 static int fill_plane_attributes(
523 struct amdgpu_device
*adev
,
524 struct dc_surface
*surface
,
525 struct drm_plane_state
*plane_state
,
526 struct drm_crtc_state
*crtc_state
,
529 const struct amdgpu_framebuffer
*amdgpu_fb
=
530 to_amdgpu_framebuffer(plane_state
->fb
);
531 const struct drm_crtc
*crtc
= plane_state
->crtc
;
532 struct dc_transfer_func
*input_tf
;
535 if (!fill_rects_from_plane_state(plane_state
, surface
))
538 ret
= fill_plane_attributes_from_fb(
539 crtc
->dev
->dev_private
,
547 input_tf
= dc_create_transfer_func();
549 if (input_tf
== NULL
)
552 input_tf
->type
= TF_TYPE_PREDEFINED
;
553 input_tf
->tf
= TRANSFER_FUNCTION_SRGB
;
555 surface
->in_transfer_func
= input_tf
;
557 /* In case of gamma set, update gamma value */
558 if (crtc_state
->gamma_lut
)
559 fill_gamma_from_crtc_state(crtc_state
, surface
);
564 /*****************************************************************************/
566 struct amdgpu_connector
*aconnector_from_drm_crtc_id(
567 const struct drm_crtc
*crtc
)
569 struct drm_device
*dev
= crtc
->dev
;
570 struct drm_connector
*connector
;
571 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
572 struct amdgpu_connector
*aconnector
;
574 list_for_each_entry(connector
,
575 &dev
->mode_config
.connector_list
, head
) {
577 aconnector
= to_amdgpu_connector(connector
);
579 if (aconnector
->base
.state
->crtc
!= &acrtc
->base
)
582 /* Found the connector */
586 /* If we get here, not found. */
590 static void update_stream_scaling_settings(
591 const struct drm_display_mode
*mode
,
592 const struct dm_connector_state
*dm_state
,
593 struct dc_stream
*stream
)
595 enum amdgpu_rmx_type rmx_type
;
597 struct rect src
= { 0 }; /* viewport in composition space*/
598 struct rect dst
= { 0 }; /* stream addressable area */
600 /* no mode. nothing to be done */
604 /* Full screen scaling by default */
605 src
.width
= mode
->hdisplay
;
606 src
.height
= mode
->vdisplay
;
607 dst
.width
= stream
->timing
.h_addressable
;
608 dst
.height
= stream
->timing
.v_addressable
;
610 rmx_type
= dm_state
->scaling
;
611 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
612 if (src
.width
* dst
.height
<
613 src
.height
* dst
.width
) {
614 /* height needs less upscaling/more downscaling */
615 dst
.width
= src
.width
*
616 dst
.height
/ src
.height
;
618 /* width needs less upscaling/more downscaling */
619 dst
.height
= src
.height
*
620 dst
.width
/ src
.width
;
622 } else if (rmx_type
== RMX_CENTER
) {
626 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
627 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
629 if (dm_state
->underscan_enable
) {
630 dst
.x
+= dm_state
->underscan_hborder
/ 2;
631 dst
.y
+= dm_state
->underscan_vborder
/ 2;
632 dst
.width
-= dm_state
->underscan_hborder
;
633 dst
.height
-= dm_state
->underscan_vborder
;
639 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
640 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
644 static enum dc_color_depth
convert_color_depth_from_display_info(
645 const struct drm_connector
*connector
)
647 uint32_t bpc
= connector
->display_info
.bpc
;
649 /* Limited color depth to 8bit
650 * TODO: Still need to handle deep color*/
656 /* Temporary Work around, DRM don't parse color depth for
657 * EDID revision before 1.4
658 * TODO: Fix edid parsing
660 return COLOR_DEPTH_888
;
662 return COLOR_DEPTH_666
;
664 return COLOR_DEPTH_888
;
666 return COLOR_DEPTH_101010
;
668 return COLOR_DEPTH_121212
;
670 return COLOR_DEPTH_141414
;
672 return COLOR_DEPTH_161616
;
674 return COLOR_DEPTH_UNDEFINED
;
678 static enum dc_aspect_ratio
get_aspect_ratio(
679 const struct drm_display_mode
*mode_in
)
681 int32_t width
= mode_in
->crtc_hdisplay
* 9;
682 int32_t height
= mode_in
->crtc_vdisplay
* 16;
683 if ((width
- height
) < 10 && (width
- height
) > -10)
684 return ASPECT_RATIO_16_9
;
686 return ASPECT_RATIO_4_3
;
689 static enum dc_color_space
get_output_color_space(
690 const struct dc_crtc_timing
*dc_crtc_timing
)
692 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
694 switch (dc_crtc_timing
->pixel_encoding
) {
695 case PIXEL_ENCODING_YCBCR422
:
696 case PIXEL_ENCODING_YCBCR444
:
697 case PIXEL_ENCODING_YCBCR420
:
700 * 27030khz is the separation point between HDTV and SDTV
701 * according to HDMI spec, we use YCbCr709 and YCbCr601
704 if (dc_crtc_timing
->pix_clk_khz
> 27030) {
705 if (dc_crtc_timing
->flags
.Y_ONLY
)
707 COLOR_SPACE_YCBCR709_LIMITED
;
709 color_space
= COLOR_SPACE_YCBCR709
;
711 if (dc_crtc_timing
->flags
.Y_ONLY
)
713 COLOR_SPACE_YCBCR601_LIMITED
;
715 color_space
= COLOR_SPACE_YCBCR601
;
720 case PIXEL_ENCODING_RGB
:
721 color_space
= COLOR_SPACE_SRGB
;
732 /*****************************************************************************/
734 static void fill_stream_properties_from_drm_display_mode(
735 struct dc_stream
*stream
,
736 const struct drm_display_mode
*mode_in
,
737 const struct drm_connector
*connector
)
739 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
740 memset(timing_out
, 0, sizeof(struct dc_crtc_timing
));
742 timing_out
->h_border_left
= 0;
743 timing_out
->h_border_right
= 0;
744 timing_out
->v_border_top
= 0;
745 timing_out
->v_border_bottom
= 0;
746 /* TODO: un-hardcode */
748 if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
749 && stream
->sink
->sink_signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
750 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
752 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
754 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
755 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
757 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
758 timing_out
->hdmi_vic
= 0;
759 timing_out
->vic
= drm_match_cea_mode(mode_in
);
761 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
762 timing_out
->h_total
= mode_in
->crtc_htotal
;
763 timing_out
->h_sync_width
=
764 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
765 timing_out
->h_front_porch
=
766 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
767 timing_out
->v_total
= mode_in
->crtc_vtotal
;
768 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
769 timing_out
->v_front_porch
=
770 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
771 timing_out
->v_sync_width
=
772 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
773 timing_out
->pix_clk_khz
= mode_in
->crtc_clock
;
774 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
775 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
776 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
777 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
778 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
780 stream
->output_color_space
= get_output_color_space(timing_out
);
783 struct dc_transfer_func
*tf
= dc_create_transfer_func();
784 tf
->type
= TF_TYPE_PREDEFINED
;
785 tf
->tf
= TRANSFER_FUNCTION_SRGB
;
786 stream
->out_transfer_func
= tf
;
790 static void fill_audio_info(
791 struct audio_info
*audio_info
,
792 const struct drm_connector
*drm_connector
,
793 const struct dc_sink
*dc_sink
)
796 int cea_revision
= 0;
797 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
799 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
800 audio_info
->product_id
= edid_caps
->product_id
;
802 cea_revision
= drm_connector
->display_info
.cea_rev
;
804 while (i
< AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
&&
805 edid_caps
->display_name
[i
]) {
806 audio_info
->display_name
[i
] = edid_caps
->display_name
[i
];
810 if(cea_revision
>= 3) {
811 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
813 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
814 audio_info
->modes
[i
].format_code
=
815 (enum audio_format_code
)
816 (edid_caps
->audio_modes
[i
].format_code
);
817 audio_info
->modes
[i
].channel_count
=
818 edid_caps
->audio_modes
[i
].channel_count
;
819 audio_info
->modes
[i
].sample_rates
.all
=
820 edid_caps
->audio_modes
[i
].sample_rate
;
821 audio_info
->modes
[i
].sample_size
=
822 edid_caps
->audio_modes
[i
].sample_size
;
826 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
828 /* TODO: We only check for the progressive mode, check for interlace mode too */
829 if(drm_connector
->latency_present
[0]) {
830 audio_info
->video_latency
= drm_connector
->video_latency
[0];
831 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
834 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
838 static void copy_crtc_timing_for_drm_display_mode(
839 const struct drm_display_mode
*src_mode
,
840 struct drm_display_mode
*dst_mode
)
842 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
843 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
844 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
845 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
846 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
847 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
848 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
849 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
850 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
851 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
852 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
853 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
854 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
855 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
858 static void decide_crtc_timing_for_drm_display_mode(
859 struct drm_display_mode
*drm_mode
,
860 const struct drm_display_mode
*native_mode
,
864 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
865 } else if (native_mode
->clock
== drm_mode
->clock
&&
866 native_mode
->htotal
== drm_mode
->htotal
&&
867 native_mode
->vtotal
== drm_mode
->vtotal
) {
868 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
870 /* no scaling nor amdgpu inserted, no need to patch */
874 static struct dc_stream
*create_stream_for_sink(
875 struct amdgpu_connector
*aconnector
,
876 const struct drm_display_mode
*drm_mode
,
877 const struct dm_connector_state
*dm_state
)
879 struct drm_display_mode
*preferred_mode
= NULL
;
880 const struct drm_connector
*drm_connector
;
881 struct dc_stream
*stream
= NULL
;
882 struct drm_display_mode mode
= *drm_mode
;
883 bool native_mode_found
= false;
885 if (NULL
== aconnector
) {
886 DRM_ERROR("aconnector is NULL!\n");
887 goto drm_connector_null
;
890 if (NULL
== dm_state
) {
891 DRM_ERROR("dm_state is NULL!\n");
895 drm_connector
= &aconnector
->base
;
896 stream
= dc_create_stream_for_sink(aconnector
->dc_sink
);
898 if (NULL
== stream
) {
899 DRM_ERROR("Failed to create stream for sink!\n");
900 goto stream_create_fail
;
903 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
904 /* Search for preferred mode */
905 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
906 native_mode_found
= true;
910 if (!native_mode_found
)
911 preferred_mode
= list_first_entry_or_null(
912 &aconnector
->base
.modes
,
913 struct drm_display_mode
,
916 if (NULL
== preferred_mode
) {
917 /* This may not be an error, the use case is when we we have no
918 * usermode calls to reset and set mode upon hotplug. In this
919 * case, we call set mode ourselves to restore the previous mode
920 * and the modelist may not be filled in in time.
922 DRM_INFO("No preferred mode found\n");
924 decide_crtc_timing_for_drm_display_mode(
925 &mode
, preferred_mode
,
926 dm_state
->scaling
!= RMX_OFF
);
929 fill_stream_properties_from_drm_display_mode(stream
,
930 &mode
, &aconnector
->base
);
931 update_stream_scaling_settings(&mode
, dm_state
, stream
);
936 aconnector
->dc_sink
);
/* drm_crtc_funcs.destroy callback: tear down the CRTC and free it. */
void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
950 static void dm_crtc_destroy_state(struct drm_crtc
*crtc
,
951 struct drm_crtc_state
*state
)
953 struct dm_crtc_state
*cur
= to_dm_crtc_state(state
);
955 /* TODO Destroy dc_stream objects are stream object is flattened */
957 dc_stream_release(cur
->stream
);
960 __drm_atomic_helper_crtc_destroy_state(state
);
966 static void dm_crtc_reset_state(struct drm_crtc
*crtc
)
968 struct dm_crtc_state
*state
;
971 dm_crtc_destroy_state(crtc
, crtc
->state
);
973 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
977 crtc
->state
= &state
->base
;
978 crtc
->state
->crtc
= crtc
;
982 static struct drm_crtc_state
*
983 dm_crtc_duplicate_state(struct drm_crtc
*crtc
)
985 struct dm_crtc_state
*state
, *cur
;
987 cur
= to_dm_crtc_state(crtc
->state
);
989 if (WARN_ON(!crtc
->state
))
992 state
= dm_alloc(sizeof(*state
));
994 __drm_atomic_helper_crtc_duplicate_state(crtc
, &state
->base
);
997 state
->stream
= cur
->stream
;
998 dc_stream_retain(state
->stream
);
1001 /* TODO Duplicate dc_stream after objects are stream object is flattened */
1003 return &state
->base
;
1006 /* Implemented only the options currently availible for the driver */
1007 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
1008 .reset
= dm_crtc_reset_state
,
1009 .destroy
= amdgpu_dm_crtc_destroy
,
1010 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
1011 .set_config
= drm_atomic_helper_set_config
,
1012 .page_flip
= drm_atomic_helper_page_flip
,
1013 .atomic_duplicate_state
= dm_crtc_duplicate_state
,
1014 .atomic_destroy_state
= dm_crtc_destroy_state
,
1017 static enum drm_connector_status
1018 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
1021 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
1024 * 1. This interface is NOT called in context of HPD irq.
1025 * 2. This interface *is called* in context of user-mode ioctl. Which
1026 * makes it a bad place for *any* MST-related activit. */
1028 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
1029 connected
= (aconnector
->dc_sink
!= NULL
);
1031 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
1033 return (connected
? connector_status_connected
:
1034 connector_status_disconnected
);
1037 int amdgpu_dm_connector_atomic_set_property(
1038 struct drm_connector
*connector
,
1039 struct drm_connector_state
*connector_state
,
1040 struct drm_property
*property
,
1043 struct drm_device
*dev
= connector
->dev
;
1044 struct amdgpu_device
*adev
= dev
->dev_private
;
1045 struct dm_connector_state
*dm_old_state
=
1046 to_dm_connector_state(connector
->state
);
1047 struct dm_connector_state
*dm_new_state
=
1048 to_dm_connector_state(connector_state
);
1052 if (property
== dev
->mode_config
.scaling_mode_property
) {
1053 enum amdgpu_rmx_type rmx_type
;
1056 case DRM_MODE_SCALE_CENTER
:
1057 rmx_type
= RMX_CENTER
;
1059 case DRM_MODE_SCALE_ASPECT
:
1060 rmx_type
= RMX_ASPECT
;
1062 case DRM_MODE_SCALE_FULLSCREEN
:
1063 rmx_type
= RMX_FULL
;
1065 case DRM_MODE_SCALE_NONE
:
1071 if (dm_old_state
->scaling
== rmx_type
)
1074 dm_new_state
->scaling
= rmx_type
;
1076 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
1077 dm_new_state
->underscan_hborder
= val
;
1079 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
1080 dm_new_state
->underscan_vborder
= val
;
1082 } else if (property
== adev
->mode_info
.underscan_property
) {
1083 dm_new_state
->underscan_enable
= val
;
1090 int amdgpu_dm_connector_atomic_get_property(
1091 struct drm_connector
*connector
,
1092 const struct drm_connector_state
*state
,
1093 struct drm_property
*property
,
1096 struct drm_device
*dev
= connector
->dev
;
1097 struct amdgpu_device
*adev
= dev
->dev_private
;
1098 struct dm_connector_state
*dm_state
=
1099 to_dm_connector_state(state
);
1102 if (property
== dev
->mode_config
.scaling_mode_property
) {
1103 switch (dm_state
->scaling
) {
1105 *val
= DRM_MODE_SCALE_CENTER
;
1108 *val
= DRM_MODE_SCALE_ASPECT
;
1111 *val
= DRM_MODE_SCALE_FULLSCREEN
;
1115 *val
= DRM_MODE_SCALE_NONE
;
1119 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
1120 *val
= dm_state
->underscan_hborder
;
1122 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
1123 *val
= dm_state
->underscan_vborder
;
1125 } else if (property
== adev
->mode_info
.underscan_property
) {
1126 *val
= dm_state
->underscan_enable
;
1132 void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
1134 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
1135 const struct dc_link
*link
= aconnector
->dc_link
;
1136 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
1137 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1138 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1139 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1141 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
1142 amdgpu_dm_register_backlight_device(dm
);
1144 if (dm
->backlight_dev
) {
1145 backlight_device_unregister(dm
->backlight_dev
);
1146 dm
->backlight_dev
= NULL
;
1151 drm_connector_unregister(connector
);
1152 drm_connector_cleanup(connector
);
1156 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
1158 struct dm_connector_state
*state
=
1159 to_dm_connector_state(connector
->state
);
1163 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
1166 state
->scaling
= RMX_OFF
;
1167 state
->underscan_enable
= false;
1168 state
->underscan_hborder
= 0;
1169 state
->underscan_vborder
= 0;
1171 connector
->state
= &state
->base
;
1172 connector
->state
->connector
= connector
;
1176 struct drm_connector_state
*amdgpu_dm_connector_atomic_duplicate_state(
1177 struct drm_connector
*connector
)
1179 struct dm_connector_state
*state
=
1180 to_dm_connector_state(connector
->state
);
1182 struct dm_connector_state
*new_state
=
1183 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
1186 __drm_atomic_helper_connector_duplicate_state(connector
,
1188 return &new_state
->base
;
1194 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
1195 .reset
= amdgpu_dm_connector_funcs_reset
,
1196 .detect
= amdgpu_dm_connector_detect
,
1197 .fill_modes
= drm_helper_probe_single_connector_modes
,
1198 .destroy
= amdgpu_dm_connector_destroy
,
1199 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
1200 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
1201 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
1202 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
1205 static struct drm_encoder
*best_encoder(struct drm_connector
*connector
)
1207 int enc_id
= connector
->encoder_ids
[0];
1208 struct drm_mode_object
*obj
;
1209 struct drm_encoder
*encoder
;
1211 DRM_DEBUG_KMS("Finding the best encoder\n");
1213 /* pick the encoder ids */
1215 obj
= drm_mode_object_find(connector
->dev
, enc_id
, DRM_MODE_OBJECT_ENCODER
);
1217 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
1220 encoder
= obj_to_encoder(obj
);
1223 DRM_ERROR("No encoder id\n");
/* drm_connector_helper_funcs.get_modes hook: delegate to the DM helper. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
1232 static void create_eml_sink(struct amdgpu_connector
*aconnector
)
1234 struct dc_sink_init_data init_params
= {
1235 .link
= aconnector
->dc_link
,
1236 .sink_signal
= SIGNAL_TYPE_VIRTUAL
1238 struct edid
*edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
1240 if (!aconnector
->base
.edid_blob_ptr
||
1241 !aconnector
->base
.edid_blob_ptr
->data
) {
1242 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
1243 aconnector
->base
.name
);
1245 aconnector
->base
.force
= DRM_FORCE_OFF
;
1246 aconnector
->base
.override_edid
= false;
1250 aconnector
->edid
= edid
;
1252 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
1253 aconnector
->dc_link
,
1255 (edid
->extensions
+ 1) * EDID_LENGTH
,
1258 if (aconnector
->base
.force
1260 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
1261 aconnector
->dc_link
->local_sink
:
1262 aconnector
->dc_em_sink
;
1265 static void handle_edid_mgmt(struct amdgpu_connector
*aconnector
)
1267 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
1269 /* In case of headless boot with force on for DP managed connector
1270 * Those settings have to be != 0 to get initial modeset
1272 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
1273 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
1274 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
1278 aconnector
->base
.override_edid
= true;
1279 create_eml_sink(aconnector
);
1282 int amdgpu_dm_connector_mode_valid(
1283 struct drm_connector
*connector
,
1284 struct drm_display_mode
*mode
)
1286 int result
= MODE_ERROR
;
1287 struct dc_sink
*dc_sink
;
1288 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
1289 /* TODO: Unhardcode stream count */
1290 struct dc_stream
*stream
;
1291 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
1293 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
1294 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
1297 /* Only run this the first time mode_valid is called to initilialize
1300 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
1301 !aconnector
->dc_em_sink
)
1302 handle_edid_mgmt(aconnector
);
1304 dc_sink
= to_amdgpu_connector(connector
)->dc_sink
;
1306 if (NULL
== dc_sink
) {
1307 DRM_ERROR("dc_sink is NULL!\n");
1311 stream
= dc_create_stream_for_sink(dc_sink
);
1312 if (NULL
== stream
) {
1313 DRM_ERROR("Failed to create stream for sink!\n");
1317 drm_mode_set_crtcinfo(mode
, 0);
1318 fill_stream_properties_from_drm_display_mode(stream
, mode
, connector
);
1320 stream
->src
.width
= mode
->hdisplay
;
1321 stream
->src
.height
= mode
->vdisplay
;
1322 stream
->dst
= stream
->src
;
1324 if (dc_validate_stream(adev
->dm
.dc
, stream
))
1327 dc_stream_release(stream
);
1330 /* TODO: error handling*/
1334 static const struct drm_connector_helper_funcs
1335 amdgpu_dm_connector_helper_funcs
= {
1337 * If hotplug a second bigger display in FB Con mode, bigger resolution
1338 * modes will be filtered by drm_mode_validate_size(), and those modes
1339 * is missing after user start lightdm. So we need to renew modes list.
1340 * in get_modes call back, not just return the modes count
1342 .get_modes
= get_modes
,
1343 .mode_valid
= amdgpu_dm_connector_mode_valid
,
1344 .best_encoder
= best_encoder
1347 static void dm_crtc_helper_disable(struct drm_crtc
*crtc
)
1351 static int dm_crtc_helper_atomic_check(
1352 struct drm_crtc
*crtc
,
1353 struct drm_crtc_state
*state
)
1355 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
1356 struct dc
*dc
= adev
->dm
.dc
;
1357 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(state
);
1360 if (unlikely(!dm_crtc_state
->stream
&& modeset_required(state
))) {
1365 /* In some use cases, like reset, no stream is attached */
1366 if (!dm_crtc_state
->stream
)
1369 if (dc_validate_stream(dc
, dm_crtc_state
->stream
))
1375 static bool dm_crtc_helper_mode_fixup(
1376 struct drm_crtc
*crtc
,
1377 const struct drm_display_mode
*mode
,
1378 struct drm_display_mode
*adjusted_mode
)
1383 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
1384 .disable
= dm_crtc_helper_disable
,
1385 .atomic_check
= dm_crtc_helper_atomic_check
,
1386 .mode_fixup
= dm_crtc_helper_mode_fixup
1389 static void dm_encoder_helper_disable(struct drm_encoder
*encoder
)
1394 static int dm_encoder_helper_atomic_check(
1395 struct drm_encoder
*encoder
,
1396 struct drm_crtc_state
*crtc_state
,
1397 struct drm_connector_state
*conn_state
)
1402 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
1403 .disable
= dm_encoder_helper_disable
,
1404 .atomic_check
= dm_encoder_helper_atomic_check
1407 static void dm_drm_plane_reset(struct drm_plane
*plane
)
1409 struct dm_plane_state
*amdgpu_state
= NULL
;
1412 plane
->funcs
->atomic_destroy_state(plane
, plane
->state
);
1414 amdgpu_state
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
1417 plane
->state
= &amdgpu_state
->base
;
1418 plane
->state
->plane
= plane
;
1419 plane
->state
->rotation
= DRM_MODE_ROTATE_0
;
1425 static struct drm_plane_state
*
1426 dm_drm_plane_duplicate_state(struct drm_plane
*plane
)
1428 struct dm_plane_state
*dm_plane_state
, *old_dm_plane_state
;
1430 old_dm_plane_state
= to_dm_plane_state(plane
->state
);
1431 dm_plane_state
= kzalloc(sizeof(*dm_plane_state
), GFP_KERNEL
);
1432 if (!dm_plane_state
)
1435 __drm_atomic_helper_plane_duplicate_state(plane
, &dm_plane_state
->base
);
1437 if (old_dm_plane_state
->surface
) {
1438 dm_plane_state
->surface
= old_dm_plane_state
->surface
;
1439 dc_surface_retain(dm_plane_state
->surface
);
1442 return &dm_plane_state
->base
;
1445 void dm_drm_plane_destroy_state(struct drm_plane
*plane
,
1446 struct drm_plane_state
*state
)
1448 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
1450 if (dm_plane_state
->surface
)
1451 dc_surface_release(dm_plane_state
->surface
);
1453 __drm_atomic_helper_plane_destroy_state(state
);
1454 kfree(dm_plane_state
);
1457 static const struct drm_plane_funcs dm_plane_funcs
= {
1458 .update_plane
= drm_atomic_helper_update_plane
,
1459 .disable_plane
= drm_atomic_helper_disable_plane
,
1460 .destroy
= drm_plane_cleanup
,
1461 .reset
= dm_drm_plane_reset
,
1462 .atomic_duplicate_state
= dm_drm_plane_duplicate_state
,
1463 .atomic_destroy_state
= dm_drm_plane_destroy_state
,
1466 static int dm_plane_helper_prepare_fb(
1467 struct drm_plane
*plane
,
1468 struct drm_plane_state
*new_state
)
1470 struct amdgpu_framebuffer
*afb
;
1471 struct drm_gem_object
*obj
;
1472 struct amdgpu_bo
*rbo
;
1474 struct dm_plane_state
*dm_plane_state_new
, *dm_plane_state_old
;
1475 unsigned int awidth
;
1477 dm_plane_state_old
= to_dm_plane_state(plane
->state
);
1478 dm_plane_state_new
= to_dm_plane_state(new_state
);
1480 if (!new_state
->fb
) {
1481 DRM_DEBUG_KMS("No FB bound\n");
1485 afb
= to_amdgpu_framebuffer(new_state
->fb
);
1488 rbo
= gem_to_amdgpu_bo(obj
);
1489 r
= amdgpu_bo_reserve(rbo
, false);
1490 if (unlikely(r
!= 0))
1493 r
= amdgpu_bo_pin(rbo
, AMDGPU_GEM_DOMAIN_VRAM
, &afb
->address
);
1496 amdgpu_bo_unreserve(rbo
);
1498 if (unlikely(r
!= 0)) {
1499 DRM_ERROR("Failed to pin framebuffer\n");
1505 if (dm_plane_state_new
->surface
&&
1506 dm_plane_state_old
->surface
!= dm_plane_state_new
->surface
) {
1507 struct dc_surface
*surface
= dm_plane_state_new
->surface
;
1509 if (surface
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
1510 surface
->address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
1511 surface
->address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
1513 awidth
= ALIGN(new_state
->fb
->width
, 64);
1514 surface
->address
.video_progressive
.luma_addr
.low_part
1515 = lower_32_bits(afb
->address
);
1516 surface
->address
.video_progressive
.chroma_addr
.low_part
1517 = lower_32_bits(afb
->address
) +
1518 (awidth
* new_state
->fb
->height
);
1522 /* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
1523 * prepare and cleanup in drm_atomic_helper_prepare_planes
1524 * and drm_atomic_helper_cleanup_planes because fb doens't in s3.
1525 * IN 4.10 kernel this code should be removed and amdgpu_device_suspend
1526 * code touching fram buffers should be avoided for DC.
1528 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
1529 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(new_state
->crtc
);
1531 acrtc
->cursor_bo
= obj
;
1536 static void dm_plane_helper_cleanup_fb(
1537 struct drm_plane
*plane
,
1538 struct drm_plane_state
*old_state
)
1540 struct amdgpu_bo
*rbo
;
1541 struct amdgpu_framebuffer
*afb
;
1547 afb
= to_amdgpu_framebuffer(old_state
->fb
);
1548 rbo
= gem_to_amdgpu_bo(afb
->obj
);
1549 r
= amdgpu_bo_reserve(rbo
, false);
1551 DRM_ERROR("failed to reserve rbo before unpin\n");
1554 amdgpu_bo_unpin(rbo
);
1555 amdgpu_bo_unreserve(rbo
);
1556 amdgpu_bo_unref(&rbo
);
1560 int dm_create_validation_set_for_connector(struct drm_connector
*connector
,
1561 struct drm_display_mode
*mode
, struct dc_validation_set
*val_set
)
1563 int result
= MODE_ERROR
;
1564 struct dc_sink
*dc_sink
=
1565 to_amdgpu_connector(connector
)->dc_sink
;
1566 /* TODO: Unhardcode stream count */
1567 struct dc_stream
*stream
;
1569 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
1570 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
1573 if (NULL
== dc_sink
) {
1574 DRM_ERROR("dc_sink is NULL!\n");
1578 stream
= dc_create_stream_for_sink(dc_sink
);
1580 if (NULL
== stream
) {
1581 DRM_ERROR("Failed to create stream for sink!\n");
1585 drm_mode_set_crtcinfo(mode
, 0);
1587 fill_stream_properties_from_drm_display_mode(stream
, mode
, connector
);
1589 val_set
->stream
= stream
;
1591 stream
->src
.width
= mode
->hdisplay
;
1592 stream
->src
.height
= mode
->vdisplay
;
1593 stream
->dst
= stream
->src
;
1598 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
1599 .prepare_fb
= dm_plane_helper_prepare_fb
,
1600 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
1604 * TODO: these are currently initialized to rgb formats only.
1605 * For future use cases we should either initialize them dynamically based on
1606 * plane capabilities, or initialize this array to all formats, so internal drm
1607 * check will succeed, and let DC to implement proper check
1609 static uint32_t rgb_formats
[] = {
1611 DRM_FORMAT_XRGB8888
,
1612 DRM_FORMAT_ARGB8888
,
1613 DRM_FORMAT_RGBA8888
,
1614 DRM_FORMAT_XRGB2101010
,
1615 DRM_FORMAT_XBGR2101010
,
1616 DRM_FORMAT_ARGB2101010
,
1617 DRM_FORMAT_ABGR2101010
,
1620 static uint32_t yuv_formats
[] = {
1625 static const u32 cursor_formats
[] = {
1629 int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
1630 struct amdgpu_plane
*aplane
,
1631 unsigned long possible_crtcs
)
1635 switch (aplane
->base
.type
) {
1636 case DRM_PLANE_TYPE_PRIMARY
:
1637 aplane
->base
.format_default
= true;
1639 res
= drm_universal_plane_init(
1645 ARRAY_SIZE(rgb_formats
),
1646 NULL
, aplane
->base
.type
, NULL
);
1648 case DRM_PLANE_TYPE_OVERLAY
:
1649 res
= drm_universal_plane_init(
1655 ARRAY_SIZE(yuv_formats
),
1656 NULL
, aplane
->base
.type
, NULL
);
1658 case DRM_PLANE_TYPE_CURSOR
:
1659 res
= drm_universal_plane_init(
1665 ARRAY_SIZE(cursor_formats
),
1666 NULL
, aplane
->base
.type
, NULL
);
1670 drm_plane_helper_add(&aplane
->base
, &dm_plane_helper_funcs
);
1675 int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
1676 struct drm_plane
*plane
,
1677 uint32_t crtc_index
)
1679 struct amdgpu_crtc
*acrtc
= NULL
;
1680 struct amdgpu_plane
*cursor_plane
;
1684 cursor_plane
= kzalloc(sizeof(*cursor_plane
), GFP_KERNEL
);
1688 cursor_plane
->base
.type
= DRM_PLANE_TYPE_CURSOR
;
1689 res
= amdgpu_dm_plane_init(dm
, cursor_plane
, 0);
1691 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
1695 res
= drm_crtc_init_with_planes(
1699 &cursor_plane
->base
,
1700 &amdgpu_dm_crtc_funcs
, NULL
);
1705 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
1707 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
1708 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
1710 acrtc
->crtc_id
= crtc_index
;
1711 acrtc
->base
.enabled
= false;
1713 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
1714 drm_mode_crtc_set_gamma_size(&acrtc
->base
, 256);
1722 kfree(cursor_plane
);
1723 acrtc
->crtc_id
= -1;
1728 static int to_drm_connector_type(enum signal_type st
)
1731 case SIGNAL_TYPE_HDMI_TYPE_A
:
1732 return DRM_MODE_CONNECTOR_HDMIA
;
1733 case SIGNAL_TYPE_EDP
:
1734 return DRM_MODE_CONNECTOR_eDP
;
1735 case SIGNAL_TYPE_RGB
:
1736 return DRM_MODE_CONNECTOR_VGA
;
1737 case SIGNAL_TYPE_DISPLAY_PORT
:
1738 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
1739 return DRM_MODE_CONNECTOR_DisplayPort
;
1740 case SIGNAL_TYPE_DVI_DUAL_LINK
:
1741 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
1742 return DRM_MODE_CONNECTOR_DVID
;
1743 case SIGNAL_TYPE_VIRTUAL
:
1744 return DRM_MODE_CONNECTOR_VIRTUAL
;
1747 return DRM_MODE_CONNECTOR_Unknown
;
1751 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
1753 const struct drm_connector_helper_funcs
*helper
=
1754 connector
->helper_private
;
1755 struct drm_encoder
*encoder
;
1756 struct amdgpu_encoder
*amdgpu_encoder
;
1758 encoder
= helper
->best_encoder(connector
);
1760 if (encoder
== NULL
)
1763 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
1765 amdgpu_encoder
->native_mode
.clock
= 0;
1767 if (!list_empty(&connector
->probed_modes
)) {
1768 struct drm_display_mode
*preferred_mode
= NULL
;
1769 list_for_each_entry(preferred_mode
,
1770 &connector
->probed_modes
,
1772 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
1773 amdgpu_encoder
->native_mode
= *preferred_mode
;
1781 static struct drm_display_mode
*amdgpu_dm_create_common_mode(
1782 struct drm_encoder
*encoder
, char *name
,
1783 int hdisplay
, int vdisplay
)
1785 struct drm_device
*dev
= encoder
->dev
;
1786 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
1787 struct drm_display_mode
*mode
= NULL
;
1788 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
1790 mode
= drm_mode_duplicate(dev
, native_mode
);
1795 mode
->hdisplay
= hdisplay
;
1796 mode
->vdisplay
= vdisplay
;
1797 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
1798 strncpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
1804 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
1805 struct drm_connector
*connector
)
1807 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
1808 struct drm_display_mode
*mode
= NULL
;
1809 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
1810 struct amdgpu_connector
*amdgpu_connector
=
1811 to_amdgpu_connector(connector
);
1815 char name
[DRM_DISPLAY_MODE_LEN
];
1819 { "640x480", 640, 480},
1820 { "800x600", 800, 600},
1821 { "1024x768", 1024, 768},
1822 { "1280x720", 1280, 720},
1823 { "1280x800", 1280, 800},
1824 {"1280x1024", 1280, 1024},
1825 { "1440x900", 1440, 900},
1826 {"1680x1050", 1680, 1050},
1827 {"1600x1200", 1600, 1200},
1828 {"1920x1080", 1920, 1080},
1829 {"1920x1200", 1920, 1200}
1832 n
= sizeof(common_modes
) / sizeof(common_modes
[0]);
1834 for (i
= 0; i
< n
; i
++) {
1835 struct drm_display_mode
*curmode
= NULL
;
1836 bool mode_existed
= false;
1838 if (common_modes
[i
].w
> native_mode
->hdisplay
||
1839 common_modes
[i
].h
> native_mode
->vdisplay
||
1840 (common_modes
[i
].w
== native_mode
->hdisplay
&&
1841 common_modes
[i
].h
== native_mode
->vdisplay
))
1844 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
1845 if (common_modes
[i
].w
== curmode
->hdisplay
&&
1846 common_modes
[i
].h
== curmode
->vdisplay
) {
1847 mode_existed
= true;
1855 mode
= amdgpu_dm_create_common_mode(encoder
,
1856 common_modes
[i
].name
, common_modes
[i
].w
,
1858 drm_mode_probed_add(connector
, mode
);
1859 amdgpu_connector
->num_modes
++;
1863 static void amdgpu_dm_connector_ddc_get_modes(
1864 struct drm_connector
*connector
,
1867 struct amdgpu_connector
*amdgpu_connector
=
1868 to_amdgpu_connector(connector
);
1871 /* empty probed_modes */
1872 INIT_LIST_HEAD(&connector
->probed_modes
);
1873 amdgpu_connector
->num_modes
=
1874 drm_add_edid_modes(connector
, edid
);
1876 drm_edid_to_eld(connector
, edid
);
1878 amdgpu_dm_get_native_mode(connector
);
1880 amdgpu_connector
->num_modes
= 0;
1883 int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
1885 const struct drm_connector_helper_funcs
*helper
=
1886 connector
->helper_private
;
1887 struct amdgpu_connector
*amdgpu_connector
=
1888 to_amdgpu_connector(connector
);
1889 struct drm_encoder
*encoder
;
1890 struct edid
*edid
= amdgpu_connector
->edid
;
1892 encoder
= helper
->best_encoder(connector
);
1894 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
1895 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
1896 return amdgpu_connector
->num_modes
;
1899 void amdgpu_dm_connector_init_helper(
1900 struct amdgpu_display_manager
*dm
,
1901 struct amdgpu_connector
*aconnector
,
1903 struct dc_link
*link
,
1906 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
1908 aconnector
->connector_id
= link_index
;
1909 aconnector
->dc_link
= link
;
1910 aconnector
->base
.interlace_allowed
= false;
1911 aconnector
->base
.doublescan_allowed
= false;
1912 aconnector
->base
.stereo_allowed
= false;
1913 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
1914 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
1916 mutex_init(&aconnector
->hpd_lock
);
1918 /*configure suport HPD hot plug connector_>polled default value is 0
1919 * which means HPD hot plug not supported*/
1920 switch (connector_type
) {
1921 case DRM_MODE_CONNECTOR_HDMIA
:
1922 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
1924 case DRM_MODE_CONNECTOR_DisplayPort
:
1925 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
1927 case DRM_MODE_CONNECTOR_DVID
:
1928 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
1934 drm_object_attach_property(&aconnector
->base
.base
,
1935 dm
->ddev
->mode_config
.scaling_mode_property
,
1936 DRM_MODE_SCALE_NONE
);
1938 drm_object_attach_property(&aconnector
->base
.base
,
1939 adev
->mode_info
.underscan_property
,
1941 drm_object_attach_property(&aconnector
->base
.base
,
1942 adev
->mode_info
.underscan_hborder_property
,
1944 drm_object_attach_property(&aconnector
->base
.base
,
1945 adev
->mode_info
.underscan_vborder_property
,
1950 int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
1951 struct i2c_msg
*msgs
, int num
)
1953 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
1954 struct ddc_service
*ddc_service
= i2c
->ddc_service
;
1955 struct i2c_command cmd
;
1959 cmd
.payloads
= kzalloc(num
* sizeof(struct i2c_payload
), GFP_KERNEL
);
1964 cmd
.number_of_payloads
= num
;
1965 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
1968 for (i
= 0; i
< num
; i
++) {
1969 cmd
.payloads
[i
].write
= !(msgs
[i
].flags
& I2C_M_RD
);
1970 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
1971 cmd
.payloads
[i
].length
= msgs
[i
].len
;
1972 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
1975 if (dal_i2caux_submit_i2c_command(
1976 ddc_service
->ctx
->i2caux
,
1977 ddc_service
->ddc_pin
,
1981 kfree(cmd
.payloads
);
1985 u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
1987 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
1990 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
1991 .master_xfer
= amdgpu_dm_i2c_xfer
,
1992 .functionality
= amdgpu_dm_i2c_func
,
1995 static struct amdgpu_i2c_adapter
*create_i2c(
1996 struct ddc_service
*ddc_service
,
2000 struct amdgpu_device
*adev
= ddc_service
->ctx
->driver_context
;
2001 struct amdgpu_i2c_adapter
*i2c
;
2003 i2c
= kzalloc(sizeof (struct amdgpu_i2c_adapter
), GFP_KERNEL
);
2004 i2c
->base
.owner
= THIS_MODULE
;
2005 i2c
->base
.class = I2C_CLASS_DDC
;
2006 i2c
->base
.dev
.parent
= &adev
->pdev
->dev
;
2007 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
2008 snprintf(i2c
->base
.name
, sizeof (i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
2009 i2c_set_adapdata(&i2c
->base
, i2c
);
2010 i2c
->ddc_service
= ddc_service
;
2015 /* Note: this function assumes that dc_link_detect() was called for the
2016 * dc_link which will be represented by this aconnector. */
2017 int amdgpu_dm_connector_init(
2018 struct amdgpu_display_manager
*dm
,
2019 struct amdgpu_connector
*aconnector
,
2020 uint32_t link_index
,
2021 struct amdgpu_encoder
*aencoder
)
2025 struct dc
*dc
= dm
->dc
;
2026 struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
2027 struct amdgpu_i2c_adapter
*i2c
;
2028 ((struct dc_link
*)link
)->priv
= aconnector
;
2030 DRM_DEBUG_KMS("%s()\n", __func__
);
2032 i2c
= create_i2c(link
->ddc
, link
->link_index
, &res
);
2033 aconnector
->i2c
= i2c
;
2034 res
= i2c_add_adapter(&i2c
->base
);
2037 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
2041 connector_type
= to_drm_connector_type(link
->connector_signal
);
2043 res
= drm_connector_init(
2046 &amdgpu_dm_connector_funcs
,
2050 DRM_ERROR("connector_init failed\n");
2051 aconnector
->connector_id
= -1;
2055 drm_connector_helper_add(
2057 &amdgpu_dm_connector_helper_funcs
);
2059 amdgpu_dm_connector_init_helper(
2066 drm_mode_connector_attach_encoder(
2067 &aconnector
->base
, &aencoder
->base
);
2069 drm_connector_register(&aconnector
->base
);
2071 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
2072 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
2073 amdgpu_dm_initialize_dp_connector(dm
, aconnector
);
2075 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2076 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2078 /* NOTE: this currently will create backlight device even if a panel
2079 * is not connected to the eDP/LVDS connector.
2081 * This is less than ideal but we don't have sink information at this
2082 * stage since detection happens after. We can't do detection earlier
2083 * since MST detection needs connectors to be created first.
2085 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
2086 /* Event if registration failed, we should continue with
2087 * DM initialization because not having a backlight control
2088 * is better then a black screen. */
2089 amdgpu_dm_register_backlight_device(dm
);
2091 if (dm
->backlight_dev
)
2092 dm
->backlight_link
= link
;
2099 aconnector
->i2c
= NULL
;
2104 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
2106 switch (adev
->mode_info
.num_crtc
) {
2123 int amdgpu_dm_encoder_init(
2124 struct drm_device
*dev
,
2125 struct amdgpu_encoder
*aencoder
,
2126 uint32_t link_index
)
2128 struct amdgpu_device
*adev
= dev
->dev_private
;
2130 int res
= drm_encoder_init(dev
,
2132 &amdgpu_dm_encoder_funcs
,
2133 DRM_MODE_ENCODER_TMDS
,
2136 aencoder
->base
.possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
2139 aencoder
->encoder_id
= link_index
;
2141 aencoder
->encoder_id
= -1;
2143 drm_encoder_helper_add(&aencoder
->base
, &amdgpu_dm_encoder_helper_funcs
);
2148 static void manage_dm_interrupts(
2149 struct amdgpu_device
*adev
,
2150 struct amdgpu_crtc
*acrtc
,
2154 * this is not correct translation but will work as soon as VBLANK
2155 * constant is the same as PFLIP
2158 amdgpu_crtc_idx_to_irq_type(
2163 drm_crtc_vblank_on(&acrtc
->base
);
2166 &adev
->pageflip_irq
,
2172 &adev
->pageflip_irq
,
2174 drm_crtc_vblank_off(&acrtc
->base
);
2178 static bool is_scaling_state_different(
2179 const struct dm_connector_state
*dm_state
,
2180 const struct dm_connector_state
*old_dm_state
)
2182 if (dm_state
->scaling
!= old_dm_state
->scaling
)
2184 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
2185 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
2187 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
2188 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
2190 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
2191 || dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
2196 static void remove_stream(
2197 struct amdgpu_device
*adev
,
2198 struct amdgpu_crtc
*acrtc
,
2199 struct dc_stream
*stream
)
2201 /* this is the update mode case */
2202 if (adev
->dm
.freesync_module
)
2203 mod_freesync_remove_stream(adev
->dm
.freesync_module
, stream
);
2205 acrtc
->otg_inst
= -1;
2206 acrtc
->enabled
= false;
2209 static void handle_cursor_update(
2210 struct drm_plane
*plane
,
2211 struct drm_plane_state
*old_plane_state
)
2213 if (!plane
->state
->fb
&& !old_plane_state
->fb
)
2216 /* Check if it's a cursor on/off update or just cursor move*/
2217 if (plane
->state
->fb
== old_plane_state
->fb
)
2218 dm_crtc_cursor_move(
2220 plane
->state
->crtc_x
,
2221 plane
->state
->crtc_y
);
2223 struct amdgpu_framebuffer
*afb
=
2224 to_amdgpu_framebuffer(plane
->state
->fb
);
2226 (!!plane
->state
->fb
) ?
2227 plane
->state
->crtc
:
2228 old_plane_state
->crtc
,
2229 (!!plane
->state
->fb
) ?
2232 plane
->state
->crtc_w
,
2233 plane
->state
->crtc_h
);
2238 static void prepare_flip_isr(struct amdgpu_crtc
*acrtc
)
2241 assert_spin_locked(&acrtc
->base
.dev
->event_lock
);
2242 WARN_ON(acrtc
->event
);
2244 acrtc
->event
= acrtc
->base
.state
->event
;
2246 /* Set the flip status */
2247 acrtc
->pflip_status
= AMDGPU_FLIP_SUBMITTED
;
2249 /* Mark this event as consumed */
2250 acrtc
->base
.state
->event
= NULL
;
2252 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
2259 * Waits on all BO's fences and for proper vblank count
2261 static void amdgpu_dm_do_flip(
2262 struct drm_crtc
*crtc
,
2263 struct drm_framebuffer
*fb
,
2266 unsigned long flags
;
2267 uint32_t target_vblank
;
2269 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2270 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(fb
);
2271 struct amdgpu_bo
*abo
= gem_to_amdgpu_bo(afb
->obj
);
2272 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
2273 bool async_flip
= (acrtc
->flip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
) != 0;
2274 struct dc_flip_addrs addr
= { {0} };
2275 struct dc_surface_update surface_updates
[1] = { {0} };
2276 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
2279 /* Prepare wait for target vblank early - before the fence-waits */
2280 target_vblank
= target
- drm_crtc_vblank_count(crtc
) +
2281 amdgpu_get_vblank_counter_kms(crtc
->dev
, acrtc
->crtc_id
);
2283 /*TODO This might fail and hence better not used, wait
2284 * explicitly on fences instead
2285 * and in general should be called for
2286 * blocking commit to as per framework helpers
2288 r
= amdgpu_bo_reserve(abo
, true);
2289 if (unlikely(r
!= 0)) {
2290 DRM_ERROR("failed to reserve buffer before flip\n");
2294 /* Wait for all fences on this FB */
2295 WARN_ON(reservation_object_wait_timeout_rcu(abo
->tbo
.resv
, true, false,
2296 MAX_SCHEDULE_TIMEOUT
) < 0);
2298 amdgpu_bo_unreserve(abo
);
2300 /* Wait until we're out of the vertical blank period before the one
2301 * targeted by the flip
2303 while ((acrtc
->enabled
&&
2304 (amdgpu_get_crtc_scanoutpos(adev
->ddev
, acrtc
->crtc_id
, 0,
2305 &vpos
, &hpos
, NULL
, NULL
,
2307 & (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
)) ==
2308 (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
) &&
2309 (int)(target_vblank
-
2310 amdgpu_get_vblank_counter_kms(adev
->ddev
, acrtc
->crtc_id
)) > 0)) {
2311 usleep_range(1000, 1100);
2315 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
2316 /* update crtc fb */
2317 crtc
->primary
->fb
= fb
;
2319 WARN_ON(acrtc
->pflip_status
!= AMDGPU_FLIP_NONE
);
2320 WARN_ON(!acrtc_state
->stream
);
2322 addr
.address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
2323 addr
.address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
2324 addr
.flip_immediate
= async_flip
;
2327 if (acrtc
->base
.state
->event
)
2328 prepare_flip_isr(acrtc
);
2330 surface_updates
->surface
= dc_stream_get_status(acrtc_state
->stream
)->surfaces
[0];
2331 surface_updates
->flip_addr
= &addr
;
2334 dc_update_surfaces_and_stream(adev
->dm
.dc
, surface_updates
, 1, acrtc_state
->stream
, NULL
);
2336 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
2338 addr
.address
.grph
.addr
.high_part
,
2339 addr
.address
.grph
.addr
.low_part
);
2342 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
2345 static void amdgpu_dm_commit_surfaces(struct drm_atomic_state
*state
,
2346 struct drm_device
*dev
,
2347 struct amdgpu_display_manager
*dm
,
2348 struct drm_crtc
*pcrtc
,
2349 bool *wait_for_vblank
)
2352 struct drm_plane
*plane
;
2353 struct drm_plane_state
*old_plane_state
;
2354 struct dc_stream
*dc_stream_attach
;
2355 struct dc_surface
*dc_surfaces_constructed
[MAX_SURFACES
];
2356 struct amdgpu_crtc
*acrtc_attach
= to_amdgpu_crtc(pcrtc
);
2357 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(pcrtc
->state
);
2358 int planes_count
= 0;
2359 unsigned long flags
;
2361 /* update planes when needed */
2362 for_each_plane_in_state(state
, plane
, old_plane_state
, i
) {
2363 struct drm_plane_state
*plane_state
= plane
->state
;
2364 struct drm_crtc
*crtc
= plane_state
->crtc
;
2365 struct drm_framebuffer
*fb
= plane_state
->fb
;
2367 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(plane_state
);
2369 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
2370 handle_cursor_update(plane
, old_plane_state
);
2374 if (!fb
|| !crtc
|| pcrtc
!= crtc
|| !crtc
->state
->active
||
2375 (!crtc
->state
->planes_changed
&&
2376 !pcrtc
->state
->color_mgmt_changed
))
2379 pflip_needed
= !state
->allow_modeset
;
2381 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
2382 if (acrtc_attach
->pflip_status
!= AMDGPU_FLIP_NONE
) {
2383 DRM_ERROR("add_surface: acrtc %d, already busy\n",
2384 acrtc_attach
->crtc_id
);
2385 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
2386 /* In comit tail framework this cannot happen */
2389 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
2391 if (!pflip_needed
) {
2392 WARN_ON(!dm_plane_state
->surface
);
2394 dc_surfaces_constructed
[planes_count
] = dm_plane_state
->surface
;
2396 dc_stream_attach
= acrtc_state
->stream
;
2399 } else if (crtc
->state
->planes_changed
) {
2400 /* Assume even ONE crtc with immediate flip means
2401 * entire can't wait for VBLANK
2402 * TODO Check if it's correct
2405 acrtc_attach
->flip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
?
2408 /* TODO: Needs rework for multiplane flip */
2409 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
)
2410 drm_crtc_vblank_get(crtc
);
2415 drm_crtc_vblank_count(crtc
) + *wait_for_vblank
);
2417 /*TODO BUG remove ASAP in 4.12 to avoid race between worker and flip IOCTL */
2419 /*clean up the flags for next usage*/
2420 acrtc_attach
->flip_flags
= 0;
2426 unsigned long flags
;
2428 if (pcrtc
->state
->event
) {
2430 drm_crtc_vblank_get(pcrtc
);
2432 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
2433 prepare_flip_isr(acrtc_attach
);
2434 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
2437 if (false == dc_commit_surfaces_to_stream(dm
->dc
,
2438 dc_surfaces_constructed
,
2441 dm_error("%s: Failed to attach surface!\n", __func__
);
2443 /*TODO BUG Here should go disable planes on CRTC. */
/*
 * amdgpu_dm_atomic_commit - DM's atomic-commit entry point.
 *
 * Before delegating to drm_atomic_helper_commit(), walk all CRTCs in the
 * incoming atomic state and disable DM interrupts (vblank/pflip) on every
 * CRTC that needs a modeset and currently drives a DC stream, so interrupts
 * are flushed before the helper swaps state (see in-body comment).
 *
 * NOTE(review): this extraction elides interior lines — the "nonblock"
 * parameter and the loop index "i" are declared on lines not shown here.
 * Verify against the full source before relying on exact control flow.
 */
2448 int amdgpu_dm_atomic_commit(
2449 struct drm_device
*dev
,
2450 struct drm_atomic_state
*state
,
2453 struct drm_crtc
*crtc
;
2454 struct drm_crtc_state
*new_state
;
2455 struct amdgpu_device
*adev
= dev
->dev_private
;
2459 * We evade vblanks and pflips on crtc that
2460 * should be changed. We do it here to flush & disable
2461 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
2462 * it will update crtc->dm_crtc_state->stream pointer which is used in
2465 for_each_crtc_in_state(state
, crtc
, new_state
, i
) {
2466 struct dm_crtc_state
*old_acrtc_state
= to_dm_crtc_state(crtc
->state
);
2467 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
/* Quiesce DM interrupts only for CRTCs that are being modeset and
 * actually have a stream attached in the current (old) state. */
2469 if (drm_atomic_crtc_needs_modeset(new_state
) && old_acrtc_state
->stream
)
2470 manage_dm_interrupts(adev
, acrtc
, false);
/* Hand the heavy lifting to the DRM atomic helper. */
2473 return drm_atomic_helper_commit(dev
, state
, nonblock
);
2475 /*TODO Handle EINTR, reenable IRQ*/
/*
 * amdgpu_dm_atomic_commit_tail - programs hardware for a committed atomic
 * state. Visible phases in this extraction:
 *   1. Update DRM legacy modeset bookkeeping.
 *   2. Per-CRTC: create/remove DC streams for modesets and resets, record
 *      newly enabled CRTCs in new_crtcs[].
 *   3. Register new streams with the freesync module.
 *   4. Commit the validated DC context (dc_commit_context).
 *   5. Cache each CRTC's OTG instance from the stream status.
 *   6. Apply scaling/underscan-only connector changes.
 *   7. Re-enable DM interrupts on newly enabled CRTCs.
 *   8. Commit surfaces (flips) per CRTC, then send leftover vblank events,
 *      signal hw_done, optionally wait for vblanks, and clean up planes.
 *
 * NOTE(review): many interior lines are elided in this extraction (loop
 * indices i/j declarations, several if-bodies and closing braces) — confirm
 * control flow against the full source.
 */
2478 void amdgpu_dm_atomic_commit_tail(
2479 struct drm_atomic_state
*state
)
2481 struct drm_device
*dev
= state
->dev
;
2482 struct amdgpu_device
*adev
= dev
->dev_private
;
2483 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2484 struct dm_atomic_state
*dm_state
;
2486 uint32_t new_crtcs_count
= 0;
2487 struct drm_crtc
*crtc
, *pcrtc
;
2488 struct drm_crtc_state
*old_crtc_state
;
2489 struct amdgpu_crtc
*new_crtcs
[MAX_STREAMS
];
2490 struct dc_stream
*new_stream
= NULL
;
2491 unsigned long flags
;
2492 bool wait_for_vblank
= true;
2493 struct drm_connector
*connector
;
2494 struct drm_connector_state
*old_conn_state
;
2495 struct dm_crtc_state
*old_acrtc_state
, *new_acrtc_state
;
2497 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
2499 dm_state
= to_dm_atomic_state(state
);
2501 /* update changed items */
2502 for_each_crtc_in_state(state
, crtc
, old_crtc_state
, i
) {
2503 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2504 struct drm_crtc_state
*new_state
= crtc
->state
;
2505 new_acrtc_state
= to_dm_crtc_state(new_state
);
2506 old_acrtc_state
= to_dm_crtc_state(old_crtc_state
);
2509 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
2510 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
2511 "connectors_changed:%d\n",
2515 new_state
->planes_changed
,
2516 new_state
->mode_changed
,
2517 new_state
->active_changed
,
2518 new_state
->connectors_changed
);
2520 /* handles headless hotplug case, updating new_state and
2521 * aconnector as needed
2524 if (modeset_required(new_state
)) {
2526 DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc
->crtc_id
, acrtc
);
2528 if (!new_acrtc_state
->stream
) {
2530 * this could happen because of issues with
2531 * userspace notifications delivery.
2532 * In this case userspace tries to set mode on
2533 * display which is disconnect in fact.
2534 * dc_sink in NULL in this case on aconnector.
2535 * We expect reset mode will come soon.
2537 * This can also happen when unplug is done
2538 * during resume sequence ended
2540 * In this case, we want to pretend we still
2541 * have a sink to keep the pipe running so that
2542 * hw state is consistent with the sw state
2544 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
2545 __func__
, acrtc
->base
.base
.id
);
/* Replacing an active stream: tear the old one down first. */
2550 if (old_acrtc_state
->stream
)
2551 remove_stream(adev
, acrtc
, old_acrtc_state
->stream
);
2555 * this loop saves set mode crtcs
2556 * we needed to enable vblanks once all
2557 * resources acquired in dc after dc_commit_streams
2560 /*TODO move all this into dm_crtc_state, get rid of
2561 * new_crtcs array and use old and new atomic states
2564 new_crtcs
[new_crtcs_count
] = acrtc
;
2567 acrtc
->enabled
= true;
2568 acrtc
->hw_mode
= crtc
->state
->mode
;
2569 crtc
->hwmode
= crtc
->state
->mode
;
2570 } else if (modereset_required(new_state
)) {
2571 DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc
->crtc_id
, acrtc
);
2573 /* i.e. reset mode */
2574 if (old_acrtc_state
->stream
)
2575 remove_stream(adev
, acrtc
, old_acrtc_state
->stream
);
2577 } /* for_each_crtc_in_state() */
2580 * Add streams after required streams from new and replaced streams
2581 * are removed from freesync module
2583 if (adev
->dm
.freesync_module
) {
2584 for (i
= 0; i
< new_crtcs_count
; i
++) {
2585 struct amdgpu_connector
*aconnector
= NULL
;
2586 new_acrtc_state
= to_dm_crtc_state(new_crtcs
[i
]->base
.state
);
2588 new_stream
= new_acrtc_state
->stream
;
2590 amdgpu_dm_find_first_crct_matching_connector(
2592 &new_crtcs
[i
]->base
,
2596 "Atomic commit: Failed to find connector for acrtc id:%d "
2597 "skipping freesync init\n",
2598 new_crtcs
[i
]->crtc_id
);
2602 mod_freesync_add_stream(adev
->dm
.freesync_module
,
2603 new_stream
, &aconnector
->caps
);
/* Program the new global DC state in one shot. */
2607 if (dm_state
->context
)
2608 WARN_ON(!dc_commit_context(dm
->dc
, dm_state
->context
));
2611 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
2612 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2613 new_acrtc_state
= to_dm_crtc_state(crtc
->state
);
2615 if (new_acrtc_state
->stream
!= NULL
) {
2616 const struct dc_stream_status
*status
=
2617 dc_stream_get_status(new_acrtc_state
->stream
);
2620 DC_ERR("got no status for stream %p on acrtc%p\n", new_acrtc_state
->stream
, acrtc
);
2622 acrtc
->otg_inst
= status
->primary_otg_inst
;
2626 /* Handle scaling and underscan changes*/
2627 for_each_connector_in_state(state
, connector
, old_conn_state
, i
) {
2628 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
2629 struct dm_connector_state
*con_new_state
=
2630 to_dm_connector_state(aconnector
->base
.state
);
2631 struct dm_connector_state
*con_old_state
=
2632 to_dm_connector_state(old_conn_state
);
2633 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(con_new_state
->base
.crtc
);
2634 struct dc_stream_status
*status
= NULL
;
2636 /* Skip any modesets/resets */
2637 if (!acrtc
|| drm_atomic_crtc_needs_modeset(acrtc
->base
.state
))
2640 /* Skip any thing not scale or underscan changes */
2641 if (!is_scaling_state_different(con_new_state
, con_old_state
))
2644 new_acrtc_state
= to_dm_crtc_state(acrtc
->base
.state
);
2646 update_stream_scaling_settings(&con_new_state
->base
.crtc
->mode
,
2647 con_new_state
, (struct dc_stream
*)new_acrtc_state
->stream
);
2649 status
= dc_stream_get_status(new_acrtc_state
->stream
);
2651 WARN_ON(!status
->surface_count
);
2653 if (!new_acrtc_state
->stream
)
2656 /*TODO How it works with MPO ?*/
2657 if (!dc_commit_surfaces_to_stream(
2660 status
->surface_count
,
2661 new_acrtc_state
->stream
))
2662 dm_error("%s: Failed to update stream scaling!\n", __func__
);
2665 for (i
= 0; i
< new_crtcs_count
; i
++) {
2667 * loop to enable interrupts on newly arrived crtc
2669 struct amdgpu_crtc
*acrtc
= new_crtcs
[i
];
2670 new_acrtc_state
= to_dm_crtc_state(acrtc
->base
.state
);
2672 if (adev
->dm
.freesync_module
)
2673 mod_freesync_notify_mode_change(
2674 adev
->dm
.freesync_module
, &new_acrtc_state
->stream
, 1);
2676 manage_dm_interrupts(adev
, acrtc
, true);
2679 /* update planes when needed per crtc*/
2680 for_each_crtc_in_state(state
, pcrtc
, old_crtc_state
, j
) {
2681 new_acrtc_state
= to_dm_crtc_state(pcrtc
->state
);
2683 if (new_acrtc_state
->stream
)
2684 amdgpu_dm_commit_surfaces(state
, dev
, dm
, pcrtc
, &wait_for_vblank
);
2689 * send vblank event on all events not handled in flip and
2690 * mark consumed event for drm_atomic_helper_commit_hw_done
2692 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
2693 for_each_crtc_in_state(state
, crtc
, old_crtc_state
, i
) {
2694 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2696 if (acrtc
->base
.state
->event
)
2697 drm_send_event_locked(dev
, &crtc
->state
->event
->base
);
/* Mark the event consumed so commit_hw_done() does not send it again. */
2699 acrtc
->base
.state
->event
= NULL
;
2701 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
2703 /* Signal HW programming completion */
2704 drm_atomic_helper_commit_hw_done(state
);
2706 if (wait_for_vblank
)
2707 drm_atomic_helper_wait_for_vblanks(dev
, state
);
2709 drm_atomic_helper_cleanup_planes(dev
, state
);
/*
 * dm_force_atomic_commit - build and commit a minimal atomic state that
 * restores the connector's current display configuration.
 *
 * The constructed state references the connector, its CRTC (with
 * mode_changed forced to true so a full modeset is performed), and the
 * CRTC's primary plane, then commits it via drm_atomic_commit().
 *
 * NOTE(review): error-handling lines (NULL check on drm_atomic_state_alloc,
 * "if (ret)" branches after each PTR_ERR_OR_ZERO, and cleanup labels) are
 * elided in this extraction — confirm against the full source.
 */
2713 static int dm_force_atomic_commit(struct drm_connector
*connector
)
2716 struct drm_device
*ddev
= connector
->dev
;
2717 struct drm_atomic_state
*state
= drm_atomic_state_alloc(ddev
);
2718 struct amdgpu_crtc
*disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
2719 struct drm_plane
*plane
= disconnected_acrtc
->base
.primary
;
2720 struct drm_connector_state
*conn_state
;
2721 struct drm_crtc_state
*crtc_state
;
2722 struct drm_plane_state
*plane_state
;
/* Reuse the acquire context already held by the caller's locking. */
2727 state
->acquire_ctx
= ddev
->mode_config
.acquire_ctx
;
2729 /* Construct an atomic state to restore previous display setting */
2732 * Attach connectors to drm_atomic_state
2734 conn_state
= drm_atomic_get_connector_state(state
, connector
);
2736 ret
= PTR_ERR_OR_ZERO(conn_state
);
2740 /* Attach crtc to drm_atomic_state*/
2741 crtc_state
= drm_atomic_get_crtc_state(state
, &disconnected_acrtc
->base
);
2743 ret
= PTR_ERR_OR_ZERO(crtc_state
);
2747 /* force a restore */
2748 crtc_state
->mode_changed
= true;
2750 /* Attach plane to drm_atomic_state */
2751 plane_state
= drm_atomic_get_plane_state(state
, plane
);
2753 ret
= PTR_ERR_OR_ZERO(plane_state
);
2758 /* Call commit internally with the state we just constructed */
2759 ret
= drm_atomic_commit(state
);
2764 DRM_ERROR("Restoring old state failed with %i\n", ret
);
2765 drm_atomic_state_put(state
);
2771 * This function handles all cases when set mode does not come upon hotplug.
2772 * This includes when the same display is unplugged then plugged back into the
2773 * same port and when we are running without usermode desktop manager support
2775 void dm_restore_drm_connector_state(struct drm_device
*dev
, struct drm_connector
*connector
)
2777 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
2778 struct amdgpu_crtc
*disconnected_acrtc
;
2779 struct dm_crtc_state
*acrtc_state
;
/* Bail out early if there is no sink, no state or no encoder to restore. */
2781 if (!aconnector
->dc_sink
|| !connector
->state
|| !connector
->encoder
)
2784 disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
2785 acrtc_state
= to_dm_crtc_state(disconnected_acrtc
->base
.state
);
2787 if (!disconnected_acrtc
|| !acrtc_state
->stream
)
2791 * If the previous sink is not released and different from the current,
2792 * we deduce we are in a state where we can not rely on usermode call
2793 * to turn on the display, so we do it here
2795 if (acrtc_state
->stream
->sink
!= aconnector
->dc_sink
)
2796 dm_force_atomic_commit(&aconnector
->base
);
/*
 * add_val_sets_surface - attach @surface to the validation-set entry whose
 * stream matches @stream, at the first free surfaces[] slot, and bump that
 * entry's surface_count.  Returns the updated surface_count.
 *
 * NOTE(review): the set_count parameter and the no-match fallthrough are on
 * elided lines in this extraction — confirm against the full source.
 */
2799 static uint32_t add_val_sets_surface(
2800 struct dc_validation_set
*val_sets
,
2802 const struct dc_stream
*stream
,
2803 struct dc_surface
*surface
)
2805 uint32_t i
= 0, j
= 0;
/* Linear search for the entry driving @stream. */
2807 while (i
< set_count
) {
2808 if (val_sets
[i
].stream
== stream
) {
/* Advance j to the first unused surfaces[] slot. */
2809 while (val_sets
[i
].surfaces
[j
])
2816 val_sets
[i
].surfaces
[j
] = surface
;
2817 val_sets
[i
].surface_count
++;
2819 return val_sets
[i
].surface_count
;
/*
 * update_in_val_sets_stream - replace @old_stream with @new_stream in the
 * validation-set array.  If no entry matches @old_stream, a new entry is
 * appended (the append path is on elided lines) and set_count + 1 is
 * returned; otherwise the existing set_count is returned (on elided lines).
 *
 * NOTE(review): loop index declaration, the in-place-return, the append
 * body and the use of @crtc are all elided in this extraction.
 */
2822 static uint32_t update_in_val_sets_stream(
2823 struct dc_validation_set
*val_sets
,
2825 struct dc_stream
*old_stream
,
2826 struct dc_stream
*new_stream
,
2827 struct drm_crtc
*crtc
)
/* Find the entry currently holding old_stream. */
2831 while (i
< set_count
) {
2832 if (val_sets
[i
].stream
== old_stream
)
2837 val_sets
[i
].stream
= new_stream
;
2840 /* nothing found. add new one to the end */
2841 return set_count
+ 1;
/*
 * remove_from_val_sets - remove the entry whose stream matches @stream and
 * compact the array by shifting the following entries down one slot.
 *
 * NOTE(review): the set_count parameter/return, the not-found early return
 * and the likely set_count decrement before (or after) the shift loop are
 * on elided lines — as shown, the shift would read val_sets[set_count],
 * so confirm the bound against the full source.
 */
2846 static uint32_t remove_from_val_sets(
2847 struct dc_validation_set
*val_sets
,
2849 const struct dc_stream
*stream
)
/* Locate the matching entry. */
2853 for (i
= 0; i
< set_count
; i
++)
2854 if (val_sets
[i
].stream
== stream
)
/* Not found: nothing to remove. */
2857 if (i
== set_count
) {
/* Compact: shift the tail of the array down over the removed slot. */
2864 for (; i
< set_count
; i
++) {
2865 val_sets
[i
] = val_sets
[i
+ 1];
2872 * Grabs all modesetting locks to serialize against any blocking commits,
2873 * Waits for completion of all non blocking commits.
/*
 * NOTE(review): identifier spells "aquire" (sic); kept as-is since callers
 * reference this name.  Returns 0 on success or a negative errno from the
 * lock or the (interruptible, 10s-timeout) completion waits.  Error-return
 * checks after drm_modeset_lock_all_ctx and the commit NULL-check are on
 * elided lines in this extraction.
 */
2875 static int do_aquire_global_lock(
2876 struct drm_device
*dev
,
2877 struct drm_atomic_state
*state
)
2879 struct drm_crtc
*crtc
;
2880 struct drm_crtc_commit
*commit
;
2883 /* Adding all modeset locks to aquire_ctx will
2884 * ensure that when the framework release it the
2885 * extra locks we are locking here will get released to
2887 ret
= drm_modeset_lock_all_ctx(dev
, state
->acquire_ctx
);
2891 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
2892 spin_lock(&crtc
->commit_lock
);
/* Peek at the most recent commit on this CRTC (may be NULL). */
2893 commit
= list_first_entry_or_null(&crtc
->commit_list
,
2894 struct drm_crtc_commit
, commit_entry
);
/* Hold a reference so the commit outlives the unlocked waits below. */
2896 drm_crtc_commit_get(commit
);
2897 spin_unlock(&crtc
->commit_lock
);
2902 /* Make sure all pending HW programming completed and
2905 ret
= wait_for_completion_interruptible_timeout(&commit
->hw_done
, 10*HZ
);
2908 ret
= wait_for_completion_interruptible_timeout(
2909 &commit
->flip_done
, 10*HZ
);
2912 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
2913 "timed out\n", crtc
->base
.id
, crtc
->name
);
2915 drm_crtc_commit_put(commit
);
/* Negative ret == interrupted wait; timeout/success collapse to 0. */
2918 return ret
< 0 ? ret
: 0;
/*
 * amdgpu_dm_atomic_check - DM's atomic_check hook.  Visible phases:
 *   1. drm_atomic_helper_check_modeset().
 *   2. Seed the dc_validation_set array from currently committed streams
 *      (retaining each stream).
 *   3. Per changed CRTC: create the would-be stream for a modeset (and
 *      update the validation set), or drop the stream for a modereset;
 *      either path sets lock_and_validation_needed.
 *   4. Record scaling/underscan-only connector changes.
 *   5. Per plane: build a dc_surface, transfer it into dm_plane_state and
 *      attach it to the validation set (page-flip fast path skips this).
 *   6. drm_atomic_helper_check_planes(), then — when needed — take the
 *      global lock and build/validate a DC context for the new topology.
 *
 * NOTE(review): loop indices i/j, ret/set_count/pflip_needed declarations,
 * many if-bodies, "continue"/"goto fail" lines and closing braces are
 * elided in this extraction — confirm control flow against the full source.
 */
2921 int amdgpu_dm_atomic_check(struct drm_device
*dev
,
2922 struct drm_atomic_state
*state
)
2924 struct dm_atomic_state
*dm_state
;
2925 struct drm_crtc
*crtc
;
2926 struct drm_crtc_state
*crtc_state
;
2927 struct drm_plane
*plane
;
2928 struct drm_plane_state
*plane_state
;
2931 struct amdgpu_device
*adev
= dev
->dev_private
;
2932 struct dc
*dc
= adev
->dm
.dc
;
2933 struct drm_connector
*connector
;
2934 struct drm_connector_state
*conn_state
;
2936 struct dc_validation_set set
[MAX_STREAMS
] = { { 0 } };
2937 struct dm_crtc_state
*old_acrtc_state
, *new_acrtc_state
;
2940 * This bool will be set for true for any modeset/reset
2941 * or surface update which implies non fast surface update.
2943 bool lock_and_validation_needed
= false;
2945 ret
= drm_atomic_helper_check_modeset(dev
, state
);
2948 DRM_ERROR("Atomic state validation failed with error :%d !\n", ret
);
2952 dm_state
= to_dm_atomic_state(state
);
2954 /* copy existing configuration */
2956 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
2958 old_acrtc_state
= to_dm_crtc_state(crtc
->state
);
2960 if (old_acrtc_state
->stream
) {
/* Retain: the validation set holds a reference for the check. */
2961 dc_stream_retain(old_acrtc_state
->stream
);
2962 set
[set_count
].stream
= old_acrtc_state
->stream
;
2967 /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
2968 /* update changed items */
2969 for_each_crtc_in_state(state
, crtc
, crtc_state
, i
) {
2970 struct amdgpu_crtc
*acrtc
= NULL
;
2971 struct amdgpu_connector
*aconnector
= NULL
;
2972 old_acrtc_state
= to_dm_crtc_state(crtc
->state
);
2973 new_acrtc_state
= to_dm_crtc_state(crtc_state
);
2974 acrtc
= to_amdgpu_crtc(crtc
);
2976 aconnector
= amdgpu_dm_find_first_crct_matching_connector(state
, crtc
, true);
2979 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
2980 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
2981 "connectors_changed:%d\n",
2985 crtc_state
->planes_changed
,
2986 crtc_state
->mode_changed
,
2987 crtc_state
->active_changed
,
2988 crtc_state
->connectors_changed
);
2990 if (modeset_required(crtc_state
)) {
2992 struct dc_stream
*new_stream
= NULL
;
2993 struct drm_connector_state
*conn_state
= NULL
;
2994 struct dm_connector_state
*dm_conn_state
= NULL
;
2997 conn_state
= drm_atomic_get_connector_state(state
, &aconnector
->base
);
2998 if (IS_ERR(conn_state
)) {
2999 ret
= PTR_ERR_OR_ZERO(conn_state
);
3003 dm_conn_state
= to_dm_connector_state(conn_state
);
/* Build the stream this CRTC would drive after the modeset. */
3006 new_stream
= create_stream_for_sink(aconnector
, &crtc_state
->mode
, dm_conn_state
);
3009 * we can have no stream on ACTION_SET if a display
3010 * was disconnected during S3, in this case it not and
3011 * error, the OS will be updated after detection, and
3012 * do the right thing on next atomic commit
3015 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
3016 __func__
, acrtc
->base
.base
.id
);
3020 if (new_acrtc_state
->stream
)
3021 dc_stream_release(new_acrtc_state
->stream
);
3023 new_acrtc_state
->stream
= new_stream
;
3025 set_count
= update_in_val_sets_stream(
3028 old_acrtc_state
->stream
,
3029 new_acrtc_state
->stream
,
3032 lock_and_validation_needed
= true;
3034 } else if (modereset_required(crtc_state
)) {
3036 /* i.e. reset mode */
3037 if (new_acrtc_state
->stream
) {
3038 set_count
= remove_from_val_sets(
3041 new_acrtc_state
->stream
);
3043 dc_stream_release(new_acrtc_state
->stream
);
3044 new_acrtc_state
->stream
= NULL
;
3046 lock_and_validation_needed
= true;
3052 * Hack: Commit needs planes right now, specifically for gamma
3053 * TODO rework commit to check CRTC for gamma change
3055 if (crtc_state
->color_mgmt_changed
) {
3057 ret
= drm_atomic_add_affected_planes(state
, crtc
);
3063 /* Check scaling and underscan changes*/
3064 /*TODO Removed scaling changes validation due to inability to commit
3065 * new stream into context w\o causing full reset. Need to
3066 * decide how to handle.
3068 for_each_connector_in_state(state
, connector
, conn_state
, i
) {
3069 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
3070 struct dm_connector_state
*con_old_state
=
3071 to_dm_connector_state(aconnector
->base
.state
);
3072 struct dm_connector_state
*con_new_state
=
3073 to_dm_connector_state(conn_state
);
3074 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(con_new_state
->base
.crtc
);
3076 /* Skip any modesets/resets */
3077 if (!acrtc
|| drm_atomic_crtc_needs_modeset(acrtc
->base
.state
))
3080 /* Skip any thing not scale or underscan changes */
3081 if (!is_scaling_state_different(con_new_state
, con_old_state
))
3084 lock_and_validation_needed
= true;
3087 for_each_crtc_in_state(state
, crtc
, crtc_state
, i
) {
3088 new_acrtc_state
= to_dm_crtc_state(crtc_state
);
3090 for_each_plane_in_state(state
, plane
, plane_state
, j
) {
3091 struct drm_crtc
*plane_crtc
= plane_state
->crtc
;
3092 struct drm_framebuffer
*fb
= plane_state
->fb
;
3094 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(plane_state
);
3096 /*TODO Implement atomic check for cursor plane */
3097 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
3100 if (!fb
|| !plane_crtc
|| crtc
!= plane_crtc
|| !crtc_state
->active
)
3103 WARN_ON(!new_acrtc_state
->stream
);
/* allow_modeset implies a full update rather than a fast flip. */
3105 pflip_needed
= !state
->allow_modeset
;
3106 if (!pflip_needed
) {
3107 struct dc_surface
*surface
;
3109 surface
= dc_create_surface(dc
);
3111 ret
= fill_plane_attributes(
3112 plane_crtc
->dev
->dev_private
,
/* Transfer ownership of the new surface into the plane state. */
3121 if (dm_plane_state
->surface
)
3122 dc_surface_release(dm_plane_state
->surface
);
3124 dm_plane_state
->surface
= surface
;
3126 add_val_sets_surface(set
,
3128 new_acrtc_state
->stream
,
3131 lock_and_validation_needed
= true;
3136 /* Run this here since we want to validate the streams we created */
3137 ret
= drm_atomic_helper_check_planes(dev
, state
);
3142 * For full updates case when
3143 * removing/adding/updating streams on once CRTC while flipping
3145 * acquiring global lock will guarantee that any such full
3147 * will wait for completion of any outstanding flip using DRMs
3148 * synchronization events.
3151 if (lock_and_validation_needed
) {
3153 ret
= do_aquire_global_lock(dev
, state
);
3156 WARN_ON(dm_state
->context
);
/* Validate the proposed topology; the context is consumed by commit. */
3157 dm_state
->context
= dc_get_validate_context(dc
, set
, set_count
);
3158 if (!dm_state
->context
) {
3164 /* Must be success */
3169 if (ret
== -EDEADLK
)
3170 DRM_DEBUG_KMS("Atomic check stopped due to to deadlock.\n");
3171 else if (ret
== -EINTR
|| ret
== -EAGAIN
|| ret
== -ERESTARTSYS
)
3172 DRM_DEBUG_KMS("Atomic check stopped due to to signal.\n");
3174 DRM_ERROR("Atomic check failed with err: %d .\n", ret
);
/*
 * is_dp_capable_without_timing_msa - read DPCD register
 * DP_DOWN_STREAM_PORT_COUNT and report whether the sink sets the
 * DP_MSA_TIMING_PAR_IGNORED bit (i.e. it can ignore MSA timing
 * parameters — a prerequisite for variable refresh / freesync over DP).
 *
 * NOTE(review): the dpcd_data buffer declaration and the first
 * dm_helpers_dp_read_dpcd() arguments are on elided lines.
 */
3179 static bool is_dp_capable_without_timing_msa(
3181 struct amdgpu_connector
*amdgpu_connector
)
3184 bool capable
= false;
3186 if (amdgpu_connector
->dc_link
&&
3187 dm_helpers_dp_read_dpcd(
3189 amdgpu_connector
->dc_link
,
3190 DP_DOWN_STREAM_PORT_COUNT
,
3192 sizeof(dpcd_data
))) {
3193 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
/*
 * amdgpu_dm_add_sink_to_freesync_module - harvest freesync capabilities
 * from a sink's EDID and record them on the amdgpu_connector.
 *
 * For DP/eDP sinks that ignore MSA timing parameters (see
 * is_dp_capable_without_timing_msa), scan the four EDID detailed-timing
 * descriptors for a monitor-range descriptor (flags == 1, i.e. range
 * limits only) and store min/max vertical refresh and the pixel clock.
 * If the refresh range exceeds 10 Hz, fill connector->caps with
 * micro-hertz refresh bounds and mark freesync supported.
 *
 * NOTE(review): the edid parameter, loop index i, val_capable usage and
 * several early returns are on elided lines in this extraction.
 */
3198 void amdgpu_dm_add_sink_to_freesync_module(
3199 struct drm_connector
*connector
,
3203 uint64_t val_capable
;
3204 bool edid_check_required
;
3205 struct detailed_timing
*timing
;
3206 struct detailed_non_pixel
*data
;
3207 struct detailed_data_monitor_range
*range
;
3208 struct amdgpu_connector
*amdgpu_connector
=
3209 to_amdgpu_connector(connector
);
3211 struct drm_device
*dev
= connector
->dev
;
3212 struct amdgpu_device
*adev
= dev
->dev_private
;
3213 edid_check_required
= false;
3214 if (!amdgpu_connector
->dc_sink
) {
3215 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
3218 if (!adev
->dm
.freesync_module
)
3221 * if edid non zero restrict freesync only for dp and edp
3224 if (amdgpu_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
3225 || amdgpu_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
3226 edid_check_required
= is_dp_capable_without_timing_msa(
/* Only EDID 1.1+ carries the descriptor blocks we parse below. */
3232 if (edid_check_required
== true && (edid
->version
> 1 ||
3233 (edid
->version
== 1 && edid
->revision
> 1))) {
3234 for (i
= 0; i
< 4; i
++) {
3236 timing
= &edid
->detailed_timings
[i
];
3237 data
= &timing
->data
.other_data
;
3238 range
= &data
->data
.range
;
3240 * Check if monitor has continuous frequency mode
3242 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
3245 * Check for flag range limits only. If flag == 1 then
3246 * no additional timing information provided.
3247 * Default GTF, GTF Secondary curve and CVT are not
3250 if (range
->flags
!= 1)
3253 amdgpu_connector
->min_vfreq
= range
->min_vfreq
;
3254 amdgpu_connector
->max_vfreq
= range
->max_vfreq
;
/* EDID stores pixel clock in 10 MHz units; convert to MHz*10. */
3255 amdgpu_connector
->pixel_clock_mhz
=
3256 range
->pixel_clock_mhz
* 10;
/* A usable VRR window must span more than 10 Hz. */
3260 if (amdgpu_connector
->max_vfreq
-
3261 amdgpu_connector
->min_vfreq
> 10) {
3262 amdgpu_connector
->caps
.supported
= true;
3263 amdgpu_connector
->caps
.min_refresh_in_micro_hz
=
3264 amdgpu_connector
->min_vfreq
* 1000000;
3265 amdgpu_connector
->caps
.max_refresh_in_micro_hz
=
3266 amdgpu_connector
->max_vfreq
* 1000000;
3272 * TODO figure out how to notify user-mode or DRM of freesync caps
3273 * once we figure out how to deal with freesync in an upstreamable
3279 void amdgpu_dm_remove_sink_from_freesync_module(
3280 struct drm_connector
*connector
)
3283 * TODO fill in once we figure out how to deal with freesync in
3284 * an upstreamable fashion