 * Copyright 2012-13 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
#include <linux/types.h>
#include <linux/version.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_edid.h>

#include "amdgpu_pm.h"
#include "dm_services_types.h"

// We need to #undef FRAME_SIZE and DEPRECATED because they conflict
// with ptrace-abi.h's #define's of them.

#include "amdgpu_dm_types.h"
#include "amdgpu_dm_mst_types.h"

#include "modules/inc/mod_freesync.h"
51 struct dm_connector_state
{
52 struct drm_connector_state base
;
54 enum amdgpu_rmx_type scaling
;
55 uint8_t underscan_vborder
;
56 uint8_t underscan_hborder
;
57 bool underscan_enable
;
60 #define to_dm_connector_state(x)\
61 container_of((x), struct dm_connector_state, base)
64 void amdgpu_dm_encoder_destroy(struct drm_encoder
*encoder
)
66 drm_encoder_cleanup(encoder
);
70 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
71 .destroy
= amdgpu_dm_encoder_destroy
,
74 static void dm_set_cursor(
75 struct amdgpu_crtc
*amdgpu_crtc
,
80 struct dc_cursor_attributes attributes
;
81 struct dc_cursor_position position
;
82 struct drm_crtc
*crtc
= &amdgpu_crtc
->base
;
84 int xorigin
= 0, yorigin
= 0;
86 amdgpu_crtc
->cursor_width
= width
;
87 amdgpu_crtc
->cursor_height
= height
;
89 attributes
.address
.high_part
= upper_32_bits(gpu_addr
);
90 attributes
.address
.low_part
= lower_32_bits(gpu_addr
);
91 attributes
.width
= width
;
92 attributes
.height
= height
;
95 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
96 attributes
.rotation_angle
= 0;
97 attributes
.attribute_flags
.value
= 0;
99 attributes
.pitch
= attributes
.width
;
101 x
= amdgpu_crtc
->cursor_x
;
102 y
= amdgpu_crtc
->cursor_y
;
104 /* avivo cursor are offset into the total surface */
105 x
+= crtc
->primary
->state
->src_x
>> 16;
106 y
+= crtc
->primary
->state
->src_y
>> 16;
109 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
113 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
117 position
.enable
= true;
121 position
.hot_spot_enable
= true;
122 position
.x_hotspot
= xorigin
;
123 position
.y_hotspot
= yorigin
;
125 if (!dc_stream_set_cursor_attributes(
128 DRM_ERROR("DC failed to set cursor attributes\n");
131 if (!dc_stream_set_cursor_position(
134 DRM_ERROR("DC failed to set cursor position\n");
138 static int dm_crtc_unpin_cursor_bo_old(
139 struct amdgpu_crtc
*amdgpu_crtc
)
141 struct amdgpu_bo
*robj
;
144 if (NULL
!= amdgpu_crtc
&& NULL
!= amdgpu_crtc
->cursor_bo
) {
145 robj
= gem_to_amdgpu_bo(amdgpu_crtc
->cursor_bo
);
147 ret
= amdgpu_bo_reserve(robj
, false);
149 if (likely(ret
== 0)) {
150 ret
= amdgpu_bo_unpin(robj
);
152 if (unlikely(ret
!= 0)) {
154 "%s: unpin failed (ret=%d), bo %p\n",
157 amdgpu_crtc
->cursor_bo
);
160 amdgpu_bo_unreserve(robj
);
163 "%s: reserve failed (ret=%d), bo %p\n",
166 amdgpu_crtc
->cursor_bo
);
169 drm_gem_object_unreference_unlocked(amdgpu_crtc
->cursor_bo
);
170 amdgpu_crtc
->cursor_bo
= NULL
;
176 static int dm_crtc_pin_cursor_bo_new(
177 struct drm_crtc
*crtc
,
178 struct drm_file
*file_priv
,
180 struct amdgpu_bo
**ret_obj
)
182 struct amdgpu_crtc
*amdgpu_crtc
;
183 struct amdgpu_bo
*robj
;
184 struct drm_gem_object
*obj
;
188 struct drm_device
*dev
= crtc
->dev
;
189 struct amdgpu_device
*adev
= dev
->dev_private
;
192 amdgpu_crtc
= to_amdgpu_crtc(crtc
);
194 obj
= drm_gem_object_lookup(file_priv
, handle
);
198 "Cannot find cursor object %x for crtc %d\n",
200 amdgpu_crtc
->crtc_id
);
203 robj
= gem_to_amdgpu_bo(obj
);
205 ret
= amdgpu_bo_reserve(robj
, false);
207 if (unlikely(ret
!= 0)) {
208 drm_gem_object_unreference_unlocked(obj
);
209 DRM_ERROR("dm_crtc_pin_cursor_bo_new ret %x, handle %x\n",
214 ret
= amdgpu_bo_pin_restricted(robj
, AMDGPU_GEM_DOMAIN_VRAM
, 0,
215 adev
->mc
.visible_vram_size
,
219 amdgpu_crtc
->cursor_addr
= gpu_addr
;
222 amdgpu_bo_unreserve(robj
);
224 drm_gem_object_unreference_unlocked(obj
);
232 static int dm_crtc_cursor_set(
233 struct drm_crtc
*crtc
,
234 struct drm_file
*file_priv
,
239 struct amdgpu_bo
*new_cursor_bo
;
240 struct dc_cursor_position position
;
244 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
247 new_cursor_bo
= NULL
;
250 "%s: crtc_id=%d with handle %d and size %d to %d, bo_object %p\n",
252 amdgpu_crtc
->crtc_id
,
256 amdgpu_crtc
->cursor_bo
);
259 /* turn off cursor */
260 position
.enable
= false;
263 position
.hot_spot_enable
= false;
265 if (amdgpu_crtc
->stream
) {
266 /*set cursor visible false*/
267 dc_stream_set_cursor_position(
271 /*unpin old cursor buffer and update cache*/
272 ret
= dm_crtc_unpin_cursor_bo_old(amdgpu_crtc
);
277 if ((width
> amdgpu_crtc
->max_cursor_width
) ||
278 (height
> amdgpu_crtc
->max_cursor_height
)) {
280 "%s: bad cursor width or height %d x %d\n",
286 /*try to pin new cursor bo*/
287 ret
= dm_crtc_pin_cursor_bo_new(crtc
, file_priv
, handle
, &new_cursor_bo
);
288 /*if map not successful then return an error*/
292 /*program new cursor bo to hardware*/
293 dm_set_cursor(amdgpu_crtc
, amdgpu_crtc
->cursor_addr
, width
, height
);
295 /*un map old, not used anymore cursor bo ,
296 * return memory and mapping back */
297 dm_crtc_unpin_cursor_bo_old(amdgpu_crtc
);
299 /*assign new cursor bo to our internal cache*/
300 amdgpu_crtc
->cursor_bo
= &new_cursor_bo
->gem_base
;
307 static int dm_crtc_cursor_move(struct drm_crtc
*crtc
,
310 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
311 int xorigin
= 0, yorigin
= 0;
312 struct dc_cursor_position position
;
314 amdgpu_crtc
->cursor_x
= x
;
315 amdgpu_crtc
->cursor_y
= y
;
317 /* avivo cursor are offset into the total surface */
318 x
+= crtc
->primary
->state
->src_x
>> 16;
319 y
+= crtc
->primary
->state
->src_y
>> 16;
322 * TODO: for cursor debugging unguard the following
326 "%s: x %d y %d c->x %d c->y %d\n",
335 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
339 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
343 position
.enable
= true;
347 position
.hot_spot_enable
= true;
348 position
.x_hotspot
= xorigin
;
349 position
.y_hotspot
= yorigin
;
351 if (amdgpu_crtc
->stream
) {
352 if (!dc_stream_set_cursor_position(
355 DRM_ERROR("DC failed to set cursor position\n");
363 static void dm_crtc_cursor_reset(struct drm_crtc
*crtc
)
365 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
368 "%s: with cursor_bo %p\n",
370 amdgpu_crtc
->cursor_bo
);
372 if (amdgpu_crtc
->cursor_bo
&& amdgpu_crtc
->stream
) {
375 amdgpu_crtc
->cursor_addr
,
376 amdgpu_crtc
->cursor_width
,
377 amdgpu_crtc
->cursor_height
);
380 static bool fill_rects_from_plane_state(
381 const struct drm_plane_state
*state
,
382 struct dc_surface
*surface
)
384 surface
->src_rect
.x
= state
->src_x
>> 16;
385 surface
->src_rect
.y
= state
->src_y
>> 16;
386 /*we ignore for now mantissa and do not to deal with floating pixels :(*/
387 surface
->src_rect
.width
= state
->src_w
>> 16;
389 if (surface
->src_rect
.width
== 0)
392 surface
->src_rect
.height
= state
->src_h
>> 16;
393 if (surface
->src_rect
.height
== 0)
396 surface
->dst_rect
.x
= state
->crtc_x
;
397 surface
->dst_rect
.y
= state
->crtc_y
;
399 if (state
->crtc_w
== 0)
402 surface
->dst_rect
.width
= state
->crtc_w
;
404 if (state
->crtc_h
== 0)
407 surface
->dst_rect
.height
= state
->crtc_h
;
409 surface
->clip_rect
= surface
->dst_rect
;
411 switch (state
->rotation
& DRM_MODE_ROTATE_MASK
) {
412 case DRM_MODE_ROTATE_0
:
413 surface
->rotation
= ROTATION_ANGLE_0
;
415 case DRM_MODE_ROTATE_90
:
416 surface
->rotation
= ROTATION_ANGLE_90
;
418 case DRM_MODE_ROTATE_180
:
419 surface
->rotation
= ROTATION_ANGLE_180
;
421 case DRM_MODE_ROTATE_270
:
422 surface
->rotation
= ROTATION_ANGLE_270
;
425 surface
->rotation
= ROTATION_ANGLE_0
;
431 static bool get_fb_info(
432 const struct amdgpu_framebuffer
*amdgpu_fb
,
433 uint64_t *tiling_flags
,
434 uint64_t *fb_location
)
436 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->obj
);
437 int r
= amdgpu_bo_reserve(rbo
, false);
438 if (unlikely(r
!= 0)){
439 DRM_ERROR("Unable to reserve buffer\n");
444 *fb_location
= amdgpu_bo_gpu_offset(rbo
);
447 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
449 amdgpu_bo_unreserve(rbo
);
453 static void fill_plane_attributes_from_fb(
454 struct amdgpu_device
*adev
,
455 struct dc_surface
*surface
,
456 const struct amdgpu_framebuffer
*amdgpu_fb
, bool addReq
)
458 uint64_t tiling_flags
;
459 uint64_t fb_location
= 0;
460 const struct drm_framebuffer
*fb
= &amdgpu_fb
->base
;
461 struct drm_format_name_buf format_name
;
466 addReq
== true ? &fb_location
:NULL
);
468 surface
->address
.type
= PLN_ADDR_TYPE_GRAPHICS
;
469 surface
->address
.grph
.addr
.low_part
= lower_32_bits(fb_location
);
470 surface
->address
.grph
.addr
.high_part
= upper_32_bits(fb_location
);
472 switch (fb
->format
->format
) {
474 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
476 case DRM_FORMAT_RGB565
:
477 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
479 case DRM_FORMAT_XRGB8888
:
480 case DRM_FORMAT_ARGB8888
:
481 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
483 case DRM_FORMAT_XRGB2101010
:
484 case DRM_FORMAT_ARGB2101010
:
485 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
487 case DRM_FORMAT_XBGR2101010
:
488 case DRM_FORMAT_ABGR2101010
:
489 surface
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
492 DRM_ERROR("Unsupported screen format %s\n",
493 drm_get_format_name(fb
->format
->format
, &format_name
));
497 memset(&surface
->tiling_info
, 0, sizeof(surface
->tiling_info
));
499 /* Fill GFX params */
500 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
)
502 unsigned bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
504 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
505 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
506 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
507 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
508 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
510 /* XXX fix me for VI */
511 surface
->tiling_info
.gfx8
.num_banks
= num_banks
;
512 surface
->tiling_info
.gfx8
.array_mode
=
513 DC_ARRAY_2D_TILED_THIN1
;
514 surface
->tiling_info
.gfx8
.tile_split
= tile_split
;
515 surface
->tiling_info
.gfx8
.bank_width
= bankw
;
516 surface
->tiling_info
.gfx8
.bank_height
= bankh
;
517 surface
->tiling_info
.gfx8
.tile_aspect
= mtaspect
;
518 surface
->tiling_info
.gfx8
.tile_mode
=
519 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
520 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
521 == DC_ARRAY_1D_TILED_THIN1
) {
522 surface
->tiling_info
.gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
525 surface
->tiling_info
.gfx8
.pipe_config
=
526 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
528 if (adev
->asic_type
== CHIP_VEGA10
) {
529 /* Fill GFX9 params */
530 surface
->tiling_info
.gfx9
.num_pipes
=
531 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
532 surface
->tiling_info
.gfx9
.num_banks
=
533 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
534 surface
->tiling_info
.gfx9
.pipe_interleave
=
535 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
536 surface
->tiling_info
.gfx9
.num_shader_engines
=
537 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
538 surface
->tiling_info
.gfx9
.max_compressed_frags
=
539 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
540 surface
->tiling_info
.gfx9
.num_rb_per_se
=
541 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
542 surface
->tiling_info
.gfx9
.swizzle
=
543 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
544 surface
->tiling_info
.gfx9
.shaderEnable
= 1;
548 surface
->plane_size
.grph
.surface_size
.x
= 0;
549 surface
->plane_size
.grph
.surface_size
.y
= 0;
550 surface
->plane_size
.grph
.surface_size
.width
= fb
->width
;
551 surface
->plane_size
.grph
.surface_size
.height
= fb
->height
;
552 surface
->plane_size
.grph
.surface_pitch
=
553 fb
->pitches
[0] / fb
->format
->cpp
[0];
555 surface
->visible
= true;
556 surface
->scaling_quality
.h_taps_c
= 0;
557 surface
->scaling_quality
.v_taps_c
= 0;
559 /* TODO: unhardcode */
560 surface
->color_space
= COLOR_SPACE_SRGB
;
561 /* is this needed? is surface zeroed at allocation? */
562 surface
->scaling_quality
.h_taps
= 0;
563 surface
->scaling_quality
.v_taps
= 0;
564 surface
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
568 #define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256
570 static void fill_gamma_from_crtc(
571 const struct drm_crtc
*crtc
,
572 struct dc_surface
*dc_surface
)
575 struct dc_gamma
*gamma
;
576 struct drm_crtc_state
*state
= crtc
->state
;
577 struct drm_color_lut
*lut
= (struct drm_color_lut
*) state
->gamma_lut
->data
;
579 gamma
= dc_create_gamma();
584 for (i
= 0; i
< NUM_OF_RAW_GAMMA_RAMP_RGB_256
; i
++) {
585 gamma
->red
[i
] = lut
[i
].red
;
586 gamma
->green
[i
] = lut
[i
].green
;
587 gamma
->blue
[i
] = lut
[i
].blue
;
590 dc_surface
->gamma_correction
= gamma
;
593 static void fill_plane_attributes(
594 struct amdgpu_device
*adev
,
595 struct dc_surface
*surface
,
596 struct drm_plane_state
*state
, bool addrReq
)
598 const struct amdgpu_framebuffer
*amdgpu_fb
=
599 to_amdgpu_framebuffer(state
->fb
);
600 const struct drm_crtc
*crtc
= state
->crtc
;
601 struct dc_transfer_func
*input_tf
;
603 fill_rects_from_plane_state(state
, surface
);
604 fill_plane_attributes_from_fb(
605 crtc
->dev
->dev_private
,
610 input_tf
= dc_create_transfer_func();
612 if (input_tf
== NULL
)
615 input_tf
->type
= TF_TYPE_PREDEFINED
;
616 input_tf
->tf
= TRANSFER_FUNCTION_SRGB
;
618 surface
->in_transfer_func
= input_tf
;
620 /* In case of gamma set, update gamma value */
621 if (state
->crtc
->state
->gamma_lut
) {
622 fill_gamma_from_crtc(crtc
, surface
);
626 /*****************************************************************************/
628 struct amdgpu_connector
*aconnector_from_drm_crtc_id(
629 const struct drm_crtc
*crtc
)
631 struct drm_device
*dev
= crtc
->dev
;
632 struct drm_connector
*connector
;
633 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
634 struct amdgpu_connector
*aconnector
;
636 list_for_each_entry(connector
,
637 &dev
->mode_config
.connector_list
, head
) {
639 aconnector
= to_amdgpu_connector(connector
);
641 if (aconnector
->base
.state
->crtc
!= &acrtc
->base
)
644 /* Found the connector */
648 /* If we get here, not found. */
652 static void update_stream_scaling_settings(
653 const struct drm_display_mode
*mode
,
654 const struct dm_connector_state
*dm_state
,
655 const struct dc_stream
*stream
)
657 struct amdgpu_device
*adev
= dm_state
->base
.crtc
->dev
->dev_private
;
658 enum amdgpu_rmx_type rmx_type
;
660 struct rect src
= { 0 }; /* viewport in composition space*/
661 struct rect dst
= { 0 }; /* stream addressable area */
663 /* no mode. nothing to be done */
667 /* Full screen scaling by default */
668 src
.width
= mode
->hdisplay
;
669 src
.height
= mode
->vdisplay
;
670 dst
.width
= stream
->timing
.h_addressable
;
671 dst
.height
= stream
->timing
.v_addressable
;
673 rmx_type
= dm_state
->scaling
;
674 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
675 if (src
.width
* dst
.height
<
676 src
.height
* dst
.width
) {
677 /* height needs less upscaling/more downscaling */
678 dst
.width
= src
.width
*
679 dst
.height
/ src
.height
;
681 /* width needs less upscaling/more downscaling */
682 dst
.height
= src
.height
*
683 dst
.width
/ src
.width
;
685 } else if (rmx_type
== RMX_CENTER
) {
689 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
690 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
692 if (dm_state
->underscan_enable
) {
693 dst
.x
+= dm_state
->underscan_hborder
/ 2;
694 dst
.y
+= dm_state
->underscan_vborder
/ 2;
695 dst
.width
-= dm_state
->underscan_hborder
;
696 dst
.height
-= dm_state
->underscan_vborder
;
699 adev
->dm
.dc
->stream_funcs
.stream_update_scaling(adev
->dm
.dc
, stream
, &src
, &dst
);
701 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
702 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
706 static void dm_dc_surface_commit(
708 struct drm_crtc
*crtc
)
710 struct dc_surface
*dc_surface
;
711 const struct dc_surface
*dc_surfaces
[1];
712 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
713 const struct dc_stream
*dc_stream
= acrtc
->stream
;
716 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
717 if (acrtc
->pflip_status
!= AMDGPU_FLIP_NONE
) {
718 DRM_ERROR("dm_dc_surface_commit: acrtc %d, already busy\n", acrtc
->crtc_id
);
719 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
720 /* In comit tail framework this cannot happen */
723 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
727 "%s: Failed to obtain stream on crtc (%d)!\n",
733 dc_surface
= dc_create_surface(dc
);
737 "%s: Failed to create a surface!\n",
742 /* Surface programming */
743 fill_plane_attributes(
744 crtc
->dev
->dev_private
,
746 crtc
->primary
->state
,
749 dc_surfaces
[0] = dc_surface
;
751 if (false == dc_commit_surfaces_to_stream(
757 "%s: Failed to attach surface!\n",
761 dc_surface_release(dc_surface
);
766 static enum dc_color_depth
convert_color_depth_from_display_info(
767 const struct drm_connector
*connector
)
769 uint32_t bpc
= connector
->display_info
.bpc
;
771 /* Limited color depth to 8bit
772 * TODO: Still need to handle deep color*/
778 /* Temporary Work around, DRM don't parse color depth for
779 * EDID revision before 1.4
780 * TODO: Fix edid parsing
782 return COLOR_DEPTH_888
;
784 return COLOR_DEPTH_666
;
786 return COLOR_DEPTH_888
;
788 return COLOR_DEPTH_101010
;
790 return COLOR_DEPTH_121212
;
792 return COLOR_DEPTH_141414
;
794 return COLOR_DEPTH_161616
;
796 return COLOR_DEPTH_UNDEFINED
;
800 static enum dc_aspect_ratio
get_aspect_ratio(
801 const struct drm_display_mode
*mode_in
)
803 int32_t width
= mode_in
->crtc_hdisplay
* 9;
804 int32_t height
= mode_in
->crtc_vdisplay
* 16;
805 if ((width
- height
) < 10 && (width
- height
) > -10)
806 return ASPECT_RATIO_16_9
;
808 return ASPECT_RATIO_4_3
;
811 static enum dc_color_space
get_output_color_space(
812 const struct dc_crtc_timing
*dc_crtc_timing
)
814 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
816 switch (dc_crtc_timing
->pixel_encoding
) {
817 case PIXEL_ENCODING_YCBCR422
:
818 case PIXEL_ENCODING_YCBCR444
:
819 case PIXEL_ENCODING_YCBCR420
:
822 * 27030khz is the separation point between HDTV and SDTV
823 * according to HDMI spec, we use YCbCr709 and YCbCr601
826 if (dc_crtc_timing
->pix_clk_khz
> 27030) {
827 if (dc_crtc_timing
->flags
.Y_ONLY
)
829 COLOR_SPACE_YCBCR709_LIMITED
;
831 color_space
= COLOR_SPACE_YCBCR709
;
833 if (dc_crtc_timing
->flags
.Y_ONLY
)
835 COLOR_SPACE_YCBCR601_LIMITED
;
837 color_space
= COLOR_SPACE_YCBCR601
;
842 case PIXEL_ENCODING_RGB
:
843 color_space
= COLOR_SPACE_SRGB
;
854 /*****************************************************************************/
856 static void fill_stream_properties_from_drm_display_mode(
857 struct dc_stream
*stream
,
858 const struct drm_display_mode
*mode_in
,
859 const struct drm_connector
*connector
)
861 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
862 memset(timing_out
, 0, sizeof(struct dc_crtc_timing
));
864 timing_out
->h_border_left
= 0;
865 timing_out
->h_border_right
= 0;
866 timing_out
->v_border_top
= 0;
867 timing_out
->v_border_bottom
= 0;
868 /* TODO: un-hardcode */
870 if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
871 && stream
->sink
->sink_signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
872 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
874 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
876 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
877 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
879 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
880 timing_out
->hdmi_vic
= 0;
881 timing_out
->vic
= drm_match_cea_mode(mode_in
);
883 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
884 timing_out
->h_total
= mode_in
->crtc_htotal
;
885 timing_out
->h_sync_width
=
886 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
887 timing_out
->h_front_porch
=
888 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
889 timing_out
->v_total
= mode_in
->crtc_vtotal
;
890 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
891 timing_out
->v_front_porch
=
892 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
893 timing_out
->v_sync_width
=
894 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
895 timing_out
->pix_clk_khz
= mode_in
->crtc_clock
;
896 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
897 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
898 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
899 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
900 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
902 stream
->output_color_space
= get_output_color_space(timing_out
);
905 struct dc_transfer_func
*tf
= dc_create_transfer_func();
906 tf
->type
= TF_TYPE_PREDEFINED
;
907 tf
->tf
= TRANSFER_FUNCTION_SRGB
;
908 stream
->out_transfer_func
= tf
;
912 static void fill_audio_info(
913 struct audio_info
*audio_info
,
914 const struct drm_connector
*drm_connector
,
915 const struct dc_sink
*dc_sink
)
918 int cea_revision
= 0;
919 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
921 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
922 audio_info
->product_id
= edid_caps
->product_id
;
924 cea_revision
= drm_connector
->display_info
.cea_rev
;
926 while (i
< AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
&&
927 edid_caps
->display_name
[i
]) {
928 audio_info
->display_name
[i
] = edid_caps
->display_name
[i
];
932 if(cea_revision
>= 3) {
933 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
935 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
936 audio_info
->modes
[i
].format_code
=
937 (enum audio_format_code
)
938 (edid_caps
->audio_modes
[i
].format_code
);
939 audio_info
->modes
[i
].channel_count
=
940 edid_caps
->audio_modes
[i
].channel_count
;
941 audio_info
->modes
[i
].sample_rates
.all
=
942 edid_caps
->audio_modes
[i
].sample_rate
;
943 audio_info
->modes
[i
].sample_size
=
944 edid_caps
->audio_modes
[i
].sample_size
;
948 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
950 /* TODO: We only check for the progressive mode, check for interlace mode too */
951 if(drm_connector
->latency_present
[0]) {
952 audio_info
->video_latency
= drm_connector
->video_latency
[0];
953 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
956 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
960 static void copy_crtc_timing_for_drm_display_mode(
961 const struct drm_display_mode
*src_mode
,
962 struct drm_display_mode
*dst_mode
)
964 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
965 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
966 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
967 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
968 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
969 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
970 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
971 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
972 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
973 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
974 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
975 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
976 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
977 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
980 static void decide_crtc_timing_for_drm_display_mode(
981 struct drm_display_mode
*drm_mode
,
982 const struct drm_display_mode
*native_mode
,
986 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
987 } else if (native_mode
->clock
== drm_mode
->clock
&&
988 native_mode
->htotal
== drm_mode
->htotal
&&
989 native_mode
->vtotal
== drm_mode
->vtotal
) {
990 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
992 /* no scaling nor amdgpu inserted, no need to patch */
996 static struct dc_stream
*create_stream_for_sink(
997 struct amdgpu_connector
*aconnector
,
998 const struct drm_display_mode
*drm_mode
,
999 const struct dm_connector_state
*dm_state
)
1001 struct drm_display_mode
*preferred_mode
= NULL
;
1002 const struct drm_connector
*drm_connector
;
1003 struct dc_stream
*stream
= NULL
;
1004 struct drm_display_mode mode
= *drm_mode
;
1005 bool native_mode_found
= false;
1007 if (NULL
== aconnector
) {
1008 DRM_ERROR("aconnector is NULL!\n");
1009 goto drm_connector_null
;
1012 if (NULL
== dm_state
) {
1013 DRM_ERROR("dm_state is NULL!\n");
1017 drm_connector
= &aconnector
->base
;
1018 stream
= dc_create_stream_for_sink(aconnector
->dc_sink
);
1020 if (NULL
== stream
) {
1021 DRM_ERROR("Failed to create stream for sink!\n");
1022 goto stream_create_fail
;
1025 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
1026 /* Search for preferred mode */
1027 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
1028 native_mode_found
= true;
1032 if (!native_mode_found
)
1033 preferred_mode
= list_first_entry_or_null(
1034 &aconnector
->base
.modes
,
1035 struct drm_display_mode
,
1038 if (NULL
== preferred_mode
) {
1039 /* This may not be an error, the use case is when we we have no
1040 * usermode calls to reset and set mode upon hotplug. In this
1041 * case, we call set mode ourselves to restore the previous mode
1042 * and the modelist may not be filled in in time.
1044 DRM_INFO("No preferred mode found\n");
1046 decide_crtc_timing_for_drm_display_mode(
1047 &mode
, preferred_mode
,
1048 dm_state
->scaling
!= RMX_OFF
);
1051 fill_stream_properties_from_drm_display_mode(stream
,
1052 &mode
, &aconnector
->base
);
1053 update_stream_scaling_settings(&mode
, dm_state
, stream
);
1056 &stream
->audio_info
,
1058 aconnector
->dc_sink
);
1066 void amdgpu_dm_crtc_destroy(struct drm_crtc
*crtc
)
1068 drm_crtc_cleanup(crtc
);
1072 /* Implemented only the options currently availible for the driver */
1073 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
1074 .reset
= drm_atomic_helper_crtc_reset
,
1075 .cursor_set
= dm_crtc_cursor_set
,
1076 .cursor_move
= dm_crtc_cursor_move
,
1077 .destroy
= amdgpu_dm_crtc_destroy
,
1078 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
1079 .set_config
= drm_atomic_helper_set_config
,
1080 .page_flip
= drm_atomic_helper_page_flip
,
1081 .atomic_duplicate_state
= drm_atomic_helper_crtc_duplicate_state
,
1082 .atomic_destroy_state
= drm_atomic_helper_crtc_destroy_state
,
1085 static enum drm_connector_status
1086 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
1089 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
1092 * 1. This interface is NOT called in context of HPD irq.
1093 * 2. This interface *is called* in context of user-mode ioctl. Which
1094 * makes it a bad place for *any* MST-related activit. */
1096 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
1097 connected
= (aconnector
->dc_sink
!= NULL
);
1099 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
1101 return (connected
? connector_status_connected
:
1102 connector_status_disconnected
);
1105 int amdgpu_dm_connector_atomic_set_property(
1106 struct drm_connector
*connector
,
1107 struct drm_connector_state
*connector_state
,
1108 struct drm_property
*property
,
1111 struct drm_device
*dev
= connector
->dev
;
1112 struct amdgpu_device
*adev
= dev
->dev_private
;
1113 struct dm_connector_state
*dm_old_state
=
1114 to_dm_connector_state(connector
->state
);
1115 struct dm_connector_state
*dm_new_state
=
1116 to_dm_connector_state(connector_state
);
1118 struct drm_crtc_state
*new_crtc_state
;
1119 struct drm_crtc
*crtc
;
1123 if (property
== dev
->mode_config
.scaling_mode_property
) {
1124 enum amdgpu_rmx_type rmx_type
;
1127 case DRM_MODE_SCALE_CENTER
:
1128 rmx_type
= RMX_CENTER
;
1130 case DRM_MODE_SCALE_ASPECT
:
1131 rmx_type
= RMX_ASPECT
;
1133 case DRM_MODE_SCALE_FULLSCREEN
:
1134 rmx_type
= RMX_FULL
;
1136 case DRM_MODE_SCALE_NONE
:
1142 if (dm_old_state
->scaling
== rmx_type
)
1145 dm_new_state
->scaling
= rmx_type
;
1147 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
1148 dm_new_state
->underscan_hborder
= val
;
1150 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
1151 dm_new_state
->underscan_vborder
= val
;
1153 } else if (property
== adev
->mode_info
.underscan_property
) {
1154 dm_new_state
->underscan_enable
= val
;
1158 for_each_crtc_in_state(
1159 connector_state
->state
,
1164 if (crtc
== connector_state
->crtc
) {
1165 struct drm_plane_state
*plane_state
;
1168 * Bit of magic done here. We need to ensure
1169 * that planes get update after mode is set.
1170 * So, we need to add primary plane to state,
1171 * and this way atomic_update would be called
1175 drm_atomic_get_plane_state(
1176 connector_state
->state
,
1187 int amdgpu_dm_connector_atomic_get_property(
1188 struct drm_connector
*connector
,
1189 const struct drm_connector_state
*state
,
1190 struct drm_property
*property
,
1193 struct drm_device
*dev
= connector
->dev
;
1194 struct amdgpu_device
*adev
= dev
->dev_private
;
1195 struct dm_connector_state
*dm_state
=
1196 to_dm_connector_state(state
);
1199 if (property
== dev
->mode_config
.scaling_mode_property
) {
1200 switch (dm_state
->scaling
) {
1202 *val
= DRM_MODE_SCALE_CENTER
;
1205 *val
= DRM_MODE_SCALE_ASPECT
;
1208 *val
= DRM_MODE_SCALE_FULLSCREEN
;
1212 *val
= DRM_MODE_SCALE_NONE
;
1216 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
1217 *val
= dm_state
->underscan_hborder
;
1219 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
1220 *val
= dm_state
->underscan_vborder
;
1222 } else if (property
== adev
->mode_info
.underscan_property
) {
1223 *val
= dm_state
->underscan_enable
;
1229 void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
1231 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
1232 const struct dc_link
*link
= aconnector
->dc_link
;
1233 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
1234 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1235 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1236 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1238 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
1239 amdgpu_dm_register_backlight_device(dm
);
1241 if (dm
->backlight_dev
) {
1242 backlight_device_unregister(dm
->backlight_dev
);
1243 dm
->backlight_dev
= NULL
;
1248 drm_connector_unregister(connector
);
1249 drm_connector_cleanup(connector
);
1253 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
1255 struct dm_connector_state
*state
=
1256 to_dm_connector_state(connector
->state
);
1260 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
1263 state
->scaling
= RMX_OFF
;
1264 state
->underscan_enable
= false;
1265 state
->underscan_hborder
= 0;
1266 state
->underscan_vborder
= 0;
1268 connector
->state
= &state
->base
;
1269 connector
->state
->connector
= connector
;
1273 struct drm_connector_state
*amdgpu_dm_connector_atomic_duplicate_state(
1274 struct drm_connector
*connector
)
1276 struct dm_connector_state
*state
=
1277 to_dm_connector_state(connector
->state
);
1279 struct dm_connector_state
*new_state
=
1280 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
1283 __drm_atomic_helper_connector_duplicate_state(connector
,
1285 return &new_state
->base
;
1291 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
1292 .reset
= amdgpu_dm_connector_funcs_reset
,
1293 .detect
= amdgpu_dm_connector_detect
,
1294 .fill_modes
= drm_helper_probe_single_connector_modes
,
1295 .destroy
= amdgpu_dm_connector_destroy
,
1296 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
1297 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
1298 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
1299 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
1302 static struct drm_encoder
*best_encoder(struct drm_connector
*connector
)
1304 int enc_id
= connector
->encoder_ids
[0];
1305 struct drm_mode_object
*obj
;
1306 struct drm_encoder
*encoder
;
1308 DRM_DEBUG_KMS("Finding the best encoder\n");
1310 /* pick the encoder ids */
1312 obj
= drm_mode_object_find(connector
->dev
, enc_id
, DRM_MODE_OBJECT_ENCODER
);
1314 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
1317 encoder
= obj_to_encoder(obj
);
1320 DRM_ERROR("No encoder id\n");
/* drm_connector_helper_funcs.get_modes thin wrapper around the DM impl. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
1329 static void create_eml_sink(struct amdgpu_connector
*aconnector
)
1331 struct dc_sink_init_data init_params
= {
1332 .link
= aconnector
->dc_link
,
1333 .sink_signal
= SIGNAL_TYPE_VIRTUAL
1335 struct edid
*edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
1337 if (!aconnector
->base
.edid_blob_ptr
||
1338 !aconnector
->base
.edid_blob_ptr
->data
) {
1339 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
1340 aconnector
->base
.name
);
1342 aconnector
->base
.force
= DRM_FORCE_OFF
;
1343 aconnector
->base
.override_edid
= false;
1347 aconnector
->edid
= edid
;
1349 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
1350 aconnector
->dc_link
,
1352 (edid
->extensions
+ 1) * EDID_LENGTH
,
1355 if (aconnector
->base
.force
1357 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
1358 aconnector
->dc_link
->local_sink
:
1359 aconnector
->dc_em_sink
;
1362 static void handle_edid_mgmt(struct amdgpu_connector
*aconnector
)
1364 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
1366 /* In case of headless boot with force on for DP managed connector
1367 * Those settings have to be != 0 to get initial modeset
1369 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
1370 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
1371 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
1375 aconnector
->base
.override_edid
= true;
1376 create_eml_sink(aconnector
);
1379 int amdgpu_dm_connector_mode_valid(
1380 struct drm_connector
*connector
,
1381 struct drm_display_mode
*mode
)
1383 int result
= MODE_ERROR
;
1384 const struct dc_sink
*dc_sink
;
1385 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
1386 struct dc_validation_set val_set
= { 0 };
1387 /* TODO: Unhardcode stream count */
1388 struct dc_stream
*stream
;
1389 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
1391 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
1392 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
1395 /* Only run this the first time mode_valid is called to initilialize
1398 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
1399 !aconnector
->dc_em_sink
)
1400 handle_edid_mgmt(aconnector
);
1402 dc_sink
= to_amdgpu_connector(connector
)->dc_sink
;
1404 if (NULL
== dc_sink
) {
1405 DRM_ERROR("dc_sink is NULL!\n");
1409 stream
= dc_create_stream_for_sink(dc_sink
);
1410 if (NULL
== stream
) {
1411 DRM_ERROR("Failed to create stream for sink!\n");
1412 goto stream_create_fail
;
1415 drm_mode_set_crtcinfo(mode
, 0);
1416 fill_stream_properties_from_drm_display_mode(stream
, mode
, connector
);
1418 val_set
.stream
= stream
;
1419 val_set
.surface_count
= 0;
1420 stream
->src
.width
= mode
->hdisplay
;
1421 stream
->src
.height
= mode
->vdisplay
;
1422 stream
->dst
= stream
->src
;
1424 if (dc_validate_resources(adev
->dm
.dc
, &val_set
, 1))
1427 dc_stream_release(stream
);
1431 /* TODO: error handling*/
1435 static const struct drm_connector_helper_funcs
1436 amdgpu_dm_connector_helper_funcs
= {
1438 * If hotplug a second bigger display in FB Con mode, bigger resolution
1439 * modes will be filtered by drm_mode_validate_size(), and those modes
1440 * is missing after user start lightdm. So we need to renew modes list.
1441 * in get_modes call back, not just return the modes count
1443 .get_modes
= get_modes
,
1444 .mode_valid
= amdgpu_dm_connector_mode_valid
,
1445 .best_encoder
= best_encoder
/* CRTC disable hook — nothing to do here; DC tears streams down elsewhere.
 * NOTE(review): body lost in extraction; assumed empty — confirm. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
/* CRTC atomic_check — accept everything; DC validates at commit time.
 * NOTE(review): body lost in extraction; assumed `return 0` — confirm. */
static int dm_crtc_helper_atomic_check(
	struct drm_crtc *crtc,
	struct drm_crtc_state *state)
{
	return 0;
}
1459 static bool dm_crtc_helper_mode_fixup(
1460 struct drm_crtc
*crtc
,
1461 const struct drm_display_mode
*mode
,
1462 struct drm_display_mode
*adjusted_mode
)
1467 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
1468 .disable
= dm_crtc_helper_disable
,
1469 .atomic_check
= dm_crtc_helper_atomic_check
,
1470 .mode_fixup
= dm_crtc_helper_mode_fixup
/* Encoder disable hook — intentionally a no-op.
 * NOTE(review): body lost in extraction; assumed empty — confirm. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
/* Encoder atomic_check — accept everything; DC validates separately.
 * NOTE(review): body lost in extraction; assumed `return 0` — confirm. */
static int dm_encoder_helper_atomic_check(
	struct drm_encoder *encoder,
	struct drm_crtc_state *crtc_state,
	struct drm_connector_state *conn_state)
{
	return 0;
}
1486 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
1487 .disable
= dm_encoder_helper_disable
,
1488 .atomic_check
= dm_encoder_helper_atomic_check
1491 static void dm_drm_plane_reset(struct drm_plane
*plane
)
1493 struct amdgpu_drm_plane_state
*amdgpu_state
;
1496 amdgpu_state
= to_amdgpu_plane_state(plane
->state
);
1497 if (amdgpu_state
->base
.fb
)
1498 drm_framebuffer_unreference(amdgpu_state
->base
.fb
);
1499 kfree(amdgpu_state
);
1500 plane
->state
= NULL
;
1503 amdgpu_state
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
1505 plane
->state
= &amdgpu_state
->base
;
1506 plane
->state
->plane
= plane
;
1510 static struct drm_plane_state
*
1511 dm_drm_plane_duplicate_state(struct drm_plane
*plane
)
1513 struct amdgpu_drm_plane_state
*amdgpu_state
;
1514 struct amdgpu_drm_plane_state
*copy
;
1516 amdgpu_state
= to_amdgpu_plane_state(plane
->state
);
1517 copy
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
1521 __drm_atomic_helper_plane_duplicate_state(plane
, ©
->base
);
/* drm_plane_funcs.atomic_destroy_state: helper teardown, then free. */
static void dm_drm_plane_destroy_state(struct drm_plane *plane,
		struct drm_plane_state *old_state)
{
	struct amdgpu_drm_plane_state *old_amdgpu_state =
			to_amdgpu_plane_state(old_state);

	__drm_atomic_helper_plane_destroy_state(old_state);
	kfree(old_amdgpu_state);
}
1534 static const struct drm_plane_funcs dm_plane_funcs
= {
1535 .update_plane
= drm_atomic_helper_update_plane
,
1536 .disable_plane
= drm_atomic_helper_disable_plane
,
1537 .destroy
= drm_plane_cleanup
,
1538 .reset
= dm_drm_plane_reset
,
1539 .atomic_duplicate_state
= dm_drm_plane_duplicate_state
,
1540 .atomic_destroy_state
= dm_drm_plane_destroy_state
,
1543 static int dm_plane_helper_prepare_fb(
1544 struct drm_plane
*plane
,
1545 struct drm_plane_state
*new_state
)
1547 struct amdgpu_framebuffer
*afb
;
1548 struct drm_gem_object
*obj
;
1549 struct amdgpu_bo
*rbo
;
1552 if (!new_state
->fb
) {
1553 DRM_DEBUG_KMS("No FB bound\n");
1557 afb
= to_amdgpu_framebuffer(new_state
->fb
);
1560 rbo
= gem_to_amdgpu_bo(obj
);
1561 r
= amdgpu_bo_reserve(rbo
, false);
1562 if (unlikely(r
!= 0))
1565 r
= amdgpu_bo_pin(rbo
, AMDGPU_GEM_DOMAIN_VRAM
, &afb
->address
);
1567 amdgpu_bo_unreserve(rbo
);
1569 if (unlikely(r
!= 0)) {
1570 DRM_ERROR("Failed to pin framebuffer\n");
1578 static void dm_plane_helper_cleanup_fb(
1579 struct drm_plane
*plane
,
1580 struct drm_plane_state
*old_state
)
1582 struct amdgpu_bo
*rbo
;
1583 struct amdgpu_framebuffer
*afb
;
1589 afb
= to_amdgpu_framebuffer(old_state
->fb
);
1590 rbo
= gem_to_amdgpu_bo(afb
->obj
);
1591 r
= amdgpu_bo_reserve(rbo
, false);
1593 DRM_ERROR("failed to reserve rbo before unpin\n");
1596 amdgpu_bo_unpin(rbo
);
1597 amdgpu_bo_unreserve(rbo
);
1598 amdgpu_bo_unref(&rbo
);
1604 int dm_create_validation_set_for_connector(struct drm_connector
*connector
,
1605 struct drm_display_mode
*mode
, struct dc_validation_set
*val_set
)
1607 int result
= MODE_ERROR
;
1608 const struct dc_sink
*dc_sink
=
1609 to_amdgpu_connector(connector
)->dc_sink
;
1610 /* TODO: Unhardcode stream count */
1611 struct dc_stream
*stream
;
1613 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
1614 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
1617 if (NULL
== dc_sink
) {
1618 DRM_ERROR("dc_sink is NULL!\n");
1622 stream
= dc_create_stream_for_sink(dc_sink
);
1624 if (NULL
== stream
) {
1625 DRM_ERROR("Failed to create stream for sink!\n");
1629 drm_mode_set_crtcinfo(mode
, 0);
1631 fill_stream_properties_from_drm_display_mode(stream
, mode
, connector
);
1633 val_set
->stream
= stream
;
1635 stream
->src
.width
= mode
->hdisplay
;
1636 stream
->src
.height
= mode
->vdisplay
;
1637 stream
->dst
= stream
->src
;
1642 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
1643 .prepare_fb
= dm_plane_helper_prepare_fb
,
1644 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
1648 * TODO: these are currently initialized to RGB formats only.
1649 * For future use cases we should either initialize them dynamically based on
1650 * plane capabilities, or initialize this array to all formats, so the internal
1651 * DRM check will succeed, and let DC implement the proper check
/* RGB pixel formats advertised for primary planes.
 * NOTE(review): two entries (original lines 1658-1659, between ARGB1555 and
 * XRGB8888) were lost in extraction — restore them from the original source
 * before building. */
1653 static uint32_t rgb_formats
[] = {
1654 DRM_FORMAT_XRGB4444
,
1655 DRM_FORMAT_ARGB4444
,
1656 DRM_FORMAT_RGBA4444
,
1657 DRM_FORMAT_ARGB1555
,
1660 DRM_FORMAT_XRGB8888
,
1661 DRM_FORMAT_ARGB8888
,
1662 DRM_FORMAT_RGBA8888
,
1663 DRM_FORMAT_XRGB2101010
,
1664 DRM_FORMAT_XBGR2101010
,
1665 DRM_FORMAT_ARGB2101010
,
1666 DRM_FORMAT_ABGR2101010
,
/* YUV pixel formats advertised for overlay planes.
 * NOTE(review): the array's entries and closing brace (original lines
 * 1670-1675) were lost in extraction — restore from the original source. */
1669 static uint32_t yuv_formats
[] = {
1676 int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
1677 struct amdgpu_plane
*aplane
,
1678 unsigned long possible_crtcs
)
1682 switch (aplane
->plane_type
) {
1683 case DRM_PLANE_TYPE_PRIMARY
:
1684 aplane
->base
.format_default
= true;
1686 res
= drm_universal_plane_init(
1692 ARRAY_SIZE(rgb_formats
),
1693 NULL
, aplane
->plane_type
, NULL
);
1695 case DRM_PLANE_TYPE_OVERLAY
:
1696 res
= drm_universal_plane_init(
1702 ARRAY_SIZE(yuv_formats
),
1703 NULL
, aplane
->plane_type
, NULL
);
1705 case DRM_PLANE_TYPE_CURSOR
:
1706 DRM_ERROR("KMS: Cursor plane not implemented.");
1710 drm_plane_helper_add(&aplane
->base
, &dm_plane_helper_funcs
);
1715 int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
1716 struct drm_plane
*plane
,
1717 uint32_t crtc_index
)
1719 struct amdgpu_crtc
*acrtc
;
1722 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
1726 res
= drm_crtc_init_with_planes(
1731 &amdgpu_dm_crtc_funcs
, NULL
);
1736 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
1738 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
1739 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
1741 acrtc
->crtc_id
= crtc_index
;
1742 acrtc
->base
.enabled
= false;
1744 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
1745 drm_mode_crtc_set_gamma_size(&acrtc
->base
, 256);
1750 acrtc
->crtc_id
= -1;
1754 static int to_drm_connector_type(enum signal_type st
)
1757 case SIGNAL_TYPE_HDMI_TYPE_A
:
1758 return DRM_MODE_CONNECTOR_HDMIA
;
1759 case SIGNAL_TYPE_EDP
:
1760 return DRM_MODE_CONNECTOR_eDP
;
1761 case SIGNAL_TYPE_RGB
:
1762 return DRM_MODE_CONNECTOR_VGA
;
1763 case SIGNAL_TYPE_DISPLAY_PORT
:
1764 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
1765 return DRM_MODE_CONNECTOR_DisplayPort
;
1766 case SIGNAL_TYPE_DVI_DUAL_LINK
:
1767 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
1768 return DRM_MODE_CONNECTOR_DVID
;
1769 case SIGNAL_TYPE_VIRTUAL
:
1770 return DRM_MODE_CONNECTOR_VIRTUAL
;
1773 return DRM_MODE_CONNECTOR_Unknown
;
1777 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
1779 const struct drm_connector_helper_funcs
*helper
=
1780 connector
->helper_private
;
1781 struct drm_encoder
*encoder
;
1782 struct amdgpu_encoder
*amdgpu_encoder
;
1784 encoder
= helper
->best_encoder(connector
);
1786 if (encoder
== NULL
)
1789 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
1791 amdgpu_encoder
->native_mode
.clock
= 0;
1793 if (!list_empty(&connector
->probed_modes
)) {
1794 struct drm_display_mode
*preferred_mode
= NULL
;
1795 list_for_each_entry(preferred_mode
,
1796 &connector
->probed_modes
,
1798 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
1799 amdgpu_encoder
->native_mode
= *preferred_mode
;
1807 static struct drm_display_mode
*amdgpu_dm_create_common_mode(
1808 struct drm_encoder
*encoder
, char *name
,
1809 int hdisplay
, int vdisplay
)
1811 struct drm_device
*dev
= encoder
->dev
;
1812 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
1813 struct drm_display_mode
*mode
= NULL
;
1814 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
1816 mode
= drm_mode_duplicate(dev
, native_mode
);
1821 mode
->hdisplay
= hdisplay
;
1822 mode
->vdisplay
= vdisplay
;
1823 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
1824 strncpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
1830 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
1831 struct drm_connector
*connector
)
1833 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
1834 struct drm_display_mode
*mode
= NULL
;
1835 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
1836 struct amdgpu_connector
*amdgpu_connector
=
1837 to_amdgpu_connector(connector
);
1841 char name
[DRM_DISPLAY_MODE_LEN
];
1845 { "640x480", 640, 480},
1846 { "800x600", 800, 600},
1847 { "1024x768", 1024, 768},
1848 { "1280x720", 1280, 720},
1849 { "1280x800", 1280, 800},
1850 {"1280x1024", 1280, 1024},
1851 { "1440x900", 1440, 900},
1852 {"1680x1050", 1680, 1050},
1853 {"1600x1200", 1600, 1200},
1854 {"1920x1080", 1920, 1080},
1855 {"1920x1200", 1920, 1200}
1858 n
= sizeof(common_modes
) / sizeof(common_modes
[0]);
1860 for (i
= 0; i
< n
; i
++) {
1861 struct drm_display_mode
*curmode
= NULL
;
1862 bool mode_existed
= false;
1864 if (common_modes
[i
].w
> native_mode
->hdisplay
||
1865 common_modes
[i
].h
> native_mode
->vdisplay
||
1866 (common_modes
[i
].w
== native_mode
->hdisplay
&&
1867 common_modes
[i
].h
== native_mode
->vdisplay
))
1870 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
1871 if (common_modes
[i
].w
== curmode
->hdisplay
&&
1872 common_modes
[i
].h
== curmode
->vdisplay
) {
1873 mode_existed
= true;
1881 mode
= amdgpu_dm_create_common_mode(encoder
,
1882 common_modes
[i
].name
, common_modes
[i
].w
,
1884 drm_mode_probed_add(connector
, mode
);
1885 amdgpu_connector
->num_modes
++;
1889 static void amdgpu_dm_connector_ddc_get_modes(
1890 struct drm_connector
*connector
,
1893 struct amdgpu_connector
*amdgpu_connector
=
1894 to_amdgpu_connector(connector
);
1897 /* empty probed_modes */
1898 INIT_LIST_HEAD(&connector
->probed_modes
);
1899 amdgpu_connector
->num_modes
=
1900 drm_add_edid_modes(connector
, edid
);
1902 drm_edid_to_eld(connector
, edid
);
1904 amdgpu_dm_get_native_mode(connector
);
1906 amdgpu_connector
->num_modes
= 0;
1909 int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
1911 const struct drm_connector_helper_funcs
*helper
=
1912 connector
->helper_private
;
1913 struct amdgpu_connector
*amdgpu_connector
=
1914 to_amdgpu_connector(connector
);
1915 struct drm_encoder
*encoder
;
1916 struct edid
*edid
= amdgpu_connector
->edid
;
1918 encoder
= helper
->best_encoder(connector
);
1920 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
1921 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
1922 return amdgpu_connector
->num_modes
;
1925 void amdgpu_dm_connector_init_helper(
1926 struct amdgpu_display_manager
*dm
,
1927 struct amdgpu_connector
*aconnector
,
1929 const struct dc_link
*link
,
1932 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
1934 aconnector
->connector_id
= link_index
;
1935 aconnector
->dc_link
= link
;
1936 aconnector
->base
.interlace_allowed
= false;
1937 aconnector
->base
.doublescan_allowed
= false;
1938 aconnector
->base
.stereo_allowed
= false;
1939 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
1940 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
1942 mutex_init(&aconnector
->hpd_lock
);
1944 /*configure suport HPD hot plug connector_>polled default value is 0
1945 * which means HPD hot plug not supported*/
1946 switch (connector_type
) {
1947 case DRM_MODE_CONNECTOR_HDMIA
:
1948 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
1950 case DRM_MODE_CONNECTOR_DisplayPort
:
1951 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
1953 case DRM_MODE_CONNECTOR_DVID
:
1954 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
1960 drm_object_attach_property(&aconnector
->base
.base
,
1961 dm
->ddev
->mode_config
.scaling_mode_property
,
1962 DRM_MODE_SCALE_NONE
);
1964 drm_object_attach_property(&aconnector
->base
.base
,
1965 adev
->mode_info
.underscan_property
,
1967 drm_object_attach_property(&aconnector
->base
.base
,
1968 adev
->mode_info
.underscan_hborder_property
,
1970 drm_object_attach_property(&aconnector
->base
.base
,
1971 adev
->mode_info
.underscan_vborder_property
,
1976 int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
1977 struct i2c_msg
*msgs
, int num
)
1979 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
1980 struct i2c_command cmd
;
1984 cmd
.payloads
= kzalloc(num
* sizeof(struct i2c_payload
), GFP_KERNEL
);
1989 cmd
.number_of_payloads
= num
;
1990 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
1993 for (i
= 0; i
< num
; i
++) {
1994 cmd
.payloads
[i
].write
= !(msgs
[i
].flags
& I2C_M_RD
);
1995 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
1996 cmd
.payloads
[i
].length
= msgs
[i
].len
;
1997 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
2000 if (dc_submit_i2c(i2c
->dm
->dc
, i2c
->link_index
, &cmd
))
2003 kfree(cmd
.payloads
);
2008 u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
2010 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
2013 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
2014 .master_xfer
= amdgpu_dm_i2c_xfer
,
2015 .functionality
= amdgpu_dm_i2c_func
,
2018 struct amdgpu_i2c_adapter
*create_i2c(unsigned int link_index
, struct amdgpu_display_manager
*dm
, int *res
)
2020 struct amdgpu_i2c_adapter
*i2c
;
2022 i2c
= kzalloc(sizeof (struct amdgpu_i2c_adapter
), GFP_KERNEL
);
2024 i2c
->base
.owner
= THIS_MODULE
;
2025 i2c
->base
.class = I2C_CLASS_DDC
;
2026 i2c
->base
.dev
.parent
= &dm
->adev
->pdev
->dev
;
2027 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
2028 snprintf(i2c
->base
.name
, sizeof (i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
2029 i2c
->link_index
= link_index
;
2030 i2c_set_adapdata(&i2c
->base
, i2c
);
2035 /* Note: this function assumes that dc_link_detect() was called for the
2036 * dc_link which will be represented by this aconnector. */
2037 int amdgpu_dm_connector_init(
2038 struct amdgpu_display_manager
*dm
,
2039 struct amdgpu_connector
*aconnector
,
2040 uint32_t link_index
,
2041 struct amdgpu_encoder
*aencoder
)
2045 struct dc
*dc
= dm
->dc
;
2046 const struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
2047 struct amdgpu_i2c_adapter
*i2c
;
2048 ((struct dc_link
*)link
)->priv
= aconnector
;
2050 DRM_DEBUG_KMS("%s()\n", __func__
);
2052 i2c
= create_i2c(link
->link_index
, dm
, &res
);
2053 aconnector
->i2c
= i2c
;
2054 res
= i2c_add_adapter(&i2c
->base
);
2057 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
2061 connector_type
= to_drm_connector_type(link
->connector_signal
);
2063 res
= drm_connector_init(
2066 &amdgpu_dm_connector_funcs
,
2070 DRM_ERROR("connector_init failed\n");
2071 aconnector
->connector_id
= -1;
2075 drm_connector_helper_add(
2077 &amdgpu_dm_connector_helper_funcs
);
2079 amdgpu_dm_connector_init_helper(
2086 drm_mode_connector_attach_encoder(
2087 &aconnector
->base
, &aencoder
->base
);
2089 drm_connector_register(&aconnector
->base
);
2091 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
2092 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
2093 amdgpu_dm_initialize_dp_connector(dm
, aconnector
);
2095 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2096 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2098 /* NOTE: this currently will create backlight device even if a panel
2099 * is not connected to the eDP/LVDS connector.
2101 * This is less than ideal but we don't have sink information at this
2102 * stage since detection happens after. We can't do detection earlier
2103 * since MST detection needs connectors to be created first.
2105 if (link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) {
2106 /* Event if registration failed, we should continue with
2107 * DM initialization because not having a backlight control
2108 * is better then a black screen. */
2109 amdgpu_dm_register_backlight_device(dm
);
2111 if (dm
->backlight_dev
)
2112 dm
->backlight_link
= link
;
2119 aconnector
->i2c
= NULL
;
2124 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
2126 switch (adev
->mode_info
.num_crtc
) {
2143 int amdgpu_dm_encoder_init(
2144 struct drm_device
*dev
,
2145 struct amdgpu_encoder
*aencoder
,
2146 uint32_t link_index
)
2148 struct amdgpu_device
*adev
= dev
->dev_private
;
2150 int res
= drm_encoder_init(dev
,
2152 &amdgpu_dm_encoder_funcs
,
2153 DRM_MODE_ENCODER_TMDS
,
2156 aencoder
->base
.possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
2159 aencoder
->encoder_id
= link_index
;
2161 aencoder
->encoder_id
= -1;
2163 drm_encoder_helper_add(&aencoder
->base
, &amdgpu_dm_encoder_helper_funcs
);
/* High-level action derived from a CRTC's atomic state transition. */
enum dm_commit_action {
	DM_COMMIT_ACTION_NOTHING,
	DM_COMMIT_ACTION_RESET,
	DM_COMMIT_ACTION_DPMS_ON,
	DM_COMMIT_ACTION_DPMS_OFF,
	DM_COMMIT_ACTION_SET
};
2176 static enum dm_commit_action
get_dm_commit_action(struct drm_crtc_state
*state
)
2178 /* mode changed means either actually mode changed or enabled changed */
2179 /* active changed means dpms changed */
2181 DRM_DEBUG_KMS("crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
2184 state
->planes_changed
,
2185 state
->mode_changed
,
2186 state
->active_changed
,
2187 state
->connectors_changed
);
2189 if (state
->mode_changed
) {
2190 /* if it is got disabled - call reset mode */
2192 return DM_COMMIT_ACTION_RESET
;
2195 return DM_COMMIT_ACTION_SET
;
2197 return DM_COMMIT_ACTION_RESET
;
2199 /* ! mode_changed */
2201 /* if it is remain disable - skip it */
2203 return DM_COMMIT_ACTION_NOTHING
;
2205 if (state
->active
&& state
->connectors_changed
)
2206 return DM_COMMIT_ACTION_SET
;
2208 if (state
->active_changed
) {
2209 if (state
->active
) {
2210 return DM_COMMIT_ACTION_DPMS_ON
;
2212 return DM_COMMIT_ACTION_DPMS_OFF
;
2215 /* ! active_changed */
2216 return DM_COMMIT_ACTION_NOTHING
;
2221 static void manage_dm_interrupts(
2222 struct amdgpu_device
*adev
,
2223 struct amdgpu_crtc
*acrtc
,
2227 * this is not correct translation but will work as soon as VBLANK
2228 * constant is the same as PFLIP
2231 amdgpu_crtc_idx_to_irq_type(
2236 drm_crtc_vblank_on(&acrtc
->base
);
2239 &adev
->pageflip_irq
,
2245 &adev
->pageflip_irq
,
2247 drm_crtc_vblank_off(&acrtc
->base
);
2251 static bool is_scaling_state_different(
2252 const struct dm_connector_state
*dm_state
,
2253 const struct dm_connector_state
*old_dm_state
)
2255 if (dm_state
->scaling
!= old_dm_state
->scaling
)
2257 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
2258 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
2260 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
2261 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
2263 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
2264 || dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
2269 static void remove_stream(struct amdgpu_device
*adev
, struct amdgpu_crtc
*acrtc
)
2272 * we evade vblanks and pflips on crtc that
2275 manage_dm_interrupts(adev
, acrtc
, false);
2277 /* this is the update mode case */
2278 if (adev
->dm
.freesync_module
)
2279 mod_freesync_remove_stream(adev
->dm
.freesync_module
,
2282 dc_stream_release(acrtc
->stream
);
2283 acrtc
->stream
= NULL
;
2284 acrtc
->otg_inst
= -1;
2285 acrtc
->enabled
= false;
2292 * Waits on all BO's fences and for proper vblank count
2294 static void amdgpu_dm_do_flip(
2295 struct drm_crtc
*crtc
,
2296 struct drm_framebuffer
*fb
,
2299 unsigned long flags
;
2300 uint32_t target_vblank
;
2302 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2303 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(fb
);
2304 struct amdgpu_bo
*abo
= gem_to_amdgpu_bo(afb
->obj
);
2305 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
2306 bool async_flip
= (acrtc
->flip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
) != 0;
2308 /* Prepare wait for target vblank early - before the fence-waits */
2309 target_vblank
= target
- drm_crtc_vblank_count(crtc
) +
2310 amdgpu_get_vblank_counter_kms(crtc
->dev
, acrtc
->crtc_id
);
2312 /*TODO This might fail and hence better not used, wait
2313 * explicitly on fences instead
2314 * and in general should be called for
2315 * blocking commit to as per framework helpers
2317 r
= amdgpu_bo_reserve(abo
, true);
2318 if (unlikely(r
!= 0)) {
2319 DRM_ERROR("failed to reserve buffer before flip\n");
2323 /* Wait for all fences on this FB */
2324 WARN_ON(reservation_object_wait_timeout_rcu(abo
->tbo
.resv
, true, false,
2325 MAX_SCHEDULE_TIMEOUT
) < 0);
2327 amdgpu_bo_unreserve(abo
);
2329 /* Wait until we're out of the vertical blank period before the one
2330 * targeted by the flip
2332 while ((acrtc
->enabled
&&
2333 (amdgpu_get_crtc_scanoutpos(adev
->ddev
, acrtc
->crtc_id
, 0,
2334 &vpos
, &hpos
, NULL
, NULL
,
2336 & (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
)) ==
2337 (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
) &&
2338 (int)(target_vblank
-
2339 amdgpu_get_vblank_counter_kms(adev
->ddev
, acrtc
->crtc_id
)) > 0)) {
2340 usleep_range(1000, 1100);
2344 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
2345 /* update crtc fb */
2346 crtc
->primary
->fb
= fb
;
2348 /* Do the flip (mmio) */
2349 adev
->mode_info
.funcs
->page_flip(adev
, acrtc
->crtc_id
, afb
->address
, async_flip
);
2351 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
2352 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
2356 void amdgpu_dm_atomic_commit_tail(
2357 struct drm_atomic_state
*state
)
2359 struct drm_device
*dev
= state
->dev
;
2360 struct amdgpu_device
*adev
= dev
->dev_private
;
2361 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2362 struct drm_plane
*plane
;
2363 struct drm_plane_state
*old_plane_state
;
2365 uint32_t commit_streams_count
= 0;
2366 uint32_t new_crtcs_count
= 0;
2367 struct drm_crtc
*crtc
;
2368 struct drm_crtc_state
*old_crtc_state
;
2369 const struct dc_stream
*commit_streams
[MAX_STREAMS
];
2370 struct amdgpu_crtc
*new_crtcs
[MAX_STREAMS
];
2371 const struct dc_stream
*new_stream
;
2372 unsigned long flags
;
2373 bool wait_for_vblank
= true;
2376 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
2378 /* update changed items */
2379 for_each_crtc_in_state(state
, crtc
, old_crtc_state
, i
) {
2380 struct amdgpu_crtc
*acrtc
;
2381 struct amdgpu_connector
*aconnector
= NULL
;
2382 enum dm_commit_action action
;
2383 struct drm_crtc_state
*new_state
= crtc
->state
;
2385 acrtc
= to_amdgpu_crtc(crtc
);
2388 amdgpu_dm_find_first_crct_matching_connector(
2393 /* handles headless hotplug case, updating new_state and
2394 * aconnector as needed
2397 action
= get_dm_commit_action(new_state
);
2400 case DM_COMMIT_ACTION_DPMS_ON
:
2401 case DM_COMMIT_ACTION_SET
: {
2402 struct dm_connector_state
*dm_state
= NULL
;
2406 dm_state
= to_dm_connector_state(aconnector
->base
.state
);
2408 new_stream
= create_stream_for_sink(
2413 DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc
->crtc_id
, acrtc
);
2417 * this could happen because of issues with
2418 * userspace notifications delivery.
2419 * In this case userspace tries to set mode on
2420 * display which is disconnect in fact.
2421 * dc_sink in NULL in this case on aconnector.
2422 * We expect reset mode will come soon.
2424 * This can also happen when unplug is done
2425 * during resume sequence ended
2427 * In this case, we want to pretend we still
2428 * have a sink to keep the pipe running so that
2429 * hw state is consistent with the sw state
2431 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
2432 __func__
, acrtc
->base
.base
.id
);
2437 remove_stream(adev
, acrtc
);
2440 * this loop saves set mode crtcs
2441 * we needed to enable vblanks once all
2442 * resources acquired in dc after dc_commit_streams
2444 new_crtcs
[new_crtcs_count
] = acrtc
;
2447 acrtc
->stream
= new_stream
;
2448 acrtc
->enabled
= true;
2449 acrtc
->hw_mode
= crtc
->state
->mode
;
2450 crtc
->hwmode
= crtc
->state
->mode
;
2455 case DM_COMMIT_ACTION_NOTHING
: {
2456 struct dm_connector_state
*dm_state
= NULL
;
2461 dm_state
= to_dm_connector_state(aconnector
->base
.state
);
2463 /* Scaling update */
2464 update_stream_scaling_settings(&crtc
->state
->mode
,
2465 dm_state
, acrtc
->stream
);
2469 case DM_COMMIT_ACTION_DPMS_OFF
:
2470 case DM_COMMIT_ACTION_RESET
:
2471 DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc
->crtc_id
, acrtc
);
2472 /* i.e. reset mode */
2474 remove_stream(adev
, acrtc
);
2477 } /* for_each_crtc_in_state() */
2479 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
2481 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2483 if (acrtc
->stream
) {
2484 commit_streams
[commit_streams_count
] = acrtc
->stream
;
2485 ++commit_streams_count
;
2490 * Add streams after required streams from new and replaced streams
2491 * are removed from freesync module
2493 if (adev
->dm
.freesync_module
) {
2494 for (i
= 0; i
< new_crtcs_count
; i
++) {
2495 struct amdgpu_connector
*aconnector
= NULL
;
2496 new_stream
= new_crtcs
[i
]->stream
;
2498 amdgpu_dm_find_first_crct_matching_connector(
2500 &new_crtcs
[i
]->base
,
2504 "Atomic commit: Failed to find connector for acrtc id:%d "
2505 "skipping freesync init\n",
2506 new_crtcs
[i
]->crtc_id
);
2510 mod_freesync_add_stream(adev
->dm
.freesync_module
,
2511 new_stream
, &aconnector
->caps
);
2515 /* DC is optimized not to do anything if 'streams' didn't change. */
2516 WARN_ON(!dc_commit_streams(dm
->dc
, commit_streams
, commit_streams_count
));
2518 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
2519 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2521 if (acrtc
->stream
!= NULL
)
2523 dc_stream_get_status(acrtc
->stream
)->primary_otg_inst
;
2526 /* update planes when needed */
2527 for_each_plane_in_state(state
, plane
, old_plane_state
, i
) {
2528 struct drm_plane_state
*plane_state
= plane
->state
;
2529 struct drm_crtc
*crtc
= plane_state
->crtc
;
2530 struct drm_framebuffer
*fb
= plane_state
->fb
;
2531 struct drm_connector
*connector
;
2532 struct dm_connector_state
*dm_state
= NULL
;
2533 enum dm_commit_action action
;
2536 if (!fb
|| !crtc
|| !crtc
->state
->active
)
2539 action
= get_dm_commit_action(crtc
->state
);
2541 /* Surfaces are created under two scenarios:
2542 * 1. This commit is not a page flip.
2543 * 2. This commit is a page flip, and streams are created.
2545 pflip_needed
= !state
->allow_modeset
;
2546 if (!pflip_needed
||
2547 action
== DM_COMMIT_ACTION_DPMS_ON
||
2548 action
== DM_COMMIT_ACTION_SET
) {
2549 list_for_each_entry(connector
,
2550 &dev
->mode_config
.connector_list
, head
) {
2551 if (connector
->state
->crtc
== crtc
) {
2552 dm_state
= to_dm_connector_state(
2559 * This situation happens in the following case:
2560 * we are about to get set mode for connector who's only
2561 * possible crtc (in encoder crtc mask) is used by
2562 * another connector, that is why it will try to
2563 * re-assing crtcs in order to make configuration
2564 * supported. For our implementation we need to make all
2565 * encoders support all crtcs, then this issue will
2566 * never arise again. But to guard code from this issue
2569 * Also it should be needed when used with actual
2570 * drm_atomic_commit ioctl in future
2575 dm_dc_surface_commit(dm
->dc
, crtc
);
2579 for (i
= 0; i
< new_crtcs_count
; i
++) {
2581 * loop to enable interrupts on newly arrived crtc
2583 struct amdgpu_crtc
*acrtc
= new_crtcs
[i
];
2585 if (adev
->dm
.freesync_module
)
2586 mod_freesync_notify_mode_change(
2587 adev
->dm
.freesync_module
, &acrtc
->stream
, 1);
2589 manage_dm_interrupts(adev
, acrtc
, true);
2590 dm_crtc_cursor_reset(&acrtc
->base
);
2594 for_each_plane_in_state(state
, plane
, old_plane_state
, i
) {
2595 struct drm_plane_state
*plane_state
= plane
->state
;
2596 struct drm_crtc
*crtc
= plane_state
->crtc
;
2597 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2598 struct drm_framebuffer
*fb
= plane_state
->fb
;
2601 if (!fb
|| !crtc
|| !crtc
->state
->planes_changed
||
2602 !crtc
->state
->active
)
2604 pflip_needed
= !state
->allow_modeset
;
2608 acrtc
->flip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
?
2614 drm_crtc_vblank_count(crtc
) + wait_for_vblank
);
2616 /*clean up the flags for next usage*/
2617 acrtc
->flip_flags
= 0;
2622 /*TODO mark consumed event on all crtc assigned event
2623 * in drm_atomic_helper_setup_commit just to signal completion
2625 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
2626 for_each_crtc_in_state(state
, crtc
, old_crtc_state
, i
) {
2627 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2629 if (acrtc
->base
.state
->event
&&
2630 acrtc
->base
.state
->event
->event
.base
.type
!= DRM_EVENT_FLIP_COMPLETE
) {
2631 acrtc
->event
= acrtc
->base
.state
->event
;
2632 acrtc
->base
.state
->event
= NULL
;
2635 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
2637 /* Signal HW programming completion */
2638 drm_atomic_helper_commit_hw_done(state
);
2640 if (wait_for_vblank
)
2641 drm_atomic_helper_wait_for_vblanks(dev
, state
);
2643 /*TODO send vblank event on all crtc assigned event
2644 * in drm_atomic_helper_setup_commit just to signal completion
2646 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
2647 for_each_crtc_in_state(state
, crtc
, old_crtc_state
, i
) {
2648 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2651 acrtc
->event
->event
.base
.type
!= DRM_EVENT_FLIP_COMPLETE
) {
2652 drm_send_event_locked(dev
, &acrtc
->event
->base
);
2653 acrtc
->event
= NULL
;
2656 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
2658 /*TODO Is it to early if actual flip haven't happened yet ?*/
2659 /* Release old FB */
2660 drm_atomic_helper_cleanup_planes(dev
, state
);
2664 static int dm_force_atomic_commit(struct drm_connector
*connector
)
2667 struct drm_device
*ddev
= connector
->dev
;
2668 struct drm_atomic_state
*state
= drm_atomic_state_alloc(ddev
);
2669 struct amdgpu_crtc
*disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
2670 struct drm_plane
*plane
= disconnected_acrtc
->base
.primary
;
2671 struct drm_connector_state
*conn_state
;
2672 struct drm_crtc_state
*crtc_state
;
2673 struct drm_plane_state
*plane_state
;
2678 state
->acquire_ctx
= ddev
->mode_config
.acquire_ctx
;
2680 /* Construct an atomic state to restore previous display setting */
2683 * Attach connectors to drm_atomic_state
2685 conn_state
= drm_atomic_get_connector_state(state
, connector
);
2687 ret
= PTR_ERR_OR_ZERO(conn_state
);
2691 /* Attach crtc to drm_atomic_state*/
2692 crtc_state
= drm_atomic_get_crtc_state(state
, &disconnected_acrtc
->base
);
2694 ret
= PTR_ERR_OR_ZERO(crtc_state
);
2698 /* force a restore */
2699 crtc_state
->mode_changed
= true;
2701 /* Attach plane to drm_atomic_state */
2702 plane_state
= drm_atomic_get_plane_state(state
, plane
);
2704 ret
= PTR_ERR_OR_ZERO(plane_state
);
2709 /* Call commit internally with the state we just constructed */
2710 ret
= drm_atomic_commit(state
);
2715 DRM_ERROR("Restoring old state failed with %i\n", ret
);
2716 drm_atomic_state_put(state
);
2722 * This function handles all cases when a set mode does not come upon hotplug.
2723 * This includes when the same display is unplugged then plugged back into the
2724 * same port and when we are running without a usermode desktop manager support
2726 void dm_restore_drm_connector_state(struct drm_device
*dev
, struct drm_connector
*connector
)
2728 struct amdgpu_connector
*aconnector
= to_amdgpu_connector(connector
);
2729 struct amdgpu_crtc
*disconnected_acrtc
;
2731 if (!aconnector
->dc_sink
|| !connector
->state
|| !connector
->encoder
)
2734 disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
2736 if (!disconnected_acrtc
|| !disconnected_acrtc
->stream
)
2740 * If the previous sink is not released and different from the current,
2741 * we deduce we are in a state where we can not rely on usermode call
2742 * to turn on the display, so we do it here
2744 if (disconnected_acrtc
->stream
->sink
!= aconnector
->dc_sink
)
2745 dm_force_atomic_commit(&aconnector
->base
);
2748 static uint32_t add_val_sets_surface(
2749 struct dc_validation_set
*val_sets
,
2751 const struct dc_stream
*stream
,
2752 const struct dc_surface
*surface
)
2756 while (i
< set_count
) {
2757 if (val_sets
[i
].stream
== stream
)
2762 val_sets
[i
].surfaces
[val_sets
[i
].surface_count
] = surface
;
2763 val_sets
[i
].surface_count
++;
2765 return val_sets
[i
].surface_count
;
2768 static uint32_t update_in_val_sets_stream(
2769 struct dc_validation_set
*val_sets
,
2770 struct drm_crtc
**crtcs
,
2772 const struct dc_stream
*old_stream
,
2773 const struct dc_stream
*new_stream
,
2774 struct drm_crtc
*crtc
)
2778 while (i
< set_count
) {
2779 if (val_sets
[i
].stream
== old_stream
)
2784 val_sets
[i
].stream
= new_stream
;
2787 if (i
== set_count
) {
2788 /* nothing found. add new one to the end */
2789 return set_count
+ 1;
2795 static uint32_t remove_from_val_sets(
2796 struct dc_validation_set
*val_sets
,
2798 const struct dc_stream
*stream
)
2802 for (i
= 0; i
< set_count
; i
++)
2803 if (val_sets
[i
].stream
== stream
)
2806 if (i
== set_count
) {
2813 for (; i
< set_count
; i
++) {
2814 val_sets
[i
] = val_sets
[i
+ 1];
2820 int amdgpu_dm_atomic_check(struct drm_device
*dev
,
2821 struct drm_atomic_state
*state
)
2823 struct drm_crtc
*crtc
;
2824 struct drm_crtc_state
*crtc_state
;
2825 struct drm_plane
*plane
;
2826 struct drm_plane_state
*plane_state
;
2830 int new_stream_count
;
2831 struct dc_validation_set set
[MAX_STREAMS
] = {{ 0 }};
2832 struct dc_stream
*new_streams
[MAX_STREAMS
] = { 0 };
2833 struct drm_crtc
*crtc_set
[MAX_STREAMS
] = { 0 };
2834 struct amdgpu_device
*adev
= dev
->dev_private
;
2835 struct dc
*dc
= adev
->dm
.dc
;
2836 bool need_to_validate
= false;
2838 ret
= drm_atomic_helper_check(dev
, state
);
2841 DRM_ERROR("Atomic state validation failed with error :%d !\n",
2848 /* copy existing configuration */
2849 new_stream_count
= 0;
2851 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
2853 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2855 if (acrtc
->stream
) {
2856 set
[set_count
].stream
= acrtc
->stream
;
2857 crtc_set
[set_count
] = crtc
;
2862 /* update changed items */
2863 for_each_crtc_in_state(state
, crtc
, crtc_state
, i
) {
2864 struct amdgpu_crtc
*acrtc
= NULL
;
2865 struct amdgpu_connector
*aconnector
= NULL
;
2866 enum dm_commit_action action
;
2868 acrtc
= to_amdgpu_crtc(crtc
);
2870 aconnector
= amdgpu_dm_find_first_crct_matching_connector(state
, crtc
, true);
2872 action
= get_dm_commit_action(crtc_state
);
2875 case DM_COMMIT_ACTION_DPMS_ON
:
2876 case DM_COMMIT_ACTION_SET
: {
2877 struct dc_stream
*new_stream
= NULL
;
2878 struct drm_connector_state
*conn_state
= NULL
;
2879 struct dm_connector_state
*dm_state
= NULL
;
2882 conn_state
= drm_atomic_get_connector_state(state
, &aconnector
->base
);
2883 if (IS_ERR(conn_state
))
2885 dm_state
= to_dm_connector_state(conn_state
);
2888 new_stream
= create_stream_for_sink(aconnector
, &crtc_state
->mode
, dm_state
);
2891 * we can have no stream on ACTION_SET if a display
2892 * was disconnected during S3, in this case it not and
2893 * error, the OS will be updated after detection, and
2894 * do the right thing on next atomic commit
2897 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
2898 __func__
, acrtc
->base
.base
.id
);
2902 new_streams
[new_stream_count
] = new_stream
;
2903 set_count
= update_in_val_sets_stream(
2912 need_to_validate
= true;
2916 case DM_COMMIT_ACTION_NOTHING
: {
2917 const struct drm_connector
*drm_connector
= NULL
;
2918 struct drm_connector_state
*conn_state
= NULL
;
2919 struct dm_connector_state
*dm_state
= NULL
;
2920 struct dm_connector_state
*old_dm_state
= NULL
;
2921 struct dc_stream
*new_stream
;
2926 for_each_connector_in_state(
2927 state
, drm_connector
, conn_state
, j
) {
2928 if (&aconnector
->base
== drm_connector
)
2932 old_dm_state
= to_dm_connector_state(drm_connector
->state
);
2933 dm_state
= to_dm_connector_state(conn_state
);
2935 /* Support underscan adjustment*/
2936 if (!is_scaling_state_different(dm_state
, old_dm_state
))
2939 new_stream
= create_stream_for_sink(aconnector
, &crtc_state
->mode
, dm_state
);
2942 DRM_ERROR("%s: Failed to create new stream for crtc %d\n",
2943 __func__
, acrtc
->base
.base
.id
);
2947 new_streams
[new_stream_count
] = new_stream
;
2948 set_count
= update_in_val_sets_stream(
2957 need_to_validate
= true;
2961 case DM_COMMIT_ACTION_DPMS_OFF
:
2962 case DM_COMMIT_ACTION_RESET
:
2963 /* i.e. reset mode */
2964 if (acrtc
->stream
) {
2965 set_count
= remove_from_val_sets(
2974 * TODO revisit when removing commit action
2975 * and looking at atomic flags directly
2978 /* commit needs planes right now (for gamma, eg.) */
2979 /* TODO rework commit to chack crtc for gamma change */
2980 ret
= drm_atomic_add_affected_planes(state
, crtc
);
2987 for (i
= 0; i
< set_count
; i
++) {
2988 for_each_plane_in_state(state
, plane
, plane_state
, j
) {
2989 struct drm_crtc
*crtc
= plane_state
->crtc
;
2990 struct drm_framebuffer
*fb
= plane_state
->fb
;
2991 struct drm_connector
*connector
;
2992 struct dm_connector_state
*dm_state
= NULL
;
2993 enum dm_commit_action action
;
2994 struct drm_crtc_state
*crtc_state
;
2998 if (!fb
|| !crtc
|| crtc_set
[i
] != crtc
||
2999 !crtc
->state
->planes_changed
|| !crtc
->state
->active
)
3002 action
= get_dm_commit_action(crtc
->state
);
3004 /* Surfaces are created under two scenarios:
3005 * 1. This commit is not a page flip.
3006 * 2. This commit is a page flip, and streams are created.
3008 crtc_state
= drm_atomic_get_crtc_state(state
, crtc
);
3009 pflip_needed
= !state
->allow_modeset
;
3010 if (!pflip_needed
||
3011 action
== DM_COMMIT_ACTION_DPMS_ON
||
3012 action
== DM_COMMIT_ACTION_SET
) {
3013 struct dc_surface
*surface
;
3015 list_for_each_entry(connector
,
3016 &dev
->mode_config
.connector_list
, head
) {
3017 if (connector
->state
->crtc
== crtc
) {
3018 dm_state
= to_dm_connector_state(
3025 * This situation happens in the following case:
3026 * we are about to get set mode for connector who's only
3027 * possible crtc (in encoder crtc mask) is used by
3028 * another connector, that is why it will try to
3029 * re-assing crtcs in order to make configuration
3030 * supported. For our implementation we need to make all
3031 * encoders support all crtcs, then this issue will
3032 * never arise again. But to guard code from this issue
3035 * Also it should be needed when used with actual
3036 * drm_atomic_commit ioctl in future
3041 surface
= dc_create_surface(dc
);
3042 fill_plane_attributes(
3043 crtc
->dev
->dev_private
,
3048 add_val_sets_surface(
3054 need_to_validate
= true;
3059 if (need_to_validate
== false || set_count
== 0 ||
3060 dc_validate_resources(dc
, set
, set_count
))
3063 for (i
= 0; i
< set_count
; i
++) {
3064 for (j
= 0; j
< set
[i
].surface_count
; j
++) {
3065 dc_surface_release(set
[i
].surfaces
[j
]);
3068 for (i
= 0; i
< new_stream_count
; i
++)
3069 dc_stream_release(new_streams
[i
]);
3072 DRM_ERROR("Atomic check failed.\n");
3077 static bool is_dp_capable_without_timing_msa(
3079 struct amdgpu_connector
*amdgpu_connector
)
3082 bool capable
= false;
3083 if (amdgpu_connector
->dc_link
&&
3086 amdgpu_connector
->dc_link
->link_index
,
3087 DP_DOWN_STREAM_PORT_COUNT
,
3088 &dpcd_data
, sizeof(dpcd_data
)))
3089 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
3093 void amdgpu_dm_add_sink_to_freesync_module(
3094 struct drm_connector
*connector
,
3098 uint64_t val_capable
;
3099 bool edid_check_required
;
3100 struct detailed_timing
*timing
;
3101 struct detailed_non_pixel
*data
;
3102 struct detailed_data_monitor_range
*range
;
3103 struct amdgpu_connector
*amdgpu_connector
=
3104 to_amdgpu_connector(connector
);
3106 struct drm_device
*dev
= connector
->dev
;
3107 struct amdgpu_device
*adev
= dev
->dev_private
;
3108 edid_check_required
= false;
3109 if (!amdgpu_connector
->dc_sink
) {
3110 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
3113 if (!adev
->dm
.freesync_module
)
3116 * if edid non zero restrict freesync only for dp and edp
3119 if (amdgpu_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
3120 || amdgpu_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
3121 edid_check_required
= is_dp_capable_without_timing_msa(
3127 if (edid_check_required
== true && (edid
->version
> 1 ||
3128 (edid
->version
== 1 && edid
->revision
> 1))) {
3129 for (i
= 0; i
< 4; i
++) {
3131 timing
= &edid
->detailed_timings
[i
];
3132 data
= &timing
->data
.other_data
;
3133 range
= &data
->data
.range
;
3135 * Check if monitor has continuous frequency mode
3137 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
3140 * Check for flag range limits only. If flag == 1 then
3141 * no additional timing information provided.
3142 * Default GTF, GTF Secondary curve and CVT are not
3145 if (range
->flags
!= 1)
3148 amdgpu_connector
->min_vfreq
= range
->min_vfreq
;
3149 amdgpu_connector
->max_vfreq
= range
->max_vfreq
;
3150 amdgpu_connector
->pixel_clock_mhz
=
3151 range
->pixel_clock_mhz
* 10;
3155 if (amdgpu_connector
->max_vfreq
-
3156 amdgpu_connector
->min_vfreq
> 10) {
3157 amdgpu_connector
->caps
.supported
= true;
3158 amdgpu_connector
->caps
.min_refresh_in_micro_hz
=
3159 amdgpu_connector
->min_vfreq
* 1000000;
3160 amdgpu_connector
->caps
.max_refresh_in_micro_hz
=
3161 amdgpu_connector
->max_vfreq
* 1000000;
3167 * TODO figure out how to notify user-mode or DRM of freesync caps
3168 * once we figure out how to deal with freesync in an upstreamable
3174 void amdgpu_dm_remove_sink_from_freesync_module(
3175 struct drm_connector
*connector
)
3178 * TODO fill in once we figure out how to deal with freesync in
3179 * an upstreamable fashion