2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include "dm_services.h"
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
38 #include "dce_calcs.h"
39 #include "bios_parser_interface.h"
40 #include "include/irq_service_interface.h"
41 #include "transform.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
45 #include "link_hwss.h"
46 #include "link_encoder.h"
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
52 /*******************************************************************************
54 ******************************************************************************/
55 static void destroy_links(struct core_dc
*dc
)
59 for (i
= 0; i
< dc
->link_count
; i
++) {
60 if (NULL
!= dc
->links
[i
])
61 link_destroy(&dc
->links
[i
]);
65 static bool create_links(
67 uint32_t num_virtual_links
)
71 struct dc_bios
*bios
= dc
->ctx
->dc_bios
;
75 connectors_num
= bios
->funcs
->get_connectors_number(bios
);
77 if (connectors_num
> ENUM_ID_COUNT
) {
79 "DC: Number of connectors %d exceeds maximum of %d!\n",
85 if (connectors_num
== 0 && num_virtual_links
== 0) {
86 dm_error("DC: Number of connectors is zero!\n");
90 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
95 for (i
= 0; i
< connectors_num
; i
++) {
96 struct link_init_data link_init_params
= {0};
99 link_init_params
.ctx
= dc
->ctx
;
100 /* next BIOS object table connector */
101 link_init_params
.connector_index
= i
;
102 link_init_params
.link_index
= dc
->link_count
;
103 link_init_params
.dc
= dc
;
104 link
= link_create(&link_init_params
);
107 dc
->links
[dc
->link_count
] = link
;
113 for (i
= 0; i
< num_virtual_links
; i
++) {
114 struct dc_link
*link
= dm_alloc(sizeof(*link
));
115 struct encoder_init_data enc_init
= {0};
124 link
->connector_signal
= SIGNAL_TYPE_VIRTUAL
;
125 link
->link_id
.type
= OBJECT_TYPE_CONNECTOR
;
126 link
->link_id
.id
= CONNECTOR_ID_VIRTUAL
;
127 link
->link_id
.enum_id
= ENUM_ID_1
;
128 link
->link_enc
= dm_alloc(sizeof(*link
->link_enc
));
130 enc_init
.ctx
= dc
->ctx
;
131 enc_init
.channel
= CHANNEL_ID_UNKNOWN
;
132 enc_init
.hpd_source
= HPD_SOURCEID_UNKNOWN
;
133 enc_init
.transmitter
= TRANSMITTER_UNKNOWN
;
134 enc_init
.connector
= link
->link_id
;
135 enc_init
.encoder
.type
= OBJECT_TYPE_ENCODER
;
136 enc_init
.encoder
.id
= ENCODER_ID_INTERNAL_VIRTUAL
;
137 enc_init
.encoder
.enum_id
= ENUM_ID_1
;
138 virtual_link_encoder_construct(link
->link_enc
, &enc_init
);
140 link
->link_index
= dc
->link_count
;
141 dc
->links
[dc
->link_count
] = link
;
151 static bool stream_adjust_vmin_vmax(struct dc
*dc
,
152 struct dc_stream_state
**streams
, int num_streams
,
155 /* TODO: Support multiple streams */
156 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
157 struct dc_stream_state
*stream
= streams
[0];
161 for (i
= 0; i
< MAX_PIPES
; i
++) {
162 struct pipe_ctx
*pipe
= &core_dc
->current_context
->res_ctx
.pipe_ctx
[i
];
164 if (pipe
->stream
== stream
&& pipe
->stream_enc
) {
165 core_dc
->hwss
.set_drr(&pipe
, 1, vmin
, vmax
);
167 /* build and update the info frame */
168 resource_build_info_frame(pipe
);
169 core_dc
->hwss
.update_info_frame(pipe
);
177 static bool stream_get_crtc_position(struct dc
*dc
,
178 struct dc_stream_state
**streams
, int num_streams
,
179 unsigned int *v_pos
, unsigned int *nom_v_pos
)
181 /* TODO: Support multiple streams */
182 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
183 struct dc_stream_state
*stream
= streams
[0];
186 struct crtc_position position
;
188 for (i
= 0; i
< MAX_PIPES
; i
++) {
189 struct pipe_ctx
*pipe
=
190 &core_dc
->current_context
->res_ctx
.pipe_ctx
[i
];
192 if (pipe
->stream
== stream
&& pipe
->stream_enc
) {
193 core_dc
->hwss
.get_position(&pipe
, 1, &position
);
195 *v_pos
= position
.vertical_count
;
196 *nom_v_pos
= position
.nominal_vcount
;
203 static bool set_gamut_remap(struct dc
*dc
, const struct dc_stream_state
*stream
)
205 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
208 struct pipe_ctx
*pipes
;
210 for (i
= 0; i
< MAX_PIPES
; i
++) {
211 if (core_dc
->current_context
->res_ctx
.pipe_ctx
[i
].stream
== stream
) {
212 pipes
= &core_dc
->current_context
->res_ctx
.pipe_ctx
[i
];
213 core_dc
->hwss
.program_gamut_remap(pipes
);
221 static bool program_csc_matrix(struct dc
*dc
, struct dc_stream_state
*stream
)
223 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
226 struct pipe_ctx
*pipes
;
228 for (i
= 0; i
< MAX_PIPES
; i
++) {
229 if (core_dc
->current_context
->res_ctx
.pipe_ctx
[i
].stream
232 pipes
= &core_dc
->current_context
->res_ctx
.pipe_ctx
[i
];
233 core_dc
->hwss
.program_csc_matrix(pipes
,
234 stream
->output_color_space
,
235 stream
->csc_color_matrix
.matrix
);
243 static void set_static_screen_events(struct dc
*dc
,
244 struct dc_stream_state
**streams
,
246 const struct dc_static_screen_events
*events
)
248 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
251 struct pipe_ctx
*pipes_affected
[MAX_PIPES
];
252 int num_pipes_affected
= 0;
254 for (i
= 0; i
< num_streams
; i
++) {
255 struct dc_stream_state
*stream
= streams
[i
];
257 for (j
= 0; j
< MAX_PIPES
; j
++) {
258 if (core_dc
->current_context
->res_ctx
.pipe_ctx
[j
].stream
260 pipes_affected
[num_pipes_affected
++] =
261 &core_dc
->current_context
->res_ctx
.pipe_ctx
[j
];
266 core_dc
->hwss
.set_static_screen_control(pipes_affected
, num_pipes_affected
, events
);
269 static void set_drive_settings(struct dc
*dc
,
270 struct link_training_settings
*lt_settings
,
271 const struct dc_link
*link
)
273 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
276 for (i
= 0; i
< core_dc
->link_count
; i
++) {
277 if (core_dc
->links
[i
] == link
)
281 if (i
>= core_dc
->link_count
)
282 ASSERT_CRITICAL(false);
284 dc_link_dp_set_drive_settings(core_dc
->links
[i
], lt_settings
);
287 static void perform_link_training(struct dc
*dc
,
288 struct dc_link_settings
*link_setting
,
289 bool skip_video_pattern
)
291 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
294 for (i
= 0; i
< core_dc
->link_count
; i
++)
295 dc_link_dp_perform_link_training(
301 static void set_preferred_link_settings(struct dc
*dc
,
302 struct dc_link_settings
*link_setting
,
303 struct dc_link
*link
)
305 link
->preferred_link_setting
= *link_setting
;
306 dp_retrain_link_dp_test(link
, link_setting
, false);
/* Enable HPD (hot-plug detect) interrupts for the link. */
static void enable_hpd(const struct dc_link *link)
{
	dc_link_dp_enable_hpd(link);
}
/* Disable HPD (hot-plug detect) interrupts for the link. */
static void disable_hpd(const struct dc_link *link)
{
	dc_link_dp_disable_hpd(link);
}
320 static void set_test_pattern(
321 struct dc_link
*link
,
322 enum dp_test_pattern test_pattern
,
323 const struct link_training_settings
*p_link_settings
,
324 const unsigned char *p_custom_pattern
,
325 unsigned int cust_pattern_size
)
328 dc_link_dp_set_test_pattern(
336 void set_dither_option(struct dc_stream_state
*stream
,
337 enum dc_dither_option option
)
339 struct bit_depth_reduction_params params
;
340 struct dc_link
*link
= stream
->status
.link
;
341 struct pipe_ctx
*pipes
= link
->dc
->current_context
->res_ctx
.pipe_ctx
;
343 memset(¶ms
, 0, sizeof(params
));
346 if (option
> DITHER_OPTION_MAX
)
348 if (option
== DITHER_OPTION_DEFAULT
) {
349 switch (stream
->timing
.display_color_depth
) {
350 case COLOR_DEPTH_666
:
351 stream
->dither_option
= DITHER_OPTION_SPATIAL6
;
353 case COLOR_DEPTH_888
:
354 stream
->dither_option
= DITHER_OPTION_SPATIAL8
;
356 case COLOR_DEPTH_101010
:
357 stream
->dither_option
= DITHER_OPTION_SPATIAL10
;
360 option
= DITHER_OPTION_DISABLE
;
363 stream
->dither_option
= option
;
365 resource_build_bit_depth_reduction_params(stream
,
367 stream
->bit_depth_params
= params
;
369 opp_program_bit_depth_reduction(pipes
->opp
, ¶ms
);
372 static void allocate_dc_stream_funcs(struct core_dc
*core_dc
)
374 if (core_dc
->hwss
.set_drr
!= NULL
) {
375 core_dc
->public.stream_funcs
.adjust_vmin_vmax
=
376 stream_adjust_vmin_vmax
;
379 core_dc
->public.stream_funcs
.set_static_screen_events
=
380 set_static_screen_events
;
382 core_dc
->public.stream_funcs
.get_crtc_position
=
383 stream_get_crtc_position
;
385 core_dc
->public.stream_funcs
.set_gamut_remap
=
388 core_dc
->public.stream_funcs
.program_csc_matrix
=
391 core_dc
->public.stream_funcs
.set_dither_option
=
394 core_dc
->public.link_funcs
.set_drive_settings
=
397 core_dc
->public.link_funcs
.perform_link_training
=
398 perform_link_training
;
400 core_dc
->public.link_funcs
.set_preferred_link_settings
=
401 set_preferred_link_settings
;
403 core_dc
->public.link_funcs
.enable_hpd
=
406 core_dc
->public.link_funcs
.disable_hpd
=
409 core_dc
->public.link_funcs
.set_test_pattern
=
413 static void destruct(struct core_dc
*dc
)
415 dc_release_validate_context(dc
->current_context
);
416 dc
->current_context
= NULL
;
420 dc_destroy_resource_pool(dc
);
422 if (dc
->ctx
->gpio_service
)
423 dal_gpio_service_destroy(&dc
->ctx
->gpio_service
);
426 dal_i2caux_destroy(&dc
->ctx
->i2caux
);
428 if (dc
->ctx
->created_bios
)
429 dal_bios_parser_destroy(&dc
->ctx
->dc_bios
);
432 dal_logger_destroy(&dc
->ctx
->logger
);
438 static bool construct(struct core_dc
*dc
,
439 const struct dc_init_data
*init_params
)
441 struct dal_logger
*logger
;
442 struct dc_context
*dc_ctx
= dm_alloc(sizeof(*dc_ctx
));
443 enum dce_version dc_version
= DCE_VERSION_UNKNOWN
;
446 dm_error("%s: failed to create ctx\n", __func__
);
450 dc
->current_context
= dm_alloc(sizeof(*dc
->current_context
));
452 if (!dc
->current_context
) {
453 dm_error("%s: failed to create validate ctx\n", __func__
);
457 dc
->current_context
->ref_count
++;
459 dc_ctx
->cgs_device
= init_params
->cgs_device
;
460 dc_ctx
->driver_context
= init_params
->driver
;
461 dc_ctx
->dc
= &dc
->public;
462 dc_ctx
->asic_id
= init_params
->asic_id
;
465 logger
= dal_logger_create(dc_ctx
);
468 /* can *not* call logger. call base driver 'print error' */
469 dm_error("%s: failed to create Logger!\n", __func__
);
472 dc_ctx
->logger
= logger
;
474 dc
->ctx
->dce_environment
= init_params
->dce_environment
;
476 dc_version
= resource_parse_asic_id(init_params
->asic_id
);
477 dc
->ctx
->dce_version
= dc_version
;
479 dc
->ctx
->fbc_gpu_addr
= init_params
->fbc_gpu_addr
;
481 /* Resource should construct all asic specific resources.
482 * This should be the only place where we need to parse the asic id
484 if (init_params
->vbios_override
)
485 dc_ctx
->dc_bios
= init_params
->vbios_override
;
487 /* Create BIOS parser */
488 struct bp_init_data bp_init_data
;
490 bp_init_data
.ctx
= dc_ctx
;
491 bp_init_data
.bios
= init_params
->asic_id
.atombios_base_address
;
493 dc_ctx
->dc_bios
= dal_bios_parser_create(
494 &bp_init_data
, dc_version
);
496 if (!dc_ctx
->dc_bios
) {
497 ASSERT_CRITICAL(false);
501 dc_ctx
->created_bios
= true;
505 dc_ctx
->i2caux
= dal_i2caux_create(dc_ctx
);
507 if (!dc_ctx
->i2caux
) {
508 ASSERT_CRITICAL(false);
509 goto failed_to_create_i2caux
;
512 /* Create GPIO service */
513 dc_ctx
->gpio_service
= dal_gpio_service_create(
515 dc_ctx
->dce_environment
,
518 if (!dc_ctx
->gpio_service
) {
519 ASSERT_CRITICAL(false);
523 dc
->res_pool
= dc_create_resource_pool(
525 init_params
->num_virtual_links
,
527 init_params
->asic_id
);
529 goto create_resource_fail
;
531 if (!create_links(dc
, init_params
->num_virtual_links
))
532 goto create_links_fail
;
534 allocate_dc_stream_funcs(dc
);
538 /**** error handling here ****/
540 create_resource_fail
:
542 failed_to_create_i2caux
:
552 void ProgramPixelDurationV(unsigned int pixelClockInKHz )
554 fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
555 unsigned int pixDurationInPico = round(pixel_duration);
557 DPG_PIPE_ARBITRATION_CONTROL1 arb_control;
559 arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
560 arb_control.bits.PIXEL_DURATION = pixDurationInPico;
561 WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
563 arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
564 arb_control.bits.PIXEL_DURATION = pixDurationInPico;
565 WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
567 WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
568 WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);
570 WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
571 WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
575 /*******************************************************************************
577 ******************************************************************************/
579 struct dc
*dc_create(const struct dc_init_data
*init_params
)
581 struct core_dc
*core_dc
= dm_alloc(sizeof(*core_dc
));
582 unsigned int full_pipe_count
;
587 if (false == construct(core_dc
, init_params
))
590 /*TODO: separate HW and SW initialization*/
591 core_dc
->hwss
.init_hw(core_dc
);
593 full_pipe_count
= core_dc
->res_pool
->pipe_count
;
594 if (core_dc
->res_pool
->underlay_pipe_index
!= NO_UNDERLAY_PIPE
)
596 core_dc
->public.caps
.max_streams
= min(
598 core_dc
->res_pool
->stream_enc_count
);
600 core_dc
->public.caps
.max_links
= core_dc
->link_count
;
601 core_dc
->public.caps
.max_audios
= core_dc
->res_pool
->audio_count
;
603 core_dc
->public.config
= init_params
->flags
;
605 dm_logger_write(core_dc
->ctx
->logger
, LOG_DC
,
606 "Display Core initialized\n");
609 /* TODO: missing feature to be enabled */
610 core_dc
->public.debug
.disable_dfs_bypass
= true;
612 return &core_dc
->public;
621 void dc_destroy(struct dc
**dc
)
623 struct core_dc
*core_dc
= DC_TO_CORE(*dc
);
629 static bool is_validation_required(
630 const struct core_dc
*dc
,
631 const struct dc_validation_set set
[],
634 const struct validate_context
*context
= dc
->current_context
;
637 if (context
->stream_count
!= set_count
)
640 for (i
= 0; i
< set_count
; i
++) {
642 if (set
[i
].surface_count
!= context
->stream_status
[i
].surface_count
)
644 if (!dc_is_stream_unchanged(set
[i
].stream
, context
->streams
[i
]))
647 for (j
= 0; j
< set
[i
].surface_count
; j
++) {
648 struct dc_plane_state temp_surf
;
649 memset(&temp_surf
, 0, sizeof(temp_surf
));
651 temp_surf
= *context
->stream_status
[i
].surfaces
[j
];
652 temp_surf
.clip_rect
= set
[i
].surfaces
[j
]->clip_rect
;
653 temp_surf
.dst_rect
.x
= set
[i
].surfaces
[j
]->dst_rect
.x
;
654 temp_surf
.dst_rect
.y
= set
[i
].surfaces
[j
]->dst_rect
.y
;
656 if (memcmp(&temp_surf
, set
[i
].surfaces
[j
], sizeof(temp_surf
)) != 0)
664 static bool validate_streams (
666 const struct dc_validation_set set
[],
671 for (i
= 0; i
< set_count
; i
++)
672 if (!dc_validate_stream(dc
, set
[i
].stream
))
678 static bool validate_surfaces(
680 const struct dc_validation_set set
[],
685 for (i
= 0; i
< set_count
; i
++)
686 for (j
= 0; j
< set
[i
].surface_count
; j
++)
687 if (!dc_validate_plane(dc
, set
[i
].surfaces
[j
]))
693 struct validate_context
*dc_get_validate_context(
695 const struct dc_validation_set set
[],
698 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
699 enum dc_status result
= DC_ERROR_UNEXPECTED
;
700 struct validate_context
*context
;
703 context
= dm_alloc(sizeof(struct validate_context
));
705 goto context_alloc_fail
;
707 ++context
->ref_count
;
709 if (!is_validation_required(core_dc
, set
, set_count
)) {
710 dc_resource_validate_ctx_copy_construct(core_dc
->current_context
, context
);
714 result
= core_dc
->res_pool
->funcs
->validate_with_context(
715 core_dc
, set
, set_count
, context
, core_dc
->current_context
);
718 if (result
!= DC_OK
) {
719 dm_logger_write(core_dc
->ctx
->logger
, LOG_WARNING
,
720 "%s:resource validation failed, dc_status:%d\n",
724 dc_release_validate_context(context
);
732 bool dc_validate_resources(
734 const struct dc_validation_set set
[],
737 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
738 enum dc_status result
= DC_ERROR_UNEXPECTED
;
739 struct validate_context
*context
;
741 if (!validate_streams(dc
, set
, set_count
))
744 if (!validate_surfaces(dc
, set
, set_count
))
747 context
= dm_alloc(sizeof(struct validate_context
));
749 goto context_alloc_fail
;
751 ++context
->ref_count
;
753 result
= core_dc
->res_pool
->funcs
->validate_with_context(
754 core_dc
, set
, set_count
, context
, NULL
);
757 if (result
!= DC_OK
) {
758 dm_logger_write(core_dc
->ctx
->logger
, LOG_WARNING
,
759 "%s:resource validation failed, dc_status:%d\n",
764 dc_release_validate_context(context
);
767 return result
== DC_OK
;
770 bool dc_validate_guaranteed(
772 struct dc_stream_state
*stream
)
774 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
775 enum dc_status result
= DC_ERROR_UNEXPECTED
;
776 struct validate_context
*context
;
778 if (!dc_validate_stream(dc
, stream
))
781 context
= dm_alloc(sizeof(struct validate_context
));
783 goto context_alloc_fail
;
785 ++context
->ref_count
;
787 result
= core_dc
->res_pool
->funcs
->validate_guaranteed(
788 core_dc
, stream
, context
);
790 dc_release_validate_context(context
);
793 if (result
!= DC_OK
) {
794 dm_logger_write(core_dc
->ctx
->logger
, LOG_WARNING
,
795 "%s:guaranteed validation failed, dc_status:%d\n",
800 return (result
== DC_OK
);
803 static void program_timing_sync(
804 struct core_dc
*core_dc
,
805 struct validate_context
*ctx
)
809 int pipe_count
= core_dc
->res_pool
->pipe_count
;
810 struct pipe_ctx
*unsynced_pipes
[MAX_PIPES
] = { NULL
};
812 for (i
= 0; i
< pipe_count
; i
++) {
813 if (!ctx
->res_ctx
.pipe_ctx
[i
].stream
|| ctx
->res_ctx
.pipe_ctx
[i
].top_pipe
)
816 unsynced_pipes
[i
] = &ctx
->res_ctx
.pipe_ctx
[i
];
819 for (i
= 0; i
< pipe_count
; i
++) {
821 struct pipe_ctx
*pipe_set
[MAX_PIPES
];
823 if (!unsynced_pipes
[i
])
826 pipe_set
[0] = unsynced_pipes
[i
];
827 unsynced_pipes
[i
] = NULL
;
829 /* Add tg to the set, search rest of the tg's for ones with
830 * same timing, add all tgs with same timing to the group
832 for (j
= i
+ 1; j
< pipe_count
; j
++) {
833 if (!unsynced_pipes
[j
])
836 if (resource_are_streams_timing_synchronizable(
837 unsynced_pipes
[j
]->stream
,
838 pipe_set
[0]->stream
)) {
839 pipe_set
[group_size
] = unsynced_pipes
[j
];
840 unsynced_pipes
[j
] = NULL
;
845 /* set first unblanked pipe as master */
846 for (j
= 0; j
< group_size
; j
++) {
847 struct pipe_ctx
*temp
;
849 if (!pipe_set
[j
]->tg
->funcs
->is_blanked(pipe_set
[j
]->tg
)) {
854 pipe_set
[0] = pipe_set
[j
];
860 /* remove any other unblanked pipes as they have already been synced */
861 for (j
= j
+ 1; j
< group_size
; j
++) {
862 if (!pipe_set
[j
]->tg
->funcs
->is_blanked(pipe_set
[j
]->tg
)) {
864 pipe_set
[j
] = pipe_set
[group_size
];
869 if (group_size
> 1) {
870 core_dc
->hwss
.enable_timing_synchronization(
871 core_dc
, group_index
, group_size
, pipe_set
);
877 static bool context_changed(
879 struct validate_context
*context
)
883 if (context
->stream_count
!= dc
->current_context
->stream_count
)
886 for (i
= 0; i
< dc
->current_context
->stream_count
; i
++) {
887 if (dc
->current_context
->streams
[i
] != context
->streams
[i
])
894 static bool streams_changed(
896 struct dc_stream_state
*streams
[],
897 uint8_t stream_count
)
901 if (stream_count
!= dc
->current_context
->stream_count
)
904 for (i
= 0; i
< dc
->current_context
->stream_count
; i
++) {
905 if (dc
->current_context
->streams
[i
] != streams
[i
])
912 bool dc_enable_stereo(
914 struct validate_context
*context
,
915 struct dc_stream_state
*streams
[],
916 uint8_t stream_count
)
920 struct pipe_ctx
*pipe
;
921 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
924 struct compressor
*fbc_compressor
= core_dc
->fbc_compressor
;
927 for (i
= 0; i
< MAX_PIPES
; i
++) {
929 pipe
= &context
->res_ctx
.pipe_ctx
[i
];
931 pipe
= &core_dc
->current_context
->res_ctx
.pipe_ctx
[i
];
932 for (j
= 0 ; pipe
&& j
< stream_count
; j
++) {
933 if (streams
[j
] && streams
[j
] == pipe
->stream
&&
934 core_dc
->hwss
.setup_stereo
)
935 core_dc
->hwss
.setup_stereo(pipe
, core_dc
);
940 if (fbc_compressor
!= NULL
&&
941 fbc_compressor
->funcs
->is_fbc_enabled_in_hw(core_dc
->fbc_compressor
,
943 fbc_compressor
->funcs
->disable_fbc(fbc_compressor
);
951 * Applies given context to HW and copy it into current context.
952 * It's up to the user to release the src context afterwards.
954 static bool dc_commit_context_no_check(struct dc
*dc
, struct validate_context
*context
)
956 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
957 struct dc_bios
*dcb
= core_dc
->ctx
->dc_bios
;
958 enum dc_status result
= DC_ERROR_UNEXPECTED
;
959 struct pipe_ctx
*pipe
;
961 struct dc_stream_state
*dc_streams
[MAX_STREAMS
] = {0};
963 for (i
= 0; i
< context
->stream_count
; i
++)
964 dc_streams
[i
] = context
->streams
[i
];
966 if (!dcb
->funcs
->is_accelerated_mode(dcb
))
967 core_dc
->hwss
.enable_accelerated_mode(core_dc
);
969 for (i
= 0; i
< core_dc
->res_pool
->pipe_count
; i
++) {
970 pipe
= &context
->res_ctx
.pipe_ctx
[i
];
971 core_dc
->hwss
.wait_for_mpcc_disconnect(core_dc
, core_dc
->res_pool
, pipe
);
973 result
= core_dc
->hwss
.apply_ctx_to_hw(core_dc
, context
);
975 program_timing_sync(core_dc
, context
);
977 for (i
= 0; i
< context
->stream_count
; i
++) {
978 const struct dc_sink
*sink
= context
->streams
[i
]->sink
;
980 for (j
= 0; j
< context
->stream_status
[i
].surface_count
; j
++) {
981 const struct dc_plane_state
*surface
=
982 context
->stream_status
[i
].surfaces
[j
];
984 core_dc
->hwss
.apply_ctx_for_surface(core_dc
, surface
, context
);
988 * TODO rework dc_enable_stereo call to work with validation sets?
990 for (k
= 0; k
< MAX_PIPES
; k
++) {
991 pipe
= &context
->res_ctx
.pipe_ctx
[k
];
993 for (l
= 0 ; pipe
&& l
< context
->stream_count
; l
++) {
994 if (context
->streams
[l
] &&
995 context
->streams
[l
] == pipe
->stream
&&
996 core_dc
->hwss
.setup_stereo
)
997 core_dc
->hwss
.setup_stereo(pipe
, core_dc
);
1002 CONN_MSG_MODE(sink
->link
, "{%dx%d, %dx%d@%dKhz}",
1003 context
->streams
[i
]->timing
.h_addressable
,
1004 context
->streams
[i
]->timing
.v_addressable
,
1005 context
->streams
[i
]->timing
.h_total
,
1006 context
->streams
[i
]->timing
.v_total
,
1007 context
->streams
[i
]->timing
.pix_clk_khz
);
1010 dc_enable_stereo(dc
, context
, dc_streams
, context
->stream_count
);
1012 dc_release_validate_context(core_dc
->current_context
);
1014 core_dc
->current_context
= context
;
1016 dc_retain_validate_context(core_dc
->current_context
);
1018 return (result
== DC_OK
);
1021 bool dc_commit_context(struct dc
*dc
, struct validate_context
*context
)
1023 enum dc_status result
= DC_ERROR_UNEXPECTED
;
1024 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1027 if (false == context_changed(core_dc
, context
))
1030 dm_logger_write(core_dc
->ctx
->logger
, LOG_DC
, "%s: %d streams\n",
1031 __func__
, context
->stream_count
);
1033 for (i
= 0; i
< context
->stream_count
; i
++) {
1034 struct dc_stream_state
*stream
= context
->streams
[i
];
1036 dc_stream_log(stream
,
1037 core_dc
->ctx
->logger
,
1041 result
= dc_commit_context_no_check(dc
, context
);
1043 return (result
== DC_OK
);
1047 bool dc_commit_streams(
1049 struct dc_stream_state
*streams
[],
1050 uint8_t stream_count
)
1052 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1053 enum dc_status result
= DC_ERROR_UNEXPECTED
;
1054 struct validate_context
*context
;
1055 struct dc_validation_set set
[MAX_STREAMS
] = { {0, {0} } };
1058 if (false == streams_changed(core_dc
, streams
, stream_count
))
1061 dm_logger_write(core_dc
->ctx
->logger
, LOG_DC
, "%s: %d streams\n",
1062 __func__
, stream_count
);
1064 for (i
= 0; i
< stream_count
; i
++) {
1065 struct dc_stream_state
*stream
= streams
[i
];
1066 struct dc_stream_status
*status
= dc_stream_get_status(stream
);
1069 dc_stream_log(stream
,
1070 core_dc
->ctx
->logger
,
1073 set
[i
].stream
= stream
;
1076 set
[i
].surface_count
= status
->surface_count
;
1077 for (j
= 0; j
< status
->surface_count
; j
++)
1078 set
[i
].surfaces
[j
] = status
->surfaces
[j
];
1083 if (!validate_streams(dc
, set
, stream_count
))
1086 if (!validate_surfaces(dc
, set
, stream_count
))
1089 context
= dm_alloc(sizeof(struct validate_context
));
1090 if (context
== NULL
)
1091 goto context_alloc_fail
;
1093 ++context
->ref_count
;
1095 result
= core_dc
->res_pool
->funcs
->validate_with_context(
1096 core_dc
, set
, stream_count
, context
, core_dc
->current_context
);
1097 if (result
!= DC_OK
){
1098 dm_logger_write(core_dc
->ctx
->logger
, LOG_ERROR
,
1099 "%s: Context validation failed! dc_status:%d\n",
1102 BREAK_TO_DEBUGGER();
1106 result
= dc_commit_context_no_check(dc
, context
);
1109 dc_release_validate_context(context
);
1112 return (result
== DC_OK
);
1115 bool dc_post_update_surfaces_to_stream(struct dc
*dc
)
1118 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1119 struct validate_context
*context
= core_dc
->current_context
;
1121 post_surface_trace(dc
);
1123 for (i
= 0; i
< core_dc
->res_pool
->pipe_count
; i
++)
1124 if (context
->res_ctx
.pipe_ctx
[i
].stream
== NULL
1125 || context
->res_ctx
.pipe_ctx
[i
].surface
== NULL
)
1126 core_dc
->hwss
.power_down_front_end(core_dc
, i
);
1128 /* 3rd param should be true, temp w/a for RV*/
1129 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1130 core_dc
->hwss
.set_bandwidth(core_dc
, context
, core_dc
->ctx
->dce_version
< DCN_VERSION_1_0
);
1132 core_dc
->hwss
.set_bandwidth(core_dc
, context
, true);
1137 bool dc_commit_surfaces_to_stream(
1139 struct dc_plane_state
**new_surfaces
,
1140 uint8_t new_surface_count
,
1141 struct dc_stream_state
*dc_stream
)
1143 struct dc_surface_update updates
[MAX_SURFACES
];
1144 struct dc_flip_addrs flip_addr
[MAX_SURFACES
];
1145 struct dc_plane_info plane_info
[MAX_SURFACES
];
1146 struct dc_scaling_info scaling_info
[MAX_SURFACES
];
1148 struct dc_stream_update
*stream_update
=
1149 dm_alloc(sizeof(struct dc_stream_update
));
1151 if (!stream_update
) {
1152 BREAK_TO_DEBUGGER();
1156 memset(updates
, 0, sizeof(updates
));
1157 memset(flip_addr
, 0, sizeof(flip_addr
));
1158 memset(plane_info
, 0, sizeof(plane_info
));
1159 memset(scaling_info
, 0, sizeof(scaling_info
));
1161 stream_update
->src
= dc_stream
->src
;
1162 stream_update
->dst
= dc_stream
->dst
;
1163 stream_update
->out_transfer_func
= dc_stream
->out_transfer_func
;
1165 for (i
= 0; i
< new_surface_count
; i
++) {
1166 updates
[i
].surface
= new_surfaces
[i
];
1168 (struct dc_gamma
*)new_surfaces
[i
]->gamma_correction
;
1169 updates
[i
].in_transfer_func
= new_surfaces
[i
]->in_transfer_func
;
1170 flip_addr
[i
].address
= new_surfaces
[i
]->address
;
1171 flip_addr
[i
].flip_immediate
= new_surfaces
[i
]->flip_immediate
;
1172 plane_info
[i
].color_space
= new_surfaces
[i
]->color_space
;
1173 plane_info
[i
].format
= new_surfaces
[i
]->format
;
1174 plane_info
[i
].plane_size
= new_surfaces
[i
]->plane_size
;
1175 plane_info
[i
].rotation
= new_surfaces
[i
]->rotation
;
1176 plane_info
[i
].horizontal_mirror
= new_surfaces
[i
]->horizontal_mirror
;
1177 plane_info
[i
].stereo_format
= new_surfaces
[i
]->stereo_format
;
1178 plane_info
[i
].tiling_info
= new_surfaces
[i
]->tiling_info
;
1179 plane_info
[i
].visible
= new_surfaces
[i
]->visible
;
1180 plane_info
[i
].per_pixel_alpha
= new_surfaces
[i
]->per_pixel_alpha
;
1181 plane_info
[i
].dcc
= new_surfaces
[i
]->dcc
;
1182 scaling_info
[i
].scaling_quality
= new_surfaces
[i
]->scaling_quality
;
1183 scaling_info
[i
].src_rect
= new_surfaces
[i
]->src_rect
;
1184 scaling_info
[i
].dst_rect
= new_surfaces
[i
]->dst_rect
;
1185 scaling_info
[i
].clip_rect
= new_surfaces
[i
]->clip_rect
;
1187 updates
[i
].flip_addr
= &flip_addr
[i
];
1188 updates
[i
].plane_info
= &plane_info
[i
];
1189 updates
[i
].scaling_info
= &scaling_info
[i
];
1192 dc_update_surfaces_and_stream(
1196 dc_stream
, stream_update
);
1198 dc_post_update_surfaces_to_stream(dc
);
1200 dm_free(stream_update
);
1204 void dc_retain_validate_context(struct validate_context
*context
)
1206 ASSERT(context
->ref_count
> 0);
1207 ++context
->ref_count
;
1210 void dc_release_validate_context(struct validate_context
*context
)
1212 ASSERT(context
->ref_count
> 0);
1213 --context
->ref_count
;
1215 if (context
->ref_count
== 0) {
1216 dc_resource_validate_ctx_destruct(context
);
1221 static bool is_surface_in_context(
1222 const struct validate_context
*context
,
1223 const struct dc_plane_state
*surface
)
1227 for (j
= 0; j
< MAX_PIPES
; j
++) {
1228 const struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1230 if (surface
== pipe_ctx
->surface
) {
1238 static unsigned int pixel_format_to_bpp(enum surface_pixel_format format
)
1241 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
:
1242 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
:
1244 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555
:
1245 case SURFACE_PIXEL_FORMAT_GRPH_RGB565
:
1246 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr
:
1247 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb
:
1249 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
:
1250 case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
:
1251 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
:
1252 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
:
1254 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616
:
1255 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F
:
1256 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
:
1259 ASSERT_CRITICAL(false);
1264 static enum surface_update_type
get_plane_info_update_type(
1265 const struct dc_surface_update
*u
,
1268 struct dc_plane_info temp_plane_info
;
1269 memset(&temp_plane_info
, 0, sizeof(temp_plane_info
));
1272 return UPDATE_TYPE_FAST
;
1274 temp_plane_info
= *u
->plane_info
;
1276 /* Copy all parameters that will cause a full update
1277 * from current surface, the rest of the parameters
1278 * from provided plane configuration.
1279 * Perform memory compare and special validation
1280 * for those that can cause fast/medium updates
1283 /* Full update parameters */
1284 temp_plane_info
.color_space
= u
->surface
->color_space
;
1285 temp_plane_info
.dcc
= u
->surface
->dcc
;
1286 temp_plane_info
.horizontal_mirror
= u
->surface
->horizontal_mirror
;
1287 temp_plane_info
.plane_size
= u
->surface
->plane_size
;
1288 temp_plane_info
.rotation
= u
->surface
->rotation
;
1289 temp_plane_info
.stereo_format
= u
->surface
->stereo_format
;
1290 temp_plane_info
.tiling_info
= u
->surface
->tiling_info
;
1292 if (surface_index
== 0)
1293 temp_plane_info
.visible
= u
->plane_info
->visible
;
1295 temp_plane_info
.visible
= u
->surface
->visible
;
1297 if (memcmp(u
->plane_info
, &temp_plane_info
,
1298 sizeof(struct dc_plane_info
)) != 0)
1299 return UPDATE_TYPE_FULL
;
1301 if (pixel_format_to_bpp(u
->plane_info
->format
) !=
1302 pixel_format_to_bpp(u
->surface
->format
)) {
1303 return UPDATE_TYPE_FULL
;
1305 return UPDATE_TYPE_MED
;
1309 static enum surface_update_type
get_scaling_info_update_type(
1310 const struct dc_surface_update
*u
)
1312 if (!u
->scaling_info
)
1313 return UPDATE_TYPE_FAST
;
1315 if (u
->scaling_info
->src_rect
.width
!= u
->surface
->src_rect
.width
1316 || u
->scaling_info
->src_rect
.height
!= u
->surface
->src_rect
.height
1317 || u
->scaling_info
->clip_rect
.width
!= u
->surface
->clip_rect
.width
1318 || u
->scaling_info
->clip_rect
.height
!= u
->surface
->clip_rect
.height
1319 || u
->scaling_info
->dst_rect
.width
!= u
->surface
->dst_rect
.width
1320 || u
->scaling_info
->dst_rect
.height
!= u
->surface
->dst_rect
.height
)
1321 return UPDATE_TYPE_FULL
;
1323 if (u
->scaling_info
->src_rect
.x
!= u
->surface
->src_rect
.x
1324 || u
->scaling_info
->src_rect
.y
!= u
->surface
->src_rect
.y
1325 || u
->scaling_info
->clip_rect
.x
!= u
->surface
->clip_rect
.x
1326 || u
->scaling_info
->clip_rect
.y
!= u
->surface
->clip_rect
.y
1327 || u
->scaling_info
->dst_rect
.x
!= u
->surface
->dst_rect
.x
1328 || u
->scaling_info
->dst_rect
.y
!= u
->surface
->dst_rect
.y
)
1329 return UPDATE_TYPE_MED
;
1331 return UPDATE_TYPE_FAST
;
1334 static enum surface_update_type
det_surface_update(
1335 const struct core_dc
*dc
,
1336 const struct dc_surface_update
*u
,
1339 const struct validate_context
*context
= dc
->current_context
;
1340 enum surface_update_type type
= UPDATE_TYPE_FAST
;
1341 enum surface_update_type overall_type
= UPDATE_TYPE_FAST
;
1343 if (!is_surface_in_context(context
, u
->surface
))
1344 return UPDATE_TYPE_FULL
;
1346 type
= get_plane_info_update_type(u
, surface_index
);
1347 if (overall_type
< type
)
1348 overall_type
= type
;
1350 type
= get_scaling_info_update_type(u
);
1351 if (overall_type
< type
)
1352 overall_type
= type
;
1354 if (u
->in_transfer_func
||
1355 u
->hdr_static_metadata
) {
1356 if (overall_type
< UPDATE_TYPE_MED
)
1357 overall_type
= UPDATE_TYPE_MED
;
1360 return overall_type
;
1363 enum surface_update_type
dc_check_update_surfaces_for_stream(
1365 struct dc_surface_update
*updates
,
1367 struct dc_stream_update
*stream_update
,
1368 const struct dc_stream_status
*stream_status
)
1370 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1372 enum surface_update_type overall_type
= UPDATE_TYPE_FAST
;
1374 if (stream_status
== NULL
|| stream_status
->surface_count
!= surface_count
)
1375 return UPDATE_TYPE_FULL
;
1378 return UPDATE_TYPE_FULL
;
1380 for (i
= 0 ; i
< surface_count
; i
++) {
1381 enum surface_update_type type
=
1382 det_surface_update(core_dc
, &updates
[i
], i
);
1384 if (type
== UPDATE_TYPE_FULL
)
1387 if (overall_type
< type
)
1388 overall_type
= type
;
1391 return overall_type
;
1394 enum surface_update_type update_surface_trace_level
= UPDATE_TYPE_FULL
;
1396 void dc_update_surfaces_and_stream(struct dc
*dc
,
1397 struct dc_surface_update
*srf_updates
, int surface_count
,
1398 struct dc_stream_state
*stream
,
1399 struct dc_stream_update
*stream_update
)
1401 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1402 struct validate_context
*context
;
1404 enum surface_update_type update_type
;
1405 const struct dc_stream_status
*stream_status
;
1406 struct dc_context
*dc_ctx
= core_dc
->ctx
;
1408 /* Currently this function do not result in any HW programming
1409 * when called with 0 surface. But proceeding will cause
1410 * SW state to be updated in validate_context. So we might as
1411 * well make it not do anything at all until the hw programming
1412 * is implemented properly to handle 0 surface case.
1413 * TODO: fix hw programming then remove this early return
1415 if (surface_count
== 0)
1418 stream_status
= dc_stream_get_status(stream
);
1420 ASSERT(stream_status
);
1422 return; /* Cannot commit surface to stream that is not committed */
1425 if (srf_updates
->flip_addr
) {
1426 if (srf_updates
->flip_addr
->address
.grph
.addr
.low_part
== 0)
1430 context
= core_dc
->current_context
;
1432 /* update current stream with the new updates */
1433 if (stream_update
) {
1434 if ((stream_update
->src
.height
!= 0) &&
1435 (stream_update
->src
.width
!= 0))
1436 stream
->src
= stream_update
->src
;
1438 if ((stream_update
->dst
.height
!= 0) &&
1439 (stream_update
->dst
.width
!= 0))
1440 stream
->dst
= stream_update
->dst
;
1442 if (stream_update
->out_transfer_func
&&
1443 stream_update
->out_transfer_func
!=
1444 stream
->out_transfer_func
) {
1445 if (stream
->out_transfer_func
!= NULL
)
1446 dc_transfer_func_release(stream
->out_transfer_func
);
1447 dc_transfer_func_retain(stream_update
->out_transfer_func
);
1448 stream
->out_transfer_func
=
1449 stream_update
->out_transfer_func
;
1453 /* do not perform surface update if surface has invalid dimensions
1454 * (all zero) and no scaling_info is provided
1456 if (surface_count
> 0 &&
1457 srf_updates
->surface
->src_rect
.width
== 0 &&
1458 srf_updates
->surface
->src_rect
.height
== 0 &&
1459 srf_updates
->surface
->dst_rect
.width
== 0 &&
1460 srf_updates
->surface
->dst_rect
.height
== 0 &&
1461 !srf_updates
->scaling_info
) {
1466 update_type
= dc_check_update_surfaces_for_stream(
1467 dc
, srf_updates
, surface_count
, stream_update
, stream_status
);
1469 if (update_type
>= update_surface_trace_level
)
1470 update_surface_trace(dc
, srf_updates
, surface_count
);
1472 if (update_type
>= UPDATE_TYPE_FULL
) {
1473 struct dc_plane_state
*new_surfaces
[MAX_SURFACES
] = {0};
1475 for (i
= 0; i
< surface_count
; i
++)
1476 new_surfaces
[i
] = srf_updates
[i
].surface
;
1478 /* initialize scratch memory for building context */
1479 context
= dm_alloc(sizeof(*context
));
1480 if (context
== NULL
)
1481 goto context_alloc_fail
;
1483 ++context
->ref_count
;
1485 dc_resource_validate_ctx_copy_construct(
1486 core_dc
->current_context
, context
);
1488 /* add surface to context */
1489 if (!resource_attach_surfaces_to_context(
1490 new_surfaces
, surface_count
, stream
,
1491 context
, core_dc
->res_pool
)) {
1492 BREAK_TO_DEBUGGER();
1497 /* save update parameters into surface */
1498 for (i
= 0; i
< surface_count
; i
++) {
1499 struct dc_plane_state
*surface
= srf_updates
[i
].surface
;
1501 if (srf_updates
[i
].flip_addr
) {
1502 surface
->address
= srf_updates
[i
].flip_addr
->address
;
1503 surface
->flip_immediate
=
1504 srf_updates
[i
].flip_addr
->flip_immediate
;
1507 if (srf_updates
[i
].scaling_info
) {
1508 surface
->scaling_quality
=
1509 srf_updates
[i
].scaling_info
->scaling_quality
;
1511 srf_updates
[i
].scaling_info
->dst_rect
;
1513 srf_updates
[i
].scaling_info
->src_rect
;
1514 surface
->clip_rect
=
1515 srf_updates
[i
].scaling_info
->clip_rect
;
1518 if (srf_updates
[i
].plane_info
) {
1519 surface
->color_space
=
1520 srf_updates
[i
].plane_info
->color_space
;
1522 srf_updates
[i
].plane_info
->format
;
1523 surface
->plane_size
=
1524 srf_updates
[i
].plane_info
->plane_size
;
1526 srf_updates
[i
].plane_info
->rotation
;
1527 surface
->horizontal_mirror
=
1528 srf_updates
[i
].plane_info
->horizontal_mirror
;
1529 surface
->stereo_format
=
1530 srf_updates
[i
].plane_info
->stereo_format
;
1531 surface
->tiling_info
=
1532 srf_updates
[i
].plane_info
->tiling_info
;
1534 srf_updates
[i
].plane_info
->visible
;
1535 surface
->per_pixel_alpha
=
1536 srf_updates
[i
].plane_info
->per_pixel_alpha
;
1538 srf_updates
[i
].plane_info
->dcc
;
1541 if (update_type
>= UPDATE_TYPE_MED
) {
1542 for (j
= 0; j
< core_dc
->res_pool
->pipe_count
; j
++) {
1543 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1545 if (pipe_ctx
->surface
!= surface
)
1548 resource_build_scaling_params(pipe_ctx
);
1552 if (srf_updates
[i
].gamma
&&
1553 srf_updates
[i
].gamma
!= surface
->gamma_correction
) {
1554 if (surface
->gamma_correction
!= NULL
)
1555 dc_gamma_release(&surface
->gamma_correction
);
1557 dc_gamma_retain(srf_updates
[i
].gamma
);
1558 surface
->gamma_correction
= srf_updates
[i
].gamma
;
1561 if (srf_updates
[i
].in_transfer_func
&&
1562 srf_updates
[i
].in_transfer_func
!= surface
->in_transfer_func
) {
1563 if (surface
->in_transfer_func
!= NULL
)
1564 dc_transfer_func_release(
1568 dc_transfer_func_retain(
1569 srf_updates
[i
].in_transfer_func
);
1570 surface
->in_transfer_func
=
1571 srf_updates
[i
].in_transfer_func
;
1574 if (srf_updates
[i
].hdr_static_metadata
)
1575 surface
->hdr_static_ctx
=
1576 *(srf_updates
[i
].hdr_static_metadata
);
1579 if (update_type
== UPDATE_TYPE_FULL
) {
1580 if (!core_dc
->res_pool
->funcs
->validate_bandwidth(core_dc
, context
)) {
1581 BREAK_TO_DEBUGGER();
1584 core_dc
->hwss
.set_bandwidth(core_dc
, context
, false);
1585 context_clock_trace(dc
, context
);
1589 if (update_type
> UPDATE_TYPE_FAST
) {
1590 for (j
= 0; j
< core_dc
->res_pool
->pipe_count
; j
++) {
1591 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1593 core_dc
->hwss
.wait_for_mpcc_disconnect(core_dc
, core_dc
->res_pool
, pipe_ctx
);
1597 if (surface_count
== 0)
1598 core_dc
->hwss
.apply_ctx_for_surface(core_dc
, NULL
, context
);
1600 /* Lock pipes for provided surfaces, or all active if full update*/
1601 for (i
= 0; i
< surface_count
; i
++) {
1602 struct dc_plane_state
*surface
= srf_updates
[i
].surface
;
1604 for (j
= 0; j
< core_dc
->res_pool
->pipe_count
; j
++) {
1605 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1607 if (update_type
!= UPDATE_TYPE_FULL
&& pipe_ctx
->surface
!= surface
)
1609 if (!pipe_ctx
->surface
|| pipe_ctx
->top_pipe
)
1612 core_dc
->hwss
.pipe_control_lock(
1617 if (update_type
== UPDATE_TYPE_FULL
)
1622 for (j
= 0; j
< core_dc
->res_pool
->pipe_count
; j
++) {
1623 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1624 struct pipe_ctx
*cur_pipe_ctx
= &core_dc
->current_context
->res_ctx
.pipe_ctx
[j
];
1625 bool is_new_pipe_surface
= cur_pipe_ctx
->surface
!= pipe_ctx
->surface
;
1626 struct dc_cursor_position position
= { 0 };
1628 if (update_type
!= UPDATE_TYPE_FULL
|| !pipe_ctx
->surface
)
1631 if (!pipe_ctx
->top_pipe
)
1632 core_dc
->hwss
.apply_ctx_for_surface(
1633 core_dc
, pipe_ctx
->surface
, context
);
1635 /* TODO: this is a hack w/a for switching from mpo to pipe split */
1636 dc_stream_set_cursor_position(pipe_ctx
->stream
, &position
);
1638 if (is_new_pipe_surface
) {
1639 core_dc
->hwss
.update_plane_addr(core_dc
, pipe_ctx
);
1640 core_dc
->hwss
.set_input_transfer_func(
1641 pipe_ctx
, pipe_ctx
->surface
);
1642 core_dc
->hwss
.set_output_transfer_func(
1643 pipe_ctx
, pipe_ctx
->stream
);
1647 if (update_type
> UPDATE_TYPE_FAST
)
1648 context_timing_trace(dc
, &context
->res_ctx
);
1650 /* Perform requested Updates */
1651 for (i
= 0; i
< surface_count
; i
++) {
1652 struct dc_plane_state
*surface
= srf_updates
[i
].surface
;
1654 if (update_type
== UPDATE_TYPE_MED
)
1655 core_dc
->hwss
.apply_ctx_for_surface(
1656 core_dc
, surface
, context
);
1658 for (j
= 0; j
< core_dc
->res_pool
->pipe_count
; j
++) {
1659 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1661 if (pipe_ctx
->surface
!= surface
)
1664 if (srf_updates
[i
].flip_addr
)
1665 core_dc
->hwss
.update_plane_addr(core_dc
, pipe_ctx
);
1667 if (update_type
== UPDATE_TYPE_FAST
)
1670 if (srf_updates
[i
].in_transfer_func
)
1671 core_dc
->hwss
.set_input_transfer_func(
1672 pipe_ctx
, pipe_ctx
->surface
);
1674 if (stream_update
!= NULL
&&
1675 stream_update
->out_transfer_func
!= NULL
) {
1676 core_dc
->hwss
.set_output_transfer_func(
1677 pipe_ctx
, pipe_ctx
->stream
);
1680 if (srf_updates
[i
].hdr_static_metadata
) {
1681 resource_build_info_frame(pipe_ctx
);
1682 core_dc
->hwss
.update_info_frame(pipe_ctx
);
1688 for (i
= core_dc
->res_pool
->pipe_count
- 1; i
>= 0; i
--) {
1689 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[i
];
1691 for (j
= 0; j
< surface_count
; j
++) {
1692 if (update_type
!= UPDATE_TYPE_FULL
&&
1693 srf_updates
[j
].surface
!= pipe_ctx
->surface
)
1695 if (!pipe_ctx
->surface
|| pipe_ctx
->top_pipe
)
1698 core_dc
->hwss
.pipe_control_lock(
1707 if (core_dc
->current_context
!= context
) {
1708 dc_release_validate_context(core_dc
->current_context
);
1709 core_dc
->current_context
= context
;
1714 dc_release_validate_context(context
);
1717 DC_ERROR("Failed to allocate new validate context!\n");
1720 uint8_t dc_get_current_stream_count(const struct dc
*dc
)
1722 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1723 return core_dc
->current_context
->stream_count
;
1726 struct dc_stream_state
*dc_get_stream_at_index(const struct dc
*dc
, uint8_t i
)
1728 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1729 if (i
< core_dc
->current_context
->stream_count
)
1730 return core_dc
->current_context
->streams
[i
];
1734 struct dc_link
*dc_get_link_at_index(const struct dc
*dc
, uint32_t link_index
)
1736 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1737 return core_dc
->links
[link_index
];
1740 const struct graphics_object_id
dc_get_link_id_at_index(
1741 struct dc
*dc
, uint32_t link_index
)
1743 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1744 return core_dc
->links
[link_index
]->link_id
;
1747 enum dc_irq_source
dc_get_hpd_irq_source_at_index(
1748 struct dc
*dc
, uint32_t link_index
)
1750 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1751 return core_dc
->links
[link_index
]->irq_source_hpd
;
1754 const struct audio
**dc_get_audios(struct dc
*dc
)
1756 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1757 return (const struct audio
**)core_dc
->res_pool
->audios
;
1760 enum dc_irq_source
dc_interrupt_to_irq_source(
1765 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1766 return dal_irq_service_to_irq_source(core_dc
->res_pool
->irqs
, src_id
, ext_id
);
1769 void dc_interrupt_set(const struct dc
*dc
, enum dc_irq_source src
, bool enable
)
1771 struct core_dc
*core_dc
;
1775 core_dc
= DC_TO_CORE(dc
);
1777 dal_irq_service_set(core_dc
->res_pool
->irqs
, src
, enable
);
1780 void dc_interrupt_ack(struct dc
*dc
, enum dc_irq_source src
)
1782 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1783 dal_irq_service_ack(core_dc
->res_pool
->irqs
, src
);
1786 void dc_set_power_state(
1788 enum dc_acpi_cm_power_state power_state
)
1790 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1793 switch (power_state
) {
1794 case DC_ACPI_CM_POWER_STATE_D0
:
1795 core_dc
->hwss
.init_hw(core_dc
);
1799 core_dc
->hwss
.power_down(core_dc
);
1801 /* Zero out the current context so that on resume we start with
1802 * clean state, and dc hw programming optimizations will not
1803 * cause any trouble.
1806 /* Preserve refcount */
1807 ref_count
= core_dc
->current_context
->ref_count
;
1808 dc_resource_validate_ctx_destruct(core_dc
->current_context
);
1809 memset(core_dc
->current_context
, 0,
1810 sizeof(*core_dc
->current_context
));
1811 core_dc
->current_context
->ref_count
= ref_count
;
1818 void dc_resume(const struct dc
*dc
)
1820 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1824 for (i
= 0; i
< core_dc
->link_count
; i
++)
1825 core_link_resume(core_dc
->links
[i
]);
1828 bool dc_read_aux_dpcd(
1830 uint32_t link_index
,
1835 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1837 struct dc_link
*link
= core_dc
->links
[link_index
];
1838 enum ddc_result r
= dal_ddc_service_read_dpcd_data(
1845 return r
== DDC_RESULT_SUCESSFULL
;
1848 bool dc_write_aux_dpcd(
1850 uint32_t link_index
,
1852 const uint8_t *data
,
1855 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1856 struct dc_link
*link
= core_dc
->links
[link_index
];
1858 enum ddc_result r
= dal_ddc_service_write_dpcd_data(
1865 return r
== DDC_RESULT_SUCESSFULL
;
1868 bool dc_read_aux_i2c(
1870 uint32_t link_index
,
1871 enum i2c_mot_mode mot
,
1876 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1878 struct dc_link
*link
= core_dc
->links
[link_index
];
1879 enum ddc_result r
= dal_ddc_service_read_dpcd_data(
1886 return r
== DDC_RESULT_SUCESSFULL
;
1889 bool dc_write_aux_i2c(
1891 uint32_t link_index
,
1892 enum i2c_mot_mode mot
,
1894 const uint8_t *data
,
1897 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1898 struct dc_link
*link
= core_dc
->links
[link_index
];
1900 enum ddc_result r
= dal_ddc_service_write_dpcd_data(
1907 return r
== DDC_RESULT_SUCESSFULL
;
1910 bool dc_query_ddc_data(
1912 uint32_t link_index
,
1915 uint32_t write_size
,
1917 uint32_t read_size
) {
1919 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1921 struct dc_link
*link
= core_dc
->links
[link_index
];
1923 bool result
= dal_ddc_service_query_ddc_data(
1936 uint32_t link_index
,
1937 struct i2c_command
*cmd
)
1939 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
1941 struct dc_link
*link
= core_dc
->links
[link_index
];
1942 struct ddc_service
*ddc
= link
->ddc
;
1944 return dal_i2caux_submit_i2c_command(
1950 static bool link_add_remote_sink_helper(struct dc_link
*dc_link
, struct dc_sink
*sink
)
1952 if (dc_link
->sink_count
>= MAX_SINKS_PER_LINK
) {
1953 BREAK_TO_DEBUGGER();
1957 dc_sink_retain(sink
);
1959 dc_link
->remote_sinks
[dc_link
->sink_count
] = sink
;
1960 dc_link
->sink_count
++;
1965 struct dc_sink
*dc_link_add_remote_sink(
1966 struct dc_link
*link
,
1967 const uint8_t *edid
,
1969 struct dc_sink_init_data
*init_data
)
1971 struct dc_sink
*dc_sink
;
1972 enum dc_edid_status edid_status
;
1974 if (len
> MAX_EDID_BUFFER_SIZE
) {
1975 dm_error("Max EDID buffer size breached!\n");
1980 BREAK_TO_DEBUGGER();
1984 if (!init_data
->link
) {
1985 BREAK_TO_DEBUGGER();
1989 dc_sink
= dc_sink_create(init_data
);
1994 memmove(dc_sink
->dc_edid
.raw_edid
, edid
, len
);
1995 dc_sink
->dc_edid
.length
= len
;
1997 if (!link_add_remote_sink_helper(
2002 edid_status
= dm_helpers_parse_edid_caps(
2005 &dc_sink
->edid_caps
);
2007 if (edid_status
!= EDID_OK
)
2012 dc_link_remove_remote_sink(link
, dc_sink
);
2014 dc_sink_release(dc_sink
);
2018 void dc_link_set_sink(struct dc_link
*link
, struct dc_sink
*sink
)
2020 link
->local_sink
= sink
;
2023 link
->type
= dc_connection_none
;
2025 link
->type
= dc_connection_single
;
2029 void dc_link_remove_remote_sink(struct dc_link
*link
, struct dc_sink
*sink
)
2033 if (!link
->sink_count
) {
2034 BREAK_TO_DEBUGGER();
2038 for (i
= 0; i
< link
->sink_count
; i
++) {
2039 if (link
->remote_sinks
[i
] == sink
) {
2040 dc_sink_release(sink
);
2041 link
->remote_sinks
[i
] = NULL
;
2043 /* shrink array to remove empty place */
2044 while (i
< link
->sink_count
- 1) {
2045 link
->remote_sinks
[i
] = link
->remote_sinks
[i
+1];
2048 link
->remote_sinks
[i
] = NULL
;
2055 bool dc_init_dchub(struct dc
*dc
, struct dchub_init_data
*dh_data
)
2058 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
2059 struct mem_input
*mi
= NULL
;
2061 for (i
= 0; i
< core_dc
->res_pool
->pipe_count
; i
++) {
2062 if (core_dc
->res_pool
->mis
[i
] != NULL
) {
2063 mi
= core_dc
->res_pool
->mis
[i
];
2068 dm_error("no mem_input!\n");
2072 if (core_dc
->hwss
.update_dchub
)
2073 core_dc
->hwss
.update_dchub(core_dc
->hwseq
, dh_data
);
2075 ASSERT(core_dc
->hwss
.update_dchub
);
2082 void dc_log_hw_state(struct dc
*dc
)
2084 struct core_dc
*core_dc
= DC_TO_CORE(dc
);
2086 if (core_dc
->hwss
.log_hw_state
)
2087 core_dc
->hwss
.log_hw_state(core_dc
);