2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include "dm_services.h"
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
38 #include "bios_parser_interface.h"
39 #include "include/irq_service_interface.h"
40 #include "transform.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
45 #include "link_hwss.h"
46 #include "link_encoder.h"
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
54 /*******************************************************************************
56 ******************************************************************************/
57 static void destroy_links(struct dc
*dc
)
61 for (i
= 0; i
< dc
->link_count
; i
++) {
62 if (NULL
!= dc
->links
[i
])
63 link_destroy(&dc
->links
[i
]);
67 static bool create_links(
69 uint32_t num_virtual_links
)
73 struct dc_bios
*bios
= dc
->ctx
->dc_bios
;
77 connectors_num
= bios
->funcs
->get_connectors_number(bios
);
79 if (connectors_num
> ENUM_ID_COUNT
) {
81 "DC: Number of connectors %d exceeds maximum of %d!\n",
87 if (connectors_num
== 0 && num_virtual_links
== 0) {
88 dm_error("DC: Number of connectors is zero!\n");
92 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
97 for (i
= 0; i
< connectors_num
; i
++) {
98 struct link_init_data link_init_params
= {0};
101 link_init_params
.ctx
= dc
->ctx
;
102 /* next BIOS object table connector */
103 link_init_params
.connector_index
= i
;
104 link_init_params
.link_index
= dc
->link_count
;
105 link_init_params
.dc
= dc
;
106 link
= link_create(&link_init_params
);
109 dc
->links
[dc
->link_count
] = link
;
115 for (i
= 0; i
< num_virtual_links
; i
++) {
116 struct dc_link
*link
= kzalloc(sizeof(*link
), GFP_KERNEL
);
117 struct encoder_init_data enc_init
= {0};
124 link
->link_index
= dc
->link_count
;
125 dc
->links
[dc
->link_count
] = link
;
130 link
->connector_signal
= SIGNAL_TYPE_VIRTUAL
;
131 link
->link_id
.type
= OBJECT_TYPE_CONNECTOR
;
132 link
->link_id
.id
= CONNECTOR_ID_VIRTUAL
;
133 link
->link_id
.enum_id
= ENUM_ID_1
;
134 link
->link_enc
= kzalloc(sizeof(*link
->link_enc
), GFP_KERNEL
);
136 if (!link
->link_enc
) {
141 link
->link_status
.dpcd_caps
= &link
->dpcd_caps
;
143 enc_init
.ctx
= dc
->ctx
;
144 enc_init
.channel
= CHANNEL_ID_UNKNOWN
;
145 enc_init
.hpd_source
= HPD_SOURCEID_UNKNOWN
;
146 enc_init
.transmitter
= TRANSMITTER_UNKNOWN
;
147 enc_init
.connector
= link
->link_id
;
148 enc_init
.encoder
.type
= OBJECT_TYPE_ENCODER
;
149 enc_init
.encoder
.id
= ENCODER_ID_INTERNAL_VIRTUAL
;
150 enc_init
.encoder
.enum_id
= ENUM_ID_1
;
151 virtual_link_encoder_construct(link
->link_enc
, &enc_init
);
160 static bool stream_adjust_vmin_vmax(struct dc
*dc
,
161 struct dc_stream_state
**streams
, int num_streams
,
164 /* TODO: Support multiple streams */
165 struct dc_stream_state
*stream
= streams
[0];
169 for (i
= 0; i
< MAX_PIPES
; i
++) {
170 struct pipe_ctx
*pipe
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
172 if (pipe
->stream
== stream
&& pipe
->stream_res
.stream_enc
) {
173 dc
->hwss
.set_drr(&pipe
, 1, vmin
, vmax
);
175 /* build and update the info frame */
176 resource_build_info_frame(pipe
);
177 dc
->hwss
.update_info_frame(pipe
);
185 static bool stream_get_crtc_position(struct dc
*dc
,
186 struct dc_stream_state
**streams
, int num_streams
,
187 unsigned int *v_pos
, unsigned int *nom_v_pos
)
189 /* TODO: Support multiple streams */
190 struct dc_stream_state
*stream
= streams
[0];
193 struct crtc_position position
;
195 for (i
= 0; i
< MAX_PIPES
; i
++) {
196 struct pipe_ctx
*pipe
=
197 &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
199 if (pipe
->stream
== stream
&& pipe
->stream_res
.stream_enc
) {
200 dc
->hwss
.get_position(&pipe
, 1, &position
);
202 *v_pos
= position
.vertical_count
;
203 *nom_v_pos
= position
.nominal_vcount
;
210 static bool set_gamut_remap(struct dc
*dc
, const struct dc_stream_state
*stream
)
214 struct pipe_ctx
*pipes
;
216 for (i
= 0; i
< MAX_PIPES
; i
++) {
217 if (dc
->current_state
->res_ctx
.pipe_ctx
[i
].stream
== stream
) {
218 pipes
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
219 dc
->hwss
.program_gamut_remap(pipes
);
227 static bool program_csc_matrix(struct dc
*dc
, struct dc_stream_state
*stream
)
231 struct pipe_ctx
*pipes
;
233 for (i
= 0; i
< MAX_PIPES
; i
++) {
234 if (dc
->current_state
->res_ctx
.pipe_ctx
[i
].stream
237 pipes
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
238 dc
->hwss
.program_csc_matrix(pipes
,
239 stream
->output_color_space
,
240 stream
->csc_color_matrix
.matrix
);
248 static void set_static_screen_events(struct dc
*dc
,
249 struct dc_stream_state
**streams
,
251 const struct dc_static_screen_events
*events
)
255 struct pipe_ctx
*pipes_affected
[MAX_PIPES
];
256 int num_pipes_affected
= 0;
258 for (i
= 0; i
< num_streams
; i
++) {
259 struct dc_stream_state
*stream
= streams
[i
];
261 for (j
= 0; j
< MAX_PIPES
; j
++) {
262 if (dc
->current_state
->res_ctx
.pipe_ctx
[j
].stream
264 pipes_affected
[num_pipes_affected
++] =
265 &dc
->current_state
->res_ctx
.pipe_ctx
[j
];
270 dc
->hwss
.set_static_screen_control(pipes_affected
, num_pipes_affected
, events
);
273 static void set_drive_settings(struct dc
*dc
,
274 struct link_training_settings
*lt_settings
,
275 const struct dc_link
*link
)
280 for (i
= 0; i
< dc
->link_count
; i
++) {
281 if (dc
->links
[i
] == link
)
285 if (i
>= dc
->link_count
)
286 ASSERT_CRITICAL(false);
288 dc_link_dp_set_drive_settings(dc
->links
[i
], lt_settings
);
291 static void perform_link_training(struct dc
*dc
,
292 struct dc_link_settings
*link_setting
,
293 bool skip_video_pattern
)
297 for (i
= 0; i
< dc
->link_count
; i
++)
298 dc_link_dp_perform_link_training(
304 static void set_preferred_link_settings(struct dc
*dc
,
305 struct dc_link_settings
*link_setting
,
306 struct dc_link
*link
)
308 link
->preferred_link_setting
= *link_setting
;
309 dp_retrain_link_dp_test(link
, link_setting
, false);
/* Thin wrapper: re-enable HPD (hot-plug detect) on a DP link. */
static void enable_hpd(const struct dc_link *link)
{
	dc_link_dp_enable_hpd(link);
}
/* Thin wrapper: disable HPD (hot-plug detect) on a DP link. */
static void disable_hpd(const struct dc_link *link)
{
	dc_link_dp_disable_hpd(link);
}
323 static void set_test_pattern(
324 struct dc_link
*link
,
325 enum dp_test_pattern test_pattern
,
326 const struct link_training_settings
*p_link_settings
,
327 const unsigned char *p_custom_pattern
,
328 unsigned int cust_pattern_size
)
331 dc_link_dp_set_test_pattern(
339 static void set_dither_option(struct dc_stream_state
*stream
,
340 enum dc_dither_option option
)
342 struct bit_depth_reduction_params params
;
343 struct dc_link
*link
= stream
->status
.link
;
344 struct pipe_ctx
*pipes
= NULL
;
347 for (i
= 0; i
< MAX_PIPES
; i
++) {
348 if (link
->dc
->current_state
->res_ctx
.pipe_ctx
[i
].stream
==
350 pipes
= &link
->dc
->current_state
->res_ctx
.pipe_ctx
[i
];
355 memset(¶ms
, 0, sizeof(params
));
358 if (option
> DITHER_OPTION_MAX
)
361 stream
->dither_option
= option
;
363 resource_build_bit_depth_reduction_params(stream
,
365 stream
->bit_depth_params
= params
;
366 pipes
->stream_res
.opp
->funcs
->
367 opp_program_bit_depth_reduction(pipes
->stream_res
.opp
, ¶ms
);
372 struct dc_stream_state
*stream
,
375 struct pipe_ctx
*pipe_ctx
= NULL
;
378 for (i
= 0; i
< MAX_PIPES
; i
++) {
379 if (dc
->current_state
->res_ctx
.pipe_ctx
[i
].stream
== stream
) {
380 pipe_ctx
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
390 if (stream
->dpms_off
!= dpms_off
) {
391 stream
->dpms_off
= dpms_off
;
393 core_link_disable_stream(pipe_ctx
,
394 KEEP_ACQUIRED_RESOURCE
);
396 core_link_enable_stream(dc
->current_state
, pipe_ctx
);
400 static void allocate_dc_stream_funcs(struct dc
*dc
)
402 if (dc
->hwss
.set_drr
!= NULL
) {
403 dc
->stream_funcs
.adjust_vmin_vmax
=
404 stream_adjust_vmin_vmax
;
407 dc
->stream_funcs
.set_static_screen_events
=
408 set_static_screen_events
;
410 dc
->stream_funcs
.get_crtc_position
=
411 stream_get_crtc_position
;
413 dc
->stream_funcs
.set_gamut_remap
=
416 dc
->stream_funcs
.program_csc_matrix
=
419 dc
->stream_funcs
.set_dither_option
=
422 dc
->stream_funcs
.set_dpms
=
425 dc
->link_funcs
.set_drive_settings
=
428 dc
->link_funcs
.perform_link_training
=
429 perform_link_training
;
431 dc
->link_funcs
.set_preferred_link_settings
=
432 set_preferred_link_settings
;
434 dc
->link_funcs
.enable_hpd
=
437 dc
->link_funcs
.disable_hpd
=
440 dc
->link_funcs
.set_test_pattern
=
444 static void destruct(struct dc
*dc
)
446 dc_release_state(dc
->current_state
);
447 dc
->current_state
= NULL
;
451 dc_destroy_resource_pool(dc
);
453 if (dc
->ctx
->gpio_service
)
454 dal_gpio_service_destroy(&dc
->ctx
->gpio_service
);
457 dal_i2caux_destroy(&dc
->ctx
->i2caux
);
459 if (dc
->ctx
->created_bios
)
460 dal_bios_parser_destroy(&dc
->ctx
->dc_bios
);
463 dal_logger_destroy(&dc
->ctx
->logger
);
474 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
484 static bool construct(struct dc
*dc
,
485 const struct dc_init_data
*init_params
)
487 struct dal_logger
*logger
;
488 struct dc_context
*dc_ctx
= kzalloc(sizeof(*dc_ctx
), GFP_KERNEL
);
489 struct bw_calcs_dceip
*dc_dceip
= kzalloc(sizeof(*dc_dceip
),
491 struct bw_calcs_vbios
*dc_vbios
= kzalloc(sizeof(*dc_vbios
),
493 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
494 struct dcn_soc_bounding_box
*dcn_soc
= kzalloc(sizeof(*dcn_soc
),
496 struct dcn_ip_params
*dcn_ip
= kzalloc(sizeof(*dcn_ip
), GFP_KERNEL
);
499 enum dce_version dc_version
= DCE_VERSION_UNKNOWN
;
502 dm_error("%s: failed to create dceip\n", __func__
);
506 dc
->bw_dceip
= dc_dceip
;
509 dm_error("%s: failed to create vbios\n", __func__
);
513 dc
->bw_vbios
= dc_vbios
;
514 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
516 dm_error("%s: failed to create dcn_soc\n", __func__
);
520 dc
->dcn_soc
= dcn_soc
;
523 dm_error("%s: failed to create dcn_ip\n", __func__
);
531 dm_error("%s: failed to create ctx\n", __func__
);
535 dc
->current_state
= dc_create_state();
537 if (!dc
->current_state
) {
538 dm_error("%s: failed to create validate ctx\n", __func__
);
542 dc_ctx
->cgs_device
= init_params
->cgs_device
;
543 dc_ctx
->driver_context
= init_params
->driver
;
545 dc_ctx
->asic_id
= init_params
->asic_id
;
548 logger
= dal_logger_create(dc_ctx
, init_params
->log_mask
);
551 /* can *not* call logger. call base driver 'print error' */
552 dm_error("%s: failed to create Logger!\n", __func__
);
555 dc_ctx
->logger
= logger
;
557 dc
->ctx
->dce_environment
= init_params
->dce_environment
;
559 dc_version
= resource_parse_asic_id(init_params
->asic_id
);
560 dc
->ctx
->dce_version
= dc_version
;
561 #if defined(CONFIG_DRM_AMD_DC_FBC)
562 dc
->ctx
->fbc_gpu_addr
= init_params
->fbc_gpu_addr
;
564 /* Resource should construct all asic specific resources.
565 * This should be the only place where we need to parse the asic id
567 if (init_params
->vbios_override
)
568 dc_ctx
->dc_bios
= init_params
->vbios_override
;
570 /* Create BIOS parser */
571 struct bp_init_data bp_init_data
;
573 bp_init_data
.ctx
= dc_ctx
;
574 bp_init_data
.bios
= init_params
->asic_id
.atombios_base_address
;
576 dc_ctx
->dc_bios
= dal_bios_parser_create(
577 &bp_init_data
, dc_version
);
579 if (!dc_ctx
->dc_bios
) {
580 ASSERT_CRITICAL(false);
584 dc_ctx
->created_bios
= true;
588 dc_ctx
->i2caux
= dal_i2caux_create(dc_ctx
);
590 if (!dc_ctx
->i2caux
) {
591 ASSERT_CRITICAL(false);
595 /* Create GPIO service */
596 dc_ctx
->gpio_service
= dal_gpio_service_create(
598 dc_ctx
->dce_environment
,
601 if (!dc_ctx
->gpio_service
) {
602 ASSERT_CRITICAL(false);
606 dc
->res_pool
= dc_create_resource_pool(
608 init_params
->num_virtual_links
,
610 init_params
->asic_id
);
614 dc_resource_state_construct(dc
, dc
->current_state
);
616 if (!create_links(dc
, init_params
->num_virtual_links
))
619 allocate_dc_stream_funcs(dc
);
629 static void disable_dangling_plane(struct dc
*dc
, struct dc_state
*context
)
632 struct dc_state
*dangling_context
= dc_create_state();
633 struct dc_state
*current_ctx
;
635 if (dangling_context
== NULL
)
638 dc_resource_state_copy_construct(dc
->current_state
, dangling_context
);
640 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
641 struct dc_stream_state
*old_stream
=
642 dc
->current_state
->res_ctx
.pipe_ctx
[i
].stream
;
643 bool should_disable
= true;
645 for (j
= 0; j
< context
->stream_count
; j
++) {
646 if (old_stream
== context
->streams
[j
]) {
647 should_disable
= false;
651 if (should_disable
&& old_stream
) {
652 dc_rem_all_planes_for_stream(dc
, old_stream
, dangling_context
);
653 dc
->hwss
.apply_ctx_for_surface(dc
, old_stream
, 0, dangling_context
);
657 current_ctx
= dc
->current_state
;
658 dc
->current_state
= dangling_context
;
659 dc_release_state(current_ctx
);
662 /*******************************************************************************
664 ******************************************************************************/
666 struct dc
*dc_create(const struct dc_init_data
*init_params
)
668 struct dc
*dc
= kzalloc(sizeof(*dc
), GFP_KERNEL
);
669 unsigned int full_pipe_count
;
674 if (false == construct(dc
, init_params
))
677 /*TODO: separate HW and SW initialization*/
678 dc
->hwss
.init_hw(dc
);
680 full_pipe_count
= dc
->res_pool
->pipe_count
;
681 if (dc
->res_pool
->underlay_pipe_index
!= NO_UNDERLAY_PIPE
)
683 dc
->caps
.max_streams
= min(
685 dc
->res_pool
->stream_enc_count
);
687 dc
->caps
.max_links
= dc
->link_count
;
688 dc
->caps
.max_audios
= dc
->res_pool
->audio_count
;
690 dc
->config
= init_params
->flags
;
692 dm_logger_write(dc
->ctx
->logger
, LOG_DC
,
693 "Display Core initialized\n");
696 /* TODO: missing feature to be enabled */
697 dc
->debug
.disable_dfs_bypass
= true;
708 void dc_destroy(struct dc
**dc
)
715 static void program_timing_sync(
717 struct dc_state
*ctx
)
721 int pipe_count
= dc
->res_pool
->pipe_count
;
722 struct pipe_ctx
*unsynced_pipes
[MAX_PIPES
] = { NULL
};
724 for (i
= 0; i
< pipe_count
; i
++) {
725 if (!ctx
->res_ctx
.pipe_ctx
[i
].stream
|| ctx
->res_ctx
.pipe_ctx
[i
].top_pipe
)
728 unsynced_pipes
[i
] = &ctx
->res_ctx
.pipe_ctx
[i
];
731 for (i
= 0; i
< pipe_count
; i
++) {
733 struct pipe_ctx
*pipe_set
[MAX_PIPES
];
735 if (!unsynced_pipes
[i
])
738 pipe_set
[0] = unsynced_pipes
[i
];
739 unsynced_pipes
[i
] = NULL
;
741 /* Add tg to the set, search rest of the tg's for ones with
742 * same timing, add all tgs with same timing to the group
744 for (j
= i
+ 1; j
< pipe_count
; j
++) {
745 if (!unsynced_pipes
[j
])
748 if (resource_are_streams_timing_synchronizable(
749 unsynced_pipes
[j
]->stream
,
750 pipe_set
[0]->stream
)) {
751 pipe_set
[group_size
] = unsynced_pipes
[j
];
752 unsynced_pipes
[j
] = NULL
;
757 /* set first unblanked pipe as master */
758 for (j
= 0; j
< group_size
; j
++) {
759 struct pipe_ctx
*temp
;
761 if (!pipe_set
[j
]->stream_res
.tg
->funcs
->is_blanked(pipe_set
[j
]->stream_res
.tg
)) {
766 pipe_set
[0] = pipe_set
[j
];
772 /* remove any other unblanked pipes as they have already been synced */
773 for (j
= j
+ 1; j
< group_size
; j
++) {
774 if (!pipe_set
[j
]->stream_res
.tg
->funcs
->is_blanked(pipe_set
[j
]->stream_res
.tg
)) {
776 pipe_set
[j
] = pipe_set
[group_size
];
781 if (group_size
> 1) {
782 dc
->hwss
.enable_timing_synchronization(
783 dc
, group_index
, group_size
, pipe_set
);
789 static bool context_changed(
791 struct dc_state
*context
)
795 if (context
->stream_count
!= dc
->current_state
->stream_count
)
798 for (i
= 0; i
< dc
->current_state
->stream_count
; i
++) {
799 if (dc
->current_state
->streams
[i
] != context
->streams
[i
])
806 bool dc_enable_stereo(
808 struct dc_state
*context
,
809 struct dc_stream_state
*streams
[],
810 uint8_t stream_count
)
814 struct pipe_ctx
*pipe
;
816 for (i
= 0; i
< MAX_PIPES
; i
++) {
818 pipe
= &context
->res_ctx
.pipe_ctx
[i
];
820 pipe
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
821 for (j
= 0 ; pipe
&& j
< stream_count
; j
++) {
822 if (streams
[j
] && streams
[j
] == pipe
->stream
&&
823 dc
->hwss
.setup_stereo
)
824 dc
->hwss
.setup_stereo(pipe
, dc
);
833 * Applies given context to HW and copy it into current context.
834 * It's up to the user to release the src context afterwards.
836 static enum dc_status
dc_commit_state_no_check(struct dc
*dc
, struct dc_state
*context
)
838 struct dc_bios
*dcb
= dc
->ctx
->dc_bios
;
839 enum dc_status result
= DC_ERROR_UNEXPECTED
;
840 struct pipe_ctx
*pipe
;
842 struct dc_stream_state
*dc_streams
[MAX_STREAMS
] = {0};
844 disable_dangling_plane(dc
, context
);
846 for (i
= 0; i
< context
->stream_count
; i
++)
847 dc_streams
[i
] = context
->streams
[i
];
849 if (!dcb
->funcs
->is_accelerated_mode(dcb
))
850 dc
->hwss
.enable_accelerated_mode(dc
);
852 for (i
= 0; i
< context
->stream_count
; i
++) {
853 const struct dc_sink
*sink
= context
->streams
[i
]->sink
;
855 dc
->hwss
.apply_ctx_for_surface(
856 dc
, context
->streams
[i
],
857 context
->stream_status
[i
].plane_count
,
862 * TODO rework dc_enable_stereo call to work with validation sets?
864 for (k
= 0; k
< MAX_PIPES
; k
++) {
865 pipe
= &context
->res_ctx
.pipe_ctx
[k
];
867 for (l
= 0 ; pipe
&& l
< context
->stream_count
; l
++) {
868 if (context
->streams
[l
] &&
869 context
->streams
[l
] == pipe
->stream
&&
870 dc
->hwss
.setup_stereo
)
871 dc
->hwss
.setup_stereo(pipe
, dc
);
875 CONN_MSG_MODE(sink
->link
, "{%dx%d, %dx%d@%dKhz}",
876 context
->streams
[i
]->timing
.h_addressable
,
877 context
->streams
[i
]->timing
.v_addressable
,
878 context
->streams
[i
]->timing
.h_total
,
879 context
->streams
[i
]->timing
.v_total
,
880 context
->streams
[i
]->timing
.pix_clk_khz
);
883 dc
->hwss
.ready_shared_resources(dc
, context
);
885 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
886 pipe
= &context
->res_ctx
.pipe_ctx
[i
];
887 dc
->hwss
.wait_for_mpcc_disconnect(dc
, dc
->res_pool
, pipe
);
889 result
= dc
->hwss
.apply_ctx_to_hw(dc
, context
);
891 program_timing_sync(dc
, context
);
893 dc_enable_stereo(dc
, context
, dc_streams
, context
->stream_count
);
895 for (i
= 0; i
< context
->stream_count
; i
++) {
896 for (j
= 0; j
< MAX_PIPES
; j
++) {
897 pipe
= &context
->res_ctx
.pipe_ctx
[j
];
899 if (!pipe
->top_pipe
&& pipe
->stream
== context
->streams
[i
])
900 dc
->hwss
.pipe_control_lock(dc
, pipe
, false);
904 dc_release_state(dc
->current_state
);
906 dc
->current_state
= context
;
908 dc_retain_state(dc
->current_state
);
910 dc
->hwss
.optimize_shared_resources(dc
);
915 bool dc_commit_state(struct dc
*dc
, struct dc_state
*context
)
917 enum dc_status result
= DC_ERROR_UNEXPECTED
;
920 if (false == context_changed(dc
, context
))
923 dm_logger_write(dc
->ctx
->logger
, LOG_DC
, "%s: %d streams\n",
924 __func__
, context
->stream_count
);
926 for (i
= 0; i
< context
->stream_count
; i
++) {
927 struct dc_stream_state
*stream
= context
->streams
[i
];
929 dc_stream_log(stream
,
934 result
= dc_commit_state_no_check(dc
, context
);
936 return (result
== DC_OK
);
940 bool dc_post_update_surfaces_to_stream(struct dc
*dc
)
943 struct dc_state
*context
= dc
->current_state
;
945 post_surface_trace(dc
);
947 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++)
948 if (context
->res_ctx
.pipe_ctx
[i
].stream
== NULL
949 || context
->res_ctx
.pipe_ctx
[i
].plane_state
== NULL
)
950 dc
->hwss
.power_down_front_end(dc
, i
);
952 /* 3rd param should be true, temp w/a for RV*/
953 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
954 dc
->hwss
.set_bandwidth(dc
, context
, dc
->ctx
->dce_version
< DCN_VERSION_1_0
);
956 dc
->hwss
.set_bandwidth(dc
, context
, true);
962 * TODO this whole function needs to go
964 * dc_surface_update is needlessly complex. See if we can just replace this
965 * with a dc_plane_state and follow the atomic model a bit more closely here.
967 bool dc_commit_planes_to_stream(
969 struct dc_plane_state
**plane_states
,
970 uint8_t new_plane_count
,
971 struct dc_stream_state
*dc_stream
,
972 struct dc_state
*state
)
974 /* no need to dynamically allocate this. it's pretty small */
975 struct dc_surface_update updates
[MAX_SURFACES
];
976 struct dc_flip_addrs
*flip_addr
;
977 struct dc_plane_info
*plane_info
;
978 struct dc_scaling_info
*scaling_info
;
980 struct dc_stream_update
*stream_update
=
981 kzalloc(sizeof(struct dc_stream_update
), GFP_KERNEL
);
983 if (!stream_update
) {
988 flip_addr
= kcalloc(MAX_SURFACES
, sizeof(struct dc_flip_addrs
),
990 plane_info
= kcalloc(MAX_SURFACES
, sizeof(struct dc_plane_info
),
992 scaling_info
= kcalloc(MAX_SURFACES
, sizeof(struct dc_scaling_info
),
995 if (!flip_addr
|| !plane_info
|| !scaling_info
) {
999 kfree(stream_update
);
1003 memset(updates
, 0, sizeof(updates
));
1005 stream_update
->src
= dc_stream
->src
;
1006 stream_update
->dst
= dc_stream
->dst
;
1007 stream_update
->out_transfer_func
= dc_stream
->out_transfer_func
;
1009 for (i
= 0; i
< new_plane_count
; i
++) {
1010 updates
[i
].surface
= plane_states
[i
];
1012 (struct dc_gamma
*)plane_states
[i
]->gamma_correction
;
1013 updates
[i
].in_transfer_func
= plane_states
[i
]->in_transfer_func
;
1014 flip_addr
[i
].address
= plane_states
[i
]->address
;
1015 flip_addr
[i
].flip_immediate
= plane_states
[i
]->flip_immediate
;
1016 plane_info
[i
].color_space
= plane_states
[i
]->color_space
;
1017 plane_info
[i
].format
= plane_states
[i
]->format
;
1018 plane_info
[i
].plane_size
= plane_states
[i
]->plane_size
;
1019 plane_info
[i
].rotation
= plane_states
[i
]->rotation
;
1020 plane_info
[i
].horizontal_mirror
= plane_states
[i
]->horizontal_mirror
;
1021 plane_info
[i
].stereo_format
= plane_states
[i
]->stereo_format
;
1022 plane_info
[i
].tiling_info
= plane_states
[i
]->tiling_info
;
1023 plane_info
[i
].visible
= plane_states
[i
]->visible
;
1024 plane_info
[i
].per_pixel_alpha
= plane_states
[i
]->per_pixel_alpha
;
1025 plane_info
[i
].dcc
= plane_states
[i
]->dcc
;
1026 scaling_info
[i
].scaling_quality
= plane_states
[i
]->scaling_quality
;
1027 scaling_info
[i
].src_rect
= plane_states
[i
]->src_rect
;
1028 scaling_info
[i
].dst_rect
= plane_states
[i
]->dst_rect
;
1029 scaling_info
[i
].clip_rect
= plane_states
[i
]->clip_rect
;
1031 updates
[i
].flip_addr
= &flip_addr
[i
];
1032 updates
[i
].plane_info
= &plane_info
[i
];
1033 updates
[i
].scaling_info
= &scaling_info
[i
];
1036 dc_commit_updates_for_stream(
1040 dc_stream
, stream_update
, plane_states
, state
);
1044 kfree(scaling_info
);
1045 kfree(stream_update
);
1049 struct dc_state
*dc_create_state(void)
1051 struct dc_state
*context
= kzalloc(sizeof(struct dc_state
),
1057 kref_init(&context
->refcount
);
1061 void dc_retain_state(struct dc_state
*context
)
1063 kref_get(&context
->refcount
);
1066 static void dc_state_free(struct kref
*kref
)
1068 struct dc_state
*context
= container_of(kref
, struct dc_state
, refcount
);
1069 dc_resource_state_destruct(context
);
1073 void dc_release_state(struct dc_state
*context
)
1075 kref_put(&context
->refcount
, dc_state_free
);
1078 static bool is_surface_in_context(
1079 const struct dc_state
*context
,
1080 const struct dc_plane_state
*plane_state
)
1084 for (j
= 0; j
< MAX_PIPES
; j
++) {
1085 const struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1087 if (plane_state
== pipe_ctx
->plane_state
) {
1095 static unsigned int pixel_format_to_bpp(enum surface_pixel_format format
)
1098 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
:
1099 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
:
1101 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555
:
1102 case SURFACE_PIXEL_FORMAT_GRPH_RGB565
:
1103 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr
:
1104 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb
:
1106 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
:
1107 case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
:
1108 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
:
1109 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
:
1111 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616
:
1112 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F
:
1113 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
:
1116 ASSERT_CRITICAL(false);
1121 static enum surface_update_type
get_plane_info_update_type(
1122 const struct dc_surface_update
*u
,
1125 struct dc_plane_info temp_plane_info
;
1126 memset(&temp_plane_info
, 0, sizeof(temp_plane_info
));
1129 return UPDATE_TYPE_FAST
;
1131 temp_plane_info
= *u
->plane_info
;
1133 /* Copy all parameters that will cause a full update
1134 * from current surface, the rest of the parameters
1135 * from provided plane configuration.
1136 * Perform memory compare and special validation
1137 * for those that can cause fast/medium updates
1140 /* Full update parameters */
1141 temp_plane_info
.color_space
= u
->surface
->color_space
;
1142 temp_plane_info
.dcc
= u
->surface
->dcc
;
1143 temp_plane_info
.horizontal_mirror
= u
->surface
->horizontal_mirror
;
1144 temp_plane_info
.plane_size
= u
->surface
->plane_size
;
1145 temp_plane_info
.rotation
= u
->surface
->rotation
;
1146 temp_plane_info
.stereo_format
= u
->surface
->stereo_format
;
1148 if (surface_index
== 0)
1149 temp_plane_info
.visible
= u
->plane_info
->visible
;
1151 temp_plane_info
.visible
= u
->surface
->visible
;
1153 if (memcmp(u
->plane_info
, &temp_plane_info
,
1154 sizeof(struct dc_plane_info
)) != 0)
1155 return UPDATE_TYPE_FULL
;
1157 if (pixel_format_to_bpp(u
->plane_info
->format
) !=
1158 pixel_format_to_bpp(u
->surface
->format
)) {
1159 /* different bytes per element will require full bandwidth
1160 * and DML calculation
1162 return UPDATE_TYPE_FULL
;
1165 if (memcmp(&u
->plane_info
->tiling_info
, &u
->surface
->tiling_info
,
1166 sizeof(union dc_tiling_info
)) != 0) {
1167 /* todo: below are HW dependent, we should add a hook to
1168 * DCE/N resource and validated there.
1170 if (u
->plane_info
->tiling_info
.gfx9
.swizzle
!= DC_SW_LINEAR
) {
1171 /* swizzled mode requires RQ to be setup properly,
1172 * thus need to run DML to calculate RQ settings
1174 return UPDATE_TYPE_FULL
;
1178 return UPDATE_TYPE_MED
;
1181 static enum surface_update_type
get_scaling_info_update_type(
1182 const struct dc_surface_update
*u
)
1184 if (!u
->scaling_info
)
1185 return UPDATE_TYPE_FAST
;
1187 if (u
->scaling_info
->src_rect
.width
!= u
->surface
->src_rect
.width
1188 || u
->scaling_info
->src_rect
.height
!= u
->surface
->src_rect
.height
1189 || u
->scaling_info
->clip_rect
.width
!= u
->surface
->clip_rect
.width
1190 || u
->scaling_info
->clip_rect
.height
!= u
->surface
->clip_rect
.height
1191 || u
->scaling_info
->dst_rect
.width
!= u
->surface
->dst_rect
.width
1192 || u
->scaling_info
->dst_rect
.height
!= u
->surface
->dst_rect
.height
)
1193 return UPDATE_TYPE_FULL
;
1195 if (u
->scaling_info
->src_rect
.x
!= u
->surface
->src_rect
.x
1196 || u
->scaling_info
->src_rect
.y
!= u
->surface
->src_rect
.y
1197 || u
->scaling_info
->clip_rect
.x
!= u
->surface
->clip_rect
.x
1198 || u
->scaling_info
->clip_rect
.y
!= u
->surface
->clip_rect
.y
1199 || u
->scaling_info
->dst_rect
.x
!= u
->surface
->dst_rect
.x
1200 || u
->scaling_info
->dst_rect
.y
!= u
->surface
->dst_rect
.y
)
1201 return UPDATE_TYPE_MED
;
1203 return UPDATE_TYPE_FAST
;
1206 static enum surface_update_type
det_surface_update(
1207 const struct dc
*dc
,
1208 const struct dc_surface_update
*u
,
1211 const struct dc_state
*context
= dc
->current_state
;
1212 enum surface_update_type type
= UPDATE_TYPE_FAST
;
1213 enum surface_update_type overall_type
= UPDATE_TYPE_FAST
;
1215 if (!is_surface_in_context(context
, u
->surface
))
1216 return UPDATE_TYPE_FULL
;
1218 type
= get_plane_info_update_type(u
, surface_index
);
1219 if (overall_type
< type
)
1220 overall_type
= type
;
1222 type
= get_scaling_info_update_type(u
);
1223 if (overall_type
< type
)
1224 overall_type
= type
;
1226 if (u
->in_transfer_func
||
1227 u
->hdr_static_metadata
) {
1228 if (overall_type
< UPDATE_TYPE_MED
)
1229 overall_type
= UPDATE_TYPE_MED
;
1232 return overall_type
;
1235 enum surface_update_type
dc_check_update_surfaces_for_stream(
1237 struct dc_surface_update
*updates
,
1239 struct dc_stream_update
*stream_update
,
1240 const struct dc_stream_status
*stream_status
)
1243 enum surface_update_type overall_type
= UPDATE_TYPE_FAST
;
1245 if (stream_status
== NULL
|| stream_status
->plane_count
!= surface_count
)
1246 return UPDATE_TYPE_FULL
;
1249 return UPDATE_TYPE_FULL
;
1251 for (i
= 0 ; i
< surface_count
; i
++) {
1252 enum surface_update_type type
=
1253 det_surface_update(dc
, &updates
[i
], i
);
1255 if (type
== UPDATE_TYPE_FULL
)
1258 if (overall_type
< type
)
1259 overall_type
= type
;
1262 return overall_type
;
1265 static struct dc_stream_status
*stream_get_status(
1266 struct dc_state
*ctx
,
1267 struct dc_stream_state
*stream
)
1271 for (i
= 0; i
< ctx
->stream_count
; i
++) {
1272 if (stream
== ctx
->streams
[i
]) {
1273 return &ctx
->stream_status
[i
];
1280 static const enum surface_update_type update_surface_trace_level
= UPDATE_TYPE_FULL
;
1283 static void commit_planes_for_stream(struct dc
*dc
,
1284 struct dc_surface_update
*srf_updates
,
1286 struct dc_stream_state
*stream
,
1287 struct dc_stream_update
*stream_update
,
1288 enum surface_update_type update_type
,
1289 struct dc_state
*context
)
1293 if (update_type
== UPDATE_TYPE_FULL
) {
1294 dc
->hwss
.set_bandwidth(dc
, context
, false);
1295 context_clock_trace(dc
, context
);
1298 if (update_type
> UPDATE_TYPE_FAST
) {
1299 for (j
= 0; j
< dc
->res_pool
->pipe_count
; j
++) {
1300 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1302 dc
->hwss
.wait_for_mpcc_disconnect(dc
, dc
->res_pool
, pipe_ctx
);
1306 if (surface_count
== 0) {
1308 * In case of turning off screen, no need to program front end a second time.
1309 * just return after program front end.
1311 dc
->hwss
.apply_ctx_for_surface(dc
, stream
, surface_count
, context
);
1315 /* Lock pipes for provided surfaces, or all active if full update*/
1316 for (i
= 0; i
< surface_count
; i
++) {
1317 struct dc_plane_state
*plane_state
= srf_updates
[i
].surface
;
1319 for (j
= 0; j
< dc
->res_pool
->pipe_count
; j
++) {
1320 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1322 if (update_type
!= UPDATE_TYPE_FULL
&& pipe_ctx
->plane_state
!= plane_state
)
1324 if (!pipe_ctx
->plane_state
|| pipe_ctx
->top_pipe
)
1327 dc
->hwss
.pipe_control_lock(
1332 if (update_type
== UPDATE_TYPE_FULL
)
1337 for (j
= 0; j
< dc
->res_pool
->pipe_count
; j
++) {
1338 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1340 if (update_type
!= UPDATE_TYPE_FULL
|| !pipe_ctx
->plane_state
)
1343 if (!pipe_ctx
->top_pipe
&& pipe_ctx
->stream
) {
1344 struct dc_stream_status
*stream_status
= stream_get_status(context
, pipe_ctx
->stream
);
1346 dc
->hwss
.apply_ctx_for_surface(
1347 dc
, pipe_ctx
->stream
, stream_status
->plane_count
, context
);
1351 if (update_type
> UPDATE_TYPE_FAST
)
1352 context_timing_trace(dc
, &context
->res_ctx
);
1354 /* Perform requested Updates */
1355 for (i
= 0; i
< surface_count
; i
++) {
1356 struct dc_plane_state
*plane_state
= srf_updates
[i
].surface
;
1358 if (update_type
== UPDATE_TYPE_MED
)
1359 dc
->hwss
.apply_ctx_for_surface(
1360 dc
, stream
, surface_count
, context
);
1362 for (j
= 0; j
< dc
->res_pool
->pipe_count
; j
++) {
1363 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1365 if (pipe_ctx
->plane_state
!= plane_state
)
1368 if (srf_updates
[i
].flip_addr
)
1369 dc
->hwss
.update_plane_addr(dc
, pipe_ctx
);
1371 if (update_type
== UPDATE_TYPE_FAST
)
1374 /* work around to program degamma regs for split pipe after set mode. */
1375 if (srf_updates
[i
].in_transfer_func
|| (pipe_ctx
->top_pipe
&&
1376 pipe_ctx
->top_pipe
->plane_state
== pipe_ctx
->plane_state
))
1377 dc
->hwss
.set_input_transfer_func(
1378 pipe_ctx
, pipe_ctx
->plane_state
);
1380 if (stream_update
!= NULL
&&
1381 stream_update
->out_transfer_func
!= NULL
) {
1382 dc
->hwss
.set_output_transfer_func(
1383 pipe_ctx
, pipe_ctx
->stream
);
1386 if (srf_updates
[i
].hdr_static_metadata
) {
1387 resource_build_info_frame(pipe_ctx
);
1388 dc
->hwss
.update_info_frame(pipe_ctx
);
1394 for (i
= dc
->res_pool
->pipe_count
- 1; i
>= 0; i
--) {
1395 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[i
];
1397 for (j
= 0; j
< surface_count
; j
++) {
1398 if (update_type
!= UPDATE_TYPE_FULL
&&
1399 srf_updates
[j
].surface
!= pipe_ctx
->plane_state
)
1401 if (!pipe_ctx
->plane_state
|| pipe_ctx
->top_pipe
)
1404 dc
->hwss
.pipe_control_lock(
1414 void dc_commit_updates_for_stream(struct dc
*dc
,
1415 struct dc_surface_update
*srf_updates
,
1417 struct dc_stream_state
*stream
,
1418 struct dc_stream_update
*stream_update
,
1419 struct dc_plane_state
**plane_states
,
1420 struct dc_state
*state
)
1422 const struct dc_stream_status
*stream_status
;
1423 enum surface_update_type update_type
;
1424 struct dc_state
*context
;
1425 struct dc_context
*dc_ctx
= dc
->ctx
;
1428 stream_status
= dc_stream_get_status(stream
);
1429 context
= dc
->current_state
;
1431 update_type
= dc_check_update_surfaces_for_stream(
1432 dc
, srf_updates
, surface_count
, stream_update
, stream_status
);
1434 if (update_type
>= update_surface_trace_level
)
1435 update_surface_trace(dc
, srf_updates
, surface_count
);
1438 if (update_type
>= UPDATE_TYPE_FULL
) {
1440 /* initialize scratch memory for building context */
1441 context
= dc_create_state();
1442 if (context
== NULL
) {
1443 DC_ERROR("Failed to allocate new validate context!\n");
1447 dc_resource_state_copy_construct(state
, context
);
1451 for (i
= 0; i
< surface_count
; i
++) {
1452 struct dc_plane_state
*surface
= srf_updates
[i
].surface
;
1454 /* TODO: On flip we don't build the state, so it still has the
1455 * old address. Which is why we are updating the address here
1457 if (srf_updates
[i
].flip_addr
) {
1458 surface
->address
= srf_updates
[i
].flip_addr
->address
;
1459 surface
->flip_immediate
= srf_updates
[i
].flip_addr
->flip_immediate
;
1463 if (update_type
>= UPDATE_TYPE_MED
) {
1464 for (j
= 0; j
< dc
->res_pool
->pipe_count
; j
++) {
1465 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1467 if (pipe_ctx
->plane_state
!= surface
)
1470 resource_build_scaling_params(pipe_ctx
);
1475 commit_planes_for_stream(
1484 if (update_type
>= UPDATE_TYPE_FULL
)
1485 dc_post_update_surfaces_to_stream(dc
);
1487 if (dc
->current_state
!= context
) {
1489 struct dc_state
*old
= dc
->current_state
;
1491 dc
->current_state
= context
;
1492 dc_release_state(old
);
1500 uint8_t dc_get_current_stream_count(struct dc
*dc
)
1502 return dc
->current_state
->stream_count
;
1505 struct dc_stream_state
*dc_get_stream_at_index(struct dc
*dc
, uint8_t i
)
1507 if (i
< dc
->current_state
->stream_count
)
1508 return dc
->current_state
->streams
[i
];
1512 enum dc_irq_source
dc_interrupt_to_irq_source(
1517 return dal_irq_service_to_irq_source(dc
->res_pool
->irqs
, src_id
, ext_id
);
1520 void dc_interrupt_set(struct dc
*dc
, enum dc_irq_source src
, bool enable
)
1526 dal_irq_service_set(dc
->res_pool
->irqs
, src
, enable
);
1529 void dc_interrupt_ack(struct dc
*dc
, enum dc_irq_source src
)
1531 dal_irq_service_ack(dc
->res_pool
->irqs
, src
);
1534 void dc_set_power_state(
1536 enum dc_acpi_cm_power_state power_state
)
1538 struct kref refcount
;
1540 switch (power_state
) {
1541 case DC_ACPI_CM_POWER_STATE_D0
:
1542 dc_resource_state_construct(dc
, dc
->current_state
);
1544 dc
->hwss
.init_hw(dc
);
1548 dc
->hwss
.power_down(dc
);
1550 /* Zero out the current context so that on resume we start with
1551 * clean state, and dc hw programming optimizations will not
1552 * cause any trouble.
1555 /* Preserve refcount */
1556 refcount
= dc
->current_state
->refcount
;
1557 dc_resource_state_destruct(dc
->current_state
);
1558 memset(dc
->current_state
, 0,
1559 sizeof(*dc
->current_state
));
1561 dc
->current_state
->refcount
= refcount
;
1568 void dc_resume(struct dc
*dc
)
1573 for (i
= 0; i
< dc
->link_count
; i
++)
1574 core_link_resume(dc
->links
[i
]);
1579 uint32_t link_index
,
1580 struct i2c_command
*cmd
)
1583 struct dc_link
*link
= dc
->links
[link_index
];
1584 struct ddc_service
*ddc
= link
->ddc
;
1586 return dal_i2caux_submit_i2c_command(
1592 static bool link_add_remote_sink_helper(struct dc_link
*dc_link
, struct dc_sink
*sink
)
1594 if (dc_link
->sink_count
>= MAX_SINKS_PER_LINK
) {
1595 BREAK_TO_DEBUGGER();
1599 dc_sink_retain(sink
);
1601 dc_link
->remote_sinks
[dc_link
->sink_count
] = sink
;
1602 dc_link
->sink_count
++;
1607 struct dc_sink
*dc_link_add_remote_sink(
1608 struct dc_link
*link
,
1609 const uint8_t *edid
,
1611 struct dc_sink_init_data
*init_data
)
1613 struct dc_sink
*dc_sink
;
1614 enum dc_edid_status edid_status
;
1616 if (len
> MAX_EDID_BUFFER_SIZE
) {
1617 dm_error("Max EDID buffer size breached!\n");
1622 BREAK_TO_DEBUGGER();
1626 if (!init_data
->link
) {
1627 BREAK_TO_DEBUGGER();
1631 dc_sink
= dc_sink_create(init_data
);
1636 memmove(dc_sink
->dc_edid
.raw_edid
, edid
, len
);
1637 dc_sink
->dc_edid
.length
= len
;
1639 if (!link_add_remote_sink_helper(
1644 edid_status
= dm_helpers_parse_edid_caps(
1647 &dc_sink
->edid_caps
);
1649 if (edid_status
!= EDID_OK
)
1654 dc_link_remove_remote_sink(link
, dc_sink
);
1656 dc_sink_release(dc_sink
);
1660 void dc_link_remove_remote_sink(struct dc_link
*link
, struct dc_sink
*sink
)
1664 if (!link
->sink_count
) {
1665 BREAK_TO_DEBUGGER();
1669 for (i
= 0; i
< link
->sink_count
; i
++) {
1670 if (link
->remote_sinks
[i
] == sink
) {
1671 dc_sink_release(sink
);
1672 link
->remote_sinks
[i
] = NULL
;
1674 /* shrink array to remove empty place */
1675 while (i
< link
->sink_count
- 1) {
1676 link
->remote_sinks
[i
] = link
->remote_sinks
[i
+1];
1679 link
->remote_sinks
[i
] = NULL
;