2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include "dm_services.h"
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
38 #include "bios_parser_interface.h"
39 #include "include/irq_service_interface.h"
40 #include "transform.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
45 #include "link_hwss.h"
46 #include "link_encoder.h"
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
54 /*******************************************************************************
56 ******************************************************************************/
57 static void destroy_links(struct dc
*dc
)
61 for (i
= 0; i
< dc
->link_count
; i
++) {
62 if (NULL
!= dc
->links
[i
])
63 link_destroy(&dc
->links
[i
]);
67 static bool create_links(
69 uint32_t num_virtual_links
)
73 struct dc_bios
*bios
= dc
->ctx
->dc_bios
;
77 connectors_num
= bios
->funcs
->get_connectors_number(bios
);
79 if (connectors_num
> ENUM_ID_COUNT
) {
81 "DC: Number of connectors %d exceeds maximum of %d!\n",
87 if (connectors_num
== 0 && num_virtual_links
== 0) {
88 dm_error("DC: Number of connectors is zero!\n");
92 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
97 for (i
= 0; i
< connectors_num
; i
++) {
98 struct link_init_data link_init_params
= {0};
101 link_init_params
.ctx
= dc
->ctx
;
102 /* next BIOS object table connector */
103 link_init_params
.connector_index
= i
;
104 link_init_params
.link_index
= dc
->link_count
;
105 link_init_params
.dc
= dc
;
106 link
= link_create(&link_init_params
);
109 dc
->links
[dc
->link_count
] = link
;
115 for (i
= 0; i
< num_virtual_links
; i
++) {
116 struct dc_link
*link
= kzalloc(sizeof(*link
), GFP_KERNEL
);
117 struct encoder_init_data enc_init
= {0};
126 link
->connector_signal
= SIGNAL_TYPE_VIRTUAL
;
127 link
->link_id
.type
= OBJECT_TYPE_CONNECTOR
;
128 link
->link_id
.id
= CONNECTOR_ID_VIRTUAL
;
129 link
->link_id
.enum_id
= ENUM_ID_1
;
130 link
->link_enc
= kzalloc(sizeof(*link
->link_enc
), GFP_KERNEL
);
132 enc_init
.ctx
= dc
->ctx
;
133 enc_init
.channel
= CHANNEL_ID_UNKNOWN
;
134 enc_init
.hpd_source
= HPD_SOURCEID_UNKNOWN
;
135 enc_init
.transmitter
= TRANSMITTER_UNKNOWN
;
136 enc_init
.connector
= link
->link_id
;
137 enc_init
.encoder
.type
= OBJECT_TYPE_ENCODER
;
138 enc_init
.encoder
.id
= ENCODER_ID_INTERNAL_VIRTUAL
;
139 enc_init
.encoder
.enum_id
= ENUM_ID_1
;
140 virtual_link_encoder_construct(link
->link_enc
, &enc_init
);
142 link
->link_index
= dc
->link_count
;
143 dc
->links
[dc
->link_count
] = link
;
153 static bool stream_adjust_vmin_vmax(struct dc
*dc
,
154 struct dc_stream_state
**streams
, int num_streams
,
157 /* TODO: Support multiple streams */
158 struct dc_stream_state
*stream
= streams
[0];
162 for (i
= 0; i
< MAX_PIPES
; i
++) {
163 struct pipe_ctx
*pipe
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
165 if (pipe
->stream
== stream
&& pipe
->stream_res
.stream_enc
) {
166 dc
->hwss
.set_drr(&pipe
, 1, vmin
, vmax
);
168 /* build and update the info frame */
169 resource_build_info_frame(pipe
);
170 dc
->hwss
.update_info_frame(pipe
);
178 static bool stream_get_crtc_position(struct dc
*dc
,
179 struct dc_stream_state
**streams
, int num_streams
,
180 unsigned int *v_pos
, unsigned int *nom_v_pos
)
182 /* TODO: Support multiple streams */
183 struct dc_stream_state
*stream
= streams
[0];
186 struct crtc_position position
;
188 for (i
= 0; i
< MAX_PIPES
; i
++) {
189 struct pipe_ctx
*pipe
=
190 &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
192 if (pipe
->stream
== stream
&& pipe
->stream_res
.stream_enc
) {
193 dc
->hwss
.get_position(&pipe
, 1, &position
);
195 *v_pos
= position
.vertical_count
;
196 *nom_v_pos
= position
.nominal_vcount
;
203 static bool set_gamut_remap(struct dc
*dc
, const struct dc_stream_state
*stream
)
207 struct pipe_ctx
*pipes
;
209 for (i
= 0; i
< MAX_PIPES
; i
++) {
210 if (dc
->current_state
->res_ctx
.pipe_ctx
[i
].stream
== stream
) {
211 pipes
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
212 dc
->hwss
.program_gamut_remap(pipes
);
220 static bool program_csc_matrix(struct dc
*dc
, struct dc_stream_state
*stream
)
224 struct pipe_ctx
*pipes
;
226 for (i
= 0; i
< MAX_PIPES
; i
++) {
227 if (dc
->current_state
->res_ctx
.pipe_ctx
[i
].stream
230 pipes
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
231 dc
->hwss
.program_csc_matrix(pipes
,
232 stream
->output_color_space
,
233 stream
->csc_color_matrix
.matrix
);
241 static void set_static_screen_events(struct dc
*dc
,
242 struct dc_stream_state
**streams
,
244 const struct dc_static_screen_events
*events
)
248 struct pipe_ctx
*pipes_affected
[MAX_PIPES
];
249 int num_pipes_affected
= 0;
251 for (i
= 0; i
< num_streams
; i
++) {
252 struct dc_stream_state
*stream
= streams
[i
];
254 for (j
= 0; j
< MAX_PIPES
; j
++) {
255 if (dc
->current_state
->res_ctx
.pipe_ctx
[j
].stream
257 pipes_affected
[num_pipes_affected
++] =
258 &dc
->current_state
->res_ctx
.pipe_ctx
[j
];
263 dc
->hwss
.set_static_screen_control(pipes_affected
, num_pipes_affected
, events
);
266 static void set_drive_settings(struct dc
*dc
,
267 struct link_training_settings
*lt_settings
,
268 const struct dc_link
*link
)
273 for (i
= 0; i
< dc
->link_count
; i
++) {
274 if (dc
->links
[i
] == link
)
278 if (i
>= dc
->link_count
)
279 ASSERT_CRITICAL(false);
281 dc_link_dp_set_drive_settings(dc
->links
[i
], lt_settings
);
284 static void perform_link_training(struct dc
*dc
,
285 struct dc_link_settings
*link_setting
,
286 bool skip_video_pattern
)
290 for (i
= 0; i
< dc
->link_count
; i
++)
291 dc_link_dp_perform_link_training(
297 static void set_preferred_link_settings(struct dc
*dc
,
298 struct dc_link_settings
*link_setting
,
299 struct dc_link
*link
)
301 link
->preferred_link_setting
= *link_setting
;
302 dp_retrain_link_dp_test(link
, link_setting
, false);
/* Thin wrapper: turn on hot-plug detection for a DP link. */
static void enable_hpd(const struct dc_link *link)
{
	dc_link_dp_enable_hpd(link);
}
/* Thin wrapper: turn off hot-plug detection for a DP link. */
static void disable_hpd(const struct dc_link *link)
{
	dc_link_dp_disable_hpd(link);
}
316 static void set_test_pattern(
317 struct dc_link
*link
,
318 enum dp_test_pattern test_pattern
,
319 const struct link_training_settings
*p_link_settings
,
320 const unsigned char *p_custom_pattern
,
321 unsigned int cust_pattern_size
)
324 dc_link_dp_set_test_pattern(
332 static void set_dither_option(struct dc_stream_state
*stream
,
333 enum dc_dither_option option
)
335 struct bit_depth_reduction_params params
;
336 struct dc_link
*link
= stream
->status
.link
;
337 struct pipe_ctx
*pipes
= NULL
;
340 for (i
= 0; i
< MAX_PIPES
; i
++) {
341 if (link
->dc
->current_state
->res_ctx
.pipe_ctx
[i
].stream
==
343 pipes
= &link
->dc
->current_state
->res_ctx
.pipe_ctx
[i
];
348 memset(¶ms
, 0, sizeof(params
));
351 if (option
> DITHER_OPTION_MAX
)
354 stream
->dither_option
= option
;
356 resource_build_bit_depth_reduction_params(stream
,
358 stream
->bit_depth_params
= params
;
359 pipes
->stream_res
.opp
->funcs
->
360 opp_program_bit_depth_reduction(pipes
->stream_res
.opp
, ¶ms
);
365 struct dc_stream_state
*stream
,
368 struct pipe_ctx
*pipe_ctx
= NULL
;
371 for (i
= 0; i
< MAX_PIPES
; i
++) {
372 if (dc
->current_state
->res_ctx
.pipe_ctx
[i
].stream
== stream
) {
373 pipe_ctx
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
383 if (stream
->dpms_off
!= dpms_off
) {
384 stream
->dpms_off
= dpms_off
;
386 core_link_disable_stream(pipe_ctx
,
387 KEEP_ACQUIRED_RESOURCE
);
389 core_link_enable_stream(dc
->current_state
, pipe_ctx
);
393 static void allocate_dc_stream_funcs(struct dc
*dc
)
395 if (dc
->hwss
.set_drr
!= NULL
) {
396 dc
->stream_funcs
.adjust_vmin_vmax
=
397 stream_adjust_vmin_vmax
;
400 dc
->stream_funcs
.set_static_screen_events
=
401 set_static_screen_events
;
403 dc
->stream_funcs
.get_crtc_position
=
404 stream_get_crtc_position
;
406 dc
->stream_funcs
.set_gamut_remap
=
409 dc
->stream_funcs
.program_csc_matrix
=
412 dc
->stream_funcs
.set_dither_option
=
415 dc
->stream_funcs
.set_dpms
=
418 dc
->link_funcs
.set_drive_settings
=
421 dc
->link_funcs
.perform_link_training
=
422 perform_link_training
;
424 dc
->link_funcs
.set_preferred_link_settings
=
425 set_preferred_link_settings
;
427 dc
->link_funcs
.enable_hpd
=
430 dc
->link_funcs
.disable_hpd
=
433 dc
->link_funcs
.set_test_pattern
=
437 static void destruct(struct dc
*dc
)
439 dc_release_state(dc
->current_state
);
440 dc
->current_state
= NULL
;
444 dc_destroy_resource_pool(dc
);
446 if (dc
->ctx
->gpio_service
)
447 dal_gpio_service_destroy(&dc
->ctx
->gpio_service
);
450 dal_i2caux_destroy(&dc
->ctx
->i2caux
);
452 if (dc
->ctx
->created_bios
)
453 dal_bios_parser_destroy(&dc
->ctx
->dc_bios
);
456 dal_logger_destroy(&dc
->ctx
->logger
);
467 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
477 static bool construct(struct dc
*dc
,
478 const struct dc_init_data
*init_params
)
480 struct dal_logger
*logger
;
481 struct dc_context
*dc_ctx
= kzalloc(sizeof(*dc_ctx
), GFP_KERNEL
);
482 struct bw_calcs_dceip
*dc_dceip
= kzalloc(sizeof(*dc_dceip
),
484 struct bw_calcs_vbios
*dc_vbios
= kzalloc(sizeof(*dc_vbios
),
486 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
487 struct dcn_soc_bounding_box
*dcn_soc
= kzalloc(sizeof(*dcn_soc
),
489 struct dcn_ip_params
*dcn_ip
= kzalloc(sizeof(*dcn_ip
), GFP_KERNEL
);
492 enum dce_version dc_version
= DCE_VERSION_UNKNOWN
;
495 dm_error("%s: failed to create dceip\n", __func__
);
499 dc
->bw_dceip
= dc_dceip
;
502 dm_error("%s: failed to create vbios\n", __func__
);
506 dc
->bw_vbios
= dc_vbios
;
507 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
509 dm_error("%s: failed to create dcn_soc\n", __func__
);
513 dc
->dcn_soc
= dcn_soc
;
516 dm_error("%s: failed to create dcn_ip\n", __func__
);
524 dm_error("%s: failed to create ctx\n", __func__
);
528 dc
->current_state
= dc_create_state();
530 if (!dc
->current_state
) {
531 dm_error("%s: failed to create validate ctx\n", __func__
);
535 dc_ctx
->cgs_device
= init_params
->cgs_device
;
536 dc_ctx
->driver_context
= init_params
->driver
;
538 dc_ctx
->asic_id
= init_params
->asic_id
;
541 logger
= dal_logger_create(dc_ctx
, init_params
->log_mask
);
544 /* can *not* call logger. call base driver 'print error' */
545 dm_error("%s: failed to create Logger!\n", __func__
);
548 dc_ctx
->logger
= logger
;
550 dc
->ctx
->dce_environment
= init_params
->dce_environment
;
552 dc_version
= resource_parse_asic_id(init_params
->asic_id
);
553 dc
->ctx
->dce_version
= dc_version
;
554 #if defined(CONFIG_DRM_AMD_DC_FBC)
555 dc
->ctx
->fbc_gpu_addr
= init_params
->fbc_gpu_addr
;
557 /* Resource should construct all asic specific resources.
558 * This should be the only place where we need to parse the asic id
560 if (init_params
->vbios_override
)
561 dc_ctx
->dc_bios
= init_params
->vbios_override
;
563 /* Create BIOS parser */
564 struct bp_init_data bp_init_data
;
566 bp_init_data
.ctx
= dc_ctx
;
567 bp_init_data
.bios
= init_params
->asic_id
.atombios_base_address
;
569 dc_ctx
->dc_bios
= dal_bios_parser_create(
570 &bp_init_data
, dc_version
);
572 if (!dc_ctx
->dc_bios
) {
573 ASSERT_CRITICAL(false);
577 dc_ctx
->created_bios
= true;
581 dc_ctx
->i2caux
= dal_i2caux_create(dc_ctx
);
583 if (!dc_ctx
->i2caux
) {
584 ASSERT_CRITICAL(false);
588 /* Create GPIO service */
589 dc_ctx
->gpio_service
= dal_gpio_service_create(
591 dc_ctx
->dce_environment
,
594 if (!dc_ctx
->gpio_service
) {
595 ASSERT_CRITICAL(false);
599 dc
->res_pool
= dc_create_resource_pool(
601 init_params
->num_virtual_links
,
603 init_params
->asic_id
);
607 dc_resource_state_construct(dc
, dc
->current_state
);
609 if (!create_links(dc
, init_params
->num_virtual_links
))
612 allocate_dc_stream_funcs(dc
);
622 static void disable_dangling_plane(struct dc
*dc
, struct dc_state
*context
)
625 struct dc_state
*dangling_context
= dc_create_state();
626 struct dc_state
*current_ctx
;
628 if (dangling_context
== NULL
)
631 dc_resource_state_copy_construct(dc
->current_state
, dangling_context
);
633 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
634 struct dc_stream_state
*old_stream
=
635 dc
->current_state
->res_ctx
.pipe_ctx
[i
].stream
;
636 bool should_disable
= true;
638 for (j
= 0; j
< context
->stream_count
; j
++) {
639 if (old_stream
== context
->streams
[j
]) {
640 should_disable
= false;
644 if (should_disable
&& old_stream
) {
645 dc_rem_all_planes_for_stream(dc
, old_stream
, dangling_context
);
646 dc
->hwss
.apply_ctx_for_surface(dc
, old_stream
, 0, dangling_context
);
650 current_ctx
= dc
->current_state
;
651 dc
->current_state
= dangling_context
;
652 dc_release_state(current_ctx
);
655 /*******************************************************************************
657 ******************************************************************************/
659 struct dc
*dc_create(const struct dc_init_data
*init_params
)
661 struct dc
*dc
= kzalloc(sizeof(*dc
), GFP_KERNEL
);
662 unsigned int full_pipe_count
;
667 if (false == construct(dc
, init_params
))
670 /*TODO: separate HW and SW initialization*/
671 dc
->hwss
.init_hw(dc
);
673 full_pipe_count
= dc
->res_pool
->pipe_count
;
674 if (dc
->res_pool
->underlay_pipe_index
!= NO_UNDERLAY_PIPE
)
676 dc
->caps
.max_streams
= min(
678 dc
->res_pool
->stream_enc_count
);
680 dc
->caps
.max_links
= dc
->link_count
;
681 dc
->caps
.max_audios
= dc
->res_pool
->audio_count
;
683 dc
->config
= init_params
->flags
;
685 dm_logger_write(dc
->ctx
->logger
, LOG_DC
,
686 "Display Core initialized\n");
689 /* TODO: missing feature to be enabled */
690 dc
->debug
.disable_dfs_bypass
= true;
701 void dc_destroy(struct dc
**dc
)
708 static void program_timing_sync(
710 struct dc_state
*ctx
)
714 int pipe_count
= dc
->res_pool
->pipe_count
;
715 struct pipe_ctx
*unsynced_pipes
[MAX_PIPES
] = { NULL
};
717 for (i
= 0; i
< pipe_count
; i
++) {
718 if (!ctx
->res_ctx
.pipe_ctx
[i
].stream
|| ctx
->res_ctx
.pipe_ctx
[i
].top_pipe
)
721 unsynced_pipes
[i
] = &ctx
->res_ctx
.pipe_ctx
[i
];
724 for (i
= 0; i
< pipe_count
; i
++) {
726 struct pipe_ctx
*pipe_set
[MAX_PIPES
];
728 if (!unsynced_pipes
[i
])
731 pipe_set
[0] = unsynced_pipes
[i
];
732 unsynced_pipes
[i
] = NULL
;
734 /* Add tg to the set, search rest of the tg's for ones with
735 * same timing, add all tgs with same timing to the group
737 for (j
= i
+ 1; j
< pipe_count
; j
++) {
738 if (!unsynced_pipes
[j
])
741 if (resource_are_streams_timing_synchronizable(
742 unsynced_pipes
[j
]->stream
,
743 pipe_set
[0]->stream
)) {
744 pipe_set
[group_size
] = unsynced_pipes
[j
];
745 unsynced_pipes
[j
] = NULL
;
750 /* set first unblanked pipe as master */
751 for (j
= 0; j
< group_size
; j
++) {
752 struct pipe_ctx
*temp
;
754 if (!pipe_set
[j
]->stream_res
.tg
->funcs
->is_blanked(pipe_set
[j
]->stream_res
.tg
)) {
759 pipe_set
[0] = pipe_set
[j
];
765 /* remove any other unblanked pipes as they have already been synced */
766 for (j
= j
+ 1; j
< group_size
; j
++) {
767 if (!pipe_set
[j
]->stream_res
.tg
->funcs
->is_blanked(pipe_set
[j
]->stream_res
.tg
)) {
769 pipe_set
[j
] = pipe_set
[group_size
];
774 if (group_size
> 1) {
775 dc
->hwss
.enable_timing_synchronization(
776 dc
, group_index
, group_size
, pipe_set
);
782 static bool context_changed(
784 struct dc_state
*context
)
788 if (context
->stream_count
!= dc
->current_state
->stream_count
)
791 for (i
= 0; i
< dc
->current_state
->stream_count
; i
++) {
792 if (dc
->current_state
->streams
[i
] != context
->streams
[i
])
799 bool dc_enable_stereo(
801 struct dc_state
*context
,
802 struct dc_stream_state
*streams
[],
803 uint8_t stream_count
)
807 struct pipe_ctx
*pipe
;
809 for (i
= 0; i
< MAX_PIPES
; i
++) {
811 pipe
= &context
->res_ctx
.pipe_ctx
[i
];
813 pipe
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
814 for (j
= 0 ; pipe
&& j
< stream_count
; j
++) {
815 if (streams
[j
] && streams
[j
] == pipe
->stream
&&
816 dc
->hwss
.setup_stereo
)
817 dc
->hwss
.setup_stereo(pipe
, dc
);
826 * Applies given context to HW and copy it into current context.
827 * It's up to the user to release the src context afterwards.
829 static enum dc_status
dc_commit_state_no_check(struct dc
*dc
, struct dc_state
*context
)
831 struct dc_bios
*dcb
= dc
->ctx
->dc_bios
;
832 enum dc_status result
= DC_ERROR_UNEXPECTED
;
833 struct pipe_ctx
*pipe
;
835 struct dc_stream_state
*dc_streams
[MAX_STREAMS
] = {0};
837 disable_dangling_plane(dc
, context
);
839 for (i
= 0; i
< context
->stream_count
; i
++)
840 dc_streams
[i
] = context
->streams
[i
];
842 if (!dcb
->funcs
->is_accelerated_mode(dcb
))
843 dc
->hwss
.enable_accelerated_mode(dc
);
845 for (i
= 0; i
< context
->stream_count
; i
++) {
846 const struct dc_sink
*sink
= context
->streams
[i
]->sink
;
848 dc
->hwss
.apply_ctx_for_surface(
849 dc
, context
->streams
[i
],
850 context
->stream_status
[i
].plane_count
,
855 * TODO rework dc_enable_stereo call to work with validation sets?
857 for (k
= 0; k
< MAX_PIPES
; k
++) {
858 pipe
= &context
->res_ctx
.pipe_ctx
[k
];
860 for (l
= 0 ; pipe
&& l
< context
->stream_count
; l
++) {
861 if (context
->streams
[l
] &&
862 context
->streams
[l
] == pipe
->stream
&&
863 dc
->hwss
.setup_stereo
)
864 dc
->hwss
.setup_stereo(pipe
, dc
);
868 CONN_MSG_MODE(sink
->link
, "{%dx%d, %dx%d@%dKhz}",
869 context
->streams
[i
]->timing
.h_addressable
,
870 context
->streams
[i
]->timing
.v_addressable
,
871 context
->streams
[i
]->timing
.h_total
,
872 context
->streams
[i
]->timing
.v_total
,
873 context
->streams
[i
]->timing
.pix_clk_khz
);
876 dc
->hwss
.ready_shared_resources(dc
, context
);
878 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
879 pipe
= &context
->res_ctx
.pipe_ctx
[i
];
880 dc
->hwss
.wait_for_mpcc_disconnect(dc
, dc
->res_pool
, pipe
);
882 result
= dc
->hwss
.apply_ctx_to_hw(dc
, context
);
884 program_timing_sync(dc
, context
);
886 dc_enable_stereo(dc
, context
, dc_streams
, context
->stream_count
);
888 for (i
= 0; i
< context
->stream_count
; i
++) {
889 for (j
= 0; j
< MAX_PIPES
; j
++) {
890 pipe
= &context
->res_ctx
.pipe_ctx
[j
];
892 if (!pipe
->top_pipe
&& pipe
->stream
== context
->streams
[i
])
893 dc
->hwss
.pipe_control_lock(dc
, pipe
, false);
897 dc_release_state(dc
->current_state
);
899 dc
->current_state
= context
;
901 dc_retain_state(dc
->current_state
);
903 dc
->hwss
.optimize_shared_resources(dc
);
908 bool dc_commit_state(struct dc
*dc
, struct dc_state
*context
)
910 enum dc_status result
= DC_ERROR_UNEXPECTED
;
913 if (false == context_changed(dc
, context
))
916 dm_logger_write(dc
->ctx
->logger
, LOG_DC
, "%s: %d streams\n",
917 __func__
, context
->stream_count
);
919 for (i
= 0; i
< context
->stream_count
; i
++) {
920 struct dc_stream_state
*stream
= context
->streams
[i
];
922 dc_stream_log(stream
,
927 result
= dc_commit_state_no_check(dc
, context
);
929 return (result
== DC_OK
);
933 bool dc_post_update_surfaces_to_stream(struct dc
*dc
)
936 struct dc_state
*context
= dc
->current_state
;
938 post_surface_trace(dc
);
940 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++)
941 if (context
->res_ctx
.pipe_ctx
[i
].stream
== NULL
942 || context
->res_ctx
.pipe_ctx
[i
].plane_state
== NULL
)
943 dc
->hwss
.power_down_front_end(dc
, i
);
945 /* 3rd param should be true, temp w/a for RV*/
946 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
947 dc
->hwss
.set_bandwidth(dc
, context
, dc
->ctx
->dce_version
< DCN_VERSION_1_0
);
949 dc
->hwss
.set_bandwidth(dc
, context
, true);
955 * TODO this whole function needs to go
957 * dc_surface_update is needlessly complex. See if we can just replace this
958 * with a dc_plane_state and follow the atomic model a bit more closely here.
960 bool dc_commit_planes_to_stream(
962 struct dc_plane_state
**plane_states
,
963 uint8_t new_plane_count
,
964 struct dc_stream_state
*dc_stream
,
965 struct dc_state
*state
)
967 /* no need to dynamically allocate this. it's pretty small */
968 struct dc_surface_update updates
[MAX_SURFACES
];
969 struct dc_flip_addrs
*flip_addr
;
970 struct dc_plane_info
*plane_info
;
971 struct dc_scaling_info
*scaling_info
;
973 struct dc_stream_update
*stream_update
=
974 kzalloc(sizeof(struct dc_stream_update
), GFP_KERNEL
);
976 if (!stream_update
) {
981 flip_addr
= kcalloc(MAX_SURFACES
, sizeof(struct dc_flip_addrs
),
983 plane_info
= kcalloc(MAX_SURFACES
, sizeof(struct dc_plane_info
),
985 scaling_info
= kcalloc(MAX_SURFACES
, sizeof(struct dc_scaling_info
),
988 if (!flip_addr
|| !plane_info
|| !scaling_info
) {
992 kfree(stream_update
);
996 memset(updates
, 0, sizeof(updates
));
998 stream_update
->src
= dc_stream
->src
;
999 stream_update
->dst
= dc_stream
->dst
;
1000 stream_update
->out_transfer_func
= dc_stream
->out_transfer_func
;
1002 for (i
= 0; i
< new_plane_count
; i
++) {
1003 updates
[i
].surface
= plane_states
[i
];
1005 (struct dc_gamma
*)plane_states
[i
]->gamma_correction
;
1006 updates
[i
].in_transfer_func
= plane_states
[i
]->in_transfer_func
;
1007 flip_addr
[i
].address
= plane_states
[i
]->address
;
1008 flip_addr
[i
].flip_immediate
= plane_states
[i
]->flip_immediate
;
1009 plane_info
[i
].color_space
= plane_states
[i
]->color_space
;
1010 plane_info
[i
].format
= plane_states
[i
]->format
;
1011 plane_info
[i
].plane_size
= plane_states
[i
]->plane_size
;
1012 plane_info
[i
].rotation
= plane_states
[i
]->rotation
;
1013 plane_info
[i
].horizontal_mirror
= plane_states
[i
]->horizontal_mirror
;
1014 plane_info
[i
].stereo_format
= plane_states
[i
]->stereo_format
;
1015 plane_info
[i
].tiling_info
= plane_states
[i
]->tiling_info
;
1016 plane_info
[i
].visible
= plane_states
[i
]->visible
;
1017 plane_info
[i
].per_pixel_alpha
= plane_states
[i
]->per_pixel_alpha
;
1018 plane_info
[i
].dcc
= plane_states
[i
]->dcc
;
1019 scaling_info
[i
].scaling_quality
= plane_states
[i
]->scaling_quality
;
1020 scaling_info
[i
].src_rect
= plane_states
[i
]->src_rect
;
1021 scaling_info
[i
].dst_rect
= plane_states
[i
]->dst_rect
;
1022 scaling_info
[i
].clip_rect
= plane_states
[i
]->clip_rect
;
1024 updates
[i
].flip_addr
= &flip_addr
[i
];
1025 updates
[i
].plane_info
= &plane_info
[i
];
1026 updates
[i
].scaling_info
= &scaling_info
[i
];
1029 dc_commit_updates_for_stream(
1033 dc_stream
, stream_update
, plane_states
, state
);
1037 kfree(scaling_info
);
1038 kfree(stream_update
);
1042 struct dc_state
*dc_create_state(void)
1044 struct dc_state
*context
= kzalloc(sizeof(struct dc_state
),
1050 kref_init(&context
->refcount
);
1054 void dc_retain_state(struct dc_state
*context
)
1056 kref_get(&context
->refcount
);
1059 static void dc_state_free(struct kref
*kref
)
1061 struct dc_state
*context
= container_of(kref
, struct dc_state
, refcount
);
1062 dc_resource_state_destruct(context
);
1066 void dc_release_state(struct dc_state
*context
)
1068 kref_put(&context
->refcount
, dc_state_free
);
1071 static bool is_surface_in_context(
1072 const struct dc_state
*context
,
1073 const struct dc_plane_state
*plane_state
)
1077 for (j
= 0; j
< MAX_PIPES
; j
++) {
1078 const struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1080 if (plane_state
== pipe_ctx
->plane_state
) {
1088 static unsigned int pixel_format_to_bpp(enum surface_pixel_format format
)
1091 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
:
1092 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
:
1094 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555
:
1095 case SURFACE_PIXEL_FORMAT_GRPH_RGB565
:
1096 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr
:
1097 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb
:
1099 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
:
1100 case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
:
1101 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
:
1102 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
:
1104 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616
:
1105 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F
:
1106 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
:
1109 ASSERT_CRITICAL(false);
1114 static enum surface_update_type
get_plane_info_update_type(
1115 const struct dc_surface_update
*u
,
1118 struct dc_plane_info temp_plane_info
;
1119 memset(&temp_plane_info
, 0, sizeof(temp_plane_info
));
1122 return UPDATE_TYPE_FAST
;
1124 temp_plane_info
= *u
->plane_info
;
1126 /* Copy all parameters that will cause a full update
1127 * from current surface, the rest of the parameters
1128 * from provided plane configuration.
1129 * Perform memory compare and special validation
1130 * for those that can cause fast/medium updates
1133 /* Full update parameters */
1134 temp_plane_info
.color_space
= u
->surface
->color_space
;
1135 temp_plane_info
.dcc
= u
->surface
->dcc
;
1136 temp_plane_info
.horizontal_mirror
= u
->surface
->horizontal_mirror
;
1137 temp_plane_info
.plane_size
= u
->surface
->plane_size
;
1138 temp_plane_info
.rotation
= u
->surface
->rotation
;
1139 temp_plane_info
.stereo_format
= u
->surface
->stereo_format
;
1141 if (surface_index
== 0)
1142 temp_plane_info
.visible
= u
->plane_info
->visible
;
1144 temp_plane_info
.visible
= u
->surface
->visible
;
1146 if (memcmp(u
->plane_info
, &temp_plane_info
,
1147 sizeof(struct dc_plane_info
)) != 0)
1148 return UPDATE_TYPE_FULL
;
1150 if (pixel_format_to_bpp(u
->plane_info
->format
) !=
1151 pixel_format_to_bpp(u
->surface
->format
)) {
1152 /* different bytes per element will require full bandwidth
1153 * and DML calculation
1155 return UPDATE_TYPE_FULL
;
1158 if (memcmp(&u
->plane_info
->tiling_info
, &u
->surface
->tiling_info
,
1159 sizeof(union dc_tiling_info
)) != 0) {
1160 /* todo: below are HW dependent, we should add a hook to
1161 * DCE/N resource and validated there.
1163 if (u
->plane_info
->tiling_info
.gfx9
.swizzle
!= DC_SW_LINEAR
) {
1164 /* swizzled mode requires RQ to be setup properly,
1165 * thus need to run DML to calculate RQ settings
1167 return UPDATE_TYPE_FULL
;
1171 return UPDATE_TYPE_MED
;
1174 static enum surface_update_type
get_scaling_info_update_type(
1175 const struct dc_surface_update
*u
)
1177 if (!u
->scaling_info
)
1178 return UPDATE_TYPE_FAST
;
1180 if (u
->scaling_info
->src_rect
.width
!= u
->surface
->src_rect
.width
1181 || u
->scaling_info
->src_rect
.height
!= u
->surface
->src_rect
.height
1182 || u
->scaling_info
->clip_rect
.width
!= u
->surface
->clip_rect
.width
1183 || u
->scaling_info
->clip_rect
.height
!= u
->surface
->clip_rect
.height
1184 || u
->scaling_info
->dst_rect
.width
!= u
->surface
->dst_rect
.width
1185 || u
->scaling_info
->dst_rect
.height
!= u
->surface
->dst_rect
.height
)
1186 return UPDATE_TYPE_FULL
;
1188 if (u
->scaling_info
->src_rect
.x
!= u
->surface
->src_rect
.x
1189 || u
->scaling_info
->src_rect
.y
!= u
->surface
->src_rect
.y
1190 || u
->scaling_info
->clip_rect
.x
!= u
->surface
->clip_rect
.x
1191 || u
->scaling_info
->clip_rect
.y
!= u
->surface
->clip_rect
.y
1192 || u
->scaling_info
->dst_rect
.x
!= u
->surface
->dst_rect
.x
1193 || u
->scaling_info
->dst_rect
.y
!= u
->surface
->dst_rect
.y
)
1194 return UPDATE_TYPE_MED
;
1196 return UPDATE_TYPE_FAST
;
1199 static enum surface_update_type
det_surface_update(
1200 const struct dc
*dc
,
1201 const struct dc_surface_update
*u
,
1204 const struct dc_state
*context
= dc
->current_state
;
1205 enum surface_update_type type
= UPDATE_TYPE_FAST
;
1206 enum surface_update_type overall_type
= UPDATE_TYPE_FAST
;
1208 if (!is_surface_in_context(context
, u
->surface
))
1209 return UPDATE_TYPE_FULL
;
1211 type
= get_plane_info_update_type(u
, surface_index
);
1212 if (overall_type
< type
)
1213 overall_type
= type
;
1215 type
= get_scaling_info_update_type(u
);
1216 if (overall_type
< type
)
1217 overall_type
= type
;
1219 if (u
->in_transfer_func
||
1220 u
->hdr_static_metadata
) {
1221 if (overall_type
< UPDATE_TYPE_MED
)
1222 overall_type
= UPDATE_TYPE_MED
;
1225 return overall_type
;
1228 enum surface_update_type
dc_check_update_surfaces_for_stream(
1230 struct dc_surface_update
*updates
,
1232 struct dc_stream_update
*stream_update
,
1233 const struct dc_stream_status
*stream_status
)
1236 enum surface_update_type overall_type
= UPDATE_TYPE_FAST
;
1238 if (stream_status
== NULL
|| stream_status
->plane_count
!= surface_count
)
1239 return UPDATE_TYPE_FULL
;
1242 return UPDATE_TYPE_FULL
;
1244 for (i
= 0 ; i
< surface_count
; i
++) {
1245 enum surface_update_type type
=
1246 det_surface_update(dc
, &updates
[i
], i
);
1248 if (type
== UPDATE_TYPE_FULL
)
1251 if (overall_type
< type
)
1252 overall_type
= type
;
1255 return overall_type
;
1258 static struct dc_stream_status
*stream_get_status(
1259 struct dc_state
*ctx
,
1260 struct dc_stream_state
*stream
)
1264 for (i
= 0; i
< ctx
->stream_count
; i
++) {
1265 if (stream
== ctx
->streams
[i
]) {
1266 return &ctx
->stream_status
[i
];
1273 static const enum surface_update_type update_surface_trace_level
= UPDATE_TYPE_FULL
;
1276 static void commit_planes_for_stream(struct dc
*dc
,
1277 struct dc_surface_update
*srf_updates
,
1279 struct dc_stream_state
*stream
,
1280 struct dc_stream_update
*stream_update
,
1281 enum surface_update_type update_type
,
1282 struct dc_state
*context
)
1286 if (update_type
== UPDATE_TYPE_FULL
) {
1287 dc
->hwss
.set_bandwidth(dc
, context
, false);
1288 context_clock_trace(dc
, context
);
1291 if (update_type
> UPDATE_TYPE_FAST
) {
1292 for (j
= 0; j
< dc
->res_pool
->pipe_count
; j
++) {
1293 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1295 dc
->hwss
.wait_for_mpcc_disconnect(dc
, dc
->res_pool
, pipe_ctx
);
1299 if (surface_count
== 0) {
1301 * In case of turning off screen, no need to program front end a second time.
1302 * just return after program front end.
1304 dc
->hwss
.apply_ctx_for_surface(dc
, stream
, surface_count
, context
);
1308 /* Lock pipes for provided surfaces, or all active if full update*/
1309 for (i
= 0; i
< surface_count
; i
++) {
1310 struct dc_plane_state
*plane_state
= srf_updates
[i
].surface
;
1312 for (j
= 0; j
< dc
->res_pool
->pipe_count
; j
++) {
1313 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1315 if (update_type
!= UPDATE_TYPE_FULL
&& pipe_ctx
->plane_state
!= plane_state
)
1317 if (!pipe_ctx
->plane_state
|| pipe_ctx
->top_pipe
)
1320 dc
->hwss
.pipe_control_lock(
1325 if (update_type
== UPDATE_TYPE_FULL
)
1330 for (j
= 0; j
< dc
->res_pool
->pipe_count
; j
++) {
1331 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1333 if (update_type
!= UPDATE_TYPE_FULL
|| !pipe_ctx
->plane_state
)
1336 if (!pipe_ctx
->top_pipe
&& pipe_ctx
->stream
) {
1337 struct dc_stream_status
*stream_status
= stream_get_status(context
, pipe_ctx
->stream
);
1339 dc
->hwss
.apply_ctx_for_surface(
1340 dc
, pipe_ctx
->stream
, stream_status
->plane_count
, context
);
1344 if (update_type
> UPDATE_TYPE_FAST
)
1345 context_timing_trace(dc
, &context
->res_ctx
);
1347 /* Perform requested Updates */
1348 for (i
= 0; i
< surface_count
; i
++) {
1349 struct dc_plane_state
*plane_state
= srf_updates
[i
].surface
;
1351 if (update_type
== UPDATE_TYPE_MED
)
1352 dc
->hwss
.apply_ctx_for_surface(
1353 dc
, stream
, surface_count
, context
);
1355 for (j
= 0; j
< dc
->res_pool
->pipe_count
; j
++) {
1356 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1358 if (pipe_ctx
->plane_state
!= plane_state
)
1361 if (srf_updates
[i
].flip_addr
)
1362 dc
->hwss
.update_plane_addr(dc
, pipe_ctx
);
1364 if (update_type
== UPDATE_TYPE_FAST
)
1367 /* work around to program degamma regs for split pipe after set mode. */
1368 if (srf_updates
[i
].in_transfer_func
|| (pipe_ctx
->top_pipe
&&
1369 pipe_ctx
->top_pipe
->plane_state
== pipe_ctx
->plane_state
))
1370 dc
->hwss
.set_input_transfer_func(
1371 pipe_ctx
, pipe_ctx
->plane_state
);
1373 if (stream_update
!= NULL
&&
1374 stream_update
->out_transfer_func
!= NULL
) {
1375 dc
->hwss
.set_output_transfer_func(
1376 pipe_ctx
, pipe_ctx
->stream
);
1379 if (srf_updates
[i
].hdr_static_metadata
) {
1380 resource_build_info_frame(pipe_ctx
);
1381 dc
->hwss
.update_info_frame(pipe_ctx
);
1387 for (i
= dc
->res_pool
->pipe_count
- 1; i
>= 0; i
--) {
1388 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[i
];
1390 for (j
= 0; j
< surface_count
; j
++) {
1391 if (update_type
!= UPDATE_TYPE_FULL
&&
1392 srf_updates
[j
].surface
!= pipe_ctx
->plane_state
)
1394 if (!pipe_ctx
->plane_state
|| pipe_ctx
->top_pipe
)
1397 dc
->hwss
.pipe_control_lock(
/*
 * dc_commit_updates_for_stream() - validate and commit a batch of surface
 * updates (plus an optional stream update) for one stream.
 *
 * Classifies the update via dc_check_update_surfaces_for_stream(), builds a
 * fresh validation context for FULL updates, patches flip addresses into the
 * affected surfaces, rebuilds scaling parameters for MED-or-heavier updates,
 * then programs the hardware through commit_planes_for_stream() and swaps in
 * the new state, releasing the old one.
 *
 * NOTE(review): several physical lines of this function are missing from
 * this extract (the "surface_count" parameter, local loop-counter
 * declarations, the early return after the allocation failure, closing
 * braces, and the argument list of the commit_planes_for_stream() call) --
 * confirm against the original file.
 */
1407 void dc_commit_updates_for_stream(struct dc
*dc
,
1408 struct dc_surface_update
*srf_updates
,
1410 struct dc_stream_state
*stream
,
1411 struct dc_stream_update
*stream_update
,
1412 struct dc_plane_state
**plane_states
,
1413 struct dc_state
*state
)
1415 const struct dc_stream_status
*stream_status
;
1416 enum surface_update_type update_type
;
1417 struct dc_state
*context
;
1418 struct dc_context
*dc_ctx
= dc
->ctx
;
/* Snapshot the stream status; "context" may be replaced below for FULL. */
1421 stream_status
= dc_stream_get_status(stream
);
1422 context
= dc
->current_state
;
/* Decide how invasive this batch of updates is (FAST/MED/FULL). */
1424 update_type
= dc_check_update_surfaces_for_stream(
1425 dc
, srf_updates
, surface_count
, stream_update
, stream_status
);
/* Optional tracing, gated by update_surface_trace_level. */
1427 if (update_type
>= update_surface_trace_level
)
1428 update_surface_trace(dc
, srf_updates
, surface_count
);
/* FULL updates are applied against a copy of the caller-provided state. */
1431 if (update_type
>= UPDATE_TYPE_FULL
) {
1433 /* initialize scratch memory for building context */
1434 context
= dc_create_state();
1435 if (context
== NULL
) {
1436 DC_ERROR("Failed to allocate new validate context!\n");
1440 dc_resource_state_copy_construct(state
, context
);
/* Patch per-surface data (flip address, scaling) from the updates. */
1444 for (i
= 0; i
< surface_count
; i
++) {
1445 struct dc_plane_state
*surface
= srf_updates
[i
].surface
;
1447 /* TODO: On flip we don't build the state, so it still has the
1448 * old address. Which is why we are updating the address here
1450 if (srf_updates
[i
].flip_addr
) {
1451 surface
->address
= srf_updates
[i
].flip_addr
->address
;
1452 surface
->flip_immediate
= srf_updates
[i
].flip_addr
->flip_immediate
;
1456 if (update_type
>= UPDATE_TYPE_MED
) {
1457 for (j
= 0; j
< dc
->res_pool
->pipe_count
; j
++) {
1458 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[j
];
1460 if (pipe_ctx
->plane_state
!= surface
)
1463 resource_build_scaling_params(pipe_ctx
);
1468 commit_planes_for_stream(
1477 if (update_type
>= UPDATE_TYPE_FULL
)
1478 dc_post_update_surfaces_to_stream(dc
);
1480 if (dc
->current_state
!= context
) {
1482 struct dc_state
*old
= dc
->current_state
;
1484 dc
->current_state
= context
;
1485 dc_release_state(old
);
1493 uint8_t dc_get_current_stream_count(struct dc
*dc
)
1495 return dc
->current_state
->stream_count
;
1498 struct dc_stream_state
*dc_get_stream_at_index(struct dc
*dc
, uint8_t i
)
1500 if (i
< dc
->current_state
->stream_count
)
1501 return dc
->current_state
->streams
[i
];
/*
 * dc_interrupt_to_irq_source() - map a raw interrupt id pair onto the DC
 * enum dc_irq_source, by delegating to the IRQ service owned by the
 * resource pool.
 *
 * NOTE(review): the parameter list (presumably dc, src_id, ext_id, given
 * the call below) is not visible in this extract -- confirm against the
 * header declaration.
 */
1505 enum dc_irq_source
dc_interrupt_to_irq_source(
1510 return dal_irq_service_to_irq_source(dc
->res_pool
->irqs
, src_id
, ext_id
);
1513 void dc_interrupt_set(struct dc
*dc
, enum dc_irq_source src
, bool enable
)
1519 dal_irq_service_set(dc
->res_pool
->irqs
, src
, enable
);
1522 void dc_interrupt_ack(struct dc
*dc
, enum dc_irq_source src
)
1524 dal_irq_service_ack(dc
->res_pool
->irqs
, src
);
/*
 * dc_set_power_state() - transition the display core between ACPI power
 * states.
 *
 * D0 (power on): reconstruct the current resource state and re-initialize
 * the hardware via hwss.init_hw().
 * Other states (power down): call hwss.power_down(), then destruct and
 * zero dc->current_state so resume starts from a clean context -- while
 * preserving the state's kref so outstanding references stay valid.
 *
 * NOTE(review): the first parameter (presumably struct dc *dc), the
 * "default:" label and the "break" statements are not visible in this
 * extract -- confirm against the original file.
 */
1527 void dc_set_power_state(
1529 enum dc_acpi_cm_power_state power_state
)
1531 struct kref refcount
;
1533 switch (power_state
) {
1534 case DC_ACPI_CM_POWER_STATE_D0
:
/* Power-on: rebuild resource state, then bring the hardware up. */
1535 dc_resource_state_construct(dc
, dc
->current_state
);
1537 dc
->hwss
.init_hw(dc
);
/* Power-down path (non-D0 states). */
1541 dc
->hwss
.power_down(dc
);
1543 /* Zero out the current context so that on resume we start with
1544 * clean state, and dc hw programming optimizations will not
1545 * cause any trouble.
1548 /* Preserve refcount */
1549 refcount
= dc
->current_state
->refcount
;
1550 dc_resource_state_destruct(dc
->current_state
);
1551 memset(dc
->current_state
, 0,
1552 sizeof(*dc
->current_state
));
/* Restore the saved kref into the freshly zeroed state. */
1554 dc
->current_state
->refcount
= refcount
;
1561 void dc_resume(struct dc
*dc
)
1566 for (i
= 0; i
< dc
->link_count
; i
++)
1567 core_link_resume(dc
->links
[i
]);
/*
 * (dc_submit_i2c, presumed) - submit an I2C command over the DDC channel
 * of the link at @link_index.
 *
 * Resolves the link and its DDC service, then delegates the command to the
 * i2caux layer and returns its result.
 *
 * NOTE(review): the function's opening line(s) -- presumably
 * "bool dc_submit_i2c(struct dc *dc, ..." -- and the argument list of the
 * dal_i2caux_submit_i2c_command() call are not visible in this extract;
 * confirm against the original file.
 */
1572 uint32_t link_index
,
1573 struct i2c_command
*cmd
)
1576 struct dc_link
*link
= dc
->links
[link_index
];
1577 struct ddc_service
*ddc
= link
->ddc
;
1579 return dal_i2caux_submit_i2c_command(
1585 static bool link_add_remote_sink_helper(struct dc_link
*dc_link
, struct dc_sink
*sink
)
1587 if (dc_link
->sink_count
>= MAX_SINKS_PER_LINK
) {
1588 BREAK_TO_DEBUGGER();
1592 dc_sink_retain(sink
);
1594 dc_link
->remote_sinks
[dc_link
->sink_count
] = sink
;
1595 dc_link
->sink_count
++;
/*
 * dc_link_add_remote_sink() - create a dc_sink from a raw EDID and attach
 * it to @link's remote-sink list.
 *
 * Validates the EDID length against MAX_EDID_BUFFER_SIZE and the
 * init_data/init_data->link pointers, creates the sink, copies the EDID
 * into it, registers it via link_add_remote_sink_helper(), then parses the
 * EDID capabilities with dm_helpers_parse_edid_caps(). On failure the sink
 * is detached and released (cleanup calls at the bottom).
 *
 * NOTE(review): several lines are missing from this extract (the "len"
 * parameter declaration, the early "return NULL" statements, the goto
 * labels ahead of the cleanup calls, and some call arguments) -- confirm
 * against the original file.
 */
1600 struct dc_sink
*dc_link_add_remote_sink(
1601 struct dc_link
*link
,
1602 const uint8_t *edid
,
1604 struct dc_sink_init_data
*init_data
)
1606 struct dc_sink
*dc_sink
;
1607 enum dc_edid_status edid_status
;
/* Reject EDIDs larger than the sink's storage. */
1609 if (len
> MAX_EDID_BUFFER_SIZE
) {
1610 dm_error("Max EDID buffer size breached!\n");
1615 BREAK_TO_DEBUGGER();
/* init_data must reference the owning link. */
1619 if (!init_data
->link
) {
1620 BREAK_TO_DEBUGGER();
1624 dc_sink
= dc_sink_create(init_data
);
/* Copy the raw EDID into the freshly created sink. */
1629 memmove(dc_sink
->dc_edid
.raw_edid
, edid
, len
);
1630 dc_sink
->dc_edid
.length
= len
;
1632 if (!link_add_remote_sink_helper(
1637 edid_status
= dm_helpers_parse_edid_caps(
1640 &dc_sink
->edid_caps
);
1642 if (edid_status
!= EDID_OK
)
/* Failure cleanup: detach the sink and drop the creation reference. */
1647 dc_link_remove_remote_sink(link
, dc_sink
);
1649 dc_sink_release(dc_sink
);
1653 void dc_link_remove_remote_sink(struct dc_link
*link
, struct dc_sink
*sink
)
1657 if (!link
->sink_count
) {
1658 BREAK_TO_DEBUGGER();
1662 for (i
= 0; i
< link
->sink_count
; i
++) {
1663 if (link
->remote_sinks
[i
] == sink
) {
1664 dc_sink_release(sink
);
1665 link
->remote_sinks
[i
] = NULL
;
1667 /* shrink array to remove empty place */
1668 while (i
< link
->sink_count
- 1) {
1669 link
->remote_sinks
[i
] = link
->remote_sinks
[i
+1];
1672 link
->remote_sinks
[i
] = NULL
;