/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"

#include "resource.h"

#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dpp.h"
#include "timing_generator.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"

/*******************************************************************************
 * Private functions
 ******************************************************************************/
static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

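/*
 * Enumerate the physical connectors reported by the BIOS and create a
 * dc_link for each, then append the requested number of virtual links,
 * each backed by a virtual link encoder. Returns false on allocation
 * failure or if the connector count exceeds ENUM_ID_COUNT.
 */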
static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	if (connectors_num == 0 && num_virtual_links == 0) {
		dm_error("DC: Number of connectors is zero!\n");
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	return true;

failed_alloc:
	return false;
}

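/*
 * Program a new vertical min/max (variable refresh range) on every pipe
 * driving the first stream in @streams, then rebuild and resend the info
 * frame so the sink sees the updated timing.
 */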
static bool stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		int vmin, int vmax)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.set_drr(&pipe, 1, vmin, vmax);

			/* build and update the info frame */
			resource_build_info_frame(pipe);
			dc->hwss.update_info_frame(pipe);

			ret = true;
		}
	}
	return ret;
}

static bool stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

static bool set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

static bool program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_csc_matrix(pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix);
			ret = true;
		}
	}

	return ret;
}

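/*
 * Collect every pipe that drives one of @streams and hand the whole set
 * to the hardware sequencer so the given static-screen event triggers
 * are programmed in a single call.
 */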
static void set_static_screen_events(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_events *events)
{
	int i = 0;
	int j = 0;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}

static void set_drive_settings(struct dc *dc,
		struct link_training_settings *lt_settings,
		const struct dc_link *link)
{
	int i;

	for (i = 0; i < dc->link_count; i++) {
		if (dc->links[i] == link)
			break;
	}

	if (i >= dc->link_count)
		ASSERT_CRITICAL(false);

	dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
}

static void perform_link_training(struct dc *dc,
		struct dc_link_settings *link_setting,
		bool skip_video_pattern)
{
	int i;

	for (i = 0; i < dc->link_count; i++)
		dc_link_dp_perform_link_training(
			dc->links[i],
			link_setting,
			skip_video_pattern);
}

static void set_preferred_link_settings(struct dc *dc,
		struct dc_link_settings *link_setting,
		struct dc_link *link)
{
	link->preferred_link_setting = *link_setting;
	dp_retrain_link_dp_test(link, link_setting, false);
}

static void enable_hpd(const struct dc_link *link)
{
	dc_link_dp_enable_hpd(link);
}

static void disable_hpd(const struct dc_link *link)
{
	dc_link_dp_disable_hpd(link);
}

static void set_test_pattern(
		struct dc_link *link,
		enum dp_test_pattern test_pattern,
		const struct link_training_settings *p_link_settings,
		const unsigned char *p_custom_pattern,
		unsigned int cust_pattern_size)
{
	if (link != NULL)
		dc_link_dp_set_test_pattern(
			link,
			test_pattern,
			p_link_settings,
			p_custom_pattern,
			cust_pattern_size);
}

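/*
 * Update the dither option on @stream and reprogram the bit-depth
 * reduction block of the OPP feeding it. Returns silently if the stream
 * is not bound to any pipe or the option is out of range.
 */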
static void set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->status.link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	memset(&params, 0, sizeof(params));
	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	resource_build_bit_depth_reduction_params(stream,
			&params);
	stream->bit_depth_params = params;
	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

void set_dpms(
	struct dc *dc,
	struct dc_stream_state *stream,
	bool dpms_off)
{
	struct pipe_ctx *pipe_ctx = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipe_ctx) {
		ASSERT(0);
		return;
	}

	if (stream->dpms_off != dpms_off) {
		stream->dpms_off = dpms_off;
		if (dpms_off)
			core_link_disable_stream(pipe_ctx,
					KEEP_ACQUIRED_RESOURCE);
		else
			core_link_enable_stream(dc->current_state, pipe_ctx);
	}
}

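/*
 * Populate the dc->stream_funcs and dc->link_funcs callback tables with
 * the static helpers above. adjust_vmin_vmax is only hooked up when the
 * hardware sequencer actually implements set_drr.
 */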
static void allocate_dc_stream_funcs(struct dc *dc)
{
	if (dc->hwss.set_drr != NULL) {
		dc->stream_funcs.adjust_vmin_vmax =
				stream_adjust_vmin_vmax;
	}

	dc->stream_funcs.set_static_screen_events =
			set_static_screen_events;

	dc->stream_funcs.get_crtc_position =
			stream_get_crtc_position;

	dc->stream_funcs.set_gamut_remap =
			set_gamut_remap;

	dc->stream_funcs.program_csc_matrix =
			program_csc_matrix;

	dc->stream_funcs.set_dither_option =
			set_dither_option;

	dc->stream_funcs.set_dpms =
			set_dpms;

	dc->link_funcs.set_drive_settings =
			set_drive_settings;

	dc->link_funcs.perform_link_training =
			perform_link_training;

	dc->link_funcs.set_preferred_link_settings =
			set_preferred_link_settings;

	dc->link_funcs.enable_hpd =
			enable_hpd;

	dc->link_funcs.disable_hpd =
			disable_hpd;

	dc->link_funcs.set_test_pattern =
			set_test_pattern;
}

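/*
 * Tear down everything construct() built: the current state, the links,
 * the resource pool, the GPIO/I2C/BIOS/logger services hanging off the
 * context, and finally the bandwidth-calculation structures.
 */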
static void destruct(struct dc *dc)
{
	dc_release_state(dc->current_state);
	dc->current_state = NULL;

	destroy_links(dc);

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->i2caux)
		dal_i2caux_destroy(&dc->ctx->i2caux);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	if (dc->ctx->logger)
		dal_logger_destroy(&dc->ctx->logger);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

#endif
}

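/*
 * Allocate and wire up everything a dc instance needs: bandwidth
 * calculation structures, the dc_context, logger, BIOS parser (unless
 * the caller supplied a vbios_override), I2C AUX, GPIO service, resource
 * pool, links, and the stream/link function tables. On any failure it
 * falls through to destruct(), which copes with partially built state.
 */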
static bool construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dal_logger *logger;
	struct dc_context *dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	struct bw_calcs_dceip *dc_dceip = kzalloc(sizeof(*dc_dceip),
						  GFP_KERNEL);
	struct bw_calcs_vbios *dc_vbios = kzalloc(sizeof(*dc_vbios),
						  GFP_KERNEL);
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	struct dcn_soc_bounding_box *dcn_soc = kzalloc(sizeof(*dcn_soc),
						       GFP_KERNEL);
	struct dcn_ip_params *dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
#endif

	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
#endif

	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc->current_state = dc_create_state();

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;

	/* Create logger */
	logger = dal_logger_create(dc_ctx, init_params->log_mask);

	if (!logger) {
		/* can *not* call logger. call base driver 'print error' */
		dm_error("%s: failed to create Logger!\n", __func__);
		goto fail;
	}
	dc_ctx->logger = logger;
	dc->ctx = dc_ctx;
	dc->ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc->ctx->dce_version = dc_version;
#if defined(CONFIG_DRM_AMD_DC_FBC)
	dc->ctx->fbc_gpu_addr = init_params->fbc_gpu_addr;
#endif
	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	/* Create I2C AUX */
	dc_ctx->i2caux = dal_i2caux_create(dc_ctx);

	if (!dc_ctx->i2caux) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(
			dc,
			init_params->num_virtual_links,
			dc_version,
			init_params->asic_id);
	if (!dc->res_pool)
		goto fail;

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	allocate_dc_stream_funcs(dc);

	return true;

fail:
	destruct(dc);
	return false;
}

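/*
 * Before committing @context, find streams that exist in the current
 * state but not in the new one and strip their planes via a scratch
 * "dangling" context, so no pipe keeps scanning out a stale surface.
 */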
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state();
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (NULL == dc)
		goto alloc_fail;

	if (false == construct(dc, init_params))
		goto construct_fail;

	/* TODO: separate HW and SW initialization */
	dc->hwss.init_hw(dc);

	full_pipe_count = dc->res_pool->pipe_count;
	if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
		full_pipe_count--;
	dc->caps.max_streams = min(
			full_pipe_count,
			dc->res_pool->stream_enc_count);

	dc->caps.max_links = dc->link_count;
	dc->caps.max_audios = dc->res_pool->audio_count;

	dc->config = init_params->flags;

	dm_logger_write(dc->ctx->logger, LOG_DC,
			"Display Core initialized\n");

	/* TODO: missing feature to be enabled */
	dc->debug.disable_dfs_bypass = true;

	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}

void dc_destroy(struct dc **dc)
{
	destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

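/*
 * Group the top-level pipes whose stream timings are synchronizable,
 * promote an already-unblanked pipe to master of each group, and ask the
 * hardware sequencer to lock each group's timing generators together.
 */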
static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j;
	int group_index = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add this tg to the set, then search the remaining tgs for
		 * ones with the same timing and add them to the group too.
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* Set the first unblanked pipe as master. */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

		/* Remove any other unblanked pipes, as they have already been synced. */
		for (j = j + 1; j < group_size; j++) {
			if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
	}
}

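/*
 * A context differs from the current state if the stream count or any
 * stream pointer changed; used to skip redundant commits.
 */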
static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}

bool dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	bool ret = true;
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0; pipe && j < stream_count; j++) {
			if (streams[j] && streams[j] == pipe->stream &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}

	return ret;
}

/*
 * Applies the given context to HW and copies it into the current context.
 * It's up to the caller to release the source context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, j, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc);

	for (i = 0; i < context->stream_count; i++) {
		const struct dc_sink *sink = context->streams[i]->sink;

		dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context);

		/*
		 * Enable stereo.
		 * TODO: rework the dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_khz);
	}

	dc->hwss.ready_shared_resources(dc, context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}
	result = dc->hwss.apply_ctx_to_hw(dc, context);

	program_timing_sync(dc, context);

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		for (j = 0; j < MAX_PIPES; j++) {
			pipe = &context->res_ctx.pipe_ctx[j];

			if (!pipe->top_pipe && pipe->stream == context->streams[i])
				dc->hwss.pipe_control_lock(dc, pipe, false);
		}
	}

	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	dc->hwss.optimize_shared_resources(dc);

	return result;
}

bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (false == context_changed(dc, context))
		return true;

	dm_logger_write(dc->ctx->logger, LOG_DC, "%s: %d streams\n",
			__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(stream,
				dc->ctx->logger,
				LOG_DC);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}

bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	post_surface_trace(dc);

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL
				|| context->res_ctx.pipe_ctx[i].plane_state == NULL)
			dc->hwss.power_down_front_end(dc, i);

	/* 3rd param should be true; temporary workaround for RV */
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
#else
	dc->hwss.set_bandwidth(dc, context, true);
#endif
	return true;
}

/*
 * TODO: this whole function needs to go.
 *
 * dc_surface_update is needlessly complex. See if we can just replace this
 * with a dc_plane_state and follow the atomic model a bit more closely here.
 */
bool dc_commit_planes_to_stream(
		struct dc *dc,
		struct dc_plane_state **plane_states,
		uint8_t new_plane_count,
		struct dc_stream_state *dc_stream,
		struct dc_state *state)
{
	/* No need to dynamically allocate this; it's pretty small. */
	struct dc_surface_update updates[MAX_SURFACES];
	struct dc_flip_addrs *flip_addr;
	struct dc_plane_info *plane_info;
	struct dc_scaling_info *scaling_info;
	int i;
	struct dc_stream_update *stream_update =
			kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);

	if (!stream_update) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
			    GFP_KERNEL);
	plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
			     GFP_KERNEL);
	scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
			       GFP_KERNEL);

	if (!flip_addr || !plane_info || !scaling_info) {
		kfree(flip_addr);
		kfree(plane_info);
		kfree(scaling_info);
		kfree(stream_update);
		return false;
	}

	memset(updates, 0, sizeof(updates));

	stream_update->src = dc_stream->src;
	stream_update->dst = dc_stream->dst;
	stream_update->out_transfer_func = dc_stream->out_transfer_func;

	for (i = 0; i < new_plane_count; i++) {
		updates[i].surface = plane_states[i];
		updates[i].gamma =
			(struct dc_gamma *)plane_states[i]->gamma_correction;
		updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
		flip_addr[i].address = plane_states[i]->address;
		flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
		plane_info[i].color_space = plane_states[i]->color_space;
		plane_info[i].format = plane_states[i]->format;
		plane_info[i].plane_size = plane_states[i]->plane_size;
		plane_info[i].rotation = plane_states[i]->rotation;
		plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
		plane_info[i].stereo_format = plane_states[i]->stereo_format;
		plane_info[i].tiling_info = plane_states[i]->tiling_info;
		plane_info[i].visible = plane_states[i]->visible;
		plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
		plane_info[i].dcc = plane_states[i]->dcc;
		scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
		scaling_info[i].src_rect = plane_states[i]->src_rect;
		scaling_info[i].dst_rect = plane_states[i]->dst_rect;
		scaling_info[i].clip_rect = plane_states[i]->clip_rect;

		updates[i].flip_addr = &flip_addr[i];
		updates[i].plane_info = &plane_info[i];
		updates[i].scaling_info = &scaling_info[i];
	}

	dc_commit_updates_for_stream(
			dc,
			updates,
			new_plane_count,
			dc_stream, stream_update, plane_states, state);

	kfree(flip_addr);
	kfree(plane_info);
	kfree(scaling_info);
	kfree(stream_update);
	return true;
}

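/*
 * dc_state objects are reference counted: dc_create_state() returns a
 * zeroed state with refcount 1, dc_retain_state()/dc_release_state()
 * take and drop references, and the final release destructs the
 * resource state and frees the memory.
 */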
struct dc_state *dc_create_state(void)
{
	struct dc_state *context = kzalloc(sizeof(struct dc_state),
					   GFP_KERNEL);

	if (!context)
		return NULL;

	kref_init(&context->refcount);
	return context;
}

void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);
	dc_resource_state_destruct(context);
	kfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}

static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}

static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
{
	switch (format) {
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
		return 12;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
		return 16;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		return 32;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		return 64;
	default:
		ASSERT_CRITICAL(false);
		return -1;
	}
}

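/*
 * Classify a plane-info update as FAST/MED/FULL. Anything that changes
 * bandwidth requirements (pixel format bpp, swizzled tiling, or one of
 * the "full update" fields copied below) forces a FULL update.
 */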
static enum surface_update_type get_plane_info_update_type(
		const struct dc_surface_update *u,
		int surface_index)
{
	struct dc_plane_info temp_plane_info;

	memset(&temp_plane_info, 0, sizeof(temp_plane_info));

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	temp_plane_info = *u->plane_info;

	/* Copy all parameters that would cause a full update from the
	 * current surface, and the rest of the parameters from the
	 * provided plane configuration. Then do a memory compare, with
	 * special validation for the fields that can cause fast/medium
	 * updates.
	 */

	/* Full update parameters */
	temp_plane_info.color_space = u->surface->color_space;
	temp_plane_info.dcc = u->surface->dcc;
	temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
	temp_plane_info.plane_size = u->surface->plane_size;
	temp_plane_info.rotation = u->surface->rotation;
	temp_plane_info.stereo_format = u->surface->stereo_format;

	if (surface_index == 0)
		temp_plane_info.visible = u->plane_info->visible;
	else
		temp_plane_info.visible = u->surface->visible;

	if (memcmp(u->plane_info, &temp_plane_info,
			sizeof(struct dc_plane_info)) != 0)
		return UPDATE_TYPE_FULL;

	if (pixel_format_to_bpp(u->plane_info->format) !=
			pixel_format_to_bpp(u->surface->format)) {
		/* Different bytes per element will require full bandwidth
		 * and DML calculation.
		 */
		return UPDATE_TYPE_FULL;
	}

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		/* TODO: the checks below are HW dependent; we should add a
		 * hook to the DCE/N resource and validate it there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			/* Swizzled mode requires RQ to be set up properly,
			 * thus we need to run DML to calculate RQ settings.
			 */
			return UPDATE_TYPE_FULL;
		}
	}

	return UPDATE_TYPE_MED;
}

static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
			|| u->scaling_info->src_rect.height != u->surface->src_rect.height
			|| u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height)
		return UPDATE_TYPE_FULL;

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}

static enum surface_update_type det_surface_update(
		const struct dc *dc,
		const struct dc_surface_update *u,
		int surface_index)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (!is_surface_in_context(context, u->surface))
		return UPDATE_TYPE_FULL;

	type = get_plane_info_update_type(u, surface_index);
	if (overall_type < type)
		overall_type = type;

	type = get_scaling_info_update_type(u);
	if (overall_type < type)
		overall_type = type;

	if (u->in_transfer_func ||
			u->hdr_static_metadata) {
		if (overall_type < UPDATE_TYPE_MED)
			overall_type = UPDATE_TYPE_MED;
	}

	return overall_type;
}

enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		return UPDATE_TYPE_FULL;

	if (stream_update)
		return UPDATE_TYPE_FULL;

	for (i = 0; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i], i);

		if (type == UPDATE_TYPE_FULL)
			return type;

		if (overall_type < type)
			overall_type = type;
	}

	return overall_type;
}

static struct dc_stream_status *stream_get_status(
		struct dc_state *ctx,
		struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i]) {
			return &ctx->stream_status[i];
		}
	}

	return NULL;
}

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;

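/*
 * Program the requested surface updates for one stream: reprogram
 * bandwidth and wait for MPCC disconnect on full updates, lock the
 * affected pipes, reapply the context for full/medium updates, perform
 * the per-surface flips and transfer-function updates, then unlock.
 */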
static void commit_planes_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;

	if (update_type == UPDATE_TYPE_FULL) {
		dc->hwss.set_bandwidth(dc, context, false);
		context_clock_trace(dc, context);
	}

	if (update_type > UPDATE_TYPE_FAST) {
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
		}
	}

	if (surface_count == 0) {
		/*
		 * When turning off the screen there is no need to program the
		 * front end a second time; just return after programming it.
		 */
		dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context);
		return;
	}

	/* Lock pipes for the provided surfaces, or all active pipes on a full update. */
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *plane_state = srf_updates[i].surface;

		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (update_type != UPDATE_TYPE_FULL && pipe_ctx->plane_state != plane_state)
				continue;
			if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
				continue;

			dc->hwss.pipe_control_lock(
					dc,
					pipe_ctx,
					true);
		}
		if (update_type == UPDATE_TYPE_FULL)
			break;
	}

	/* Full front-end update */
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (update_type != UPDATE_TYPE_FULL || !pipe_ctx->plane_state)
			continue;

		if (!pipe_ctx->top_pipe && pipe_ctx->stream) {
			struct dc_stream_status *stream_status = stream_get_status(context, pipe_ctx->stream);

			dc->hwss.apply_ctx_for_surface(
					dc, pipe_ctx->stream, stream_status->plane_count, context);
		}
	}

	if (update_type > UPDATE_TYPE_FAST)
		context_timing_trace(dc, &context->res_ctx);

	/* Perform the requested updates */
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *plane_state = srf_updates[i].surface;

		if (update_type == UPDATE_TYPE_MED)
			dc->hwss.apply_ctx_for_surface(
					dc, stream, surface_count, context);

		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->plane_state != plane_state)
				continue;

			if (srf_updates[i].flip_addr)
				dc->hwss.update_plane_addr(dc, pipe_ctx);

			if (update_type == UPDATE_TYPE_FAST)
				continue;

			/* Workaround to program degamma registers for a split pipe after set mode. */
			if (srf_updates[i].in_transfer_func || (pipe_ctx->top_pipe &&
					pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state))
				dc->hwss.set_input_transfer_func(
						pipe_ctx, pipe_ctx->plane_state);

			if (stream_update != NULL &&
					stream_update->out_transfer_func != NULL) {
				dc->hwss.set_output_transfer_func(
						pipe_ctx, pipe_ctx->stream);
			}

			if (srf_updates[i].hdr_static_metadata) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);
			}
		}
	}

	/* Unlock pipes */
	for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		for (j = 0; j < surface_count; j++) {
			if (update_type != UPDATE_TYPE_FULL &&
					srf_updates[j].surface != pipe_ctx->plane_state)
				continue;
			if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
				continue;

			dc->hwss.pipe_control_lock(
					dc,
					pipe_ctx,
					false);

			break;
		}
	}
}

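/*
 * Top-level entry point for surface updates: determine the update type,
 * build a new validate context for full updates, apply flip addresses
 * and rebuilt scaling parameters, commit the planes, and swap in the
 * new context if one was created.
 */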
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_plane_state **plane_states,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
			dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	if (update_type >= UPDATE_TYPE_FULL) {
		/* Initialize scratch memory for building the new context. */
		context = dc_create_state();
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		/* TODO: on flip we don't build the state, so it still has the
		 * old address, which is why we update the address here.
		 */
		if (srf_updates[i].flip_addr) {
			surface->address = srf_updates[i].flip_addr->address;
			surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate;
		}

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	commit_planes_for_stream(
			dc,
			srf_updates,
			surface_count,
			stream,
			stream_update,
			update_type,
			context);

	if (update_type >= UPDATE_TYPE_FULL)
		dc_post_update_surfaces_to_stream(dc);

	if (dc->current_state != context) {
		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);
	}
}

uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}

enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
	if (dc == NULL)
		return;

	dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}

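/*
 * On D0 entry, rebuild the resource state and reinitialize the hardware.
 * On any other power state, power down the hardware and zero the current
 * context (preserving its refcount) so resume starts from a clean state.
 */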
void dc_set_power_state(
		struct dc *dc,
		enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		dc->hwss.init_hw(dc);
		break;
	default:
		dc->hwss.power_down(dc);

		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;

		break;
	}
}

void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}

bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dal_i2caux_submit_i2c_command(
			ddc->ctx->i2caux,
			ddc->ddc_pin,
			cmd);
}

static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}

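/*
 * Create a remote sink (e.g. an MST display) from a raw EDID, attach it
 * to @link, and parse its EDID capabilities. Returns NULL and unwinds on
 * any validation, allocation, or EDID-parsing failure.
 */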
struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	if (edid_status != EDID_OK)
		goto fail;

	return dc_sink;
fail:
	dc_link_remove_remote_sink(link, dc_sink);
fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}

void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

			/* shrink array to remove empty place */
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i+1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}