/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"

#include "resource.h"

#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dpp.h"
#include "timing_generator.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"


/*******************************************************************************
 * Private functions
 ******************************************************************************/
static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	if (connectors_num == 0 && num_virtual_links == 0) {
		dm_error("DC: Number of connectors is zero!\n");
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
		if (link->link_enc == NULL) {
			kfree(link);
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;
	}

	return true;

failed_alloc:
	return false;
}

static bool stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		int vmin, int vmax)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.set_drr(&pipe, 1, vmin, vmax);

			/* build and update the info frame */
			resource_build_info_frame(pipe);
			dc->hwss.update_info_frame(pipe);

			ret = true;
		}
	}
	return ret;
}
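
/* Illustrative sketch (not part of the original file): callers reach this
 * helper through the hook installed by allocate_dc_stream_funcs() below.
 * 'stream', 'vmin' and 'vmax' are assumed to come from the DM layer:
 *
 *	struct dc_stream_state *streams[1] = { stream };
 *
 *	if (dc->stream_funcs.adjust_vmin_vmax)
 *		dc->stream_funcs.adjust_vmin_vmax(dc, streams, 1, vmin, vmax);
 */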

static bool stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

static bool set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

static bool program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_csc_matrix(pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix);
			ret = true;
		}
	}

	return ret;
}

static void set_static_screen_events(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_events *events)
{
	int i = 0;
	int j = 0;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}

static void set_drive_settings(struct dc *dc,
		struct link_training_settings *lt_settings,
		const struct dc_link *link)
{
	int i;

	for (i = 0; i < dc->link_count; i++) {
		if (dc->links[i] == link)
			break;
	}

	if (i >= dc->link_count) {
		ASSERT_CRITICAL(false);
		return;
	}

	dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
}

static void perform_link_training(struct dc *dc,
		struct dc_link_settings *link_setting,
		bool skip_video_pattern)
{
	int i;

	for (i = 0; i < dc->link_count; i++)
		dc_link_dp_perform_link_training(
			dc->links[i],
			link_setting,
			skip_video_pattern);
}

static void set_preferred_link_settings(struct dc *dc,
		struct dc_link_settings *link_setting,
		struct dc_link *link)
{
	link->preferred_link_setting = *link_setting;
	dp_retrain_link_dp_test(link, link_setting, false);
}

static void enable_hpd(const struct dc_link *link)
{
	dc_link_dp_enable_hpd(link);
}

static void disable_hpd(const struct dc_link *link)
{
	dc_link_dp_disable_hpd(link);
}


static void set_test_pattern(
		struct dc_link *link,
		enum dp_test_pattern test_pattern,
		const struct link_training_settings *p_link_settings,
		const unsigned char *p_custom_pattern,
		unsigned int cust_pattern_size)
{
	if (link != NULL)
		dc_link_dp_set_test_pattern(
			link,
			test_pattern,
			p_link_settings,
			p_custom_pattern,
			cust_pattern_size);
}

static void set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->status.link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	memset(&params, 0, sizeof(params));
	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	resource_build_bit_depth_reduction_params(stream,
			&params);
	stream->bit_depth_params = params;
	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

static void set_dpms(
	struct dc *dc,
	struct dc_stream_state *stream,
	bool dpms_off)
{
	struct pipe_ctx *pipe_ctx = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipe_ctx) {
		ASSERT(0);
		return;
	}

	if (stream->dpms_off != dpms_off) {
		stream->dpms_off = dpms_off;
		if (dpms_off)
			core_link_disable_stream(pipe_ctx,
					KEEP_ACQUIRED_RESOURCE);
		else
			core_link_enable_stream(dc->current_state, pipe_ctx);
	}
}
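
/* Illustrative sketch (not part of the original file): blanking and
 * unblanking a stream through the stream_funcs hook installed below;
 * 'stream' is assumed to be a committed stream owned by the caller:
 *
 *	dc->stream_funcs.set_dpms(dc, stream, true);	- blank/disable
 *	...
 *	dc->stream_funcs.set_dpms(dc, stream, false);	- re-enable
 */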

static void allocate_dc_stream_funcs(struct dc *dc)
{
	if (dc->hwss.set_drr != NULL) {
		dc->stream_funcs.adjust_vmin_vmax =
				stream_adjust_vmin_vmax;
	}

	dc->stream_funcs.set_static_screen_events =
			set_static_screen_events;

	dc->stream_funcs.get_crtc_position =
			stream_get_crtc_position;

	dc->stream_funcs.set_gamut_remap =
			set_gamut_remap;

	dc->stream_funcs.program_csc_matrix =
			program_csc_matrix;

	dc->stream_funcs.set_dither_option =
			set_dither_option;

	dc->stream_funcs.set_dpms =
			set_dpms;

	dc->link_funcs.set_drive_settings =
			set_drive_settings;

	dc->link_funcs.perform_link_training =
			perform_link_training;

	dc->link_funcs.set_preferred_link_settings =
			set_preferred_link_settings;

	dc->link_funcs.enable_hpd =
			enable_hpd;

	dc->link_funcs.disable_hpd =
			disable_hpd;

	dc->link_funcs.set_test_pattern =
			set_test_pattern;
}

static void destruct(struct dc *dc)
{
	dc_release_state(dc->current_state);
	dc->current_state = NULL;

	destroy_links(dc);

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->i2caux)
		dal_i2caux_destroy(&dc->ctx->i2caux);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	if (dc->ctx->logger)
		dal_logger_destroy(&dc->ctx->logger);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

#endif
}

static bool construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dal_logger *logger;
	struct dc_context *dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	struct bw_calcs_dceip *dc_dceip = kzalloc(sizeof(*dc_dceip),
			GFP_KERNEL);
	struct bw_calcs_vbios *dc_vbios = kzalloc(sizeof(*dc_vbios),
			GFP_KERNEL);
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	struct dcn_soc_bounding_box *dcn_soc = kzalloc(sizeof(*dcn_soc),
			GFP_KERNEL);
	struct dcn_ip_params *dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
#endif

	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
#endif

	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc->current_state = dc_create_state();

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;

	/* Create logger */
	logger = dal_logger_create(dc_ctx, init_params->log_mask);

	if (!logger) {
		/* can *not* call logger. call base driver 'print error' */
		dm_error("%s: failed to create Logger!\n", __func__);
		goto fail;
	}
	dc_ctx->logger = logger;
	dc->ctx = dc_ctx;
	dc->ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc->ctx->dce_version = dc_version;
#if defined(CONFIG_DRM_AMD_DC_FBC)
	dc->ctx->fbc_gpu_addr = init_params->fbc_gpu_addr;
#endif
	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	/* Create I2C AUX */
	dc_ctx->i2caux = dal_i2caux_create(dc_ctx);

	if (!dc_ctx->i2caux) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(
			dc,
			init_params->num_virtual_links,
			dc_version,
			init_params->asic_id);
	if (!dc->res_pool)
		goto fail;

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	allocate_dc_stream_funcs(dc);

	return true;

fail:
	destruct(dc);
	return false;
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state();
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (NULL == dc)
		goto alloc_fail;

	if (false == construct(dc, init_params))
		goto construct_fail;

	/* TODO: separate HW and SW initialization */
	dc->hwss.init_hw(dc);

	full_pipe_count = dc->res_pool->pipe_count;
	if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
		full_pipe_count--;
	dc->caps.max_streams = min(
			full_pipe_count,
			dc->res_pool->stream_enc_count);

	dc->caps.max_links = dc->link_count;
	dc->caps.max_audios = dc->res_pool->audio_count;

	dc->config = init_params->flags;

	dm_logger_write(dc->ctx->logger, LOG_DC,
			"Display Core initialized\n");

	/* TODO: missing feature to be enabled */
	dc->debug.disable_dfs_bypass = true;

	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}

void dc_destroy(struct dc **dc)
{
	destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}
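
/* Illustrative lifecycle sketch (hypothetical DM-layer caller; building
 * 'init_params' and full error handling are elided):
 *
 *	struct dc *dc = dc_create(&init_params);
 *
 *	if (!dc)
 *		return -ENOMEM;
 *	...
 *	dc_destroy(&dc);	- frees the core and NULLs the pointer
 */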

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j;
	int group_index = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add this tg to the set, then search the remaining tgs for
		 * ones with the same timing and add them all to the group.
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

		/* remove any other unblanked pipes as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
	}
}

static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}

bool dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	bool ret = true;
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0; pipe && j < stream_count; j++) {
			if (streams[j] && streams[j] == pipe->stream &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}

	return ret;
}

/*
 * Applies the given context to HW and copies it into the current context.
 * It is up to the user to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, j, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc);

	for (i = 0; i < context->stream_count; i++) {
		const struct dc_sink *sink = context->streams[i]->sink;

		dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context);

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_khz);
	}

	dc->hwss.ready_shared_resources(dc, context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}
	result = dc->hwss.apply_ctx_to_hw(dc, context);

	program_timing_sync(dc, context);

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		for (j = 0; j < MAX_PIPES; j++) {
			pipe = &context->res_ctx.pipe_ctx[j];

			if (!pipe->top_pipe && pipe->stream == context->streams[i])
				dc->hwss.pipe_control_lock(dc, pipe, false);
		}
	}

	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	dc->hwss.optimize_shared_resources(dc);

	return result;
}

bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (false == context_changed(dc, context))
		return true;

	dm_logger_write(dc->ctx->logger, LOG_DC, "%s: %d streams\n",
			__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(stream,
				dc->ctx->logger,
				LOG_DC);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}
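
/* Illustrative commit sketch (hypothetical caller): populating and
 * validating the new dc_state is the DM layer's job and is elided here.
 * Per the comment on dc_commit_state_no_check() above, the committed
 * context is retained by the core, so the caller drops its own reference:
 *
 *	struct dc_state *context = dc_create_state();
 *
 *	... add streams/planes to 'context' and validate it ...
 *	if (dc_commit_state(dc, context))
 *		... 'context' is now dc->current_state ...
 *	dc_release_state(context);
 */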

bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	post_surface_trace(dc);

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL
				|| context->res_ctx.pipe_ctx[i].plane_state == NULL)
			dc->hwss.power_down_front_end(dc, i);

	/* 3rd param should be true, temp w/a for RV */
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
#else
	dc->hwss.set_bandwidth(dc, context, true);
#endif
	return true;
}

/*
 * TODO this whole function needs to go
 *
 * dc_surface_update is needlessly complex. See if we can just replace this
 * with a dc_plane_state and follow the atomic model a bit more closely here.
 */
bool dc_commit_planes_to_stream(
		struct dc *dc,
		struct dc_plane_state **plane_states,
		uint8_t new_plane_count,
		struct dc_stream_state *dc_stream,
		struct dc_state *state)
{
	/* no need to dynamically allocate this. it's pretty small */
	struct dc_surface_update updates[MAX_SURFACES];
	struct dc_flip_addrs *flip_addr;
	struct dc_plane_info *plane_info;
	struct dc_scaling_info *scaling_info;
	int i;
	struct dc_stream_update *stream_update =
			kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);

	if (!stream_update) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
			GFP_KERNEL);
	plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
			GFP_KERNEL);
	scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
			GFP_KERNEL);

	if (!flip_addr || !plane_info || !scaling_info) {
		kfree(flip_addr);
		kfree(plane_info);
		kfree(scaling_info);
		kfree(stream_update);
		return false;
	}

	memset(updates, 0, sizeof(updates));

	stream_update->src = dc_stream->src;
	stream_update->dst = dc_stream->dst;
	stream_update->out_transfer_func = dc_stream->out_transfer_func;

	for (i = 0; i < new_plane_count; i++) {
		updates[i].surface = plane_states[i];
		updates[i].gamma =
			(struct dc_gamma *)plane_states[i]->gamma_correction;
		updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
		flip_addr[i].address = plane_states[i]->address;
		flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
		plane_info[i].color_space = plane_states[i]->color_space;
		plane_info[i].format = plane_states[i]->format;
		plane_info[i].plane_size = plane_states[i]->plane_size;
		plane_info[i].rotation = plane_states[i]->rotation;
		plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
		plane_info[i].stereo_format = plane_states[i]->stereo_format;
		plane_info[i].tiling_info = plane_states[i]->tiling_info;
		plane_info[i].visible = plane_states[i]->visible;
		plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
		plane_info[i].dcc = plane_states[i]->dcc;
		scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
		scaling_info[i].src_rect = plane_states[i]->src_rect;
		scaling_info[i].dst_rect = plane_states[i]->dst_rect;
		scaling_info[i].clip_rect = plane_states[i]->clip_rect;

		updates[i].flip_addr = &flip_addr[i];
		updates[i].plane_info = &plane_info[i];
		updates[i].scaling_info = &scaling_info[i];
	}

	dc_commit_updates_for_stream(
			dc,
			updates,
			new_plane_count,
			dc_stream, stream_update, plane_states, state);

	kfree(flip_addr);
	kfree(plane_info);
	kfree(scaling_info);
	kfree(stream_update);
	return true;
}

struct dc_state *dc_create_state(void)
{
	struct dc_state *context = kzalloc(sizeof(struct dc_state),
			GFP_KERNEL);

	if (!context)
		return NULL;

	kref_init(&context->refcount);
	return context;
}

void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);

	dc_resource_state_destruct(context);
	kfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}
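
/* Illustrative sketch of the dc_state refcount contract: dc_create_state()
 * returns a context with a refcount of one, and every additional user takes
 * and drops its own reference:
 *
 *	struct dc_state *ctx = dc_create_state();	- refcount == 1
 *
 *	dc_retain_state(ctx);				- refcount == 2
 *	...
 *	dc_release_state(ctx);				- refcount == 1
 *	dc_release_state(ctx);				- freed via dc_state_free()
 */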

static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}

static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
{
	switch (format) {
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
		return 12;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
		return 16;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		return 32;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		return 64;
	default:
		ASSERT_CRITICAL(false);
		return -1;
	}
}

static enum surface_update_type get_plane_info_update_type(
		const struct dc_surface_update *u,
		int surface_index)
{
	struct dc_plane_info temp_plane_info;

	memset(&temp_plane_info, 0, sizeof(temp_plane_info));

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	temp_plane_info = *u->plane_info;

	/* Copy all parameters that would cause a full update
	 * from the current surface; take the rest of the parameters
	 * from the provided plane configuration.
	 * Then do a memory compare, plus special validation for
	 * those parameters that can cause fast/medium updates.
	 */

	/* Full update parameters */
	temp_plane_info.color_space = u->surface->color_space;
	temp_plane_info.dcc = u->surface->dcc;
	temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
	temp_plane_info.plane_size = u->surface->plane_size;
	temp_plane_info.rotation = u->surface->rotation;
	temp_plane_info.stereo_format = u->surface->stereo_format;

	if (surface_index == 0)
		temp_plane_info.visible = u->plane_info->visible;
	else
		temp_plane_info.visible = u->surface->visible;

	if (memcmp(u->plane_info, &temp_plane_info,
			sizeof(struct dc_plane_info)) != 0)
		return UPDATE_TYPE_FULL;

	if (pixel_format_to_bpp(u->plane_info->format) !=
			pixel_format_to_bpp(u->surface->format)) {
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		return UPDATE_TYPE_FULL;
	}

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		/* todo: the below is HW dependent; we should add a hook to
		 * the DCE/N resource and validate there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			/* swizzled mode requires RQ to be set up properly,
			 * so we need to run DML to calculate RQ settings
			 */
			return UPDATE_TYPE_FULL;
		}
	}

	return UPDATE_TYPE_MED;
}

static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
			|| u->scaling_info->src_rect.height != u->surface->src_rect.height
			|| u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height)
		return UPDATE_TYPE_FULL;

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}

static enum surface_update_type det_surface_update(
		const struct dc *dc,
		const struct dc_surface_update *u,
		int surface_index)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (!is_surface_in_context(context, u->surface))
		return UPDATE_TYPE_FULL;

	type = get_plane_info_update_type(u, surface_index);
	if (overall_type < type)
		overall_type = type;

	type = get_scaling_info_update_type(u);
	if (overall_type < type)
		overall_type = type;

	if (u->in_transfer_func ||
		u->hdr_static_metadata) {
		if (overall_type < UPDATE_TYPE_MED)
			overall_type = UPDATE_TYPE_MED;
	}

	return overall_type;
}

enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		return UPDATE_TYPE_FULL;

	if (stream_update)
		return UPDATE_TYPE_FULL;

	for (i = 0; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i], i);

		if (type == UPDATE_TYPE_FULL)
			return type;

		if (overall_type < type)
			overall_type = type;
	}

	return overall_type;
}
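
/* Illustrative sketch (hypothetical caller): a flip-only update, where only
 * the surface address changes on a plane already in the current context, is
 * expected to classify as UPDATE_TYPE_FAST:
 *
 *	struct dc_surface_update update = { 0 };
 *
 *	update.surface = plane_state;
 *	update.flip_addr = &new_addrs;	- no plane_info/scaling_info change
 *	type = dc_check_update_surfaces_for_stream(dc, &update, 1, NULL,
 *			dc_stream_get_status(stream));
 */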

static struct dc_stream_status *stream_get_status(
		struct dc_state *ctx,
		struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i]) {
			return &ctx->stream_status[i];
		}
	}

	return NULL;
}

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;


static void commit_planes_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;

	if (update_type == UPDATE_TYPE_FULL) {
		dc->hwss.set_bandwidth(dc, context, false);
		context_clock_trace(dc, context);
	}

	if (update_type > UPDATE_TYPE_FAST) {
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
		}
	}

	if (surface_count == 0) {
		/*
		 * When turning off the screen, there is no need to program the
		 * front end a second time; just return after programming it.
		 */
		dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context);
		return;
	}

	/* Lock pipes for provided surfaces, or all active if full update */
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *plane_state = srf_updates[i].surface;

		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (update_type != UPDATE_TYPE_FULL && pipe_ctx->plane_state != plane_state)
				continue;
			if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
				continue;

			dc->hwss.pipe_control_lock(
					dc,
					pipe_ctx,
					true);
		}
		if (update_type == UPDATE_TYPE_FULL)
			break;
	}

	/* Full fe update */
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (update_type != UPDATE_TYPE_FULL || !pipe_ctx->plane_state)
			continue;

		if (!pipe_ctx->top_pipe && pipe_ctx->stream) {
			struct dc_stream_status *stream_status = stream_get_status(context, pipe_ctx->stream);

			dc->hwss.apply_ctx_for_surface(
					dc, pipe_ctx->stream, stream_status->plane_count, context);
		}
	}

	if (update_type > UPDATE_TYPE_FAST)
		context_timing_trace(dc, &context->res_ctx);

	/* Perform requested Updates */
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *plane_state = srf_updates[i].surface;

		if (update_type == UPDATE_TYPE_MED)
			dc->hwss.apply_ctx_for_surface(
					dc, stream, surface_count, context);

		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->plane_state != plane_state)
				continue;

			if (srf_updates[i].flip_addr)
				dc->hwss.update_plane_addr(dc, pipe_ctx);

			if (update_type == UPDATE_TYPE_FAST)
				continue;

			/* Workaround to program degamma regs for the split pipe after set mode. */
			if (srf_updates[i].in_transfer_func || (pipe_ctx->top_pipe &&
					pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state))
				dc->hwss.set_input_transfer_func(
						pipe_ctx, pipe_ctx->plane_state);

			if (stream_update != NULL &&
					stream_update->out_transfer_func != NULL) {
				dc->hwss.set_output_transfer_func(
						pipe_ctx, pipe_ctx->stream);
			}

			if (srf_updates[i].hdr_static_metadata) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);
			}
		}
	}

	/* Unlock pipes */
	for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		for (j = 0; j < surface_count; j++) {
			if (update_type != UPDATE_TYPE_FULL &&
					srf_updates[j].surface != pipe_ctx->plane_state)
				continue;
			if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
				continue;

			dc->hwss.pipe_control_lock(
					dc,
					pipe_ctx,
					false);

			break;
		}
	}
}

void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_plane_state **plane_states,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
			dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	if (update_type >= UPDATE_TYPE_FULL) {
		/* initialize scratch memory for building context */
		context = dc_create_state();
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		/* TODO: On flip we don't build the state, so it still has the
		 * old address, which is why we update the address here.
		 */
		if (srf_updates[i].flip_addr) {
			surface->address = srf_updates[i].flip_addr->address;
			surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate;
		}

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	commit_planes_for_stream(
			dc,
			srf_updates,
			surface_count,
			stream,
			stream_update,
			update_type,
			context);

	if (update_type >= UPDATE_TYPE_FULL)
		dc_post_update_surfaces_to_stream(dc);

	if (dc->current_state != context) {
		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);
	}
}
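
/* Illustrative page-flip sketch (hypothetical caller, single plane;
 * 'new_fb_address' is assumed to come from the DM layer's framebuffer):
 *
 *	struct dc_surface_update srf = { 0 };
 *	struct dc_flip_addrs addrs = { 0 };
 *
 *	addrs.address = new_fb_address;
 *	srf.surface = plane_state;
 *	srf.flip_addr = &addrs;
 *	dc_commit_updates_for_stream(dc, &srf, 1, stream, NULL,
 *			&plane_state, dc->current_state);
 */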

uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}
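
/* Illustrative sketch: iterating the currently committed streams; every
 * index below the returned count yields a non-NULL stream:
 *
 *	uint8_t i;
 *
 *	for (i = 0; i < dc_get_current_stream_count(dc); i++) {
 *		struct dc_stream_state *s = dc_get_stream_at_index(dc, i);
 *		...
 *	}
 */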

enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
	if (dc == NULL)
		return;

	dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}

void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		dc->hwss.init_hw(dc);
		break;
	default:
		dc->hwss.power_down(dc);

		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;

		break;
	}
}

void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}
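
/* Illustrative suspend/resume ordering sketch (hypothetical DM-layer
 * caller; DC_ACPI_CM_POWER_STATE_D3 is assumed from dc_types.h):
 *
 *	- system suspend: power down HW and wipe the committed context
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D3);
 *	...
 *	- system resume: reinitialize HW, then let the links resume
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D0);
 *	dc_resume(dc);
 */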

bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dal_i2caux_submit_i2c_command(
		ddc->ctx->i2caux,
		ddc->ddc_pin,
		cmd);
}
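
/* Illustrative sketch (hypothetical caller): relaying a one-payload I2C
 * write to the sink behind link 0. The field names follow struct
 * i2c_command/i2c_payload as recalled from dm_services_types.h and should
 * be treated as an assumption, not a contract:
 *
 *	struct i2c_payload payload = {
 *		.write = true, .address = 0x37, .length = len, .data = buf };
 *	struct i2c_command cmd = {
 *		.payloads = &payload, .number_of_payloads = 1,
 *		.engine = I2C_COMMAND_ENGINE_DEFAULT, .speed = 100 };
 *
 *	if (!dc_submit_i2c(dc, 0, &cmd))
 *		... transaction failed ...
 */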

static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}

struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	if (edid_status != EDID_OK)
		goto fail;

	return dc_sink;

fail:
	dc_link_remove_remote_sink(link, dc_sink);
fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}
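
/* Illustrative sketch (hypothetical MST-style caller): registering a remote
 * sink from a raw EDID blob and dropping it again later; 'edid_buf' and
 * 'edid_len' are assumed to come from the DM layer:
 *
 *	struct dc_sink_init_data init = { 0 };
 *
 *	init.link = link;
 *	init.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
 *	sink = dc_link_add_remote_sink(link, edid_buf, edid_len, &init);
 *	...
 *	dc_link_remove_remote_sink(link, sink);
 */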

void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

			/* shrink array to remove empty place */
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i+1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}