/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"

#include "resource.h"

#include "clock_source.h"
#include "dc_bios_types.h"

#include "bandwidth_calcs.h"
#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "timing_generator.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"

/*******************************************************************************
 * Private structures
 ******************************************************************************/

struct dc_target_sync_report {
	uint32_t h_count;
	uint32_t v_count;
};

/*******************************************************************************
 * Private functions
 ******************************************************************************/
static void destroy_links(struct core_dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

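/*
 * Create one core_link per physical connector reported by VBIOS, then
 * append num_virtual_links links backed by the virtual link encoder.
 * A failed physical link is logged and skipped; a failed virtual link
 * allocation aborts with false.
 */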
static bool create_links(
		struct core_dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	if (connectors_num == 0 && num_virtual_links == 0) {
		dm_error("DC: Number of connectors is zero!\n");
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct core_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		} else {
			dm_error("DC: failed to create link!\n");
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct core_link *link = dm_alloc(sizeof(*link));
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->ctx = dc->ctx;
		link->dc = dc;
		link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = dm_alloc(sizeof(*link->link_enc));

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);

		link->public.link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;
	}

	return true;

failed_alloc:
	return false;
}

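/*
 * Adjust DRR v_min/v_max on every non-underlay pipe driving the first
 * stream, then rebuild and resend the info frame. Returns true if at
 * least one pipe matched.
 */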
static bool stream_adjust_vmin_vmax(struct dc *dc,
		const struct dc_stream **stream, int num_streams,
		int vmin, int vmax)
{
	/* TODO: Support multiple streams */
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;
	unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;

	for (i = 0; i < MAX_PIPES; i++) {
		if (core_dc->current_context->res_ctx.pipe_ctx[i].stream == core_stream
				&& i != underlay_idx) {

			pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
			core_dc->hwss.set_drr(&pipes, 1, vmin, vmax);

			/* build and update the info frame */
			resource_build_info_frame(pipes);
			core_dc->hwss.update_info_frame(pipes);

			ret = true;
		}
	}

	return ret;
}

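/*
 * Re-program the plane configuration (which carries the gamut remap) on
 * every pipe driving the first stream. Returns true if at least one pipe
 * matched.
 */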
static bool set_gamut_remap(struct dc *dc,
		const struct dc_stream **stream, int num_streams)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
				== core_stream) {

			pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
			core_dc->hwss.set_plane_config(core_dc, pipes,
					&core_dc->current_context->res_ctx);
			ret = true;
		}
	}

	return ret;
}

/* This function is not expected to fail, proper implementation of
 * validation will prevent this from ever being called for unsupported
 * configurations.
 */
static void stream_update_scaling(
		const struct dc *dc,
		const struct dc_stream *dc_stream,
		const struct rect *src,
		const struct rect *dst)
{
	struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct validate_context *cur_ctx = core_dc->current_context;
	int i, j;

	if (src)
		stream->public.src = *src;

	if (dst)
		stream->public.dst = *dst;

	for (i = 0; i < cur_ctx->target_count; i++) {
		struct core_target *target = cur_ctx->targets[i];
		struct dc_target_status *status = &cur_ctx->target_status[i];

		for (j = 0; j < target->public.stream_count; j++) {
			if (target->public.streams[j] != dc_stream)
				continue;

			if (status->surface_count)
				if (!dc_commit_surfaces_to_target(
						&core_dc->public,
						status->surfaces,
						status->surface_count,
						&target->public))
					/* Need to debug validation */
					BREAK_TO_DEBUGGER();

			return;
		}
	}
}

static bool set_backlight(struct dc *dc, unsigned int backlight_level,
		unsigned int frame_ramp, const struct dc_stream *stream)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;

	if (stream->sink->sink_signal == SIGNAL_TYPE_EDP) {
		for (i = 0; i < core_dc->link_count; i++)
			dc_link_set_backlight_level(&core_dc->links[i]->public,
					backlight_level, frame_ramp, stream);
	}

	return true;
}

static bool init_dmcu_backlight_settings(struct dc *dc)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;

	for (i = 0; i < core_dc->link_count; i++)
		dc_link_init_dmcu_backlight_settings(
				&core_dc->links[i]->public);

	return true;
}

static bool set_abm_level(struct dc *dc, unsigned int abm_level)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;

	for (i = 0; i < core_dc->link_count; i++)
		dc_link_set_abm_level(&core_dc->links[i]->public,
				abm_level);

	return true;
}

static bool set_psr_enable(struct dc *dc, bool enable)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;

	for (i = 0; i < core_dc->link_count; i++)
		dc_link_set_psr_enable(&core_dc->links[i]->public,
				enable);

	return true;
}

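/*
 * Configure PSR on the link driving the given stream and program the
 * static screen event triggers on the matching non-underlay pipes.
 */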
static bool setup_psr(struct dc *dc, const struct dc_stream *stream)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
	struct pipe_ctx *pipes;
	int i;
	unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;

	for (i = 0; i < core_dc->link_count; i++) {
		if (core_stream->sink->link == core_dc->links[i])
			dc_link_setup_psr(&core_dc->links[i]->public,
					stream);
	}

	for (i = 0; i < MAX_PIPES; i++) {
		if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
				== core_stream && i != underlay_idx) {
			pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
			core_dc->hwss.set_static_screen_control(&pipes, 1,
					0x182);
		}
	}

	return true;
}

static void set_drive_settings(struct dc *dc,
		struct link_training_settings *lt_settings)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;

	for (i = 0; i < core_dc->link_count; i++)
		dc_link_dp_set_drive_settings(&core_dc->links[i]->public,
				lt_settings);
}

static void perform_link_training(struct dc *dc,
		struct dc_link_settings *link_setting,
		bool skip_video_pattern)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;

	for (i = 0; i < core_dc->link_count; i++)
		dc_link_dp_perform_link_training(
			&core_dc->links[i]->public,
			link_setting,
			skip_video_pattern);
}

static void set_preferred_link_settings(struct dc *dc,
		struct dc_link_settings *link_setting)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;

	for (i = 0; i < core_dc->link_count; i++) {
		core_dc->links[i]->public.verified_link_cap.lane_count =
				link_setting->lane_count;
		core_dc->links[i]->public.verified_link_cap.link_rate =
				link_setting->link_rate;
	}
}

static void enable_hpd(const struct dc_link *link)
{
	dc_link_dp_enable_hpd(link);
}

static void disable_hpd(const struct dc_link *link)
{
	dc_link_dp_disable_hpd(link);
}

static void set_test_pattern(
		const struct dc_link *link,
		enum dp_test_pattern test_pattern,
		const struct link_training_settings *p_link_settings,
		const unsigned char *p_custom_pattern,
		unsigned int cust_pattern_size)
{
	if (link != NULL)
		dc_link_dp_set_test_pattern(
			link,
			test_pattern,
			p_link_settings,
			p_custom_pattern,
			cust_pattern_size);
}

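/*
 * Hook the public stream/link function pointers up to the static
 * implementations above. adjust_vmin_vmax is only exposed when the
 * hardware sequencer implements set_drr.
 */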
static void allocate_dc_stream_funcs(struct core_dc *core_dc)
{
	core_dc->public.stream_funcs.stream_update_scaling = stream_update_scaling;
	if (core_dc->hwss.set_drr != NULL) {
		core_dc->public.stream_funcs.adjust_vmin_vmax =
				stream_adjust_vmin_vmax;
	}

	core_dc->public.stream_funcs.set_gamut_remap =
			set_gamut_remap;

	core_dc->public.stream_funcs.set_backlight =
			set_backlight;

	core_dc->public.stream_funcs.init_dmcu_backlight_settings =
			init_dmcu_backlight_settings;

	core_dc->public.stream_funcs.set_abm_level =
			set_abm_level;

	core_dc->public.stream_funcs.set_psr_enable =
			set_psr_enable;

	core_dc->public.stream_funcs.setup_psr =
			setup_psr;

	core_dc->public.link_funcs.set_drive_settings =
			set_drive_settings;

	core_dc->public.link_funcs.perform_link_training =
			perform_link_training;

	core_dc->public.link_funcs.set_preferred_link_settings =
			set_preferred_link_settings;

	core_dc->public.link_funcs.enable_hpd =
			enable_hpd;

	core_dc->public.link_funcs.disable_hpd =
			disable_hpd;

	core_dc->public.link_funcs.set_test_pattern =
			set_test_pattern;
}

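/* Tear down everything created by construct(), in reverse order. */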
static void destruct(struct core_dc *dc)
{
	resource_validate_ctx_destruct(dc->current_context);

	dm_free(dc->temp_flip_context);
	dc->temp_flip_context = NULL;

	destroy_links(dc);

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->i2caux)
		dal_i2caux_destroy(&dc->ctx->i2caux);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	if (dc->ctx->logger)
		dal_logger_destroy(&dc->ctx->logger);

	dm_free(dc->current_context);
	dc->current_context = NULL;

	dm_free(dc->ctx);
	dc->ctx = NULL;
}

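/*
 * Build the DC context: validate contexts, logger, BIOS parser (unless a
 * VBIOS override is supplied), I2C AUX, GPIO service, resource pool and
 * links. Any failure unwinds the partially constructed state through
 * destruct().
 */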
static bool construct(struct core_dc *dc,
		const struct dc_init_data *init_params)
{
	struct dal_logger *logger;
	struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto ctx_fail;
	}

	dc->current_context = dm_alloc(sizeof(*dc->current_context));
	dc->temp_flip_context = dm_alloc(sizeof(*dc->temp_flip_context));

	if (!dc->current_context || !dc->temp_flip_context) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto val_ctx_fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = &dc->public;
	dc_ctx->asic_id = init_params->asic_id;

	/* Create logger */
	logger = dal_logger_create(dc_ctx);

	if (!logger) {
		/* can *not* call logger. call base driver 'print error' */
		dm_error("%s: failed to create Logger!\n", __func__);
		goto logger_fail;
	}
	dc_ctx->logger = logger;
	dc->ctx = dc_ctx;
	dc->ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc->ctx->dce_version = dc_version;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto bios_fail;
		}

		dc_ctx->created_bios = true;
	}

	/* Create I2C AUX */
	dc_ctx->i2caux = dal_i2caux_create(dc_ctx);

	if (!dc_ctx->i2caux) {
		ASSERT_CRITICAL(false);
		goto failed_to_create_i2caux;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto gpio_fail;
	}

	dc->res_pool = dc_create_resource_pool(
			dc,
			init_params->num_virtual_links,
			dc_version,
			init_params->asic_id);
	if (!dc->res_pool)
		goto create_resource_fail;

	if (!create_links(dc, init_params->num_virtual_links))
		goto create_links_fail;

	allocate_dc_stream_funcs(dc);

	return true;

	/**** error handling here ****/
create_links_fail:
create_resource_fail:
gpio_fail:
failed_to_create_i2caux:
bios_fail:
logger_fail:
val_ctx_fail:
ctx_fail:
	destruct(dc);
	return false;
}

/*
void ProgramPixelDurationV(unsigned int pixelClockInKHz )
{
	fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
	unsigned int pixDurationInPico = round(pixel_duration);

	DPG_PIPE_ARBITRATION_CONTROL1 arb_control;

	arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
	arb_control.bits.PIXEL_DURATION = pixDurationInPico;
	WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);

	arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
	arb_control.bits.PIXEL_DURATION = pixDurationInPico;
	WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);

	WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
	WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);

	WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
	WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
}
*/

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
	unsigned int full_pipe_count;

	if (NULL == core_dc)
		goto alloc_fail;

	if (false == construct(core_dc, init_params))
		goto construct_fail;

	/*TODO: separate HW and SW initialization*/
	core_dc->hwss.init_hw(core_dc);

	full_pipe_count = core_dc->res_pool->pipe_count;
	if (core_dc->res_pool->underlay_pipe_index >= 0)
		full_pipe_count--;
	core_dc->public.caps.max_targets = dm_min(
			full_pipe_count,
			core_dc->res_pool->stream_enc_count);

	core_dc->public.caps.max_links = core_dc->link_count;
	core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;

	core_dc->public.config = init_params->flags;

	dm_logger_write(core_dc->ctx->logger, LOG_DC,
			"Display Core initialized\n");

	/* TODO: missing feature to be enabled */
	core_dc->public.debug.disable_dfs_bypass = true;

	return &core_dc->public;

construct_fail:
	dm_free(core_dc);

alloc_fail:
	return NULL;
}

void dc_destroy(struct dc **dc)
{
	struct core_dc *core_dc = DC_TO_CORE(*dc);

	destruct(core_dc);
	dm_free(core_dc);
	*dc = NULL;
}

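/*
 * Compare the requested validation set against the current context.
 * Clip rect and destination position are copied over before the compare,
 * so changes to those fields alone do not force a re-validation.
 */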
static bool is_validation_required(
		const struct core_dc *dc,
		const struct dc_validation_set set[],
		int set_count)
{
	const struct validate_context *context = dc->current_context;
	int i, j;

	if (context->target_count != set_count)
		return true;

	for (i = 0; i < set_count; i++) {

		if (set[i].surface_count != context->target_status[i].surface_count)
			return true;
		if (!is_target_unchanged(DC_TARGET_TO_CORE(set[i].target), context->targets[i]))
			return true;

		for (j = 0; j < set[i].surface_count; j++) {
			struct dc_surface temp_surf = { 0 };

			temp_surf = *context->target_status[i].surfaces[j];
			temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
			temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
			temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;

			if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
				return true;
		}
	}

	return false;
}

bool dc_validate_resources(
		const struct dc *dc,
		const struct dc_validation_set set[],
		uint8_t set_count)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct validate_context *context;

	if (!is_validation_required(core_dc, set, set_count))
		return true;

	context = dm_alloc(sizeof(struct validate_context));
	if (context == NULL)
		goto context_alloc_fail;

	result = core_dc->res_pool->funcs->validate_with_context(
			core_dc, set, set_count, context);

	resource_validate_ctx_destruct(context);
	dm_free(context);

context_alloc_fail:
	if (result != DC_OK) {
		dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
				"%s:resource validation failed, dc_status:%d\n",
				__func__,
				result);
	}

	return (result == DC_OK);
}

bool dc_validate_guaranteed(
		const struct dc *dc,
		const struct dc_target *dc_target)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct validate_context *context;

	context = dm_alloc(sizeof(struct validate_context));
	if (context == NULL)
		goto context_alloc_fail;

	result = core_dc->res_pool->funcs->validate_guaranteed(
			core_dc, dc_target, context);

	resource_validate_ctx_destruct(context);
	dm_free(context);

context_alloc_fail:
	if (result != DC_OK) {
		dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
				"%s:guaranteed validation failed, dc_status:%d\n",
				__func__,
				result);
	}

	return (result == DC_OK);
}

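/*
 * Group pipes whose streams have synchronizable timings, promote the
 * first unblanked pipe of each group to master, drop other unblanked
 * pipes (already synced), and enable timing synchronization for every
 * group with more than one member.
 */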
static void program_timing_sync(
		struct core_dc *core_dc,
		struct validate_context *ctx)
{
	int i, j;
	int group_index = 0;
	int pipe_count = ctx->res_ctx.pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

		/* remove any other unblanked pipes as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			core_dc->hwss.enable_timing_synchronization(
				core_dc, group_index, group_size, pipe_set);
			group_index++;
		}
	}
}

static bool targets_changed(
		struct core_dc *dc,
		struct dc_target *targets[],
		uint8_t target_count)
{
	uint8_t i;

	if (target_count != dc->current_context->target_count)
		return true;

	for (i = 0; i < dc->current_context->target_count; i++) {
		if (&dc->current_context->targets[i]->public != targets[i])
			return true;
	}

	return false;
}

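/*
 * Translate the validated context into the per-display entries of the
 * PowerPlay display configuration, including link settings and a rounded
 * vertical refresh rate in Hz.
 */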
static void fill_display_configs(
		const struct validate_context *context,
		struct dm_pp_display_configuration *pp_display_cfg)
{
	uint8_t i, j, k;
	uint8_t num_cfgs = 0;

	for (i = 0; i < context->target_count; i++) {
		const struct core_target *target = context->targets[i];

		for (j = 0; j < target->public.stream_count; j++) {
			const struct core_stream *stream =
				DC_STREAM_TO_CORE(target->public.streams[j]);
			struct dm_pp_single_disp_config *cfg =
				&pp_display_cfg->disp_configs[num_cfgs];
			const struct pipe_ctx *pipe_ctx = NULL;

			for (k = 0; k < MAX_PIPES; k++)
				if (stream ==
					context->res_ctx.pipe_ctx[k].stream) {
					pipe_ctx = &context->res_ctx.pipe_ctx[k];
					break;
				}

			ASSERT(pipe_ctx != NULL);

			num_cfgs++;
			cfg->signal = pipe_ctx->stream->signal;
			cfg->pipe_idx = pipe_ctx->pipe_idx;
			cfg->src_height = stream->public.src.height;
			cfg->src_width = stream->public.src.width;
			cfg->ddi_channel_mapping =
				stream->sink->link->ddi_channel_mapping.raw;
			cfg->transmitter =
				stream->sink->link->link_enc->transmitter;
			cfg->link_settings.lane_count = stream->sink->link->public.cur_link_settings.lane_count;
			cfg->link_settings.link_rate = stream->sink->link->public.cur_link_settings.link_rate;
			cfg->link_settings.link_spread = stream->sink->link->public.cur_link_settings.link_spread;
			cfg->sym_clock = stream->phy_pix_clk;
			/* Round v_refresh */
			cfg->v_refresh = stream->public.timing.pix_clk_khz * 1000;
			cfg->v_refresh /= stream->public.timing.h_total;
			cfg->v_refresh = (cfg->v_refresh + stream->public.timing.v_total / 2)
					/ stream->public.timing.v_total;
		}
	}
	pp_display_cfg->display_count = num_cfgs;
}

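/* Shortest vertical blank time across all streams, in microseconds. */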
static uint32_t get_min_vblank_time_us(const struct validate_context *context)
{
	uint8_t i, j;
	uint32_t min_vertical_blank_time = -1;

	for (i = 0; i < context->target_count; i++) {
		const struct core_target *target = context->targets[i];

		for (j = 0; j < target->public.stream_count; j++) {
			const struct dc_stream *stream =
				target->public.streams[j];
			uint32_t vertical_blank_in_pixels = 0;
			uint32_t vertical_blank_time = 0;

			vertical_blank_in_pixels = stream->timing.h_total *
				(stream->timing.v_total
					- stream->timing.v_addressable);
			vertical_blank_time = vertical_blank_in_pixels
				* 1000 / stream->timing.pix_clk_khz;
			if (min_vertical_blank_time > vertical_blank_time)
				min_vertical_blank_time = vertical_blank_time;
		}
	}
	return min_vertical_blank_time;
}

static int determine_sclk_from_bounding_box(
		const struct core_dc *dc,
		int required_sclk)
{
	int i;

	/*
	 * Some asics do not give us sclk levels, so we just report the actual
	 * required sclk
	 */
	if (dc->sclk_lvls.num_levels == 0)
		return required_sclk;

	for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
		if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
			return dc->sclk_lvls.clocks_in_khz[i];
	}
	/*
	 * Even the maximum level could not satisfy the requirement; this is
	 * unexpected at this stage and should have been caught at
	 * validation time.
	 */
	ASSERT(0);
	return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
}

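/*
 * Derive the PowerPlay display requirements (clocks, p-state and cc6
 * gating, per-display configs) from the bandwidth results of a validated
 * context, and forward them to the DM layer only when they differ from
 * the previous request.
 */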
void pplib_apply_display_requirements(
		struct core_dc *dc,
		const struct validate_context *context,
		struct dm_pp_display_configuration *pp_display_cfg)
{
	pp_display_cfg->all_displays_in_sync =
		context->bw_results.all_displays_in_sync;
	pp_display_cfg->nb_pstate_switch_disable =
		context->bw_results.nbp_state_change_enable == false;
	pp_display_cfg->cpu_cc6_disable =
		context->bw_results.cpuc_state_change_enable == false;
	pp_display_cfg->cpu_pstate_disable =
		context->bw_results.cpup_state_change_enable == false;
	pp_display_cfg->cpu_pstate_separation_time =
		context->bw_results.blackout_recovery_time_us;

	pp_display_cfg->min_memory_clock_khz = context->bw_results.required_yclk
		/ MEMORY_TYPE_MULTIPLIER;

	pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
			dc,
			context->bw_results.required_sclk);

	pp_display_cfg->min_engine_clock_deep_sleep_khz
			= context->bw_results.required_sclk_deep_sleep;

	pp_display_cfg->avail_mclk_switch_time_us =
			get_min_vblank_time_us(context);
	/* TODO: dce11.2*/
	pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;

	pp_display_cfg->disp_clk_khz = context->bw_results.dispclk_khz;

	fill_display_configs(context, pp_display_cfg);

	/* TODO: is this still applicable?*/
	if (pp_display_cfg->display_count == 1) {
		const struct dc_crtc_timing *timing =
			&context->targets[0]->public.streams[0]->timing;

		pp_display_cfg->crtc_index =
			pp_display_cfg->disp_configs[0].pipe_idx;
		pp_display_cfg->line_time_in_us = timing->h_total * 1000
			/ timing->pix_clk_khz;
	}

	if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
			struct dm_pp_display_configuration)) != 0)
		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);

	dc->prev_display_config = *pp_display_cfg;
}

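/*
 * Validate and program a new set of targets, releasing the previously
 * committed context. Passing no targets releases everything. Returns
 * true when validation and hardware programming both succeed.
 */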
bool dc_commit_targets(
	struct dc *dc,
	struct dc_target *targets[],
	uint8_t target_count)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct dc_bios *dcb = core_dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct validate_context *context;
	struct dc_validation_set set[MAX_TARGETS];
	int i, j, k;

	if (false == targets_changed(core_dc, targets, target_count))
		return true;

	dm_logger_write(core_dc->ctx->logger, LOG_DC,
			"%s: %d targets\n",
			__func__,
			target_count);

	for (i = 0; i < target_count; i++) {
		struct dc_target *target = targets[i];

		dc_target_log(target,
				core_dc->ctx->logger,
				LOG_DC);

		set[i].target = targets[i];
		set[i].surface_count = 0;
	}

	context = dm_alloc(sizeof(struct validate_context));
	if (context == NULL)
		goto context_alloc_fail;

	result = core_dc->res_pool->funcs->validate_with_context(core_dc, set, target_count, context);
	if (result != DC_OK) {
		dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
				"%s: Context validation failed! dc_status:%d\n",
				__func__,
				result);
		BREAK_TO_DEBUGGER();
		resource_validate_ctx_destruct(context);
		goto fail;
	}

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		core_dc->hwss.enable_accelerated_mode(core_dc);
	}

	if (result == DC_OK) {
		result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
	}

	program_timing_sync(core_dc, context);

	for (i = 0; i < context->target_count; i++) {
		struct dc_target *dc_target = &context->targets[i]->public;
		struct core_sink *sink = DC_SINK_TO_CORE(dc_target->streams[0]->sink);

		for (j = 0; j < context->target_status[i].surface_count; j++) {
			const struct dc_surface *dc_surface =
					context->target_status[i].surfaces[j];

			for (k = 0; k < context->res_ctx.pool->pipe_count; k++) {
				struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[k];

				if (dc_surface != &pipe->surface->public
						|| !dc_surface->visible)
					continue;

				pipe->tg->funcs->set_blank(pipe->tg, false);
			}
		}

		CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
				dc_target->streams[0]->timing.h_addressable,
				dc_target->streams[0]->timing.v_addressable,
				dc_target->streams[0]->timing.h_total,
				dc_target->streams[0]->timing.v_total,
				dc_target->streams[0]->timing.pix_clk_khz);
	}

	pplib_apply_display_requirements(core_dc,
			context, &context->pp_display_cfg);

	resource_validate_ctx_destruct(core_dc->current_context);

	dm_free(core_dc->current_context);
	core_dc->current_context = context;

	return (result == DC_OK);

fail:
	dm_free(context);

context_alloc_fail:
	return (result == DC_OK);
}

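/*
 * Prepare a surface update: validate the new surface set against a copy
 * of the current context, raise the display clock first if the new
 * configuration needs more, and prepare the affected pipes. Returns
 * false when the target is not committed or validation fails.
 */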
bool dc_pre_update_surfaces_to_target(
		struct dc *dc,
		const struct dc_surface *const *new_surfaces,
		uint8_t new_surface_count,
		struct dc_target *dc_target)
{
	int i, j;
	struct core_dc *core_dc = DC_TO_CORE(dc);
	uint32_t prev_disp_clk = core_dc->current_context->bw_results.dispclk_khz;
	struct core_target *target = DC_TARGET_TO_CORE(dc_target);
	struct dc_target_status *target_status = NULL;
	struct validate_context *context;
	struct validate_context *temp_context;
	bool ret = true;

	pre_surface_trace(dc, new_surfaces, new_surface_count);

	if (core_dc->current_context->target_count == 0)
		return false;

	/* Cannot commit surface to a target that is not committed */
	for (i = 0; i < core_dc->current_context->target_count; i++)
		if (target == core_dc->current_context->targets[i])
			break;

	if (i == core_dc->current_context->target_count)
		return false;

	target_status = &core_dc->current_context->target_status[i];

	if (new_surface_count == target_status->surface_count) {
		bool skip_pre = true;

		for (i = 0; i < target_status->surface_count; i++) {
			struct dc_surface temp_surf = { 0 };

			temp_surf = *target_status->surfaces[i];
			temp_surf.clip_rect = new_surfaces[i]->clip_rect;
			temp_surf.dst_rect.x = new_surfaces[i]->dst_rect.x;
			temp_surf.dst_rect.y = new_surfaces[i]->dst_rect.y;

			if (memcmp(&temp_surf, new_surfaces[i], sizeof(temp_surf)) != 0) {
				skip_pre = false;
				break;
			}
		}

		if (skip_pre)
			return true;
	}

	context = dm_alloc(sizeof(struct validate_context));

	if (!context) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		ret = false;
		goto val_ctx_fail;
	}

	resource_validate_ctx_copy_construct(core_dc->current_context, context);

	dm_logger_write(core_dc->ctx->logger, LOG_DC,
			"%s: commit %d surfaces to target %p\n",
			__func__,
			new_surface_count,
			dc_target);

	if (!resource_attach_surfaces_to_context(
			new_surfaces, new_surface_count, dc_target, context)) {
		BREAK_TO_DEBUGGER();
		ret = false;
		goto unexpected_fail;
	}

	for (i = 0; i < new_surface_count; i++)
		for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
			if (context->res_ctx.pipe_ctx[j].surface !=
					DC_SURFACE_TO_CORE(new_surfaces[i]))
				continue;

			resource_build_scaling_params(
				new_surfaces[i], &context->res_ctx.pipe_ctx[j]);

			if (dc->debug.surface_visual_confirm) {
				context->res_ctx.pipe_ctx[j].scl_data.recout.height -= 2;
				context->res_ctx.pipe_ctx[j].scl_data.recout.width -= 2;
			}
		}

	if (core_dc->res_pool->funcs->validate_bandwidth(core_dc, context) != DC_OK) {
		BREAK_TO_DEBUGGER();
		ret = false;
		goto unexpected_fail;
	}

	if (core_dc->res_pool->funcs->apply_clk_constraints) {
		temp_context = core_dc->res_pool->funcs->apply_clk_constraints(
				core_dc,
				context);
		if (!temp_context) {
			dm_error("%s:failed apply clk constraints\n", __func__);
			ret = false;
			goto unexpected_fail;
		}
		resource_validate_ctx_destruct(context);
		dm_free(context);
		context = temp_context;
	}

	if (prev_disp_clk < context->bw_results.dispclk_khz) {
		pplib_apply_display_requirements(core_dc, context,
						&context->pp_display_cfg);
		core_dc->hwss.set_display_clock(context);
		core_dc->current_context->bw_results.dispclk_khz =
				context->bw_results.dispclk_khz;
	}

	for (i = 0; i < new_surface_count; i++)
		for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
			if (context->res_ctx.pipe_ctx[j].surface !=
					DC_SURFACE_TO_CORE(new_surfaces[i]))
				continue;

			core_dc->hwss.prepare_pipe_for_context(
					core_dc,
					&context->res_ctx.pipe_ctx[j],
					context);

			if (!new_surfaces[i]->visible)
				context->res_ctx.pipe_ctx[j].tg->funcs->set_blank(
						context->res_ctx.pipe_ctx[j].tg, true);
		}

unexpected_fail:
	resource_validate_ctx_destruct(context);
	dm_free(context);
val_ctx_fail:
	return ret;
}

bool dc_post_update_surfaces_to_target(struct dc *dc)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;

	post_surface_trace(dc);

	for (i = 0; i < core_dc->current_context->res_ctx.pool->pipe_count; i++)
		if (core_dc->current_context->res_ctx.pipe_ctx[i].stream == NULL)
			core_dc->hwss.power_down_front_end(
				core_dc, &core_dc->current_context->res_ctx.pipe_ctx[i]);

	if (core_dc->res_pool->funcs->validate_bandwidth(core_dc, core_dc->current_context)
			!= DC_OK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	core_dc->hwss.set_bandwidth(core_dc);

	pplib_apply_display_requirements(
			core_dc, core_dc->current_context, &core_dc->current_context->pp_display_cfg);

	return true;
}

bool dc_commit_surfaces_to_target(
		struct dc *dc,
		const struct dc_surface **new_surfaces,
		uint8_t new_surface_count,
		struct dc_target *dc_target)
{
	struct dc_surface_update updates[MAX_SURFACES] = { 0 };
	struct dc_flip_addrs flip_addr[MAX_SURFACES] = { 0 };
	struct dc_plane_info plane_info[MAX_SURFACES] = { 0 };
	struct dc_scaling_info scaling_info[MAX_SURFACES] = { 0 };
	int i;

	if (!dc_pre_update_surfaces_to_target(
			dc, new_surfaces, new_surface_count, dc_target))
		return false;

	for (i = 0; i < new_surface_count; i++) {
		updates[i].surface = new_surfaces[i];
		updates[i].gamma = (struct dc_gamma *)new_surfaces[i]->gamma_correction;

		flip_addr[i].address = new_surfaces[i]->address;
		flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
		plane_info[i].color_space = new_surfaces[i]->color_space;
		plane_info[i].format = new_surfaces[i]->format;
		plane_info[i].plane_size = new_surfaces[i]->plane_size;
		plane_info[i].rotation = new_surfaces[i]->rotation;
		plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
		plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
		plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
		plane_info[i].visible = new_surfaces[i]->visible;
		scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
		scaling_info[i].src_rect = new_surfaces[i]->src_rect;
		scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
		scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;

		updates[i].flip_addr = &flip_addr[i];
		updates[i].plane_info = &plane_info[i];
		updates[i].scaling_info = &scaling_info[i];
	}
	dc_update_surfaces_for_target(dc, updates, new_surface_count, dc_target);

	return dc_post_update_surfaces_to_target(dc);
}

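/*
 * Apply per-surface updates (flip address, plane info, scaling) to every
 * pipe driving an updated surface, taking the pipe locks around the
 * programming and releasing them once the new state is committed. The
 * temporary flip context becomes the new current context.
 */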
void dc_update_surfaces_for_target(struct dc *dc, struct dc_surface_update *updates,
		int surface_count, struct dc_target *dc_target)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct validate_context *context = core_dc->temp_flip_context;
	int i, j;
	bool is_new_pipe_surface[MAX_PIPES];
	const struct dc_surface *new_surfaces[MAX_SURFACES] = { 0 };

	update_surface_trace(dc, updates, surface_count);

	*context = *core_dc->current_context;

	for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
		struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];

		if (cur_pipe->top_pipe)
			cur_pipe->top_pipe =
				&context->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];

		if (cur_pipe->bottom_pipe)
			cur_pipe->bottom_pipe =
				&context->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
	}

	for (j = 0; j < MAX_PIPES; j++)
		is_new_pipe_surface[j] = true;

	for (i = 0; i < surface_count; i++) {
		struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);

		new_surfaces[i] = updates[i].surface;
		for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			/* mark pipes that already scan out this surface */
			if (surface == pipe_ctx->surface)
				is_new_pipe_surface[j] = false;
		}
	}

	if (dc_target) {
		struct core_target *target = DC_TARGET_TO_CORE(dc_target);

		if (core_dc->current_context->target_count == 0)
			return;

		/* Cannot commit surface to a target that is not committed */
		for (i = 0; i < core_dc->current_context->target_count; i++)
			if (target == core_dc->current_context->targets[i])
				break;
		if (i == core_dc->current_context->target_count)
			return;

		if (!resource_attach_surfaces_to_context(
				new_surfaces, surface_count, dc_target, context)) {
			BREAK_TO_DEBUGGER();
			return;
		}
	}

	for (i = 0; i < surface_count; i++) {
		struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);

		for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->surface != surface)
				continue;

			if (updates[i].flip_addr) {
				surface->public.address = updates[i].flip_addr->address;
				surface->public.flip_immediate =
						updates[i].flip_addr->flip_immediate;
			}

			if (updates[i].plane_info || updates[i].scaling_info
					|| is_new_pipe_surface[j]) {

				if (updates[i].plane_info) {
					surface->public.color_space =
						updates[i].plane_info->color_space;
					surface->public.format =
						updates[i].plane_info->format;
					surface->public.plane_size =
						updates[i].plane_info->plane_size;
					surface->public.rotation =
						updates[i].plane_info->rotation;
					surface->public.horizontal_mirror =
						updates[i].plane_info->horizontal_mirror;
					surface->public.stereo_format =
						updates[i].plane_info->stereo_format;
					surface->public.tiling_info =
						updates[i].plane_info->tiling_info;
					surface->public.visible =
						updates[i].plane_info->visible;
				}

				if (updates[i].scaling_info) {
					surface->public.scaling_quality =
						updates[i].scaling_info->scaling_quality;
					surface->public.dst_rect =
						updates[i].scaling_info->dst_rect;
					surface->public.src_rect =
						updates[i].scaling_info->src_rect;
					surface->public.clip_rect =
						updates[i].scaling_info->clip_rect;
				}

				resource_build_scaling_params(updates[i].surface, pipe_ctx);
				if (dc->debug.surface_visual_confirm) {
					pipe_ctx->scl_data.recout.height -= 2;
					pipe_ctx->scl_data.recout.width -= 2;
				}
			}
		}
	}

	for (i = 0; i < surface_count; i++) {
		struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
		bool apply_ctx = false;

		for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->surface != surface)
				continue;

			if (updates[i].flip_addr) {
				core_dc->hwss.pipe_control_lock(
						core_dc->hwseq,
						pipe_ctx->pipe_idx,
						PIPE_LOCK_CONTROL_SURFACE,
						true);
				core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
			}

			if (updates[i].plane_info || updates[i].scaling_info
					|| is_new_pipe_surface[j]) {

				apply_ctx = true;

				if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
					core_dc->hwss.pipe_control_lock(
							core_dc->hwseq,
							pipe_ctx->pipe_idx,
							PIPE_LOCK_CONTROL_SURFACE |
							PIPE_LOCK_CONTROL_GRAPHICS |
							PIPE_LOCK_CONTROL_SCL |
							PIPE_LOCK_CONTROL_BLENDER |
							PIPE_LOCK_CONTROL_MODE,
							true);
				}
			}

			if (updates[i].gamma)
				core_dc->hwss.prepare_pipe_for_context(
						core_dc, pipe_ctx, context);
		}
		if (apply_ctx)
			core_dc->hwss.apply_ctx_for_surface(core_dc, surface, context);
	}

	for (i = context->res_ctx.pool->pipe_count - 1; i >= 0; i--) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		for (j = 0; j < surface_count; j++) {
			if (updates[j].surface == &pipe_ctx->surface->public) {
				if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
					core_dc->hwss.pipe_control_lock(
							core_dc->hwseq,
							pipe_ctx->pipe_idx,
							PIPE_LOCK_CONTROL_GRAPHICS |
							PIPE_LOCK_CONTROL_SCL |
							PIPE_LOCK_CONTROL_BLENDER |
							PIPE_LOCK_CONTROL_SURFACE,
							false);
				}
				break;
			}
		}
	}

	core_dc->temp_flip_context = core_dc->current_context;
	core_dc->current_context = context;
}

uint8_t dc_get_current_target_count(const struct dc *dc)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	return core_dc->current_context->target_count;
}

struct dc_target *dc_get_target_at_index(const struct dc *dc, uint8_t i)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	if (i < core_dc->current_context->target_count)
		return &(core_dc->current_context->targets[i]->public);
	return NULL;
}

const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	return &core_dc->links[link_index]->public;
}

const struct graphics_object_id dc_get_link_id_at_index(
		struct dc *dc, uint32_t link_index)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	return core_dc->links[link_index]->link_id;
}

const struct ddc_service *dc_get_ddc_at_index(
		struct dc *dc, uint32_t link_index)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	return core_dc->links[link_index]->ddc;
}

enum dc_irq_source dc_get_hpd_irq_source_at_index(
		struct dc *dc, uint32_t link_index)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	return core_dc->links[link_index]->public.irq_source_hpd;
}

const struct audio **dc_get_audios(struct dc *dc)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	return (const struct audio **)core_dc->res_pool->audios;
}

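/*
 * Program new surface addresses for page flips on every pipe that
 * currently scans out one of the given surfaces.
 */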
void dc_flip_surface_addrs(
		struct dc *dc,
		const struct dc_surface *const surfaces[],
		struct dc_flip_addrs flip_addrs[],
		uint32_t count)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i, j;

	for (i = 0; i < count; i++) {
		struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);

		surface->public.address = flip_addrs[i].address;
		surface->public.flip_immediate = flip_addrs[i].flip_immediate;

		for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->surface != surface)
				continue;

			core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
		}
	}
}

enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
}

void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	dal_irq_service_ack(core_dc->res_pool->irqs, src);
}

void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state,
	enum dc_video_power_state video_power_state)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	core_dc->previous_power_state = core_dc->current_power_state;
	core_dc->current_power_state = video_power_state;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		core_dc->hwss.init_hw(core_dc);
		break;
	default:
		/* NULL means "reset/release all DC targets" */
		dc_commit_targets(dc, NULL, 0);

		core_dc->hwss.power_down(core_dc);

		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */
		memset(core_dc->current_context, 0,
				sizeof(*core_dc->current_context));

		core_dc->current_context->res_ctx.pool = core_dc->res_pool;

		break;
	}
}

void dc_resume(const struct dc *dc)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	uint32_t i;

	for (i = 0; i < core_dc->link_count; i++)
		core_link_resume(core_dc->links[i]);
}

bool dc_read_dpcd(
		struct dc *dc,
		uint32_t link_index,
		uint32_t address,
		uint8_t *data,
		uint32_t size)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	struct core_link *link = core_dc->links[link_index];
	enum ddc_result r = dal_ddc_service_read_dpcd_data(
			link->ddc,
			address,
			data,
			size);

	return r == DDC_RESULT_SUCESSFULL;
}

bool dc_write_dpcd(
		struct dc *dc,
		uint32_t link_index,
		uint32_t address,
		const uint8_t *data,
		uint32_t size)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	struct core_link *link = core_dc->links[link_index];
	enum ddc_result r = dal_ddc_service_write_dpcd_data(
			link->ddc,
			address,
			data,
			size);

	return r == DDC_RESULT_SUCESSFULL;
}

bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	struct core_link *link = core_dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dal_i2caux_submit_i2c_command(
			ddc->ctx->i2caux,
			ddc->ddc_pin,
			cmd);
}

static bool link_add_remote_sink_helper(struct core_link *core_link, struct dc_sink *sink)
{
	struct dc_link *dc_link = &core_link->public;

	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}

struct dc_sink *dc_link_add_remote_sink(
		const struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;
	struct core_link *core_link = DC_LINK_TO_LINK(link);

	if (len > MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			core_link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			core_link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	if (edid_status != EDID_OK)
		goto fail;

	return dc_sink;
fail:
	dc_link_remove_remote_sink(link, dc_sink);
fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}

void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
{
	struct core_link *core_link = DC_LINK_TO_LINK(link);
	struct dc_link *dc_link = &core_link->public;

	dc_link->local_sink = sink;

	if (sink == NULL) {
		dc_link->type = dc_connection_none;
	} else {
		dc_link->type = dc_connection_single;
	}
}

void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
{
	int i;
	struct core_link *core_link = DC_LINK_TO_LINK(link);
	struct dc_link *dc_link = &core_link->public;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < dc_link->sink_count; i++) {
		if (dc_link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			dc_link->remote_sinks[i] = NULL;

			/* shrink array to remove empty place */
			while (i < dc_link->sink_count - 1) {
				dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];
				i++;
			}

			dc_link->sink_count--;
			return;
		}
	}
}

const struct dc_stream_status *dc_stream_get_status(
	const struct dc_stream *dc_stream)
{
	struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);

	return &stream->status;
}

bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data)
{
	int i;
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct mem_input *mi = NULL;

	for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
		if (core_dc->res_pool->mis[i] != NULL) {
			mi = core_dc->res_pool->mis[i];
			break;
		}
	}
	if (mi == NULL) {
		dm_error("no mem_input!\n");
		return false;
	}

	if (mi->funcs->mem_input_update_dchub)
		mi->funcs->mem_input_update_dchub(mi, dh_data);
	else
		ASSERT(mi->funcs->mem_input_update_dchub);

	return true;
}