/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"

#include "resource.h"

#include "clock_source.h"
#include "dc_bios_types.h"

#include "bandwidth_calcs.h"
#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "timing_generator.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
/*******************************************************************************
 * Private functions
 ******************************************************************************/
static void destroy_links(struct core_dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

static bool create_links(
		struct core_dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	if (connectors_num == 0 && num_virtual_links == 0) {
		dm_error("DC: Number of connectors is zero!\n");
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct core_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		} else {
			dm_error("DC: failed to create link!\n");
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct core_link *link = dm_alloc(sizeof(*link));
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->ctx = dc->ctx;
		link->dc = dc;
		link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = dm_alloc(sizeof(*link->link_enc));

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);

		link->public.link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;
	}

	return true;

failed_alloc:
	return false;
}
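
/*
 * Usage sketch (illustrative only, not part of this file's API surface):
 * num_virtual_links is supplied by the display manager through dc_init_data
 * and simply appends that many virtual connectors after the physical ones
 * enumerated from VBIOS, e.g.
 *
 *	struct dc_init_data init = { 0 };    // remaining fields DM-specific
 *	init.num_virtual_links = 1;          // one emulated/headless output
 *	struct dc *dc = dc_create(&init);
 *
 * Virtual links get a virtual_link_encoder and SIGNAL_TYPE_VIRTUAL, so they
 * never touch real transmitter or HPD hardware.
 */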

static bool stream_adjust_vmin_vmax(struct dc *dc,
		const struct dc_stream **stream, int num_streams,
		int vmin, int vmax)
{
	/* TODO: Support multiple streams */
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
	int i = 0;
	bool ret = false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];

		if (pipe->stream == core_stream && pipe->stream_enc) {
			core_dc->hwss.set_drr(&pipe, 1, vmin, vmax);

			/* build and update the info frame */
			resource_build_info_frame(pipe);
			core_dc->hwss.update_info_frame(pipe);

			ret = true;
		}
	}
	return ret;
}
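
/*
 * DRR sketch (illustrative; vmin/vmax are assumed here to be the vertical
 * total limits consumed by the hw sequencer's set_drr hook): a DM setting a
 * variable-refresh window around an already committed stream would call
 *
 *	dc->stream_funcs.adjust_vmin_vmax(dc, &stream, 1, vmin, vmax);
 *
 * Only one stream is honoured today (see the TODO above); the info frame is
 * rebuilt and re-sent in the same pass.
 */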


static bool set_gamut_remap(struct dc *dc,
		const struct dc_stream **stream, int num_streams)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
				== core_stream) {

			pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
			core_dc->hwss.set_plane_config(core_dc, pipes,
					&core_dc->current_context->res_ctx);
			ret = true;
		}
	}

	return ret;
}

/* This function is not expected to fail; proper implementation of
 * validation will prevent this from ever being called for unsupported
 * configurations.
 */
static void stream_update_scaling(
		const struct dc *dc,
		const struct dc_stream *dc_stream,
		const struct rect *src,
		const struct rect *dst)
{
	struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct validate_context *cur_ctx = core_dc->current_context;
	int i;

	if (src)
		stream->public.src = *src;

	if (dst)
		stream->public.dst = *dst;

	for (i = 0; i < cur_ctx->stream_count; i++) {
		struct core_stream *cur_stream = cur_ctx->streams[i];

		if (stream == cur_stream) {
			struct dc_stream_status *status = &cur_ctx->stream_status[i];

			if (status->surface_count)
				if (!dc_commit_surfaces_to_stream(
						&core_dc->public,
						status->surfaces,
						status->surface_count,
						&cur_stream->public))
					/* Need to debug validation */
					BREAK_TO_DEBUGGER();

			return;
		}
	}
}

static bool set_psr_enable(struct dc *dc, bool enable)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;

	for (i = 0; i < core_dc->link_count; i++)
		dc_link_set_psr_enable(&core_dc->links[i]->public,
				enable);

	return true;
}


static bool setup_psr(struct dc *dc, const struct dc_stream *stream)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
	struct pipe_ctx *pipes;
	int i;
	unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;

	for (i = 0; i < core_dc->link_count; i++) {
		if (core_stream->sink->link == core_dc->links[i])
			dc_link_setup_psr(&core_dc->links[i]->public,
					stream);
	}

	for (i = 0; i < MAX_PIPES; i++) {
		if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
				== core_stream && i != underlay_idx) {
			pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
			core_dc->hwss.set_static_screen_control(&pipes, 1,
					0x182);
		}
	}

	return true;
}

static void set_drive_settings(struct dc *dc,
		struct link_training_settings *lt_settings,
		const struct dc_link *link)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;

	for (i = 0; i < core_dc->link_count; i++) {
		if (&core_dc->links[i]->public == link)
			break;
	}

	if (i >= core_dc->link_count)
		ASSERT_CRITICAL(false);

	dc_link_dp_set_drive_settings(&core_dc->links[i]->public, lt_settings);
}

static void perform_link_training(struct dc *dc,
		struct dc_link_settings *link_setting,
		bool skip_video_pattern)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;

	for (i = 0; i < core_dc->link_count; i++)
		dc_link_dp_perform_link_training(
			&core_dc->links[i]->public,
			link_setting,
			skip_video_pattern);
}

static void set_preferred_link_settings(struct dc *dc,
		struct dc_link_settings *link_setting,
		const struct dc_link *link)
{
	struct core_link *core_link = DC_LINK_TO_CORE(link);

	core_link->public.verified_link_cap.lane_count =
			link_setting->lane_count;
	core_link->public.verified_link_cap.link_rate =
			link_setting->link_rate;
	dp_retrain_link_dp_test(core_link, link_setting, false);
}

static void enable_hpd(const struct dc_link *link)
{
	dc_link_dp_enable_hpd(link);
}

static void disable_hpd(const struct dc_link *link)
{
	dc_link_dp_disable_hpd(link);
}


static void set_test_pattern(
		const struct dc_link *link,
		enum dp_test_pattern test_pattern,
		const struct link_training_settings *p_link_settings,
		const unsigned char *p_custom_pattern,
		unsigned int cust_pattern_size)
{
	if (link != NULL)
		dc_link_dp_set_test_pattern(
			link,
			test_pattern,
			p_link_settings,
			p_custom_pattern,
			cust_pattern_size);
}

static void allocate_dc_stream_funcs(struct core_dc *core_dc)
{
	core_dc->public.stream_funcs.stream_update_scaling = stream_update_scaling;
	if (core_dc->hwss.set_drr != NULL) {
		core_dc->public.stream_funcs.adjust_vmin_vmax =
				stream_adjust_vmin_vmax;
	}

	core_dc->public.stream_funcs.set_gamut_remap =
			set_gamut_remap;

	core_dc->public.stream_funcs.set_psr_enable =
			set_psr_enable;

	core_dc->public.stream_funcs.setup_psr =
			setup_psr;

	core_dc->public.link_funcs.set_drive_settings =
			set_drive_settings;

	core_dc->public.link_funcs.perform_link_training =
			perform_link_training;

	core_dc->public.link_funcs.set_preferred_link_settings =
			set_preferred_link_settings;

	core_dc->public.link_funcs.enable_hpd =
			enable_hpd;

	core_dc->public.link_funcs.disable_hpd =
			disable_hpd;

	core_dc->public.link_funcs.set_test_pattern =
			set_test_pattern;
}
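
/*
 * Design note: the public struct dc exposes this behaviour only through the
 * stream_funcs/link_funcs tables, so the DM never calls the static helpers
 * above directly.  A hedged example for the link side (the pattern value is
 * whatever enum dp_test_pattern the DM's compliance/debug path selects):
 *
 *	const struct dc_link *link = dc_get_link_at_index(dc, 0);
 *	dc->link_funcs.enable_hpd(link);
 *	dc->link_funcs.set_test_pattern(link, pattern, NULL, NULL, 0);
 *
 * adjust_vmin_vmax is only installed when the hw sequencer implements
 * set_drr, so that particular pointer must be checked before use.
 */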

static void destruct(struct core_dc *dc)
{
	resource_validate_ctx_destruct(dc->current_context);

	destroy_links(dc);

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->i2caux)
		dal_i2caux_destroy(&dc->ctx->i2caux);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	if (dc->ctx->logger)
		dal_logger_destroy(&dc->ctx->logger);

	dm_free(dc->current_context);
	dc->current_context = NULL;
	dm_free(dc->temp_flip_context);
	dc->temp_flip_context = NULL;
	dm_free(dc->scratch_val_ctx);
	dc->scratch_val_ctx = NULL;

	dm_free(dc->ctx);
	dc->ctx = NULL;
}

static bool construct(struct core_dc *dc,
		const struct dc_init_data *init_params)
{
	struct dal_logger *logger;
	struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto ctx_fail;
	}

	dc->current_context = dm_alloc(sizeof(*dc->current_context));
	dc->temp_flip_context = dm_alloc(sizeof(*dc->temp_flip_context));
	dc->scratch_val_ctx = dm_alloc(sizeof(*dc->scratch_val_ctx));

	if (!dc->current_context || !dc->temp_flip_context) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto val_ctx_fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = &dc->public;
	dc_ctx->asic_id = init_params->asic_id;

	/* Create logger */
	logger = dal_logger_create(dc_ctx);

	if (!logger) {
		/* can *not* call logger. call base driver 'print error' */
		dm_error("%s: failed to create Logger!\n", __func__);
		goto logger_fail;
	}
	dc_ctx->logger = logger;
	dc->ctx = dc_ctx;
	dc->ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc->ctx->dce_version = dc_version;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto bios_fail;
		}

		dc_ctx->created_bios = true;
	}

	/* Create I2C AUX */
	dc_ctx->i2caux = dal_i2caux_create(dc_ctx);

	if (!dc_ctx->i2caux) {
		ASSERT_CRITICAL(false);
		goto failed_to_create_i2caux;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto gpio_fail;
	}

	dc->res_pool = dc_create_resource_pool(
			dc,
			init_params->num_virtual_links,
			dc_version,
			init_params->asic_id);
	if (!dc->res_pool)
		goto create_resource_fail;

	if (!create_links(dc, init_params->num_virtual_links))
		goto create_links_fail;

	allocate_dc_stream_funcs(dc);

	return true;

	/**** error handling here ****/
create_links_fail:
create_resource_fail:
gpio_fail:
failed_to_create_i2caux:
bios_fail:
logger_fail:
val_ctx_fail:
ctx_fail:
	destruct(dc);
	return false;
}

/*
void ProgramPixelDurationV(unsigned int pixelClockInKHz )
{
	fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
	unsigned int pixDurationInPico = round(pixel_duration);

	DPG_PIPE_ARBITRATION_CONTROL1 arb_control;

	arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
	arb_control.bits.PIXEL_DURATION = pixDurationInPico;
	WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);

	arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
	arb_control.bits.PIXEL_DURATION = pixDurationInPico;
	WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);

	WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
	WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);

	WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
	WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
}
*/

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
	unsigned int full_pipe_count;

	if (NULL == core_dc)
		goto alloc_fail;

	if (false == construct(core_dc, init_params))
		goto construct_fail;

	/*TODO: separate HW and SW initialization*/
	core_dc->hwss.init_hw(core_dc);

	full_pipe_count = core_dc->res_pool->pipe_count;
	if (core_dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
		full_pipe_count--;
	core_dc->public.caps.max_streams = min(
			full_pipe_count,
			core_dc->res_pool->stream_enc_count);

	core_dc->public.caps.max_links = core_dc->link_count;
	core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;

	core_dc->public.config = init_params->flags;

	dm_logger_write(core_dc->ctx->logger, LOG_DC,
			"Display Core initialized\n");


	/* TODO: missing feature to be enabled */
	core_dc->public.debug.disable_dfs_bypass = true;

	return &core_dc->public;

construct_fail:
	dm_free(core_dc);

alloc_fail:
	return NULL;
}
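
/*
 * Lifecycle sketch (illustrative, not a definitive DM implementation): the
 * base driver is expected to create one DC instance per adapter and tear it
 * down on unload, roughly
 *
 *	struct dc_init_data init = { 0 };    // populated by the base driver
 *	struct dc *dc = dc_create(&init);
 *	if (dc) {
 *		... commit streams / surfaces ...
 *		dc_destroy(&dc);             // *dc is NULLed on return
 *	}
 *
 * dc_create() returns the public struct dc embedded in core_dc; all later
 * dc_* entry points convert back with DC_TO_CORE().
 */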

void dc_destroy(struct dc **dc)
{
	struct core_dc *core_dc = DC_TO_CORE(*dc);
	destruct(core_dc);
	dm_free(core_dc);
	*dc = NULL;
}

static bool is_validation_required(
		const struct core_dc *dc,
		const struct dc_validation_set set[],
		int set_count)
{
	const struct validate_context *context = dc->current_context;
	int i, j;

	if (context->stream_count != set_count)
		return true;

	for (i = 0; i < set_count; i++) {

		if (set[i].surface_count != context->stream_status[i].surface_count)
			return true;
		if (!is_stream_unchanged(DC_STREAM_TO_CORE(set[i].stream), context->streams[i]))
			return true;

		for (j = 0; j < set[i].surface_count; j++) {
			struct dc_surface temp_surf = { 0 };

			temp_surf = *context->stream_status[i].surfaces[j];
			temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
			temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
			temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;

			if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
				return true;
		}
	}

	return false;
}
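
/*
 * The temp_surf copy above is an "everything but position" comparison: the
 * currently committed surface is copied, then the fields that may legally
 * differ without requiring re-validation (the clip rect and the dst x/y
 * offset) are overwritten with the incoming values before the memcmp.  Any
 * remaining byte difference means some other surface property changed and
 * full validation is required.
 */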

bool dc_validate_resources(
		const struct dc *dc,
		const struct dc_validation_set set[],
		uint8_t set_count)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct validate_context *context;

	if (!is_validation_required(core_dc, set, set_count))
		return true;

	context = dm_alloc(sizeof(struct validate_context));
	if (context == NULL)
		goto context_alloc_fail;

	result = core_dc->res_pool->funcs->validate_with_context(
				core_dc, set, set_count, context);

	resource_validate_ctx_destruct(context);
	dm_free(context);

context_alloc_fail:
	if (result != DC_OK) {
		dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
				"%s:resource validation failed, dc_status:%d\n",
				__func__,
				result);
	}

	return (result == DC_OK);

}

bool dc_validate_guaranteed(
		const struct dc *dc,
		const struct dc_stream *stream)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct validate_context *context;

	context = dm_alloc(sizeof(struct validate_context));
	if (context == NULL)
		goto context_alloc_fail;

	result = core_dc->res_pool->funcs->validate_guaranteed(
			core_dc, stream, context);

	resource_validate_ctx_destruct(context);
	dm_free(context);

context_alloc_fail:
	if (result != DC_OK) {
		dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
				"%s:guaranteed validation failed, dc_status:%d\n",
				__func__,
				result);
	}

	return (result == DC_OK);
}

static void program_timing_sync(
		struct core_dc *core_dc,
		struct validate_context *ctx)
{
	int i, j;
	int group_index = 0;
	int pipe_count = ctx->res_ctx.pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

		/* remove any other unblanked pipes as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			core_dc->hwss.enable_timing_synchronization(
				core_dc, group_index, group_size, pipe_set);
			group_index++;
		}
	}
}
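
/*
 * Worked example of the grouping above (pipe indices are illustrative only):
 * with pipes {0, 1, 2, 3} where 0 and 2 share identical timings while 1 and
 * 3 differ, the outer loop first builds the set {0, 2}, swaps an
 * already-unblanked member into slot 0 so it becomes the sync master, drops
 * any other already-unblanked (already synced) members, and only then asks
 * the hw sequencer to synchronize the remaining group.  Groups of size 1
 * need no hardware programming and are skipped.
 */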

static bool streams_changed(
		struct core_dc *dc,
		const struct dc_stream *streams[],
		uint8_t stream_count)
{
	uint8_t i;

	if (stream_count != dc->current_context->stream_count)
		return true;

	for (i = 0; i < dc->current_context->stream_count; i++) {
		if (&dc->current_context->streams[i]->public != streams[i])
			return true;
	}

	return false;
}

bool dc_commit_streams(
	struct dc *dc,
	const struct dc_stream *streams[],
	uint8_t stream_count)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct dc_bios *dcb = core_dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct validate_context *context;
	struct dc_validation_set set[MAX_STREAMS] = { {0, {0} } };
	int i, j;

	if (false == streams_changed(core_dc, streams, stream_count))
		return DC_OK;

	dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
				__func__, stream_count);

	for (i = 0; i < stream_count; i++) {
		const struct dc_stream *stream = streams[i];
		const struct dc_stream_status *status = dc_stream_get_status(stream);
		int j;

		dc_stream_log(stream,
				core_dc->ctx->logger,
				LOG_DC);

		set[i].stream = stream;

		if (status) {
			set[i].surface_count = status->surface_count;
			for (j = 0; j < status->surface_count; j++)
				set[i].surfaces[j] = status->surfaces[j];
		}

	}

	context = dm_alloc(sizeof(struct validate_context));
	if (context == NULL)
		goto context_alloc_fail;

	result = core_dc->res_pool->funcs->validate_with_context(core_dc, set, stream_count, context);
	if (result != DC_OK) {
		dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
					"%s: Context validation failed! dc_status:%d\n",
					__func__,
					result);
		BREAK_TO_DEBUGGER();
		resource_validate_ctx_destruct(context);
		goto fail;
	}

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		core_dc->hwss.enable_accelerated_mode(core_dc);
	}

	if (result == DC_OK) {
		result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
	}

	program_timing_sync(core_dc, context);

	for (i = 0; i < context->stream_count; i++) {
		const struct core_sink *sink = context->streams[i]->sink;

		for (j = 0; j < context->stream_status[i].surface_count; j++) {
			struct core_surface *surface =
					DC_SURFACE_TO_CORE(context->stream_status[i].surfaces[j]);

			core_dc->hwss.apply_ctx_for_surface(core_dc, surface, context);
		}

		CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->public.timing.h_addressable,
				context->streams[i]->public.timing.v_addressable,
				context->streams[i]->public.timing.h_total,
				context->streams[i]->public.timing.v_total,
				context->streams[i]->public.timing.pix_clk_khz);
	}

	resource_validate_ctx_destruct(core_dc->current_context);

	if (core_dc->temp_flip_context != core_dc->current_context) {
		dm_free(core_dc->temp_flip_context);
		core_dc->temp_flip_context = core_dc->current_context;
	}
	core_dc->current_context = context;
	memset(core_dc->temp_flip_context, 0, sizeof(*core_dc->temp_flip_context));

	return (result == DC_OK);

fail:
	dm_free(context);

context_alloc_fail:
	return (result == DC_OK);
}
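
/*
 * Commit-path sketch (illustrative; how streams are created belongs to the
 * DM): a mode set boils down to handing the full set of streams to DC in one
 * call, e.g.
 *
 *	const struct dc_stream *streams[2] = { stream0, stream1 };
 *	if (!dc_commit_streams(dc, streams, 2))
 *		... validation or programming failed, keep the old state ...
 *
 * Passing the complete set every time is what lets streams_changed() turn
 * redundant commits into a no-op and lets validation see global resource
 * usage rather than one stream at a time.
 */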

bool dc_pre_update_surfaces_to_stream(
		struct dc *dc,
		const struct dc_surface *const *new_surfaces,
		uint8_t new_surface_count,
		const struct dc_stream *dc_stream)
{
	int i, j;
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct dc_stream_status *stream_status = NULL;
	struct validate_context *context;
	bool ret = true;

	pre_surface_trace(dc, new_surfaces, new_surface_count);

	if (core_dc->current_context->stream_count == 0)
		return false;

	/* Cannot commit surfaces to a stream that is not committed */
	for (i = 0; i < core_dc->current_context->stream_count; i++)
		if (dc_stream == &core_dc->current_context->streams[i]->public)
			break;

	if (i == core_dc->current_context->stream_count)
		return false;

	stream_status = &core_dc->current_context->stream_status[i];

	if (new_surface_count == stream_status->surface_count) {
		bool skip_pre = true;

		for (i = 0; i < stream_status->surface_count; i++) {
			struct dc_surface temp_surf = { 0 };

			temp_surf = *stream_status->surfaces[i];
			temp_surf.clip_rect = new_surfaces[i]->clip_rect;
			temp_surf.dst_rect.x = new_surfaces[i]->dst_rect.x;
			temp_surf.dst_rect.y = new_surfaces[i]->dst_rect.y;

			if (memcmp(&temp_surf, new_surfaces[i], sizeof(temp_surf)) != 0) {
				skip_pre = false;
				break;
			}
		}

		if (skip_pre)
			return true;
	}

	context = dm_alloc(sizeof(struct validate_context));

	if (!context) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		ret = false;
		goto val_ctx_fail;
	}

	resource_validate_ctx_copy_construct(core_dc->current_context, context);

	dm_logger_write(core_dc->ctx->logger, LOG_DC,
				"%s: commit %d surfaces to stream 0x%x\n",
				__func__,
				new_surface_count,
				dc_stream);

	if (!resource_attach_surfaces_to_context(
			new_surfaces, new_surface_count, dc_stream, context)) {
		BREAK_TO_DEBUGGER();
		ret = false;
		goto unexpected_fail;
	}

	for (i = 0; i < new_surface_count; i++)
		for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
			if (context->res_ctx.pipe_ctx[j].surface !=
					DC_SURFACE_TO_CORE(new_surfaces[i]))
				continue;

			resource_build_scaling_params(&context->res_ctx.pipe_ctx[j]);
		}

	if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
		BREAK_TO_DEBUGGER();
		ret = false;
		goto unexpected_fail;
	}

	core_dc->hwss.set_bandwidth(core_dc, context, false);

	for (i = 0; i < new_surface_count; i++)
		for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
			if (context->res_ctx.pipe_ctx[j].surface !=
					DC_SURFACE_TO_CORE(new_surfaces[i]))
				continue;

			core_dc->hwss.prepare_pipe_for_context(
					core_dc,
					&context->res_ctx.pipe_ctx[j],
					context);
		}

unexpected_fail:
	resource_validate_ctx_destruct(context);
	dm_free(context);
val_ctx_fail:

	return ret;
}

bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct validate_context *context = dm_alloc(sizeof(struct validate_context));

	if (!context) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		return false;
	}
	resource_validate_ctx_copy_construct(core_dc->current_context, context);

	post_surface_trace(dc);

	for (i = 0; i < context->res_ctx.pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			core_dc->hwss.power_down_front_end(
					core_dc, &context->res_ctx.pipe_ctx[i]);
		}
	if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	core_dc->hwss.set_bandwidth(core_dc, context, true);

	resource_validate_ctx_destruct(core_dc->current_context);
	if (core_dc->current_context)
		dm_free(core_dc->current_context);

	core_dc->current_context = context;

	return true;
}
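
/*
 * Pre/post pairing: dc_pre_update_surfaces_to_stream() validates and raises
 * bandwidth for the incoming surface set before anything is programmed
 * (set_bandwidth(..., false)), while this post hook powers down front ends
 * that no longer drive a stream and then calls set_bandwidth(..., true).
 * The differing boolean appears to be what allows clocks/bandwidth to drop
 * only once the new configuration is live.  Callers are expected to bracket
 * dc_update_surfaces_for_stream() with the pre/post pair, exactly as
 * dc_commit_surfaces_to_stream() below does.
 */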

bool dc_commit_surfaces_to_stream(
		struct dc *dc,
		const struct dc_surface **new_surfaces,
		uint8_t new_surface_count,
		const struct dc_stream *dc_stream)
{
	struct dc_surface_update updates[MAX_SURFACES];
	struct dc_flip_addrs flip_addr[MAX_SURFACES];
	struct dc_plane_info plane_info[MAX_SURFACES];
	struct dc_scaling_info scaling_info[MAX_SURFACES];
	int i;

	if (!dc_pre_update_surfaces_to_stream(
			dc, new_surfaces, new_surface_count, dc_stream))
		return false;

	memset(updates, 0, sizeof(updates));
	memset(flip_addr, 0, sizeof(flip_addr));
	memset(plane_info, 0, sizeof(plane_info));
	memset(scaling_info, 0, sizeof(scaling_info));

	for (i = 0; i < new_surface_count; i++) {
		updates[i].surface = new_surfaces[i];
		updates[i].gamma =
			(struct dc_gamma *)new_surfaces[i]->gamma_correction;
		flip_addr[i].address = new_surfaces[i]->address;
		flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
		plane_info[i].color_space = new_surfaces[i]->color_space;
		plane_info[i].format = new_surfaces[i]->format;
		plane_info[i].plane_size = new_surfaces[i]->plane_size;
		plane_info[i].rotation = new_surfaces[i]->rotation;
		plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
		plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
		plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
		plane_info[i].visible = new_surfaces[i]->visible;
		plane_info[i].dcc = new_surfaces[i]->dcc;
		scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
		scaling_info[i].src_rect = new_surfaces[i]->src_rect;
		scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
		scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;

		updates[i].flip_addr = &flip_addr[i];
		updates[i].plane_info = &plane_info[i];
		updates[i].scaling_info = &scaling_info[i];
	}
	dc_update_surfaces_for_stream(dc, updates, new_surface_count, dc_stream);

	return dc_post_update_surfaces_to_stream(dc);
}

static bool is_surface_in_context(
		const struct validate_context *context,
		const struct dc_surface *surface)
{
	int j;

	for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (surface == &pipe_ctx->surface->public) {
			return true;
		}
	}

	return false;
}

static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
{
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		return 16;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		return 32;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		return 64;
	default:
		ASSERT_CRITICAL(false);
		return -1;
	}
}

static enum surface_update_type get_plane_info_update_type(
		const struct dc_surface_update *u)
{
	struct dc_plane_info temp_plane_info = { { { { 0 } } } };

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	/* Copy all parameters that will cause a full update
	 * from current surface, the rest of the parameters
	 * from provided plane configuration.
	 * Perform memory compare and special validation
	 * for those that can cause fast/medium updates
	 */

	/* Full update parameters */
	temp_plane_info.color_space = u->surface->color_space;
	temp_plane_info.dcc = u->surface->dcc;
	temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
	temp_plane_info.plane_size = u->surface->plane_size;
	temp_plane_info.rotation = u->surface->rotation;
	temp_plane_info.stereo_format = u->surface->stereo_format;
	temp_plane_info.tiling_info = u->surface->tiling_info;
	temp_plane_info.visible = u->surface->visible;

	/* Special Validation parameters */
	temp_plane_info.format = u->plane_info->format;

	if (memcmp(u->plane_info, &temp_plane_info,
			sizeof(struct dc_plane_info)) != 0)
		return UPDATE_TYPE_FULL;

	if (pixel_format_to_bpp(u->plane_info->format) !=
			pixel_format_to_bpp(u->surface->format)) {
		return UPDATE_TYPE_FULL;
	} else {
		return UPDATE_TYPE_MED;
	}
}

static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	struct dc_scaling_info temp_scaling_info = { { 0 } };

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	/* Copy all parameters that will cause a full update
	 * from current surface, the rest of the parameters
	 * from provided plane configuration.
	 * Perform memory compare and special validation
	 * for those that can cause fast/medium updates
	 */

	/* Full Update Parameters */
	temp_scaling_info.dst_rect = u->surface->dst_rect;
	temp_scaling_info.src_rect = u->surface->src_rect;
	temp_scaling_info.scaling_quality = u->surface->scaling_quality;

	/* Special validation required */
	temp_scaling_info.clip_rect = u->scaling_info->clip_rect;

	if (memcmp(u->scaling_info, &temp_scaling_info,
			sizeof(struct dc_scaling_info)) != 0)
		return UPDATE_TYPE_FULL;

	/* Check clip rectangles if not equal:
	 * difference is in offsets => UPDATE_TYPE_FAST
	 * difference is in dimensions => UPDATE_TYPE_FULL
	 */
	if (memcmp(&u->scaling_info->clip_rect,
			&u->surface->clip_rect, sizeof(struct rect)) != 0) {
		if ((u->scaling_info->clip_rect.height ==
			u->surface->clip_rect.height) &&
			(u->scaling_info->clip_rect.width ==
			u->surface->clip_rect.width)) {
			return UPDATE_TYPE_FAST;
		} else {
			return UPDATE_TYPE_FULL;
		}
	}

	return UPDATE_TYPE_FAST;
}
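
/*
 * Worked example of the clip-rect rule (numbers are illustrative only):
 * moving a 1920x1080 clip rect from (0,0) to (100,0) keeps its dimensions,
 * so only the viewport offset moves and the update stays UPDATE_TYPE_FAST;
 * resizing it to 1280x720 changes the dimensions, which alters the scaling
 * ratios and therefore escalates to UPDATE_TYPE_FULL.
 */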

static enum surface_update_type det_surface_update(
		const struct core_dc *dc,
		const struct dc_surface_update *u)
{
	const struct validate_context *context = dc->current_context;
	enum surface_update_type type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (!is_surface_in_context(context, u->surface))
		return UPDATE_TYPE_FULL;

	type = get_plane_info_update_type(u);
	if (overall_type < type)
		overall_type = type;

	type = get_scaling_info_update_type(u);
	if (overall_type < type)
		overall_type = type;

	if (u->in_transfer_func ||
		u->out_transfer_func ||
		u->hdr_static_metadata) {
		if (overall_type < UPDATE_TYPE_MED)
			overall_type = UPDATE_TYPE_MED;
	}

	return overall_type;
}

enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		const struct dc_stream_status *stream_status)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (stream_status->surface_count != surface_count)
		return UPDATE_TYPE_FULL;

	for (i = 0; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(core_dc, &updates[i]);

		if (type == UPDATE_TYPE_FULL)
			return type;

		if (overall_type < type)
			overall_type = type;
	}

	return overall_type;
}
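
/*
 * Rough summary of the classification as implemented above (not a formal
 * spec): FAST is reached when only flip addresses or clip-rect offsets
 * change; MED additionally covers gamma / transfer-function / HDR-metadata
 * updates and plane-info changes that keep the same bpp; anything that adds
 * or removes surfaces, or changes plane properties or scaling ratios, is
 * FULL, and the FULL path in dc_update_surfaces_for_stream() rebuilds a
 * validate_context and revalidates bandwidth before touching hardware.
 */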

enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;

void dc_update_surfaces_for_stream(struct dc *dc,
		struct dc_surface_update *updates, int surface_count,
		const struct dc_stream *dc_stream)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct validate_context *context;
	int i, j;

	enum surface_update_type update_type;
	const struct dc_stream_status *stream_status;

	stream_status = dc_stream_get_status(dc_stream);
	ASSERT(stream_status);
	if (!stream_status)
		return; /* Cannot commit surface to stream that is not committed */

	update_type = dc_check_update_surfaces_for_stream(
			dc, updates, surface_count, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, updates, surface_count);

	if (update_type >= UPDATE_TYPE_FULL) {
		const struct dc_surface *new_surfaces[MAX_SURFACES] = { 0 };

		for (i = 0; i < surface_count; i++)
			new_surfaces[i] = updates[i].surface;

		/* initialize scratch memory for building context */
		context = core_dc->temp_flip_context;
		resource_validate_ctx_copy_construct(
				core_dc->current_context, context);

		/* add surface to context */
		if (!resource_attach_surfaces_to_context(
				new_surfaces, surface_count, dc_stream, context)) {
			BREAK_TO_DEBUGGER();
			return;
		}
	} else {
		context = core_dc->current_context;
	}
	for (i = 0; i < surface_count; i++) {
		/* save update param into surface */
		struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
		struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);

		if (updates[i].flip_addr) {
			surface->public.address = updates[i].flip_addr->address;
			surface->public.flip_immediate =
					updates[i].flip_addr->flip_immediate;
		}

		if (updates[i].scaling_info) {
			surface->public.scaling_quality =
					updates[i].scaling_info->scaling_quality;
			surface->public.dst_rect =
					updates[i].scaling_info->dst_rect;
			surface->public.src_rect =
					updates[i].scaling_info->src_rect;
			surface->public.clip_rect =
					updates[i].scaling_info->clip_rect;
		}

		if (updates[i].plane_info) {
			surface->public.color_space =
					updates[i].plane_info->color_space;
			surface->public.format =
					updates[i].plane_info->format;
			surface->public.plane_size =
					updates[i].plane_info->plane_size;
			surface->public.rotation =
					updates[i].plane_info->rotation;
			surface->public.horizontal_mirror =
					updates[i].plane_info->horizontal_mirror;
			surface->public.stereo_format =
					updates[i].plane_info->stereo_format;
			surface->public.tiling_info =
					updates[i].plane_info->tiling_info;
			surface->public.visible =
					updates[i].plane_info->visible;
			surface->public.dcc =
					updates[i].plane_info->dcc;
		}

		/* not sure if we still need this */
		if (update_type == UPDATE_TYPE_FULL) {
			for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->surface != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}

		if (updates[i].gamma &&
			updates[i].gamma != surface->public.gamma_correction) {
			if (surface->public.gamma_correction != NULL)
				dc_gamma_release(&surface->public.gamma_correction);

			dc_gamma_retain(updates[i].gamma);
			surface->public.gamma_correction = updates[i].gamma;
		}

		if (updates[i].in_transfer_func &&
			updates[i].in_transfer_func != surface->public.in_transfer_func) {
			if (surface->public.in_transfer_func != NULL)
				dc_transfer_func_release(
						surface->public.in_transfer_func);

			dc_transfer_func_retain(
					updates[i].in_transfer_func);
			surface->public.in_transfer_func =
					updates[i].in_transfer_func;
		}

		if (updates[i].out_transfer_func &&
			updates[i].out_transfer_func != dc_stream->out_transfer_func) {
			if (dc_stream->out_transfer_func != NULL)
				dc_transfer_func_release(dc_stream->out_transfer_func);
			dc_transfer_func_retain(updates[i].out_transfer_func);
			stream->public.out_transfer_func = updates[i].out_transfer_func;
		}
		if (updates[i].hdr_static_metadata)
			surface->public.hdr_static_ctx =
					*(updates[i].hdr_static_metadata);
	}

	if (update_type == UPDATE_TYPE_FULL &&
			!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
		BREAK_TO_DEBUGGER();
		return;
	}

	if (!surface_count) /* reset */
		core_dc->hwss.apply_ctx_for_surface(core_dc, NULL, context);

	for (i = 0; i < surface_count; i++) {
		struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);

		for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
			struct pipe_ctx *cur_pipe_ctx;
			bool is_new_pipe_surface = true;

			if (pipe_ctx->surface != surface)
				continue;

			if (update_type >= UPDATE_TYPE_MED) {
				/* only apply for top pipe */
				if (!pipe_ctx->top_pipe) {
					core_dc->hwss.apply_ctx_for_surface(core_dc,
							surface, context);
					context_timing_trace(dc, &context->res_ctx);
				}
			}

			if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
				core_dc->hwss.pipe_control_lock(
						core_dc,
						pipe_ctx,
						true);
			}

			if (updates[i].flip_addr)
				core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);

			if (update_type == UPDATE_TYPE_FAST)
				continue;

			cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
			if (cur_pipe_ctx->surface == pipe_ctx->surface)
				is_new_pipe_surface = false;

			if (is_new_pipe_surface ||
					updates[i].in_transfer_func)
				core_dc->hwss.set_input_transfer_func(
						pipe_ctx, pipe_ctx->surface);

			if (is_new_pipe_surface ||
					updates[i].out_transfer_func)
				core_dc->hwss.set_output_transfer_func(
						pipe_ctx,
						pipe_ctx->surface,
						pipe_ctx->stream);

			if (updates[i].hdr_static_metadata) {
				resource_build_info_frame(pipe_ctx);
				core_dc->hwss.update_info_frame(pipe_ctx);
			}
		}
	}

	for (i = context->res_ctx.pool->pipe_count - 1; i >= 0; i--) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		for (j = 0; j < surface_count; j++) {
			if (updates[j].surface == &pipe_ctx->surface->public) {
				if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
					core_dc->hwss.pipe_control_lock(
							core_dc,
							pipe_ctx,
							false);
				}
				break;
			}
		}
	}

	if (core_dc->current_context != context) {
		resource_validate_ctx_destruct(core_dc->current_context);
		core_dc->temp_flip_context = core_dc->current_context;

		core_dc->current_context = context;
	}
}
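
/*
 * Flip-path sketch (illustrative only): a page flip that changes nothing but
 * the framebuffer address travels the UPDATE_TYPE_FAST branch above, e.g.
 *
 *	struct dc_flip_addrs addr = { .address = new_address,
 *				      .flip_immediate = false };
 *	struct dc_surface_update upd = { .surface = surface,
 *					 .flip_addr = &addr };
 *	dc_update_surfaces_for_stream(dc, &upd, 1, stream);
 *
 * No new validate_context is built in that case; the pipe is locked, the
 * address is latched via update_plane_addr(), and the pipe is unlocked in
 * the reverse-order loop at the end of the function.
 */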

uint8_t dc_get_current_stream_count(const struct dc *dc)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	return core_dc->current_context->stream_count;
}

struct dc_stream *dc_get_stream_at_index(const struct dc *dc, uint8_t i)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	if (i < core_dc->current_context->stream_count)
		return &(core_dc->current_context->streams[i]->public);
	return NULL;
}

const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	return &core_dc->links[link_index]->public;
}

const struct graphics_object_id dc_get_link_id_at_index(
		struct dc *dc, uint32_t link_index)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	return core_dc->links[link_index]->link_id;
}

const struct ddc_service *dc_get_ddc_at_index(
		struct dc *dc, uint32_t link_index)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	return core_dc->links[link_index]->ddc;
}

enum dc_irq_source dc_get_hpd_irq_source_at_index(
		struct dc *dc, uint32_t link_index)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	return core_dc->links[link_index]->public.irq_source_hpd;
}

const struct audio **dc_get_audios(struct dc *dc)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	return (const struct audio **)core_dc->res_pool->audios;
}

void dc_flip_surface_addrs(
		struct dc *dc,
		const struct dc_surface *const surfaces[],
		struct dc_flip_addrs flip_addrs[],
		uint32_t count)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i, j;

	for (i = 0; i < count; i++) {
		struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);

		surface->public.address = flip_addrs[i].address;
		surface->public.flip_immediate = flip_addrs[i].flip_immediate;

		for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->surface != surface)
				continue;

			core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
		}
	}
}

enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
}

void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	dal_irq_service_ack(core_dc->res_pool->irqs, src);
}

void dc_set_power_state(
		struct dc *dc,
		enum dc_acpi_cm_power_state power_state)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		core_dc->hwss.init_hw(core_dc);
		break;
	default:

		core_dc->hwss.power_down(core_dc);

		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */
		memset(core_dc->current_context, 0,
				sizeof(*core_dc->current_context));

		core_dc->current_context->res_ctx.pool = core_dc->res_pool;

		break;
	}

}

void dc_resume(const struct dc *dc)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	uint32_t i;

	for (i = 0; i < core_dc->link_count; i++)
		core_link_resume(core_dc->links[i]);
}

bool dc_read_dpcd(
		struct dc *dc,
		uint32_t link_index,
		uint32_t address,
		uint8_t *data,
		uint32_t size)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	struct core_link *link = core_dc->links[link_index];
	enum ddc_result r = dal_ddc_service_read_dpcd_data(
			link->ddc,
			address,
			data,
			size);
	return r == DDC_RESULT_SUCESSFULL;
}

bool dc_query_ddc_data(
		struct dc *dc,
		uint32_t link_index,
		uint32_t address,
		uint8_t *write_buf,
		uint32_t write_size,
		uint8_t *read_buf,
		uint32_t read_size)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	struct core_link *link = core_dc->links[link_index];

	bool result = dal_ddc_service_query_ddc_data(
			link->ddc,
			address,
			write_buf,
			write_size,
			read_buf,
			read_size);

	return result;
}


bool dc_write_dpcd(
		struct dc *dc,
		uint32_t link_index,
		uint32_t address,
		const uint8_t *data,
		uint32_t size)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	struct core_link *link = core_dc->links[link_index];

	enum ddc_result r = dal_ddc_service_write_dpcd_data(
			link->ddc,
			address,
			data,
			size);
	return r == DDC_RESULT_SUCESSFULL;
}

bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	struct core_link *link = core_dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dal_i2caux_submit_i2c_command(
		ddc->ctx->i2caux,
		ddc->ddc_pin,
		cmd);
}

static bool link_add_remote_sink_helper(struct core_link *core_link, struct dc_sink *sink)
{
	struct dc_link *dc_link = &core_link->public;

	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}

struct dc_sink *dc_link_add_remote_sink(
		const struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;
	struct core_link *core_link = DC_LINK_TO_LINK(link);

	if (len > MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			core_link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			core_link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	if (edid_status != EDID_OK)
		goto fail;

	return dc_sink;
fail:
	dc_link_remove_remote_sink(link, dc_sink);
fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}
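
/*
 * Remote sinks exist for topologies where the sink is not detected directly
 * on the connector (DP MST branches being the typical case handled by the
 * DM's topology code).  A hedged sketch of the expected flow:
 *
 *	struct dc_sink_init_data init = { .link = link, ... };  // DM-provided
 *	struct dc_sink *sink =
 *		dc_link_add_remote_sink(link, edid, edid_len, &init);
 *	...
 *	dc_link_remove_remote_sink(link, sink);
 *
 * The sink is reference counted: adding takes a reference through
 * dc_sink_retain() and removal drops it again with dc_sink_release().
 */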

void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
{
	struct core_link *core_link = DC_LINK_TO_LINK(link);
	struct dc_link *dc_link = &core_link->public;

	dc_link->local_sink = sink;

	if (sink == NULL) {
		dc_link->type = dc_connection_none;
	} else {
		dc_link->type = dc_connection_single;
	}
}

void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
{
	int i;
	struct core_link *core_link = DC_LINK_TO_LINK(link);
	struct dc_link *dc_link = &core_link->public;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < dc_link->sink_count; i++) {
		if (dc_link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			dc_link->remote_sinks[i] = NULL;

			/* shrink array to remove empty place */
			while (i < dc_link->sink_count - 1) {
				dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];
				i++;
			}

			dc_link->sink_count--;
			return;
		}
	}
}

bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data)
{
	int i;
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct mem_input *mi = NULL;

	for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
		if (core_dc->res_pool->mis[i] != NULL) {
			mi = core_dc->res_pool->mis[i];
			break;
		}
	}
	if (mi == NULL) {
		dm_error("no mem_input!\n");
		return false;
	}

	if (mi->funcs->mem_input_update_dchub)
		mi->funcs->mem_input_update_dchub(mi, dh_data);
	else
		ASSERT(mi->funcs->mem_input_update_dchub);


	return true;

}