/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"

#include "resource.h"

#include "clock_source.h"
#include "dc_bios_types.h"

#include "dce_calcs.h"
#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "timing_generator.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"

/*******************************************************************************
 * Private functions
 ******************************************************************************/
static void destroy_links(struct core_dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

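/* Create one core_link per physical connector reported by the VBIOS, plus one
 * link per requested virtual connector; virtual links get a virtual link
 * encoder instead of a real one.
 */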
static bool create_links(
		struct core_dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	if (connectors_num == 0 && num_virtual_links == 0) {
		dm_error("DC: Number of connectors is zero!\n");
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct core_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		} else {
			dm_error("DC: failed to create link!\n");
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct core_link *link = dm_alloc(sizeof(*link));
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->ctx = dc->ctx;
		link->dc = dc;
		link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = dm_alloc(sizeof(*link->link_enc));

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);

		link->public.link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;
	}

	return true;

failed_alloc:
	return false;
}

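/* Apply a new vertical min/max (dynamic refresh range) to every pipe that
 * drives the given stream, then rebuild and resend its info frame.
 */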
static bool stream_adjust_vmin_vmax(struct dc *dc,
		const struct dc_stream **stream, int num_streams,
		int vmin, int vmax)
{
	/* TODO: Support multiple streams */
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
	int i = 0;
	bool ret = false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];

		if (pipe->stream == core_stream && pipe->stream_enc) {
			core_dc->hwss.set_drr(&pipe, 1, vmin, vmax);

			/* build and update the info frame */
			resource_build_info_frame(pipe);
			core_dc->hwss.update_info_frame(pipe);

			ret = true;
		}
	}
	return ret;
}

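/* Re-apply the plane configuration (which carries the gamut remap) on every
 * pipe that currently drives the first stream in the set.
 */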
static bool set_gamut_remap(struct dc *dc,
		const struct dc_stream **stream, int num_streams)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
				== core_stream) {

			pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
			core_dc->hwss.set_plane_config(core_dc, pipes,
					&core_dc->current_context->res_ctx);
			ret = true;
		}
	}

	return ret;
}

/* This function is not expected to fail, proper implementation of
 * validation will prevent this from ever being called for unsupported
 * configurations.
 */
static void stream_update_scaling(
		const struct dc *dc,
		const struct dc_stream *dc_stream,
		const struct rect *src,
		const struct rect *dst)
{
	struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct validate_context *cur_ctx = core_dc->current_context;
	int i;

	if (src)
		stream->public.src = *src;

	if (dst)
		stream->public.dst = *dst;

	for (i = 0; i < cur_ctx->stream_count; i++) {
		struct core_stream *cur_stream = cur_ctx->streams[i];

		if (stream == cur_stream) {
			struct dc_stream_status *status = &cur_ctx->stream_status[i];

			if (status->surface_count)
				if (!dc_commit_surfaces_to_stream(
						&core_dc->public,
						status->surfaces,
						status->surface_count,
						&cur_stream->public))
					/* Need to debug validation */
					BREAK_TO_DEBUGGER();

			return;
		}
	}
}

static bool set_psr_enable(struct dc *dc, bool enable)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;

	for (i = 0; i < core_dc->link_count; i++)
		dc_link_set_psr_enable(&core_dc->links[i]->public,
				enable);

	return true;
}

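/* Set up PSR on the link that owns the stream's sink, then program the
 * static-screen event control on every non-underlay pipe of that stream.
 */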
static bool setup_psr(struct dc *dc, const struct dc_stream *stream)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
	struct pipe_ctx *pipes;
	int i;
	unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;

	for (i = 0; i < core_dc->link_count; i++) {
		if (core_stream->sink->link == core_dc->links[i])
			dc_link_setup_psr(&core_dc->links[i]->public,
					stream);
	}

	for (i = 0; i < MAX_PIPES; i++) {
		if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
				== core_stream && i != underlay_idx) {
			pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
			core_dc->hwss.set_static_screen_control(&pipes, 1,
					0x182);
		}
	}

	return true;
}

static void set_drive_settings(struct dc *dc,
		struct link_training_settings *lt_settings,
		const struct dc_link *link)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;

	for (i = 0; i < core_dc->link_count; i++) {
		if (&core_dc->links[i]->public == link)
			break;
	}

	if (i >= core_dc->link_count)
		ASSERT_CRITICAL(false);

	dc_link_dp_set_drive_settings(&core_dc->links[i]->public, lt_settings);
}

static void perform_link_training(struct dc *dc,
		struct dc_link_settings *link_setting,
		bool skip_video_pattern)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;

	for (i = 0; i < core_dc->link_count; i++)
		dc_link_dp_perform_link_training(
			&core_dc->links[i]->public,
			link_setting,
			skip_video_pattern);
}

static void set_preferred_link_settings(struct dc *dc,
		struct dc_link_settings *link_setting,
		const struct dc_link *link)
{
	struct core_link *core_link = DC_LINK_TO_CORE(link);

	core_link->public.verified_link_cap.lane_count =
			link_setting->lane_count;
	core_link->public.verified_link_cap.link_rate =
			link_setting->link_rate;
	dp_retrain_link_dp_test(core_link, link_setting, false);
}

static void enable_hpd(const struct dc_link *link)
{
	dc_link_dp_enable_hpd(link);
}

static void disable_hpd(const struct dc_link *link)
{
	dc_link_dp_disable_hpd(link);
}

static void set_test_pattern(
		const struct dc_link *link,
		enum dp_test_pattern test_pattern,
		const struct link_training_settings *p_link_settings,
		const unsigned char *p_custom_pattern,
		unsigned int cust_pattern_size)
{
	if (link != NULL)
		dc_link_dp_set_test_pattern(
			link,
			test_pattern,
			p_link_settings,
			p_custom_pattern,
			cust_pattern_size);
}

static void allocate_dc_stream_funcs(struct core_dc *core_dc)
{
	core_dc->public.stream_funcs.stream_update_scaling = stream_update_scaling;
	if (core_dc->hwss.set_drr != NULL) {
		core_dc->public.stream_funcs.adjust_vmin_vmax =
				stream_adjust_vmin_vmax;
	}

	core_dc->public.stream_funcs.set_gamut_remap =
			set_gamut_remap;

	core_dc->public.stream_funcs.set_psr_enable =
			set_psr_enable;

	core_dc->public.stream_funcs.setup_psr =
			setup_psr;

	core_dc->public.link_funcs.set_drive_settings =
			set_drive_settings;

	core_dc->public.link_funcs.perform_link_training =
			perform_link_training;

	core_dc->public.link_funcs.set_preferred_link_settings =
			set_preferred_link_settings;

	core_dc->public.link_funcs.enable_hpd =
			enable_hpd;

	core_dc->public.link_funcs.disable_hpd =
			disable_hpd;

	core_dc->public.link_funcs.set_test_pattern =
			set_test_pattern;
}

static void destruct(struct core_dc *dc)
{
	resource_validate_ctx_destruct(dc->current_context);

	destroy_links(dc);

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->i2caux)
		dal_i2caux_destroy(&dc->ctx->i2caux);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	if (dc->ctx->logger)
		dal_logger_destroy(&dc->ctx->logger);

	dm_free(dc->current_context);
	dc->current_context = NULL;
	dm_free(dc->temp_flip_context);
	dc->temp_flip_context = NULL;

	dm_free(dc->ctx);
	dc->ctx = NULL;
}

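/* Build everything DC needs before hardware init: validation contexts,
 * logger, BIOS parser, I2C AUX, GPIO service, resource pool and links.
 */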
static bool construct(struct core_dc *dc,
		const struct dc_init_data *init_params)
{
	struct dal_logger *logger;
	struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto ctx_fail;
	}

	dc->current_context = dm_alloc(sizeof(*dc->current_context));
	dc->temp_flip_context = dm_alloc(sizeof(*dc->temp_flip_context));

	if (!dc->current_context || !dc->temp_flip_context) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto val_ctx_fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = &dc->public;
	dc_ctx->asic_id = init_params->asic_id;

	/* Create logger */
	logger = dal_logger_create(dc_ctx);

	if (!logger) {
		/* can *not* call logger. call base driver 'print error' */
		dm_error("%s: failed to create Logger!\n", __func__);
		goto logger_fail;
	}
	dc_ctx->logger = logger;
	dc->ctx = dc_ctx;
	dc->ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc->ctx->dce_version = dc_version;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto bios_fail;
		}

		dc_ctx->created_bios = true;
	}

	/* Create I2C AUX */
	dc_ctx->i2caux = dal_i2caux_create(dc_ctx);

	if (!dc_ctx->i2caux) {
		ASSERT_CRITICAL(false);
		goto failed_to_create_i2caux;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto gpio_fail;
	}

	dc->res_pool = dc_create_resource_pool(
			dc,
			init_params->num_virtual_links,
			dc_version,
			init_params->asic_id);
	if (!dc->res_pool)
		goto create_resource_fail;

	if (!create_links(dc, init_params->num_virtual_links))
		goto create_links_fail;

	allocate_dc_stream_funcs(dc);

	return true;

	/**** error handling here ****/
create_links_fail:
create_resource_fail:
gpio_fail:
failed_to_create_i2caux:
bios_fail:
logger_fail:
val_ctx_fail:
ctx_fail:
	destruct(dc);
	return false;
}

/*
void ProgramPixelDurationV(unsigned int pixelClockInKHz )
{
	fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
	unsigned int pixDurationInPico = round(pixel_duration);

	DPG_PIPE_ARBITRATION_CONTROL1 arb_control;

	arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
	arb_control.bits.PIXEL_DURATION = pixDurationInPico;
	WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);

	arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
	arb_control.bits.PIXEL_DURATION = pixDurationInPico;
	WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);

	WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
	WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);

	WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
	WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
}
*/

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
	unsigned int full_pipe_count;

	if (NULL == core_dc)
		goto alloc_fail;

	if (false == construct(core_dc, init_params))
		goto construct_fail;

	/*TODO: separate HW and SW initialization*/
	core_dc->hwss.init_hw(core_dc);

	full_pipe_count = core_dc->res_pool->pipe_count;
	if (core_dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
		full_pipe_count--;
	core_dc->public.caps.max_streams = min(
			full_pipe_count,
			core_dc->res_pool->stream_enc_count);

	core_dc->public.caps.max_links = core_dc->link_count;
	core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;

	core_dc->public.config = init_params->flags;

	dm_logger_write(core_dc->ctx->logger, LOG_DC,
			"Display Core initialized\n");

	/* TODO: missing feature to be enabled */
	core_dc->public.debug.disable_dfs_bypass = true;

	return &core_dc->public;

construct_fail:
	dm_free(core_dc);

alloc_fail:
	return NULL;
}

void dc_destroy(struct dc **dc)
{
	struct core_dc *core_dc = DC_TO_CORE(*dc);
	destruct(core_dc);
	dm_free(core_dc);
	*dc = NULL;
}

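/* Return true when the requested configuration differs from the committed one
 * enough to need revalidation; differences limited to clip rect and dst rect
 * offsets are tolerated.
 */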
static bool is_validation_required(
		const struct core_dc *dc,
		const struct dc_validation_set set[],
		int set_count)
{
	const struct validate_context *context = dc->current_context;
	int i, j;

	if (context->stream_count != set_count)
		return true;

	for (i = 0; i < set_count; i++) {

		if (set[i].surface_count != context->stream_status[i].surface_count)
			return true;
		if (!is_stream_unchanged(DC_STREAM_TO_CORE(set[i].stream), context->streams[i]))
			return true;

		for (j = 0; j < set[i].surface_count; j++) {
			struct dc_surface temp_surf = { 0 };

			temp_surf = *context->stream_status[i].surfaces[j];
			temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
			temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
			temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;

			if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
				return true;
		}
	}

	return false;
}

bool dc_validate_resources(
		const struct dc *dc,
		const struct dc_validation_set set[],
		uint8_t set_count)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct validate_context *context;

	if (!is_validation_required(core_dc, set, set_count))
		return true;

	context = dm_alloc(sizeof(struct validate_context));
	if (context == NULL)
		goto context_alloc_fail;

	result = core_dc->res_pool->funcs->validate_with_context(
				core_dc, set, set_count, context);

	resource_validate_ctx_destruct(context);
	dm_free(context);

context_alloc_fail:
	if (result != DC_OK) {
		dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
				"%s:resource validation failed, dc_status:%d\n",
				__func__,
				result);
	}

	return (result == DC_OK);
}

bool dc_validate_guaranteed(
		const struct dc *dc,
		const struct dc_stream *stream)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct validate_context *context;

	context = dm_alloc(sizeof(struct validate_context));
	if (context == NULL)
		goto context_alloc_fail;

	result = core_dc->res_pool->funcs->validate_guaranteed(
				core_dc, stream, context);

	resource_validate_ctx_destruct(context);
	dm_free(context);

context_alloc_fail:
	if (result != DC_OK) {
		dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
				"%s:guaranteed validation failed, dc_status:%d\n",
				__func__,
				result);
	}

	return (result == DC_OK);
}

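/* Group pipes whose stream timings can be synchronized, make an already
 * unblanked pipe the master of each group, and enable timing synchronization
 * for every group with more than one pipe.
 */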
static void program_timing_sync(
		struct core_dc *core_dc,
		struct validate_context *ctx)
{
	int i, j;
	int group_index = 0;
	int pipe_count = ctx->res_ctx.pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

		/* remove any other unblanked pipes as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			core_dc->hwss.enable_timing_synchronization(
				core_dc, group_index, group_size, pipe_set);
			group_index++;
		}
	}
}

static bool streams_changed(
		struct core_dc *dc,
		const struct dc_stream *streams[],
		uint8_t stream_count)
{
	uint8_t i;

	if (stream_count != dc->current_context->stream_count)
		return true;

	for (i = 0; i < dc->current_context->stream_count; i++) {
		if (&dc->current_context->streams[i]->public != streams[i])
			return true;
	}

	return false;
}

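/* Validate the requested set of streams in a fresh context, apply that
 * context to the hardware, synchronize timings and make it the current
 * context.
 */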
bool dc_commit_streams(
		struct dc *dc,
		const struct dc_stream *streams[],
		uint8_t stream_count)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct dc_bios *dcb = core_dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct validate_context *context;
	struct dc_validation_set set[MAX_STREAMS] = { {0, {0} } };
	int i, j;

	if (false == streams_changed(core_dc, streams, stream_count))
		return DC_OK;

	dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
				__func__, stream_count);

	for (i = 0; i < stream_count; i++) {
		const struct dc_stream *stream = streams[i];
		const struct dc_stream_status *status = dc_stream_get_status(stream);
		int j;

		dc_stream_log(stream,
				core_dc->ctx->logger,
				LOG_DC);

		set[i].stream = stream;

		if (status) {
			set[i].surface_count = status->surface_count;
			for (j = 0; j < status->surface_count; j++)
				set[i].surfaces[j] = status->surfaces[j];
		}
	}

	context = dm_alloc(sizeof(struct validate_context));
	if (context == NULL)
		goto context_alloc_fail;

	result = core_dc->res_pool->funcs->validate_with_context(core_dc, set, stream_count, context);
	if (result != DC_OK) {
		dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
				"%s: Context validation failed! dc_status:%d\n",
				__func__,
				result);
		BREAK_TO_DEBUGGER();
		resource_validate_ctx_destruct(context);
		goto fail;
	}

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		core_dc->hwss.enable_accelerated_mode(core_dc);
	}

	if (result == DC_OK) {
		result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
	}

	program_timing_sync(core_dc, context);

	for (i = 0; i < context->stream_count; i++) {
		const struct core_sink *sink = context->streams[i]->sink;

		for (j = 0; j < context->stream_status[i].surface_count; j++) {
			struct core_surface *surface =
					DC_SURFACE_TO_CORE(context->stream_status[i].surfaces[j]);

			core_dc->hwss.apply_ctx_for_surface(core_dc, surface, context);
		}

		CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->public.timing.h_addressable,
				context->streams[i]->public.timing.v_addressable,
				context->streams[i]->public.timing.h_total,
				context->streams[i]->public.timing.v_total,
				context->streams[i]->public.timing.pix_clk_khz);
	}

	resource_validate_ctx_destruct(core_dc->current_context);

	if (core_dc->temp_flip_context != core_dc->current_context) {
		dm_free(core_dc->temp_flip_context);
		core_dc->temp_flip_context = core_dc->current_context;
	}
	core_dc->current_context = context;
	memset(core_dc->temp_flip_context, 0, sizeof(*core_dc->temp_flip_context));

	return (result == DC_OK);

fail:
	dm_free(context);

context_alloc_fail:
	return (result == DC_OK);
}

bool dc_pre_update_surfaces_to_stream(
		struct dc *dc,
		const struct dc_surface *const *new_surfaces,
		uint8_t new_surface_count,
		const struct dc_stream *dc_stream)
{
	return true;
}

bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct validate_context *context = dm_alloc(sizeof(struct validate_context));

	if (!context) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		return false;
	}
	resource_validate_ctx_copy_construct(core_dc->current_context, context);

	post_surface_trace(dc);

	for (i = 0; i < context->res_ctx.pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			core_dc->hwss.power_down_front_end(
					core_dc, &context->res_ctx.pipe_ctx[i]);
		}
	if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	core_dc->hwss.set_bandwidth(core_dc, context, true);

	resource_validate_ctx_destruct(core_dc->current_context);
	if (core_dc->current_context)
		dm_free(core_dc->current_context);

	core_dc->current_context = context;

	return true;
}

bool dc_commit_surfaces_to_stream(
		struct dc *dc,
		const struct dc_surface **new_surfaces,
		uint8_t new_surface_count,
		const struct dc_stream *dc_stream)
{
	struct dc_surface_update updates[MAX_SURFACES];
	struct dc_flip_addrs flip_addr[MAX_SURFACES];
	struct dc_plane_info plane_info[MAX_SURFACES];
	struct dc_scaling_info scaling_info[MAX_SURFACES];
	int i;

	memset(updates, 0, sizeof(updates));
	memset(flip_addr, 0, sizeof(flip_addr));
	memset(plane_info, 0, sizeof(plane_info));
	memset(scaling_info, 0, sizeof(scaling_info));

	for (i = 0; i < new_surface_count; i++) {
		updates[i].surface = new_surfaces[i];
		updates[i].gamma =
			(struct dc_gamma *)new_surfaces[i]->gamma_correction;
		flip_addr[i].address = new_surfaces[i]->address;
		flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
		plane_info[i].color_space = new_surfaces[i]->color_space;
		plane_info[i].format = new_surfaces[i]->format;
		plane_info[i].plane_size = new_surfaces[i]->plane_size;
		plane_info[i].rotation = new_surfaces[i]->rotation;
		plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
		plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
		plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
		plane_info[i].visible = new_surfaces[i]->visible;
		plane_info[i].dcc = new_surfaces[i]->dcc;
		scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
		scaling_info[i].src_rect = new_surfaces[i]->src_rect;
		scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
		scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;

		updates[i].flip_addr = &flip_addr[i];
		updates[i].plane_info = &plane_info[i];
		updates[i].scaling_info = &scaling_info[i];
	}
	dc_update_surfaces_for_stream(dc, updates, new_surface_count, dc_stream);

	return dc_post_update_surfaces_to_stream(dc);
}

static bool is_surface_in_context(
		const struct validate_context *context,
		const struct dc_surface *surface)
{
	int j;

	for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (surface == &pipe_ctx->surface->public) {
			return true;
		}
	}

	return false;
}

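/* Surface-update classification: the helpers below decide whether an update
 * can be applied as FAST, MED or FULL by comparing the requested plane and
 * scaling parameters against the surface's current state.
 */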
static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
{
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		return 16;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		return 32;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		return 64;
	default:
		ASSERT_CRITICAL(false);
		return -1;
	}
}

static enum surface_update_type get_plane_info_update_type(
		const struct dc_surface_update *u,
		int surface_index)
{
	struct dc_plane_info temp_plane_info = { { { { 0 } } } };

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	/* Copy all parameters that will cause a full update
	 * from current surface, the rest of the parameters
	 * from provided plane configuration.
	 * Perform memory compare and special validation
	 * for those that can cause fast/medium updates
	 */

	/* Full update parameters */
	temp_plane_info.color_space = u->surface->color_space;
	temp_plane_info.dcc = u->surface->dcc;
	temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
	temp_plane_info.plane_size = u->surface->plane_size;
	temp_plane_info.rotation = u->surface->rotation;
	temp_plane_info.stereo_format = u->surface->stereo_format;
	temp_plane_info.tiling_info = u->surface->tiling_info;

	/* Special Validation parameters */
	temp_plane_info.format = u->plane_info->format;

	if (surface_index == 0)
		temp_plane_info.visible = u->plane_info->visible;
	else
		temp_plane_info.visible = u->surface->visible;

	if (memcmp(u->plane_info, &temp_plane_info,
			sizeof(struct dc_plane_info)) != 0)
		return UPDATE_TYPE_FULL;

	if (pixel_format_to_bpp(u->plane_info->format) !=
			pixel_format_to_bpp(u->surface->format)) {
		return UPDATE_TYPE_FULL;
	} else {
		return UPDATE_TYPE_MED;
	}
}

static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	struct dc_scaling_info temp_scaling_info = { { 0 } };

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	/* Copy all parameters that will cause a full update
	 * from current surface, the rest of the parameters
	 * from provided plane configuration.
	 * Perform memory compare and special validation
	 * for those that can cause fast/medium updates
	 */

	/* Full Update Parameters */
	temp_scaling_info.dst_rect = u->surface->dst_rect;
	temp_scaling_info.src_rect = u->surface->src_rect;
	temp_scaling_info.scaling_quality = u->surface->scaling_quality;

	/* Special validation required */
	temp_scaling_info.clip_rect = u->scaling_info->clip_rect;

	if (memcmp(u->scaling_info, &temp_scaling_info,
			sizeof(struct dc_scaling_info)) != 0)
		return UPDATE_TYPE_FULL;

	/* Check Clip rectangles if not equal
	 * difference is in offsets == > UPDATE_TYPE_FAST
	 * difference is in dimensions == > UPDATE_TYPE_FULL
	 */
	if (memcmp(&u->scaling_info->clip_rect,
			&u->surface->clip_rect, sizeof(struct rect)) != 0) {
		if ((u->scaling_info->clip_rect.height ==
				u->surface->clip_rect.height) &&
				(u->scaling_info->clip_rect.width ==
				u->surface->clip_rect.width)) {
			return UPDATE_TYPE_FAST;
		} else {
			return UPDATE_TYPE_FULL;
		}
	}

	return UPDATE_TYPE_FAST;
}

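/* Combine the plane-info and scaling classifications for one surface and
 * raise the result to at least MED when transfer functions or HDR metadata
 * change.
 */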
static enum surface_update_type det_surface_update(
		const struct core_dc *dc,
		const struct dc_surface_update *u,
		int surface_index)
{
	const struct validate_context *context = dc->current_context;
	enum surface_update_type type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (!is_surface_in_context(context, u->surface))
		return UPDATE_TYPE_FULL;

	type = get_plane_info_update_type(u, surface_index);
	if (overall_type < type)
		overall_type = type;

	type = get_scaling_info_update_type(u);
	if (overall_type < type)
		overall_type = type;

	if (u->in_transfer_func ||
			u->out_transfer_func ||
			u->hdr_static_metadata) {
		if (overall_type < UPDATE_TYPE_MED)
			overall_type = UPDATE_TYPE_MED;
	}

	return overall_type;
}

enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (stream_status == NULL || stream_status->surface_count != surface_count)
		return UPDATE_TYPE_FULL;

	if (stream_update)
		return UPDATE_TYPE_FULL;

	for (i = 0; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(core_dc, &updates[i], i);

		if (type == UPDATE_TYPE_FULL)
			return type;

		if (overall_type < type)
			overall_type = type;
	}

	return overall_type;
}

void dc_update_surfaces_for_stream(struct dc *dc,
		struct dc_surface_update *surface_updates, int surface_count,
		const struct dc_stream *dc_stream)
{
	dc_update_surfaces_and_stream(dc, surface_updates, surface_count,
			dc_stream, NULL);
}

enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;

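/* Central update path: classify the update as fast/medium/full, build or
 * reuse a validate_context, copy the update parameters into the surfaces and
 * stream, then lock the affected pipes, program the hardware and unlock in
 * reverse order.
 */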
void dc_update_surfaces_and_stream(struct dc *dc,
		struct dc_surface_update *srf_updates, int surface_count,
		const struct dc_stream *dc_stream,
		struct dc_stream_update *stream_update)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct validate_context *context;
	int i, j;
	enum surface_update_type update_type;
	const struct dc_stream_status *stream_status;
	struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);

	stream_status = dc_stream_get_status(dc_stream);
	ASSERT(stream_status);
	if (!stream_status)
		return; /* Cannot commit surface to stream that is not committed */

	update_type = dc_check_update_surfaces_for_stream(
			dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	if (update_type >= UPDATE_TYPE_FULL) {
		const struct dc_surface *new_surfaces[MAX_SURFACES] = { 0 };

		for (i = 0; i < surface_count; i++)
			new_surfaces[i] = srf_updates[i].surface;

		/* initialize scratch memory for building context */
		context = core_dc->temp_flip_context;
		resource_validate_ctx_copy_construct(
				core_dc->current_context, context);

		/* add surface to context */
		if (!resource_attach_surfaces_to_context(
				new_surfaces, surface_count, dc_stream, context)) {
			BREAK_TO_DEBUGGER();
			return;
		}
	} else {
		context = core_dc->current_context;
	}

	/* update current stream with the new updates */
	if (stream_update) {
		stream->public.src = stream_update->src;
		stream->public.dst = stream_update->dst;
	}

	/* save update parameters into surface */
	for (i = 0; i < surface_count; i++) {
		struct core_surface *surface =
				DC_SURFACE_TO_CORE(srf_updates[i].surface);

		if (srf_updates[i].flip_addr) {
			surface->public.address = srf_updates[i].flip_addr->address;
			surface->public.flip_immediate =
					srf_updates[i].flip_addr->flip_immediate;
		}

		if (srf_updates[i].scaling_info) {
			surface->public.scaling_quality =
					srf_updates[i].scaling_info->scaling_quality;
			surface->public.dst_rect =
					srf_updates[i].scaling_info->dst_rect;
			surface->public.src_rect =
					srf_updates[i].scaling_info->src_rect;
			surface->public.clip_rect =
					srf_updates[i].scaling_info->clip_rect;
		}

		if (srf_updates[i].plane_info) {
			surface->public.color_space =
					srf_updates[i].plane_info->color_space;
			surface->public.format =
					srf_updates[i].plane_info->format;
			surface->public.plane_size =
					srf_updates[i].plane_info->plane_size;
			surface->public.rotation =
					srf_updates[i].plane_info->rotation;
			surface->public.horizontal_mirror =
					srf_updates[i].plane_info->horizontal_mirror;
			surface->public.stereo_format =
					srf_updates[i].plane_info->stereo_format;
			surface->public.tiling_info =
					srf_updates[i].plane_info->tiling_info;
			surface->public.visible =
					srf_updates[i].plane_info->visible;
			surface->public.dcc =
					srf_updates[i].plane_info->dcc;
		}

		/* not sure if we still need this */
		if (update_type == UPDATE_TYPE_FULL) {
			for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->surface != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}

		if (srf_updates[i].gamma &&
				srf_updates[i].gamma != surface->public.gamma_correction) {
			if (surface->public.gamma_correction != NULL)
				dc_gamma_release(&surface->public.gamma_correction);

			dc_gamma_retain(srf_updates[i].gamma);
			surface->public.gamma_correction =
					srf_updates[i].gamma;
		}

		if (srf_updates[i].in_transfer_func &&
				srf_updates[i].in_transfer_func != surface->public.in_transfer_func) {
			if (surface->public.in_transfer_func != NULL)
				dc_transfer_func_release(
						surface->public.in_transfer_func);

			dc_transfer_func_retain(
					srf_updates[i].in_transfer_func);
			surface->public.in_transfer_func =
					srf_updates[i].in_transfer_func;
		}

		if (srf_updates[i].out_transfer_func &&
				srf_updates[i].out_transfer_func != dc_stream->out_transfer_func) {
			if (dc_stream->out_transfer_func != NULL)
				dc_transfer_func_release(dc_stream->out_transfer_func);
			dc_transfer_func_retain(srf_updates[i].out_transfer_func);
			stream->public.out_transfer_func = srf_updates[i].out_transfer_func;
		}
		if (srf_updates[i].hdr_static_metadata)
			surface->public.hdr_static_ctx =
					*(srf_updates[i].hdr_static_metadata);
	}

	if (update_type == UPDATE_TYPE_FULL) {
		if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
			BREAK_TO_DEBUGGER();
			return;
		} else
			core_dc->hwss.set_bandwidth(core_dc, context, false);
	}

	if (!surface_count) /* reset */
		core_dc->hwss.apply_ctx_for_surface(core_dc, NULL, context);

	/* Lock pipes for provided surfaces */
	for (i = 0; i < surface_count; i++) {
		struct core_surface *surface = DC_SURFACE_TO_CORE(srf_updates[i].surface);

		for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->surface != surface)
				continue;
			if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
				core_dc->hwss.pipe_control_lock(
						core_dc,
						pipe_ctx,
						true);
			}
		}
	}

	/* Perform requested Updates */
	for (i = 0; i < surface_count; i++) {
		struct core_surface *surface = DC_SURFACE_TO_CORE(srf_updates[i].surface);

		if (update_type >= UPDATE_TYPE_MED) {
			core_dc->hwss.apply_ctx_for_surface(
					core_dc, surface, context);
			context_timing_trace(dc, &context->res_ctx);
		}

		for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
			struct pipe_ctx *cur_pipe_ctx;
			bool is_new_pipe_surface = true;

			if (pipe_ctx->surface != surface)
				continue;

			if (srf_updates[i].flip_addr)
				core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);

			if (update_type == UPDATE_TYPE_FAST)
				continue;

			cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
			if (cur_pipe_ctx->surface == pipe_ctx->surface)
				is_new_pipe_surface = false;

			if (is_new_pipe_surface ||
					srf_updates[i].in_transfer_func)
				core_dc->hwss.set_input_transfer_func(
						pipe_ctx, pipe_ctx->surface);

			if (is_new_pipe_surface ||
					srf_updates[i].out_transfer_func)
				core_dc->hwss.set_output_transfer_func(
						pipe_ctx,
						pipe_ctx->surface,
						pipe_ctx->stream);

			if (srf_updates[i].hdr_static_metadata) {
				resource_build_info_frame(pipe_ctx);
				core_dc->hwss.update_info_frame(pipe_ctx);
			}
		}
	}

	/* Unlock pipes */
	for (i = context->res_ctx.pool->pipe_count - 1; i >= 0; i--) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		for (j = 0; j < surface_count; j++) {
			if (srf_updates[j].surface == &pipe_ctx->surface->public) {
				if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
					core_dc->hwss.pipe_control_lock(
							core_dc,
							pipe_ctx,
							false);
				}
				break;
			}
		}
	}

	if (core_dc->current_context != context) {
		resource_validate_ctx_destruct(core_dc->current_context);
		core_dc->temp_flip_context = core_dc->current_context;

		core_dc->current_context = context;
	}
}

uint8_t dc_get_current_stream_count(const struct dc *dc)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	return core_dc->current_context->stream_count;
}

struct dc_stream *dc_get_stream_at_index(const struct dc *dc, uint8_t i)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	if (i < core_dc->current_context->stream_count)
		return &(core_dc->current_context->streams[i]->public);
	return NULL;
}

const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	return &core_dc->links[link_index]->public;
}

const struct graphics_object_id dc_get_link_id_at_index(
		struct dc *dc, uint32_t link_index)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	return core_dc->links[link_index]->link_id;
}

const struct ddc_service *dc_get_ddc_at_index(
		struct dc *dc, uint32_t link_index)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	return core_dc->links[link_index]->ddc;
}

enum dc_irq_source dc_get_hpd_irq_source_at_index(
		struct dc *dc, uint32_t link_index)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	return core_dc->links[link_index]->public.irq_source_hpd;
}

const struct audio **dc_get_audios(struct dc *dc)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	return (const struct audio **)core_dc->res_pool->audios;
}

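/* Flip helper: latch the new addresses into the surfaces and program the
 * plane address on every pipe that scans them out.
 */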
void dc_flip_surface_addrs(
		struct dc *dc,
		const struct dc_surface *const surfaces[],
		struct dc_flip_addrs flip_addrs[],
		uint32_t count)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	int i, j;

	for (i = 0; i < count; i++) {
		struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);

		surface->public.address = flip_addrs[i].address;
		surface->public.flip_immediate = flip_addrs[i].flip_immediate;

		for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->surface != surface)
				continue;

			core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
		}
	}
}

enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
}

void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	dal_irq_service_ack(core_dc->res_pool->irqs, src);
}

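/* D0 re-runs hardware init; any other state powers the hardware down and
 * clears the current context so that resume starts from a clean slate.
 */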
void dc_set_power_state(
		struct dc *dc,
		enum dc_acpi_cm_power_state power_state)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		core_dc->hwss.init_hw(core_dc);
		break;
	default:
		core_dc->hwss.power_down(core_dc);

		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */
		memset(core_dc->current_context, 0,
				sizeof(*core_dc->current_context));

		core_dc->current_context->res_ctx.pool = core_dc->res_pool;

		break;
	}
}

void dc_resume(const struct dc *dc)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	uint32_t i;

	for (i = 0; i < core_dc->link_count; i++)
		core_link_resume(core_dc->links[i]);
}

bool dc_read_aux_dpcd(
		struct dc *dc,
		uint32_t link_index,
		uint32_t address,
		uint8_t *data,
		uint32_t size)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	struct core_link *link = core_dc->links[link_index];
	enum ddc_result r = dal_ddc_service_read_dpcd_data(
			link->ddc,
			false,
			I2C_MOT_UNDEF,
			address,
			data,
			size);
	return r == DDC_RESULT_SUCESSFULL;
}

bool dc_write_aux_dpcd(
		struct dc *dc,
		uint32_t link_index,
		uint32_t address,
		const uint8_t *data,
		uint32_t size)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_link *link = core_dc->links[link_index];

	enum ddc_result r = dal_ddc_service_write_dpcd_data(
			link->ddc,
			false,
			I2C_MOT_UNDEF,
			address,
			data,
			size);
	return r == DDC_RESULT_SUCESSFULL;
}

bool dc_read_aux_i2c(
		struct dc *dc,
		uint32_t link_index,
		enum i2c_mot_mode mot,
		uint32_t address,
		uint8_t *data,
		uint32_t size)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	struct core_link *link = core_dc->links[link_index];
	enum ddc_result r = dal_ddc_service_read_dpcd_data(
			link->ddc,
			true,
			mot,
			address,
			data,
			size);
	return r == DDC_RESULT_SUCESSFULL;
}

bool dc_write_aux_i2c(
		struct dc *dc,
		uint32_t link_index,
		enum i2c_mot_mode mot,
		uint32_t address,
		const uint8_t *data,
		uint32_t size)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_link *link = core_dc->links[link_index];

	enum ddc_result r = dal_ddc_service_write_dpcd_data(
			link->ddc,
			true,
			mot,
			address,
			data,
			size);
	return r == DDC_RESULT_SUCESSFULL;
}

bool dc_query_ddc_data(
		struct dc *dc,
		uint32_t link_index,
		uint32_t address,
		uint8_t *write_buf,
		uint32_t write_size,
		uint8_t *read_buf,
		uint32_t read_size)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	struct core_link *link = core_dc->links[link_index];

	bool result = dal_ddc_service_query_ddc_data(
			link->ddc,
			address,
			write_buf,
			write_size,
			read_buf,
			read_size);

	return result;
}

bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	struct core_link *link = core_dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dal_i2caux_submit_i2c_command(
		ddc->ctx->i2caux,
		ddc->ddc_pin,
		cmd);
}

static bool link_add_remote_sink_helper(struct core_link *core_link, struct dc_sink *sink)
{
	struct dc_link *dc_link = &core_link->public;

	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}

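/* Create a sink for a remote (e.g. MST) display from a raw EDID, attach it to
 * the link and parse its capabilities; on failure the sink is released again.
 */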
struct dc_sink *dc_link_add_remote_sink(
		const struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;
	struct core_link *core_link = DC_LINK_TO_LINK(link);

	if (len > MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			core_link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			core_link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	if (edid_status != EDID_OK)
		goto fail;

	return dc_sink;
fail:
	dc_link_remove_remote_sink(link, dc_sink);
fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}

void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
{
	struct core_link *core_link = DC_LINK_TO_LINK(link);
	struct dc_link *dc_link = &core_link->public;

	dc_link->local_sink = sink;

	if (sink == NULL) {
		dc_link->type = dc_connection_none;
	} else {
		dc_link->type = dc_connection_single;
	}
}

void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
{
	int i;
	struct core_link *core_link = DC_LINK_TO_LINK(link);
	struct dc_link *dc_link = &core_link->public;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < dc_link->sink_count; i++) {
		if (dc_link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			dc_link->remote_sinks[i] = NULL;

			/* shrink array to remove empty place */
			while (i < dc_link->sink_count - 1) {
				dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];
				i++;
			}
			dc_link->remote_sinks[i] = NULL;
			dc_link->sink_count--;
			return;
		}
	}
}

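/* Hand the new DCHUB addressing configuration to the first available
 * mem_input instance.
 */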
bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data)
{
	int i;
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct mem_input *mi = NULL;

	for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
		if (core_dc->res_pool->mis[i] != NULL) {
			mi = core_dc->res_pool->mis[i];
			break;
		}
	}
	if (mi == NULL) {
		dm_error("no mem_input!\n");
		return false;
	}

	if (mi->funcs->mem_input_update_dchub)
		mi->funcs->mem_input_update_dchub(mi, dh_data);
	else
		ASSERT(mi->funcs->mem_input_update_dchub);

	return true;
}