drivers/gpu/drm/amd/display/dc/core/dc_resource.c
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "dm_services.h"
26
27#include "resource.h"
28#include "include/irq_service_interface.h"
29#include "link_encoder.h"
30#include "stream_encoder.h"
31#include "opp.h"
32#include "timing_generator.h"
33#include "transform.h"
34#include "set_mode_types.h"
35
36#include "virtual/virtual_stream_encoder.h"
37
38#include "dce80/dce80_resource.h"
39#include "dce100/dce100_resource.h"
40#include "dce110/dce110_resource.h"
41#include "dce112/dce112_resource.h"
42
43enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
44{
45 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
46 switch (asic_id.chip_family) {
47
48 case FAMILY_CI:
49 case FAMILY_KV:
50 dc_version = DCE_VERSION_8_0;
51 break;
52 case FAMILY_CZ:
53 dc_version = DCE_VERSION_11_0;
54 break;
55
56 case FAMILY_VI:
57 if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
58 ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
59 dc_version = DCE_VERSION_10_0;
60 break;
61 }
62 if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
63 ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
64 ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
65 dc_version = DCE_VERSION_11_2;
66 }
67 break;
68 default:
69 dc_version = DCE_VERSION_UNKNOWN;
70 break;
71 }
72 return dc_version;
73}
74
75struct resource_pool *dc_create_resource_pool(
76 struct core_dc *dc,
77 int num_virtual_links,
78 enum dce_version dc_version,
79 struct hw_asic_id asic_id)
80{
81
82 switch (dc_version) {
83 case DCE_VERSION_8_0:
84 return dce80_create_resource_pool(
85 num_virtual_links, dc);
86 case DCE_VERSION_10_0:
87 return dce100_create_resource_pool(
88 num_virtual_links, dc);
89 case DCE_VERSION_11_0:
90 return dce110_create_resource_pool(
91 num_virtual_links, dc, asic_id);
92 case DCE_VERSION_11_2:
93 return dce112_create_resource_pool(
94 num_virtual_links, dc);
95 default:
96 break;
97 }
98
99 return NULL;
100}
101
102void dc_destroy_resource_pool(struct core_dc *dc)
103{
104 if (dc) {
105 if (dc->res_pool)
106 dc->res_pool->funcs->destroy(&dc->res_pool);
107
108 if (dc->hwseq)
109 dm_free(dc->hwseq);
110 }
111}
112
113static void update_num_audio(
114 const struct resource_straps *straps,
115 unsigned int *num_audio,
116 struct audio_support *aud_support)
117{
118 if (straps->hdmi_disable == 0) {
119 aud_support->hdmi_audio_native = true;
120 aud_support->hdmi_audio_on_dongle = true;
121 aud_support->dp_audio = true;
122 } else {
123 if (straps->dc_pinstraps_audio & 0x2) {
124 aud_support->hdmi_audio_on_dongle = true;
125 aud_support->dp_audio = true;
126 } else {
127 aud_support->dp_audio = true;
128 }
129 }
130
131 switch (straps->audio_stream_number) {
132 case 0: /* multi streams supported */
133 break;
134 case 1: /* multi streams not supported */
135 *num_audio = 1;
136 break;
137 default:
138 DC_ERR("DC: unexpected audio fuse!\n");
139 }
140}
141
142bool resource_construct(
143 unsigned int num_virtual_links,
144 struct core_dc *dc,
145 struct resource_pool *pool,
146 const struct resource_create_funcs *create_funcs)
147{
148 struct dc_context *ctx = dc->ctx;
149 const struct resource_caps *caps = pool->res_cap;
150 int i;
151 unsigned int num_audio = caps->num_audio;
152 struct resource_straps straps = {0};
153
154 if (create_funcs->read_dce_straps)
155 create_funcs->read_dce_straps(dc->ctx, &straps);
156
157 pool->audio_count = 0;
158 if (create_funcs->create_audio) {
159 /* find the total number of streams available via the
160 * AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
161 * registers (one for each pin) starting from pin 1
162 * up to the max number of audio pins.
163 * We stop on the first pin where
164 * PORT_CONNECTIVITY == 1 (as instructed by HW team).
165 */
166 update_num_audio(&straps, &num_audio, &pool->audio_support);
167 for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
168 struct audio *aud = create_funcs->create_audio(ctx, i);
169
170 if (aud == NULL) {
171 DC_ERR("DC: failed to create audio!\n");
172 return false;
173 }
174
175 if (!aud->funcs->endpoint_valid(aud)) {
176 aud->funcs->destroy(&aud);
177 break;
178 }
179
180 pool->audios[i] = aud;
181 pool->audio_count++;
182 }
183 }
184
185 pool->stream_enc_count = 0;
186 if (create_funcs->create_stream_encoder) {
187 for (i = 0; i < caps->num_stream_encoder; i++) {
188 pool->stream_enc[i] = create_funcs->create_stream_encoder(i, ctx);
189 if (pool->stream_enc[i] == NULL)
190 DC_ERR("DC: failed to create stream_encoder!\n");
191 pool->stream_enc_count++;
192 }
193 }
194
195 for (i = 0; i < num_virtual_links; i++) {
196 pool->stream_enc[pool->stream_enc_count] =
197 virtual_stream_encoder_create(
198 ctx, ctx->dc_bios);
199 if (pool->stream_enc[pool->stream_enc_count] == NULL) {
200 DC_ERR("DC: failed to create stream_encoder!\n");
201 return false;
202 }
203 pool->stream_enc_count++;
204 }
205
206 dc->hwseq = create_funcs->create_hwseq(ctx);
207
208 return true;
209}
210
211
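/* Clock sources in the pool are reference counted per context: the
 * unreference/reference helpers below decrement or increment the
 * count for a given clock source (the DP clock source has its own
 * counter), and a source is powered down once its count reaches zero.
 */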
212void resource_unreference_clock_source(
213 struct resource_context *res_ctx,
214 struct clock_source *clock_source)
215{
216 int i;
217 for (i = 0; i < res_ctx->pool->clk_src_count; i++) {
218 if (res_ctx->pool->clock_sources[i] != clock_source)
219 continue;
220
221 res_ctx->clock_source_ref_count[i]--;
222
223 if (res_ctx->clock_source_ref_count[i] == 0)
224 clock_source->funcs->cs_power_down(clock_source);
225
226 break;
227 }
228
229 if (res_ctx->pool->dp_clock_source == clock_source) {
230 res_ctx->dp_clock_source_ref_count--;
231
232 if (res_ctx->dp_clock_source_ref_count == 0)
233 clock_source->funcs->cs_power_down(clock_source);
234 }
235}
236
237void resource_reference_clock_source(
238 struct resource_context *res_ctx,
239 struct clock_source *clock_source)
240{
241 int i;
242 for (i = 0; i < res_ctx->pool->clk_src_count; i++) {
243 if (res_ctx->pool->clock_sources[i] != clock_source)
244 continue;
245
246 res_ctx->clock_source_ref_count[i]++;
247 break;
248 }
249
250 if (res_ctx->pool->dp_clock_source == clock_source)
251 res_ctx->dp_clock_source_ref_count++;
252}
253
254bool resource_are_streams_timing_synchronizable(
255 const struct core_stream *stream1,
256 const struct core_stream *stream2)
257{
258 if (stream1->public.timing.h_total != stream2->public.timing.h_total)
259 return false;
260
261 if (stream1->public.timing.v_total != stream2->public.timing.v_total)
262 return false;
263
264 if (stream1->public.timing.h_addressable
265 != stream2->public.timing.h_addressable)
266 return false;
267
268 if (stream1->public.timing.v_addressable
269 != stream2->public.timing.v_addressable)
270 return false;
271
272 if (stream1->public.timing.pix_clk_khz
273 != stream2->public.timing.pix_clk_khz)
274 return false;
275
276 if (stream1->phy_pix_clk != stream2->phy_pix_clk
277 && !dc_is_dp_signal(stream1->signal)
278 && !dc_is_dp_signal(stream2->signal))
279 return false;
280
281 return true;
282}
283
284static bool is_sharable_clk_src(
285 const struct pipe_ctx *pipe_with_clk_src,
286 const struct pipe_ctx *pipe)
287{
288 if (pipe_with_clk_src->clock_source == NULL)
289 return false;
290
291 if (pipe_with_clk_src->stream == NULL) {
292 ASSERT(0);
293 return false;
294 }
295
296 if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL)
297 return false;
298
299 if (dc_is_dp_signal(pipe_with_clk_src->stream->signal))
300 return false;
301
302 if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
303 && dc_is_dvi_signal(pipe->stream->signal))
304 return false;
305
306 if (dc_is_hdmi_signal(pipe->stream->signal)
307 && dc_is_dvi_signal(pipe_with_clk_src->stream->signal))
308 return false;
309
310 if (!resource_are_streams_timing_synchronizable(
311 pipe_with_clk_src->stream, pipe->stream))
312 return false;
313
314 return true;
315}
316
317struct clock_source *resource_find_used_clk_src_for_sharing(
318 struct resource_context *res_ctx,
319 struct pipe_ctx *pipe_ctx)
320{
321 int i;
322
323 for (i = 0; i < MAX_PIPES; i++) {
324 if (is_sharable_clk_src(&res_ctx->pipe_ctx[i], pipe_ctx))
325 return res_ctx->pipe_ctx[i].clock_source;
326 }
327
328 return NULL;
329}
330
331static enum pixel_format convert_pixel_format_to_dalsurface(
332 enum surface_pixel_format surface_pixel_format)
333{
334 enum pixel_format dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
335
336 switch (surface_pixel_format) {
337 case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
338 dal_pixel_format = PIXEL_FORMAT_INDEX8;
339 break;
340 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
341 dal_pixel_format = PIXEL_FORMAT_RGB565;
342 break;
343 case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
344 dal_pixel_format = PIXEL_FORMAT_RGB565;
345 break;
346 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
347 dal_pixel_format = PIXEL_FORMAT_ARGB8888;
348 break;
349 case SURFACE_PIXEL_FORMAT_GRPH_BGRA8888:
350 dal_pixel_format = PIXEL_FORMAT_ARGB8888;
351 break;
352 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
353 dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
354 break;
355 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
356 dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
357 break;
358 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
359 dal_pixel_format = PIXEL_FORMAT_ARGB2101010_XRBIAS;
360 break;
361 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
362 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
363 dal_pixel_format = PIXEL_FORMAT_FP16;
364 break;
365 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
366 dal_pixel_format = PIXEL_FORMAT_420BPP12;
367 break;
368 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
369 dal_pixel_format = PIXEL_FORMAT_420BPP12;
370 break;
371 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
372 default:
373 dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
374 break;
375 }
376 return dal_pixel_format;
377}
378
379static void rect_swap_helper(struct rect *rect)
380{
381 uint32_t temp = 0;
382
383 temp = rect->height;
384 rect->height = rect->width;
385 rect->width = temp;
386
387 temp = rect->x;
388 rect->x = rect->y;
389 rect->y = temp;
390}
391
392static void calculate_viewport(
393 const struct dc_surface *surface,
394 struct pipe_ctx *pipe_ctx)
395{
396 struct rect stream_src = pipe_ctx->stream->public.src;
397 struct rect src = surface->src_rect;
398 struct rect dst = surface->dst_rect;
399 struct rect surface_clip = surface->clip_rect;
400 struct rect clip = {0};
401
402
403 if (surface->rotation == ROTATION_ANGLE_90 ||
404 surface->rotation == ROTATION_ANGLE_270) {
405 rect_swap_helper(&src);
406 rect_swap_helper(&dst);
407 rect_swap_helper(&surface_clip);
408 rect_swap_helper(&stream_src);
409 }
410
411 /* The actual clip is an intersection between stream
412 * source and surface clip
413 */
414 clip.x = stream_src.x > surface_clip.x ?
415 stream_src.x : surface_clip.x;
416
417 clip.width = stream_src.x + stream_src.width <
418 surface_clip.x + surface_clip.width ?
419 stream_src.x + stream_src.width - clip.x :
420 surface_clip.x + surface_clip.width - clip.x ;
421
422 clip.y = stream_src.y > surface_clip.y ?
423 stream_src.y : surface_clip.y;
424
425 clip.height = stream_src.y + stream_src.height <
426 surface_clip.y + surface_clip.height ?
427 stream_src.y + stream_src.height - clip.y :
428 surface_clip.y + surface_clip.height - clip.y ;
429
430 /* offset = src.ofs + (clip.ofs - dst.ofs) * scl_ratio
431 * num_pixels = clip.num_pix * scl_ratio
432 */
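/* Illustrative example (hypothetical values): with surface src
 * 1920x1080 at (0,0), dst 960x540 at (0,0) and clip.x = 480, the code
 * below yields viewport.x = 0 + (480 - 0) * 1920 / 960 = 960, i.e.
 * clip coordinates are mapped back into source pixels by the inverse
 * of the surface scale factor.
 */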
433 pipe_ctx->scl_data.viewport.x = src.x + (clip.x - dst.x) *
434 src.width / dst.width;
435 pipe_ctx->scl_data.viewport.width = clip.width *
436 src.width / dst.width;
437
438 pipe_ctx->scl_data.viewport.y = src.y + (clip.y - dst.y) *
439 src.height / dst.height;
440 pipe_ctx->scl_data.viewport.height = clip.height *
441 src.height / dst.height;
442
443 /* Minimum viewport such that 420/422 chroma vp is non 0 */
444 if (pipe_ctx->scl_data.viewport.width < 2)
445 pipe_ctx->scl_data.viewport.width = 2;
446 if (pipe_ctx->scl_data.viewport.height < 2)
447 pipe_ctx->scl_data.viewport.height = 2;
448}
449
450static void calculate_recout(
451 const struct dc_surface *surface,
452 struct pipe_ctx *pipe_ctx)
453{
454 struct core_stream *stream = pipe_ctx->stream;
455 struct rect clip = surface->clip_rect;
456
457 pipe_ctx->scl_data.recout.x = stream->public.dst.x;
458 if (stream->public.src.x < clip.x)
459 pipe_ctx->scl_data.recout.x += (clip.x
460 - stream->public.src.x) * stream->public.dst.width
461 / stream->public.src.width;
462
463 pipe_ctx->scl_data.recout.width = clip.width *
464 stream->public.dst.width / stream->public.src.width;
465 if (pipe_ctx->scl_data.recout.width + pipe_ctx->scl_data.recout.x >
466 stream->public.dst.x + stream->public.dst.width)
467 pipe_ctx->scl_data.recout.width =
468 stream->public.dst.x + stream->public.dst.width
469 - pipe_ctx->scl_data.recout.x;
470
471 pipe_ctx->scl_data.recout.y = stream->public.dst.y;
472 if (stream->public.src.y < clip.y)
473 pipe_ctx->scl_data.recout.y += (clip.y
474 - stream->public.src.y) * stream->public.dst.height
475 / stream->public.src.height;
476
477 pipe_ctx->scl_data.recout.height = clip.height *
478 stream->public.dst.height / stream->public.src.height;
479 if (pipe_ctx->scl_data.recout.height + pipe_ctx->scl_data.recout.y >
480 stream->public.dst.y + stream->public.dst.height)
481 pipe_ctx->scl_data.recout.height =
482 stream->public.dst.y + stream->public.dst.height
483 - pipe_ctx->scl_data.recout.y;
484}
485
486static void calculate_scaling_ratios(
487 const struct dc_surface *surface,
488 struct pipe_ctx *pipe_ctx)
489{
490 struct core_stream *stream = pipe_ctx->stream;
491 const uint32_t in_w = stream->public.src.width;
492 const uint32_t in_h = stream->public.src.height;
493 const uint32_t out_w = stream->public.dst.width;
494 const uint32_t out_h = stream->public.dst.height;
495
496 pipe_ctx->scl_data.ratios.horz = dal_fixed31_32_from_fraction(
497 surface->src_rect.width,
498 surface->dst_rect.width);
499 pipe_ctx->scl_data.ratios.vert = dal_fixed31_32_from_fraction(
500 surface->src_rect.height,
501 surface->dst_rect.height);
502
503 if (surface->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE)
504 pipe_ctx->scl_data.ratios.horz.value *= 2;
505 else if (surface->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM)
506 pipe_ctx->scl_data.ratios.vert.value *= 2;
507
508 pipe_ctx->scl_data.ratios.vert.value = div64_s64(
509 pipe_ctx->scl_data.ratios.vert.value * in_h, out_h);
510 pipe_ctx->scl_data.ratios.horz.value = div64_s64(
511 pipe_ctx->scl_data.ratios.horz.value * in_w, out_w);
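/* The resulting ratio is (surface src / surface dst) * (stream src /
 * stream dst), doubled above for side-by-side or top-and-bottom
 * stereo; e.g. a 3840-wide surface scaled into a 1920-wide destination
 * on an unscaled stream gives a horizontal ratio of 2.0 (illustrative
 * values).
 */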
512
513 pipe_ctx->scl_data.ratios.horz_c = pipe_ctx->scl_data.ratios.horz;
514 pipe_ctx->scl_data.ratios.vert_c = pipe_ctx->scl_data.ratios.vert;
515
516 if (pipe_ctx->scl_data.format == PIXEL_FORMAT_420BPP12) {
517 pipe_ctx->scl_data.ratios.horz_c.value /= 2;
518 pipe_ctx->scl_data.ratios.vert_c.value /= 2;
519 }
520}
521
522bool resource_build_scaling_params(
523 const struct dc_surface *surface,
524 struct pipe_ctx *pipe_ctx)
525{
526 bool res;
527 struct dc_crtc_timing *timing = &pipe_ctx->stream->public.timing;
528 /* Important: scaling ratio calculation requires pixel format,
529 * lb depth calculation requires recout and taps require scaling ratios.
530 */
531 pipe_ctx->scl_data.format = convert_pixel_format_to_dalsurface(surface->format);
532
533 calculate_viewport(surface, pipe_ctx);
534
535 if (pipe_ctx->scl_data.viewport.height < 16 || pipe_ctx->scl_data.viewport.width < 16)
536 return false;
537
538 calculate_scaling_ratios(surface, pipe_ctx);
539
540 calculate_recout(surface, pipe_ctx);
541
542 /**
543 * Setting line buffer pixel depth to 24bpp yields banding
544 * on certain displays, such as the Sharp 4k
545 */
546 pipe_ctx->scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
547
548 pipe_ctx->scl_data.h_active = timing->h_addressable;
549 pipe_ctx->scl_data.v_active = timing->v_addressable;
550
551 /* Taps calculations */
552 res = pipe_ctx->xfm->funcs->transform_get_optimal_number_of_taps(
553 pipe_ctx->xfm, &pipe_ctx->scl_data, &surface->scaling_quality);
554
555 if (!res) {
556 /* Try 24 bpp linebuffer */
557 pipe_ctx->scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
558
559 res = pipe_ctx->xfm->funcs->transform_get_optimal_number_of_taps(
560 pipe_ctx->xfm, &pipe_ctx->scl_data, &surface->scaling_quality);
561 }
562
563 dm_logger_write(pipe_ctx->stream->ctx->logger, LOG_SCALER,
564 "%s: Viewport:\nheight:%d width:%d x:%d "
565 "y:%d\n dst_rect:\nheight:%d width:%d x:%d "
566 "y:%d\n",
567 __func__,
568 pipe_ctx->scl_data.viewport.height,
569 pipe_ctx->scl_data.viewport.width,
570 pipe_ctx->scl_data.viewport.x,
571 pipe_ctx->scl_data.viewport.y,
572 surface->dst_rect.height,
573 surface->dst_rect.width,
574 surface->dst_rect.x,
575 surface->dst_rect.y);
576
577 return res;
578}
579
580
581enum dc_status resource_build_scaling_params_for_context(
582 const struct core_dc *dc,
583 struct validate_context *context)
584{
585 int i;
586
587 for (i = 0; i < MAX_PIPES; i++) {
588 if (context->res_ctx.pipe_ctx[i].surface != NULL &&
589 context->res_ctx.pipe_ctx[i].stream != NULL)
590 if (!resource_build_scaling_params(
591 &context->res_ctx.pipe_ctx[i].surface->public,
592 &context->res_ctx.pipe_ctx[i]))
593 return DC_FAIL_BANDWIDTH_VALIDATE;
594 }
595
596 return DC_OK;
597}
598
599static void detach_surfaces_for_target(
600 struct validate_context *context,
601 const struct dc_target *dc_target)
602{
603 int i;
604 struct core_stream *stream = DC_STREAM_TO_CORE(dc_target->streams[0]);
605
606 for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
607 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
608 if (cur_pipe->stream == stream) {
609 cur_pipe->surface = NULL;
610 cur_pipe->top_pipe = NULL;
611 cur_pipe->bottom_pipe = NULL;
612 }
613 }
614}
615
616struct pipe_ctx *find_idle_secondary_pipe(struct resource_context *res_ctx)
617{
618 int i;
619 struct pipe_ctx *secondary_pipe = NULL;
620
621 /*
622 * search backwards for the second pipe to keep pipe
623 * assignment more consistent
624 */
625
626 for (i = res_ctx->pool->pipe_count - 1; i >= 0; i--) {
627 if (res_ctx->pipe_ctx[i].stream == NULL) {
628 secondary_pipe = &res_ctx->pipe_ctx[i];
629 secondary_pipe->pipe_idx = i;
630 break;
631 }
632 }
633
634
635 return secondary_pipe;
636}
637
638struct pipe_ctx *resource_get_head_pipe_for_stream(
639 struct resource_context *res_ctx,
640 const struct core_stream *stream)
641{
642 int i;
643 for (i = 0; i < res_ctx->pool->pipe_count; i++) {
644 if (res_ctx->pipe_ctx[i].stream == stream &&
645 !res_ctx->pipe_ctx[i].top_pipe) {
646 return &res_ctx->pipe_ctx[i];
647 break;
648 }
649 }
650 return NULL;
651}
652
653/*
654 * A free_pipe for a target is defined here as a pipe with a stream that belongs
655 * to the target but has no surface attached yet
656 */
657static struct pipe_ctx *acquire_free_pipe_for_target(
658 struct resource_context *res_ctx,
659 const struct dc_target *dc_target)
660{
661 int i;
662 struct core_stream *stream = DC_STREAM_TO_CORE(dc_target->streams[0]);
663
664 struct pipe_ctx *head_pipe = NULL;
665
666 /* Find head pipe, which has the back end set up*/
667
668 head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
669
670 if (!head_pipe)
671 ASSERT(0);
672
673 if (!head_pipe->surface)
674 return head_pipe;
675
676 /* Re-use pipe already acquired for this stream if available*/
677 for (i = res_ctx->pool->pipe_count - 1; i >= 0; i--) {
678 if (res_ctx->pipe_ctx[i].stream == stream &&
679 !res_ctx->pipe_ctx[i].surface) {
680 return &res_ctx->pipe_ctx[i];
681 }
682 }
683
684 /*
685 * At this point we have no reusable pipe for this stream and we need
686 * to acquire an idle one to satisfy the request
687 */
688
689 if(!res_ctx->pool->funcs->acquire_idle_pipe_for_layer)
690 return NULL;
691
692 return res_ctx->pool->funcs->acquire_idle_pipe_for_layer(res_ctx, stream);
693
694}
695
696static void release_free_pipes_for_target(
697 struct resource_context *res_ctx,
698 const struct dc_target *dc_target)
699{
700 int i;
701 struct core_stream *stream = DC_STREAM_TO_CORE(dc_target->streams[0]);
702
703 for (i = res_ctx->pool->pipe_count - 1; i >= 0; i--) {
704 if (res_ctx->pipe_ctx[i].stream == stream &&
705 !res_ctx->pipe_ctx[i].surface) {
706 res_ctx->pipe_ctx[i].stream = NULL;
707 }
708 }
709}
710
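/* Attach the given surfaces to the pipes driving dc_target in this
 * context: retain the new surfaces, detach and release the surfaces
 * currently recorded in the target status, then acquire one pipe per
 * surface and chain the pipes through top_pipe/bottom_pipe so the
 * last surface ends up on the bottom-most pipe.
 */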
711bool resource_attach_surfaces_to_context(
712 const struct dc_surface * const *surfaces,
713 int surface_count,
714 const struct dc_target *dc_target,
715 struct validate_context *context)
716{
717 int i;
718 struct pipe_ctx *tail_pipe;
719 struct dc_target_status *target_status = NULL;
720
721
722 if (surface_count > MAX_SURFACE_NUM) {
723 dm_error("Surface: can not attach %d surfaces! Maximum is: %d\n",
724 surface_count, MAX_SURFACE_NUM);
725 return false;
726 }
727
728 for (i = 0; i < context->target_count; i++)
729 if (&context->targets[i]->public == dc_target) {
730 target_status = &context->target_status[i];
731 break;
732 }
733 if (target_status == NULL) {
734 dm_error("Existing target not found; failed to attach surfaces\n");
735 return false;
736 }
737
738 /* retain new surfaces */
739 for (i = 0; i < surface_count; i++)
740 dc_surface_retain(surfaces[i]);
741
742 detach_surfaces_for_target(context, dc_target);
743
744 /* release existing surfaces*/
745 for (i = 0; i < target_status->surface_count; i++)
746 dc_surface_release(target_status->surfaces[i]);
747
748 for (i = surface_count; i < target_status->surface_count; i++)
749 target_status->surfaces[i] = NULL;
750
751 target_status->surface_count = 0;
752
753 if (surface_count == 0)
754 return true;
755
756 tail_pipe = NULL;
757 for (i = 0; i < surface_count; i++) {
758 struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);
759 struct pipe_ctx *free_pipe = acquire_free_pipe_for_target(
760 &context->res_ctx, dc_target);
761
762 if (!free_pipe) {
763 target_status->surfaces[i] = NULL;
764 return false;
765 }
766
767 free_pipe->surface = surface;
768
769 if (tail_pipe) {
770 free_pipe->top_pipe = tail_pipe;
771 tail_pipe->bottom_pipe = free_pipe;
772 }
773
774 tail_pipe = free_pipe;
775 }
776
777 release_free_pipes_for_target(&context->res_ctx, dc_target);
778
779 /* assign new surfaces*/
780 for (i = 0; i < surface_count; i++)
781 target_status->surfaces[i] = surfaces[i];
782
783 target_status->surface_count = surface_count;
784
785 return true;
786}
787
788
789static bool is_timing_changed(const struct core_stream *cur_stream,
790 const struct core_stream *new_stream)
791{
792 if (cur_stream == NULL)
793 return true;
794
795 /* If the sink pointer changed, this is a hotplug and we should do a
796 * full hw setup.
797 */
798 if (cur_stream->sink != new_stream->sink)
799 return true;
800
801 /* If output color space is changed, need to reprogram info frames */
802 if (cur_stream->public.output_color_space !=
803 new_stream->public.output_color_space)
804 return true;
805
806 return memcmp(
807 &cur_stream->public.timing,
808 &new_stream->public.timing,
809 sizeof(struct dc_crtc_timing)) != 0;
810}
811
812static bool are_stream_backends_same(
813 const struct core_stream *stream_a, const struct core_stream *stream_b)
814{
815 if (stream_a == stream_b)
816 return true;
817
818 if (stream_a == NULL || stream_b == NULL)
819 return false;
820
821 if (is_timing_changed(stream_a, stream_b))
822 return false;
823
824 return true;
825}
826
827bool is_target_unchanged(
828 const struct core_target *old_target, const struct core_target *target)
829{
830 int i;
831
832 if (old_target == target)
833 return true;
834 if (old_target->public.stream_count != target->public.stream_count)
835 return false;
836
837 for (i = 0; i < old_target->public.stream_count; i++) {
838 const struct core_stream *old_stream = DC_STREAM_TO_CORE(
839 old_target->public.streams[i]);
840 const struct core_stream *stream = DC_STREAM_TO_CORE(
841 target->public.streams[i]);
842
843 if (!are_stream_backends_same(old_stream, stream))
844 return false;
845 }
846
847 return true;
848}
849
850bool resource_validate_attach_surfaces(
851 const struct dc_validation_set set[],
852 int set_count,
853 const struct validate_context *old_context,
854 struct validate_context *context)
855{
856 int i, j;
857
858 for (i = 0; i < set_count; i++) {
859 for (j = 0; j < old_context->target_count; j++)
860 if (is_target_unchanged(
861 old_context->targets[j],
862 context->targets[i])) {
863 if (!resource_attach_surfaces_to_context(
864 old_context->target_status[j].surfaces,
865 old_context->target_status[j].surface_count,
866 &context->targets[i]->public,
867 context))
868 return false;
869 context->target_status[i] = old_context->target_status[j];
870 }
871 if (set[i].surface_count != 0)
872 if (!resource_attach_surfaces_to_context(
873 set[i].surfaces,
874 set[i].surface_count,
875 &context->targets[i]->public,
876 context))
877 return false;
878
879 }
880
881 return true;
882}
883
884/* Maximum TMDS single link pixel clock 165MHz */
885#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
886
887static void set_stream_engine_in_use(
888 struct resource_context *res_ctx,
889 struct stream_encoder *stream_enc)
890{
891 int i;
892
893 for (i = 0; i < res_ctx->pool->stream_enc_count; i++) {
894 if (res_ctx->pool->stream_enc[i] == stream_enc)
895 res_ctx->is_stream_enc_acquired[i] = true;
896 }
897}
898
899/* TODO: release audio object */
900static void set_audio_in_use(
901 struct resource_context *res_ctx,
902 struct audio *audio)
903{
904 int i;
905 for (i = 0; i < res_ctx->pool->audio_count; i++) {
906 if (res_ctx->pool->audios[i] == audio) {
907 res_ctx->is_audio_acquired[i] = true;
908 }
909 }
910}
911
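/* Claim the first pipe that has no stream attached, wiring up the
 * per-pipe hardware blocks (tg, mi, ipp, xfm, opp) at the same index.
 * Returns the pipe index, or -1 if every pipe is in use.
 */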
912static int acquire_first_free_pipe(
913 struct resource_context *res_ctx,
914 struct core_stream *stream)
915{
916 int i;
917
918 for (i = 0; i < res_ctx->pool->pipe_count; i++) {
919 if (!res_ctx->pipe_ctx[i].stream) {
920 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
921
922 pipe_ctx->tg = res_ctx->pool->timing_generators[i];
923 pipe_ctx->mi = res_ctx->pool->mis[i];
924 pipe_ctx->ipp = res_ctx->pool->ipps[i];
925 pipe_ctx->xfm = res_ctx->pool->transforms[i];
926 pipe_ctx->opp = res_ctx->pool->opps[i];
927 pipe_ctx->dis_clk = res_ctx->pool->display_clock;
928 pipe_ctx->pipe_idx = i;
929
930 pipe_ctx->stream = stream;
931 return i;
932 }
933 }
934 return -1;
935}
936
937static struct stream_encoder *find_first_free_match_stream_enc_for_link(
938 struct resource_context *res_ctx,
939 struct core_stream *stream)
940{
941 int i;
942 int j = -1;
943 struct core_link *link = stream->sink->link;
944
945 for (i = 0; i < res_ctx->pool->stream_enc_count; i++) {
946 if (!res_ctx->is_stream_enc_acquired[i] &&
947 res_ctx->pool->stream_enc[i]) {
948 /* Store the first available encoder for the second
949 * MST display in a daisy-chain use case */
950 j = i;
951 if (res_ctx->pool->stream_enc[i]->id ==
952 link->link_enc->preferred_engine)
953 return res_ctx->pool->stream_enc[i];
954 }
955 }
956
957 /*
958 * The below can happen when a stream encoder is already acquired:
959 * 1) for the second MST display in a daisy chain, the preferred
960 * engine is already acquired;
961 * 2) for another link whose preferred engine is already acquired
962 * by an MST configuration.
963 *
964 * If the signal is of DP type and the preferred engine is not found, return the last available one.
965 *
966 * TODO - This is just a patch-up and a generic solution is
967 * required for non-DP connectors.
968 */
969
970 if (j >= 0 && dc_is_dp_signal(stream->signal))
971 return res_ctx->pool->stream_enc[j];
972
973 return NULL;
974}
975
976static struct audio *find_first_free_audio(struct resource_context *res_ctx)
977{
978 int i;
979 for (i = 0; i < res_ctx->pool->audio_count; i++) {
980 if (res_ctx->is_audio_acquired[i] == false) {
981 return res_ctx->pool->audios[i];
982 }
983 }
984
985 return NULL;
986}
987
988static void update_stream_signal(struct core_stream *stream)
989{
990 const struct dc_sink *dc_sink = stream->public.sink;
991
992 stream->signal = dc_sink->sink_signal;
993 /* For ASICs that support dual-link DVI, adjust the signal type
994 * based on the timing pixel clock: if the pixel clock is more than
995 * 165 MHz the signal is dual link, otherwise single link.
996 */
997 if (dc_sink->sink_signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
998 dc_sink->sink_signal == SIGNAL_TYPE_DVI_DUAL_LINK) {
999 if (stream->public.timing.pix_clk_khz >
1000 TMDS_MAX_PIXEL_CLOCK_IN_KHZ)
1001 stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1002 else
1003 stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1004 }
1005}
1006
1007bool resource_is_stream_unchanged(
1008 const struct validate_context *old_context, struct core_stream *stream)
1009{
1010 int i, j;
1011
1012 for (i = 0; i < old_context->target_count; i++) {
1013 struct core_target *old_target = old_context->targets[i];
1014
1015 for (j = 0; j < old_target->public.stream_count; j++) {
1016 struct core_stream *old_stream =
1017 DC_STREAM_TO_CORE(old_target->public.streams[j]);
1018
1019 if (are_stream_backends_same(old_stream, stream))
1020 return true;
1021 }
1022 }
1023
1024 return false;
1025}
1026
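/* Copy a pipe context wholesale, but preserve the destination's
 * stream pointer and, if one is already set, its surface pointer.
 */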
1027static void copy_pipe_ctx(
1028 const struct pipe_ctx *from_pipe_ctx, struct pipe_ctx *to_pipe_ctx)
1029{
1030 struct core_surface *surface = to_pipe_ctx->surface;
1031 struct core_stream *stream = to_pipe_ctx->stream;
1032
1033 *to_pipe_ctx = *from_pipe_ctx;
1034 to_pipe_ctx->stream = stream;
1035 if (surface != NULL)
1036 to_pipe_ctx->surface = surface;
1037}
1038
1039static struct core_stream *find_pll_sharable_stream(
1040 const struct core_stream *stream_needs_pll,
1041 struct validate_context *context)
1042{
1043 int i, j;
1044
1045 for (i = 0; i < context->target_count; i++) {
1046 struct core_target *target = context->targets[i];
1047
1048 for (j = 0; j < target->public.stream_count; j++) {
1049 struct core_stream *stream_has_pll =
1050 DC_STREAM_TO_CORE(target->public.streams[j]);
1051
1052 /* We are looking for non dp, non virtual stream */
1053 if (resource_are_streams_timing_synchronizable(
1054 stream_needs_pll, stream_has_pll)
1055 && !dc_is_dp_signal(stream_has_pll->signal)
1056 && stream_has_pll->sink->link->public.connector_signal
1057 != SIGNAL_TYPE_VIRTUAL)
1058 return stream_has_pll;
1059 }
1060 }
1061
1062 return NULL;
1063}
1064
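/* Normalize the pixel clock to an equivalent 24 bpp rate: halve it
 * first for YCbCr 4:2:0, then scale by color depth over 24.
 * Illustrative example: 297000 kHz at COLOR_DEPTH_101010 (30 bpp)
 * normalizes to 297000 * 30 / 24 = 371250 kHz.
 */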
1065static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
1066{
1067 uint32_t pix_clk = timing->pix_clk_khz;
1068 uint32_t normalized_pix_clk = pix_clk;
1069
1070 if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
1071 pix_clk /= 2;
1072
1073 switch (timing->display_color_depth) {
1074 case COLOR_DEPTH_888:
1075 normalized_pix_clk = pix_clk;
1076 break;
1077 case COLOR_DEPTH_101010:
1078 normalized_pix_clk = (pix_clk * 30) / 24;
1079 break;
1080 case COLOR_DEPTH_121212:
1081 normalized_pix_clk = (pix_clk * 36) / 24;
1082 break;
1083 case COLOR_DEPTH_161616:
1084 normalized_pix_clk = (pix_clk * 48) / 24;
1085 break;
1086 default:
1087 ASSERT(0);
1088 break;
1089 }
1090
1091 return normalized_pix_clk;
1092}
1093
1094static void calculate_phy_pix_clks(
1095 const struct core_dc *dc,
1096 struct validate_context *context)
1097{
1098 int i, j;
1099
1100 for (i = 0; i < context->target_count; i++) {
1101 struct core_target *target = context->targets[i];
1102
1103 for (j = 0; j < target->public.stream_count; j++) {
1104 struct core_stream *stream =
1105 DC_STREAM_TO_CORE(target->public.streams[j]);
1106
1107 update_stream_signal(stream);
1108
1109 /* update actual pixel clock on all streams */
1110 if (dc_is_hdmi_signal(stream->signal))
1111 stream->phy_pix_clk = get_norm_pix_clk(
1112 &stream->public.timing);
1113 else
1114 stream->phy_pix_clk =
1115 stream->public.timing.pix_clk_khz;
1116 }
1117 }
1118}
1119
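/* Two passes over the new context: first re-claim the pipe, stream
 * encoder, clock source and audio already used by streams that are
 * unchanged from dc->current_context, then acquire fresh resources
 * (pipe, stream encoder and, where the sink and signal allow it, an
 * audio endpoint) for every stream that did change.
 */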
1120enum dc_status resource_map_pool_resources(
1121 const struct core_dc *dc,
1122 struct validate_context *context)
1123{
1124 int i, j, k;
1125
1126 calculate_phy_pix_clks(dc, context);
1127
1128 for (i = 0; i < context->target_count; i++) {
1129 struct core_target *target = context->targets[i];
1130
1131 for (j = 0; j < target->public.stream_count; j++) {
1132 struct core_stream *stream =
1133 DC_STREAM_TO_CORE(target->public.streams[j]);
1134
1135 if (!resource_is_stream_unchanged(dc->current_context, stream))
1136 continue;
1137
1138 /* mark resources used for stream that is already active */
1139 for (k = 0; k < MAX_PIPES; k++) {
1140 struct pipe_ctx *pipe_ctx =
1141 &context->res_ctx.pipe_ctx[k];
1142 const struct pipe_ctx *old_pipe_ctx =
1143 &dc->current_context->res_ctx.pipe_ctx[k];
1144
1145 if (!are_stream_backends_same(old_pipe_ctx->stream, stream))
1146 continue;
1147
1148 pipe_ctx->stream = stream;
1149 copy_pipe_ctx(old_pipe_ctx, pipe_ctx);
1150
1151 set_stream_engine_in_use(
1152 &context->res_ctx,
1153 pipe_ctx->stream_enc);
1154
1155 /* Switch to dp clock source only if there is
1156 * no non dp stream that shares the same timing
1157 * with the dp stream.
1158 */
1159 if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
1160 !find_pll_sharable_stream(stream, context))
1161 pipe_ctx->clock_source =
1162 context->res_ctx.pool->dp_clock_source;
1163
1164 resource_reference_clock_source(
1165 &context->res_ctx,
1166 pipe_ctx->clock_source);
1167
1168 set_audio_in_use(&context->res_ctx,
1169 pipe_ctx->audio);
1170 }
1171 }
1172 }
1173
1174 for (i = 0; i < context->target_count; i++) {
1175 struct core_target *target = context->targets[i];
1176
1177 for (j = 0; j < target->public.stream_count; j++) {
1178 struct core_stream *stream =
1179 DC_STREAM_TO_CORE(target->public.streams[j]);
1180 struct pipe_ctx *pipe_ctx = NULL;
1181 int pipe_idx = -1;
1182
1183 if (resource_is_stream_unchanged(dc->current_context, stream))
1184 continue;
1185 /* acquire new resources */
1186 pipe_idx = acquire_first_free_pipe(
1187 &context->res_ctx, stream);
1188 if (pipe_idx < 0)
1189 return DC_NO_CONTROLLER_RESOURCE;
1190
1191
1192 pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
1193
1194 pipe_ctx->stream_enc =
1195 find_first_free_match_stream_enc_for_link(
1196 &context->res_ctx, stream);
1197
1198 if (!pipe_ctx->stream_enc)
1199 return DC_NO_STREAM_ENG_RESOURCE;
1200
1201 set_stream_engine_in_use(
1202 &context->res_ctx,
1203 pipe_ctx->stream_enc);
1204
1205 /* TODO: Add check if ASIC support and EDID audio */
1206 if (!stream->sink->converter_disable_audio &&
1207 dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
1208 stream->public.audio_info.mode_count) {
1209 pipe_ctx->audio = find_first_free_audio(
1210 &context->res_ctx);
1211
1212 /*
1213 * Audio is assigned on a first-come, first-served basis.
1214 * There are ASICs that have fewer audio
1215 * resources than pipes.
1216 */
1217 if (pipe_ctx->audio)
1218 set_audio_in_use(
1219 &context->res_ctx,
1220 pipe_ctx->audio);
1221 }
1222
1223 if (j == 0) {
1224 context->target_status[i].primary_otg_inst =
1225 pipe_ctx->tg->inst;
1226 }
1227 }
1228 }
1229
1230 return DC_OK;
1231}
1232
1233/* first target in the context is used to populate the rest */
1234void validate_guaranteed_copy_target(
1235 struct validate_context *context,
1236 int max_targets)
1237{
1238 int i;
1239
1240 for (i = 1; i < max_targets; i++) {
1241 context->targets[i] = context->targets[0];
1242
1243 copy_pipe_ctx(&context->res_ctx.pipe_ctx[0],
1244 &context->res_ctx.pipe_ctx[i]);
1245 context->res_ctx.pipe_ctx[i].stream =
1246 context->res_ctx.pipe_ctx[0].stream;
1247
1248 dc_target_retain(&context->targets[i]->public);
1249 context->target_count++;
1250 }
1251}
1252
1253static void translate_info_frame(const struct hw_info_frame *hw_info_frame,
1254 struct encoder_info_frame *encoder_info_frame)
1255{
1256 memset(
1257 encoder_info_frame, 0, sizeof(struct encoder_info_frame));
1258
1259 /* For gamut we recalc checksum */
1260 if (hw_info_frame->gamut_packet.valid) {
1261 uint8_t chk_sum = 0;
1262 uint8_t *ptr;
1263 uint8_t i;
1264
1265 memmove(
1266 &encoder_info_frame->gamut,
1267 &hw_info_frame->gamut_packet,
1268 sizeof(struct hw_info_packet));
1269
1270 /*start of the Gamut data. */
1271 ptr = &encoder_info_frame->gamut.sb[3];
1272
1273 for (i = 0; i <= encoder_info_frame->gamut.sb[1]; i++)
1274 chk_sum += ptr[i];
1275
1276 encoder_info_frame->gamut.sb[2] = (uint8_t) (0x100 - chk_sum);
1277 }
1278
1279 if (hw_info_frame->avi_info_packet.valid) {
1280 memmove(
1281 &encoder_info_frame->avi,
1282 &hw_info_frame->avi_info_packet,
1283 sizeof(struct hw_info_packet));
1284 }
1285
1286 if (hw_info_frame->vendor_info_packet.valid) {
1287 memmove(
1288 &encoder_info_frame->vendor,
1289 &hw_info_frame->vendor_info_packet,
1290 sizeof(struct hw_info_packet));
1291 }
1292
1293 if (hw_info_frame->spd_packet.valid) {
1294 memmove(
1295 &encoder_info_frame->spd,
1296 &hw_info_frame->spd_packet,
1297 sizeof(struct hw_info_packet));
1298 }
1299
1300 if (hw_info_frame->vsc_packet.valid) {
1301 memmove(
1302 &encoder_info_frame->vsc,
1303 &hw_info_frame->vsc_packet,
1304 sizeof(struct hw_info_packet));
1305 }
1306}
1307
1308static void set_avi_info_frame(
1309 struct hw_info_packet *info_packet,
1310 struct pipe_ctx *pipe_ctx)
1311{
1312 struct core_stream *stream = pipe_ctx->stream;
1313 enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
1314 struct info_frame info_frame = { {0} };
1315 uint32_t pixel_encoding = 0;
1316 enum scanning_type scan_type = SCANNING_TYPE_NODATA;
1317 enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA;
1318 bool itc = false;
1319 uint8_t cn0_cn1 = 0;
1320 uint8_t *check_sum = NULL;
1321 uint8_t byte_index = 0;
1322
1323 if (info_packet == NULL)
1324 return;
1325
1326 color_space = pipe_ctx->stream->public.output_color_space;
1327
1328 /* Initialize header */
1329 info_frame.avi_info_packet.info_packet_hdmi.bits.header.
1330 info_frame_type = INFO_FRAME_AVI;
1331 /* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall
1332 * not be used in HDMI 2.0 (Section 10.1) */
1333 info_frame.avi_info_packet.info_packet_hdmi.bits.header.version =
1334 INFO_FRAME_VERSION_2;
1335 info_frame.avi_info_packet.info_packet_hdmi.bits.header.length =
1336 INFO_FRAME_SIZE_AVI;
1337
1338 /*
1339 * IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built
1340 * according to HDMI 2.0 spec (Section 10.1)
1341 */
1342
1343 switch (stream->public.timing.pixel_encoding) {
1344 case PIXEL_ENCODING_YCBCR422:
1345 pixel_encoding = 1;
1346 break;
1347
1348 case PIXEL_ENCODING_YCBCR444:
1349 pixel_encoding = 2;
1350 break;
1351 case PIXEL_ENCODING_YCBCR420:
1352 pixel_encoding = 3;
1353 break;
1354
1355 case PIXEL_ENCODING_RGB:
1356 default:
1357 pixel_encoding = 0;
1358 }
1359
1360 /* Y0_Y1_Y2 : The pixel encoding */
1361 /* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */
1362 info_frame.avi_info_packet.info_packet_hdmi.bits.Y0_Y1_Y2 =
1363 pixel_encoding;
1364
1365 /* A0 = 1 Active Format Information valid */
1366 info_frame.avi_info_packet.info_packet_hdmi.bits.A0 =
1367 ACTIVE_FORMAT_VALID;
1368
1369 /* B0, B1 = 3; Bar info data is valid */
1370 info_frame.avi_info_packet.info_packet_hdmi.bits.B0_B1 =
1371 BAR_INFO_BOTH_VALID;
1372
1373 info_frame.avi_info_packet.info_packet_hdmi.bits.SC0_SC1 =
1374 PICTURE_SCALING_UNIFORM;
1375
1376 /* S0, S1 : Underscan / Overscan */
1377 /* TODO: un-hardcode scan type */
1378 scan_type = SCANNING_TYPE_UNDERSCAN;
1379 info_frame.avi_info_packet.info_packet_hdmi.bits.S0_S1 = scan_type;
1380
1381 /* C0, C1 : Colorimetry */
1382 if (color_space == COLOR_SPACE_YCBCR709)
1383 info_frame.avi_info_packet.info_packet_hdmi.bits.C0_C1 =
1384 COLORIMETRY_ITU709;
1385 else if (color_space == COLOR_SPACE_YCBCR601)
1386 info_frame.avi_info_packet.info_packet_hdmi.bits.C0_C1 =
1387 COLORIMETRY_ITU601;
1388 else
1389 info_frame.avi_info_packet.info_packet_hdmi.bits.C0_C1 =
1390 COLORIMETRY_NO_DATA;
1391
1392 /* TODO: un-hardcode aspect ratio */
1393 aspect = stream->public.timing.aspect_ratio;
1394
1395 switch (aspect) {
1396 case ASPECT_RATIO_4_3:
1397 case ASPECT_RATIO_16_9:
1398 info_frame.avi_info_packet.info_packet_hdmi.bits.M0_M1 = aspect;
1399 break;
1400
1401 case ASPECT_RATIO_NO_DATA:
1402 case ASPECT_RATIO_64_27:
1403 case ASPECT_RATIO_256_135:
1404 default:
1405 info_frame.avi_info_packet.info_packet_hdmi.bits.M0_M1 = 0;
1406 }
1407
1408 /* Active Format Aspect ratio - same as Picture Aspect Ratio. */
1409 info_frame.avi_info_packet.info_packet_hdmi.bits.R0_R3 =
1410 ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;
1411
1412 /* TODO: un-hardcode cn0_cn1 and itc */
1413 cn0_cn1 = 0;
1414 itc = false;
1415
1416 if (itc) {
1417 info_frame.avi_info_packet.info_packet_hdmi.bits.ITC = 1;
1418 info_frame.avi_info_packet.info_packet_hdmi.bits.CN0_CN1 =
1419 cn0_cn1;
1420 }
1421
1422 /* TODO : We should handle YCC quantization */
1423 /* but we do not have matrix calculation */
1424 if (color_space == COLOR_SPACE_SRGB) {
1425 info_frame.avi_info_packet.info_packet_hdmi.bits.Q0_Q1 =
1426 RGB_QUANTIZATION_FULL_RANGE;
1427 info_frame.avi_info_packet.info_packet_hdmi.bits.YQ0_YQ1 =
1428 YYC_QUANTIZATION_FULL_RANGE;
1429 } else if (color_space == COLOR_SPACE_SRGB_LIMITED) {
1430 info_frame.avi_info_packet.info_packet_hdmi.bits.Q0_Q1 =
1431 RGB_QUANTIZATION_LIMITED_RANGE;
1432 info_frame.avi_info_packet.info_packet_hdmi.bits.YQ0_YQ1 =
1433 YYC_QUANTIZATION_LIMITED_RANGE;
1434 } else {
1435 info_frame.avi_info_packet.info_packet_hdmi.bits.Q0_Q1 =
1436 RGB_QUANTIZATION_DEFAULT_RANGE;
1437 info_frame.avi_info_packet.info_packet_hdmi.bits.YQ0_YQ1 =
1438 YYC_QUANTIZATION_LIMITED_RANGE;
1439 }
1440
1441 info_frame.avi_info_packet.info_packet_hdmi.bits.VIC0_VIC7 =
1442 stream->public.timing.vic;
1443
1444 /* pixel repetition
1445 * PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel
1446 * repetition start from 1 */
1447 info_frame.avi_info_packet.info_packet_hdmi.bits.PR0_PR3 = 0;
1448
1449 /* Bar Info
1450 * barTop: Line Number of End of Top Bar.
1451 * barBottom: Line Number of Start of Bottom Bar.
1452 * barLeft: Pixel Number of End of Left Bar.
1453 * barRight: Pixel Number of Start of Right Bar. */
1454 info_frame.avi_info_packet.info_packet_hdmi.bits.bar_top =
1455 stream->public.timing.v_border_top;
1456 info_frame.avi_info_packet.info_packet_hdmi.bits.bar_bottom =
1457 (stream->public.timing.v_border_top
1458 - stream->public.timing.v_border_bottom + 1);
1459 info_frame.avi_info_packet.info_packet_hdmi.bits.bar_left =
1460 stream->public.timing.h_border_left;
1461 info_frame.avi_info_packet.info_packet_hdmi.bits.bar_right =
1462 (stream->public.timing.h_total
1463 - stream->public.timing.h_border_right + 1);
1464
1465 /* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
1466 check_sum =
1467 &info_frame.
1468 avi_info_packet.info_packet_hdmi.packet_raw_data.sb[0];
1469 *check_sum = INFO_FRAME_AVI + INFO_FRAME_SIZE_AVI
1470 + INFO_FRAME_VERSION_2;
1471
1472 for (byte_index = 1; byte_index <= INFO_FRAME_SIZE_AVI; byte_index++)
1473 *check_sum += info_frame.avi_info_packet.info_packet_hdmi.
1474 packet_raw_data.sb[byte_index];
1475
1476 /* one byte complement */
1477 *check_sum = (uint8_t) (0x100 - *check_sum);
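/* With the complement applied, the header bytes plus the payload
 * (including this checksum byte) sum to zero modulo 256, which is the
 * property a sink uses to validate the infoframe.
 */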
1478
1479 /* Store in hw_path_mode */
1480 info_packet->hb0 =
1481 info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.hb0;
1482 info_packet->hb1 =
1483 info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.hb1;
1484 info_packet->hb2 =
1485 info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.hb2;
1486
1487 for (byte_index = 0; byte_index < sizeof(info_packet->sb); byte_index++)
1488 info_packet->sb[byte_index] = info_frame.avi_info_packet.
1489 info_packet_hdmi.packet_raw_data.sb[byte_index];
1490
1491 info_packet->valid = true;
1492}
1493
1494static void set_vendor_info_packet(struct core_stream *stream,
1495 struct hw_info_packet *info_packet)
1496{
1497 uint32_t length = 0;
1498 bool hdmi_vic_mode = false;
1499 uint8_t checksum = 0;
1500 uint32_t i = 0;
1501 enum dc_timing_3d_format format;
1502
1503 ASSERT_CRITICAL(stream != NULL);
1504 ASSERT_CRITICAL(info_packet != NULL);
1505
1506 format = stream->public.timing.timing_3d_format;
1507
1508 /* Can be different depending on packet content */
1509 length = 5;
1510
1511 if (stream->public.timing.hdmi_vic != 0
1512 && stream->public.timing.h_total >= 3840
1513 && stream->public.timing.v_total >= 2160)
1514 hdmi_vic_mode = true;
1515
1516 /* According to HDMI 1.4a CTS, VSIF should be sent
1517 * for both 3D stereo and HDMI VIC modes.
1518 * For all other modes, there is no VSIF sent. */
1519
1520 if (format == TIMING_3D_FORMAT_NONE && !hdmi_vic_mode)
1521 return;
1522
1523 /* 24bit IEEE Registration identifier (0x000c03). LSB first. */
1524 info_packet->sb[1] = 0x03;
1525 info_packet->sb[2] = 0x0C;
1526 info_packet->sb[3] = 0x00;
1527
1528 /* PB4: 5 lower bits = 0 (reserved). 3 higher bits = HDMI_Video_Format.
1529 * The value for HDMI_Video_Format are:
1530 * 0x0 (0b000) - No additional HDMI video format is presented in this
1531 * packet
1532 * 0x1 (0b001) - Extended resolution format present. 1 byte of HDMI_VIC
1533 * parameter follows
1534 * 0x2 (0b010) - 3D format indication present. 3D_Structure and
1535 * potentially 3D_Ext_Data follows
1536 * 0x3..0x7 (0b011..0b111) - reserved for future use */
1537 if (format != TIMING_3D_FORMAT_NONE)
1538 info_packet->sb[4] = (2 << 5);
1539 else if (hdmi_vic_mode)
1540 info_packet->sb[4] = (1 << 5);
1541
1542 /* PB5: If PB4 claims 3D timing (HDMI_Video_Format = 0x2):
1543 * 4 lower bits = 0 (reserved). 4 higher bits = 3D_Structure.
1544 * The value for 3D_Structure are:
1545 * 0x0 - Frame Packing
1546 * 0x1 - Field Alternative
1547 * 0x2 - Line Alternative
1548 * 0x3 - Side-by-Side (full)
1549 * 0x4 - L + depth
1550 * 0x5 - L + depth + graphics + graphics-depth
1551 * 0x6 - Top-and-Bottom
1552 * 0x7 - Reserved for future use
1553 * 0x8 - Side-by-Side (Half)
1554 * 0x9..0xE - Reserved for future use
1555 * 0xF - Not used */
1556 switch (format) {
1557 case TIMING_3D_FORMAT_HW_FRAME_PACKING:
1558 case TIMING_3D_FORMAT_SW_FRAME_PACKING:
1559 info_packet->sb[5] = (0x0 << 4);
1560 break;
1561
1562 case TIMING_3D_FORMAT_SIDE_BY_SIDE:
1563 case TIMING_3D_FORMAT_SBS_SW_PACKED:
1564 info_packet->sb[5] = (0x8 << 4);
1565 length = 6;
1566 break;
1567
1568 case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
1569 case TIMING_3D_FORMAT_TB_SW_PACKED:
1570 info_packet->sb[5] = (0x6 << 4);
1571 break;
1572
1573 default:
1574 break;
1575 }
1576
1577 /*PB5: If PB4 is set to 0x1 (extended resolution format)
1578 * fill PB5 with the correct HDMI VIC code */
1579 if (hdmi_vic_mode)
1580 info_packet->sb[5] = stream->public.timing.hdmi_vic;
1581
1582 /* Header */
1583 info_packet->hb0 = 0x81; /* VSIF packet type. */
1584 info_packet->hb1 = 0x01; /* Version */
1585
1586 /* 4 lower bits = Length, 4 higher bits = 0 (reserved) */
1587 info_packet->hb2 = (uint8_t) (length);
1588
1589 /* Calculate checksum */
1590 checksum = 0;
1591 checksum += info_packet->hb0;
1592 checksum += info_packet->hb1;
1593 checksum += info_packet->hb2;
1594
1595 for (i = 1; i <= length; i++)
1596 checksum += info_packet->sb[i];
1597
1598 info_packet->sb[0] = (uint8_t) (0x100 - checksum);
1599
1600 info_packet->valid = true;
1601}
1602
1603static void set_spd_info_packet(struct core_stream *stream,
1604 struct hw_info_packet *info_packet)
1605{
1606 /* SPD info packet for FreeSync */
1607
1608 unsigned char checksum = 0;
1609 unsigned int idx, payload_size = 0;
1610
1611 /* Check if Freesync is supported. Return if false. If true,
1612 * set the corresponding bit in the info packet
1613 */
1614 if (stream->public.freesync_ctx.supported == false)
1615 return;
1616
1617 if (dc_is_hdmi_signal(stream->signal)) {
1618
1619 /* HEADER */
1620
1621 /* HB0 = Packet Type = 0x83 (Source Product
1622 * Descriptor InfoFrame)
1623 */
1624 info_packet->hb0 = 0x83;
1625
1626 /* HB1 = Version = 0x01 */
1627 info_packet->hb1 = 0x01;
1628
1629 /* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x08] */
1630 info_packet->hb2 = 0x08;
1631
1632 payload_size = 0x08;
1633
1634 } else if (dc_is_dp_signal(stream->signal)) {
1635
1636 /* HEADER */
1637
1638 /* HB0 = Secondary-data Packet ID = 0 - Only non-zero
1639 * when used to associate audio related info packets
1640 */
1641 info_packet->hb0 = 0x00;
1642
1643 /* HB1 = Packet Type = 0x83 (Source Product
1644 * Descriptor InfoFrame)
1645 */
1646 info_packet->hb1 = 0x83;
1647
1648 /* HB2 = [Bits 7:0 = Least significant eight bits -
1649 * For INFOFRAME, the value must be 1Bh]
1650 */
1651 info_packet->hb2 = 0x1B;
1652
1653 /* HB3 = [Bits 7:2 = INFOFRAME SDP Version Number = 0x1]
1654 * [Bits 1:0 = Most significant two bits = 0x00]
1655 */
1656 info_packet->hb3 = 0x04;
1657
1658 payload_size = 0x1B;
1659 }
1660
1661 /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
1662 info_packet->sb[1] = 0x1A;
1663
1664 /* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
1665 info_packet->sb[2] = 0x00;
1666
1667 /* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
1668 info_packet->sb[3] = 0x00;
1669
1670 /* PB4 = Reserved */
1671 info_packet->sb[4] = 0x00;
1672
1673 /* PB5 = Reserved */
1674 info_packet->sb[5] = 0x00;
1675
1676 /* PB6 = [Bits 7:3 = Reserved] */
1677 info_packet->sb[6] = 0x00;
1678
1679 if (stream->public.freesync_ctx.supported == true)
1680 /* PB6 = [Bit 0 = FreeSync Supported] */
1681 info_packet->sb[6] |= 0x01;
1682
1683 if (stream->public.freesync_ctx.enabled == true)
1684 /* PB6 = [Bit 1 = FreeSync Enabled] */
1685 info_packet->sb[6] |= 0x02;
1686
1687 if (stream->public.freesync_ctx.active == true)
1688 /* PB6 = [Bit 2 = FreeSync Active] */
1689 info_packet->sb[6] |= 0x04;
1690
1691 /* PB7 = FreeSync Minimum refresh rate (Hz) */
1692 info_packet->sb[7] = (unsigned char) (stream->public.freesync_ctx.
1693 min_refresh_in_micro_hz / 1000000);
1694
1695 /* PB8 = FreeSync Maximum refresh rate (Hz)
1696 *
1697 * Note: We do not use the maximum capable refresh rate
1698 * of the panel, because we should never go above the field
1699 * rate of the mode timing set.
1700 */
1701 info_packet->sb[8] = (unsigned char) (stream->public.freesync_ctx.
1702 nominal_refresh_in_micro_hz / 1000000);
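/* Illustrative example: a 40-60 Hz FreeSync range reported as
 * 40000000 and 60000000 micro-Hz ends up as sb[7] = 40 and
 * sb[8] = 60.
 */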
1703
1704 /* PB9 - PB27 = Reserved */
1705 for (idx = 9; idx <= 27; idx++)
1706 info_packet->sb[idx] = 0x00;
1707
1708 /* Calculate checksum */
1709 checksum += info_packet->hb0;
1710 checksum += info_packet->hb1;
1711 checksum += info_packet->hb2;
1712 checksum += info_packet->hb3;
1713
1714 for (idx = 1; idx <= payload_size; idx++)
1715 checksum += info_packet->sb[idx];
1716
1717 /* PB0 = Checksum (one byte complement) */
1718 info_packet->sb[0] = (unsigned char) (0x100 - checksum);
1719
1720 info_packet->valid = true;
1721}
1722
1723static void set_vsc_info_packet(struct core_stream *stream,
1724 struct hw_info_packet *info_packet)
1725{
1726 unsigned int vscPacketRevision = 0;
1727 unsigned int i;
1728
1729 if (stream->sink->link->public.psr_caps.psr_version != 0) {
1730 vscPacketRevision = 2;
1731 }
1732
1733 /* VSC packet not needed based on the features
1734 * supported by this DP display
1735 */
1736 if (vscPacketRevision == 0)
1737 return;
1738
1739 if (vscPacketRevision == 0x2) {
1740 /* Secondary-data Packet ID = 0*/
1741 info_packet->hb0 = 0x00;
1742 /* 07h - Packet Type Value indicating Video
1743 * Stream Configuration packet
1744 */
1745 info_packet->hb1 = 0x07;
1746 /* 02h = VSC SDP supporting 3D stereo and PSR
1747 * (applies to eDP v1.3 or higher).
1748 */
1749 info_packet->hb2 = 0x02;
1750 /* 08h = VSC packet supporting 3D stereo + PSR
1751 * (HB2 = 02h).
1752 */
1753 info_packet->hb3 = 0x08;
1754
1755 for (i = 0; i < 28; i++)
1756 info_packet->sb[i] = 0;
1757
1758 info_packet->valid = true;
1759 }
1760
1761 /*TODO: stereo 3D support and extend pixel encoding colorimetry*/
1762}
1763
1764void resource_validate_ctx_destruct(struct validate_context *context)
1765{
1766 int i, j;
1767
1768 for (i = 0; i < context->target_count; i++) {
1769 for (j = 0; j < context->target_status[i].surface_count; j++)
1770 dc_surface_release(
1771 context->target_status[i].surfaces[j]);
1772
1773 context->target_status[i].surface_count = 0;
1774 dc_target_release(&context->targets[i]->public);
1775 }
1776}
1777
1778/*
1779 * Copy src_ctx into dst_ctx and retain all surfaces and targets referenced
1780 * by the src_ctx
1781 */
1782void resource_validate_ctx_copy_construct(
1783 const struct validate_context *src_ctx,
1784 struct validate_context *dst_ctx)
1785{
1786 int i, j;
1787
1788 *dst_ctx = *src_ctx;
1789
1790 for (i = 0; i < dst_ctx->res_ctx.pool->pipe_count; i++) {
1791 struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i];
1792
1793 if (cur_pipe->top_pipe)
1794 cur_pipe->top_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
1795
1796 if (cur_pipe->bottom_pipe)
1797 cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
1798
1799 }
1800
1801 for (i = 0; i < dst_ctx->target_count; i++) {
1802 dc_target_retain(&dst_ctx->targets[i]->public);
1803 for (j = 0; j < dst_ctx->target_status[i].surface_count; j++)
1804 dc_surface_retain(
1805 dst_ctx->target_status[i].surfaces[j]);
1806 }
1807}
1808
1809struct clock_source *dc_resource_find_first_free_pll(
1810 struct resource_context *res_ctx)
1811{
1812 int i;
1813
1814 for (i = 0; i < res_ctx->pool->clk_src_count; ++i) {
1815 if (res_ctx->clock_source_ref_count[i] == 0)
1816 return res_ctx->pool->clock_sources[i];
1817 }
1818
1819 return NULL;
1820}
1821
1822void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
1823{
1824 enum signal_type signal = SIGNAL_TYPE_NONE;
1825 struct hw_info_frame info_frame = { { 0 } };
1826
1827 /* default all packets to invalid */
1828 info_frame.avi_info_packet.valid = false;
1829 info_frame.gamut_packet.valid = false;
1830 info_frame.vendor_info_packet.valid = false;
1831 info_frame.spd_packet.valid = false;
1832 info_frame.vsc_packet.valid = false;
1833
1834 signal = pipe_ctx->stream->signal;
1835
1836 /* HDMI and DP have different info packets */
1837 if (dc_is_hdmi_signal(signal)) {
1838 set_avi_info_frame(
1839 &info_frame.avi_info_packet, pipe_ctx);
1840 set_vendor_info_packet(
1841 pipe_ctx->stream, &info_frame.vendor_info_packet);
1842 set_spd_info_packet(pipe_ctx->stream, &info_frame.spd_packet);
1843 } else if (dc_is_dp_signal(signal)) {
1844 set_vsc_info_packet(pipe_ctx->stream, &info_frame.vsc_packet);
1845 set_spd_info_packet(pipe_ctx->stream, &info_frame.spd_packet);
1846 }
1847
1848 translate_info_frame(&info_frame,
1849 &pipe_ctx->encoder_info_frame);
1850}
1851
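/* Pick a clock source for every changed stream: DP and virtual
 * streams use the pool's dedicated DP clock source; everything else
 * first tries to share an already-used PLL (unless PLL sharing is
 * disabled) and only then claims the first free one.
 */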
1852enum dc_status resource_map_clock_resources(
1853 const struct core_dc *dc,
1854 struct validate_context *context)
1855{
1856 int i, j, k;
1857
1858 /* acquire new resources */
1859 for (i = 0; i < context->target_count; i++) {
1860 struct core_target *target = context->targets[i];
1861
1862 for (j = 0; j < target->public.stream_count; j++) {
1863 struct core_stream *stream =
1864 DC_STREAM_TO_CORE(target->public.streams[j]);
1865
1866 if (resource_is_stream_unchanged(dc->current_context, stream))
1867 continue;
1868
1869 for (k = 0; k < MAX_PIPES; k++) {
1870 struct pipe_ctx *pipe_ctx =
1871 &context->res_ctx.pipe_ctx[k];
1872
1873 if (context->res_ctx.pipe_ctx[k].stream != stream)
1874 continue;
1875
1876 if (dc_is_dp_signal(pipe_ctx->stream->signal)
1877 || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
1878 pipe_ctx->clock_source =
1879 context->res_ctx.pool->dp_clock_source;
1880 else {
1881 pipe_ctx->clock_source = NULL;
1882
1883 if (!dc->public.config.disable_disp_pll_sharing)
1884 pipe_ctx->clock_source = resource_find_used_clk_src_for_sharing(
1885 &context->res_ctx,
1886 pipe_ctx);
1887
1888 if (pipe_ctx->clock_source == NULL)
1889 pipe_ctx->clock_source =
1890 dc_resource_find_first_free_pll(&context->res_ctx);
1891 }
1892
1893 if (pipe_ctx->clock_source == NULL)
1894 return DC_NO_CLOCK_SOURCE_RESOURCE;
1895
1896 resource_reference_clock_source(
1897 &context->res_ctx,
1898 pipe_ctx->clock_source);
1899
1900 /* only one cs per stream regardless of mpo */
1901 break;
1902 }
1903 }
1904 }
1905
1906 return DC_OK;
1907}
1908
1909/*
1910 * Note: We need to disable output if clock sources change,
1911 * since bios does optimization and doesn't apply if changing
1912 * PHY when not already disabled.
1913 */
1914bool pipe_need_reprogram(
1915 struct pipe_ctx *pipe_ctx_old,
1916 struct pipe_ctx *pipe_ctx)
1917{
1918 if (pipe_ctx_old->stream->sink != pipe_ctx->stream->sink)
1919 return true;
1920
1921 if (pipe_ctx_old->stream->signal != pipe_ctx->stream->signal)
1922 return true;
1923
1924 if (pipe_ctx_old->audio != pipe_ctx->audio)
1925 return true;
1926
1927 if (pipe_ctx_old->clock_source != pipe_ctx->clock_source
1928 && pipe_ctx_old->stream != pipe_ctx->stream)
1929 return true;
1930
1931 if (pipe_ctx_old->stream_enc != pipe_ctx->stream_enc)
1932 return true;
1933
1934 if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
1935 return true;
1936
1937
1938 return false;
1939}