/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dc.h"

#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn10/dcn10_resource.h"

#include "dcn10/dcn10_ipp.h"
#include "dcn10/dcn10_mpc.h"
#include "irq/dcn10/irq_service_dcn10.h"
#include "dcn10/dcn10_dpp.h"
#include "dcn10/dcn10_timing_generator.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_opp.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
#include "dce/dce_clocks.h"
#include "dce/dce_clock_source.h"
#include "dcn10/dcn10_mem_input.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "../virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"

#include "vega10/soc15ip.h"

#include "raven1/DCN/dcn_1_0_offset.h"
#include "raven1/DCN/dcn_1_0_sh_mask.h"

#include "raven1/NBIO/nbio_7_0_offset.h"

#include "raven1/MMHUB/mmhub_9_1_offset.h"
#include "raven1/MMHUB/mmhub_9_1_sh_mask.h"

#include "reg_helper.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"

#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL		0x210f
#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
#define mmDP1_DP_DPHY_INTERNAL_CTRL		0x220f
#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
#define mmDP2_DP_DPHY_INTERNAL_CTRL		0x230f
#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
#define mmDP3_DP_DPHY_INTERNAL_CTRL		0x240f
#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
#define mmDP4_DP_DPHY_INTERNAL_CTRL		0x250f
#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
#define mmDP5_DP_DPHY_INTERNAL_CTRL		0x260f
#define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
#define mmDP6_DP_DPHY_INTERNAL_CTRL		0x270f
#define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
#endif


enum dcn10_clk_src_array_id {
	DCN10_CLK_SRC_PLL0,
	DCN10_CLK_SRC_PLL1,
	DCN10_CLK_SRC_PLL2,
	DCN10_CLK_SRC_PLL3,
	DCN10_CLK_SRC_TOTAL
};

/* begin *********************
 * macros to expand register list macro defined in HW object header file */

/* DCN */
#define BASE_INNER(seg) \
	DCE_BASE__INST0_SEG ## seg

#define BASE(seg) \
	BASE_INNER(seg)

#define SR(reg_name)\
	.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
			mm ## reg_name

#define SRI(reg_name, block, id)\
	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
			mm ## block ## id ## _ ## reg_name


#define SRII(reg_name, block, id)\
	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
			mm ## block ## id ## _ ## reg_name

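/*
 * Example expansion: with the DP0 fallback defines above,
 * SRI(DP_DPHY_INTERNAL_CTRL, DP, 0) becomes
 *	.DP_DPHY_INTERNAL_CTRL = BASE(2) + 0x210f
 * i.e. DCE_BASE__INST0_SEG2 + mmDP0_DP_DPHY_INTERNAL_CTRL, so each field
 * in the register structs below ends up holding an absolute MMIO offset
 * for that block instance.
 */
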
/* NBIO */
#define NBIO_BASE_INNER(seg) \
	NBIF_BASE__INST0_SEG ## seg

#define NBIO_BASE(seg) \
	NBIO_BASE_INNER(seg)

#define NBIO_SR(reg_name)\
	.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
			mm ## reg_name

/* MMHUB */
#define MMHUB_BASE_INNER(seg) \
	MMHUB_BASE__INST0_SEG ## seg

#define MMHUB_BASE(seg) \
	MMHUB_BASE_INNER(seg)

#define MMHUB_SR(reg_name)\
	.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
			mm ## reg_name

/* macros to expand register list macro defined in HW object header file
 * end *********************/


static const struct dce_dmcu_registers dmcu_regs = {
	DMCU_DCN10_REG_LIST()
};

static const struct dce_dmcu_shift dmcu_shift = {
	DMCU_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dce_dmcu_mask dmcu_mask = {
	DMCU_MASK_SH_LIST_DCN10(_MASK)
};

static const struct dce_abm_registers abm_regs = {
	ABM_DCN10_REG_LIST(0)
};

static const struct dce_abm_shift abm_shift = {
	ABM_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dce_abm_mask abm_mask = {
	ABM_MASK_SH_LIST_DCN10(_MASK)
};

#define stream_enc_regs(id)\
[id] = {\
	SE_DCN_REG_LIST(id),\
	.TMDS_CNTL = 0,\
	.AFMT_AVI_INFO0 = 0,\
	.AFMT_AVI_INFO1 = 0,\
	.AFMT_AVI_INFO2 = 0,\
	.AFMT_AVI_INFO3 = 0,\
}

static const struct dce110_stream_enc_registers stream_enc_regs[] = {
	stream_enc_regs(0),
	stream_enc_regs(1),
	stream_enc_regs(2),
	stream_enc_regs(3),
};

static const struct dce_stream_encoder_shift se_shift = {
	SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dce_stream_encoder_mask se_mask = {
	SE_COMMON_MASK_SH_LIST_DCN10(_MASK),
	.AFMT_GENERIC0_UPDATE = 0,
	.AFMT_GENERIC2_UPDATE = 0,
	.DP_DYN_RANGE = 0,
	.DP_YCBCR_RANGE = 0,
	.HDMI_AVI_INFO_SEND = 0,
	.HDMI_AVI_INFO_CONT = 0,
	.HDMI_AVI_INFO_LINE = 0,
	.DP_SEC_AVI_ENABLE = 0,
	.AFMT_AVI_INFO_VERSION = 0
};

#define audio_regs(id)\
[id] = {\
	AUD_COMMON_REG_LIST(id)\
}

static const struct dce_audio_registers audio_regs[] = {
	audio_regs(0),
	audio_regs(1),
	audio_regs(2),
	audio_regs(3),
};

#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
	SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
	SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
	AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)

static const struct dce_audio_shift audio_shift = {
	DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};

static const struct dce_aduio_mask audio_mask = {
	DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};

#define aux_regs(id)\
[id] = {\
	AUX_REG_LIST(id)\
}

static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
	aux_regs(0),
	aux_regs(1),
	aux_regs(2),
	aux_regs(3),
	aux_regs(4),
	aux_regs(5)
};

#define hpd_regs(id)\
[id] = {\
	HPD_REG_LIST(id)\
}

static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
	hpd_regs(0),
	hpd_regs(1),
	hpd_regs(2),
	hpd_regs(3),
	hpd_regs(4),
	hpd_regs(5)
};

#define link_regs(id)\
[id] = {\
	LE_DCN10_REG_LIST(id), \
	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}

static const struct dce110_link_enc_registers link_enc_regs[] = {
	link_regs(0),
	link_regs(1),
	link_regs(2),
	link_regs(3),
	link_regs(4),
	link_regs(5),
	link_regs(6),
};

#define ipp_regs(id)\
[id] = {\
	IPP_REG_LIST_DCN10(id),\
}

static const struct dcn10_ipp_registers ipp_regs[] = {
	ipp_regs(0),
	ipp_regs(1),
	ipp_regs(2),
	ipp_regs(3),
};

static const struct dcn10_ipp_shift ipp_shift = {
	IPP_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn10_ipp_mask ipp_mask = {
	IPP_MASK_SH_LIST_DCN10(_MASK),
};

#define opp_regs(id)\
[id] = {\
	OPP_REG_LIST_DCN10(id),\
}

static const struct dcn10_opp_registers opp_regs[] = {
	opp_regs(0),
	opp_regs(1),
	opp_regs(2),
	opp_regs(3),
};

static const struct dcn10_opp_shift opp_shift = {
	OPP_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn10_opp_mask opp_mask = {
	OPP_MASK_SH_LIST_DCN10(_MASK),
};

#define tf_regs(id)\
[id] = {\
	TF_REG_LIST_DCN10(id),\
}

static const struct dcn_dpp_registers tf_regs[] = {
	tf_regs(0),
	tf_regs(1),
	tf_regs(2),
	tf_regs(3),
};

static const struct dcn_dpp_shift tf_shift = {
	TF_REG_LIST_SH_MASK_DCN10(__SHIFT)
};

static const struct dcn_dpp_mask tf_mask = {
	TF_REG_LIST_SH_MASK_DCN10(_MASK),
};

static const struct dcn_mpc_registers mpc_regs = {
	MPC_COMMON_REG_LIST_DCN1_0(0),
	MPC_COMMON_REG_LIST_DCN1_0(1),
	MPC_COMMON_REG_LIST_DCN1_0(2),
	MPC_COMMON_REG_LIST_DCN1_0(3)
};

static const struct dcn_mpc_shift mpc_shift = {
	MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};

static const struct dcn_mpc_mask mpc_mask = {
	MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
};

#define tg_regs(id)\
[id] = {TG_COMMON_REG_LIST_DCN1_0(id)}

static const struct dcn_tg_registers tg_regs[] = {
	tg_regs(0),
	tg_regs(1),
	tg_regs(2),
	tg_regs(3),
};

static const struct dcn_tg_shift tg_shift = {
	TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};

static const struct dcn_tg_mask tg_mask = {
	TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};


static const struct bios_registers bios_regs = {
	NBIO_SR(BIOS_SCRATCH_6)
};

#define mi_regs(id)\
[id] = {\
	MI_REG_LIST_DCN10(id)\
}


static const struct dcn_mi_registers mi_regs[] = {
	mi_regs(0),
	mi_regs(1),
	mi_regs(2),
	mi_regs(3),
};

static const struct dcn_mi_shift mi_shift = {
	MI_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn_mi_mask mi_mask = {
	MI_MASK_SH_LIST_DCN10(_MASK)
};

#define clk_src_regs(index, pllid)\
[index] = {\
	CS_COMMON_REG_LIST_DCN1_0(index, pllid),\
}

static const struct dce110_clk_src_regs clk_src_regs[] = {
	clk_src_regs(0, A),
	clk_src_regs(1, B),
	clk_src_regs(2, C),
	clk_src_regs(3, D)
};

static const struct dce110_clk_src_shift cs_shift = {
	CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};

static const struct dce110_clk_src_mask cs_mask = {
	CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};


static const struct resource_caps res_cap = {
	.num_timing_generator = 4,
	.num_video_plane = 4,
	.num_audio = 4,
	.num_stream_encoder = 4,
	.num_pll = 4,
};

static const struct dc_debug debug_defaults_drv = {
	.disable_dcc = false,
	.sanity_checks = true,
	.disable_dmcu = true,
	.force_abm_enable = false,
	.timing_trace = false,
	.clock_trace = true,
	.disable_pplib_clock_request = true,
	.disable_pplib_wm_range = false,
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	.use_dml_wm = false,
	.disable_pipe_split = true
#endif
};

static const struct dc_debug debug_defaults_diags = {
	.disable_dmcu = true,
	.force_abm_enable = false,
	.timing_trace = true,
	.clock_trace = true,
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	.disable_pplib_clock_request = true,
	.disable_pplib_wm_range = true,
	.use_dml_wm = false,
	.disable_pipe_split = false
#endif
};

static void dcn10_dpp_destroy(struct transform **xfm)
{
	dm_free(TO_DCN10_DPP(*xfm));
	*xfm = NULL;
}

static struct transform *dcn10_dpp_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn10_dpp *dpp =
		dm_alloc(sizeof(struct dcn10_dpp));

	if (!dpp)
		return NULL;

	if (dcn10_dpp_construct(dpp, ctx, inst,
			&tf_regs[inst], &tf_shift, &tf_mask))
		return &dpp->base;

	BREAK_TO_DEBUGGER();
	dm_free(dpp);
	return NULL;
}

static struct input_pixel_processor *dcn10_ipp_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn10_ipp *ipp =
		dm_alloc(sizeof(struct dcn10_ipp));

	if (!ipp) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dcn10_ipp_construct(ipp, ctx, inst,
			&ipp_regs[inst], &ipp_shift, &ipp_mask);
	return &ipp->base;
}


static struct output_pixel_processor *dcn10_opp_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn10_opp *opp =
		dm_alloc(sizeof(struct dcn10_opp));

	if (!opp) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dcn10_opp_construct(opp, ctx, inst,
			&opp_regs[inst], &opp_shift, &opp_mask);
	return &opp->base;
}

static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
{
	struct dcn10_mpc *mpc10 = dm_alloc(sizeof(struct dcn10_mpc));

	if (!mpc10)
		return NULL;

	dcn10_mpc_construct(mpc10, ctx,
			&mpc_regs,
			&mpc_shift,
			&mpc_mask,
			4);

	return &mpc10->base;
}

static struct timing_generator *dcn10_timing_generator_create(
		struct dc_context *ctx,
		uint32_t instance)
{
	struct dcn10_timing_generator *tgn10 =
		dm_alloc(sizeof(struct dcn10_timing_generator));

	if (!tgn10)
		return NULL;

	tgn10->base.inst = instance;
	tgn10->base.ctx = ctx;

	tgn10->tg_regs = &tg_regs[instance];
	tgn10->tg_shift = &tg_shift;
	tgn10->tg_mask = &tg_mask;

	dcn10_timing_generator_init(tgn10);

	return &tgn10->base;
}

static const struct encoder_feature_support link_enc_feature = {
	.max_hdmi_deep_color = COLOR_DEPTH_121212,
	.max_hdmi_pixel_clock = 600000,
	.ycbcr420_supported = true,
	.flags.bits.IS_HBR2_CAPABLE = true,
	.flags.bits.IS_HBR3_CAPABLE = true,
	.flags.bits.IS_TPS3_CAPABLE = true,
	.flags.bits.IS_TPS4_CAPABLE = true,
	.flags.bits.IS_YCBCR_CAPABLE = true
};

struct link_encoder *dcn10_link_encoder_create(
	const struct encoder_init_data *enc_init_data)
{
	struct dce110_link_encoder *enc110 =
		dm_alloc(sizeof(struct dce110_link_encoder));

	if (!enc110)
		return NULL;

	if (dce110_link_encoder_construct(
			enc110,
			enc_init_data,
			&link_enc_feature,
			&link_enc_regs[enc_init_data->transmitter],
			&link_enc_aux_regs[enc_init_data->channel - 1],
			&link_enc_hpd_regs[enc_init_data->hpd_source])) {

		return &enc110->base;
	}

	BREAK_TO_DEBUGGER();
	dm_free(enc110);
	return NULL;
}

struct clock_source *dcn10_clock_source_create(
	struct dc_context *ctx,
	struct dc_bios *bios,
	enum clock_source_id id,
	const struct dce110_clk_src_regs *regs,
	bool dp_clk_src)
{
	struct dce110_clk_src *clk_src =
		dm_alloc(sizeof(struct dce110_clk_src));

	if (!clk_src)
		return NULL;

	if (dce110_clk_src_construct(clk_src, ctx, bios, id,
			regs, &cs_shift, &cs_mask)) {
		clk_src->base.dp_clk_src = dp_clk_src;
		return &clk_src->base;
	}

	BREAK_TO_DEBUGGER();
	return NULL;
}

static void read_dce_straps(
	struct dc_context *ctx,
	struct resource_straps *straps)
{
	generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
		FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}

static struct audio *create_audio(
		struct dc_context *ctx, unsigned int inst)
{
	return dce_audio_create(ctx, inst,
			&audio_regs[inst], &audio_shift, &audio_mask);
}

static struct stream_encoder *dcn10_stream_encoder_create(
	enum engine_id eng_id,
	struct dc_context *ctx)
{
	struct dce110_stream_encoder *enc110 =
		dm_alloc(sizeof(struct dce110_stream_encoder));

	if (!enc110)
		return NULL;

	if (dce110_stream_encoder_construct(
			enc110, ctx, ctx->dc_bios, eng_id,
			&stream_enc_regs[eng_id], &se_shift, &se_mask))
		return &enc110->base;

	BREAK_TO_DEBUGGER();
	dm_free(enc110);
	return NULL;
}

static const struct dce_hwseq_registers hwseq_reg = {
	HWSEQ_DCN1_REG_LIST()
};

static const struct dce_hwseq_shift hwseq_shift = {
	HWSEQ_DCN1_MASK_SH_LIST(__SHIFT)
};

static const struct dce_hwseq_mask hwseq_mask = {
	HWSEQ_DCN1_MASK_SH_LIST(_MASK)
};

static struct dce_hwseq *dcn10_hwseq_create(
	struct dc_context *ctx)
{
	struct dce_hwseq *hws = dm_alloc(sizeof(struct dce_hwseq));

	if (hws) {
		hws->ctx = ctx;
		hws->regs = &hwseq_reg;
		hws->shifts = &hwseq_shift;
		hws->masks = &hwseq_mask;
	}
	return hws;
}

static const struct resource_create_funcs res_create_funcs = {
	.read_dce_straps = read_dce_straps,
	.create_audio = create_audio,
	.create_stream_encoder = dcn10_stream_encoder_create,
	.create_hwseq = dcn10_hwseq_create,
};

static const struct resource_create_funcs res_create_maximus_funcs = {
	.read_dce_straps = NULL,
	.create_audio = NULL,
	.create_stream_encoder = NULL,
	.create_hwseq = dcn10_hwseq_create,
};

void dcn10_clock_source_destroy(struct clock_source **clk_src)
{
	dm_free(TO_DCE110_CLK_SRC(*clk_src));
	*clk_src = NULL;
}

static void destruct(struct dcn10_resource_pool *pool)
{
	unsigned int i;

	for (i = 0; i < pool->base.stream_enc_count; i++) {
		if (pool->base.stream_enc[i] != NULL) {
			/* TODO: free dcn version of stream encoder once implemented
			 * rather than using virtual stream encoder
			 */
			dm_free(pool->base.stream_enc[i]);
			pool->base.stream_enc[i] = NULL;
		}
	}

	if (pool->base.mpc != NULL) {
		dm_free(TO_DCN10_MPC(pool->base.mpc));
		pool->base.mpc = NULL;
	}
	for (i = 0; i < pool->base.pipe_count; i++) {
		if (pool->base.opps[i] != NULL)
			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);

		if (pool->base.transforms[i] != NULL)
			dcn10_dpp_destroy(&pool->base.transforms[i]);

		if (pool->base.ipps[i] != NULL)
			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);

		if (pool->base.mis[i] != NULL) {
			dm_free(TO_DCN10_MEM_INPUT(pool->base.mis[i]));
			pool->base.mis[i] = NULL;
		}

		if (pool->base.irqs != NULL) {
			dal_irq_service_destroy(&pool->base.irqs);
		}

		if (pool->base.timing_generators[i] != NULL) {
			dm_free(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
			pool->base.timing_generators[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.stream_enc_count; i++) {
		if (pool->base.stream_enc[i] != NULL)
			dm_free(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
	}

	for (i = 0; i < pool->base.audio_count; i++) {
		if (pool->base.audios[i])
			dce_aud_destroy(&pool->base.audios[i]);
	}

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] != NULL) {
			dcn10_clock_source_destroy(&pool->base.clock_sources[i]);
			pool->base.clock_sources[i] = NULL;
		}
	}

	if (pool->base.dp_clock_source != NULL) {
		dcn10_clock_source_destroy(&pool->base.dp_clock_source);
		pool->base.dp_clock_source = NULL;
	}

	if (pool->base.abm != NULL)
		dce_abm_destroy(&pool->base.abm);

	if (pool->base.dmcu != NULL)
		dce_dmcu_destroy(&pool->base.dmcu);

	if (pool->base.display_clock != NULL)
		dce_disp_clk_destroy(&pool->base.display_clock);
}

static struct mem_input *dcn10_mem_input_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn10_mem_input *mem_inputn10 =
		dm_alloc(sizeof(struct dcn10_mem_input));

	if (!mem_inputn10)
		return NULL;

	if (dcn10_mem_input_construct(mem_inputn10, ctx, inst,
			&mi_regs[inst], &mi_shift, &mi_mask))
		return &mem_inputn10->base;

	BREAK_TO_DEBUGGER();
	dm_free(mem_inputn10);
	return NULL;
}

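/*
 * Fills pix_clk_params from the stream timing. Note from the code below:
 * the color depth reported to the clock source is forced to 8-bit for
 * YCbCr 4:2:2, the requested pixel clock is halved for YCbCr 4:2:0, and
 * the symbol clock is still hard-coded to RBR (LINK_RATE_LOW) pending the
 * TODO.
 */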
static void get_pixel_clock_parameters(
	const struct pipe_ctx *pipe_ctx,
	struct pixel_clk_params *pixel_clk_params)
{
	const struct dc_stream_state *stream = pipe_ctx->stream;

	pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
	pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
	pixel_clk_params->signal_type = pipe_ctx->stream->signal;
	pixel_clk_params->controller_id = pipe_ctx->pipe_idx + 1;
	/* TODO: un-hardcode */
	pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
		LINK_RATE_REF_FREQ_IN_KHZ;
	pixel_clk_params->flags.ENABLE_SS = 0;
	pixel_clk_params->color_depth =
		stream->timing.display_color_depth;
	pixel_clk_params->flags.DISPLAY_BLANKED = 1;
	pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;

	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
		pixel_clk_params->color_depth = COLOR_DEPTH_888;

	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		pixel_clk_params->requested_pix_clk /= 2;

}

static void build_clamping_params(struct dc_stream_state *stream)
{
	stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
	stream->clamping.c_depth = stream->timing.display_color_depth;
	stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
}

static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
{

	get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->pix_clk_params);

	pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
		pipe_ctx->clock_source,
		&pipe_ctx->pix_clk_params,
		&pipe_ctx->pll_settings);

	pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;

	resource_build_bit_depth_reduction_params(pipe_ctx->stream,
		&pipe_ctx->stream->bit_depth_params);
	build_clamping_params(pipe_ctx->stream);

	return DC_OK;
}

static enum dc_status build_mapped_resource(
		const struct core_dc *dc,
		struct validate_context *context,
		struct validate_context *old_context)
{
	enum dc_status status = DC_OK;
	uint8_t i, j;

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		if (old_context && resource_is_stream_unchanged(old_context, stream)) {
			if (stream != NULL && old_context->streams[i] != NULL) {
				/* todo: shouldn't have to copy missing parameter here */
				resource_build_bit_depth_reduction_params(stream,
						&stream->bit_depth_params);
				stream->clamping.pixel_encoding =
						stream->timing.pixel_encoding;

				resource_build_bit_depth_reduction_params(stream,
						&stream->bit_depth_params);
				build_clamping_params(stream);

				continue;
			}
		}

		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx =
				&context->res_ctx.pipe_ctx[j];

			if (context->res_ctx.pipe_ctx[j].stream != stream)
				continue;

			status = build_pipe_hw_param(pipe_ctx);

			if (status != DC_OK)
				return status;

			/* do not need to validate non-root pipes */
			break;
		}
	}

	return DC_OK;
}

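/*
 * Validation, in order: retain the streams into the new context, map pool
 * resources and PHY clock sources, build per-pipe HW parameters, attach
 * the surfaces, build scaling parameters, then run the DCN bandwidth check.
 */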
enum dc_status dcn10_validate_with_context(
		const struct core_dc *dc,
		const struct dc_validation_set set[],
		int set_count,
		struct validate_context *context,
		struct validate_context *old_context)
{
	enum dc_status result = DC_OK;
	int i;

	if (set_count == 0)
		return result;

	for (i = 0; i < set_count; i++) {
		context->streams[i] = set[i].stream;
		dc_stream_retain(context->streams[i]);
		context->stream_count++;
	}

	result = resource_map_pool_resources(dc, context, old_context);
	if (result != DC_OK)
		return result;

	result = resource_map_phy_clock_resources(dc, context, old_context);
	if (result != DC_OK)
		return result;

	result = build_mapped_resource(dc, context, old_context);
	if (result != DC_OK)
		return result;

	if (!resource_validate_attach_surfaces(set, set_count,
			old_context, context, dc->res_pool))
		return DC_FAIL_ATTACH_SURFACES;

	result = resource_build_scaling_params_for_context(dc, context);
	if (result != DC_OK)
		return result;

	if (!dcn_validate_bandwidth(dc, context))
		return DC_FAIL_BANDWIDTH_VALIDATE;

	return result;
}

enum dc_status dcn10_validate_guaranteed(
		const struct core_dc *dc,
		struct dc_stream_state *dc_stream,
		struct validate_context *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;

	context->streams[0] = dc_stream;
	dc_stream_retain(context->streams[0]);
	context->stream_count++;

	result = resource_map_pool_resources(dc, context, NULL);

	if (result == DC_OK)
		result = resource_map_phy_clock_resources(dc, context, NULL);

	if (result == DC_OK)
		result = build_mapped_resource(dc, context, NULL);

	if (result == DC_OK) {
		validate_guaranteed_copy_streams(
				context, dc->public.caps.max_streams);
		result = resource_build_scaling_params_for_context(dc, context);
	}
	if (result == DC_OK && !dcn_validate_bandwidth(dc, context))
		return DC_FAIL_BANDWIDTH_VALIDATE;

	return result;
}

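/*
 * Grabs a free secondary pipe so an extra plane can be blended into an
 * existing stream: the new pipe shares the head pipe's stream, timing
 * generator and OPP, but gets its own mem_input, IPP and transform.
 */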
static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
		struct validate_context *context,
		const struct resource_pool *pool,
		struct dc_stream_state *stream)
{
	struct resource_context *res_ctx = &context->res_ctx;
	struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
	struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);

	if (!head_pipe)
		ASSERT(0);

	if (!idle_pipe)
		return NULL;

	idle_pipe->stream = head_pipe->stream;
	idle_pipe->tg = head_pipe->tg;
	idle_pipe->opp = head_pipe->opp;

	idle_pipe->mi = pool->mis[idle_pipe->pipe_idx];
	idle_pipe->ipp = pool->ipps[idle_pipe->pipe_idx];
	idle_pipe->xfm = pool->transforms[idle_pipe->pipe_idx];

	return idle_pipe;
}

enum dcc_control {
	dcc_control__256_256_xxx,
	dcc_control__128_128_xxx,
	dcc_control__256_64_64,
};

enum segment_order {
	segment_order__na,
	segment_order__contiguous,
	segment_order__non_contiguous,
};

static bool dcc_support_pixel_format(
	enum surface_pixel_format format,
	unsigned int *bytes_per_element)
{
	/* DML: get_bytes_per_element */
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		*bytes_per_element = 2;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		*bytes_per_element = 4;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		*bytes_per_element = 8;
		return true;
	default:
		return false;
	}
}

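/*
 * Maps swizzle mode plus element size to the segment order seen in each
 * scan direction: standard ("S") swizzles are accepted for all element
 * sizes, display ("D") swizzles only for 64-bit elements, and the
 * per-direction order returned here feeds the request-size decision in
 * get_dcc_compression_cap().
 */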
static bool dcc_support_swizzle(
	enum swizzle_mode_values swizzle,
	unsigned int bytes_per_element,
	enum segment_order *segment_order_horz,
	enum segment_order *segment_order_vert)
{
	bool standard_swizzle = false;
	bool display_swizzle = false;

	switch (swizzle) {
	case DC_SW_4KB_S:
	case DC_SW_64KB_S:
	case DC_SW_VAR_S:
	case DC_SW_4KB_S_X:
	case DC_SW_64KB_S_X:
	case DC_SW_VAR_S_X:
		standard_swizzle = true;
		break;
	case DC_SW_4KB_D:
	case DC_SW_64KB_D:
	case DC_SW_VAR_D:
	case DC_SW_4KB_D_X:
	case DC_SW_64KB_D_X:
	case DC_SW_VAR_D_X:
		display_swizzle = true;
		break;
	default:
		break;
	}

	if (bytes_per_element == 1 && standard_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__na;
		return true;
	}
	if (bytes_per_element == 2 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 4 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && standard_swizzle) {
		*segment_order_horz = segment_order__na;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && display_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__non_contiguous;
		return true;
	}

	return false;
}

static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* copied from DML; might want to refactor to leverage DML directly */
	/* DML: get_blk256_size */
	if (bytes_per_element == 1) {
		*blk256_width = 16;
		*blk256_height = 16;
	} else if (bytes_per_element == 2) {
		*blk256_width = 16;
		*blk256_height = 8;
	} else if (bytes_per_element == 4) {
		*blk256_width = 8;
		*blk256_height = 8;
	} else if (bytes_per_element == 8) {
		*blk256_width = 8;
		*blk256_height = 4;
	}
}

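/*
 * Picks the DCC request size per scan direction: if twice the swath size
 * computed from the 256B block dimensions no longer fits in the 164KB
 * DCN1.0 detile buffer, the request drops from a full 256B to a half 128B.
 */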
static void det_request_size(
		unsigned int height,
		unsigned int width,
		unsigned int bpe,
		bool *req128_horz_wc,
		bool *req128_vert_wc)
{
	unsigned int detile_buf_size = 164 * 1024;  /* 164KB for DCN1.0 */

	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;

	get_blk256_size(&blk256_width, &blk256_height, bpe);

	swath_bytes_horz_wc = height * blk256_height * bpe;
	swath_bytes_vert_wc = width * blk256_width * bpe;

	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */

	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */
}

static bool get_dcc_compression_cap(const struct dc *dc,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
	enum dcc_control dcc_control;
	unsigned int bpe;
	enum segment_order segment_order_horz, segment_order_vert;
	bool req128_horz_wc, req128_vert_wc;

	memset(output, 0, sizeof(*output));

	if (dc->debug.disable_dcc)
		return false;

	if (!dcc_support_pixel_format(input->format,
			&bpe))
		return false;

	if (!dcc_support_swizzle(input->swizzle_mode, bpe,
			&segment_order_horz, &segment_order_vert))
		return false;

	det_request_size(input->surface_size.height, input->surface_size.width,
			bpe, &req128_horz_wc, &req128_vert_wc);

	if (!req128_horz_wc && !req128_vert_wc) {
		dcc_control = dcc_control__256_256_xxx;
	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
		if (!req128_horz_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_horz == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
		if (!req128_vert_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_vert == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else {
		if ((req128_horz_wc &&
			segment_order_horz == segment_order__non_contiguous) ||
			(req128_vert_wc &&
			segment_order_vert == segment_order__non_contiguous))
			/* access_dir not known, must use most constraining */
			dcc_control = dcc_control__256_64_64;
		else
			/* req128 is true for either horz or vert,
			 * but the segment order is contiguous
			 */
			dcc_control = dcc_control__128_128_xxx;
	}

	switch (dcc_control) {
	case dcc_control__256_256_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 256;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__128_128_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 128;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__256_64_64:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 64;
		output->grph.rgb.independent_64b_blks = true;
		break;
	}
	output->capable = true;
	output->const_color_support = false;

	return true;
}


static void dcn10_destroy_resource_pool(struct resource_pool **pool)
{
	struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);

	destruct(dcn10_pool);
	dm_free(dcn10_pool);
	*pool = NULL;
}


static struct dc_cap_funcs cap_funcs = {
	.get_dcc_compression_cap = get_dcc_compression_cap
};

static struct resource_funcs dcn10_res_pool_funcs = {
	.destroy = dcn10_destroy_resource_pool,
	.link_enc_create = dcn10_link_encoder_create,
	.validate_with_context = dcn10_validate_with_context,
	.validate_guaranteed = dcn10_validate_guaranteed,
	.validate_bandwidth = dcn_validate_bandwidth,
	.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
};

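/*
 * Pool construction order, as implemented below: caps and debug defaults,
 * clock sources (four combo PHY PLLs plus a DP DTO), display clock, DMCU
 * and ABM, DML/bandwidth setup, IRQ service, then per-pipe mem_input ->
 * IPP -> DPP -> OPP -> TG, the MPC, and finally the shared base resources.
 */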
static bool construct(
	uint8_t num_virtual_links,
	struct core_dc *dc,
	struct dcn10_resource_pool *pool)
{
	int i;
	struct dc_context *ctx = dc->ctx;

	ctx->dc_bios->regs = &bios_regs;

	pool->base.res_cap = &res_cap;
	pool->base.funcs = &dcn10_res_pool_funcs;

	/*
	 * TODO fill in from actual raven resource when we create
	 * more than the virtual encoder
	 */

	/*************************************************
	 *  Resource + asic cap hardcoding               *
	 *************************************************/
	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;

	/* TODO: Hardcode to correct number of functional controllers */
	pool->base.pipe_count = 4;
	dc->public.caps.max_downscale_ratio = 200;
	dc->public.caps.i2c_speed_in_khz = 100;
	dc->public.caps.max_cursor_size = 256;

	dc->public.caps.max_slave_planes = 1;

	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
		dc->public.debug = debug_defaults_drv;
	else
		dc->public.debug = debug_defaults_diags;

	/*************************************************
	 *  Create resources                             *
	 *************************************************/

	pool->base.clock_sources[DCN10_CLK_SRC_PLL0] =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL0,
				&clk_src_regs[0], false);
	pool->base.clock_sources[DCN10_CLK_SRC_PLL1] =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL1,
				&clk_src_regs[1], false);
	pool->base.clock_sources[DCN10_CLK_SRC_PLL2] =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL2,
				&clk_src_regs[2], false);
	pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL3,
				&clk_src_regs[3], false);

	pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL;

	pool->base.dp_clock_source =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_ID_DP_DTO,
				/* todo: not reuse phy_pll registers */
				&clk_src_regs[0], true);

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] == NULL) {
			dm_error("DC: failed to create clock sources!\n");
			BREAK_TO_DEBUGGER();
			goto clock_source_create_fail;
		}
	}

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		pool->base.display_clock = dce120_disp_clk_create(ctx);
		if (pool->base.display_clock == NULL) {
			dm_error("DC: failed to create display clock!\n");
			BREAK_TO_DEBUGGER();
			goto disp_clk_create_fail;
		}
	}

	pool->base.dmcu = dcn10_dmcu_create(ctx,
			&dmcu_regs,
			&dmcu_shift,
			&dmcu_mask);
	if (pool->base.dmcu == NULL) {
		dm_error("DC: failed to create dmcu!\n");
		BREAK_TO_DEBUGGER();
		goto res_create_fail;
	}

	pool->base.abm = dce_abm_create(ctx,
			&abm_regs,
			&abm_shift,
			&abm_mask);
	if (pool->base.abm == NULL) {
		dm_error("DC: failed to create abm!\n");
		BREAK_TO_DEBUGGER();
		goto res_create_fail;
	}

	dml_init_instance(&dc->dml, DML_PROJECT_RAVEN1);
	dc->dcn_ip = dcn10_ip_defaults;
	dc->dcn_soc = dcn10_soc_defaults;

	dc->dcn_soc.number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width;
	ASSERT(dc->dcn_soc.number_of_channels < 3);
	if (dc->dcn_soc.number_of_channels == 0) /* old sbios bug */
		dc->dcn_soc.number_of_channels = 2;

	if (dc->dcn_soc.number_of_channels == 1) {
		dc->dcn_soc.fabric_and_dram_bandwidth_vmax0p9 = 19.2f;
		dc->dcn_soc.fabric_and_dram_bandwidth_vnom0p8 = 17.066f;
		dc->dcn_soc.fabric_and_dram_bandwidth_vmid0p72 = 14.933f;
		dc->dcn_soc.fabric_and_dram_bandwidth_vmin0p65 = 12.8f;
	}

	if (!dc->public.debug.disable_pplib_clock_request)
		dcn_bw_update_from_pplib(dc);
	dcn_bw_sync_calcs_and_dml(dc);
	if (!dc->public.debug.disable_pplib_wm_range)
		dcn_bw_notify_pplib_of_wm_ranges(dc);

	{
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
		struct irq_service_init_data init_data;
		init_data.ctx = dc->ctx;
		pool->base.irqs = dal_irq_service_dcn10_create(&init_data);
		if (!pool->base.irqs)
			goto irqs_create_fail;
#endif
	}

	/* mem input -> ipp -> dpp -> opp -> TG */
	for (i = 0; i < pool->base.pipe_count; i++) {
		pool->base.mis[i] = dcn10_mem_input_create(ctx, i);
		if (pool->base.mis[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create memory input!\n");
			goto mi_create_fail;
		}

		pool->base.ipps[i] = dcn10_ipp_create(ctx, i);
		if (pool->base.ipps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create input pixel processor!\n");
			goto ipp_create_fail;
		}

		pool->base.transforms[i] = dcn10_dpp_create(ctx, i);
		if (pool->base.transforms[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create dpp!\n");
			goto dpp_create_fail;
		}

		pool->base.opps[i] = dcn10_opp_create(ctx, i);
		if (pool->base.opps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create output pixel processor!\n");
			goto opp_create_fail;
		}

		pool->base.timing_generators[i] = dcn10_timing_generator_create(
				ctx, i);
		if (pool->base.timing_generators[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create tg!\n");
			goto otg_create_fail;
		}
	}
	pool->base.mpc = dcn10_mpc_create(ctx);
	if (pool->base.mpc == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mpc!\n");
		goto mpc_create_fail;
	}

	if (!resource_construct(num_virtual_links, dc, &pool->base,
			(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
			&res_create_funcs : &res_create_maximus_funcs)))
		goto res_create_fail;

	dcn10_hw_sequencer_construct(dc);
	dc->public.caps.max_planes = pool->base.pipe_count;

	dc->public.cap_funcs = cap_funcs;

	return true;

disp_clk_create_fail:
mpc_create_fail:
otg_create_fail:
opp_create_fail:
dpp_create_fail:
ipp_create_fail:
mi_create_fail:
irqs_create_fail:
res_create_fail:
clock_source_create_fail:

	destruct(pool);

	return false;
}

struct resource_pool *dcn10_create_resource_pool(
		uint8_t num_virtual_links,
		struct core_dc *dc)
{
	struct dcn10_resource_pool *pool =
		dm_alloc(sizeof(struct dcn10_resource_pool));

	if (!pool)
		return NULL;

	if (construct(num_virtual_links, dc, pool))
		return &pool->base;

	BREAK_TO_DEBUGGER();
	return NULL;
}