/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
 */
#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)

#include <core/tegra.h>
#include <subdev/timer.h>

#define KHZ (1000)
#define MHZ (KHZ * 1000)

#define MASK(w) ((1 << w) - 1)
#define SYS_GPCPLL_CFG_BASE 0x00137000
#define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800

#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
#define GPCPLL_CFG_ENABLE BIT(0)
#define GPCPLL_CFG_IDDQ BIT(1)
#define GPCPLL_CFG_LOCK_DET_OFF BIT(4)
#define GPCPLL_CFG_LOCK BIT(17)

#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4)
#define GPCPLL_COEFF_M_SHIFT 0
#define GPCPLL_COEFF_M_WIDTH 8
#define GPCPLL_COEFF_N_SHIFT 8
#define GPCPLL_COEFF_N_WIDTH 8
#define GPCPLL_COEFF_P_SHIFT 16
#define GPCPLL_COEFF_P_WIDTH 6

#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc)
#define GPCPLL_CFG2_SETUP2_SHIFT 16
#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24

#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16

#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16
#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22
#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31

#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100)
#define SEL_VCO_GPC2CLK_OUT_SHIFT 0

#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250)
#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1
#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31
#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
#define GPC2CLK_OUT_VCODIV_WIDTH 6
#define GPC2CLK_OUT_VCODIV_SHIFT 8
#define GPC2CLK_OUT_VCODIV1 0
#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
		GPC2CLK_OUT_VCODIV_SHIFT)
#define GPC2CLK_OUT_BYPDIV_WIDTH 6
#define GPC2CLK_OUT_BYPDIV_SHIFT 0
#define GPC2CLK_OUT_BYPDIV31 0x3c
#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
		GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
		| (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT) \
		| (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
		GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
		| (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
		| (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))

#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0)
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
	(0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
static const u8 pl_to_div[] = {
/* PL:   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
};
/* All frequencies in KHz */
struct gk20a_clk_pllg_params {
	u32 min_vco, max_vco;
	u32 min_u, max_u;
	u32 min_m, max_m;
	u32 min_n, max_n;
	u32 min_pl, max_pl;
};

static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
	.min_vco = 1000000, .max_vco = 2064000,
	.min_u = 12000, .max_u = 38000,
	.min_m = 1, .max_m = 255,
	.min_n = 8, .max_n = 255,
	.min_pl = 1, .max_pl = 32,
};
struct gk20a_clk {
	struct nvkm_clk base;
	const struct gk20a_clk_pllg_params *params;
	u32 parent_rate;
	u32 m, n, pl;
};
static void
gk20a_pllg_read_mnp(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;

	val = nvkm_rd32(device, GPCPLL_COEFF);
	clk->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
	clk->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
	clk->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
}
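/*
 * The PLL output (gpc2clk) is parent_rate * N / (M * divider(PL)); the GPC
 * core clock is half of gpc2clk, hence the trailing division by 2 below.
 */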
static u32
gk20a_pllg_calc_rate(struct gk20a_clk *clk)
{
	u32 rate;
	u32 divider;

	rate = clk->parent_rate * clk->n;
	divider = clk->m * pl_to_div[clk->pl];

	return rate / divider / 2;
}
static int
gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	u32 target_clk_f, ref_clk_f, target_freq;
	u32 min_vco_f, max_vco_f;
	u32 low_pl, high_pl, best_pl;
	u32 target_vco_f, vco_f;
	u32 best_m, best_n;
	u32 u_f, m, n, n2;
	u32 delta, lwv, best_delta = ~0;
	u32 pl;

	target_clk_f = rate * 2 / KHZ;
	ref_clk_f = clk->parent_rate / KHZ;

	max_vco_f = clk->params->max_vco;
	min_vco_f = clk->params->min_vco;
	best_m = clk->params->max_m;
	best_n = clk->params->min_n;
	best_pl = clk->params->min_pl;

	target_vco_f = target_clk_f + target_clk_f / 50;
	if (max_vco_f < target_vco_f)
		max_vco_f = target_vco_f;

	/* min_pl <= high_pl <= max_pl */
	high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
	high_pl = min(high_pl, clk->params->max_pl);
	high_pl = max(high_pl, clk->params->min_pl);

	/* min_pl <= low_pl <= max_pl */
	low_pl = min_vco_f / target_vco_f;
	low_pl = min(low_pl, clk->params->max_pl);
	low_pl = max(low_pl, clk->params->min_pl);

	/* Find Indices of high_pl and low_pl */
	for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
		if (pl_to_div[pl] >= low_pl) {
			low_pl = pl;
			break;
		}
	}
	for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
		if (pl_to_div[pl] >= high_pl) {
			high_pl = pl;
			break;
		}
	}

	nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
		   pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);

	/* Select lowest possible VCO */
	for (pl = low_pl; pl <= high_pl; pl++) {
		target_vco_f = target_clk_f * pl_to_div[pl];

		for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
			u_f = ref_clk_f / m;

			if (u_f < clk->params->min_u)
				break;
			if (u_f > clk->params->max_u)
				continue;

			n = (target_vco_f * m) / ref_clk_f;
			n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;

			if (n > clk->params->max_n)
				break;

			for (; n <= n2; n++) {
				if (n < clk->params->min_n)
					continue;
				if (n > clk->params->max_n)
					break;

				vco_f = ref_clk_f * n / m;

				if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
					lwv = (vco_f + (pl_to_div[pl] / 2))
					      / pl_to_div[pl];
					delta = abs(lwv - target_clk_f);

					if (delta < best_delta) {
						best_delta = delta;
						best_m = m;
						best_n = n;
						best_pl = pl;
					}
				}
			}
		}
	}

	WARN_ON(best_delta == ~0);

	if (best_delta != 0)
		nvkm_debug(subdev,
			   "no best match for target @ %dMHz on gpc_pll",
			   target_clk_f / KHZ);

	clk->m = best_m;
	clk->n = best_n;
	clk->pl = best_pl;

	target_freq = gk20a_pllg_calc_rate(clk);

	nvkm_debug(subdev,
		   "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
		   target_freq / MHZ, clk->m, clk->n, clk->pl,
		   pl_to_div[clk->pl]);
	return 0;
}
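/* dynamically ramp NDIV to a new value while the PLL stays enabled */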
static int
gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 val;
	int ramp_timeout;

	/* get old coefficients */
	val = nvkm_rd32(device, GPCPLL_COEFF);
	/* do nothing if NDIV is the same */
	if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
		return 0;

	nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
		  0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
	nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
		  0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);

	/* pll slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		  BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
		  BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));

	/* new ndiv ready for ramp */
	val = nvkm_rd32(device, GPCPLL_COEFF);
	val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
	val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
	nvkm_wr32(device, GPCPLL_COEFF, val);

	/* dynamic ramp to new ndiv */
	val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
	val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
	nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);

	for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
		val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
		if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
			break;
	}

	/* exit slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		  BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);

	if (ramp_timeout <= 0) {
		nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static void
gk20a_pllg_enable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
	nvkm_rd32(device, GPCPLL_CFG);
}
static void
gk20a_pllg_disable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
	nvkm_rd32(device, GPCPLL_CFG);
}
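/*
 * Program the computed M/N/PL. If only NDIV changes and the PLL is already
 * running, a dynamic slide is enough; otherwise the PLL is put in bypass,
 * reprogrammed, relocked and switched back to VCO mode.
 */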
static int
_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 val, cfg;
	u32 m_old, pl_old, n_lo;
	int ret;

	/* get old coefficients */
	val = nvkm_rd32(device, GPCPLL_COEFF);
	m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
	pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);

	/* do NDIV slide if there is no change in M and PL */
	cfg = nvkm_rd32(device, GPCPLL_CFG);
	if (allow_slide && clk->m == m_old && clk->pl == pl_old &&
	    (cfg & GPCPLL_CFG_ENABLE)) {
		return gk20a_pllg_slide(clk, clk->n);
	}

	/* slide down to NDIV_LO */
	if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
		n_lo = DIV_ROUND_UP(m_old * clk->params->min_vco,
				    clk->parent_rate / KHZ);
		ret = gk20a_pllg_slide(clk, n_lo);
		if (ret)
			return ret;
	}

	/* split FO-to-bypass jump in halfs by setting out divider 1:2 */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  0x2 << GPC2CLK_OUT_VCODIV_SHIFT);

	/* put PLL in bypass before programming it */
	val = nvkm_rd32(device, SEL_VCO);
	val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
	nvkm_wr32(device, SEL_VCO, val);

	/* get out from IDDQ */
	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_IDDQ) {
		val &= ~GPCPLL_CFG_IDDQ;
		nvkm_wr32(device, GPCPLL_CFG, val);
		nvkm_rd32(device, GPCPLL_CFG);
	}

	gk20a_pllg_disable(clk);

	nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
		   clk->m, clk->n, clk->pl);

	n_lo = DIV_ROUND_UP(clk->m * clk->params->min_vco,
			    clk->parent_rate / KHZ);
	val = clk->m << GPCPLL_COEFF_M_SHIFT;
	val |= (allow_slide ? n_lo : clk->n) << GPCPLL_COEFF_N_SHIFT;
	val |= clk->pl << GPCPLL_COEFF_P_SHIFT;
	nvkm_wr32(device, GPCPLL_COEFF, val);

	gk20a_pllg_enable(clk);

	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_LOCK_DET_OFF) {
		val &= ~GPCPLL_CFG_LOCK_DET_OFF;
		nvkm_wr32(device, GPCPLL_CFG, val);
	}

	if (nvkm_usec(device, 300,
		if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
			break;
	) < 0)
		return -ETIMEDOUT;

	/* switch to VCO mode */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
		  BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

	/* restore out divider 1:1 */
	val = nvkm_rd32(device, GPC2CLK_OUT);
	if ((val & GPC2CLK_OUT_VCODIV_MASK) !=
	    (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT)) {
		val &= ~GPC2CLK_OUT_VCODIV_MASK;
		val |= GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT;
		nvkm_wr32(device, GPC2CLK_OUT, val);
		/* Intentional 2nd write to assure linear divider operation */
		nvkm_wr32(device, GPC2CLK_OUT, val);
		nvkm_rd32(device, GPC2CLK_OUT);
	}

	/* slide up to new NDIV */
	return allow_slide ? gk20a_pllg_slide(clk, clk->n) : 0;
}
static int
gk20a_pllg_program_mnp(struct gk20a_clk *clk)
{
	int err;

	err = _gk20a_pllg_program_mnp(clk, true);
	if (err)
		err = _gk20a_pllg_program_mnp(clk, false);

	return err;
}
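/*
 * Note: the gpc domain values in the pstate list below are in KHz;
 * GK20A_CLK_GPC_MDIV converts between those units and the Hz rates used by
 * the PLL code above.
 */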
#define GK20A_CLK_GPC_MDIV 1000

static struct nvkm_pstate
gk20a_pstates[] = {
	{ .base = { .domain[nv_clk_src_gpc] =  72000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 108000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 180000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 252000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 324000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 396000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 468000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 540000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 612000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 648000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 684000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 708000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 756000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 804000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 852000, }, },
};
static int
gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_gpc:
		gk20a_pllg_read_mnp(clk);
		return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
}
static int
gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gk20a_clk *clk = gk20a_clk(base);

	return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
					GK20A_CLK_GPC_MDIV);
}
static int
gk20a_clk_prog(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);

	return gk20a_pllg_program_mnp(clk);
}
static void
gk20a_clk_tidy(struct nvkm_clk *base)
{
}
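/* slide down to the minimum VCO rate and bypass the PLL before disabling it */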
static void
gk20a_clk_fini(struct nvkm_clk *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct gk20a_clk *clk = gk20a_clk(base);
	u32 val;

	/* slide to VCO min */
	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_ENABLE) {
		u32 coef, m, n_lo;

		coef = nvkm_rd32(device, GPCPLL_COEFF);
		m = (coef >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
		n_lo = DIV_ROUND_UP(m * clk->params->min_vco,
				    clk->parent_rate / KHZ);
		gk20a_pllg_slide(clk, n_lo);
	}

	/* put PLL in bypass before disabling it */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

	gk20a_pllg_disable(clk);
}
static int
gk20a_clk_init(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);

	ret = gk20a_clk_prog(&clk->base);
	if (ret) {
		nvkm_error(subdev, "cannot initialize clock\n");
		return ret;
	}

	return 0;
}
static const struct nvkm_clk_func
gk20a_clk = {
	.init = gk20a_clk_init,
	.fini = gk20a_clk_fini,
	.read = gk20a_clk_read,
	.calc = gk20a_clk_calc,
	.prog = gk20a_clk_prog,
	.tidy = gk20a_clk_tidy,
	.pstates = gk20a_pstates,
	.nr_pstates = ARRAY_SIZE(gk20a_pstates),
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
		{ nv_clk_src_max }
	}
};
int
gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct gk20a_clk *clk;
	int ret, i;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	*pclk = &clk->base;

	/* Finish initializing the pstates */
	for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) {
		INIT_LIST_HEAD(&gk20a_pstates[i].list);
		gk20a_pstates[i].pstate = i + 1;
	}

	clk->params = &gk20a_pllg_params;
	clk->parent_rate = clk_get_rate(tdev->clk);

	ret = nvkm_clk_ctor(&gk20a_clk, device, index, true, &clk->base);
	nvkm_info(&clk->base.subdev, "parent clock rate: %d Khz\n",
		  clk->parent_rate / KHZ);

	return ret;
}