/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <drm/drmP.h>
#include "nouveau_drv.h"
#include "nouveau_bios.h"
#include "nouveau_pm.h"

static u32 read_div(struct drm_device *, int, u32, u32);
static u32 read_pll(struct drm_device *, u32);

static u32
read_vco(struct drm_device *dev, u32 dsrc)
{
	u32 ssrc = nv_rd32(dev, dsrc);
	if (!(ssrc & 0x00000100))
		return read_pll(dev, 0x00e800);
	return read_pll(dev, 0x00e820);
}

static u32
read_pll(struct drm_device *dev, u32 pll)
{
	u32 ctrl = nv_rd32(dev, pll + 0);
	u32 coef = nv_rd32(dev, pll + 4);
	u32 P = (coef & 0x003f0000) >> 16;
	u32 N = (coef & 0x0000ff00) >> 8;
	u32 M = (coef & 0x000000ff) >> 0;
	u32 sclk, doff;

	if (!(ctrl & 0x00000001))
		return 0;

	switch (pll & 0xfff000) {
	case 0x00e000:
		sclk = 27000;
		P = 1;
		break;
	case 0x137000:
		doff = (pll - 0x137000) / 0x20;
		sclk = read_div(dev, doff, 0x137120, 0x137140);
		break;
	case 0x132000:
		switch (pll) {
		case 0x132000:
			sclk = read_pll(dev, 0x132020);
			break;
		case 0x132020:
			sclk = read_div(dev, 0, 0x137320, 0x137330);
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return sclk * N / M / P;
}

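/* Worked example (illustrative coefficient value): with the 27000 kHz
 * crystal reference and coef = 0x00010a02 (P = 1, N = 0x0a = 10,
 * M = 0x02 = 2), read_pll() reports 27000 * 10 / 2 / 1 = 135000.
 * Clock values throughout this file are in kHz (27000 == 27MHz).
 */
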
static u32
read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
{
	u32 ssrc = nv_rd32(dev, dsrc + (doff * 4));
	u32 sctl = nv_rd32(dev, dctl + (doff * 4));

	switch (ssrc & 0x00000003) {
	case 0:
		if ((ssrc & 0x00030000) != 0x00030000)
			return 27000;
		return 108000;
	case 2:
		return 100000;
	case 3:
		if (sctl & 0x80000000) {
			u32 sclk = read_vco(dev, dsrc + (doff * 4));
			u32 sdiv = (sctl & 0x0000003f) + 2;
			return (sclk * 2) / sdiv;
		}

		return read_vco(dev, dsrc + (doff * 4));
	default:
		return 0;
	}
}

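/* Source select 1 isn't handled above and falls through to the default
 * case, reporting 0. Example decode: ssrc = 0x00000003 with
 * sctl = 0x80000002 selects the VCO with sdiv = (2 + 2) = 4, i.e.
 * (sclk * 2) / 4 = half the VCO frequency.
 */
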
static u32
read_mem(struct drm_device *dev)
{
	u32 ssel = nv_rd32(dev, 0x1373f0);
	if (ssel & 0x00000001)
		return read_div(dev, 0, 0x137300, 0x137310);
	return read_pll(dev, 0x132000);
}

static u32
read_clk(struct drm_device *dev, int clk)
{
	u32 sctl = nv_rd32(dev, 0x137250 + (clk * 4));
	u32 ssel = nv_rd32(dev, 0x137100);
	u32 sclk, sdiv;

	if (ssel & (1 << clk)) {
		if (clk < 7)
			sclk = read_pll(dev, 0x137000 + (clk * 0x20));
		else
			sclk = read_pll(dev, 0x1370e0);
		sdiv = ((sctl & 0x00003f00) >> 8) + 2;
	} else {
		sclk = read_div(dev, clk, 0x137160, 0x1371d0);
		sdiv = ((sctl & 0x0000003f) >> 0) + 2;
	}

	if (sctl & 0x80000000)
		return (sclk * 2) / sdiv;
	return sclk;
}

int
nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	perflvl->shader = read_clk(dev, 0x00);
	perflvl->core   = perflvl->shader / 2;
	perflvl->memory = read_mem(dev);
	perflvl->rop    = read_clk(dev, 0x01);
	perflvl->hub07  = read_clk(dev, 0x02);
	perflvl->hub06  = read_clk(dev, 0x07);
	perflvl->hub01  = read_clk(dev, 0x08);
	perflvl->copy   = read_clk(dev, 0x09);
	perflvl->daemon = read_clk(dev, 0x0c);
	perflvl->vdec   = read_clk(dev, 0x0e);
	return 0;
}

struct nvc0_pm_clock {
	u32 freq;
	u32 ssel;
	u32 mdiv;
	u32 dsrc;
	u32 ddiv;
	u32 coef;
};

struct nvc0_pm_state {
	struct nouveau_pm_level *perflvl;
	struct nvc0_pm_clock eng[16];
	struct nvc0_pm_clock mem;
};

static u32
calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
{
	u32 div = min((ref * 2) / freq, (u32)65);
	if (div < 2)
		div = 2;

	*ddiv = div - 2;
	return (ref * 2) / div;
}

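/* The divider works in half steps: the hardware field holds (div - 2)
 * and divides (ref * 2), so division factors of 1.0 to 32.5 in 0.5
 * increments are possible. Worked example: ref = 1620000 and
 * freq = 405000 gives div = 8 (a /4.0 divide), *ddiv = 6, and an
 * output of exactly 405000.
 */
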
static u32
calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
{
	u32 sclk;

	/* use one of the fixed frequencies if possible */
	*ddiv = 0x00000000;
	switch (freq) {
	case  27000:
	case 108000:
		*dsrc = 0x00000000;
		if (freq == 108000)
			*dsrc |= 0x00030000;
		return freq;
	case 100000:
		*dsrc = 0x00000002;
		return freq;
	default:
		*dsrc = 0x00000003;
		break;
	}

	/* otherwise, calculate the closest divider */
	sclk = read_vco(dev, clk);
	if (clk < 7)
		sclk = calc_div(dev, clk, sclk, freq, ddiv);
	return sclk;
}

static u32
calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
{
	struct pll_lims limits;
	int N, M, P, ret;

	ret = get_pll_limits(dev, 0x137000 + (clk * 0x20), &limits);
	if (ret)
		return 0;

	limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
	if (!limits.refclk)
		return 0;

	ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
	if (ret <= 0)
		return 0;

	*coef = (P << 16) | (N << 8) | M;
	return ret;
}

/* A (likely rather simplified and incomplete) view of the clock tree
 *
 * Key:
 *
 * S: source select
 * D: divider
 * P: pll
 * F: switch
 *
 * Engine clocks:
 *
 * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
 *                      (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
 *
 * Not all registers exist for all clocks. For example: clocks >= 8 don't
 * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
 * they have the divider at 1371d0, though the source selection at 137160
 * still exists. You must use the divider at 137250 for these instead.
 *
 * Memory clock:
 *
 * TBD, read_mem() above is likely very wrong...
 *
 */

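/* For example, an engine clock in non-pll mode (F0) comes from the
 * 137160 source select (crystal, 108MHz, 100MHz or the VCO), through
 * the optional divider at 1371d0 and then the final divider at 137250;
 * in pll mode (F1) it is the 1370X0 PLL output (referenced via
 * 137120/137140) through the 137250 divider. This is the decode
 * performed by read_clk() above.
 */
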
static int
calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
{
	u32 src0, div0, div1D, div1P = 0;
	u32 clk0, clk1 = 0;

	/* invalid clock domain */
	if (!freq)
		return 0;

	/* first possible path, using only dividers */
	clk0 = calc_src(dev, clk, freq, &src0, &div0);
	clk0 = calc_div(dev, clk, clk0, freq, &div1D);

	/* see if we can get any closer using PLLs */
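	/* (mask 0x00004387 = only clocks 0-2, 7-9 and 14 may use pll mode) */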
	if (clk0 != freq && (0x00004387 & (1 << clk))) {
		if (clk < 7)
			clk1 = calc_pll(dev, clk, freq, &info->coef);
		else
			clk1 = read_pll(dev, 0x1370e0);
		clk1 = calc_div(dev, clk, clk1, freq, &div1P);
	}

	/* select the method which gets closest to target freq */
	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
		info->dsrc = src0;
		if (div0) {
			info->ddiv |= 0x80000000;
			info->ddiv |= div0 << 8;
			info->ddiv |= div0;
		}
		if (div1D) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1D;
		}
		info->ssel = 0;
		info->freq = clk0;
	} else {
		if (div1P) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1P << 8;
		}
		info->ssel = (1 << clk);
		info->freq = clk1;
	}

	return 0;
}

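/* Note on the divider encodings built above: calc_clk() sets bit 31 of
 * ddiv/mdiv when a divider is in use and stores the (div - 2) values in
 * bits 0-5 and/or 8-13, matching the 0x80003f3f and 0x00003f3f masks
 * written by prog_clk() below.
 */
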
static int
calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
{
	struct pll_lims pll;
	int N, M, P, ret;
	u32 ctrl;

	/* mclk pll input freq comes from another pll, make sure it's on */
	ctrl = nv_rd32(dev, 0x132020);
	if (!(ctrl & 0x00000001)) {
		/* if not, program it to 567MHz. nfi where this value comes
		 * from - it looks like it's in the pll limits table for
		 * 132000 but the binary driver ignores all my attempts to
		 * change this value.
		 */
		nv_wr32(dev, 0x137320, 0x00000103);
		nv_wr32(dev, 0x137330, 0x81200606);
		nv_wait(dev, 0x132020, 0x00010000, 0x00010000);
		nv_wr32(dev, 0x132024, 0x0001150f);
		nv_mask(dev, 0x132020, 0x00000001, 0x00000001);
		nv_wait(dev, 0x137390, 0x00020000, 0x00020000);
		nv_mask(dev, 0x132020, 0x00000004, 0x00000004);
	}

	/* for the moment, until the clock tree is better understood, use
	 * pll mode for all clock frequencies
	 */
	ret = get_pll_limits(dev, 0x132000, &pll);
	if (ret == 0) {
		pll.refclk = read_pll(dev, 0x132020);
		if (pll.refclk) {
			ret = nva3_calc_pll(dev, &pll, freq, &N, NULL, &M, &P);
			if (ret > 0) {
				info->coef = (P << 16) | (N << 8) | M;
				return 0;
			}
		}
	}

	return -EINVAL;
}

void *
nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_pm_state *info;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	/* NFI why this is still in the performance table, the ROPCs appear
	 * to get their clock from clock 2 ("hub07", actually hub05 on this
	 * chip, but, anyway...) as well. nvatiming confirms hub05 and ROP
	 * are always the same freq with the binary driver even when the
	 * performance table says they should differ.
	 */
	if (dev_priv->chipset == 0xd9)
		perflvl->rop = 0;

	if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
	    (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
	    (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
	    (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
	    (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
	    (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
	    (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
	    (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
		kfree(info);
		return ERR_PTR(ret);
	}

	if (perflvl->memory) {
		ret = calc_mem(dev, &info->mem, perflvl->memory);
		if (ret) {
			kfree(info);
			return ERR_PTR(ret);
		}
	}

	info->perflvl = perflvl;
	return info;
}

static void
prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
{
	/* program dividers at 137160/1371d0 first */
	if (clk < 7 && !info->ssel) {
		nv_mask(dev, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
		nv_wr32(dev, 0x137160 + (clk * 0x04), info->dsrc);
	}

	/* switch clock to non-pll mode */
	nv_mask(dev, 0x137100, (1 << clk), 0x00000000);
	nv_wait(dev, 0x137100, (1 << clk), 0x00000000);

	/* reprogram pll */
	if (clk < 7) {
		/* make sure it's disabled first... */
		u32 base = 0x137000 + (clk * 0x20);
		u32 ctrl = nv_rd32(dev, base + 0x00);
		if (ctrl & 0x00000001) {
			nv_mask(dev, base + 0x00, 0x00000004, 0x00000000);
			nv_mask(dev, base + 0x00, 0x00000001, 0x00000000);
		}
		/* program it to new values, if necessary */
		if (info->ssel) {
			nv_wr32(dev, base + 0x04, info->coef);
			nv_mask(dev, base + 0x00, 0x00000001, 0x00000001);
			nv_wait(dev, base + 0x00, 0x00020000, 0x00020000);
			nv_mask(dev, base + 0x00, 0x00020004, 0x00000004);
		}
	}

	/* select pll/non-pll mode, and program final clock divider */
	nv_mask(dev, 0x137100, (1 << clk), info->ssel);
	nv_wait(dev, 0x137100, (1 << clk), info->ssel);
	nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
}

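/* The mclk_* functions below implement the nouveau_mem_exec_func
 * callbacks invoked by nouveau_mem_exec() while it sequences the
 * memory reclock (see prog_mem() further down).
 */
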
static void
mclk_precharge(struct nouveau_mem_exec_func *exec)
{
}

static void
mclk_refresh(struct nouveau_mem_exec_func *exec)
{
}

static void
mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
{
	nv_wr32(exec->dev, 0x10f210, enable ? 0x80000000 : 0x00000000);
}

static void
mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
{
}

static void
mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
{
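	/* delay is requested in nsec; round to the nearest usec for udelay() */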
	udelay((nsec + 500) / 1000);
}

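/* Mode-register layout as used below: non-GDDR5 parts expose MR0/MR1 at
 * 0x10f300/0x10f304 and MR2/MR3 at 0x10f320/0x10f324 (rank B copies at
 * +0x08); GDDR5 parts expose MR0 at 0x10f300, MR1-MR7 at
 * 0x10f330..0x10f348 and MR15 at 0x10f34c.
 */
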
static u32
mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
{
	struct drm_device *dev = exec->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
		if (mr <= 1)
			return nv_rd32(dev, 0x10f300 + ((mr - 0) * 4));
		return nv_rd32(dev, 0x10f320 + ((mr - 2) * 4));
	} else {
		if (mr == 0)
			return nv_rd32(dev, 0x10f300 + (mr * 4));
		else
		if (mr <= 7)
			return nv_rd32(dev, 0x10f32c + (mr * 4));
		return nv_rd32(dev, 0x10f34c);
	}
}

static void
mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
{
	struct drm_device *dev = exec->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
		if (mr <= 1) {
			nv_wr32(dev, 0x10f300 + ((mr - 0) * 4), data);
			if (dev_priv->vram_rank_B)
				nv_wr32(dev, 0x10f308 + ((mr - 0) * 4), data);
		} else
		if (mr <= 3) {
			nv_wr32(dev, 0x10f320 + ((mr - 2) * 4), data);
			if (dev_priv->vram_rank_B)
				nv_wr32(dev, 0x10f328 + ((mr - 2) * 4), data);
		}
	} else {
		if (mr == 0)
			nv_wr32(dev, 0x10f300 + (mr * 4), data);
		else
		if (mr <= 7)
			nv_wr32(dev, 0x10f32c + (mr * 4), data);
		else
		if (mr == 15)
			nv_wr32(dev, 0x10f34c, data);
	}
}

static void
mclk_clock_set(struct nouveau_mem_exec_func *exec)
{
	struct nvc0_pm_state *info = exec->priv;
	struct drm_device *dev = exec->dev;
	u32 ctrl = nv_rd32(dev, 0x132000);

	nv_wr32(dev, 0x137360, 0x00000001);
	nv_wr32(dev, 0x137370, 0x00000000);
	nv_wr32(dev, 0x137380, 0x00000000);
	if (ctrl & 0x00000001)
		nv_wr32(dev, 0x132000, (ctrl &= ~0x00000001));

	nv_wr32(dev, 0x132004, info->mem.coef);
	nv_wr32(dev, 0x132000, (ctrl |= 0x00000001));
	nv_wait(dev, 0x137390, 0x00000002, 0x00000002);
	nv_wr32(dev, 0x132018, 0x00005000);

	nv_wr32(dev, 0x137370, 0x00000001);
	nv_wr32(dev, 0x137380, 0x00000001);
	nv_wr32(dev, 0x137360, 0x00000000);
}

static void
mclk_timing_set(struct nouveau_mem_exec_func *exec)
{
	struct nvc0_pm_state *info = exec->priv;
	struct nouveau_pm_level *perflvl = info->perflvl;
	int i;

	for (i = 0; i < 5; i++)
		nv_wr32(exec->dev, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
}

static void
prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_mem_exec_func exec = {
		.dev = dev,
		.precharge = mclk_precharge,
		.refresh = mclk_refresh,
		.refresh_auto = mclk_refresh_auto,
		.refresh_self = mclk_refresh_self,
		.wait = mclk_wait,
		.mrg = mclk_mrg,
		.mrs = mclk_mrs,
		.clock_set = mclk_clock_set,
		.timing_set = mclk_timing_set,
		.priv = info
	};

	if (dev_priv->chipset < 0xd0)
		nv_wr32(dev, 0x611200, 0x00003300);
	else
		nv_wr32(dev, 0x62c000, 0x03030000);

	nouveau_mem_exec(&exec, info->perflvl);

	if (dev_priv->chipset < 0xd0)
		nv_wr32(dev, 0x611200, 0x00003330);
	else
		nv_wr32(dev, 0x62c000, 0x03030300);
}

int
nvc0_pm_clocks_set(struct drm_device *dev, void *data)
{
	struct nvc0_pm_state *info = data;
	int i;

	if (info->mem.coef)
		prog_mem(dev, info);

	for (i = 0; i < 16; i++) {
		if (!info->eng[i].freq)
			continue;
		prog_clk(dev, i, &info->eng[i]);
	}

	kfree(info);
	return 0;
}