drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <subdev/bios.h>
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
#include <subdev/fb.h>
#include <subdev/therm.h>
#include <subdev/volt.h>

#include <core/option.h>

/******************************************************************************
 * misc
 *****************************************************************************/
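/* Clamp (and, when @adjust is set, scale) a frequency using the VBIOS boost
 * tables: the entry matching @pstate supplies an overall min/max range, and
 * a sub-entry matching @domain may scale the value by a percentage and clamp
 * it further.  Returns @input unchanged if no matching entry exists.
 */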
static u32
nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
		u8 pstate, u8 domain, u32 input)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvbios_boostE boostE;
	u8 ver, hdr, cnt, len;
	u16 data;

	data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
	if (data) {
		struct nvbios_boostS boostS;
		u8 idx = 0, sver, shdr;
		u16 subd;

		input = max(boostE.min, input);
		input = min(boostE.max, input);
		do {
			sver = ver;
			shdr = hdr;
			subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
					      cnt, len, &boostS);
			if (subd && boostS.domain == domain) {
				if (adjust)
					input = input * boostS.percent / 100;
				input = max(boostS.min, input);
				input = min(boostS.max, input);
				break;
			}
		} while (subd);
	}

	return input;
}

/******************************************************************************
 * C-States
 *****************************************************************************/
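/* Program one cstate of @pstate: currently always the last entry on its
 * cstate list (or the base cstate when the list is empty); @cstatei is not
 * used yet.  Fan speed and voltage are raised before the clocks are changed
 * and lowered again afterwards.
 */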
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_therm *therm = device->therm;
	struct nvkm_volt *volt = device->volt;
	struct nvkm_cstate *cstate;
	int ret;

	if (!list_empty(&pstate->list)) {
		cstate = list_entry(pstate->list.prev, typeof(*cstate), head);
	} else {
		cstate = &pstate->base;
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
			return ret;
		}
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
			return ret;
		}
	}

	ret = clk->func->calc(clk, cstate);
	if (ret == 0) {
		ret = clk->func->prog(clk);
		clk->func->tidy(clk);
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
	}

	return ret;
}

static void
nvkm_cstate_del(struct nvkm_cstate *cstate)
{
	list_del(&cstate->head);
	kfree(cstate);
}

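/* Build a cstate from entry @idx of the VBIOS cstep table and add it to
 * @pstate's cstate list.  Entries whose minimum voltage exceeds what the
 * regulator can supply are rejected, and core-domain frequencies are run
 * through nvkm_clk_adjust() against the boost table.
 */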
static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvkm_volt *volt = clk->subdev.device->volt;
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_cstate *cstate = NULL;
	struct nvbios_cstepX cstepX;
	u8 ver, hdr;
	u16 data;

	data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
	if (!data)
		return -ENOENT;

	if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv)
		return -EINVAL;

	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (!cstate)
		return -ENOMEM;

	*cstate = pstate->base;
	cstate->voltage = cstepX.voltage;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			u32 freq = nvkm_clk_adjust(clk, true, pstate->pstate,
						   domain->bios, cstepX.freq);
			cstate->domain[domain->name] = freq;
		}
		domain++;
	}

	list_add(&cstate->head, &pstate->list);
	return 0;
}

/******************************************************************************
 * P-States
 *****************************************************************************/
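/* Switch to performance state @pstatei: adjust the PCIe link, reclock VRAM
 * if the RAM implementation supports it (repeating while the calc step
 * requests another pass), then program the core clocks via
 * nvkm_cstate_prog().
 */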
static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_fb *fb = subdev->device->fb;
	struct nvkm_pci *pci = subdev->device->pci;
	struct nvkm_pstate *pstate;
	int ret, idx = 0;

	list_for_each_entry(pstate, &clk->states, head) {
		if (idx++ == pstatei)
			break;
	}

	nvkm_debug(subdev, "setting performance state %d\n", pstatei);
	clk->pstate = pstatei;

	nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);

	if (fb && fb->ram && fb->ram->func->calc) {
		struct nvkm_ram *ram = fb->ram;
		int khz = pstate->base.domain[nv_clk_src_mem];
		do {
			ret = ram->func->calc(ram, khz);
			if (ret == 0)
				ret = ram->func->prog(ram);
		} while (ret > 0);
		ram->func->tidy(ram);
	}

	return nvkm_cstate_prog(clk, pstate, 0);
}

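/* Deferred reclock handler.  Chooses the target pstate from the user request
 * for the current power source (AC/DC), falling back to the automatic level,
 * then applies the thermal (tstate) ceiling and dynamic (dstate) floor before
 * programming the result if it differs from the current pstate.
 */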
static void
nvkm_pstate_work(struct work_struct *work)
{
	struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
	struct nvkm_subdev *subdev = &clk->subdev;
	int pstate;

	if (!atomic_xchg(&clk->waiting, 0))
		return;
	clk->pwrsrc = power_supply_is_system_supplied();

	nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
		   clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
		   clk->astate, clk->tstate, clk->dstate);

	pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
	if (clk->state_nr && pstate != -1) {
		pstate = (pstate < 0) ? clk->astate : pstate;
		pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
		pstate = max(pstate, clk->dstate);
	} else {
		pstate = clk->pstate = -1;
	}

	nvkm_trace(subdev, "-> %d\n", pstate);
	if (pstate != clk->pstate) {
		int ret = nvkm_pstate_prog(clk, pstate);
		if (ret) {
			nvkm_error(subdev, "error setting pstate %d: %d\n",
				   pstate, ret);
		}
	}

	wake_up_all(&clk->wait);
	nvkm_notify_get(&clk->pwrsrc_ntfy);
}

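/* Schedule a pstate re-evaluation; the work item does the programming.
 * When @wait is set, block until it has completed.
 */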
static int
nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
{
	atomic_set(&clk->waiting, 1);
	schedule_work(&clk->work);
	if (wait)
		wait_event(clk->wait, !atomic_read(&clk->waiting));
	return 0;
}

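/* Log a summary of @pstate: the frequency, or frequency range across its
 * cstates, of up to three named clock domains.
 */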
static void
nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
{
	const struct nvkm_domain *clock = clk->domains - 1;
	struct nvkm_cstate *cstate;
	struct nvkm_subdev *subdev = &clk->subdev;
	char info[3][32] = { "", "", "" };
	char name[4] = "--";
	int i = -1;

	if (pstate->pstate != 0xff)
		snprintf(name, sizeof(name), "%02x", pstate->pstate);

	while ((++clock)->name != nv_clk_src_max) {
		u32 lo = pstate->base.domain[clock->name];
		u32 hi = lo;
		if (hi == 0)
			continue;

		nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
		list_for_each_entry(cstate, &pstate->list, head) {
			u32 freq = cstate->domain[clock->name];
			lo = min(lo, freq);
			hi = max(hi, freq);
			nvkm_debug(subdev, "%10d KHz\n", freq);
		}

		if (clock->mname && ++i < ARRAY_SIZE(info)) {
			lo /= clock->mdiv;
			hi /= clock->mdiv;
			if (lo == hi) {
				snprintf(info[i], sizeof(info[i]), "%s %d MHz",
					 clock->mname, lo);
			} else {
				snprintf(info[i], sizeof(info[i]),
					 "%s %d-%d MHz", clock->mname, lo, hi);
			}
		}
	}

	nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
}

static void
nvkm_pstate_del(struct nvkm_pstate *pstate)
{
	struct nvkm_cstate *cstate, *temp;

	list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
		nvkm_cstate_del(cstate);
	}

	list_del(&pstate->head);
	kfree(pstate);
}

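/* Construct a pstate from entry @idx of the VBIOS performance table, fill
 * in its base cstate from the per-domain sub-entries, and attach any
 * cstep-based cstates that reference it.
 */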
static int
nvkm_pstate_new(struct nvkm_clk *clk, int idx)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	const struct nvkm_domain *domain = clk->domains - 1;
	struct nvkm_pstate *pstate;
	struct nvkm_cstate *cstate;
	struct nvbios_cstepE cstepE;
	struct nvbios_perfE perfE;
	u8 ver, hdr, cnt, len;
	u16 data;

	data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
	if (!data)
		return -EINVAL;
	if (perfE.pstate == 0xff)
		return 0;

	pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
	if (!pstate)
		return -ENOMEM;
	cstate = &pstate->base;

	INIT_LIST_HEAD(&pstate->list);

	pstate->pstate = perfE.pstate;
	pstate->fanspeed = perfE.fanspeed;
	pstate->pcie_speed = perfE.pcie_speed;
	pstate->pcie_width = perfE.pcie_width;
	cstate->voltage = perfE.voltage;
	cstate->domain[nv_clk_src_core] = perfE.core;
	cstate->domain[nv_clk_src_shader] = perfE.shader;
	cstate->domain[nv_clk_src_mem] = perfE.memory;
	cstate->domain[nv_clk_src_vdec] = perfE.vdec;
	cstate->domain[nv_clk_src_dom6] = perfE.disp;

	while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
		struct nvbios_perfS perfS;
		u8 sver = ver, shdr = hdr;
		u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
					   &sver, &shdr, cnt, len, &perfS);
		if (perfSe == 0 || sver != 0x40)
			continue;

		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			perfS.v40.freq = nvkm_clk_adjust(clk, false,
							 pstate->pstate,
							 domain->bios,
							 perfS.v40.freq);
		}

		cstate->domain[domain->name] = perfS.v40.freq;
	}

	data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
	if (data) {
		int idx = cstepE.index;
		do {
			nvkm_cstate_new(clk, idx, pstate);
		} while (idx--);
	}

	nvkm_pstate_info(clk, pstate);
	list_add_tail(&pstate->head, &clk->states);
	clk->state_nr++;
	return 0;
}

/******************************************************************************
 * Adjustment triggers
 *****************************************************************************/
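/* Translate a user pstate request into an internal index.  -1 (disabled) and
 * -2 (automatic) pass through unchanged; any other value must match the
 * pstate id of a parsed state.  The result is returned with a +2 bias so
 * that negative error codes remain distinguishable.
 */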
static int
nvkm_clk_ustate_update(struct nvkm_clk *clk, int req)
{
	struct nvkm_pstate *pstate;
	int i = 0;

	if (!clk->allow_reclock)
		return -ENOSYS;

	if (req != -1 && req != -2) {
		list_for_each_entry(pstate, &clk->states, head) {
			if (pstate->pstate == req)
				break;
			i++;
		}

		if (pstate->pstate != req)
			return -EINVAL;
		req = i;
	}

	return req + 2;
}

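/* Parse an NvClkMode-style option: "auto" requests automatic reclocking,
 * "disabled" turns it off, and a number selects a specific pstate id.
 * Returns the same encoding stored in ustate_ac/ustate_dc (-2 auto,
 * -1 disabled, >= 0 state index).
 */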
static int
nvkm_clk_nstate(struct nvkm_clk *clk, const char *mode, int arglen)
{
	int ret = 1;

	if (clk->allow_reclock && !strncasecmpz(mode, "auto", arglen))
		return -2;

	if (strncasecmpz(mode, "disabled", arglen)) {
		char save = mode[arglen];
		long v;

		((char *)mode)[arglen] = '\0';
		if (!kstrtol(mode, 0, &v)) {
			ret = nvkm_clk_ustate_update(clk, v);
			if (ret < 0)
				ret = 1;
		}
		((char *)mode)[arglen] = save;
	}

	return ret - 2;
}

int
nvkm_clk_ustate(struct nvkm_clk *clk, int req, int pwr)
{
	int ret = nvkm_clk_ustate_update(clk, req);
	if (ret >= 0) {
		if (ret -= 2, pwr) clk->ustate_ac = ret;
		else		   clk->ustate_dc = ret;
		return nvkm_pstate_calc(clk, true);
	}
	return ret;
}

int
nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
{
	if (!rel) clk->astate  = req;
	if ( rel) clk->astate += rel;
	clk->astate = min(clk->astate, clk->state_nr - 1);
	clk->astate = max(clk->astate, 0);
	return nvkm_pstate_calc(clk, wait);
}

int
nvkm_clk_tstate(struct nvkm_clk *clk, int req, int rel)
{
	if (!rel) clk->tstate  = req;
	if ( rel) clk->tstate += rel;
	clk->tstate = min(clk->tstate, 0);
	clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
	return nvkm_pstate_calc(clk, true);
}

int
nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
{
	if (!rel) clk->dstate  = req;
	if ( rel) clk->dstate += rel;
	clk->dstate = min(clk->dstate, clk->state_nr - 1);
	clk->dstate = max(clk->dstate, 0);
	return nvkm_pstate_calc(clk, true);
}

static int
nvkm_clk_pwrsrc(struct nvkm_notify *notify)
{
	struct nvkm_clk *clk =
		container_of(notify, typeof(*clk), pwrsrc_ntfy);
	nvkm_pstate_calc(clk, false);
	return NVKM_NOTIFY_DROP;
}

/******************************************************************************
 * subdev base class implementation
 *****************************************************************************/

int
nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
	return clk->func->read(clk, src);
}

static int
nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	nvkm_notify_put(&clk->pwrsrc_ntfy);
	flush_work(&clk->work);
	if (clk->func->fini)
		clk->func->fini(clk);
	return 0;
}

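/* Subdev init: capture the boot clocks in a synthetic pstate (id 0xff),
 * then either defer to the implementation's own init routine or start with
 * automatic state selection.
 */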
static int
nvkm_clk_init(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	const struct nvkm_domain *clock = clk->domains;
	int ret;

	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
	INIT_LIST_HEAD(&clk->bstate.list);
	clk->bstate.pstate = 0xff;

	while (clock->name != nv_clk_src_max) {
		ret = nvkm_clk_read(clk, clock->name);
		if (ret < 0) {
			nvkm_error(subdev, "%02x freq unknown\n", clock->name);
			return ret;
		}
		clk->bstate.base.domain[clock->name] = ret;
		clock++;
	}

	nvkm_pstate_info(clk, &clk->bstate);

	if (clk->func->init)
		return clk->func->init(clk);

	clk->astate = clk->state_nr - 1;
	clk->tstate = 0;
	clk->dstate = 0;
	clk->pstate = -1;
	nvkm_pstate_calc(clk, true);
	return 0;
}

static void *
nvkm_clk_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	struct nvkm_pstate *pstate, *temp;

	nvkm_notify_fini(&clk->pwrsrc_ntfy);

	/* Early return if the pstates have been provided statically */
	if (clk->func->pstates)
		return clk;

	list_for_each_entry_safe(pstate, temp, &clk->states, head) {
		nvkm_pstate_del(pstate);
	}

	return clk;
}

static const struct nvkm_subdev_func
nvkm_clk = {
	.dtor = nvkm_clk_dtor,
	.init = nvkm_clk_init,
	.fini = nvkm_clk_fini,
};

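/* Common constructor: initialise the state list and work item, build the
 * pstate list (from the VBIOS unless a static table is provided), register
 * for power-source change notifications and honour the NvClkMode,
 * NvClkModeAC and NvClkModeDC config options.
 */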
int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      int index, bool allow_reclock, struct nvkm_clk *clk)
{
	int ret, idx, arglen;
	const char *mode;

	nvkm_subdev_ctor(&nvkm_clk, device, index, &clk->subdev);
	clk->func = func;
	INIT_LIST_HEAD(&clk->states);
	clk->domains = func->domains;
	clk->ustate_ac = -1;
	clk->ustate_dc = -1;
	clk->allow_reclock = allow_reclock;

	INIT_WORK(&clk->work, nvkm_pstate_work);
	init_waitqueue_head(&clk->wait);
	atomic_set(&clk->waiting, 0);

	/* If no pstates are provided, try and fetch them from the BIOS */
	if (!func->pstates) {
		idx = 0;
		do {
			ret = nvkm_pstate_new(clk, idx++);
		} while (ret == 0);
	} else {
		for (idx = 0; idx < func->nr_pstates; idx++)
			list_add_tail(&func->pstates[idx].head, &clk->states);
		clk->state_nr = func->nr_pstates;
	}

	ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
			       NULL, 0, 0, &clk->pwrsrc_ntfy);
	if (ret)
		return ret;

	mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen);
	if (mode) {
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
	}

	mode = nvkm_stropt(device->cfgopt, "NvClkModeAC", &arglen);
	if (mode)
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);

	mode = nvkm_stropt(device->cfgopt, "NvClkModeDC", &arglen);
	if (mode)
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);

	return 0;
}

int
nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      int index, bool allow_reclock, struct nvkm_clk **pclk)
{
	if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
}