/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <subdev/bios.h>
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
#include <subdev/fb.h>
#include <subdev/pci.h>
#include <subdev/therm.h>
#include <subdev/volt.h>

#include <core/option.h>
/******************************************************************************
 * misc
 *****************************************************************************/
40 nvkm_clk_adjust(struct nvkm_clk
*clk
, bool adjust
,
41 u8 pstate
, u8 domain
, u32 input
)
43 struct nvkm_bios
*bios
= clk
->subdev
.device
->bios
;
44 struct nvbios_boostE boostE
;
45 u8 ver
, hdr
, cnt
, len
;
48 data
= nvbios_boostEm(bios
, pstate
, &ver
, &hdr
, &cnt
, &len
, &boostE
);
50 struct nvbios_boostS boostS
;
51 u8 idx
= 0, sver
, shdr
;
54 input
= max(boostE
.min
, input
);
55 input
= min(boostE
.max
, input
);
59 subd
= nvbios_boostSp(bios
, idx
++, data
, &sver
, &shdr
,
61 if (subd
&& boostS
.domain
== domain
) {
63 input
= input
* boostS
.percent
/ 100;
64 input
= max(boostS
.min
, input
);
65 input
= min(boostS
.max
, input
);
/******************************************************************************
 * C-States
 *****************************************************************************/
78 nvkm_cstate_prog(struct nvkm_clk
*clk
, struct nvkm_pstate
*pstate
, int cstatei
)
80 struct nvkm_subdev
*subdev
= &clk
->subdev
;
81 struct nvkm_device
*device
= subdev
->device
;
82 struct nvkm_therm
*therm
= device
->therm
;
83 struct nvkm_volt
*volt
= device
->volt
;
84 struct nvkm_cstate
*cstate
;
87 if (!list_empty(&pstate
->list
)) {
88 cstate
= list_entry(pstate
->list
.prev
, typeof(*cstate
), head
);
90 cstate
= &pstate
->base
;
94 ret
= nvkm_therm_cstate(therm
, pstate
->fanspeed
, +1);
95 if (ret
&& ret
!= -ENODEV
) {
96 nvkm_error(subdev
, "failed to raise fan speed: %d\n", ret
);
102 ret
= nvkm_volt_set_id(volt
, cstate
->voltage
,
103 pstate
->base
.voltage
, +1);
104 if (ret
&& ret
!= -ENODEV
) {
105 nvkm_error(subdev
, "failed to raise voltage: %d\n", ret
);
110 ret
= clk
->func
->calc(clk
, cstate
);
112 ret
= clk
->func
->prog(clk
);
113 clk
->func
->tidy(clk
);
117 ret
= nvkm_volt_set_id(volt
, cstate
->voltage
,
118 pstate
->base
.voltage
, -1);
119 if (ret
&& ret
!= -ENODEV
)
120 nvkm_error(subdev
, "failed to lower voltage: %d\n", ret
);
124 ret
= nvkm_therm_cstate(therm
, pstate
->fanspeed
, -1);
125 if (ret
&& ret
!= -ENODEV
)
126 nvkm_error(subdev
, "failed to lower fan speed: %d\n", ret
);
133 nvkm_cstate_del(struct nvkm_cstate
*cstate
)
135 list_del(&cstate
->head
);
140 nvkm_cstate_new(struct nvkm_clk
*clk
, int idx
, struct nvkm_pstate
*pstate
)
142 struct nvkm_bios
*bios
= clk
->subdev
.device
->bios
;
143 struct nvkm_volt
*volt
= clk
->subdev
.device
->volt
;
144 const struct nvkm_domain
*domain
= clk
->domains
;
145 struct nvkm_cstate
*cstate
= NULL
;
146 struct nvbios_cstepX cstepX
;
150 data
= nvbios_cstepXp(bios
, idx
, &ver
, &hdr
, &cstepX
);
154 if (volt
&& nvkm_volt_map_min(volt
, cstepX
.voltage
) > volt
->max_uv
)
157 cstate
= kzalloc(sizeof(*cstate
), GFP_KERNEL
);
161 *cstate
= pstate
->base
;
162 cstate
->voltage
= cstepX
.voltage
;
164 while (domain
&& domain
->name
!= nv_clk_src_max
) {
165 if (domain
->flags
& NVKM_CLK_DOM_FLAG_CORE
) {
166 u32 freq
= nvkm_clk_adjust(clk
, true, pstate
->pstate
,
167 domain
->bios
, cstepX
.freq
);
168 cstate
->domain
[domain
->name
] = freq
;
173 list_add(&cstate
->head
, &pstate
->list
);
/******************************************************************************
 * P-States
 *****************************************************************************/
181 nvkm_pstate_prog(struct nvkm_clk
*clk
, int pstatei
)
183 struct nvkm_subdev
*subdev
= &clk
->subdev
;
184 struct nvkm_fb
*fb
= subdev
->device
->fb
;
185 struct nvkm_pci
*pci
= subdev
->device
->pci
;
186 struct nvkm_pstate
*pstate
;
189 list_for_each_entry(pstate
, &clk
->states
, head
) {
190 if (idx
++ == pstatei
)
194 nvkm_debug(subdev
, "setting performance state %d\n", pstatei
);
195 clk
->pstate
= pstatei
;
197 nvkm_pcie_set_link(pci
, pstate
->pcie_speed
, pstate
->pcie_width
);
199 if (fb
&& fb
->ram
&& fb
->ram
->func
->calc
) {
200 struct nvkm_ram
*ram
= fb
->ram
;
201 int khz
= pstate
->base
.domain
[nv_clk_src_mem
];
203 ret
= ram
->func
->calc(ram
, khz
);
205 ret
= ram
->func
->prog(ram
);
207 ram
->func
->tidy(ram
);
210 return nvkm_cstate_prog(clk
, pstate
, 0);
214 nvkm_pstate_work(struct work_struct
*work
)
216 struct nvkm_clk
*clk
= container_of(work
, typeof(*clk
), work
);
217 struct nvkm_subdev
*subdev
= &clk
->subdev
;
220 if (!atomic_xchg(&clk
->waiting
, 0))
222 clk
->pwrsrc
= power_supply_is_system_supplied();
224 nvkm_trace(subdev
, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
225 clk
->pstate
, clk
->pwrsrc
, clk
->ustate_ac
, clk
->ustate_dc
,
226 clk
->astate
, clk
->tstate
, clk
->dstate
);
228 pstate
= clk
->pwrsrc
? clk
->ustate_ac
: clk
->ustate_dc
;
229 if (clk
->state_nr
&& pstate
!= -1) {
230 pstate
= (pstate
< 0) ? clk
->astate
: pstate
;
231 pstate
= min(pstate
, clk
->state_nr
- 1 + clk
->tstate
);
232 pstate
= max(pstate
, clk
->dstate
);
234 pstate
= clk
->pstate
= -1;
237 nvkm_trace(subdev
, "-> %d\n", pstate
);
238 if (pstate
!= clk
->pstate
) {
239 int ret
= nvkm_pstate_prog(clk
, pstate
);
241 nvkm_error(subdev
, "error setting pstate %d: %d\n",
246 wake_up_all(&clk
->wait
);
247 nvkm_notify_get(&clk
->pwrsrc_ntfy
);
251 nvkm_pstate_calc(struct nvkm_clk
*clk
, bool wait
)
253 atomic_set(&clk
->waiting
, 1);
254 schedule_work(&clk
->work
);
256 wait_event(clk
->wait
, !atomic_read(&clk
->waiting
));
261 nvkm_pstate_info(struct nvkm_clk
*clk
, struct nvkm_pstate
*pstate
)
263 const struct nvkm_domain
*clock
= clk
->domains
- 1;
264 struct nvkm_cstate
*cstate
;
265 struct nvkm_subdev
*subdev
= &clk
->subdev
;
266 char info
[3][32] = { "", "", "" };
270 if (pstate
->pstate
!= 0xff)
271 snprintf(name
, sizeof(name
), "%02x", pstate
->pstate
);
273 while ((++clock
)->name
!= nv_clk_src_max
) {
274 u32 lo
= pstate
->base
.domain
[clock
->name
];
279 nvkm_debug(subdev
, "%02x: %10d KHz\n", clock
->name
, lo
);
280 list_for_each_entry(cstate
, &pstate
->list
, head
) {
281 u32 freq
= cstate
->domain
[clock
->name
];
284 nvkm_debug(subdev
, "%10d KHz\n", freq
);
287 if (clock
->mname
&& ++i
< ARRAY_SIZE(info
)) {
291 snprintf(info
[i
], sizeof(info
[i
]), "%s %d MHz",
294 snprintf(info
[i
], sizeof(info
[i
]),
295 "%s %d-%d MHz", clock
->mname
, lo
, hi
);
300 nvkm_debug(subdev
, "%s: %s %s %s\n", name
, info
[0], info
[1], info
[2]);
304 nvkm_pstate_del(struct nvkm_pstate
*pstate
)
306 struct nvkm_cstate
*cstate
, *temp
;
308 list_for_each_entry_safe(cstate
, temp
, &pstate
->list
, head
) {
309 nvkm_cstate_del(cstate
);
312 list_del(&pstate
->head
);
317 nvkm_pstate_new(struct nvkm_clk
*clk
, int idx
)
319 struct nvkm_bios
*bios
= clk
->subdev
.device
->bios
;
320 const struct nvkm_domain
*domain
= clk
->domains
- 1;
321 struct nvkm_pstate
*pstate
;
322 struct nvkm_cstate
*cstate
;
323 struct nvbios_cstepE cstepE
;
324 struct nvbios_perfE perfE
;
325 u8 ver
, hdr
, cnt
, len
;
328 data
= nvbios_perfEp(bios
, idx
, &ver
, &hdr
, &cnt
, &len
, &perfE
);
331 if (perfE
.pstate
== 0xff)
334 pstate
= kzalloc(sizeof(*pstate
), GFP_KERNEL
);
335 cstate
= &pstate
->base
;
339 INIT_LIST_HEAD(&pstate
->list
);
341 pstate
->pstate
= perfE
.pstate
;
342 pstate
->fanspeed
= perfE
.fanspeed
;
343 pstate
->pcie_speed
= perfE
.pcie_speed
;
344 pstate
->pcie_width
= perfE
.pcie_width
;
345 cstate
->voltage
= perfE
.voltage
;
346 cstate
->domain
[nv_clk_src_core
] = perfE
.core
;
347 cstate
->domain
[nv_clk_src_shader
] = perfE
.shader
;
348 cstate
->domain
[nv_clk_src_mem
] = perfE
.memory
;
349 cstate
->domain
[nv_clk_src_vdec
] = perfE
.vdec
;
350 cstate
->domain
[nv_clk_src_dom6
] = perfE
.disp
;
352 while (ver
>= 0x40 && (++domain
)->name
!= nv_clk_src_max
) {
353 struct nvbios_perfS perfS
;
354 u8 sver
= ver
, shdr
= hdr
;
355 u32 perfSe
= nvbios_perfSp(bios
, data
, domain
->bios
,
356 &sver
, &shdr
, cnt
, len
, &perfS
);
357 if (perfSe
== 0 || sver
!= 0x40)
360 if (domain
->flags
& NVKM_CLK_DOM_FLAG_CORE
) {
361 perfS
.v40
.freq
= nvkm_clk_adjust(clk
, false,
367 cstate
->domain
[domain
->name
] = perfS
.v40
.freq
;
370 data
= nvbios_cstepEm(bios
, pstate
->pstate
, &ver
, &hdr
, &cstepE
);
372 int idx
= cstepE
.index
;
374 nvkm_cstate_new(clk
, idx
, pstate
);
378 nvkm_pstate_info(clk
, pstate
);
379 list_add_tail(&pstate
->head
, &clk
->states
);
384 /******************************************************************************
385 * Adjustment triggers
386 *****************************************************************************/
388 nvkm_clk_ustate_update(struct nvkm_clk
*clk
, int req
)
390 struct nvkm_pstate
*pstate
;
393 if (!clk
->allow_reclock
)
396 if (req
!= -1 && req
!= -2) {
397 list_for_each_entry(pstate
, &clk
->states
, head
) {
398 if (pstate
->pstate
== req
)
403 if (pstate
->pstate
!= req
)
412 nvkm_clk_nstate(struct nvkm_clk
*clk
, const char *mode
, int arglen
)
416 if (clk
->allow_reclock
&& !strncasecmpz(mode
, "auto", arglen
))
419 if (strncasecmpz(mode
, "disabled", arglen
)) {
420 char save
= mode
[arglen
];
423 ((char *)mode
)[arglen
] = '\0';
424 if (!kstrtol(mode
, 0, &v
)) {
425 ret
= nvkm_clk_ustate_update(clk
, v
);
429 ((char *)mode
)[arglen
] = save
;
436 nvkm_clk_ustate(struct nvkm_clk
*clk
, int req
, int pwr
)
438 int ret
= nvkm_clk_ustate_update(clk
, req
);
440 if (ret
-= 2, pwr
) clk
->ustate_ac
= ret
;
441 else clk
->ustate_dc
= ret
;
442 return nvkm_pstate_calc(clk
, true);
448 nvkm_clk_astate(struct nvkm_clk
*clk
, int req
, int rel
, bool wait
)
450 if (!rel
) clk
->astate
= req
;
451 if ( rel
) clk
->astate
+= rel
;
452 clk
->astate
= min(clk
->astate
, clk
->state_nr
- 1);
453 clk
->astate
= max(clk
->astate
, 0);
454 return nvkm_pstate_calc(clk
, wait
);
458 nvkm_clk_tstate(struct nvkm_clk
*clk
, int req
, int rel
)
460 if (!rel
) clk
->tstate
= req
;
461 if ( rel
) clk
->tstate
+= rel
;
462 clk
->tstate
= min(clk
->tstate
, 0);
463 clk
->tstate
= max(clk
->tstate
, -(clk
->state_nr
- 1));
464 return nvkm_pstate_calc(clk
, true);
468 nvkm_clk_dstate(struct nvkm_clk
*clk
, int req
, int rel
)
470 if (!rel
) clk
->dstate
= req
;
471 if ( rel
) clk
->dstate
+= rel
;
472 clk
->dstate
= min(clk
->dstate
, clk
->state_nr
- 1);
473 clk
->dstate
= max(clk
->dstate
, 0);
474 return nvkm_pstate_calc(clk
, true);
478 nvkm_clk_pwrsrc(struct nvkm_notify
*notify
)
480 struct nvkm_clk
*clk
=
481 container_of(notify
, typeof(*clk
), pwrsrc_ntfy
);
482 nvkm_pstate_calc(clk
, false);
483 return NVKM_NOTIFY_DROP
;
486 /******************************************************************************
487 * subdev base class implementation
488 *****************************************************************************/
491 nvkm_clk_read(struct nvkm_clk
*clk
, enum nv_clk_src src
)
493 return clk
->func
->read(clk
, src
);
497 nvkm_clk_fini(struct nvkm_subdev
*subdev
, bool suspend
)
499 struct nvkm_clk
*clk
= nvkm_clk(subdev
);
500 nvkm_notify_put(&clk
->pwrsrc_ntfy
);
501 flush_work(&clk
->work
);
503 clk
->func
->fini(clk
);
508 nvkm_clk_init(struct nvkm_subdev
*subdev
)
510 struct nvkm_clk
*clk
= nvkm_clk(subdev
);
511 const struct nvkm_domain
*clock
= clk
->domains
;
514 memset(&clk
->bstate
, 0x00, sizeof(clk
->bstate
));
515 INIT_LIST_HEAD(&clk
->bstate
.list
);
516 clk
->bstate
.pstate
= 0xff;
518 while (clock
->name
!= nv_clk_src_max
) {
519 ret
= nvkm_clk_read(clk
, clock
->name
);
521 nvkm_error(subdev
, "%02x freq unknown\n", clock
->name
);
524 clk
->bstate
.base
.domain
[clock
->name
] = ret
;
528 nvkm_pstate_info(clk
, &clk
->bstate
);
531 return clk
->func
->init(clk
);
533 clk
->astate
= clk
->state_nr
- 1;
537 nvkm_pstate_calc(clk
, true);
542 nvkm_clk_dtor(struct nvkm_subdev
*subdev
)
544 struct nvkm_clk
*clk
= nvkm_clk(subdev
);
545 struct nvkm_pstate
*pstate
, *temp
;
547 nvkm_notify_fini(&clk
->pwrsrc_ntfy
);
549 /* Early return if the pstates have been provided statically */
550 if (clk
->func
->pstates
)
553 list_for_each_entry_safe(pstate
, temp
, &clk
->states
, head
) {
554 nvkm_pstate_del(pstate
);
560 static const struct nvkm_subdev_func
562 .dtor
= nvkm_clk_dtor
,
563 .init
= nvkm_clk_init
,
564 .fini
= nvkm_clk_fini
,
568 nvkm_clk_ctor(const struct nvkm_clk_func
*func
, struct nvkm_device
*device
,
569 int index
, bool allow_reclock
, struct nvkm_clk
*clk
)
571 int ret
, idx
, arglen
;
574 nvkm_subdev_ctor(&nvkm_clk
, device
, index
, &clk
->subdev
);
576 INIT_LIST_HEAD(&clk
->states
);
577 clk
->domains
= func
->domains
;
580 clk
->allow_reclock
= allow_reclock
;
582 INIT_WORK(&clk
->work
, nvkm_pstate_work
);
583 init_waitqueue_head(&clk
->wait
);
584 atomic_set(&clk
->waiting
, 0);
586 /* If no pstates are provided, try and fetch them from the BIOS */
587 if (!func
->pstates
) {
590 ret
= nvkm_pstate_new(clk
, idx
++);
593 for (idx
= 0; idx
< func
->nr_pstates
; idx
++)
594 list_add_tail(&func
->pstates
[idx
].head
, &clk
->states
);
595 clk
->state_nr
= func
->nr_pstates
;
598 ret
= nvkm_notify_init(NULL
, &device
->event
, nvkm_clk_pwrsrc
, true,
599 NULL
, 0, 0, &clk
->pwrsrc_ntfy
);
603 mode
= nvkm_stropt(device
->cfgopt
, "NvClkMode", &arglen
);
605 clk
->ustate_ac
= nvkm_clk_nstate(clk
, mode
, arglen
);
606 clk
->ustate_dc
= nvkm_clk_nstate(clk
, mode
, arglen
);
609 mode
= nvkm_stropt(device
->cfgopt
, "NvClkModeAC", &arglen
);
611 clk
->ustate_ac
= nvkm_clk_nstate(clk
, mode
, arglen
);
613 mode
= nvkm_stropt(device
->cfgopt
, "NvClkModeDC", &arglen
);
615 clk
->ustate_dc
= nvkm_clk_nstate(clk
, mode
, arglen
);
621 nvkm_clk_new_(const struct nvkm_clk_func
*func
, struct nvkm_device
*device
,
622 int index
, bool allow_reclock
, struct nvkm_clk
**pclk
)
624 if (!(*pclk
= kzalloc(sizeof(**pclk
), GFP_KERNEL
)))
626 return nvkm_clk_ctor(func
, device
, index
, allow_reclock
, *pclk
);