/*
 * POWERNV cpufreq driver for the IBM POWER processors
 *
 * (C) Copyright IBM 2014
 *
 * Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt)	"powernv-cpufreq: " fmt

#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/smp.h>
#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <trace/events/power.h>

#include <asm/cputhreads.h>
#include <asm/firmware.h>
#include <asm/reg.h>
#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
#include <asm/opal.h>
#include <linux/timer.h>

#define POWERNV_MAX_PSTATES	256
#define PMSR_PSAFE_ENABLE	(1UL << 30)
#define PMSR_SPR_EM_DISABLE	(1UL << 31)
#define MAX_PSTATE_SHIFT	32
#define LPSTATE_SHIFT		48
#define GPSTATE_SHIFT		56
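
/*
 * PMSR/PMCR field layout implied by the shifts above (see the
 * extract_*_pstate() helpers below): bits 32..39 carry the max pstate,
 * bits 48..55 the local pstate and bits 56..63 the global pstate.
 */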

#define MAX_RAMP_DOWN_TIME	5120
/*
 * On an idle system we want the global pstate to ramp down from its max
 * value to min over a span of ~5 secs. Also we want it to initially ramp
 * down slowly and then ramp down rapidly later on.
 *
 * This gives a percentage rampdown for time elapsed in milliseconds.
 * ramp_down_percent = ((ms * ms) >> 18)
 *		     ~= 3.8 * (sec * sec)
 *
 * At    0 ms	ramp_down_percent = 0
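 * At 2560 ms	ramp_down_percent = 25	(2560 * 2560 >> 18 = 25)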
 * At 5120 ms	ramp_down_percent = 100
 */
#define ramp_down_percent(time)	((time * time) >> 18)

/* Interval after which the timer is queued to bring down global pstate */
#define GPSTATE_TIMER_INTERVAL	2000

/**
 * struct global_pstate_info -	Per policy data structure to maintain history of
 *				global pstates
 * @highest_lpstate_idx:	The local pstate index from which we are
 *				ramping down
 * @elapsed_time:		Time in ms spent in ramping down from
 *				highest_lpstate_idx
 * @last_sampled_time:		Time from boot in ms when global pstates were
 *				last set
 * @last_lpstate_idx:		Last set value of local pstate, in terms of
 *				cpufreq table index
 * @last_gpstate_idx:		Last set value of global pstate, in terms of
 *				cpufreq table index
 * @timer:			Is used for ramping down if cpu goes idle for
 *				a long time with global pstate held high
 * @gpstate_lock:		A spinlock to maintain synchronization between
 *				routines called by the timer handler and
 *				governor's target_index calls
 * @policy:			Associated cpufreq policy
 */
struct global_pstate_info {
	int highest_lpstate_idx;
	unsigned int elapsed_time;
	unsigned int last_sampled_time;
	int last_lpstate_idx;
	int last_gpstate_idx;
	spinlock_t gpstate_lock;
	struct timer_list timer;
	struct cpufreq_policy *policy;
};

static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
u32 pstate_sign_prefix;
static bool rebooting, throttled, occ_reset;

static const char * const throttle_reason[] = {
	"No throttling",
	"Power Cap",
	"Processor Over Temperature",
	"Power Supply Failure",
	"Over Current",
	"OCC Reset"
};

enum throttle_reason_type {
	NO_THROTTLE = 0,
	POWERCAP,
	CPU_OVERTEMP,
	POWER_SUPPLY_FAILURE,
	OVERCURRENT,
	OCC_RESET_THROTTLE,
	OCC_MAX_REASON
};

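/* Per-chip throttling state, shared by every CPU on the chip. */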
static struct chip {
	unsigned int id;
	bool throttled;
	bool restore;
	u8 throttle_reason;
	cpumask_t mask;
	struct work_struct throttle;
	int throttle_turbo;
	int throttle_sub_turbo;
	int reason[OCC_MAX_REASON];
} *chips;

static int nr_chips;
static DEFINE_PER_CPU(struct chip *, chip_info);

/*
 * Note:
 * The set of pstates consists of contiguous integers.
 * powernv_pstate_info stores the index of the frequency table for
 * max, min and nominal frequencies. It also stores the number of
 * available frequencies.
 *
 * powernv_pstate_info.nominal indicates the index to the highest
 * non-turbo frequency.
 */
static struct powernv_pstate_info {
	unsigned int min;
	unsigned int max;
	unsigned int nominal;
	unsigned int nr_pstates;
	bool wof_enabled;
} powernv_pstate_info;

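/*
 * Extract the 8-bit pstate field at @shift from a PMSR/PMCR value and
 * restore its upper bits from pstate_sign_prefix (derived from
 * ibm,pstate-min), since pstate ids may be negative.
 */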
static inline int extract_pstate(u64 pmsr_val, unsigned int shift)
{
	int ret = ((pmsr_val >> shift) & 0xFF);

	if (!ret)
		return ret;

	return (pstate_sign_prefix | ret);
}

#define extract_local_pstate(x)		extract_pstate(x, LPSTATE_SHIFT)
#define extract_global_pstate(x)	extract_pstate(x, GPSTATE_SHIFT)
#define extract_max_pstate(x)		extract_pstate(x, MAX_PSTATE_SHIFT)

/* Use the following functions for conversions between pstate_id and index */
static inline int idx_to_pstate(unsigned int i)
{
	if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
		pr_warn_once("index %u is out of bound\n", i);
		return powernv_freqs[powernv_pstate_info.nominal].driver_data;
	}

	return powernv_freqs[i].driver_data;
}

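/*
 * Pstate ids may run in either direction (the fastest pstate can be the
 * numerically smallest or largest id), so bounds-check both orderings.
 */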
static inline unsigned int pstate_to_idx(int pstate)
{
	int min = powernv_freqs[powernv_pstate_info.min].driver_data;
	int max = powernv_freqs[powernv_pstate_info.max].driver_data;

	if (min > 0) {
		if (unlikely((pstate < max) || (pstate > min))) {
			pr_warn_once("pstate %d is out of bound\n", pstate);
			return powernv_pstate_info.nominal;
		}
	} else {
		if (unlikely((pstate > max) || (pstate < min))) {
			pr_warn_once("pstate %d is out of bound\n", pstate);
			return powernv_pstate_info.nominal;
		}
	}
	/*
	 * abs() is deliberately used so that it works with
	 * both monotonically increasing and decreasing
	 * pstate values
	 */
	return abs(pstate - idx_to_pstate(powernv_pstate_info.max));
}

static inline void reset_gpstates(struct cpufreq_policy *policy)
{
	struct global_pstate_info *gpstates = policy->driver_data;

	gpstates->highest_lpstate_idx = 0;
	gpstates->elapsed_time = 0;
	gpstates->last_sampled_time = 0;
	gpstates->last_lpstate_idx = 0;
	gpstates->last_gpstate_idx = 0;
}

/*
 * Initialize the freq table based on data obtained
 * from the firmware passed via device-tree
 */
static int init_powernv_pstates(void)
{
	struct device_node *power_mgt;
	int i, nr_pstates = 0;
	const __be32 *pstate_ids, *pstate_freqs;
	u32 len_ids, len_freqs;
	u32 pstate_min, pstate_max, pstate_nominal;
	u32 pstate_turbo, pstate_ultra_turbo;

	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!power_mgt) {
		pr_warn("power-mgt node not found\n");
		return -ENODEV;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) {
		pr_warn("ibm,pstate-min node not found\n");
		return -ENODEV;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) {
		pr_warn("ibm,pstate-max node not found\n");
		return -ENODEV;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-nominal",
				 &pstate_nominal)) {
		pr_warn("ibm,pstate-nominal not found\n");
		return -ENODEV;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-ultra-turbo",
				 &pstate_ultra_turbo)) {
		powernv_pstate_info.wof_enabled = false;
		goto next;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-turbo",
				 &pstate_turbo)) {
		powernv_pstate_info.wof_enabled = false;
		goto next;
	}

	if (pstate_turbo == pstate_ultra_turbo)
		powernv_pstate_info.wof_enabled = false;
	else
		powernv_pstate_info.wof_enabled = true;

next:
	pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
		pstate_nominal, pstate_max);
	pr_info("Workload Optimized Frequency is %s in the platform\n",
		(powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");

	pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
	if (!pstate_ids) {
		pr_warn("ibm,pstate-ids not found\n");
		return -ENODEV;
	}

	pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz",
				       &len_freqs);
	if (!pstate_freqs) {
		pr_warn("ibm,pstate-frequencies-mhz not found\n");
		return -ENODEV;
	}

	if (len_ids != len_freqs) {
		pr_warn("Entries in ibm,pstate-ids and "
			"ibm,pstate-frequencies-mhz do not match\n");
	}

	nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
	if (!nr_pstates) {
		pr_warn("No PStates found\n");
		return -ENODEV;
	}

	powernv_pstate_info.nr_pstates = nr_pstates;
	pr_debug("NR PStates %d\n", nr_pstates);

	pstate_sign_prefix = pstate_min & ~0xFF;

	for (i = 0; i < nr_pstates; i++) {
		u32 id = be32_to_cpu(pstate_ids[i]);
		u32 freq = be32_to_cpu(pstate_freqs[i]);

		pr_debug("PState id %d freq %d MHz\n", id, freq);
		powernv_freqs[i].frequency = freq * 1000; /* kHz */
		powernv_freqs[i].driver_data = id;

		if (id == pstate_max)
			powernv_pstate_info.max = i;
		if (id == pstate_nominal)
			powernv_pstate_info.nominal = i;
		if (id == pstate_min)
			powernv_pstate_info.min = i;

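		/*
		 * Table entries faster than the turbo pstate (indices
		 * powernv_pstate_info.max .. i - 1) are the WOF boost
		 * frequencies; flag them as such for cpufreq.
		 */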
		if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
			int j;

			for (j = i - 1; j >= (int)powernv_pstate_info.max; j--)
				powernv_freqs[j].flags = CPUFREQ_BOOST_FREQ;
		}
	}

	/* End of list marker entry */
	powernv_freqs[i].frequency = CPUFREQ_TABLE_END;
	return 0;
}

/* Returns the CPU frequency corresponding to the pstate_id. */
static unsigned int pstate_id_to_freq(int pstate_id)
{
	int i;

	i = pstate_to_idx(pstate_id);
	if (i >= powernv_pstate_info.nr_pstates || i < 0) {
		pr_warn("PState id %d outside of PState table, "
			"reporting nominal id %d instead\n",
			pstate_id, idx_to_pstate(powernv_pstate_info.nominal));
		i = powernv_pstate_info.nominal;
	}

	return powernv_freqs[i].frequency;
}

/*
 * cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by
 * the firmware
 */
static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
					 char *buf)
{
	return sprintf(buf, "%u\n",
		powernv_freqs[powernv_pstate_info.nominal].frequency);
}

struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
	__ATTR_RO(cpuinfo_nominal_freq);

#define SCALING_BOOST_FREQS_ATTR_INDEX	2

static struct freq_attr *powernv_cpu_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_cpuinfo_nominal_freq,
	&cpufreq_freq_attr_scaling_boost_freqs,
	NULL,
};

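/*
 * throttle_attr() generates a sysfs show() routine and a read-only
 * freq_attr for each per-chip throttle statistic below.
 */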
#define throttle_attr(name, member)					\
static ssize_t name##_show(struct cpufreq_policy *policy, char *buf)	\
{									\
	struct chip *chip = per_cpu(chip_info, policy->cpu);		\
									\
	return sprintf(buf, "%u\n", chip->member);			\
}									\
									\
static struct freq_attr throttle_attr_##name = __ATTR_RO(name)		\

throttle_attr(unthrottle, reason[NO_THROTTLE]);
throttle_attr(powercap, reason[POWERCAP]);
throttle_attr(overtemp, reason[CPU_OVERTEMP]);
throttle_attr(supply_fault, reason[POWER_SUPPLY_FAILURE]);
throttle_attr(overcurrent, reason[OVERCURRENT]);
throttle_attr(occ_reset, reason[OCC_RESET_THROTTLE]);
throttle_attr(turbo_stat, throttle_turbo);
throttle_attr(sub_turbo_stat, throttle_sub_turbo);

static struct attribute *throttle_attrs[] = {
	&throttle_attr_unthrottle.attr,
	&throttle_attr_powercap.attr,
	&throttle_attr_overtemp.attr,
	&throttle_attr_supply_fault.attr,
	&throttle_attr_overcurrent.attr,
	&throttle_attr_occ_reset.attr,
	&throttle_attr_turbo_stat.attr,
	&throttle_attr_sub_turbo_stat.attr,
	NULL,
};

static const struct attribute_group throttle_attr_grp = {
	.name	= "throttle_stats",
	.attrs	= throttle_attrs,
};

/* Helper routines */

/* Access helpers to power mgt SPR */

static inline unsigned long get_pmspr(unsigned long sprn)
{
	switch (sprn) {
	case SPRN_PMCR:
		return mfspr(SPRN_PMCR);

	case SPRN_PMICR:
		return mfspr(SPRN_PMICR);

	case SPRN_PMSR:
		return mfspr(SPRN_PMSR);
	}
	BUG();
}

static inline void set_pmspr(unsigned long sprn, unsigned long val)
{
	switch (sprn) {
	case SPRN_PMCR:
		mtspr(SPRN_PMCR, val);
		return;

	case SPRN_PMICR:
		mtspr(SPRN_PMICR, val);
		return;
	}
	BUG();
}

/*
 * Use objects of this type to query/update
 * pstates on a remote CPU via smp_call_function.
 */
struct powernv_smp_call_data {
	unsigned int freq;
	int pstate_id;
	int gpstate_id;
};

/*
 * powernv_read_cpu_freq: Reads the current frequency on this CPU.
 *
 * Called via smp_call_function.
 *
 * Note: The caller of the smp_call_function should pass an argument of
 * the type 'struct powernv_smp_call_data *' along with this function.
 *
 * The current frequency on this CPU will be returned via
 * ((struct powernv_smp_call_data *)arg)->freq;
 */
static void powernv_read_cpu_freq(void *arg)
{
	unsigned long pmspr_val;
	struct powernv_smp_call_data *freq_data = arg;

	pmspr_val = get_pmspr(SPRN_PMSR);
	freq_data->pstate_id = extract_local_pstate(pmspr_val);
	freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);

	pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
		 raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
		 freq_data->freq);
}

/*
 * powernv_cpufreq_get: Returns the CPU frequency as reported by the
 * firmware for CPU 'cpu'. This value is reported through the sysfs
 * file cpuinfo_cur_freq.
 */
static unsigned int powernv_cpufreq_get(unsigned int cpu)
{
	struct powernv_smp_call_data freq_data;

	smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
			      &freq_data, 1);

	return freq_data.freq;
}

/*
 * set_pstate: Sets the pstate on this CPU.
 *
 * This is called via an smp_call_function.
 *
 * The caller must ensure that freq_data is of the type
 * (struct powernv_smp_call_data *) and the pstate_id which needs to be set
 * on this CPU should be present in freq_data->pstate_id.
 */
static void set_pstate(void *data)
{
	unsigned long val;
	struct powernv_smp_call_data *freq_data = data;
	unsigned long pstate_ul = freq_data->pstate_id;
	unsigned long gpstate_ul = freq_data->gpstate_id;

	val = get_pmspr(SPRN_PMCR);
	val = val & 0x0000FFFFFFFFFFFFULL;

	pstate_ul = pstate_ul & 0xFF;
	gpstate_ul = gpstate_ul & 0xFF;

	/* Set both global(bits 56..63) and local(bits 48..55) PStates */
	val = val | (gpstate_ul << 56) | (pstate_ul << 48);

	pr_debug("Setting cpu %d pmcr to %016lX\n",
		 raw_smp_processor_id(), val);
	set_pmspr(SPRN_PMCR, val);
}

/*
 * get_nominal_index: Returns the index corresponding to the nominal
 * pstate in the cpufreq table
 */
static inline unsigned int get_nominal_index(void)
{
	return powernv_pstate_info.nominal;
}

static void powernv_cpufreq_throttle_check(void *data)
{
	struct chip *chip;
	unsigned int cpu = smp_processor_id();
	unsigned long pmsr;
	int pmsr_pmax;
	unsigned int pmsr_pmax_idx;

	pmsr = get_pmspr(SPRN_PMSR);
	chip = this_cpu_read(chip_info);

	/* Check for Pmax Capping */
	pmsr_pmax = extract_max_pstate(pmsr);
	pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
	if (pmsr_pmax_idx != powernv_pstate_info.max) {
		if (chip->throttled)
			goto next;
		chip->throttled = true;
		if (pmsr_pmax_idx > powernv_pstate_info.nominal) {
			pr_warn_once("CPU %d on Chip %u has Pmax(%d) reduced below nominal frequency(%d)\n",
				     cpu, chip->id, pmsr_pmax,
				     idx_to_pstate(powernv_pstate_info.nominal));
			chip->throttle_sub_turbo++;
		} else {
			chip->throttle_turbo++;
		}
		trace_powernv_throttle(chip->id,
				       throttle_reason[chip->throttle_reason],
				       pmsr_pmax);
	} else if (chip->throttled) {
		chip->throttled = false;
		trace_powernv_throttle(chip->id,
				       throttle_reason[chip->throttle_reason],
				       pmsr_pmax);
	}

	/* Check if Psafe_mode_active is set in PMSR. */
next:
	if (pmsr & PMSR_PSAFE_ENABLE) {
		throttled = true;
		pr_info("Pstate set to safe frequency\n");
	}

	/* Check if SPR_EM_DISABLE is set in PMSR */
	if (pmsr & PMSR_SPR_EM_DISABLE) {
		throttled = true;
		pr_info("Frequency Control disabled from OS\n");
	}

	if (throttled) {
		pr_info("PMSR = %16lx\n", pmsr);
		pr_warn("CPU Frequency could be throttled\n");
	}
}

/**
 * calc_global_pstate - Calculate global pstate
 * @elapsed_time:		Elapsed time in milliseconds
 * @highest_lpstate_idx:	pstate index from which we are ramping down
 * @local_pstate_idx:		New local pstate
 *
 * Finds the appropriate global pstate based on the pstate from which we are
 * ramping down and the time elapsed in ramping down. It follows a quadratic
 * equation which ensures that it ramps down to pmin within 5 seconds.
 */
static inline int calc_global_pstate(unsigned int elapsed_time,
				     int highest_lpstate_idx,
				     int local_pstate_idx)
{
	int index_diff;

	/*
	 * Using ramp_down_percent we get the percentage of rampdown
	 * that we are expecting to be dropping. The difference between
	 * highest_lpstate_idx and powernv_pstate_info.min gives an absolute
	 * number of how many pstates we will drop eventually by the end of
	 * 5 seconds, then just scale it to get the number of pstates to be
	 * dropped.
	 */
	index_diff = ((int)ramp_down_percent(elapsed_time) *
		      (powernv_pstate_info.min - highest_lpstate_idx)) / 100;

	/* Ensure that global pstate is >= to local pstate */
	if (highest_lpstate_idx + index_diff >= local_pstate_idx)
		return local_pstate_idx;
	else
		return highest_lpstate_idx + index_diff;
}

static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
{
	unsigned int timer_interval;

	/*
	 * Set up the timer to fire after GPSTATE_TIMER_INTERVAL ms, but
	 * if that would exceed MAX_RAMP_DOWN_TIME ms of total ramp-down
	 * time, set it to fire exactly at MAX_RAMP_DOWN_TIME ms instead.
	 */
	if ((gpstates->elapsed_time + GPSTATE_TIMER_INTERVAL)
	     > MAX_RAMP_DOWN_TIME)
		timer_interval = MAX_RAMP_DOWN_TIME - gpstates->elapsed_time;
	else
		timer_interval = GPSTATE_TIMER_INTERVAL;

	mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
}

/**
 * gpstate_timer_handler
 *
 * @t: Timer embedded in the global_pstate_info of the policy on which
 *     the timer was queued
 *
 * This handler brings down the global pstate closer to the local pstate
 * according to a quadratic equation. Queues a new timer if the global
 * pstate is still not equal to the local pstate.
 */
void gpstate_timer_handler(struct timer_list *t)
{
	struct global_pstate_info *gpstates = from_timer(gpstates, t, timer);
	struct cpufreq_policy *policy = gpstates->policy;
	int gpstate_idx, lpstate_idx;
	unsigned long val;
	unsigned int time_diff = jiffies_to_msecs(jiffies)
					- gpstates->last_sampled_time;
	struct powernv_smp_call_data freq_data;

	if (!spin_trylock(&gpstates->gpstate_lock))
		return;
	/*
	 * If the timer has migrated to a different cpu then bring
	 * it back to one of the policy->cpus
	 */
	if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
		gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
		add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
		spin_unlock(&gpstates->gpstate_lock);
		return;
	}

	/*
	 * If PMCR was last updated using fast_switch then we may have
	 * a stale value in gpstates->last_lpstate_idx. Hence, read from
	 * PMCR to get correct data.
	 */
	val = get_pmspr(SPRN_PMCR);
	freq_data.gpstate_id = extract_global_pstate(val);
	freq_data.pstate_id = extract_local_pstate(val);
	if (freq_data.gpstate_id == freq_data.pstate_id) {
		reset_gpstates(policy);
		spin_unlock(&gpstates->gpstate_lock);
		return;
	}

	gpstates->last_sampled_time += time_diff;
	gpstates->elapsed_time += time_diff;

	if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
		gpstate_idx = pstate_to_idx(freq_data.pstate_id);
		lpstate_idx = gpstate_idx;
		reset_gpstates(policy);
		gpstates->highest_lpstate_idx = gpstate_idx;
	} else {
		lpstate_idx = pstate_to_idx(freq_data.pstate_id);
		gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
						 gpstates->highest_lpstate_idx,
						 lpstate_idx);
	}
	freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
	gpstates->last_gpstate_idx = gpstate_idx;
	gpstates->last_lpstate_idx = lpstate_idx;
	/*
	 * If local pstate is equal to global pstate, rampdown is over
	 * So timer is not required to be queued.
	 */
	if (gpstate_idx != gpstates->last_lpstate_idx)
		queue_gpstate_timer(gpstates);

	set_pstate(&freq_data);
	spin_unlock(&gpstates->gpstate_lock);
}

/*
 * powernv_cpufreq_target_index: Sets the frequency corresponding to
 * the cpufreq table entry indexed by new_index on the cpus in the
 * mask policy->cpus
 */
static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
					unsigned int new_index)
{
	struct powernv_smp_call_data freq_data;
	unsigned int cur_msec, gpstate_idx;
	struct global_pstate_info *gpstates = policy->driver_data;

	if (unlikely(rebooting) && new_index != get_nominal_index())
		return 0;

	if (!throttled) {
		/* we don't want to be preempted while
		 * checking if the CPU frequency has been throttled
		 */
		preempt_disable();
		powernv_cpufreq_throttle_check(NULL);
		preempt_enable();
	}

	cur_msec = jiffies_to_msecs(get_jiffies_64());

	spin_lock(&gpstates->gpstate_lock);
	freq_data.pstate_id = idx_to_pstate(new_index);

	if (!gpstates->last_sampled_time) {
		gpstate_idx = new_index;
		gpstates->highest_lpstate_idx = new_index;
		goto gpstates_done;
	}

	if (gpstates->last_gpstate_idx < new_index) {
		gpstates->elapsed_time += cur_msec -
						gpstates->last_sampled_time;

		/*
		 * If it has been ramping down for more than
		 * MAX_RAMP_DOWN_TIME we should reset all global pstate
		 * related data. Set it equal to local pstate to start fresh.
		 */
		if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
			reset_gpstates(policy);
			gpstates->highest_lpstate_idx = new_index;
			gpstate_idx = new_index;
		} else {
			/* Elapsed time is less than 5 seconds, continue to rampdown */
			gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
							 gpstates->highest_lpstate_idx,
							 new_index);
		}
	} else {
		reset_gpstates(policy);
		gpstates->highest_lpstate_idx = new_index;
		gpstate_idx = new_index;
	}

	/*
	 * If local pstate is equal to global pstate, rampdown is over
	 * So timer is not required to be queued.
	 */
	if (gpstate_idx != new_index)
		queue_gpstate_timer(gpstates);
	else
		del_timer_sync(&gpstates->timer);

gpstates_done:
	freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
	gpstates->last_sampled_time = cur_msec;
	gpstates->last_gpstate_idx = gpstate_idx;
	gpstates->last_lpstate_idx = new_index;

	spin_unlock(&gpstates->gpstate_lock);

	/*
	 * Use smp_call_function to send IPI and execute the
	 * mtspr on target CPU.  We could do that without IPI
	 * if current CPU is within policy->cpus (core)
	 */
	smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
	return 0;
}

static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	int base, i, ret;
	struct kernfs_node *kn;
	struct global_pstate_info *gpstates;

	base = cpu_first_thread_sibling(policy->cpu);

	for (i = 0; i < threads_per_core; i++)
		cpumask_set_cpu(base + i, policy->cpus);

	kn = kernfs_find_and_get(policy->kobj.sd, throttle_attr_grp.name);
	if (!kn) {
		int ret;

		ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp);
		if (ret) {
			pr_info("Failed to create throttle stats directory for cpu %d\n",
				policy->cpu);
			return ret;
		}
	} else {
		kernfs_put(kn);
	}

	gpstates = kzalloc(sizeof(*gpstates), GFP_KERNEL);
	if (!gpstates)
		return -ENOMEM;

	policy->driver_data = gpstates;

	/* initialize timer */
	gpstates->policy = policy;
	timer_setup(&gpstates->timer, gpstate_timer_handler,
		    TIMER_PINNED | TIMER_DEFERRABLE);
	gpstates->timer.expires = jiffies +
				  msecs_to_jiffies(GPSTATE_TIMER_INTERVAL);
	spin_lock_init(&gpstates->gpstate_lock);
	ret = cpufreq_table_validate_and_show(policy, powernv_freqs);

	if (ret < 0) {
		kfree(policy->driver_data);
		return ret;
	}

	policy->fast_switch_possible = true;
	return ret;
}

static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	/* timer is deleted in cpufreq_cpu_stop() */
	kfree(policy->driver_data);

	return 0;
}

static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
					   unsigned long action, void *unused)
{
	int cpu;
	struct cpufreq_policy cpu_policy;

	rebooting = true;
	for_each_online_cpu(cpu) {
		cpufreq_get_policy(&cpu_policy, cpu);
		powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
	}

	return NOTIFY_DONE;
}

static struct notifier_block powernv_cpufreq_reboot_nb = {
	.notifier_call = powernv_cpufreq_reboot_notifier,
};

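/*
 * Worker: re-check the throttle state on one online CPU of the chip and,
 * when a restore was requested after an OCC reset, drive each policy on
 * the chip back to its current target frequency.
 */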
void powernv_cpufreq_work_fn(struct work_struct *work)
{
	struct chip *chip = container_of(work, struct chip, throttle);
	unsigned int cpu;
	cpumask_t mask;

	get_online_cpus();
	cpumask_and(&mask, &chip->mask, cpu_online_mask);
	smp_call_function_any(&mask,
			      powernv_cpufreq_throttle_check, NULL, 0);

	if (!chip->restore)
		goto out;

	chip->restore = false;
	for_each_cpu(cpu, &mask) {
		int index;
		struct cpufreq_policy policy;

		cpufreq_get_policy(&policy, cpu);
		index = cpufreq_table_find_index_c(&policy, policy.cur);
		powernv_cpufreq_target_index(&policy, index);
		cpumask_andnot(&mask, &mask, policy.cpus);
	}
out:
	put_online_cpus();
}

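/*
 * Handle OCC notifications from OPAL: mark the system throttled across
 * OCC reset/load, record the per-chip throttle reason, and schedule the
 * per-chip worker to re-check (and possibly restore) frequencies.
 */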
static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
				   unsigned long msg_type, void *_msg)
{
	struct opal_msg *msg = _msg;
	struct opal_occ_msg omsg;
	int i;

	if (msg_type != OPAL_MSG_OCC)
		return 0;

	omsg.type = be64_to_cpu(msg->params[0]);

	switch (omsg.type) {
	case OCC_RESET:
		occ_reset = true;
		pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n");
		/*
		 * powernv_cpufreq_throttle_check() is called in
		 * target() callback which can detect the throttle state
		 * for governors like ondemand.
		 * But static governors will not call target() often thus
		 * report throttling here.
		 */
		if (!throttled) {
			throttled = true;
			pr_warn("CPU frequency is throttled for duration\n");
		}

		break;
	case OCC_LOAD:
		pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n");
		break;
	case OCC_THROTTLE:
		omsg.chip = be64_to_cpu(msg->params[1]);
		omsg.throttle_status = be64_to_cpu(msg->params[2]);

		if (occ_reset) {
			occ_reset = false;
			throttled = false;
			pr_info("OCC Active, CPU frequency is no longer throttled\n");

			for (i = 0; i < nr_chips; i++) {
				chips[i].restore = true;
				schedule_work(&chips[i].throttle);
			}

			return 0;
		}

		for (i = 0; i < nr_chips; i++)
			if (chips[i].id == omsg.chip)
				break;

		if (omsg.throttle_status >= 0 &&
		    omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS) {
			chips[i].throttle_reason = omsg.throttle_status;
			chips[i].reason[omsg.throttle_status]++;
		}

		if (!omsg.throttle_status)
			chips[i].restore = true;

		schedule_work(&chips[i].throttle);
	}
	return 0;
}

static struct notifier_block powernv_cpufreq_opal_nb = {
	.notifier_call	= powernv_cpufreq_occ_msg,
	.next		= NULL,
	.priority	= 0,
};

static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	struct powernv_smp_call_data freq_data;
	struct global_pstate_info *gpstates = policy->driver_data;

	freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
	freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
	smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
	del_timer_sync(&gpstates->timer);
}

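/*
 * Fast switching sets both the local and global pstate directly to the
 * target index, skipping the gpstate ramp-down bookkeeping; the gpstate
 * timer handler re-reads PMCR to resynchronize with this path.
 */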
static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	int index;
	struct powernv_smp_call_data freq_data;

	index = cpufreq_table_find_index_dl(policy, target_freq);
	freq_data.pstate_id = powernv_freqs[index].driver_data;
	freq_data.gpstate_id = powernv_freqs[index].driver_data;
	set_pstate(&freq_data);

	return powernv_freqs[index].frequency;
}

static struct cpufreq_driver powernv_cpufreq_driver = {
	.name		= "powernv-cpufreq",
	.flags		= CPUFREQ_CONST_LOOPS,
	.init		= powernv_cpufreq_cpu_init,
	.exit		= powernv_cpufreq_cpu_exit,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= powernv_cpufreq_target_index,
	.fast_switch	= powernv_fast_switch,
	.get		= powernv_cpufreq_get,
	.stop_cpu	= powernv_cpufreq_stop_cpu,
	.attr		= powernv_cpu_freq_attr,
};

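/*
 * Discover the distinct chip ids among all possible CPUs, allocate one
 * struct chip per chip and point each CPU's chip_info at its chip.
 */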
static int init_chip_info(void)
{
	unsigned int chip[256];
	unsigned int cpu, i;
	unsigned int prev_chip_id = UINT_MAX;

	for_each_possible_cpu(cpu) {
		unsigned int id = cpu_to_chip_id(cpu);

		if (prev_chip_id != id) {
			prev_chip_id = id;
			chip[nr_chips++] = id;
		}
	}

	chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
	if (!chips)
		return -ENOMEM;

	for (i = 0; i < nr_chips; i++) {
		chips[i].id = chip[i];
		cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
		INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
		for_each_cpu(cpu, &chips[i].mask)
			per_cpu(chip_info, cpu) = &chips[i];
	}

	return 0;
}

static inline void clean_chip_info(void)
{
	kfree(chips);
}

static inline void unregister_all_notifiers(void)
{
	opal_message_notifier_unregister(OPAL_MSG_OCC,
					 &powernv_cpufreq_opal_nb);
	unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
}

static int __init powernv_cpufreq_init(void)
{
	int rc = 0;

	/* Don't probe on pseries (guest) platforms */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return -ENODEV;

	/* Discover pstates from device tree and init */
	rc = init_powernv_pstates();
	if (rc)
		goto out;

	/* Populate chip info */
	rc = init_chip_info();
	if (rc)
		goto out;

	register_reboot_notifier(&powernv_cpufreq_reboot_nb);
	opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);

	if (powernv_pstate_info.wof_enabled)
		powernv_cpufreq_driver.boost_enabled = true;
	else
		powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;

	rc = cpufreq_register_driver(&powernv_cpufreq_driver);
	if (rc) {
		pr_info("Failed to register the cpufreq driver (%d)\n", rc);
		goto cleanup_notifiers;
	}

	if (powernv_pstate_info.wof_enabled)
		cpufreq_enable_boost_support();

	return 0;
cleanup_notifiers:
	unregister_all_notifiers();
	clean_chip_info();
out:
	pr_info("Platform driver disabled. System does not support PState control\n");
	return rc;
}
module_init(powernv_cpufreq_init);

static void __exit powernv_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&powernv_cpufreq_driver);
	unregister_all_notifiers();
	clean_chip_info();
}
module_exit(powernv_cpufreq_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");