/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
16 | ||
4471a34f VK |
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
18 | ||
2aacdfff | 19 | #include <linux/export.h> |
20 | #include <linux/kernel_stat.h> | |
4d5dcc42 | 21 | #include <linux/slab.h> |
4471a34f VK |
22 | |
23 | #include "cpufreq_governor.h" | |
24 | ||
/* Serializes governor init/exit/start/stop/limits across all policies. */
DEFINE_MUTEX(dbs_data_mutex);
EXPORT_SYMBOL_GPL(dbs_data_mutex);
27 | ||
ea59ee0d | 28 | static struct attribute_group *get_sysfs_attr(struct dbs_governor *gov) |
4d5dcc42 | 29 | { |
ea59ee0d RW |
30 | return have_governor_per_policy() ? |
31 | gov->attr_group_gov_pol : gov->attr_group_gov_sys; | |
4d5dcc42 VK |
32 | } |
33 | ||
d10b5eb5 | 34 | void dbs_check_cpu(struct cpufreq_policy *policy) |
4471a34f | 35 | { |
d10b5eb5 | 36 | int cpu = policy->cpu; |
ea59ee0d | 37 | struct dbs_governor *gov = dbs_governor_of(policy); |
bc505475 RW |
38 | struct policy_dbs_info *policy_dbs = policy->governor_data; |
39 | struct dbs_data *dbs_data = policy_dbs->dbs_data; | |
4471a34f VK |
40 | struct od_dbs_tuners *od_tuners = dbs_data->tuners; |
41 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; | |
18b46abd | 42 | unsigned int sampling_rate; |
4471a34f VK |
43 | unsigned int max_load = 0; |
44 | unsigned int ignore_nice; | |
45 | unsigned int j; | |
46 | ||
ea59ee0d | 47 | if (gov->governor == GOV_ONDEMAND) { |
18b46abd | 48 | struct od_cpu_dbs_info_s *od_dbs_info = |
ea59ee0d | 49 | gov->get_cpu_dbs_info_s(cpu); |
18b46abd SB |
50 | |
51 | /* | |
52 | * Sometimes, the ondemand governor uses an additional | |
53 | * multiplier to give long delays. So apply this multiplier to | |
54 | * the 'sampling_rate', so as to keep the wake-up-from-idle | |
55 | * detection logic a bit conservative. | |
56 | */ | |
57 | sampling_rate = od_tuners->sampling_rate; | |
58 | sampling_rate *= od_dbs_info->rate_mult; | |
59 | ||
6c4640c3 | 60 | ignore_nice = od_tuners->ignore_nice_load; |
18b46abd SB |
61 | } else { |
62 | sampling_rate = cs_tuners->sampling_rate; | |
6c4640c3 | 63 | ignore_nice = cs_tuners->ignore_nice_load; |
18b46abd | 64 | } |
4471a34f | 65 | |
dfa5bb62 | 66 | /* Get Absolute Load */ |
4471a34f | 67 | for_each_cpu(j, policy->cpus) { |
875b8508 | 68 | struct cpu_dbs_info *j_cdbs; |
9366d840 SK |
69 | u64 cur_wall_time, cur_idle_time; |
70 | unsigned int idle_time, wall_time; | |
4471a34f | 71 | unsigned int load; |
9366d840 | 72 | int io_busy = 0; |
4471a34f | 73 | |
ea59ee0d | 74 | j_cdbs = gov->get_cpu_cdbs(j); |
4471a34f | 75 | |
9366d840 SK |
76 | /* |
77 | * For the purpose of ondemand, waiting for disk IO is | |
78 | * an indication that you're performance critical, and | |
79 | * not that the system is actually idle. So do not add | |
80 | * the iowait time to the cpu idle time. | |
81 | */ | |
ea59ee0d | 82 | if (gov->governor == GOV_ONDEMAND) |
9366d840 SK |
83 | io_busy = od_tuners->io_is_busy; |
84 | cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy); | |
4471a34f VK |
85 | |
86 | wall_time = (unsigned int) | |
87 | (cur_wall_time - j_cdbs->prev_cpu_wall); | |
88 | j_cdbs->prev_cpu_wall = cur_wall_time; | |
89 | ||
0df35026 CY |
90 | if (cur_idle_time < j_cdbs->prev_cpu_idle) |
91 | cur_idle_time = j_cdbs->prev_cpu_idle; | |
92 | ||
4471a34f VK |
93 | idle_time = (unsigned int) |
94 | (cur_idle_time - j_cdbs->prev_cpu_idle); | |
95 | j_cdbs->prev_cpu_idle = cur_idle_time; | |
96 | ||
97 | if (ignore_nice) { | |
bc505475 | 98 | struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu); |
4471a34f VK |
99 | u64 cur_nice; |
100 | unsigned long cur_nice_jiffies; | |
101 | ||
102 | cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - | |
103 | cdbs->prev_cpu_nice; | |
104 | /* | |
105 | * Assumption: nice time between sampling periods will | |
106 | * be less than 2^32 jiffies for 32 bit sys | |
107 | */ | |
108 | cur_nice_jiffies = (unsigned long) | |
109 | cputime64_to_jiffies64(cur_nice); | |
110 | ||
111 | cdbs->prev_cpu_nice = | |
112 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | |
113 | idle_time += jiffies_to_usecs(cur_nice_jiffies); | |
114 | } | |
115 | ||
4471a34f VK |
116 | if (unlikely(!wall_time || wall_time < idle_time)) |
117 | continue; | |
118 | ||
18b46abd SB |
119 | /* |
120 | * If the CPU had gone completely idle, and a task just woke up | |
121 | * on this CPU now, it would be unfair to calculate 'load' the | |
122 | * usual way for this elapsed time-window, because it will show | |
123 | * near-zero load, irrespective of how CPU intensive that task | |
124 | * actually is. This is undesirable for latency-sensitive bursty | |
125 | * workloads. | |
126 | * | |
127 | * To avoid this, we reuse the 'load' from the previous | |
128 | * time-window and give this task a chance to start with a | |
129 | * reasonably high CPU frequency. (However, we shouldn't over-do | |
130 | * this copy, lest we get stuck at a high load (high frequency) | |
131 | * for too long, even when the current system load has actually | |
132 | * dropped down. So we perform the copy only once, upon the | |
133 | * first wake-up from idle.) | |
134 | * | |
9be4fd2c RW |
135 | * Detecting this situation is easy: the governor's utilization |
136 | * update handler would not have run during CPU-idle periods. | |
137 | * Hence, an unusually large 'wall_time' (as compared to the | |
138 | * sampling rate) indicates this scenario. | |
c8ae481b VK |
139 | * |
140 | * prev_load can be zero in two cases and we must recalculate it | |
141 | * for both cases: | |
142 | * - during long idle intervals | |
143 | * - explicitly set to zero | |
18b46abd | 144 | */ |
c8ae481b VK |
145 | if (unlikely(wall_time > (2 * sampling_rate) && |
146 | j_cdbs->prev_load)) { | |
18b46abd | 147 | load = j_cdbs->prev_load; |
c8ae481b VK |
148 | |
149 | /* | |
150 | * Perform a destructive copy, to ensure that we copy | |
151 | * the previous load only once, upon the first wake-up | |
152 | * from idle. | |
153 | */ | |
154 | j_cdbs->prev_load = 0; | |
18b46abd SB |
155 | } else { |
156 | load = 100 * (wall_time - idle_time) / wall_time; | |
157 | j_cdbs->prev_load = load; | |
18b46abd | 158 | } |
4471a34f | 159 | |
4471a34f VK |
160 | if (load > max_load) |
161 | max_load = load; | |
162 | } | |
163 | ||
ea59ee0d | 164 | gov->gov_check_cpu(cpu, max_load); |
4471a34f VK |
165 | } |
166 | EXPORT_SYMBOL_GPL(dbs_check_cpu); | |
167 | ||
/*
 * gov_set_update_util - Register the governor's utilization update callbacks.
 * @policy_dbs: Policy-wide governor data.
 * @delay_us: Minimum sample delay in microseconds.
 *
 * The sample delay and last_sample_time are set up before the per-CPU
 * callbacks are registered, so dbs_update_util_handler() never observes
 * stale values once it can run.
 */
void gov_set_update_util(struct policy_dbs_info *policy_dbs,
			 unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	struct dbs_governor *gov = dbs_governor_of(policy);
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	/* Force the first callback invocation to take a sample. */
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);

		cpufreq_set_update_util_data(cpu, &cdbs->update_util);
	}
}
EXPORT_SYMBOL_GPL(gov_set_update_util);
031299b3 | 185 | |
9be4fd2c | 186 | static inline void gov_clear_update_util(struct cpufreq_policy *policy) |
031299b3 | 187 | { |
031299b3 | 188 | int i; |
58ddcead | 189 | |
9be4fd2c RW |
190 | for_each_cpu(i, policy->cpus) |
191 | cpufreq_set_update_util_data(i, NULL); | |
192 | ||
193 | synchronize_rcu(); | |
4471a34f VK |
194 | } |
195 | ||
/*
 * gov_cancel_work - Stop all deferred governor activity for a policy.
 * @policy_dbs: Policy-wide governor data.
 *
 * The ordering below is deliberate: block new work, unregister the
 * callbacks, then flush anything already in flight.
 */
static void gov_cancel_work(struct policy_dbs_info *policy_dbs)
{
	/* Tell dbs_update_util_handler() to skip queuing up work items. */
	atomic_inc(&policy_dbs->work_count);
	/*
	 * If dbs_update_util_handler() is already running, it may not notice
	 * the incremented work_count, so wait for it to complete to prevent its
	 * work item from being queued up after the cancel_work_sync() below.
	 */
	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	/* Nothing can queue work any more; reset the gate for a later start. */
	atomic_set(&policy_dbs->work_count, 0);
}
43e0ee36 | 210 | |
/*
 * dbs_work_handler - Process-context part of a governor sample.
 * @work: Embedded in struct policy_dbs_info.
 *
 * Runs the governor's timer function and stores the next sample delay,
 * then re-opens the work_count gate so dbs_update_util_handler() can
 * queue the next sample.
 */
static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;
	unsigned int delay;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->timer_mutex);
	delay = gov->gov_dbs_timer(policy);
	policy_dbs->sample_delay_ns = jiffies_to_nsecs(delay);
	mutex_unlock(&policy_dbs->timer_mutex);

	/*
	 * If the atomic operation below is reordered with respect to the
	 * sample delay modification, the utilization update handler may end
	 * up using a stale sample delay value.
	 */
	smp_mb__before_atomic();
	atomic_dec(&policy_dbs->work_count);
}
239 | ||
240 | static void dbs_irq_work(struct irq_work *irq_work) | |
241 | { | |
e40e7b25 | 242 | struct policy_dbs_info *policy_dbs; |
70f43e5e | 243 | |
e40e7b25 RW |
244 | policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work); |
245 | schedule_work(&policy_dbs->work); | |
70f43e5e VK |
246 | } |
247 | ||
/* Queue the policy's irq_work, on the current CPU where possible. */
static inline void gov_queue_irq_work(struct policy_dbs_info *policy_dbs)
{
#ifdef CONFIG_SMP
	irq_work_queue_on(&policy_dbs->irq_work, smp_processor_id());
#else
	irq_work_queue(&policy_dbs->irq_work);
#endif
}
256 | ||
/*
 * dbs_update_util_handler - Utilization update callback.
 * @data: Embedded in struct cpu_dbs_info.
 * @time: Current timestamp (same unit as sample_delay_ns; see the
 *        delta comparison below).
 * @util: Utilization value (unused here).
 * @max:  Utilization ceiling (unused here).
 *
 * Lock-free gatekeeper: work_count going 0 -> 1 means this caller owns
 * the decision to queue a sample; any other transition means work is
 * already queued/running or the governor is being stopped
 * (gov_cancel_work() holds the count elevated).
 */
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned long util, unsigned long max)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - The governor is being stopped.
	 * - It is too early (too little time from the previous sample).
	 */
	if (atomic_inc_return(&policy_dbs->work_count) == 1) {
		u64 delta_ns;

		delta_ns = time - policy_dbs->last_sample_time;
		if ((s64)delta_ns >= policy_dbs->sample_delay_ns) {
			policy_dbs->last_sample_time = time;
			/* Keep work_count elevated until dbs_work_handler(). */
			gov_queue_irq_work(policy_dbs);
			return;
		}
	}
	atomic_dec(&policy_dbs->work_count);
}
4447266b | 282 | |
4d5dcc42 | 283 | static void set_sampling_rate(struct dbs_data *dbs_data, |
ea59ee0d RW |
284 | struct dbs_governor *gov, |
285 | unsigned int sampling_rate) | |
4d5dcc42 | 286 | { |
ea59ee0d | 287 | if (gov->governor == GOV_CONSERVATIVE) { |
4d5dcc42 VK |
288 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; |
289 | cs_tuners->sampling_rate = sampling_rate; | |
290 | } else { | |
291 | struct od_dbs_tuners *od_tuners = dbs_data->tuners; | |
292 | od_tuners->sampling_rate = sampling_rate; | |
293 | } | |
294 | } | |
295 | ||
bc505475 RW |
296 | static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy, |
297 | struct dbs_governor *gov) | |
44152cb8 | 298 | { |
e40e7b25 | 299 | struct policy_dbs_info *policy_dbs; |
44152cb8 VK |
300 | int j; |
301 | ||
302 | /* Allocate memory for the common information for policy->cpus */ | |
e40e7b25 RW |
303 | policy_dbs = kzalloc(sizeof(*policy_dbs), GFP_KERNEL); |
304 | if (!policy_dbs) | |
bc505475 | 305 | return NULL; |
44152cb8 | 306 | |
e40e7b25 | 307 | mutex_init(&policy_dbs->timer_mutex); |
686cc637 | 308 | atomic_set(&policy_dbs->work_count, 0); |
e40e7b25 RW |
309 | init_irq_work(&policy_dbs->irq_work, dbs_irq_work); |
310 | INIT_WORK(&policy_dbs->work, dbs_work_handler); | |
cea6a9e7 RW |
311 | |
312 | /* Set policy_dbs for all CPUs, online+offline */ | |
313 | for_each_cpu(j, policy->related_cpus) { | |
314 | struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j); | |
315 | ||
316 | j_cdbs->policy_dbs = policy_dbs; | |
317 | j_cdbs->update_util.func = dbs_update_util_handler; | |
318 | } | |
bc505475 | 319 | return policy_dbs; |
44152cb8 VK |
320 | } |
321 | ||
e40e7b25 | 322 | static void free_policy_dbs_info(struct cpufreq_policy *policy, |
7bdad34d | 323 | struct dbs_governor *gov) |
44152cb8 | 324 | { |
7bdad34d | 325 | struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu); |
e40e7b25 | 326 | struct policy_dbs_info *policy_dbs = cdbs->policy_dbs; |
44152cb8 VK |
327 | int j; |
328 | ||
e40e7b25 | 329 | mutex_destroy(&policy_dbs->timer_mutex); |
5e4500d8 | 330 | |
cea6a9e7 RW |
331 | for_each_cpu(j, policy->related_cpus) { |
332 | struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j); | |
44152cb8 | 333 | |
cea6a9e7 RW |
334 | j_cdbs->policy_dbs = NULL; |
335 | j_cdbs->update_util.func = NULL; | |
336 | } | |
e40e7b25 | 337 | kfree(policy_dbs); |
44152cb8 VK |
338 | } |
339 | ||
906a6e5a | 340 | static int cpufreq_governor_init(struct cpufreq_policy *policy) |
4471a34f | 341 | { |
ea59ee0d | 342 | struct dbs_governor *gov = dbs_governor_of(policy); |
7bdad34d | 343 | struct dbs_data *dbs_data = gov->gdbs_data; |
bc505475 | 344 | struct policy_dbs_info *policy_dbs; |
714a2d9c VK |
345 | unsigned int latency; |
346 | int ret; | |
4471a34f | 347 | |
a72c4959 VK |
348 | /* State should be equivalent to EXIT */ |
349 | if (policy->governor_data) | |
350 | return -EBUSY; | |
351 | ||
bc505475 RW |
352 | policy_dbs = alloc_policy_dbs_info(policy, gov); |
353 | if (!policy_dbs) | |
354 | return -ENOMEM; | |
44152cb8 | 355 | |
bc505475 RW |
356 | if (dbs_data) { |
357 | if (WARN_ON(have_governor_per_policy())) { | |
358 | ret = -EINVAL; | |
359 | goto free_policy_dbs_info; | |
360 | } | |
714a2d9c | 361 | dbs_data->usage_count++; |
bc505475 RW |
362 | policy_dbs->dbs_data = dbs_data; |
363 | policy->governor_data = policy_dbs; | |
714a2d9c VK |
364 | return 0; |
365 | } | |
4d5dcc42 | 366 | |
714a2d9c | 367 | dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL); |
bc505475 RW |
368 | if (!dbs_data) { |
369 | ret = -ENOMEM; | |
370 | goto free_policy_dbs_info; | |
371 | } | |
44152cb8 | 372 | |
714a2d9c | 373 | dbs_data->usage_count = 1; |
4d5dcc42 | 374 | |
7bdad34d | 375 | ret = gov->init(dbs_data, !policy->governor->initialized); |
714a2d9c | 376 | if (ret) |
e40e7b25 | 377 | goto free_policy_dbs_info; |
4d5dcc42 | 378 | |
714a2d9c VK |
379 | /* policy latency is in ns. Convert it to us first */ |
380 | latency = policy->cpuinfo.transition_latency / 1000; | |
381 | if (latency == 0) | |
382 | latency = 1; | |
4d5dcc42 | 383 | |
714a2d9c VK |
384 | /* Bring kernel and HW constraints together */ |
385 | dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, | |
386 | MIN_LATENCY_MULTIPLIER * latency); | |
ea59ee0d | 387 | set_sampling_rate(dbs_data, gov, max(dbs_data->min_sampling_rate, |
714a2d9c | 388 | latency * LATENCY_MULTIPLIER)); |
2361be23 | 389 | |
8eec1020 | 390 | if (!have_governor_per_policy()) |
7bdad34d | 391 | gov->gdbs_data = dbs_data; |
4d5dcc42 | 392 | |
bc505475 RW |
393 | policy_dbs->dbs_data = dbs_data; |
394 | policy->governor_data = policy_dbs; | |
e4b133cc | 395 | |
714a2d9c | 396 | ret = sysfs_create_group(get_governor_parent_kobj(policy), |
ea59ee0d | 397 | get_sysfs_attr(gov)); |
fafd5e8a RW |
398 | if (!ret) |
399 | return 0; | |
4d5dcc42 | 400 | |
fafd5e8a | 401 | /* Failure, so roll back. */ |
4d5dcc42 | 402 | |
e4b133cc VK |
403 | policy->governor_data = NULL; |
404 | ||
8eec1020 | 405 | if (!have_governor_per_policy()) |
7bdad34d RW |
406 | gov->gdbs_data = NULL; |
407 | gov->exit(dbs_data, !policy->governor->initialized); | |
bc505475 RW |
408 | kfree(dbs_data); |
409 | ||
e40e7b25 RW |
410 | free_policy_dbs_info: |
411 | free_policy_dbs_info(policy, gov); | |
714a2d9c VK |
412 | return ret; |
413 | } | |
4d5dcc42 | 414 | |
/*
 * cpufreq_governor_exit - Tear down governor data for a policy.
 * @policy: Policy being exited.
 *
 * Drops a reference on the (possibly shared) dbs_data and frees it,
 * together with its sysfs group, when the last user goes away.
 * Returns -EBUSY if the governor has not been stopped first.
 */
static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;

	/* State should be equivalent to INIT */
	if (policy_dbs->policy)
		return -EBUSY;

	if (!--dbs_data->usage_count) {
		/* Last user: remove tunables and free the shared data. */
		sysfs_remove_group(get_governor_parent_kobj(policy),
				   get_sysfs_attr(gov));

		policy->governor_data = NULL;

		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data, policy->governor->initialized == 1);
		kfree(dbs_data);
	} else {
		policy->governor_data = NULL;
	}

	free_policy_dbs_info(policy, gov);
	return 0;
}
4d5dcc42 | 443 | |
/*
 * cpufreq_governor_start - Start sampling for a policy.
 * @policy: Policy being started.
 *
 * Seeds the per-CPU idle/wall/nice baselines, resets governor-specific
 * per-CPU state, and finally registers the utilization update callbacks
 * (which is what actually makes sampling begin).
 */
static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	/* State should be equivalent to INIT */
	if (policy_dbs->policy)
		return -EBUSY;

	if (gov->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		sampling_rate = od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice_load;
		io_busy = od_tuners->io_is_busy;
	}

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle =
			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		/*
		 * NOTE(review): assumes prev_cpu_wall is nonzero here (it is
		 * the cumulative wall time just read back) — confirm, since a
		 * zero value would divide by zero below.
		 */
		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					   j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	policy_dbs->policy = policy;

	/* Reset governor-specific per-CPU state before sampling starts. */
	if (gov->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			gov->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = gov->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = gov->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}
507 | ||
/*
 * cpufreq_governor_stop - Stop sampling for a policy.
 * @policy: Policy being stopped.
 *
 * All deferred work must be quiesced (gov_cancel_work()) before the
 * policy pointer is cleared, since the work handlers dereference it.
 */
static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	/* State should be equivalent to START */
	if (!policy_dbs->policy)
		return -EBUSY;

	gov_cancel_work(policy_dbs);
	policy_dbs->policy = NULL;

	return 0;
}
4471a34f | 521 | |
/*
 * cpufreq_governor_limits - Apply new min/max limits to a running policy.
 * @policy: Policy whose limits changed.
 *
 * Clamps the current frequency into the new [min, max] range and
 * re-evaluates the load, serialized against dbs_work_handler() via
 * timer_mutex (see the comment there).
 */
static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	/* State should be equivalent to START */
	if (!policy_dbs->policy)
		return -EBUSY;

	mutex_lock(&policy_dbs->timer_mutex);
	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
	dbs_check_cpu(policy);
	mutex_unlock(&policy_dbs->timer_mutex);

	return 0;
}
4471a34f | 540 | |
906a6e5a | 541 | int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event) |
714a2d9c | 542 | { |
5da3dd1e | 543 | int ret = -EINVAL; |
714a2d9c | 544 | |
732b6d61 | 545 | /* Lock governor to block concurrent initialization of governor */ |
2bb8d94f | 546 | mutex_lock(&dbs_data_mutex); |
732b6d61 | 547 | |
5da3dd1e | 548 | if (event == CPUFREQ_GOV_POLICY_INIT) { |
906a6e5a | 549 | ret = cpufreq_governor_init(policy); |
5da3dd1e RW |
550 | } else if (policy->governor_data) { |
551 | switch (event) { | |
552 | case CPUFREQ_GOV_POLICY_EXIT: | |
553 | ret = cpufreq_governor_exit(policy); | |
554 | break; | |
555 | case CPUFREQ_GOV_START: | |
556 | ret = cpufreq_governor_start(policy); | |
557 | break; | |
558 | case CPUFREQ_GOV_STOP: | |
559 | ret = cpufreq_governor_stop(policy); | |
560 | break; | |
561 | case CPUFREQ_GOV_LIMITS: | |
562 | ret = cpufreq_governor_limits(policy); | |
563 | break; | |
564 | } | |
4471a34f | 565 | } |
714a2d9c | 566 | |
2bb8d94f | 567 | mutex_unlock(&dbs_data_mutex); |
714a2d9c | 568 | return ret; |
4471a34f VK |
569 | } |
570 | EXPORT_SYMBOL_GPL(cpufreq_governor_dbs); |