/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

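/*
 * dbs_data_mutex is taken around every governor event in
 * cpufreq_governor_dbs(), so INIT/EXIT/START/STOP/LIMITS cannot run
 * concurrently with one another for any policy.
 */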
DEFINE_MUTEX(dbs_data_mutex);
EXPORT_SYMBOL_GPL(dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate is not enough.  Say the original sampling rate
 * was 1 second and the user, wanting an immediate reaction from the
 * ondemand governor, requests a new rate of 10 ms: the governor might
 * still wait up to 1 second before acting on the change.  Thus, when the
 * sampling rate is being reduced, the new value must be made effective
 * immediately.
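 *
 * Example (path assuming global tunables and the ondemand governor):
 * "echo 10000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate"
 * requests a 10 ms rate, and the loop below makes it take effect on the
 * very next utilization update instead of after the old, longer delay.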
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
			    size_t count)
{
	struct policy_dbs_info *policy_dbs;
	unsigned int rate;
	int ret;

	ret = sscanf(buf, "%u", &rate);
	if (ret != 1)
		return -EINVAL;

	dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		mutex_lock(&policy_dbs->timer_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->timer_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);

static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
{
	return container_of(kobj, struct dbs_data, kobj);
}

static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
	return container_of(attr, struct governor_attr, attr);
}

static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	if (gattr->show)
		ret = gattr->show(dbs_data, buf);

	return ret;
}

static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	mutex_lock(&dbs_data->mutex);

	if (gattr->store)
		ret = gattr->store(dbs_data, buf, count);

	mutex_unlock(&dbs_data->mutex);

	return ret;
}

/*
 * Sysfs Ops for accessing governor attributes.
 *
 * All show/store invocations for governor-specific sysfs attributes go
 * through the show/store callbacks below, which then invoke the
 * attribute-specific callback themselves.
 */
static const struct sysfs_ops governor_sysfs_ops = {
	.show	= governor_show,
	.store	= governor_store,
};
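
/*
 * For illustration, a governor would define a tunable roughly like this
 * (a sketch; the names and the tuners layout are hypothetical):
 *
 *	static ssize_t show_up_threshold(struct dbs_data *dbs_data, char *buf)
 *	{
 *		struct od_dbs_tuners *tuners = dbs_data->tuners;
 *
 *		return sprintf(buf, "%u\n", tuners->up_threshold);
 *	}
 *
 *	static struct governor_attr up_threshold = {
 *		.attr = { .name = "up_threshold", .mode = 0644 },
 *		.show = show_up_threshold,
 *		.store = store_up_threshold,
 *	};
 *
 * governor_show()/governor_store() then dispatch to such callbacks via
 * to_gov_attr().
 */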
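/*
 * dbs_update - compute the busiest CPU's load on this policy.
 *
 * Returns the maximum load, in percent, observed across policy->cpus
 * since the previous sample.  For example, a CPU that was idle for
 * 2500 us out of a 10000 us window contributes a load of 75.
 */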
unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0;
	unsigned int sampling_rate, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily.  Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = gov->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle.  So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (gov->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
		j_cdbs->prev_cpu_wall = cur_wall_time;

		if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
			idle_time = 0;
		} else {
			idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
			j_cdbs->prev_cpu_idle = cur_idle_time;
		}

		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is.  This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency.  (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down.  So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's utilization
		 * update handler would not have run during CPU-idle periods.
		 * Hence, an unusually large 'wall_time' (as compared to the
		 * sampling rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}
	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);

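/*
 * gov_set_update_util - hook the governor into per-CPU utilization updates.
 *
 * Arms the policy with an initial sample delay of @delay_us and points
 * every CPU's update_util hook at the governor's callback, so that
 * dbs_update_util_handler() starts getting invoked on scheduler
 * utilization updates.
 */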
void gov_set_update_util(struct policy_dbs_info *policy_dbs,
			 unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	struct dbs_governor *gov = dbs_governor_of(policy);
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);

		cpufreq_set_update_util_data(cpu, &cdbs->update_util);
	}
}
EXPORT_SYMBOL_GPL(gov_set_update_util);

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_set_update_util_data(i, NULL);

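	/*
	 * The update_util hooks are dereferenced under RCU protection, so
	 * wait for a grace period to make sure no CPU is still executing
	 * the handler that has just been unregistered above.
	 */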
	synchronize_rcu();
}

static void gov_cancel_work(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}

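/*
 * dbs_work_handler - process-context part of a governor sample.
 *
 * Invokes the governor's ->gov_dbs_timer() callback under timer_mutex to
 * evaluate the load and recompute the sample delay, then lets the
 * utilization update handler queue up further work.
 */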
static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;
	unsigned int delay;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->timer_mutex);
	delay = gov->gov_dbs_timer(policy);	/* delay is in jiffies */
	policy_dbs->sample_delay_ns = jiffies_to_nsecs(delay);
	mutex_unlock(&policy_dbs->timer_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.  This barrier pairs with the smp_rmb() in
	 * dbs_update_util_handler().
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work(&policy_dbs->work);
}

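/*
 * dbs_update_util_handler - decide whether it is time to take a sample.
 *
 * Runs on every scheduler utilization update for the CPU, in a context
 * that cannot sleep, which is why the actual work is bounced through the
 * irq_work above to a workqueue instead of being done here directly.
 */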
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned long util, unsigned long max)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.  This
	 * barrier pairs with the smp_wmb() in dbs_work_handler().
	 */
	smp_rmb();
	delta_ns = time - policy_dbs->last_sample_time;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point.  Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.  atomic_add_unless() only
	 * succeeds for the CPU that takes work_count from 0 to 1; the others
	 * bail out.
	 */
	if (policy_dbs->is_shared &&
	    !atomic_add_unless(&policy_dbs->work_count, 1, 1))
		return;

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for the common information for policy->cpus */
	policy_dbs = kzalloc(sizeof(*policy_dbs), GFP_KERNEL);
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->timer_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = policy_dbs;
		j_cdbs->update_util.func = dbs_update_util_handler;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct cpufreq_policy *policy,
				 struct dbs_governor *gov)
{
	struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	int j;

	mutex_destroy(&policy_dbs->timer_mutex);

	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	kfree(policy_dbs);
}

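/*
 * cpufreq_governor_init - set up tunables for a policy.
 *
 * If the governor already has a global dbs_data set (tunables shared by
 * all policies), just take a reference on it; otherwise allocate a new
 * dbs_data, let the governor initialize its tunables, and expose them
 * via a kobject in sysfs.
 */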
static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data = gov->gdbs_data;
	struct policy_dbs_info *policy_dbs;
	unsigned int latency;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		mutex_lock(&dbs_data->mutex);
		dbs_data->usage_count++;
		list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
		mutex_unlock(&dbs_data->mutex);

		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
	mutex_init(&dbs_data->mutex);

	ret = gov->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_policy_dbs_info;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
				      LATENCY_MULTIPLIER * latency);

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy->governor_data = policy_dbs;

	policy_dbs->dbs_data = dbs_data;
	dbs_data->usage_count = 1;
	list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		return 0;

	/* Failure, so roll back. */
	pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data, !policy->governor->initialized);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy, gov);
	return ret;
}

static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	int count;

	mutex_lock(&dbs_data->mutex);
	list_del(&policy_dbs->list);
	count = --dbs_data->usage_count;
	mutex_unlock(&dbs_data->mutex);

	if (!count) {
		kobject_put(&dbs_data->kobj);

		policy->governor_data = NULL;

		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data, policy->governor->initialized == 1);
		mutex_destroy(&dbs_data->mutex);
		kfree(dbs_data);
	} else {
		policy->governor_data = NULL;
	}

	free_policy_dbs_info(policy, gov);
	return 0;
}

static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;

	if (gov->governor == GOV_ONDEMAND) {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		io_busy = od_tuners->io_is_busy;
	}

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	if (gov->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			gov->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = gov->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = gov->get_cpu_dbs_info_s(cpu);

		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}

static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
	gov_cancel_work(policy);

	return 0;
}

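/*
 * cpufreq_governor_limits - react to new policy->min/policy->max.
 *
 * Clamps the current frequency into the updated [min, max] range and
 * resets the sample delay so that the next utilization update takes a
 * fresh sample right away.
 */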
static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->timer_mutex);

	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);

	gov_update_sample_delay(policy_dbs, 0);

	mutex_unlock(&policy_dbs->timer_mutex);

	return 0;
}

int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
	int ret = -EINVAL;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&dbs_data_mutex);

	if (event == CPUFREQ_GOV_POLICY_INIT) {
		ret = cpufreq_governor_init(policy);
	} else if (policy->governor_data) {
		switch (event) {
		case CPUFREQ_GOV_POLICY_EXIT:
			ret = cpufreq_governor_exit(policy);
			break;
		case CPUFREQ_GOV_START:
			ret = cpufreq_governor_start(policy);
			break;
		case CPUFREQ_GOV_STOP:
			ret = cpufreq_governor_stop(policy);
			break;
		case CPUFREQ_GOV_LIMITS:
			ret = cpufreq_governor_limits(policy);
			break;
		}
	}

	mutex_unlock(&dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);