/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

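/*
 * Global governor lock: serializes initialization and teardown of the
 * governors; taken around every event in cpufreq_governor_dbs() below.
 */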
DEFINE_MUTEX(dbs_data_mutex);
EXPORT_SYMBOL_GPL(dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate is not enough.  For example, if the original
 * sampling rate was 1 second and the user requests 10 ms because they need
 * an immediate reaction from the ondemand governor, then under the old rate
 * the governor may not notice the change for up to 1 second.  Thus, when the
 * sampling rate is reduced, the new value must take effect immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
			    size_t count)
{
	struct policy_dbs_info *policy_dbs;
	unsigned int rate;
	int ret;

	ret = sscanf(buf, "%u", &rate);
	if (ret != 1)
		return -EINVAL;

	dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		mutex_lock(&policy_dbs->timer_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->timer_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);

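/*
 * Sketch of how a governor typically exposes the tunable above (simplified;
 * the gov_show_one_common()/gov_attr_rw() helpers come from
 * cpufreq_governor.h):
 *
 *	gov_show_one_common(sampling_rate);
 *	gov_attr_rw(sampling_rate);
 *
 * Writes to the resulting sysfs attribute reach store_sampling_rate() via
 * governor_store() below, which takes dbs_data->mutex first.
 */
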
static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
{
	return container_of(kobj, struct dbs_data, kobj);
}

static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
	return container_of(attr, struct governor_attr, attr);
}

static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	if (gattr->show)
		ret = gattr->show(dbs_data, buf);

	return ret;
}

static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	mutex_lock(&dbs_data->mutex);

	if (gattr->store)
		ret = gattr->store(dbs_data, buf, count);

	mutex_unlock(&dbs_data->mutex);

	return ret;
}

/*
 * Sysfs Ops for accessing governor attributes.
 *
 * All show/store invocations for governor-specific sysfs attributes go
 * through the callbacks above, which then call the attribute-specific
 * callback from within.
 */
static const struct sysfs_ops governor_sysfs_ops = {
	.show	= governor_show,
	.store	= governor_store,
};

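/*
 * These ops are attached to each governor's kobj_type in
 * cpufreq_governor_init() below, so every sysfs read or write of a governor
 * tunable funnels through governor_show()/governor_store().
 */
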
unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int sampling_rate = dbs_data->sampling_rate;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0;
	unsigned int j;

	if (gov->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				gov->get_cpu_dbs_info_s(policy->cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays.  Apply that multiplier to
		 * 'sampling_rate' here, so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate *= od_dbs_info->rate_mult;
	}

	/* Get absolute load. */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = gov->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is an
		 * indication that you're performance critical, not that the
		 * system is actually idle, so do not add the iowait time to
		 * the CPU idle time.
		 */
		if (gov->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
		j_cdbs->prev_cpu_wall = cur_wall_time;

		if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
			idle_time = 0;
		} else {
			idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
			j_cdbs->prev_cpu_idle = cur_idle_time;
		}

		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it would
		 * show near-zero load, irrespective of how CPU-intensive that
		 * task actually is.  This is undesirable for latency-sensitive
		 * bursty workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency.  (However, we shouldn't
		 * over-do this copy, lest we get stuck at a high load (high
		 * frequency) for too long, even when the current system load
		 * has actually dropped down.  So we perform the copy only
		 * once, upon the first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's utilization
		 * update handler would not have run during CPU-idle periods.
		 * Hence, an unusually large 'wall_time' (as compared to the
		 * sampling rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate
		 * it for both:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}
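		/*
		 * Example with made-up numbers: wall_time = 10000 us and
		 * idle_time = 7500 us give load = 100 * (10000 - 7500) /
		 * 10000 = 25, i.e. this CPU was busy for 25% of the window.
		 */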

		if (load > max_load)
			max_load = load;
	}
	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);

void gov_set_update_util(struct policy_dbs_info *policy_dbs,
			 unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	struct dbs_governor *gov = dbs_governor_of(policy);
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);

		cpufreq_set_update_util_data(cpu, &cdbs->update_util);
	}
}
EXPORT_SYMBOL_GPL(gov_set_update_util);

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_set_update_util_data(i, NULL);

	synchronize_rcu();
}

static void gov_cancel_work(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}

static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;
	unsigned int delay;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->timer_mutex);
	delay = gov->gov_dbs_timer(policy);
	policy_dbs->sample_delay_ns = jiffies_to_nsecs(delay);
	mutex_unlock(&policy_dbs->timer_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a
	 * stale sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work(&policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned long util, unsigned long max)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	delta_ns = time - policy_dbs->last_sample_time;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right
	 * away at this point.  Otherwise, we need to ensure that only one of
	 * the CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared &&
	    !atomic_add_unless(&policy_dbs->work_count, 1, 1))
		return;

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}

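/*
 * Control-flow summary for the handlers above: the scheduler calls
 * dbs_update_util_handler() through the per-CPU utilization update hook;
 * once sample_delay_ns has elapsed (and no work is pending), it queues
 * dbs_irq_work(), which schedules dbs_work_handler() in process context to
 * run the governor's ->gov_dbs_timer() callback and set the next sample
 * delay.
 */
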
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for the common information for policy->cpus */
	policy_dbs = kzalloc(sizeof(*policy_dbs), GFP_KERNEL);
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->timer_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = policy_dbs;
		j_cdbs->update_util.func = dbs_update_util_handler;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct cpufreq_policy *policy,
				 struct dbs_governor *gov)
{
	struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	int j;

	mutex_destroy(&policy_dbs->timer_mutex);

	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	kfree(policy_dbs);
}

static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data = gov->gdbs_data;
	struct policy_dbs_info *policy_dbs;
	unsigned int latency;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		mutex_lock(&dbs_data->mutex);
		dbs_data->usage_count++;
		list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
		mutex_unlock(&dbs_data->mutex);

		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
	mutex_init(&dbs_data->mutex);

	ret = gov->init(dbs_data, !policy->governor->initialized);
	if (ret) {
		/* dbs_data is not visible to anyone else yet; free it here. */
		kfree(dbs_data);
		goto free_policy_dbs_info;
	}

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
				      LATENCY_MULTIPLIER * latency);

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy->governor_data = policy_dbs;

	policy_dbs->dbs_data = dbs_data;
	dbs_data->usage_count = 1;
	list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		return 0;

	/* Failure, so roll back. */
	pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data, !policy->governor->initialized);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy, gov);
	return ret;
}

static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	int count;

	mutex_lock(&dbs_data->mutex);
	list_del(&policy_dbs->list);
	count = --dbs_data->usage_count;
	mutex_unlock(&dbs_data->mutex);

	if (!count) {
		kobject_put(&dbs_data->kobj);

		policy->governor_data = NULL;

		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data, policy->governor->initialized == 1);
		mutex_destroy(&dbs_data->mutex);
		kfree(dbs_data);
	} else {
		policy->governor_data = NULL;
	}

	free_policy_dbs_info(policy, gov);
	return 0;
}

static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;

	if (gov->governor == GOV_ONDEMAND) {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		io_busy = od_tuners->io_is_busy;
	}

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	if (gov->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			gov->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = gov->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = gov->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}

static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
	gov_cancel_work(policy);

	return 0;
}

static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->timer_mutex);

	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);

	gov_update_sample_delay(policy_dbs, 0);

	mutex_unlock(&policy_dbs->timer_mutex);

	return 0;
}

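/*
 * cpufreq_governor_dbs - common governor event handler for ondemand and
 *			  conservative.
 * @policy: cpufreq policy to act on.
 * @event: CPUFREQ_GOV_* event to handle.
 *
 * Dispatches to the cpufreq_governor_*() helpers above, with dbs_data_mutex
 * held to serialize governor state changes.
 */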
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
	int ret = -EINVAL;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&dbs_data_mutex);

	if (event == CPUFREQ_GOV_POLICY_INIT) {
		ret = cpufreq_governor_init(policy);
	} else if (policy->governor_data) {
		switch (event) {
		case CPUFREQ_GOV_POLICY_EXIT:
			ret = cpufreq_governor_exit(policy);
			break;
		case CPUFREQ_GOV_START:
			ret = cpufreq_governor_start(policy);
			break;
		case CPUFREQ_GOV_STOP:
			ret = cpufreq_governor_stop(policy);
			break;
		case CPUFREQ_GOV_LIMITS:
			ret = cpufreq_governor_limits(policy);
			break;
		}
	}

	mutex_unlock(&dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
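
/*
 * Usage sketch (simplified; based on the ondemand governor sources): a
 * governor embeds struct dbs_governor and routes the cpufreq core's
 * callback to cpufreq_governor_dbs():
 *
 *	static struct dbs_governor od_dbs_gov = {
 *		.gov = {
 *			.name = "ondemand",
 *			.governor = cpufreq_governor_dbs,
 *			.owner = THIS_MODULE,
 *		},
 *		.governor = GOV_ONDEMAND,
 *		...
 *	};
 */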