/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is
 * 10 ms because the user wants an immediate reaction from the ondemand
 * governor (without being sure whether a higher frequency will actually be
 * needed), the governor may only notice the change up to 1 second later.
 * Thus, when reducing the sampling rate, we need to make the new value
 * effective immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
			    size_t count)
{
	struct policy_dbs_info *policy_dbs;
	unsigned int rate;
	int ret;

	ret = sscanf(buf, "%u", &rate);
	if (ret != 1)
		return -EINVAL;

	dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		mutex_lock(&policy_dbs->timer_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->timer_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);

static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
{
	return container_of(kobj, struct dbs_data, kobj);
}

static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
	return container_of(attr, struct governor_attr, attr);
}

static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);

	return gattr->show(dbs_data, buf);
}

static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EBUSY;

	mutex_lock(&dbs_data->mutex);

	if (dbs_data->usage_count)
		ret = gattr->store(dbs_data, buf, count);

	mutex_unlock(&dbs_data->mutex);

	return ret;
}

/*
 * Sysfs Ops for accessing governor attributes.
 *
 * All show/store requests for governor-specific sysfs attributes go through
 * the callbacks below first; the attribute-specific callback is then invoked
 * from within them.
 */
static const struct sysfs_ops governor_sysfs_ops = {
	.show	= governor_show,
	.store	= governor_store,
};

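/**
 * dbs_update - Compute the aggregate load for a policy.
 * @policy: cpufreq policy to compute the load for.
 *
 * For each CPU in @policy, measure the wall and idle time elapsed since the
 * previous sample, derive a load percentage from them, and return the highest
 * load found among the policy's CPUs.
 */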
unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily. Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
	/*
	 * For the purpose of ondemand, waiting for disk IO is an indication
	 * that you're performance critical, and not that the system is
	 * actually idle, so do not add the iowait time to the CPU idle time
	 * then.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get absolute load. */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
		j_cdbs->prev_cpu_wall = cur_wall_time;

		if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
			idle_time = 0;
		} else {
			idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
			j_cdbs->prev_cpu_idle = cur_idle_time;
		}

		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's utilization
		 * update handler would not have run during CPU-idle periods.
		 * Hence, an unusually large 'wall_time' (as compared to the
		 * sampling rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}
	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);

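/*
 * gov_set_update_util - Install the governor's utilization update hooks.
 *
 * Set the initial sample delay and register cdbs->update_util for every CPU
 * in the policy, so that dbs_update_util_handler() starts being called for
 * those CPUs.
 */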
static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
				unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_set_update_util_data(cpu, &cdbs->update_util);
	}
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_set_update_util_data(i, NULL);

	synchronize_sched();
}

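/*
 * gov_cancel_work - Stop sampling for a policy.
 *
 * Unregister the utilization update hooks, wait for any pending irq_work and
 * work item to finish, and reset the work bookkeeping.
 */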
static void gov_cancel_work(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}

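/*
 * dbs_work_handler - Process-context part of a governor sample.
 *
 * Runs from the workqueue: ask the governor for the next sample delay via
 * gov_dbs_timer() and then allow the utilization update handler to queue up
 * more work.
 */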
static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->timer_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_timer(policy));
	mutex_unlock(&policy_dbs->timer_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

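/*
 * dbs_irq_work - Bounce from irq_work context to the workqueue.
 *
 * Schedule dbs_work_handler() on the local CPU; queued from
 * dbs_update_util_handler() below.
 */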
static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

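/*
 * dbs_update_util_handler - Utilization update callback.
 *
 * Decide whether it is time to take a new sample and, if so, queue the
 * irq_work that eventually runs dbs_work_handler(). For shared policies,
 * work_count ensures that only one CPU queues the work for a given sample.
 */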
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned long util, unsigned long max)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	lst = READ_ONCE(policy_dbs->last_sample_time);
	delta_ns = time - lst;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point. Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared) {
		if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
			return;

		/*
		 * If another CPU updated last_sample_time in the meantime, we
		 * shouldn't be here, so clear the work counter and bail out.
		 */
		if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
			atomic_set(&policy_dbs->work_count, 0);
			return;
		}
	}

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}

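/*
 * alloc_policy_dbs_info - Allocate and pre-initialize per-policy data.
 *
 * Use the governor's ->alloc() callback to get a policy_dbs_info structure,
 * initialize its locking and work items, and point every related CPU's
 * cpu_dbs_info at it.
 */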
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->timer_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
		j_cdbs->update_util.func = dbs_update_util_handler;
	}
	return policy_dbs;
}

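/*
 * free_policy_dbs_info - Undo alloc_policy_dbs_info().
 *
 * Clear the per-CPU back-pointers and hand the structure back to the
 * governor's ->free() callback.
 */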
static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->timer_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}

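/*
 * cpufreq_governor_init - CPUFREQ_GOV_POLICY_INIT handler.
 *
 * Allocate the per-policy data and either attach to an existing dbs_data
 * (when tunables are system-wide) or allocate and initialize a new one,
 * including its sysfs kobject.
 */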
static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	unsigned int latency;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		mutex_lock(&dbs_data->mutex);
		dbs_data->usage_count++;
		list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
		mutex_unlock(&dbs_data->mutex);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
	mutex_init(&dbs_data->mutex);

	ret = gov->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_policy_dbs_info;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
				      LATENCY_MULTIPLIER * latency);

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy->governor_data = policy_dbs;

	policy_dbs->dbs_data = dbs_data;
	dbs_data->usage_count = 1;
	list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		goto out;

	/* Failure, so roll back. */
	pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data, !policy->governor->initialized);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);

out:
	mutex_unlock(&gov_dbs_data_mutex);
	return ret;
}

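/*
 * cpufreq_governor_exit - CPUFREQ_GOV_POLICY_EXIT handler.
 *
 * Drop the policy's reference on the shared dbs_data, tearing it down (and
 * removing its sysfs kobject) when the last user goes away, then free the
 * per-policy data.
 */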
static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	mutex_lock(&dbs_data->mutex);
	list_del(&policy_dbs->list);
	count = --dbs_data->usage_count;
	mutex_unlock(&dbs_data->mutex);

	if (!count) {
		kobject_put(&dbs_data->kobj);

		policy->governor_data = NULL;

		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data, policy->governor->initialized == 1);
		mutex_destroy(&dbs_data->mutex);
		kfree(dbs_data);
	} else {
		policy->governor_data = NULL;
	}

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
	return 0;
}

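/*
 * cpufreq_governor_start - CPUFREQ_GOV_START handler.
 *
 * Seed the per-CPU load statistics, let the governor do its own start work,
 * and then install the utilization update hooks so sampling begins.
 */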
static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}

static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
	gov_cancel_work(policy);
	return 0;
}

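/*
 * cpufreq_governor_limits - CPUFREQ_GOV_LIMITS handler.
 *
 * Clamp the current frequency into the new [min, max] range and zero the
 * sample delay so the next utilization update takes a fresh sample.
 */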
static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->timer_mutex);

	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);

	gov_update_sample_delay(policy_dbs, 0);

	mutex_unlock(&policy_dbs->timer_mutex);

	return 0;
}

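/*
 * cpufreq_governor_dbs - Common governor event dispatcher.
 *
 * Entry point for governors built on this common code: route the cpufreq
 * governor event to the corresponding handler above.
 */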
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
	if (event == CPUFREQ_GOV_POLICY_INIT) {
		return cpufreq_governor_init(policy);
	} else if (policy->governor_data) {
		switch (event) {
		case CPUFREQ_GOV_POLICY_EXIT:
			return cpufreq_governor_exit(policy);
		case CPUFREQ_GOV_START:
			return cpufreq_governor_start(policy);
		case CPUFREQ_GOV_STOP:
			return cpufreq_governor_stop(policy);
		case CPUFREQ_GOV_LIMITS:
			return cpufreq_governor_limits(policy);
		}
	}
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);