/*
 * acpi_pad.c ACPI Processor Aggregator Driver
 *
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
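
/*
 * The ACPI Processor Aggregator Device (HID ACPI000C) lets firmware ask
 * the OS to make a number of logical CPUs idle, e.g. for thermal or
 * power-budget reasons.  This driver reacts to the device's Notify(0x80)
 * by spawning power-saving kthreads that occupy their CPUs in deep MWAIT
 * states, and reports the result back to firmware through _OST.
 */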

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>

#define ACPI_PROCESSOR_AGGREGATOR_CLASS	"acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
static DEFINE_MUTEX(isolated_cpus_lock);

#define MWAIT_SUBSTATE_MASK	(0xf)
#define MWAIT_CSTATE_MASK	(0xf)
#define MWAIT_SUBSTATE_SIZE	(4)
#define CPUID_MWAIT_LEAF	(5)
#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
#define CPUID5_ECX_INTERRUPT_BREAK	(0x2)
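/*
 * CPUID leaf 5 describes MONITOR/MWAIT: ECX bit 0 advertises the MWAIT
 * extensions and ECX bit 1 the break-on-interrupt extension (wake from
 * MWAIT on an interrupt even with interrupts disabled), which the idle
 * loop below relies on.  EDX reports the number of sub C-states
 * supported for each C-state, four bits per state.
 */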
static unsigned long power_saving_mwait_eax;
static void power_saving_mwait_init(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	int i;

	if (!boot_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return;

	edx >>= MWAIT_SUBSTATE_SIZE;
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}
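	/*
	 * Encode the MWAIT hint: bits 7:4 select the target C-state,
	 * bits 3:0 the sub C-state, so this requests the deepest
	 * C-state/sub-state pair the CPU advertises.
	 */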
	power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
		(highest_subcstate - 1);

	for_each_online_cpu(i)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &i);

#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;

		/*FALL THROUGH*/
	default:
		/* TSC could halt in idle, so notify users */
		mark_tsc_unstable("TSC halts in idle");
	}
#endif
}

static unsigned long cpu_weight[NR_CPUS];
static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);
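/*
 * Bind the calling power-saving thread to the least-used online CPU,
 * preferring CPUs that are not hyperthread siblings of CPUs already
 * running a power-saving thread.  cpu_weight counts how often each CPU
 * has been chosen, so the idle load rotates across the package.
 */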
static void round_robin_cpu(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_var_t tmp;
	int cpu;
	unsigned long min_weight = -1;
	unsigned long uninitialized_var(preferred_cpu);

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return;

	mutex_lock(&isolated_cpus_lock);
	cpumask_clear(tmp);
	for_each_cpu(cpu, pad_busy_cpus)
		cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
	cpumask_andnot(tmp, cpu_online_mask, tmp);
	/* avoid HT siblings if possible */
	if (cpumask_empty(tmp))
		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
	if (cpumask_empty(tmp)) {
		mutex_unlock(&isolated_cpus_lock);
		free_cpumask_var(tmp);
		return;
	}
	for_each_cpu(cpu, tmp) {
		if (cpu_weight[cpu] < min_weight) {
			min_weight = cpu_weight[cpu];
			preferred_cpu = cpu;
		}
	}

	if (tsk_in_cpu[tsk_index] != -1)
		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = preferred_cpu;
	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
	cpu_weight[preferred_cpu]++;
	mutex_unlock(&isolated_cpus_lock);
	free_cpumask_var(tmp);

	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
}

static void exit_round_robin(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = -1;
}

static unsigned int idle_pct = 5; /* percentage */
static unsigned int round_robin_time = 10; /* seconds */
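/*
 * Each power-saving thread runs as a SCHED_RR task and monopolizes its
 * CPU with MONITOR/MWAIT for roughly (100 - idle_pct)% of the time,
 * then sleeps for idle_pct% so the RT throttling assumptions below
 * still hold.  Every round_robin_time seconds it migrates to another
 * CPU via round_robin_cpu().
 */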
static int power_saving_thread(void *data)
{
	struct sched_param param = {.sched_priority = 1};
	int do_sleep;
	unsigned int tsk_index = (unsigned long)data;
	u64 last_jiffies = 0;

	sched_setscheduler(current, SCHED_RR, &param);

	while (!kthread_should_stop()) {
		int cpu;
		u64 expire_time;

		try_to_freeze();

		/* round robin to cpus */
		if (last_jiffies + round_robin_time * HZ < jiffies) {
			last_jiffies = jiffies;
			round_robin_cpu(tsk_index);
		}

		do_sleep = 0;

		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		expire_time = jiffies + HZ * (100 - idle_pct) / 100;

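		/*
		 * Occupy the CPU until the scheduler needs it back or the
		 * time budget expires: arm the monitor on this thread's
		 * flags, re-check need_resched(), then MWAIT.  The
		 * broadcast notifications hand timer duty to a global
		 * clockevent device while the local APIC timer may be
		 * stopped in a deep C-state.
		 */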
		while (!need_resched()) {
			local_irq_disable();
			cpu = smp_processor_id();
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
				&cpu);
			stop_critical_timings();

			__monitor((void *)&current_thread_info()->flags, 0, 0);
			smp_mb();
			if (!need_resched())
				__mwait(power_saving_mwait_eax, 1);

			start_critical_timings();
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
				&cpu);
			local_irq_enable();

			if (jiffies > expire_time) {
				do_sleep = 1;
				break;
			}
		}

		current_thread_info()->status |= TS_POLLING;

		/*
		 * The RT scheduler throttles runtime: when an RT task has
		 * used 95% of the CPU, it is scheduled out for 5% so other
		 * tasks do not starve.  That mechanism only works if every
		 * CPU runs an RT task; if one CPU has none, RT tasks from
		 * other CPUs can borrow its time and exceed the 95% bound.
		 * To keep starvation avoidance working, take a nap here.
		 */
		if (do_sleep)
			schedule_timeout_killable(HZ * idle_pct / 100);
	}

	exit_round_robin(tsk_index);
	return 0;
}

static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
static int create_power_saving_task(void)
{
	int rc = -ENOMEM;

	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
		(void *)(unsigned long)ps_tsk_num,
		"power_saving/%d", ps_tsk_num);
	rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
	if (!rc)
		ps_tsk_num++;
	else
		ps_tsks[ps_tsk_num] = NULL;

	return rc;
}

static void destroy_power_saving_task(void)
{
	if (ps_tsk_num > 0) {
		ps_tsk_num--;
		kthread_stop(ps_tsks[ps_tsk_num]);
		ps_tsks[ps_tsk_num] = NULL;
	}
}

static void set_power_saving_task_num(unsigned int num)
{
	if (num > ps_tsk_num) {
		while (ps_tsk_num < num) {
			if (create_power_saving_task())
				return;
		}
	} else if (num < ps_tsk_num) {
		while (ps_tsk_num > num)
			destroy_power_saving_task();
	}
}

static void acpi_pad_idle_cpus(unsigned int num_cpus)
{
	get_online_cpus();

	num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
	set_power_saving_task_num(num_cpus);

	put_online_cpus();
}

static uint32_t acpi_pad_idle_cpus_num(void)
{
	return ps_tsk_num;
}
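
/*
 * Three sysfs knobs under the ACPI device: "idlecpus" shows the busy
 * CPU mask and lets user space request how many CPUs to idle, "idlepct"
 * sets the percentage of time a power-saving thread sleeps, and
 * "rrtime" sets how often (in seconds) the threads rotate across CPUs.
 */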

static ssize_t acpi_pad_rrtime_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;
	if (strict_strtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	round_robin_time = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t acpi_pad_rrtime_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d", round_robin_time);
}
static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
	acpi_pad_rrtime_show,
	acpi_pad_rrtime_store);

static ssize_t acpi_pad_idlepct_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;
	if (strict_strtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	idle_pct = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t acpi_pad_idlepct_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d", idle_pct);
}
static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
	acpi_pad_idlepct_show,
	acpi_pad_idlepct_store);

static ssize_t acpi_pad_idlecpus_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;
	if (strict_strtoul(buf, 0, &num))
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(num);
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t acpi_pad_idlecpus_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return cpumask_scnprintf(buf, PAGE_SIZE,
		to_cpumask(pad_busy_cpus_bits));
}
static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
	acpi_pad_idlecpus_show,
	acpi_pad_idlecpus_store);

static int acpi_pad_add_sysfs(struct acpi_device *device)
{
	int result;

	result = device_create_file(&device->dev, &dev_attr_idlecpus);
	if (result)
		return -ENODEV;
	result = device_create_file(&device->dev, &dev_attr_idlepct);
	if (result) {
		device_remove_file(&device->dev, &dev_attr_idlecpus);
		return -ENODEV;
	}
	result = device_create_file(&device->dev, &dev_attr_rrtime);
	if (result) {
		device_remove_file(&device->dev, &dev_attr_idlecpus);
		device_remove_file(&device->dev, &dev_attr_idlepct);
		return -ENODEV;
	}
	return 0;
}

static void acpi_pad_remove_sysfs(struct acpi_device *device)
{
	device_remove_file(&device->dev, &dev_attr_idlecpus);
	device_remove_file(&device->dev, &dev_attr_idlepct);
	device_remove_file(&device->dev, &dev_attr_rrtime);
}

/* Query firmware how many CPUs should be idle */
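/*
 * _PUR returns a two-element package: element 0 is the revision
 * (expected to be 1) and element 1 the number of logical CPUs the
 * platform wants the OS to idle.
 */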
static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
{
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package;
	int rev, num, ret = -EINVAL;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
		return -EINVAL;

	if (!buffer.length || !buffer.pointer)
		return -EINVAL;

	package = buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
		goto out;
	rev = package->package.elements[0].integer.value;
	num = package->package.elements[1].integer.value;
	if (rev != 1 || num < 0)
		goto out;
	*num_cpus = num;
	ret = 0;
out:
	kfree(buffer.pointer);
	return ret;
}

/* Notify firmware how many CPUs are idle */
static void acpi_pad_ost(acpi_handle handle, int stat,
	uint32_t idle_cpus)
{
	union acpi_object params[3] = {
		{.type = ACPI_TYPE_INTEGER,},
		{.type = ACPI_TYPE_INTEGER,},
		{.type = ACPI_TYPE_BUFFER,},
	};
	struct acpi_object_list arg_list = {3, params};

	params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
	params[1].integer.value = stat;
	params[2].buffer.length = 4;
	params[2].buffer.pointer = (void *)&idle_cpus;
	acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
}

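/*
 * Notify(0x80) handler flow: query _PUR for the requested CPU count,
 * create or destroy power-saving threads to match, then report the
 * number actually idled back to firmware through _OST.
 */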
static void acpi_pad_handle_notify(acpi_handle handle)
{
	int num_cpus;
	uint32_t idle_cpus;

	mutex_lock(&isolated_cpus_lock);
	if (acpi_pad_pur(handle, &num_cpus)) {
		mutex_unlock(&isolated_cpus_lock);
		return;
	}
	acpi_pad_idle_cpus(num_cpus);
	idle_cpus = acpi_pad_idle_cpus_num();
	acpi_pad_ost(handle, 0, idle_cpus);
	mutex_unlock(&isolated_cpus_lock);
}

static void acpi_pad_notify(acpi_handle handle, u32 event,
	void *data)
{
	struct acpi_device *device = data;

	switch (event) {
	case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
		acpi_pad_handle_notify(handle);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
			dev_name(&device->dev), event, 0);
		break;
	default:
		printk(KERN_WARNING "Unsupported event [0x%x]\n", event);
		break;
	}
}

static int acpi_pad_add(struct acpi_device *device)
{
	acpi_status status;

	strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);

	if (acpi_pad_add_sysfs(device))
		return -ENODEV;

	status = acpi_install_notify_handler(device->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
	if (ACPI_FAILURE(status)) {
		acpi_pad_remove_sysfs(device);
		return -ENODEV;
	}

	return 0;
}

static int acpi_pad_remove(struct acpi_device *device,
	int type)
{
	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(0);
	mutex_unlock(&isolated_cpus_lock);

	acpi_remove_notify_handler(device->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify);
	acpi_pad_remove_sysfs(device);
	return 0;
}

static const struct acpi_device_id pad_device_ids[] = {
	{"ACPI000C", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pad_device_ids);

static struct acpi_driver acpi_pad_driver = {
	.name = "processor_aggregator",
	.class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
	.ids = pad_device_ids,
	.ops = {
		.add = acpi_pad_add,
		.remove = acpi_pad_remove,
	},
};

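/*
 * power_saving_mwait_init() leaves power_saving_mwait_eax at zero when
 * MWAIT (or the required break-on-interrupt extension) is unavailable,
 * in which case the driver refuses to load.
 */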
static int __init acpi_pad_init(void)
{
	power_saving_mwait_init();
	if (power_saving_mwait_eax == 0)
		return -EINVAL;

	return acpi_bus_register_driver(&acpi_pad_driver);
}

static void __exit acpi_pad_exit(void)
{
	acpi_bus_unregister_driver(&acpi_pad_driver);
}

module_init(acpi_pad_init);
module_exit(acpi_pad_exit);
MODULE_AUTHOR("Shaohua Li <shaohua.li@intel.com>");
MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
MODULE_LICENSE("GPL");