]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * processor_throttling.c - Throttling submodule of the ACPI processor driver | |
3 | * | |
4 | * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> | |
5 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> | |
6 | * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> | |
7 | * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> | |
8 | * - Added processor hotplug support | |
9 | * | |
10 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |
11 | * | |
12 | * This program is free software; you can redistribute it and/or modify | |
13 | * it under the terms of the GNU General Public License as published by | |
14 | * the Free Software Foundation; either version 2 of the License, or (at | |
15 | * your option) any later version. | |
16 | * | |
17 | * This program is distributed in the hope that it will be useful, but | |
18 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
20 | * General Public License for more details. | |
21 | * | |
22 | * You should have received a copy of the GNU General Public License along | |
23 | * with this program; if not, write to the Free Software Foundation, Inc., | |
24 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | |
25 | * | |
26 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |
27 | */ | |
28 | ||
29 | #include <linux/kernel.h> | |
30 | #include <linux/module.h> | |
5a0e3ad6 | 31 | #include <linux/slab.h> |
1da177e4 | 32 | #include <linux/init.h> |
357dc4c3 | 33 | #include <linux/sched.h> |
1da177e4 | 34 | #include <linux/cpufreq.h> |
8b48463f LZ |
35 | #include <linux/acpi.h> |
36 | #include <acpi/processor.h> | |
1da177e4 LT |
37 | #include <asm/io.h> |
38 | #include <asm/uaccess.h> | |
39 | ||
a192a958 LB |
40 | #define PREFIX "ACPI: " |
41 | ||
1da177e4 | 42 | #define ACPI_PROCESSOR_CLASS "processor" |
1da177e4 | 43 | #define _COMPONENT ACPI_PROCESSOR_COMPONENT |
f52fd66d | 44 | ACPI_MODULE_NAME("processor_throttling"); |
1da177e4 | 45 | |
56c213fa ZR |
46 | /* ignore_tpc: |
47 | * 0 -> acpi processor driver doesn't ignore _TPC values | |
48 | * 1 -> acpi processor driver ignores _TPC values | |
49 | */ | |
50 | static int ignore_tpc; | |
51 | module_param(ignore_tpc, int, 0644); | |
52 | MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support"); | |
53 | ||
e4aa5cb2 ZY |
54 | struct throttling_tstate { |
55 | unsigned int cpu; /* cpu nr */ | |
56 | int target_state; /* target T-state */ | |
57 | }; | |
58 | ||
59 | #define THROTTLING_PRECHANGE (1) | |
60 | #define THROTTLING_POSTCHANGE (2) | |
61 | ||
ff55a9ce | 62 | static int acpi_processor_get_throttling(struct acpi_processor *pr); |
2a908002 FP |
63 | int acpi_processor_set_throttling(struct acpi_processor *pr, |
64 | int state, bool force); | |
01854e69 | 65 | |
1180509f ZY |
66 | static int acpi_processor_update_tsd_coord(void) |
67 | { | |
68 | int count, count_target; | |
69 | int retval = 0; | |
70 | unsigned int i, j; | |
2fdf66b4 | 71 | cpumask_var_t covered_cpus; |
1180509f ZY |
72 | struct acpi_processor *pr, *match_pr; |
73 | struct acpi_tsd_package *pdomain, *match_pdomain; | |
74 | struct acpi_processor_throttling *pthrottling, *match_pthrottling; | |
75 | ||
79f55997 | 76 | if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)) |
2fdf66b4 RR |
77 | return -ENOMEM; |
78 | ||
1180509f ZY |
79 | /* |
80 | * Now that we have _TSD data from all CPUs, lets setup T-state | |
33a2a529 | 81 | * coordination between all CPUs. |
1180509f ZY |
82 | */ |
83 | for_each_possible_cpu(i) { | |
706546d0 | 84 | pr = per_cpu(processors, i); |
1180509f ZY |
85 | if (!pr) |
86 | continue; | |
87 | ||
88 | /* Basic validity check for domain info */ | |
89 | pthrottling = &(pr->throttling); | |
90 | ||
91 | /* | |
92 | * If tsd package for one cpu is invalid, the coordination | |
93 | * among all CPUs is thought as invalid. | |
94 | * Maybe it is ugly. | |
95 | */ | |
96 | if (!pthrottling->tsd_valid_flag) { | |
97 | retval = -EINVAL; | |
98 | break; | |
99 | } | |
100 | } | |
101 | if (retval) | |
102 | goto err_ret; | |
103 | ||
1180509f | 104 | for_each_possible_cpu(i) { |
706546d0 | 105 | pr = per_cpu(processors, i); |
1180509f ZY |
106 | if (!pr) |
107 | continue; | |
108 | ||
2fdf66b4 | 109 | if (cpumask_test_cpu(i, covered_cpus)) |
1180509f ZY |
110 | continue; |
111 | pthrottling = &pr->throttling; | |
112 | ||
113 | pdomain = &(pthrottling->domain_info); | |
2fdf66b4 RR |
114 | cpumask_set_cpu(i, pthrottling->shared_cpu_map); |
115 | cpumask_set_cpu(i, covered_cpus); | |
1180509f ZY |
116 | /* |
117 | * If the number of processor in the TSD domain is 1, it is | |
118 | * unnecessary to parse the coordination for this CPU. | |
119 | */ | |
120 | if (pdomain->num_processors <= 1) | |
121 | continue; | |
122 | ||
123 | /* Validate the Domain info */ | |
124 | count_target = pdomain->num_processors; | |
125 | count = 1; | |
126 | ||
127 | for_each_possible_cpu(j) { | |
128 | if (i == j) | |
129 | continue; | |
130 | ||
706546d0 | 131 | match_pr = per_cpu(processors, j); |
1180509f ZY |
132 | if (!match_pr) |
133 | continue; | |
134 | ||
135 | match_pthrottling = &(match_pr->throttling); | |
136 | match_pdomain = &(match_pthrottling->domain_info); | |
137 | if (match_pdomain->domain != pdomain->domain) | |
138 | continue; | |
139 | ||
140 | /* Here i and j are in the same domain. | |
141 | * If two TSD packages have the same domain, they | |
142 | * should have the same num_porcessors and | |
143 | * coordination type. Otherwise it will be regarded | |
144 | * as illegal. | |
145 | */ | |
146 | if (match_pdomain->num_processors != count_target) { | |
147 | retval = -EINVAL; | |
148 | goto err_ret; | |
149 | } | |
150 | ||
151 | if (pdomain->coord_type != match_pdomain->coord_type) { | |
152 | retval = -EINVAL; | |
153 | goto err_ret; | |
154 | } | |
155 | ||
2fdf66b4 RR |
156 | cpumask_set_cpu(j, covered_cpus); |
157 | cpumask_set_cpu(j, pthrottling->shared_cpu_map); | |
1180509f ZY |
158 | count++; |
159 | } | |
160 | for_each_possible_cpu(j) { | |
161 | if (i == j) | |
162 | continue; | |
163 | ||
706546d0 | 164 | match_pr = per_cpu(processors, j); |
1180509f ZY |
165 | if (!match_pr) |
166 | continue; | |
167 | ||
168 | match_pthrottling = &(match_pr->throttling); | |
169 | match_pdomain = &(match_pthrottling->domain_info); | |
170 | if (match_pdomain->domain != pdomain->domain) | |
171 | continue; | |
172 | ||
173 | /* | |
174 | * If some CPUS have the same domain, they | |
175 | * will have the same shared_cpu_map. | |
176 | */ | |
2fdf66b4 RR |
177 | cpumask_copy(match_pthrottling->shared_cpu_map, |
178 | pthrottling->shared_cpu_map); | |
1180509f ZY |
179 | } |
180 | } | |
181 | ||
182 | err_ret: | |
2fdf66b4 RR |
183 | free_cpumask_var(covered_cpus); |
184 | ||
1180509f | 185 | for_each_possible_cpu(i) { |
706546d0 | 186 | pr = per_cpu(processors, i); |
1180509f ZY |
187 | if (!pr) |
188 | continue; | |
189 | ||
190 | /* | |
191 | * Assume no coordination on any error parsing domain info. | |
192 | * The coordination type will be forced as SW_ALL. | |
193 | */ | |
194 | if (retval) { | |
195 | pthrottling = &(pr->throttling); | |
2fdf66b4 RR |
196 | cpumask_clear(pthrottling->shared_cpu_map); |
197 | cpumask_set_cpu(i, pthrottling->shared_cpu_map); | |
1180509f ZY |
198 | pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; |
199 | } | |
200 | } | |
201 | ||
202 | return retval; | |
203 | } | |
204 | ||
205 | /* | |
206 | * Update the T-state coordination after the _TSD | |
207 | * data for all cpus is obtained. | |
208 | */ | |
209 | void acpi_processor_throttling_init(void) | |
210 | { | |
2d5914cc | 211 | if (acpi_processor_update_tsd_coord()) { |
1180509f ZY |
212 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
213 | "Assume no T-state coordination\n")); | |
2d5914cc | 214 | } |
1180509f ZY |
215 | |
216 | return; | |
217 | } | |
218 | ||
e4aa5cb2 ZY |
219 | static int acpi_processor_throttling_notifier(unsigned long event, void *data) |
220 | { | |
221 | struct throttling_tstate *p_tstate = data; | |
222 | struct acpi_processor *pr; | |
223 | unsigned int cpu ; | |
224 | int target_state; | |
225 | struct acpi_processor_limit *p_limit; | |
226 | struct acpi_processor_throttling *p_throttling; | |
227 | ||
228 | cpu = p_tstate->cpu; | |
706546d0 | 229 | pr = per_cpu(processors, cpu); |
e4aa5cb2 ZY |
230 | if (!pr) { |
231 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n")); | |
232 | return 0; | |
233 | } | |
234 | if (!pr->flags.throttling) { | |
235 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is " | |
236 | "unsupported on CPU %d\n", cpu)); | |
237 | return 0; | |
238 | } | |
239 | target_state = p_tstate->target_state; | |
240 | p_throttling = &(pr->throttling); | |
241 | switch (event) { | |
242 | case THROTTLING_PRECHANGE: | |
243 | /* | |
244 | * Prechange event is used to choose one proper t-state, | |
245 | * which meets the limits of thermal, user and _TPC. | |
246 | */ | |
247 | p_limit = &pr->limit; | |
248 | if (p_limit->thermal.tx > target_state) | |
249 | target_state = p_limit->thermal.tx; | |
250 | if (p_limit->user.tx > target_state) | |
251 | target_state = p_limit->user.tx; | |
252 | if (pr->throttling_platform_limit > target_state) | |
253 | target_state = pr->throttling_platform_limit; | |
254 | if (target_state >= p_throttling->state_count) { | |
255 | printk(KERN_WARNING | |
256 | "Exceed the limit of T-state \n"); | |
257 | target_state = p_throttling->state_count - 1; | |
258 | } | |
259 | p_tstate->target_state = target_state; | |
260 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:" | |
261 | "target T-state of CPU %d is T%d\n", | |
262 | cpu, target_state)); | |
263 | break; | |
264 | case THROTTLING_POSTCHANGE: | |
265 | /* | |
266 | * Postchange event is only used to update the | |
267 | * T-state flag of acpi_processor_throttling. | |
268 | */ | |
269 | p_throttling->state = target_state; | |
270 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:" | |
271 | "CPU %d is switched to T%d\n", | |
272 | cpu, target_state)); | |
273 | break; | |
274 | default: | |
275 | printk(KERN_WARNING | |
276 | "Unsupported Throttling notifier event\n"); | |
277 | break; | |
278 | } | |
279 | ||
280 | return 0; | |
281 | } | |
282 | ||
c30c620e LB |
283 | /* |
284 | * _TPC - Throttling Present Capabilities | |
285 | */ | |
01854e69 LY |
286 | static int acpi_processor_get_platform_limit(struct acpi_processor *pr) |
287 | { | |
288 | acpi_status status = 0; | |
27663c58 | 289 | unsigned long long tpc = 0; |
01854e69 | 290 | |
ff55a9ce | 291 | if (!pr) |
01854e69 | 292 | return -EINVAL; |
56c213fa ZR |
293 | |
294 | if (ignore_tpc) | |
295 | goto end; | |
296 | ||
01854e69 | 297 | status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc); |
c30c620e LB |
298 | if (ACPI_FAILURE(status)) { |
299 | if (status != AE_NOT_FOUND) { | |
300 | ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC")); | |
301 | } | |
01854e69 LY |
302 | return -ENODEV; |
303 | } | |
56c213fa ZR |
304 | |
305 | end: | |
01854e69 LY |
306 | pr->throttling_platform_limit = (int)tpc; |
307 | return 0; | |
308 | } | |
309 | ||
310 | int acpi_processor_tstate_has_changed(struct acpi_processor *pr) | |
311 | { | |
ef54d5ad ZY |
312 | int result = 0; |
313 | int throttling_limit; | |
314 | int current_state; | |
315 | struct acpi_processor_limit *limit; | |
316 | int target_state; | |
317 | ||
56c213fa ZR |
318 | if (ignore_tpc) |
319 | return 0; | |
320 | ||
ef54d5ad ZY |
321 | result = acpi_processor_get_platform_limit(pr); |
322 | if (result) { | |
323 | /* Throttling Limit is unsupported */ | |
324 | return result; | |
325 | } | |
326 | ||
327 | throttling_limit = pr->throttling_platform_limit; | |
328 | if (throttling_limit >= pr->throttling.state_count) { | |
329 | /* Uncorrect Throttling Limit */ | |
330 | return -EINVAL; | |
331 | } | |
332 | ||
333 | current_state = pr->throttling.state; | |
334 | if (current_state > throttling_limit) { | |
335 | /* | |
336 | * The current state can meet the requirement of | |
337 | * _TPC limit. But it is reasonable that OSPM changes | |
338 | * t-states from high to low for better performance. | |
339 | * Of course the limit condition of thermal | |
340 | * and user should be considered. | |
341 | */ | |
342 | limit = &pr->limit; | |
343 | target_state = throttling_limit; | |
344 | if (limit->thermal.tx > target_state) | |
345 | target_state = limit->thermal.tx; | |
346 | if (limit->user.tx > target_state) | |
347 | target_state = limit->user.tx; | |
348 | } else if (current_state == throttling_limit) { | |
349 | /* | |
350 | * Unnecessary to change the throttling state | |
351 | */ | |
352 | return 0; | |
353 | } else { | |
354 | /* | |
355 | * If the current state is lower than the limit of _TPC, it | |
356 | * will be forced to switch to the throttling state defined | |
357 | * by throttling_platfor_limit. | |
358 | * Because the previous state meets with the limit condition | |
359 | * of thermal and user, it is unnecessary to check it again. | |
360 | */ | |
361 | target_state = throttling_limit; | |
362 | } | |
2a908002 | 363 | return acpi_processor_set_throttling(pr, target_state, false); |
01854e69 LY |
364 | } |
365 | ||
5a344a50 ZY |
366 | /* |
367 | * This function is used to reevaluate whether the T-state is valid | |
368 | * after one CPU is onlined/offlined. | |
369 | * It is noted that it won't reevaluate the following properties for | |
370 | * the T-state. | |
371 | * 1. Control method. | |
372 | * 2. the number of supported T-state | |
373 | * 3. TSD domain | |
374 | */ | |
375 | void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, | |
376 | unsigned long action) | |
377 | { | |
378 | int result = 0; | |
379 | ||
380 | if (action == CPU_DEAD) { | |
381 | /* When one CPU is offline, the T-state throttling | |
382 | * will be invalidated. | |
383 | */ | |
384 | pr->flags.throttling = 0; | |
385 | return; | |
386 | } | |
387 | /* the following is to recheck whether the T-state is valid for | |
388 | * the online CPU | |
389 | */ | |
390 | if (!pr->throttling.state_count) { | |
391 | /* If the number of T-state is invalid, it is | |
392 | * invalidated. | |
393 | */ | |
394 | pr->flags.throttling = 0; | |
395 | return; | |
396 | } | |
397 | pr->flags.throttling = 1; | |
398 | ||
399 | /* Disable throttling (if enabled). We'll let subsequent | |
400 | * policy (e.g.thermal) decide to lower performance if it | |
401 | * so chooses, but for now we'll crank up the speed. | |
402 | */ | |
403 | ||
404 | result = acpi_processor_get_throttling(pr); | |
405 | if (result) | |
406 | goto end; | |
407 | ||
408 | if (pr->throttling.state) { | |
409 | result = acpi_processor_set_throttling(pr, 0, false); | |
410 | if (result) | |
411 | goto end; | |
412 | } | |
413 | ||
414 | end: | |
415 | if (result) | |
416 | pr->flags.throttling = 0; | |
417 | } | |
c30c620e LB |
418 | /* |
419 | * _PTC - Processor Throttling Control (and status) register location | |
420 | */ | |
01854e69 LY |
421 | static int acpi_processor_get_throttling_control(struct acpi_processor *pr) |
422 | { | |
423 | int result = 0; | |
424 | acpi_status status = 0; | |
425 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | |
426 | union acpi_object *ptc = NULL; | |
427 | union acpi_object obj = { 0 }; | |
9bcb2721 | 428 | struct acpi_processor_throttling *throttling; |
01854e69 LY |
429 | |
430 | status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer); | |
431 | if (ACPI_FAILURE(status)) { | |
c30c620e LB |
432 | if (status != AE_NOT_FOUND) { |
433 | ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC")); | |
434 | } | |
01854e69 LY |
435 | return -ENODEV; |
436 | } | |
437 | ||
438 | ptc = (union acpi_object *)buffer.pointer; | |
439 | if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE) | |
440 | || (ptc->package.count != 2)) { | |
441 | printk(KERN_ERR PREFIX "Invalid _PTC data\n"); | |
442 | result = -EFAULT; | |
443 | goto end; | |
444 | } | |
445 | ||
446 | /* | |
447 | * control_register | |
448 | */ | |
449 | ||
450 | obj = ptc->package.elements[0]; | |
451 | ||
452 | if ((obj.type != ACPI_TYPE_BUFFER) | |
453 | || (obj.buffer.length < sizeof(struct acpi_ptc_register)) | |
454 | || (obj.buffer.pointer == NULL)) { | |
ff55a9ce LB |
455 | printk(KERN_ERR PREFIX |
456 | "Invalid _PTC data (control_register)\n"); | |
01854e69 LY |
457 | result = -EFAULT; |
458 | goto end; | |
459 | } | |
460 | memcpy(&pr->throttling.control_register, obj.buffer.pointer, | |
461 | sizeof(struct acpi_ptc_register)); | |
462 | ||
463 | /* | |
464 | * status_register | |
465 | */ | |
466 | ||
467 | obj = ptc->package.elements[1]; | |
468 | ||
469 | if ((obj.type != ACPI_TYPE_BUFFER) | |
470 | || (obj.buffer.length < sizeof(struct acpi_ptc_register)) | |
471 | || (obj.buffer.pointer == NULL)) { | |
472 | printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n"); | |
473 | result = -EFAULT; | |
474 | goto end; | |
475 | } | |
476 | ||
477 | memcpy(&pr->throttling.status_register, obj.buffer.pointer, | |
ff55a9ce | 478 | sizeof(struct acpi_ptc_register)); |
01854e69 | 479 | |
9bcb2721 ZY |
480 | throttling = &pr->throttling; |
481 | ||
482 | if ((throttling->control_register.bit_width + | |
483 | throttling->control_register.bit_offset) > 32) { | |
484 | printk(KERN_ERR PREFIX "Invalid _PTC control register\n"); | |
485 | result = -EFAULT; | |
486 | goto end; | |
487 | } | |
488 | ||
489 | if ((throttling->status_register.bit_width + | |
490 | throttling->status_register.bit_offset) > 32) { | |
491 | printk(KERN_ERR PREFIX "Invalid _PTC status register\n"); | |
492 | result = -EFAULT; | |
493 | goto end; | |
494 | } | |
495 | ||
ff55a9ce | 496 | end: |
01854e69 LY |
497 | kfree(buffer.pointer); |
498 | ||
499 | return result; | |
500 | } | |
c30c620e LB |
501 | |
502 | /* | |
503 | * _TSS - Throttling Supported States | |
504 | */ | |
01854e69 LY |
505 | static int acpi_processor_get_throttling_states(struct acpi_processor *pr) |
506 | { | |
507 | int result = 0; | |
508 | acpi_status status = AE_OK; | |
509 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | |
510 | struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" }; | |
511 | struct acpi_buffer state = { 0, NULL }; | |
512 | union acpi_object *tss = NULL; | |
513 | int i; | |
514 | ||
515 | status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer); | |
516 | if (ACPI_FAILURE(status)) { | |
c30c620e LB |
517 | if (status != AE_NOT_FOUND) { |
518 | ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS")); | |
519 | } | |
01854e69 LY |
520 | return -ENODEV; |
521 | } | |
522 | ||
523 | tss = buffer.pointer; | |
524 | if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) { | |
525 | printk(KERN_ERR PREFIX "Invalid _TSS data\n"); | |
526 | result = -EFAULT; | |
527 | goto end; | |
528 | } | |
529 | ||
530 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n", | |
531 | tss->package.count)); | |
532 | ||
533 | pr->throttling.state_count = tss->package.count; | |
534 | pr->throttling.states_tss = | |
535 | kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count, | |
536 | GFP_KERNEL); | |
537 | if (!pr->throttling.states_tss) { | |
538 | result = -ENOMEM; | |
539 | goto end; | |
540 | } | |
541 | ||
542 | for (i = 0; i < pr->throttling.state_count; i++) { | |
543 | ||
ff55a9ce LB |
544 | struct acpi_processor_tx_tss *tx = |
545 | (struct acpi_processor_tx_tss *)&(pr->throttling. | |
546 | states_tss[i]); | |
01854e69 LY |
547 | |
548 | state.length = sizeof(struct acpi_processor_tx_tss); | |
549 | state.pointer = tx; | |
550 | ||
551 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i)); | |
552 | ||
553 | status = acpi_extract_package(&(tss->package.elements[i]), | |
554 | &format, &state); | |
555 | if (ACPI_FAILURE(status)) { | |
556 | ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data")); | |
557 | result = -EFAULT; | |
558 | kfree(pr->throttling.states_tss); | |
559 | goto end; | |
560 | } | |
561 | ||
562 | if (!tx->freqpercentage) { | |
563 | printk(KERN_ERR PREFIX | |
ff55a9ce | 564 | "Invalid _TSS data: freq is zero\n"); |
01854e69 LY |
565 | result = -EFAULT; |
566 | kfree(pr->throttling.states_tss); | |
567 | goto end; | |
568 | } | |
569 | } | |
570 | ||
571 | end: | |
572 | kfree(buffer.pointer); | |
573 | ||
574 | return result; | |
575 | } | |
c30c620e LB |
576 | |
577 | /* | |
578 | * _TSD - T-State Dependencies | |
579 | */ | |
ff55a9ce | 580 | static int acpi_processor_get_tsd(struct acpi_processor *pr) |
01854e69 LY |
581 | { |
582 | int result = 0; | |
583 | acpi_status status = AE_OK; | |
ff55a9ce LB |
584 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
585 | struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" }; | |
586 | struct acpi_buffer state = { 0, NULL }; | |
587 | union acpi_object *tsd = NULL; | |
01854e69 | 588 | struct acpi_tsd_package *pdomain; |
1180509f ZY |
589 | struct acpi_processor_throttling *pthrottling; |
590 | ||
591 | pthrottling = &pr->throttling; | |
592 | pthrottling->tsd_valid_flag = 0; | |
01854e69 LY |
593 | |
594 | status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer); | |
595 | if (ACPI_FAILURE(status)) { | |
c30c620e LB |
596 | if (status != AE_NOT_FOUND) { |
597 | ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD")); | |
598 | } | |
01854e69 LY |
599 | return -ENODEV; |
600 | } | |
601 | ||
602 | tsd = buffer.pointer; | |
603 | if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) { | |
55ac9a01 | 604 | printk(KERN_ERR PREFIX "Invalid _TSD data\n"); |
01854e69 LY |
605 | result = -EFAULT; |
606 | goto end; | |
607 | } | |
608 | ||
609 | if (tsd->package.count != 1) { | |
55ac9a01 | 610 | printk(KERN_ERR PREFIX "Invalid _TSD data\n"); |
01854e69 LY |
611 | result = -EFAULT; |
612 | goto end; | |
613 | } | |
614 | ||
615 | pdomain = &(pr->throttling.domain_info); | |
616 | ||
617 | state.length = sizeof(struct acpi_tsd_package); | |
618 | state.pointer = pdomain; | |
619 | ||
620 | status = acpi_extract_package(&(tsd->package.elements[0]), | |
ff55a9ce | 621 | &format, &state); |
01854e69 | 622 | if (ACPI_FAILURE(status)) { |
55ac9a01 | 623 | printk(KERN_ERR PREFIX "Invalid _TSD data\n"); |
01854e69 LY |
624 | result = -EFAULT; |
625 | goto end; | |
626 | } | |
627 | ||
628 | if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) { | |
55ac9a01 | 629 | printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n"); |
01854e69 LY |
630 | result = -EFAULT; |
631 | goto end; | |
632 | } | |
633 | ||
634 | if (pdomain->revision != ACPI_TSD_REV0_REVISION) { | |
55ac9a01 | 635 | printk(KERN_ERR PREFIX "Unknown _TSD:revision\n"); |
01854e69 LY |
636 | result = -EFAULT; |
637 | goto end; | |
638 | } | |
639 | ||
1180509f ZY |
640 | pthrottling = &pr->throttling; |
641 | pthrottling->tsd_valid_flag = 1; | |
642 | pthrottling->shared_type = pdomain->coord_type; | |
2fdf66b4 | 643 | cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map); |
1180509f ZY |
644 | /* |
645 | * If the coordination type is not defined in ACPI spec, | |
646 | * the tsd_valid_flag will be clear and coordination type | |
647 | * will be forecd as DOMAIN_COORD_TYPE_SW_ALL. | |
648 | */ | |
649 | if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL && | |
650 | pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY && | |
651 | pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) { | |
652 | pthrottling->tsd_valid_flag = 0; | |
653 | pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; | |
654 | } | |
655 | ||
ff55a9ce | 656 | end: |
01854e69 LY |
657 | kfree(buffer.pointer); |
658 | return result; | |
659 | } | |
660 | ||
1da177e4 LT |
661 | /* -------------------------------------------------------------------------- |
662 | Throttling Control | |
663 | -------------------------------------------------------------------------- */ | |
01854e69 | 664 | static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr) |
1da177e4 | 665 | { |
4be44fcd LB |
666 | int state = 0; |
667 | u32 value = 0; | |
668 | u32 duty_mask = 0; | |
669 | u32 duty_value = 0; | |
1da177e4 | 670 | |
1da177e4 | 671 | if (!pr) |
d550d98d | 672 | return -EINVAL; |
1da177e4 LT |
673 | |
674 | if (!pr->flags.throttling) | |
d550d98d | 675 | return -ENODEV; |
1da177e4 LT |
676 | |
677 | pr->throttling.state = 0; | |
678 | ||
679 | duty_mask = pr->throttling.state_count - 1; | |
680 | ||
681 | duty_mask <<= pr->throttling.duty_offset; | |
682 | ||
683 | local_irq_disable(); | |
684 | ||
685 | value = inl(pr->throttling.address); | |
686 | ||
687 | /* | |
688 | * Compute the current throttling state when throttling is enabled | |
689 | * (bit 4 is on). | |
690 | */ | |
691 | if (value & 0x10) { | |
692 | duty_value = value & duty_mask; | |
693 | duty_value >>= pr->throttling.duty_offset; | |
694 | ||
695 | if (duty_value) | |
4be44fcd | 696 | state = pr->throttling.state_count - duty_value; |
1da177e4 LT |
697 | } |
698 | ||
699 | pr->throttling.state = state; | |
700 | ||
701 | local_irq_enable(); | |
702 | ||
703 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | |
4be44fcd LB |
704 | "Throttling state is T%d (%d%% throttling applied)\n", |
705 | state, pr->throttling.states[state].performance)); | |
1da177e4 | 706 | |
d550d98d | 707 | return 0; |
1da177e4 LT |
708 | } |
709 | ||
f79f06ab | 710 | #ifdef CONFIG_X86 |
9d42a53e | 711 | static int acpi_throttling_rdmsr(u64 *value) |
f79f06ab | 712 | { |
f79f06ab | 713 | u64 msr_high, msr_low; |
f79f06ab ZY |
714 | u64 msr = 0; |
715 | int ret = -1; | |
716 | ||
9d42a53e CL |
717 | if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) || |
718 | !this_cpu_has(X86_FEATURE_ACPI)) { | |
f79f06ab ZY |
719 | printk(KERN_ERR PREFIX |
720 | "HARDWARE addr space,NOT supported yet\n"); | |
721 | } else { | |
722 | msr_low = 0; | |
723 | msr_high = 0; | |
357dc4c3 | 724 | rdmsr_safe(MSR_IA32_THERM_CONTROL, |
f79f06ab ZY |
725 | (u32 *)&msr_low , (u32 *) &msr_high); |
726 | msr = (msr_high << 32) | msr_low; | |
439913ff | 727 | *value = (u64) msr; |
f79f06ab ZY |
728 | ret = 0; |
729 | } | |
730 | return ret; | |
731 | } | |
732 | ||
9d42a53e | 733 | static int acpi_throttling_wrmsr(u64 value) |
f79f06ab | 734 | { |
f79f06ab ZY |
735 | int ret = -1; |
736 | u64 msr; | |
737 | ||
9d42a53e CL |
738 | if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) || |
739 | !this_cpu_has(X86_FEATURE_ACPI)) { | |
f79f06ab ZY |
740 | printk(KERN_ERR PREFIX |
741 | "HARDWARE addr space,NOT supported yet\n"); | |
742 | } else { | |
743 | msr = value; | |
357dc4c3 | 744 | wrmsr_safe(MSR_IA32_THERM_CONTROL, |
f79f06ab ZY |
745 | msr & 0xffffffff, msr >> 32); |
746 | ret = 0; | |
747 | } | |
748 | return ret; | |
749 | } | |
750 | #else | |
9d42a53e | 751 | static int acpi_throttling_rdmsr(u64 *value) |
f79f06ab ZY |
752 | { |
753 | printk(KERN_ERR PREFIX | |
754 | "HARDWARE addr space,NOT supported yet\n"); | |
755 | return -1; | |
756 | } | |
757 | ||
9d42a53e | 758 | static int acpi_throttling_wrmsr(u64 value) |
f79f06ab ZY |
759 | { |
760 | printk(KERN_ERR PREFIX | |
761 | "HARDWARE addr space,NOT supported yet\n"); | |
762 | return -1; | |
763 | } | |
764 | #endif | |
765 | ||
0753f6e0 | 766 | static int acpi_read_throttling_status(struct acpi_processor *pr, |
439913ff | 767 | u64 *value) |
01854e69 | 768 | { |
9bcb2721 | 769 | u32 bit_width, bit_offset; |
344e222e | 770 | u32 ptc_value; |
9bcb2721 | 771 | u64 ptc_mask; |
0753f6e0 ZY |
772 | struct acpi_processor_throttling *throttling; |
773 | int ret = -1; | |
774 | ||
775 | throttling = &pr->throttling; | |
01854e69 LY |
776 | switch (throttling->status_register.space_id) { |
777 | case ACPI_ADR_SPACE_SYSTEM_IO: | |
9bcb2721 ZY |
778 | bit_width = throttling->status_register.bit_width; |
779 | bit_offset = throttling->status_register.bit_offset; | |
780 | ||
ff55a9ce | 781 | acpi_os_read_port((acpi_io_address) throttling->status_register. |
344e222e | 782 | address, &ptc_value, |
9bcb2721 ZY |
783 | (u32) (bit_width + bit_offset)); |
784 | ptc_mask = (1 << bit_width) - 1; | |
439913ff | 785 | *value = (u64) ((ptc_value >> bit_offset) & ptc_mask); |
0753f6e0 | 786 | ret = 0; |
01854e69 LY |
787 | break; |
788 | case ACPI_ADR_SPACE_FIXED_HARDWARE: | |
9d42a53e | 789 | ret = acpi_throttling_rdmsr(value); |
01854e69 LY |
790 | break; |
791 | default: | |
792 | printk(KERN_ERR PREFIX "Unknown addr space %d\n", | |
ff55a9ce | 793 | (u32) (throttling->status_register.space_id)); |
01854e69 | 794 | } |
0753f6e0 | 795 | return ret; |
01854e69 LY |
796 | } |
797 | ||
0753f6e0 | 798 | static int acpi_write_throttling_state(struct acpi_processor *pr, |
439913ff | 799 | u64 value) |
01854e69 | 800 | { |
9bcb2721 | 801 | u32 bit_width, bit_offset; |
0753f6e0 | 802 | u64 ptc_value; |
9bcb2721 | 803 | u64 ptc_mask; |
0753f6e0 | 804 | struct acpi_processor_throttling *throttling; |
01854e69 LY |
805 | int ret = -1; |
806 | ||
0753f6e0 | 807 | throttling = &pr->throttling; |
01854e69 LY |
808 | switch (throttling->control_register.space_id) { |
809 | case ACPI_ADR_SPACE_SYSTEM_IO: | |
9bcb2721 ZY |
810 | bit_width = throttling->control_register.bit_width; |
811 | bit_offset = throttling->control_register.bit_offset; | |
812 | ptc_mask = (1 << bit_width) - 1; | |
813 | ptc_value = value & ptc_mask; | |
814 | ||
ff55a9ce | 815 | acpi_os_write_port((acpi_io_address) throttling-> |
9bcb2721 ZY |
816 | control_register.address, |
817 | (u32) (ptc_value << bit_offset), | |
818 | (u32) (bit_width + bit_offset)); | |
01854e69 LY |
819 | ret = 0; |
820 | break; | |
821 | case ACPI_ADR_SPACE_FIXED_HARDWARE: | |
9d42a53e | 822 | ret = acpi_throttling_wrmsr(value); |
01854e69 LY |
823 | break; |
824 | default: | |
825 | printk(KERN_ERR PREFIX "Unknown addr space %d\n", | |
ff55a9ce | 826 | (u32) (throttling->control_register.space_id)); |
01854e69 LY |
827 | } |
828 | return ret; | |
829 | } | |
830 | ||
0753f6e0 | 831 | static int acpi_get_throttling_state(struct acpi_processor *pr, |
439913ff | 832 | u64 value) |
01854e69 LY |
833 | { |
834 | int i; | |
835 | ||
836 | for (i = 0; i < pr->throttling.state_count; i++) { | |
ff55a9ce LB |
837 | struct acpi_processor_tx_tss *tx = |
838 | (struct acpi_processor_tx_tss *)&(pr->throttling. | |
839 | states_tss[i]); | |
840 | if (tx->control == value) | |
53af9cfb | 841 | return i; |
01854e69 | 842 | } |
53af9cfb | 843 | return -1; |
01854e69 LY |
844 | } |
845 | ||
0753f6e0 | 846 | static int acpi_get_throttling_value(struct acpi_processor *pr, |
439913ff | 847 | int state, u64 *value) |
01854e69 | 848 | { |
0753f6e0 ZY |
849 | int ret = -1; |
850 | ||
ff55a9ce LB |
851 | if (state >= 0 && state <= pr->throttling.state_count) { |
852 | struct acpi_processor_tx_tss *tx = | |
853 | (struct acpi_processor_tx_tss *)&(pr->throttling. | |
854 | states_tss[state]); | |
0753f6e0 ZY |
855 | *value = tx->control; |
856 | ret = 0; | |
01854e69 | 857 | } |
0753f6e0 | 858 | return ret; |
01854e69 LY |
859 | } |
860 | ||
861 | static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) | |
862 | { | |
863 | int state = 0; | |
0753f6e0 | 864 | int ret; |
439913ff | 865 | u64 value; |
01854e69 | 866 | |
01854e69 LY |
867 | if (!pr) |
868 | return -EINVAL; | |
869 | ||
870 | if (!pr->flags.throttling) | |
871 | return -ENODEV; | |
872 | ||
873 | pr->throttling.state = 0; | |
357dc4c3 | 874 | |
0753f6e0 ZY |
875 | value = 0; |
876 | ret = acpi_read_throttling_status(pr, &value); | |
877 | if (ret >= 0) { | |
ff55a9ce | 878 | state = acpi_get_throttling_state(pr, value); |
4973b22a | 879 | if (state == -1) { |
bdf57de4 FP |
880 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
881 | "Invalid throttling state, reset\n")); | |
4973b22a | 882 | state = 0; |
2a908002 | 883 | ret = acpi_processor_set_throttling(pr, state, true); |
4973b22a ZR |
884 | if (ret) |
885 | return ret; | |
886 | } | |
01854e69 LY |
887 | pr->throttling.state = state; |
888 | } | |
01854e69 LY |
889 | |
890 | return 0; | |
891 | } | |
892 | ||
01854e69 LY |
893 | static int acpi_processor_get_throttling(struct acpi_processor *pr) |
894 | { | |
2fdf66b4 | 895 | cpumask_var_t saved_mask; |
357dc4c3 ZY |
896 | int ret; |
897 | ||
87654273 ZY |
898 | if (!pr) |
899 | return -EINVAL; | |
900 | ||
901 | if (!pr->flags.throttling) | |
902 | return -ENODEV; | |
2fdf66b4 RR |
903 | |
904 | if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) | |
905 | return -ENOMEM; | |
906 | ||
357dc4c3 ZY |
907 | /* |
908 | * Migrate task to the cpu pointed by pr. | |
909 | */ | |
2fdf66b4 RR |
910 | cpumask_copy(saved_mask, ¤t->cpus_allowed); |
911 | /* FIXME: use work_on_cpu() */ | |
daef1f35 ZY |
912 | if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { |
913 | /* Can't migrate to the target pr->id CPU. Exit */ | |
914 | free_cpumask_var(saved_mask); | |
915 | return -ENODEV; | |
916 | } | |
357dc4c3 ZY |
917 | ret = pr->throttling.acpi_processor_get_throttling(pr); |
918 | /* restore the previous state */ | |
2fdf66b4 RR |
919 | set_cpus_allowed_ptr(current, saved_mask); |
920 | free_cpumask_var(saved_mask); | |
357dc4c3 ZY |
921 | |
922 | return ret; | |
01854e69 LY |
923 | } |
924 | ||
22cc5019 ZY |
925 | static int acpi_processor_get_fadt_info(struct acpi_processor *pr) |
926 | { | |
927 | int i, step; | |
928 | ||
929 | if (!pr->throttling.address) { | |
930 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n")); | |
931 | return -EINVAL; | |
932 | } else if (!pr->throttling.duty_width) { | |
933 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n")); | |
934 | return -EINVAL; | |
935 | } | |
936 | /* TBD: Support duty_cycle values that span bit 4. */ | |
937 | else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) { | |
938 | printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n"); | |
939 | return -EINVAL; | |
940 | } | |
941 | ||
942 | pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width; | |
943 | ||
944 | /* | |
945 | * Compute state values. Note that throttling displays a linear power | |
946 | * performance relationship (at 50% performance the CPU will consume | |
947 | * 50% power). Values are in 1/10th of a percent to preserve accuracy. | |
948 | */ | |
949 | ||
950 | step = (1000 / pr->throttling.state_count); | |
951 | ||
952 | for (i = 0; i < pr->throttling.state_count; i++) { | |
953 | pr->throttling.states[i].performance = 1000 - step * i; | |
954 | pr->throttling.states[i].power = 1000 - step * i; | |
955 | } | |
956 | return 0; | |
957 | } | |
958 | ||
/*
 * acpi_processor_set_throttling_fadt - program a T-state through the
 * legacy FADT duty-cycle I/O port.
 * @pr:    target processor
 * @state: requested T-state index (0 leaves throttling disabled)
 * @force: when true, reprogram even if @state equals the cached state
 *
 * Performs the documented sequence: disable throttling (clear bit 4),
 * write the new duty value, then re-enable.  Interrupts are disabled
 * around the port accesses so the read-modify-write is not interleaved.
 * NOTE(review): this protects only against local interrupts, not
 * against another CPU touching the same port — presumably callers
 * serialize; confirm at call sites.
 *
 * Returns 0 on success, -EINVAL for bad arguments, -ENODEV when
 * throttling is unsupported, -EPERM when @state is below the platform
 * limit (_TPC).
 */
static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
					      int state, bool force)
{
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	/* Already at the requested state: nothing to do unless forced. */
	if (!force && (state == pr->throttling.state))
		return 0;

	/* _TPC forbids states faster-throttled than the platform limit. */
	if (state < pr->throttling_platform_limit)
		return -EPERM;
	/*
	 * Calculate the duty_value and duty_mask.
	 */
	if (state) {
		duty_value = pr->throttling.state_count - state;

		duty_value <<= pr->throttling.duty_offset;

		/* Used to clear all duty_value bits */
		duty_mask = pr->throttling.state_count - 1;

		duty_mask <<= acpi_gbl_FADT.duty_offset;
		duty_mask = ~duty_mask;
	}

	local_irq_disable();

	/*
	 * Disable throttling by writing a 0 to bit 4.  Note that we must
	 * turn it off before you can change the duty_value.
	 */
	value = inl(pr->throttling.address);
	if (value & 0x10) {
		value &= 0xFFFFFFEF;
		outl(value, pr->throttling.address);
	}

	/*
	 * Write the new duty_value and then enable throttling.  Note
	 * that a state value of 0 leaves throttling disabled.
	 */
	if (state) {
		value &= duty_mask;
		value |= duty_value;
		outl(value, pr->throttling.address);

		value |= 0x00000010;
		outl(value, pr->throttling.address);
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Throttling state set to T%d (%d%%)\n", state,
			  (pr->throttling.states[state].performance ? pr->
			   throttling.states[state].performance / 10 : 0)));

	return 0;
}
1031 | ||
6c5cf8aa | 1032 | static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, |
2a908002 | 1033 | int state, bool force) |
01854e69 | 1034 | { |
0753f6e0 | 1035 | int ret; |
439913ff | 1036 | u64 value; |
01854e69 LY |
1037 | |
1038 | if (!pr) | |
1039 | return -EINVAL; | |
1040 | ||
1041 | if ((state < 0) || (state > (pr->throttling.state_count - 1))) | |
1042 | return -EINVAL; | |
1043 | ||
1044 | if (!pr->flags.throttling) | |
1045 | return -ENODEV; | |
1046 | ||
2a908002 | 1047 | if (!force && (state == pr->throttling.state)) |
01854e69 LY |
1048 | return 0; |
1049 | ||
1050 | if (state < pr->throttling_platform_limit) | |
1051 | return -EPERM; | |
1052 | ||
0753f6e0 ZY |
1053 | value = 0; |
1054 | ret = acpi_get_throttling_value(pr, state, &value); | |
1055 | if (ret >= 0) { | |
1056 | acpi_write_throttling_state(pr, value); | |
01854e69 LY |
1057 | pr->throttling.state = state; |
1058 | } | |
01854e69 LY |
1059 | |
1060 | return 0; | |
1061 | } | |
1062 | ||
2a908002 FP |
1063 | int acpi_processor_set_throttling(struct acpi_processor *pr, |
1064 | int state, bool force) | |
01854e69 | 1065 | { |
2fdf66b4 | 1066 | cpumask_var_t saved_mask; |
3391a76f | 1067 | int ret = 0; |
33a2a529 ZY |
1068 | unsigned int i; |
1069 | struct acpi_processor *match_pr; | |
1070 | struct acpi_processor_throttling *p_throttling; | |
1071 | struct throttling_tstate t_state; | |
2fdf66b4 | 1072 | cpumask_var_t online_throttling_cpus; |
87654273 ZY |
1073 | |
1074 | if (!pr) | |
1075 | return -EINVAL; | |
1076 | ||
1077 | if (!pr->flags.throttling) | |
1078 | return -ENODEV; | |
1079 | ||
1080 | if ((state < 0) || (state > (pr->throttling.state_count - 1))) | |
1081 | return -EINVAL; | |
1082 | ||
2fdf66b4 RR |
1083 | if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) |
1084 | return -ENOMEM; | |
1085 | ||
1086 | if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) { | |
1087 | free_cpumask_var(saved_mask); | |
1088 | return -ENOMEM; | |
1089 | } | |
1090 | ||
daef1f35 ZY |
1091 | if (cpu_is_offline(pr->id)) { |
1092 | /* | |
1093 | * the cpu pointed by pr->id is offline. Unnecessary to change | |
1094 | * the throttling state any more. | |
1095 | */ | |
1096 | return -ENODEV; | |
1097 | } | |
1098 | ||
2fdf66b4 | 1099 | cpumask_copy(saved_mask, ¤t->cpus_allowed); |
33a2a529 ZY |
1100 | t_state.target_state = state; |
1101 | p_throttling = &(pr->throttling); | |
2fdf66b4 RR |
1102 | cpumask_and(online_throttling_cpus, cpu_online_mask, |
1103 | p_throttling->shared_cpu_map); | |
357dc4c3 | 1104 | /* |
33a2a529 ZY |
1105 | * The throttling notifier will be called for every |
1106 | * affected cpu in order to get one proper T-state. | |
1107 | * The notifier event is THROTTLING_PRECHANGE. | |
357dc4c3 | 1108 | */ |
2fdf66b4 | 1109 | for_each_cpu(i, online_throttling_cpus) { |
33a2a529 ZY |
1110 | t_state.cpu = i; |
1111 | acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, | |
1112 | &t_state); | |
1113 | } | |
1114 | /* | |
1115 | * The function of acpi_processor_set_throttling will be called | |
1116 | * to switch T-state. If the coordination type is SW_ALL or HW_ALL, | |
1117 | * it is necessary to call it for every affected cpu. Otherwise | |
1118 | * it can be called only for the cpu pointed by pr. | |
1119 | */ | |
1120 | if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { | |
2fdf66b4 | 1121 | /* FIXME: use work_on_cpu() */ |
daef1f35 ZY |
1122 | if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { |
1123 | /* Can't migrate to the pr->id CPU. Exit */ | |
1124 | ret = -ENODEV; | |
1125 | goto exit; | |
1126 | } | |
33a2a529 | 1127 | ret = p_throttling->acpi_processor_set_throttling(pr, |
2a908002 | 1128 | t_state.target_state, force); |
33a2a529 ZY |
1129 | } else { |
1130 | /* | |
1131 | * When the T-state coordination is SW_ALL or HW_ALL, | |
1132 | * it is necessary to set T-state for every affected | |
1133 | * cpus. | |
1134 | */ | |
2fdf66b4 | 1135 | for_each_cpu(i, online_throttling_cpus) { |
706546d0 | 1136 | match_pr = per_cpu(processors, i); |
33a2a529 ZY |
1137 | /* |
1138 | * If the pointer is invalid, we will report the | |
1139 | * error message and continue. | |
1140 | */ | |
1141 | if (!match_pr) { | |
1142 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | |
1143 | "Invalid Pointer for CPU %d\n", i)); | |
1144 | continue; | |
1145 | } | |
1146 | /* | |
1147 | * If the throttling control is unsupported on CPU i, | |
1148 | * we will report the error message and continue. | |
1149 | */ | |
1150 | if (!match_pr->flags.throttling) { | |
1151 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | |
25985edc | 1152 | "Throttling Control is unsupported " |
33a2a529 ZY |
1153 | "on CPU %d\n", i)); |
1154 | continue; | |
1155 | } | |
1156 | t_state.cpu = i; | |
2fdf66b4 | 1157 | /* FIXME: use work_on_cpu() */ |
daef1f35 ZY |
1158 | if (set_cpus_allowed_ptr(current, cpumask_of(i))) |
1159 | continue; | |
33a2a529 ZY |
1160 | ret = match_pr->throttling. |
1161 | acpi_processor_set_throttling( | |
2a908002 | 1162 | match_pr, t_state.target_state, force); |
33a2a529 ZY |
1163 | } |
1164 | } | |
1165 | /* | |
1166 | * After the set_throttling is called, the | |
1167 | * throttling notifier is called for every | |
1168 | * affected cpu to update the T-states. | |
1169 | * The notifier event is THROTTLING_POSTCHANGE | |
1170 | */ | |
2fdf66b4 | 1171 | for_each_cpu(i, online_throttling_cpus) { |
33a2a529 ZY |
1172 | t_state.cpu = i; |
1173 | acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, | |
1174 | &t_state); | |
1175 | } | |
357dc4c3 | 1176 | /* restore the previous state */ |
2fdf66b4 RR |
1177 | /* FIXME: use work_on_cpu() */ |
1178 | set_cpus_allowed_ptr(current, saved_mask); | |
daef1f35 | 1179 | exit: |
2fdf66b4 RR |
1180 | free_cpumask_var(online_throttling_cpus); |
1181 | free_cpumask_var(saved_mask); | |
357dc4c3 | 1182 | return ret; |
01854e69 LY |
1183 | } |
1184 | ||
/*
 * acpi_processor_get_throttling_info - probe throttling support for @pr.
 *
 * Selects the _PTC/_TSS/_TPC interface when all three objects evaluate
 * successfully, otherwise falls back to the legacy FADT duty-cycle
 * interface.  Initializes T-state coordination from _TSD (defaulting to
 * SW_ALL with only this CPU in the domain when _TSD parsing fails), and
 * finally reads — and, if necessary, clears — any throttling left
 * enabled by firmware.
 *
 * Returns 0 on success or when throttling simply is not usable on this
 * part; a negative errno only when reading/clearing the initial state
 * fails (pr->flags.throttling is cleared again in that case).
 */
int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
	int result = 0;
	struct acpi_processor_throttling *pthrottling;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
			  pr->throttling.address,
			  pr->throttling.duty_offset,
			  pr->throttling.duty_width));

	/*
	 * Evaluate _PTC, _TSS and _TPC
	 * They must all be present or none of them can be used.
	 */
	if (acpi_processor_get_throttling_control(pr) ||
		acpi_processor_get_throttling_states(pr) ||
		acpi_processor_get_platform_limit(pr))
	{
		/* Fall back to the legacy FADT duty-cycle interface. */
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_fadt;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_fadt;
		/* FADT data unusable too: report success with no support. */
		if (acpi_processor_get_fadt_info(pr))
			return 0;
	} else {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_ptc;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_ptc;
	}

	/*
	 * If TSD package for one CPU can't be parsed successfully, it means
	 * that this CPU will have no coordination with other CPUs.
	 */
	if (acpi_processor_get_tsd(pr)) {
		pthrottling = &pr->throttling;
		pthrottling->tsd_valid_flag = 0;
		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

	/*
	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
	 * This shouldn't be an issue as few (if any) mobile systems ever
	 * used this part.
	 */
	if (errata.piix4.throttle) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Throttling not supported on PIIX4 A- or B-step\n"));
		return 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
			  pr->throttling.state_count));

	pr->flags.throttling = 1;

	/*
	 * Disable throttling (if enabled).  We'll let subsequent policy (e.g.
	 * thermal) decide to lower performance if it so chooses, but for now
	 * we'll crank up the speed.
	 */

	result = acpi_processor_get_throttling(pr);
	if (result)
		goto end;

	if (pr->throttling.state) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Disabling throttling (was T%d)\n",
				  pr->throttling.state));
		result = acpi_processor_set_throttling(pr, 0, false);
		if (result)
			goto end;
	}

      end:
	if (result)
		pr->flags.throttling = 0;

	return result;
}
1269 |