arch/x86/kernel/cpu/mcheck/therm_throt.c
/*
 * Thermal throttle event support code (such as syslog messaging and rate
 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
 *
 * This allows consistent reporting of CPU thermal throttle events.
 *
 * Maintains a counter in /sys that keeps track of the number of thermal
 * events, such that the user knows how bad the thermal problem might be
 * (since the logging to syslog and mcelog is rate limited).
 *
 * Author: Dmitriy Zavin (dmitriyz@google.com)
 *
 * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
 *          Inspired by Ross Biro's and Al Borchers' counter code.
 */
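
/*
 * For example, the per-CPU counter created below can be read with:
 *   cat /sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count
 */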
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>

/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL		(300 * HZ)

#define THERMAL_THROTTLING_EVENT	0
#define POWER_LIMIT_EVENT		1

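/*
 * THERMAL_THROTTLING_EVENT corresponds to the PROCHOT status bits and
 * POWER_LIMIT_EVENT to the power-limit status bits read from
 * IA32_(PACKAGE_)THERM_STATUS in intel_thermal_interrupt() below.
 */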
/*
 * Current thermal event state:
 */
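/*
 * new_event:  whether the most recent interrupt reported the condition
 *             as currently active
 * next_check: jiffies64 time after which the next message may be logged
 *             (rate limiting, see CHECK_INTERVAL)
 * count:      total number of events observed (exported via sysfs)
 * last_count: value of count when the last message was logged
 */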
struct _thermal_state {
	bool			new_event;
	int			event;
	u64			next_check;
	unsigned long		count;
	unsigned long		last_count;
};

struct thermal_state {
	struct _thermal_state core_throttle;
	struct _thermal_state core_power_limit;
	struct _thermal_state package_throttle;
	struct _thermal_state package_power_limit;
	struct _thermal_state core_thresh0;
	struct _thermal_state core_thresh1;
	struct _thermal_state pkg_thresh0;
	struct _thermal_state pkg_thresh1;
};

/* Callback to handle core threshold interrupts */
int (*platform_thermal_notify)(__u64 msr_val);
EXPORT_SYMBOL(platform_thermal_notify);

/* Callback to handle package threshold interrupts */
int (*platform_thermal_package_notify)(__u64 msr_val);
EXPORT_SYMBOL_GPL(platform_thermal_package_notify);

/*
 * Callback support for rate control: return true if the callback
 * implements its own rate control.
 */
bool (*platform_thermal_package_rate_control)(void);
EXPORT_SYMBOL_GPL(platform_thermal_package_rate_control);

static DEFINE_PER_CPU(struct thermal_state, thermal_state);

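/*
 * Set (in intel_init_thermal()) once thermal interrupt handling is up;
 * gates creation of the sysfs interface in thermal_throttle_init_device().
 */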
static atomic_t therm_throt_en	= ATOMIC_INIT(0);

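/* Thermal LVT value saved on the BSP at boot; used to restore the APs */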
static u32 lvtthmr_init __read_mostly;

#ifdef CONFIG_SYSFS
#define define_therm_throt_device_one_ro(_name)				\
	static DEVICE_ATTR(_name, 0444,					\
			   therm_throt_device_show_##_name,		\
			   NULL)					\

#define define_therm_throt_device_show_func(event, name)		\
									\
static ssize_t therm_throt_device_show_##event##_##name(		\
			struct device *dev,				\
			struct device_attribute *attr,			\
			char *buf)					\
{									\
	unsigned int cpu = dev->id;					\
	ssize_t ret;							\
									\
	preempt_disable();	/* CPU hotplug */			\
	if (cpu_online(cpu)) {						\
		ret = sprintf(buf, "%lu\n",				\
			      per_cpu(thermal_state, cpu).event.name);	\
	} else								\
		ret = 0;						\
	preempt_enable();						\
									\
	return ret;							\
}
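
/*
 * For instance, the core_throttle/count pair below expands (roughly) to:
 *
 *	static ssize_t therm_throt_device_show_core_throttle_count(
 *			struct device *dev, struct device_attribute *attr,
 *			char *buf);
 *	static DEVICE_ATTR(core_throttle_count, 0444,
 *			   therm_throt_device_show_core_throttle_count, NULL);
 *
 * i.e. a read-only sysfs attribute backed by the per-cpu counters above.
 */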

define_therm_throt_device_show_func(core_throttle, count);
define_therm_throt_device_one_ro(core_throttle_count);

define_therm_throt_device_show_func(core_power_limit, count);
define_therm_throt_device_one_ro(core_power_limit_count);

define_therm_throt_device_show_func(package_throttle, count);
define_therm_throt_device_one_ro(package_throttle_count);

define_therm_throt_device_show_func(package_power_limit, count);
define_therm_throt_device_one_ro(package_power_limit_count);

static struct attribute *thermal_throttle_attrs[] = {
	&dev_attr_core_throttle_count.attr,
	NULL
};

static struct attribute_group thermal_attr_group = {
	.attrs	= thermal_throttle_attrs,
	.name	= "thermal_throttle"
};
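
/*
 * The group shows up as /sys/devices/system/cpu/cpuN/thermal_throttle/;
 * the power-limit and package attributes are added to it conditionally
 * in thermal_throttle_add_dev() below.
 */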
#endif /* CONFIG_SYSFS */

#define CORE_LEVEL	0
#define PACKAGE_LEVEL	1

/**
 * therm_throt_process - Process thermal throttling event from interrupt
 * @new_event:	Whether the condition is currently active, since the
 *		thermal interrupt normally gets called both when the
 *		thermal event begins and once it has ended.
 * @event:	THERMAL_THROTTLING_EVENT or POWER_LIMIT_EVENT
 * @level:	CORE_LEVEL or PACKAGE_LEVEL
 *
 * This function is called from the thermal interrupt handler; the APIC
 * interrupt itself is acknowledged at the end of smp_thermal_interrupt().
 *
 * It will take care of rate limiting and printing messages to the syslog.
 *
 * Returns: 0 : Event should NOT be further logged, i.e. still in
 *              "timeout" from previous log message.
 *          1 : Event should be logged further, and a message has been
 *              printed to the syslog.
 */
static int therm_throt_process(bool new_event, int event, int level)
{
	struct _thermal_state *state;
	unsigned int this_cpu = smp_processor_id();
	bool old_event;
	u64 now;
	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);

	now = get_jiffies_64();
	if (level == CORE_LEVEL) {
		if (event == THERMAL_THROTTLING_EVENT)
			state = &pstate->core_throttle;
		else if (event == POWER_LIMIT_EVENT)
			state = &pstate->core_power_limit;
		else
			return 0;
	} else if (level == PACKAGE_LEVEL) {
		if (event == THERMAL_THROTTLING_EVENT)
			state = &pstate->package_throttle;
		else if (event == POWER_LIMIT_EVENT)
			state = &pstate->package_power_limit;
		else
			return 0;
	} else
		return 0;

	old_event = state->new_event;
	state->new_event = new_event;

	if (new_event)
		state->count++;

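	/*
	 * Rate limiting: while inside the CHECK_INTERVAL window, stay
	 * quiet as long as new events keep arriving; a pure state change
	 * (count unchanged, e.g. the condition just cleared) is reported
	 * immediately.
	 */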
	if (time_before64(now, state->next_check) &&
			state->count != state->last_count)
		return 0;

	state->next_check = now + CHECK_INTERVAL;
	state->last_count = state->count;

	/* if we just entered the thermal event */
	if (new_event) {
		if (event == THERMAL_THROTTLING_EVENT)
			printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
				this_cpu,
				level == CORE_LEVEL ? "Core" : "Package",
				state->count);
		else
			printk(KERN_CRIT "CPU%d: %s power limit notification (total events = %lu)\n",
				this_cpu,
				level == CORE_LEVEL ? "Core" : "Package",
				state->count);
		return 1;
	}
	if (old_event) {
		if (event == THERMAL_THROTTLING_EVENT)
			printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
				this_cpu,
				level == CORE_LEVEL ? "Core" : "Package");
		else
			printk(KERN_INFO "CPU%d: %s power limit normal\n",
				this_cpu,
				level == CORE_LEVEL ? "Core" : "Package");
		return 1;
	}

	return 0;
}

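/*
 * Returns 1 and re-arms the rate-limit window if at least CHECK_INTERVAL
 * jiffies have passed since the last notification for this threshold,
 * 0 otherwise.
 */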
static int thresh_event_valid(int level, int event)
{
	struct _thermal_state *state;
	unsigned int this_cpu = smp_processor_id();
	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
	u64 now = get_jiffies_64();

	if (level == PACKAGE_LEVEL)
		state = (event == 0) ? &pstate->pkg_thresh0 :
						&pstate->pkg_thresh1;
	else
		state = (event == 0) ? &pstate->core_thresh0 :
						&pstate->core_thresh1;

	if (time_before64(now, state->next_check))
		return 0;

	state->next_check = now + CHECK_INTERVAL;

	return 1;
}

#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
static __cpuinit int thermal_throttle_add_dev(struct device *dev,
				unsigned int cpu)
{
	int err;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	err = sysfs_create_group(&dev->kobj, &thermal_attr_group);
	if (err)
		return err;

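	/*
	 * Note: a failure below is only reported through the final return
	 * value; attributes that were already added are not individually
	 * rolled back here.
	 */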
	if (cpu_has(c, X86_FEATURE_PLN))
		err = sysfs_add_file_to_group(&dev->kobj,
					      &dev_attr_core_power_limit_count.attr,
					      thermal_attr_group.name);
	if (cpu_has(c, X86_FEATURE_PTS)) {
		err = sysfs_add_file_to_group(&dev->kobj,
					      &dev_attr_package_throttle_count.attr,
					      thermal_attr_group.name);
		if (cpu_has(c, X86_FEATURE_PLN))
			err = sysfs_add_file_to_group(&dev->kobj,
					&dev_attr_package_power_limit_count.attr,
					thermal_attr_group.name);
	}

	return err;
}

static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &thermal_attr_group);
}

/* Mutex protecting device creation against CPU hotplug: */
static DEFINE_MUTEX(therm_cpu_lock);

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static __cpuinit int
thermal_throttle_cpu_callback(struct notifier_block *nfb,
			      unsigned long action,
			      void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;
	int err = 0;

	dev = get_cpu_device(cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		mutex_lock(&therm_cpu_lock);
		err = thermal_throttle_add_dev(dev, cpu);
		mutex_unlock(&therm_cpu_lock);
		WARN_ON(err);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		mutex_lock(&therm_cpu_lock);
		thermal_throttle_remove_dev(dev);
		mutex_unlock(&therm_cpu_lock);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
{
	.notifier_call = thermal_throttle_cpu_callback,
};

static __init int thermal_throttle_init_device(void)
{
	unsigned int cpu = 0;
	int err;

	if (!atomic_read(&therm_throt_en))
		return 0;

	register_hotcpu_notifier(&thermal_throttle_cpu_notifier);

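	/*
	 * therm_cpu_lock only matters when CPUs can come and go; without
	 * CPU hotplug the walk below cannot race with the notifier above.
	 */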
#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&therm_cpu_lock);
#endif
	/* connect live CPUs to sysfs */
	for_each_online_cpu(cpu) {
		err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
		WARN_ON(err);
	}
#ifdef CONFIG_HOTPLUG_CPU
	mutex_unlock(&therm_cpu_lock);
#endif

	return 0;
}
device_initcall(thermal_throttle_init_device);

#endif /* CONFIG_SYSFS */

static void notify_package_thresholds(__u64 msr_val)
{
	bool notify_thres_0 = false;
	bool notify_thres_1 = false;

	if (!platform_thermal_package_notify)
		return;

	/* lower threshold check */
	if (msr_val & THERM_LOG_THRESHOLD0)
		notify_thres_0 = true;
	/* higher threshold check */
	if (msr_val & THERM_LOG_THRESHOLD1)
		notify_thres_1 = true;

	if (!notify_thres_0 && !notify_thres_1)
		return;

	if (platform_thermal_package_rate_control &&
		platform_thermal_package_rate_control()) {
		/* Rate control is implemented in callback */
		platform_thermal_package_notify(msr_val);
		return;
	}

	/* lower threshold reached */
	if (notify_thres_0 && thresh_event_valid(PACKAGE_LEVEL, 0))
		platform_thermal_package_notify(msr_val);
	/* higher threshold reached */
	if (notify_thres_1 && thresh_event_valid(PACKAGE_LEVEL, 1))
		platform_thermal_package_notify(msr_val);
}
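
/*
 * These hooks are assigned at runtime by platform code; for instance,
 * in later kernels drivers/thermal/x86_pkg_temp_thermal.c registers a
 * package notify callback together with its own rate control, in which
 * case the thresh_event_valid() throttling above is bypassed.
 */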

static void notify_thresholds(__u64 msr_val)
{
	/*
	 * Check whether the interrupt handler is defined;
	 * otherwise simply return.
	 */
	if (!platform_thermal_notify)
		return;

	/* lower threshold reached */
	if ((msr_val & THERM_LOG_THRESHOLD0) &&
			thresh_event_valid(CORE_LEVEL, 0))
		platform_thermal_notify(msr_val);
	/* higher threshold reached */
	if ((msr_val & THERM_LOG_THRESHOLD1) &&
			thresh_event_valid(CORE_LEVEL, 1))
		platform_thermal_notify(msr_val);
}

/* Thermal transition interrupt handler */
static void intel_thermal_interrupt(void)
{
	__u64 msr_val;

	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);

	/* Check for violation of core thermal thresholds */
	notify_thresholds(msr_val);

	if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
				THERMAL_THROTTLING_EVENT,
				CORE_LEVEL) != 0)
		mce_log_therm_throt_event(msr_val);

	if (this_cpu_has(X86_FEATURE_PLN))
		therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
					POWER_LIMIT_EVENT,
					CORE_LEVEL);

	if (this_cpu_has(X86_FEATURE_PTS)) {
		rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
		/* check violations of package thermal thresholds */
		notify_package_thresholds(msr_val);
		therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
					THERMAL_THROTTLING_EVENT,
					PACKAGE_LEVEL);
		if (this_cpu_has(X86_FEATURE_PLN))
			therm_throt_process(msr_val &
					PACKAGE_THERM_STATUS_POWER_LIMIT,
					POWER_LIMIT_EVENT,
					PACKAGE_LEVEL);
	}
}

static void unexpected_thermal_interrupt(void)
{
	printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n",
			smp_processor_id());
}

static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;

asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
{
	irq_enter();
	exit_idle();
	inc_irq_stat(irq_thermal_count);
	smp_thermal_vector();
	irq_exit();
	/* Ack only at the end to avoid potential reentry */
	ack_APIC_irq();
}

/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
	if (!cpu_has_apic)
		return 0;
	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
		return 0;
	return 1;
}

void __init mcheck_intel_therm_init(void)
{
	/*
	 * This function is only called on the boot CPU. Save the initial
	 * thermal LVT value on the BSP and use it later to restore the
	 * thermal LVT entry that the BIOS programmed on the APs.
	 */
	if (intel_thermal_supported(&boot_cpu_data))
		lvtthmr_init = apic_read(APIC_LVTTHMR);
}

void intel_init_thermal(struct cpuinfo_x86 *c)
{
	unsigned int cpu = smp_processor_id();
	int tm2 = 0;
	u32 l, h;

	if (!intel_thermal_supported(c))
		return;

	/*
	 * First check if it's enabled already, in which case there might
	 * be some SMM goo which handles it, so we can't even put a handler
	 * since it might be delivered via SMI already:
	 */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);

	h = lvtthmr_init;
	/*
	 * The initial value of thermal LVT entries on all APs always reads
	 * 0x10000 because APs are woken up by the BSP issuing an
	 * INIT-SIPI-SIPI sequence to them, and the LVT registers are reset
	 * to 0s except for the mask bits, which are set to 1s when APs
	 * receive the INIT IPI. If the BIOS takes over the thermal
	 * interrupt and sets its delivery mode to SMI (not fixed), this
	 * restores on the AP the value the BIOS programmed, based on the
	 * BSP's copy saved in mcheck_intel_therm_init(), since the BIOS
	 * sets the same value for all threads/cores.
	 */
	if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
		apic_write(APIC_LVTTHMR, lvtthmr_init);

	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
		return;
	}

	/* Check whether a vector already exists */
	if (h & APIC_VECTOR_MASK) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal LVT vector (%#x) already installed\n",
		       cpu, (h & APIC_VECTOR_MASK));
		return;
	}

	/*
	 * Early Pentium M models use a different method for enabling TM2;
	 * on later models the TM2 enable bit lives in MSR_IA32_MISC_ENABLE,
	 * which is still held in 'l' from the rdmsr() above.
	 */
	if (cpu_has(c, X86_FEATURE_TM2)) {
		if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
			rdmsr(MSR_THERM2_CTL, l, h);
			if (l & MSR_THERM2_CTL_TM_SELECT)
				tm2 = 1;
		} else if (l & MSR_IA32_MISC_ENABLE_TM2)
			tm2 = 1;
	}

	/* We'll mask the thermal vector in the lapic till we're ready: */
	h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
	apic_write(APIC_LVTTHMR, h);

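	/*
	 * Enable the low/high threshold interrupts (consumed via
	 * notify_thresholds() above) and, where supported, the power
	 * limit notification (PLN).
	 */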
	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
	if (cpu_has(c, X86_FEATURE_PLN))
		wrmsr(MSR_IA32_THERM_INTERRUPT,
		      l | (THERM_INT_LOW_ENABLE
			| THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h);
	else
		wrmsr(MSR_IA32_THERM_INTERRUPT,
		      l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);

	if (cpu_has(c, X86_FEATURE_PTS)) {
		rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
		if (cpu_has(c, X86_FEATURE_PLN))
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
			      l | (PACKAGE_THERM_INT_LOW_ENABLE
				| PACKAGE_THERM_INT_HIGH_ENABLE
				| PACKAGE_THERM_INT_PLN_ENABLE), h);
		else
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
			      l | (PACKAGE_THERM_INT_LOW_ENABLE
				| PACKAGE_THERM_INT_HIGH_ENABLE), h);
	}

	smp_thermal_vector = intel_thermal_interrupt;

	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

	/* Unmask the thermal vector: */
	l = apic_read(APIC_LVTTHMR);
	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

	printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
		    tm2 ? "TM2" : "TM1");

	/* enable thermal throttle processing */
	atomic_set(&therm_throt_en, 1);
}