arch/x86/kernel/process.c (mirror_ubuntu-bionic-kernel.git)
x86: prevent C-states hang on AMD C1E enabled machines

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <asm/system.h>

unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);

struct kmem_cache *task_xstate_cachep;

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        *dst = *src;
        if (src->thread.xstate) {
                dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
                                                      GFP_KERNEL);
                if (!dst->thread.xstate)
                        return -ENOMEM;
                WARN_ON((unsigned long)dst->thread.xstate & 15);
                memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
        }
        return 0;
}
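
/*
 * Note: the WARN_ON in arch_dup_task_struct checks 16-byte alignment
 * because the FXSAVE/FXRSTOR instructions require their save area to
 * be 16-byte aligned; task_xstate_cachep is created with that
 * alignment in arch_task_cache_init() below.
 */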

void free_thread_xstate(struct task_struct *tsk)
{
        if (tsk->thread.xstate) {
                kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
                tsk->thread.xstate = NULL;
        }
}

void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}

void arch_task_cache_init(void)
{
        task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC, NULL);
}
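
/*
 * SLAB_PANIC makes kmem_cache_create() panic on failure, so the users
 * of task_xstate_cachep above need no NULL check on the cache itself.
 */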

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
        return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
        return 1;
}
#endif

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (hlt_use_halt()) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
        } else {
                local_irq_enable();
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard the old value of
 * pm_idle and pick up the new one. Required while changing the pm_idle
 * handler on SMP systems.
 *
 * The caller must have changed pm_idle to the new value before the call. The
 * old pm_idle value will not be used by any CPU after this function returns.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
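
/*
 * Typical caller pattern (a sketch; my_new_idle is a made-up example
 * handler, not one defined in this file):
 *
 *      pm_idle = my_new_idle;  // publish the new handler
 *      cpu_idle_wait();        // after this, no CPU still runs the
 *                              // old handler
 */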

/*
 * This uses the new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate the IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter an optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
        if (!need_resched()) {
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(ax, cx);
        }
}
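
/*
 * MWAIT hint encoding (per the Intel SDM): EAX[7:4] selects the target
 * C-state (0 = C1), EAX[3:0] a sub-state; ECX bit 0 requests that
 * masked interrupts be treated as break events. The ACPI idle driver
 * passes the firmware-provided hints in here.
 */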

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        if (!need_resched()) {
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
        } else
                local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        local_irq_enable();
        cpu_relax();
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Power savings
 * then depend on a clock divisor and the current P-state of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
static int __cpuinitdata force_mwait;

#define MWAIT_INFO                      0x05
#define MWAIT_ECX_EXTENDED_INFO         0x01
#define MWAIT_EDX_C1                    0xf0
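
/*
 * These mirror the CPUID Monitor/MWAIT leaf (leaf 5): ECX bit 0
 * advertises the extended enumeration in EDX, and EDX[7:4] gives the
 * number of C1 sub-states MWAIT supports, which MWAIT_EDX_C1 masks.
 */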

static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        if (force_mwait)
                return 1;

        if (c->cpuid_level < MWAIT_INFO)
                return 0;

        cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
        /* Check whether EDX has extended info about MWAIT */
        if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
                return 1;

        /*
         * edx enumerates MONITOR/MWAIT extensions. Check whether
         * C1 supports MWAIT
         */
        return (edx & MWAIT_EDX_C1);
}

/*
 * Check for AMD CPUs, which potentially have C1E support
 */
static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
{
        if (c->x86_vendor != X86_VENDOR_AMD)
                return 0;

        if (c->x86 < 0x0F)
                return 0;

        /* Family 0x0f models < rev F do not have C1E */
        if (c->x86 == 0x0f && c->x86_model < 0x40)
                return 0;

        return 1;
}
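
/*
 * The model cut-off above encodes K8 revision F: on family 0x0f,
 * models 0x40 and up are rev F and later parts, the first K8s with
 * C1E support.
 */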

static cpumask_t c1e_mask = CPU_MASK_NONE;
static int c1e_detected;

void c1e_remove_cpu(int cpu)
{
        cpu_clear(cpu, c1e_mask);
}

/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void c1e_idle(void)
{
        if (need_resched())
                return;

        if (!c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        c1e_detected = 1;
                        mark_tsc_unstable("TSC halt in C1E");
                        printk(KERN_INFO "System has C1E enabled\n");
                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
                }
        }

        if (c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpu_isset(cpu, c1e_mask)) {
                        cpu_set(cpu, c1e_mask);
                        /*
                         * Force broadcast so ACPI can not interfere. Needs
                         * to run with interrupts enabled as it uses
                         * smp_call_function.
                         */
                        local_irq_enable();
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
                               cpu);
                        local_irq_disable();
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}
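
/*
 * For reference: MSR_K8_INT_PENDING_MSG is the K8 "Interrupt Pending
 * and CMP Halt" register (MSR 0xc0010055), and K8_INTP_C1E_ACTIVE_MASK
 * covers the two bits (27 and 28 in this tree's msr-index.h) that
 * indicate the BIOS has armed C1E.
 */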

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
                        " performance may degrade.\n");
        }
#endif
        if (pm_idle)
                return;

        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * One CPU supports mwait => All CPUs support mwait
                 */
                printk(KERN_INFO "using mwait in idle threads.\n");
                pm_idle = mwait_idle;
        } else if (check_c1e_idle(c)) {
                printk(KERN_INFO "using C1E aware idle routine\n");
                pm_idle = c1e_idle;
        } else
                pm_idle = default_idle;
}

static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                printk(KERN_INFO "using polling idle threads.\n");
                pm_idle = poll_idle;
        } else if (!strcmp(str, "mwait"))
                force_mwait = 1;
        else if (!strcmp(str, "halt")) {
                /*
                 * When the boot option idle=halt is given, halt is
                 * forced to be used for CPU idle. In that case the CPU
                 * C2/C3 states won't be used.
                 * To continue to load the CPU idle driver, don't touch
                 * boot_option_idle_override.
                 */
                pm_idle = default_idle;
                idle_halt = 1;
                return 0;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * When the boot option idle=nomwait is given, mwait is
                 * disabled for the CPU C2/C3 states. This case, too,
                 * leaves boot_option_idle_override untouched.
                 */
                idle_nomwait = 1;
                return 0;
        } else
                return -1;

        boot_option_idle_override = 1;
        return 0;
}
early_param("idle", idle_setup);
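
/*
 * Summary of the "idle=" kernel command line options handled above
 * (e.g. booting with idle=nomwait):
 *
 *      idle=poll       busy-poll in the idle loop (poll_idle)
 *      idle=mwait      force MWAIT even where mwait_usable() says no
 *      idle=halt       always HLT; C2/C3 states are not entered
 *      idle=nomwait    do not use MWAIT for C2/C3 state entry
 */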