#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/mwait.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
	.x86_tss = {
		.sp0		= TOP_OF_INIT_STACK,
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
	},
	/*
	 * Note that the .io_bitmap member must be extra-big. This is because
	 * the CPU will access an additional byte beyond the end of the IO
	 * permission bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);
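
/*
 * Sizing note: the bitmap covers 65536 I/O ports at one bit each,
 * i.e. 65536 / 8 = 8192 bytes.  Because the CPU reads one extra byte
 * past the end of the bitmap, the array holds IO_BITMAP_LONGS + 1 longs,
 * and the designated initializer above sets every one of them to ~0.
 */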
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
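
/*
 * Minimal usage sketch (not part of this file; my_idle_notify and
 * my_idle_nb are hypothetical names): a client embeds a notifier_block
 * and registers it, and its callback then receives IDLE_START/IDLE_END
 * as published by enter_idle()/__exit_idle() below.
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *unused)
 *	{
 *		if (action == IDLE_START)
 *			pr_debug("cpu entering idle\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call	= my_idle_notify,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 */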
struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);
/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;

	dst->thread.fpu_counter = 0;
	dst->thread.fpu.has_fpu = 0;
	dst->thread.fpu.state = NULL;
	task_disable_lazy_fpu_restore(dst);
	if (tsk_used_math(src)) {
		int err = fpu_alloc(&dst->thread.fpu);
		if (err)
			return err;
		fpu_copy(dst, src);
	}
	return 0;
}
void free_thread_xstate(struct task_struct *tsk)
{
	fpu_free(&tsk->thread.fpu);
}

void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}
void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}
/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	drop_fpu(me);
}
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		drop_fpu(tsk);
		free_thread_xstate(tsk);
	} else {
		if (!tsk_used_math(tsk)) {
			/* kthread execs. TODO: cleanup this horror. */
			if (WARN_ON(init_fpu(tsk)))
				force_sig(SIGKILL, tsk);
			user_fpu_begin();
		}
		restore_init_xstate();
	}
}
static void hard_disable_TSC(void)
{
	cr4_set_bits(X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	cr4_clear_bits(X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}
int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}
int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
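
/*
 * Illustrative user-space sketch (not part of this file): the two helpers
 * above implement the x86 side of the PR_GET_TSC/PR_SET_TSC prctl(2)
 * options, so a task can toggle its own TSC access, e.g.:
 *
 *	#include <sys/prctl.h>
 *
 *	int mode;
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);	// rdtsc now raises SIGSEGV
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE, 0, 0, 0);	// rdtsc allowed again
 *	prctl(PR_GET_TSC, &mode, 0, 0, 0);		// reads the current mode back
 */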
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}
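
/*
 * Context note: the per-task IO bitmap consumed above is created by the
 * ioperm(2) syscall. For example, a privileged process calling
 *
 *	ioperm(0x378, 3, 1);	// allow ports 0x378..0x37a
 *
 * gets thread.io_bitmap_ptr allocated and TIF_IO_BITMAP set, so each
 * switch to that task copies its bitmap into the per-CPU TSS here.
 */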
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);
#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif
void enter_idle(void)
{
	this_cpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
void arch_cpu_idle_enter(void)
{
	local_touch_nmi();
	enter_idle();
}

void arch_cpu_idle_exit(void)
{
	__exit_idle();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}
/*
 * We use this if we don't have any better idle routine.
 */
void default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;)
		halt();
}
bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}
/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			pr_info("System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/* Force broadcast so ACPI can not interfere. */
			tick_broadcast_force();
			pr_info("Switch to broadcast mode on CPU%d\n", cpu);
		}
		tick_broadcast_enter();

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		tick_broadcast_exit();
		local_irq_enable();
	} else
		default_idle();
}
/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite: they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT))
		return 0;

	return 1;
}
/*
 * MONITOR/MWAIT with no hints, used for the default C1 state.
 * This invokes MWAIT with interrupts enabled and no flags,
 * which is backwards compatible with the original MWAIT implementation.
 */
static void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			smp_mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			smp_mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}
void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (x86_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
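
/*
 * Usage example: the handler above parses the "idle=" kernel command-line
 * parameter, so booting with
 *
 *	idle=poll	(busy-poll instead of halting)
 *	idle=halt	(force HLT for C1; C2/C3 are not entered)
 *	idle=nomwait	(do not use MWAIT for C-states)
 *
 * steers the idle-routine selection performed by select_idle_routine().
 */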
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
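
/*
 * Arithmetic note: when randomization is enabled, the starting stack
 * pointer is lowered by a random 0..8191 bytes and then rounded down to a
 * 16-byte boundary, i.e. (for an already 16-byte-aligned sp) one of 512
 * possible 16-byte-aligned offsets.
 */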
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;

	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
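
/*
 * Arithmetic note: 0x02000000 is 32 MiB, so the heap break lands at a
 * page-aligned address somewhere in [mm->brk, mm->brk + 32 MiB); if
 * randomize_range() returns 0, the unrandomized mm->brk is used as-is.
 */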