#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
	.x86_tss = {
		.sp0 = TOP_OF_INIT_STACK,
#ifdef CONFIG_X86_32
		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
#endif
	 },
#ifdef CONFIG_X86_32
	/*
	 * Note that the .io_bitmap member must be extra-big. This is because
	 * the CPU will access an additional byte beyond the end of the IO
	 * permission bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
#ifdef CONFIG_X86_32
	.SYSENTER_stack_canary	= STACK_END_MAGIC,
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);
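
/*
 * For illustration: the rest of the kernel reaches this structure through the
 * normal per-CPU accessors, e.g. per_cpu(cpu_tss, cpu) or, for the local CPU,
 * this_cpu_ptr(&cpu_tss), as exit_thread() below does.
 */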
/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif

	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}
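
/*
 * arch_dup_task_struct() is the arch hook called from the generic fork path
 * (dup_task_struct() in kernel/fork.c) once the child's task_struct has been
 * allocated: the memcpy() above seeds the child with the parent's data before
 * the FPU state is duplicated explicitly via fpu__copy().
 */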
/*
 * Free current thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	free_vm86(t);

	fpu__drop(fpu);
}
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear(&tsk->thread.fpu);
}
static void hard_disable_TSC(void)
{
	cr4_set_bits(X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	cr4_clear_bits(X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val = 0;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
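
/*
 * Usage sketch (userspace side, not part of this file): get_tsc_mode() and
 * set_tsc_mode() back the PR_GET_TSC/PR_SET_TSC prctl() operations, e.g.
 *
 *	#include <sys/prctl.h>
 *
 *	int mode;
 *	prctl(PR_GET_TSC, &mode);		// ends up in get_tsc_mode()
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	// ends up in set_tsc_mode()
 *
 * After PR_TSC_SIGSEGV, executing RDTSC in this task faults (SIGSEGV) because
 * CR4.TSD has been set by hard_disable_TSC() above.
 */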
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}
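
/*
 * __switch_to_xtra() is the slow path of the context switch: __switch_to()
 * only calls it when prev or next carries one of the TIF work flags handled
 * above (TIF_BLOCKSTEP, TIF_NOTSC, TIF_IO_BITMAP, ...). TIF_IO_BITMAP and
 * thread.io_bitmap_ptr are set up by ioperm(), which is why the per-task
 * bitmap has to be copied into the shared per-CPU TSS here.
 */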
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	local_touch_nmi();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine..
 */
void __cpuidle default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
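
/*
 * Note (editorial assumption): the __cpuidle annotation places default_idle()
 * and mwait_idle() below in the .cpuidle.text section, so that helpers such
 * as cpu_in_idle() can tell from a saved instruction pointer alone whether a
 * CPU was sitting in its idle routine.
 */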
#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	for (;;)
		halt();
}
bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			pr_info("System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/* Force broadcast so ACPI can not interfere. */
			tick_broadcast_force();
			pr_info("Switch to broadcast mode on CPU%d\n", cpu);
		}
		tick_broadcast_enter();

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		tick_broadcast_exit();
		local_irq_enable();
	} else
		default_idle();
}
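
/*
 * In C1E the local APIC timer stops, just as it does in C3. Entering tick
 * broadcast mode hands the affected CPU's timer duties to an always-running
 * broadcast clockevent device; per the comment above, the switch back via
 * tick_broadcast_exit() must happen with interrupts still disabled.
 */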
/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT) || static_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}
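
/*
 * The monitored address is the current thread's flags word: a remote CPU that
 * needs to wake this one (e.g. via set_tsk_need_resched()) writes
 * TIF_NEED_RESCHED into exactly that cacheline, which makes MWAIT return
 * without an IPI while TIF_POLLING_NRFLAG is set.
 */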
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}
void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (x86_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
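
/*
 * For illustration, the "idle=" variants accepted on the kernel command line
 * by idle_setup() above:
 *
 *	idle=poll	- never halt, spin in the idle loop (IDLE_POLL)
 *	idle=halt	- force HLT, keep cpuidle loadable (IDLE_HALT)
 *	idle=nomwait	- disable MWAIT for C-states (IDLE_NOMWAIT)
 */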
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}
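
/*
 * The two randomization helpers above use different windows: the initial
 * stack pointer is lowered by up to 8192 bytes (8 KB) and then aligned down
 * to 16 bytes, while the heap break is placed somewhere within a
 * 0x02000000-byte (32 MB) window starting at mm->brk.
 */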
/*
 * Return saved PC of a blocked thread.
 * What is this good for? it will be always the scheduler or ret_from_fork.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct inactive_task_frame *frame =
		(struct inactive_task_frame *) READ_ONCE(tsk->thread.sp);
	return READ_ONCE_NOCHECK(frame->ret_addr);
}
/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	if (!try_get_task_stack(p))
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		goto out;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start
	 *
	 * The tasks stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start;

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		goto out;

	fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
	do {
		if (fp < bottom || fp > top)
			goto out;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip)) {
			ret = ip;
			goto out;
		}
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);

out:
	put_task_stack(p);
	return ret;
}
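
/*
 * The value computed here is what /proc/<pid>/wchan and "ps -o wchan" report:
 * the first return address on the blocked task's stack that is not itself
 * inside the scheduler (in_sched_functions()), i.e. the place where the task
 * went to sleep. The frame-pointer walk gives up after 16 frames or as soon
 * as the task starts running again.
 */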