#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
	.x86_tss = {
		.sp0			= TOP_OF_INIT_STACK,
		.io_bitmap_base		= INVALID_IO_BITMAP_OFFSET,
	},
	/*
	 * Note that the .io_bitmap member must be extra-big. This is because
	 * the CPU will access an additional byte beyond the end of the IO
	 * permission bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	.io_bitmap			= { [0 ... IO_BITMAP_LONGS] = ~0 },
#ifdef CONFIG_X86_32
	.SYSENTER_stack_canary		= STACK_END_MAGIC,
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);
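/*
 * Size check, for illustration: IO_BITMAP_BITS is 65536 I/O ports, so the
 * bitmap proper is 65536 / 8 = 8192 bytes, i.e. IO_BITMAP_LONGS longwords
 * (1024 on a 64-bit kernel).  The designated initializer above spans
 * indices 0 .. IO_BITMAP_LONGS inclusive, so the one extra trailing
 * longword the CPU may touch is set to all ones as well.
 */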
/*
 * This gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif

	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}
/*
 * Free current thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	fpu__drop(fpu);
}
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear(&tsk->thread.fpu);
}
static void hard_disable_TSC(void)
{
	cr4_set_bits(X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	cr4_clear_bits(X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}
int get_tsc_mode(unsigned long adr)
{
	unsigned int val = PR_TSC_ENABLE;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
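/*
 * Illustrative userspace view (not part of this file's build): the two
 * handlers above back the prctl(2) TSC interface.
 *
 *	#include <sys/prctl.h>
 *
 *	int mode;
 *	prctl(PR_GET_TSC, &mode);		ends up in get_tsc_mode()
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	ends up in set_tsc_mode()
 *
 * After PR_TSC_SIGSEGV, executing RDTSC in that task faults until the mode
 * is switched back to PR_TSC_ENABLE.
 */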
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less.  Copying
		 * max(prev, next) bytes also overwrites any ports that
		 * were enabled for prev but lie beyond next's range,
		 * since the unused tail of a task's bitmap is all ones.
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);
#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	local_touch_nmi();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine.
 */
void __cpuidle default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU from the online mask:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	for (;;)
		halt();
}
/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

	/*
	 * The switch back from broadcast mode needs to be called with
	 * interrupts disabled.
	 */
	local_irq_disable();
	tick_broadcast_exit();
	local_irq_enable();
}
/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite; they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT) || static_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	return 1;
}
/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}
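/*
 * Note on the wakeup path above: the monitored address is
 * current_thread_info()->flags, so a remote CPU that sets TIF_NEED_RESCHED
 * writes into the armed cache line and breaks the MWAIT.  Together with the
 * polling protocol (current_set_polling_and_test()), this lets the scheduler
 * skip the reschedule IPI for an idle CPU.
 */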
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}
void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}
void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;

	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected, K8_INTP_C1E_ACTIVE_MASK bits are set in
	 * MSR_K8_INT_PENDING_MSG.
	 */
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");
	pr_info("System has AMD C1E enabled\n");
}
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * With idle=halt, HALT is forced as the CPU idle routine,
		 * so the CPU C2/C3 states won't be used anymore.  Unlike
		 * idle=poll this leaves the generic idle loop alone, so
		 * the cpuidle driver can still be loaded.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * With idle=nomwait, MWAIT will not be used for the CPU
		 * C2/C3 states.  Nothing beyond boot_option_idle_override
		 * needs to be touched for that.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
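/*
 * Example: booting with "idle=poll", "idle=halt" or "idle=nomwait" on the
 * kernel command line lands in idle_setup() above, with str pointing at the
 * text after the '=' sign.
 */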
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
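/*
 * Illustrative arithmetic: the stack pointer is moved down by a random
 * 0..8191 bytes and then rounded down to a 16-byte boundary, so the
 * effective offset is up to 8 KiB in 16-byte steps.
 */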
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}
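/*
 * Note: 0x02000000 is 32 MiB, so the randomized brk ends up at a
 * page-aligned address within a 32 MiB window starting at mm->brk.
 */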
/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct inactive_task_frame *frame =
		(struct inactive_task_frame *) READ_ONCE(tsk->thread.sp);
	return READ_ONCE_NOCHECK(frame->ret_addr);
}
/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	if (!try_get_task_stack(p))
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		goto out;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start
	 *
	 * The task's stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start;

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		goto out;

	fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
	do {
		if (fp < bottom || fp > top)
			goto out;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip)) {
			ret = ip;
			goto out;
		}
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);