// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <linux/acpi.h>
#include <linux/elf-randomize.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/api.h>
#include <asm/fpu/sched.h>
#include <asm/debugreg.h>
#include <asm/tlbflush.h>
#include <asm/switch_to.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
#include <asm/frame.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
	.x86_tss = {
		/*
		 * .sp0 is only used when entering ring 0 from a lower
		 * privilege level. Since the init task never runs anything
		 * but ring 0 code, there is no need for a valid value here.
		 */
		.sp0 = (1UL << (BITS_PER_LONG - 1)) + 1,

		.sp1 = TOP_OF_INIT_STACK,

		.io_bitmap_base	= IO_BITMAP_OFFSET_INVALID,
	},
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);

DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif
	/* Drop the copied pointer to current's fpstate */
	dst->thread.fpu.fpstate = NULL;

	return 0;
}

/*
 * Free thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	struct fpu *fpu = &t->fpu;

	if (test_thread_flag(TIF_IO_BITMAP))
		io_bitmap_exit(tsk);

	free_vm86(t);

	fpu__drop(fpu);
}

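/*
 * Install the TLS passed to clone() into the child: via a GDT entry for
 * 32-bit (compat) callers, or via FSBASE (ARCH_SET_FS) for 64-bit callers.
 */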
static int set_new_tls(struct task_struct *p, unsigned long tls)
{
	struct user_desc __user *utls = (struct user_desc __user *)tls;

	if (in_ia32_syscall())
		return do_set_thread_area(p, -1, utls, 0);
	else
		return do_set_thread_area_64(p, ARCH_SET_FS, tls);
}

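/*
 * Set up the kernel stack frame and register state of a newly forked task
 * so that it starts executing in ret_from_fork.
 */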
int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	struct inactive_task_frame *frame;
	struct fork_frame *fork_frame;
	struct pt_regs *childregs;
	int ret = 0;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	frame->bp = encode_frame_pointer(childregs);
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap = NULL;
	p->thread.iopl_warn = 0;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

#ifdef CONFIG_X86_64
	p->thread.fsindex = current->thread.fsindex;
	p->thread.fsbase = current->thread.fsbase;
	p->thread.gsindex = current->thread.gsindex;
	p->thread.gsbase = current->thread.gsbase;

	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
#else
	p->thread.sp0 = (unsigned long) (childregs + 1);
	/*
	 * Clear all status flags including IF and set fixed bit. 64bit
	 * does not have this initialization as the frame does not contain
	 * flags. The flags consistency (especially vs. AC) is there
	 * ensured via objtool, which lacks 32bit support.
	 */
	frame->flags = X86_EFLAGS_FIXED;
#endif

	/* Kernel thread ? */
	if (unlikely(p->flags & PF_KTHREAD)) {
		p->thread.pkru = pkru_get_init_value();
		memset(childregs, 0, sizeof(struct pt_regs));
		kthread_frame_init(frame, sp, arg);
		return 0;
	}

	/*
	 * Clone current's PKRU value from hardware. tsk->thread.pkru
	 * is only valid when scheduled out.
	 */
	p->thread.pkru = read_pkru();

	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

#ifdef CONFIG_X86_32
	task_user_gs(p) = get_user_gs(current_pt_regs());
#endif

	if (unlikely(p->flags & PF_IO_WORKER)) {
		/*
		 * An IO thread is a user space thread, but it doesn't
		 * return to ret_from_fork().
		 *
		 * In order to indicate that to tools like gdb,
		 * we reset the stack and instruction pointers.
		 *
		 * It does the same kernel frame setup to return to a kernel
		 * function that a kernel thread does.
		 */
		childregs->sp = 0;
		childregs->ip = 0;
		kthread_frame_init(frame, sp, arg);
		return 0;
	}

	/* Set a new TLS for the child thread? */
	if (clone_flags & CLONE_SETTLS)
		ret = set_new_tls(p, tls);

	if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
		io_bitmap_share(p);

	return ret;
}

static void pkru_flush_thread(void)
{
	/*
	 * If PKRU is enabled the default PKRU value has to be loaded into
	 * the hardware right here (similar to context switch).
	 */
	pkru_write_default();
}

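/*
 * Reset the per-thread state on exec: clear hardware breakpoints, TLS
 * entries and the FPU/PKRU state inherited from the parent.
 */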
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu_flush_thread();
	pkru_flush_thread();
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val = PR_TSC_ENABLE;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

DEFINE_PER_CPU(u64, msr_misc_features_shadow);

static void set_cpuid_faulting(bool on)
{
	u64 msrval;

	msrval = this_cpu_read(msr_misc_features_shadow);
	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
	this_cpu_write(msr_misc_features_shadow, msrval);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}

static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}

static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}

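/*
 * Query and update the CPUID faulting state of a task; reached from user
 * space via arch_prctl(ARCH_GET_CPUID / ARCH_SET_CPUID).
 */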
static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}

static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
{
	if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}

/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();

	/*
	 * Don't inherit TIF_SSBD across exec boundary when
	 * PR_SPEC_DISABLE_NOEXEC is used.
	 */
	if (test_thread_flag(TIF_SSBD) &&
	    task_spec_ssb_noexec(current)) {
		clear_thread_flag(TIF_SSBD);
		task_clear_spec_ssb_disable(current);
		task_clear_spec_ssb_noexec(current);
		speculation_ctrl_update(task_thread_info(current)->flags);
	}
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void switch_to_bitmap(unsigned long tifp)
{
	/*
	 * Invalidate I/O bitmap if the previous task used it. This prevents
	 * any possible leakage of an active I/O bitmap.
	 *
	 * If the next task has an I/O bitmap it will handle it on exit to
	 * user mode.
	 */
	if (tifp & _TIF_IO_BITMAP)
		tss_invalidate_io_bitmap();
}

static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
{
	/*
	 * Copy at least the byte range of the incoming tasks bitmap which
	 * covers the permitted I/O ports.
	 *
	 * If the previous task which used an I/O bitmap had more bits
	 * permitted, then the copy needs to cover those as well so they
	 * get turned off.
	 */
	memcpy(tss->io_bitmap.bitmap, iobm->bitmap,
	       max(tss->io_bitmap.prev_max, iobm->max));

	/*
	 * Store the new max and the sequence number of this bitmap
	 * and a pointer to the bitmap itself.
	 */
	tss->io_bitmap.prev_max = iobm->max;
	tss->io_bitmap.prev_sequence = iobm->sequence;
}

/**
 * tss_update_io_bitmap - Update I/O bitmap before exiting to usermode
 */
void native_tss_update_io_bitmap(void)
{
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
	struct thread_struct *t = &current->thread;
	u16 *base = &tss->x86_tss.io_bitmap_base;

	if (!test_thread_flag(TIF_IO_BITMAP)) {
		native_tss_invalidate_io_bitmap();
		return;
	}

	if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) {
		*base = IO_BITMAP_OFFSET_VALID_ALL;
	} else {
		struct io_bitmap *iobm = t->io_bitmap;

		/*
		 * Only copy bitmap data when the sequence number differs. The
		 * update time is accounted to the incoming task.
		 */
		if (tss->io_bitmap.prev_sequence != iobm->sequence)
			tss_copy_io_bitmap(tss, iobm);

		/* Enable the bitmap */
		*base = IO_BITMAP_OFFSET_VALID_MAP;
	}

	/*
	 * Make sure that the TSS limit is covering the IO bitmap. It might
	 * have been cut down by a VMEXIT to 0x67 which would cause a
	 * subsequent I/O access from user space to trigger a #GP because the
	 * bitmap is outside the TSS limit.
	 */
	refresh_tss_limit();
}

#else /* CONFIG_X86_IOPL_IOPERM */
static inline void switch_to_bitmap(unsigned long tifp) { }
#endif

#ifdef CONFIG_SMP

struct ssb_state {
	struct ssb_state	*shared_state;
	raw_spinlock_t		lock;
	unsigned int		disable_state;
	unsigned long		local_state;
};

#define LSTATE_SSB	0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);

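/*
 * Called on CPU bringup: link this CPU's ssb_state to the shared state of
 * its HT siblings so SSBD can be tracked per physical core.
 */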
void speculative_store_bypass_ht_init(void)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	st->local_state = 0;

	/*
	 * Shared state setup happens once on the first bringup
	 * of the CPU. It's not destroyed on CPU hotunplug.
	 */
	if (st->shared_state)
		return;

	raw_spin_lock_init(&st->lock);

	/*
	 * Go over HT siblings and check whether one of them has set up the
	 * shared state pointer already.
	 */
	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
		if (cpu == this_cpu)
			continue;

		if (!per_cpu(ssb_state, cpu).shared_state)
			continue;

		/* Link it to the state of the sibling: */
		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
		return;
	}

	/*
	 * First HT sibling to come up on the core. Link shared state of
	 * the first HT sibling to itself. The siblings on the same core
	 * which come up later will see the shared state pointer and link
	 * themselves to the state of this CPU.
	 */
	st->shared_state = st;
}

/*
 * Logic is: First HT sibling enables SSBD for both siblings in the core
 * and last sibling to disable it, disables it for the whole core. This is
 * how MSR_SPEC_CTRL works in "hardware":
 *
 *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
 */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	u64 msr = x86_amd_ls_cfg_base;

	if (!static_cpu_has(X86_FEATURE_ZEN)) {
		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
		wrmsrl(MSR_AMD64_LS_CFG, msr);
		return;
	}

	if (tifn & _TIF_SSBD) {
		/*
		 * Since this can race with prctl(), block reentry on the
		 * same CPU.
		 */
		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
			return;

		msr |= x86_amd_ls_cfg_ssbd_mask;

		raw_spin_lock(&st->shared_state->lock);
		/* First sibling enables SSBD: */
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		st->shared_state->disable_state++;
		raw_spin_unlock(&st->shared_state->lock);
	} else {
		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
			return;

		raw_spin_lock(&st->shared_state->lock);
		st->shared_state->disable_state--;
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		raw_spin_unlock(&st->shared_state->lock);
	}
}

#else

static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

	wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif

static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
	/*
	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
	 * so ssbd_tif_to_spec_ctrl() just works.
	 */
	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}

/*
 * Update the MSRs managing speculation control, during context switch.
 *
 * tifp: Previous task's thread flags
 * tifn: Next task's thread flags
 */
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
						       unsigned long tifn)
{
	unsigned long tif_diff = tifp ^ tifn;
	u64 msr = x86_spec_ctrl_base;
	bool updmsr = false;

	lockdep_assert_irqs_disabled();

	/* Handle change of TIF_SSBD depending on the mitigation method. */
	if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_ssb_virt_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_core_ssb_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		updmsr |= !!(tif_diff & _TIF_SSBD);
		msr |= ssbd_tif_to_spec_ctrl(tifn);
	}

	/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
	if (IS_ENABLED(CONFIG_SMP) &&
	    static_branch_unlikely(&switch_to_cond_stibp)) {
		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
		msr |= stibp_tif_to_spec_ctrl(tifn);
	}

	if (updmsr)
		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}

static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
		if (task_spec_ssb_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SSBD);
		else
			clear_tsk_thread_flag(tsk, TIF_SSBD);

		if (task_spec_ib_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
		else
			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
	}
	/* Return the updated threadinfo flags */
	return task_thread_info(tsk)->flags;
}

void speculation_ctrl_update(unsigned long tif)
{
	unsigned long flags;

	/* Forced update. Make sure all relevant TIF flags are different */
	local_irq_save(flags);
	__speculation_ctrl_update(~tif, tif);
	local_irq_restore(flags);
}

/* Called from seccomp/prctl update */
void speculation_ctrl_update_current(void)
{
	preempt_disable();
	speculation_ctrl_update(speculation_ctrl_update_tif(current));
	preempt_enable();
}

static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

	newval = cr4 ^ mask;
	if (newval != cr4) {
		this_cpu_write(cpu_tlbstate.cr4, newval);
		__write_cr4(newval);
	}
}

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
	unsigned long tifp, tifn;

	tifn = READ_ONCE(task_thread_info(next_p)->flags);
	tifp = READ_ONCE(task_thread_info(prev_p)->flags);

	switch_to_bitmap(tifp);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits_irqsoff(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

	if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
		__speculation_ctrl_update(tifp, tifn);
	} else {
		speculation_ctrl_update_tif(prev_p);
		tifn = speculation_ctrl_update_tif(next_p);

		/* Enforce MSR update to ensure consistent state */
		__speculation_ctrl_update(~tifn, tifn);
	}

	if ((tifp ^ tifn) & _TIF_SLD)
		switch_to_sld(tifn);
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine..
 */
void __cpuidle default_idle(void)
{
	raw_safe_halt();
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();

	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	/*
	 * Use wbinvd on processors that support SME. This provides support
	 * for performing a successful kexec when going from SME inactive
	 * to SME active (or vice-versa). The cache must be cleared so that
	 * if there are entries with the same physical address, both with and
	 * without the encryption bit, they don't race each other when flushed
	 * and potentially end up with the wrong entry being committed to
	 * memory.
	 */
	if (boot_cpu_has(X86_FEATURE_SME))
		native_wbinvd();

	for (;;) {
		/*
		 * Use native_halt() so that memory contents don't change
		 * (stack usage and variables) after possibly issuing the
		 * native_wbinvd() above.
		 */
		native_halt();
	}
}

/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 *
 * XXX this function is completely buggered vs RCU and tracing.
 */
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

	/*
	 * The switch back from broadcast mode needs to be called with
	 * interrupts disabled.
	 */
	raw_local_irq_disable();
	tick_broadcast_exit();
	raw_local_irq_enable();
}

/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			raw_local_irq_enable();
	} else {
		raw_local_irq_enable();
	}
	__current_clr_polling();
}

void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}

void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}

void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;

	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected K8_INTP_C1E_ACTIVE_MASK bits are set in
	 * MSR_K8_INT_PENDING_MSG.
	 */
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");
	pr_info("System has AMD C1E enabled\n");
}

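/*
 * Parse the "idle=" boot parameter: "poll", "halt" or "nomwait".
 */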
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);

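/*
 * Randomize the initial user stack pointer by up to 8 kB, unless address
 * space randomization is disabled for the task.
 */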
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
	int count = 0;

	if (p == current || task_is_running(p))
		return 0;

	if (!try_get_task_stack(p))
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		goto out;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start
	 *
	 * The tasks stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start;

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		goto out;

	fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
	do {
		if (fp < bottom || fp > top)
			goto out;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip)) {
			ret = ip;
			goto out;
		}
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && !task_is_running(p));

out:
	put_task_stack(p);
	return ret;
}

long do_arch_prctl_common(struct task_struct *task, int option,
			  unsigned long arg2)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(task, arg2);
	case ARCH_GET_XCOMP_SUPP:
	case ARCH_GET_XCOMP_PERM:
	case ARCH_REQ_XCOMP_PERM:
		return fpu_xstate_prctl(task, option, arg2);
	}

	return -EINVAL;
}