#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/rmwcc.h>
#include <asm/percpu.h>
#include <linux/thread_info.h>

DECLARE_PER_CPU(int, __preempt_count);

/*
 * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
 * that a decrement hitting 0 means we can and should reschedule.
 */
#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)

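/*
 * Worked example (editorial illustration, not part of the original header;
 * assumes PREEMPT_NEED_RESCHED is the high bit, 0x80000000, as defined
 * outside this file):
 *
 *	0x80000001  preemption disabled once, no reschedule pending
 *	0x80000000  preemptible, no reschedule pending (PREEMPT_ENABLED)
 *	0x00000001  preemption disabled once, reschedule pending
 *	0x00000000  preemptible and reschedule pending: a decrement
 *		    hitting zero both can and should reschedule
 */
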
/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
}

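/*
 * Example (illustrative, continuing the encoding above): with a raw per-cpu
 * value of 0x80000002, preempt_count() masks off the top bit and returns 2,
 * so existing "non-zero means we cannot preempt" checks keep working even
 * though the word also carries the inverted resched bit.
 */
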
static __always_inline void preempt_count_set(int pc)
{
	raw_cpu_write_4(__preempt_count, pc);
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { \
	task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
} while (0)

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
	raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
}

static __always_inline void clear_preempt_need_resched(void)
{
	raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
}

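/*
 * Worked trace (editorial sketch, not in the original file; again assuming
 * the 0x80000000 encoding):
 *
 *	count = PREEMPT_ENABLED;	// 0x80000000: preemptible, nothing pending
 *	__preempt_count_add(1);		// 0x80000001: inside preempt_disable()
 *	set_preempt_need_resched();	// 0x00000001: bit cleared, resched pending
 *	test_preempt_need_resched();	// true
 *	__preempt_count_dec_and_test();	// hits 0x00000000: true, reschedule
 */
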
/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	raw_cpu_add_4(__preempt_count, val);
}

static __always_inline void __preempt_count_sub(int val)
{
	raw_cpu_add_4(__preempt_count, -val);
}

/*
 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule,
 * a decrement which hits zero means we have no preempt_count and should
 * reschedule.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
}

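/*
 * Rough expansion of the GEN_UNARY_RMWcc() use above (editorial sketch of
 * the asm-goto variant in <asm/rmwcc.h>; the exact macro text may differ):
 *
 *	asm_volatile_goto("decl " __percpu_arg(0) "; je %l[cc_label]"
 *			  : : "m" (__preempt_count) : "memory" : cc_label);
 *	return false;
 * cc_label:
 *	return true;
 *
 * One decrement whose zero flag simultaneously answers "did the count reach
 * zero?" and, thanks to the inverted bit, "is a reschedule pending?".
 */
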
/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(void)
{
	return unlikely(!raw_cpu_read_4(__preempt_count));
}

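/*
 * Typical caller (sketch of the pattern used in <linux/preempt.h>, shown
 * here for context only): the whole word reads zero exactly when no preempt
 * count is held and the inverted NEED_RESCHED bit has been cleared, so
 *
 *	if (should_resched())
 *		__preempt_schedule();
 *
 * reschedules with a single load and test on the fast path.
 */
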
#ifdef CONFIG_PREEMPT
  extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() asm ("call ___preempt_schedule")
  extern asmlinkage void preempt_schedule(void);
  extern asmlinkage void ___preempt_schedule_notrace(void);
# define __preempt_schedule_notrace() asm ("call ___preempt_schedule_notrace")
  extern asmlinkage void preempt_schedule_notrace(void);
#endif
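
/*
 * Editorial note (not in the original header): the triple-underscore symbols
 * are assembly thunks that save and restore the call-clobbered registers
 * around preempt_schedule() and preempt_schedule_notrace(), so the bare
 * inline "call" above can be emitted at preempt_enable() sites without the
 * compiler having to spill those registers itself.
 */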

#endif /* __ASM_PREEMPT_H */