#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/rmwcc.h>
#include <asm/percpu.h>
#include <linux/thread_info.h>

DECLARE_PER_CPU(int, __preempt_count);

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return __this_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
}
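
/*
 * Illustrative numbers (a sketch, assuming PREEMPT_NEED_RESCHED is the
 * high bit, 0x80000000, as in <linux/preempt.h>): a raw per-cpu value
 * of 0x80000001 -- one preempt_disable(), no resched pending -- reads
 * back as
 *
 *	0x80000001 & ~0x80000000 == 1
 *
 * so callers see only the nesting depth they expect.
 */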

static __always_inline void preempt_count_set(int pc)
{
	__this_cpu_write_4(__preempt_count, pc);
}

/*
 * must be macros to avoid header recursion hell
 */
#define task_preempt_count(p) \
	(task_thread_info(p)->saved_preempt_count & ~PREEMPT_NEED_RESCHED)

#define init_task_preempt_count(p) do { \
	task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
} while (0)

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */
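
/*
 * Worked example (a sketch, assuming the <linux/preempt.h> values
 * PREEMPT_NEED_RESCHED == 0x80000000 and PREEMPT_ENABLED ==
 * PREEMPT_NEED_RESCHED):
 *
 *	count == 0x80000000		preemptible, no resched pending
 *	preempt_disable()		count == 0x80000001
 *	set_preempt_need_resched()	count == 0x00000001
 *	preempt_enable(): decl		count == 0x00000000, ZF set
 *
 * The decrement hitting zero means both "bit cleared" (resched needed)
 * and "no preempt count" (resched allowed), so one decl decides it all.
 */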

static __always_inline void set_preempt_need_resched(void)
{
	__this_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
}

static __always_inline void clear_preempt_need_resched(void)
{
	__this_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(__this_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	__this_cpu_add_4(__preempt_count, val);
}

static __always_inline void __preempt_count_sub(int val)
{
	__this_cpu_add_4(__preempt_count, -val);
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
}
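
/*
 * Rough expansion of the RMWcc line above (an illustration based on the
 * asm-goto flavour of <asm/rmwcc.h>; the exact macro text may differ):
 *
 *	asm volatile goto ("decl " __percpu_arg(0) "; je %l[cc_label]"
 *			: : "m" (__preempt_count) : "memory" : cc_label);
 *	return false;
 * cc_label:
 *	return true;
 *
 * The return statements live inside the macro, which is why the
 * function body above has no visible return of its own.
 */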

/*
 * Returns true when we need to resched -- even if we cannot.
 */
static __always_inline bool need_resched(void)
{
	return unlikely(test_preempt_need_resched());
}

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(void)
{
	return unlikely(!__this_cpu_read_4(__preempt_count));
}
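
/*
 * A zero word requires both halves of the encoding to line up: the
 * nesting count must be 0 and the inverted NEED_RESCHED bit must have
 * been cleared. For instance (same assumed values as above), 0x80000000
 * (no resched pending) and 0x00000001 (preemption disabled) both read
 * as non-zero, so only 0x00000000 makes this return true.
 */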
#ifdef CONFIG_PREEMPT
extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() asm ("call ___preempt_schedule")
extern asmlinkage void preempt_schedule(void);
# ifdef CONFIG_CONTEXT_TRACKING
extern asmlinkage void ___preempt_schedule_context(void);
# define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
# endif
#endif
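
/*
 * Usage sketch (paraphrasing the generic <linux/preempt.h> of the same
 * era; not part of this header):
 *
 *	#define preempt_enable() do { \
 *		barrier(); \
 *		if (unlikely(__preempt_count_dec_and_test())) \
 *			__preempt_schedule(); \
 *	} while (0)
 *
 * The bare "call" works because the ___preempt_schedule thunk saves and
 * restores all call-clobbered registers itself, sparing the compiler
 * from treating the site as a full C function call.
 */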

#endif /* __ASM_PREEMPT_H */