/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscall and exception entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>

struct static_key context_tracking_enabled = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(context_tracking_enabled);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);

/**
 * context_tracking_cpu_set - Enable context tracking on a CPU
 * @cpu: the CPU to track
 */
void context_tracking_cpu_set(int cpu)
{
	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_key_slow_inc(&context_tracking_enabled);
	}
}
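
/*
 * Illustrative usage sketch, not part of this file: the nohz_full setup
 * code is expected to enable tracking on every CPU it wants to run
 * tickless, roughly along these lines (the init function and cpumask
 * names below are hypothetical):
 *
 *	void __init my_nohz_full_init(void)
 *	{
 *		int cpu;
 *
 *		for_each_cpu(cpu, my_nohz_full_mask)
 *			context_tracking_cpu_set(cpu);
 *	}
 */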

/**
 * context_tracking_user_enter - Inform the context tracking that the CPU is going to
 *                               enter userspace mode.
 *
 * This function must be called right before we switch from the kernel
 * to userspace, when it's guaranteed that the remaining kernel instructions
 * to execute won't use any RCU read side critical section, because this
 * function sets RCU in an extended quiescent state.
 */
void context_tracking_user_enter(void)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to the following nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_enter() rcu_irq_exit()
	 * This would mess up the dynticks_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) != IN_USER) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			vtime_user_enter(current);
			rcu_user_enter();
		}
		/*
		 * Even if context tracking is disabled on this CPU, because it's
		 * outside the full dynticks mask for example, we still have to keep
		 * track of the context transitions and states to prevent
		 * inconsistency on those of other CPUs.
		 * If a task triggers an exception in userspace, sleeps in the
		 * exception handler and then migrates to another CPU, that new CPU
		 * must know where the exception returns by the time we call
		 * exception_exit().
		 * This information can only be provided by the previous CPU when it
		 * called exception_enter().
		 * OTOH we can spare the calls to vtime and RCU when
		 * context_tracking.active is false because we know that CPU is not
		 * tickless.
		 */
		__this_cpu_write(context_tracking.state, IN_USER);
	}
	local_irq_restore(flags);
}
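
/*
 * Illustrative usage sketch, not part of this file: architectures are
 * expected to call this, usually via the user_enter() wrapper from
 * <linux/context_tracking.h>, as the last high level step before
 * returning to userspace. Roughly (the function name below is
 * hypothetical):
 *
 *	void arch_prepare_return_to_user(struct pt_regs *regs)
 *	{
 *		... final kernel work: signals, notify-resume, etc ...
 *		user_enter();
 *		... only low level entry asm runs after this point ...
 *	}
 */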

#ifdef CONFIG_PREEMPT
/**
 * preempt_schedule_context - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and to avoid tracing the preempt enabling caused by the tracing
 * infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called when the system is still in usermode.
 *
 * To prevent this, preempt_enable_notrace() will use this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
void __sched notrace preempt_schedule_context(void)
{
	enum ctx_state prev_ctx;

	if (likely(!preemptible()))
		return;

	/*
	 * Need to disable preemption in case user_exit() is traced
	 * and the tracer calls preempt_enable_notrace() causing
	 * an infinite recursion.
	 */
	preempt_disable_notrace();
	prev_ctx = exception_enter();
	preempt_enable_no_resched_notrace();

	preempt_schedule();

	preempt_disable_notrace();
	exception_exit(prev_ctx);
	preempt_enable_notrace();
}
EXPORT_SYMBOL_GPL(preempt_schedule_context);
#endif /* CONFIG_PREEMPT */
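
/*
 * Illustrative wiring sketch, not part of this file: with context tracking
 * enabled, the notrace preempt-enable helper in <linux/preempt.h> is
 * expected to end up calling preempt_schedule_context() instead of
 * preempt_schedule(), roughly (exact macro names may differ by kernel
 * version):
 *
 *	#define preempt_enable_notrace() \
 *	do { \
 *		preempt_enable_no_resched_notrace(); \
 *		barrier(); \
 *		if (should_resched()) \
 *			preempt_schedule_context(); \
 *	} while (0)
 */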

/**
 * context_tracking_user_exit - Inform the context tracking that the CPU is
 *                              exiting userspace mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from userspace
 * and before any use of an RCU read side critical section. This potentially
 * includes any high level kernel code like syscalls, exceptions, signal
 * handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void context_tracking_user_exit(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) == IN_USER) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * We are going to run code that may use RCU. Inform
			 * RCU core about that (i.e. we may need the tick again).
			 */
			rcu_user_exit();
			vtime_user_exit(current);
		}
		__this_cpu_write(context_tracking.state, IN_KERNEL);
	}
	local_irq_restore(flags);
}
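
/*
 * Illustrative usage sketch, not part of this file: an exception handler
 * usually doesn't know whether it interrupted userspace or the kernel, so
 * it brackets its body with the exception_enter()/exception_exit() helpers
 * from <linux/context_tracking.h>, which route to
 * context_tracking_user_exit()/context_tracking_user_enter() as needed.
 * Roughly (the handler name below is hypothetical):
 *
 *	void do_some_fault(struct pt_regs *regs)
 *	{
 *		enum ctx_state prev_state = exception_enter();
 *
 *		... handle the fault, may sleep and use RCU ...
 *
 *		exception_exit(prev_state);
 *	}
 */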

/**
 * __context_tracking_task_switch - context switch the syscall callbacks
 * @prev: the task that is being switched out
 * @next: the task that is being switched in
 *
 * The context tracking uses the syscall slow path to implement its user-kernel
 * boundary probes on syscalls. This way it doesn't impact the syscall fast
 * path on CPUs that don't do context tracking.
 *
 * But we need to clear the flag on the previous task because it may later
 * migrate to some CPU that doesn't do the context tracking. As such the TIF
 * flag may not be desired there.
 */
void __context_tracking_task_switch(struct task_struct *prev,
				    struct task_struct *next)
{
	clear_tsk_thread_flag(prev, TIF_NOHZ);
	set_tsk_thread_flag(next, TIF_NOHZ);
}
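
/*
 * Illustrative sketch, not part of this file: the scheduler is expected to
 * reach this through a wrapper in <linux/context_tracking.h> that is gated
 * on the context_tracking_enabled static key, roughly:
 *
 *	static inline void context_tracking_task_switch(struct task_struct *prev,
 *							struct task_struct *next)
 *	{
 *		if (context_tracking_is_enabled())
 *			__context_tracking_task_switch(prev, next);
 *	}
 *
 * The TIF_NOHZ flag set on @next then forces its syscall entry/exit through
 * the arch slow path, where user_exit()/user_enter() are called.
 */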

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
void __init context_tracking_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		context_tracking_cpu_set(cpu);
}
#endif /* CONFIG_CONTEXT_TRACKING_FORCE */