/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	RCU_TRACE(long qlen);		/* Number of pending CBs. */
	RCU_TRACE(char *name);		/* Name of RCU type. */
};
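
/*
 * Illustrative sketch (not from the original source): the single
 * callback list is segmented by the two tail pointers.  Callbacks
 * between the list head and the one whose ->next field ->donetail
 * references have had their grace period elapse and may be invoked;
 * the remainder are still waiting.  For example:
 *
 *	rcucblist -> CB1 -> CB2 -> CB3 -> NULL
 *	donetail == &CB1->next	(CB1 is "done", ready to invoke)
 *	curtail  == &CB3->next	(CB3 is the last queued callback)
 *
 * An empty list has both tail pointers aimed at rcucblist itself.
 */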

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	.donetail = &rcu_sched_ctrlblk.rcucblist,
	.curtail = &rcu_sched_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_sched")
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail = &rcu_bh_ctrlblk.rcucblist,
	.curtail = &rcu_bh_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_bh")
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TINY_PREEMPT_RCU

#include <linux/delay.h>

/* Global control variables for preemptible RCU. */
struct rcu_preempt_ctrlblk {
	struct rcu_ctrlblk rcb;	/* curtail: ->next ptr of last CB for GP. */
	struct rcu_head **nexttail;
				/* Tasks blocked in a preemptible RCU */
				/* read-side critical section while a */
				/* preemptible-RCU grace period is in */
				/* progress must wait for a later grace */
				/* period.  This pointer points to the */
				/* ->next pointer of the last task that */
				/* must wait for a later grace period, or */
				/* to &->rcb.rcucblist if there is no */
				/* such task. */
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/* section.  Tasks are placed at the head */
				/* of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/* current grace period, or NULL if there */
				/* is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to first task blocking the */
				/* current expedited grace period, or NULL */
				/* if there is no such task.  If there */
				/* is no current expedited grace period, */
				/* then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/* priority-boosted, or NULL if no priority */
				/* boosting is needed.  If there is no */
				/* current or expedited grace period, there */
				/* can be no such task. */
#endif /* #ifdef CONFIG_RCU_BOOST */
	u8 gpnum;		/* Current grace period. */
	u8 gpcpu;		/* Last grace period blocked by the CPU. */
	u8 completed;		/* Last grace period completed. */
				/* If all three are equal, RCU is idle. */
#ifdef CONFIG_RCU_BOOST
	unsigned long boost_time; /* When to start boosting (jiffies). */
#endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_TRACE
	unsigned long n_grace_periods;
#ifdef CONFIG_RCU_BOOST
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/* This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
#endif /* #ifdef CONFIG_RCU_TRACE */
};
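
/*
 * Illustrative sketch (not from the original source) of the three
 * grace-period counters above.  All three equal means RCU is idle:
 *
 *	gpnum == gpcpu == completed == 5	idle
 *	gpnum == 6, gpcpu == completed == 5	GP 6 started, CPU has not
 *						yet passed a quiescent state
 *	gpnum == gpcpu == 6, completed == 5	CPU quiesced, but blocked
 *						readers still hold up GP 6
 *	gpnum == gpcpu == completed == 6	GP 6 complete, idle again
 *
 * Only equality is ever tested, so u8 wrap-around is harmless.
 */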

static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
	RCU_TRACE(.rcb.name = "rcu_preempt")
};

static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);

/*
 * Return true if the CPU has not yet responded to the current grace period.
 */
static int rcu_cpu_blocking_cur_gp(void)
{
	return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Check for a running RCU reader.  Because there is only one CPU,
 * there can be but one running RCU reader at a time.  ;-)
 *
 * Returns zero if there are no running readers.  Returns a positive
 * number if there is at least one reader within its RCU read-side
 * critical section.  Returns a negative number if an outermost reader
 * is in the midst of exiting from its RCU read-side critical section.
 */
static int rcu_preempt_running_reader(void)
{
	return current->rcu_read_lock_nesting;
}

/*
 * Check for preempted RCU readers blocking any grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_any(void)
{
	return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
}

/*
 * Check for preempted RCU readers blocking the current grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_cgp(void)
{
	return rcu_preempt_ctrlblk.gp_tasks != NULL;
}

/*
 * Return true if another preemptible-RCU grace period is needed.
 */
static int rcu_preempt_needs_another_gp(void)
{
	return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
}

/*
 * Return true if a preemptible-RCU grace period is in progress.
 * The caller must disable hardirqs.
 */
static int rcu_preempt_gp_in_progress(void)
{
	return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rcu_preempt_ctrlblk.blkd_tasks)
		np = NULL;
	return np;
}

#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST
static void rcu_initiate_boost_trace(void);
#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Dump additional statistics for TINY_PREEMPT_RCU.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
	seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n",
		   rcu_preempt_ctrlblk.rcb.qlen,
		   rcu_preempt_ctrlblk.n_grace_periods,
		   rcu_preempt_ctrlblk.gpnum,
		   rcu_preempt_ctrlblk.gpcpu,
		   rcu_preempt_ctrlblk.completed,
		   "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)],
		   "N."[!rcu_preempt_ctrlblk.gp_tasks],
		   "E."[!rcu_preempt_ctrlblk.exp_tasks]);
#ifdef CONFIG_RCU_BOOST
	seq_printf(m, "%sttb=%c ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n",
		   " ",
		   "B."[!rcu_preempt_ctrlblk.boost_tasks],
		   rcu_preempt_ctrlblk.n_tasks_boosted,
		   rcu_preempt_ctrlblk.n_exp_boosts,
		   rcu_preempt_ctrlblk.n_normal_boosts,
		   (int)(jiffies & 0xffff),
		   (int)(rcu_preempt_ctrlblk.boost_time & 0xffff));
	seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu ny=%lu nos=%lu\n",
		   " balk",
		   rcu_preempt_ctrlblk.n_balk_blkd_tasks,
		   rcu_preempt_ctrlblk.n_balk_exp_gp_tasks,
		   rcu_preempt_ctrlblk.n_balk_boost_tasks,
		   rcu_preempt_ctrlblk.n_balk_notyet,
		   rcu_preempt_ctrlblk.n_balk_nos);
#endif /* #ifdef CONFIG_RCU_BOOST */
}
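
/*
 * Sample output sketch (hypothetical values, not from the original
 * source):
 *
 *	rcu_preempt: qlen=2 gp=15 g12/p12/c11 tasks=TN.
 *
 * Here "T" means ->blkd_tasks is non-empty, "N" means ->gp_tasks is
 * non-NULL (readers block the normal GP), and "E" would mean
 * ->exp_tasks is non-NULL; a "." prints when the condition is false.
 */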

#endif /* #ifdef CONFIG_RCU_TRACE */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO

/* Controls for rcu_kthread() kthread. */
static struct task_struct *rcu_kthread_task;
static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
static unsigned long have_rcu_kthread_work;

/*
 * Carry out RCU priority boosting on the task indicated by ->boost_tasks,
 * and advance ->boost_tasks to the next task in the ->blkd_tasks list.
 */
static int rcu_boost(void)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL)
		return 0;  /* Nothing to boost. */

	raw_local_irq_save(flags);

	/*
	 * Recheck with irqs disabled: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own
	 * if we are preempted just before disabling irqs.
	 */
	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		raw_local_irq_restore(flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rcu_preempt_ctrlblk.exp_tasks != NULL) {
		tb = rcu_preempt_ctrlblk.exp_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++);
	} else {
		tb = rcu_preempt_ctrlblk.boost_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++);
	}
	RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++);

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
	raw_local_irq_restore(flags);
	rt_mutex_lock(&mtx);
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return ACCESS_ONCE(rcu_preempt_ctrlblk.boost_tasks) != NULL ||
	       ACCESS_ONCE(rcu_preempt_ctrlblk.exp_tasks) != NULL;
}
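
/*
 * Illustrative timeline (not from the original source) of the proxy
 * rt_mutex trick used above:
 *
 *	1. rcu_boost() initializes mtx as if held by the blocked task t
 *	   and stores &mtx in t->rcu_boost_mutex.
 *	2. rt_mutex_lock(&mtx) blocks, priority-inheriting this
 *	   kthread's RT priority to task t.
 *	3. Task t, now boosted, runs to the end of its outermost RCU
 *	   read-side critical section and releases mtx in
 *	   rcu_read_unlock_special(), deboosting itself.
 *	4. rt_mutex_lock() in rcu_boost() then acquires mtx and returns.
 */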

/*
 * Check to see if it is now time to start boosting RCU readers blocking
 * the current grace period, and, if so, tell the rcu_kthread_task to
 * start boosting them.  If there is an expedited boost in progress,
 * we wait for it to complete.
 *
 * If there are no blocked readers blocking the current grace period,
 * return 0 to let the caller know, otherwise return 1.  Note that this
 * return value is independent of whether or not boosting was done.
 */
static int rcu_initiate_boost(void)
{
	if (!rcu_preempt_blocked_readers_cgp() &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		RCU_TRACE(rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++);
		return 0;
	}
	if (rcu_preempt_ctrlblk.exp_tasks != NULL ||
	    (rcu_preempt_ctrlblk.gp_tasks != NULL &&
	     rcu_preempt_ctrlblk.boost_tasks == NULL &&
	     ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))) {
		if (rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_preempt_ctrlblk.boost_tasks =
				rcu_preempt_ctrlblk.gp_tasks;
		invoke_rcu_callbacks();
	} else
		RCU_TRACE(rcu_initiate_boost_trace());
	return 1;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

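/*
 * Worked example (values assumed for illustration): with HZ=100 and
 * CONFIG_RCU_BOOST_DELAY=500 (milliseconds), this computes to
 * DIV_ROUND_UP(500 * 100, 1000) = 50 jiffies, i.e. boosting begins
 * half a second after the grace period starts.
 */
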
/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(void)
{
	rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

#else /* #ifdef CONFIG_RCU_BOOST */

/*
 * If there is no RCU priority boosting, we don't initiate boosting,
 * but we do indicate whether there are blocked readers blocking the
 * current grace period.
 */
static int rcu_initiate_boost(void)
{
	return rcu_preempt_blocked_readers_cgp();
}

/*
 * If there is no RCU priority boosting, nothing to do at grace-period start.
 */
static void rcu_preempt_boost_start_gp(void)
{
}

#endif /* else #ifdef CONFIG_RCU_BOOST */

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 *
 * Because this is a single-CPU implementation, the only way a grace
 * period can end is if the CPU is in a quiescent state.  The reason is
 * that a blocked preemptible-RCU reader can exit its critical section
 * only if the CPU is running it at the time.  Therefore, when the
 * last task blocking the current grace period exits its RCU read-side
 * critical section, neither the CPU nor blocked tasks will be stopping
 * the current grace period.  (In contrast, SMP implementations
 * might have CPUs running in RCU read-side critical sections that
 * block later grace periods -- but this is not possible given only
 * one CPU.)
 */
static void rcu_preempt_cpu_qs(void)
{
	/* Record both CPU and task as having responded to current GP. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/* If there is no GP then there is nothing more to do.  */
	if (!rcu_preempt_gp_in_progress())
		return;
	/*
	 * Check up on boosting.  If there are readers blocking the
	 * current grace period, leave.
	 */
	if (rcu_initiate_boost())
		return;

	/* Advance callbacks. */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If there are no blocked readers, next GP is done instantly. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If there are done callbacks, cause them to be invoked. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
		invoke_rcu_callbacks();
}

/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 */
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of GP. */
		rcu_preempt_ctrlblk.gpnum++;
		RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);

		/* Any blocked RCU readers block new GP. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* Set up for RCU priority boosting. */
		rcu_preempt_boost_start_gp();

		/* If there is no running reader, CPU is done with GP. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}
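
/*
 * Summary sketch (not from the original source) of the grace-period
 * lifecycle in this implementation:
 *
 *	call_rcu() enqueues a CB and calls rcu_preempt_start_gp().
 *	rcu_preempt_start_gp() bumps ->gpnum if no GP is in progress.
 *	Context switches and scheduling-clock ticks eventually invoke
 *	rcu_preempt_cpu_qs(), which records the CPU's quiescent state
 *	and, once no readers block the GP, advances ->completed and the
 *	callback tail pointers so that the done callbacks get invoked.
 */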

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * If the task started after the current grace period began, as recorded
 * by ->gpcpu, we enqueue at the beginning of the list.  Otherwise we
 * enqueue before the element referenced by ->gp_tasks (or at the tail
 * if ->gp_tasks is NULL) and point ->gp_tasks at the newly added element.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the ->gp_tasks pointer becomes
 * NULL.
 *
 * Caller must disable preemption.
 */
void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;

	local_irq_save(flags); /* must exclude scheduler_tick(). */
	if (rcu_preempt_running_reader() > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 */
		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_blocking_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	} else if (rcu_preempt_running_reader() < 0 &&
		   t->rcu_read_unlock_special) {
		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that current grace period continues to be blocked.
	 */
	rcu_preempt_cpu_qs();
	local_irq_restore(flags);
}

/*
 * Tiny-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutiny.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or a task having blocked during the RCU
 * read-side critical section.
 */
static noinline void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
	int special;

	/*
	 * NMI handlers cannot block and cannot safely manipulate state.
	 * They therefore cannot possibly be special, so just leave.
	 */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the ->blkd_tasks list and adjust
		 * any pointers that might have been referencing it.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = rcu_next_node_entry(t);
		list_del_init(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks)
			rcu_preempt_ctrlblk.boost_tasks = np;
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on the CPU, report the quiescent state
		 * and start a new grace period if needed.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to wake up the waiting task.
		 */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();
	}
#ifdef CONFIG_RCU_BOOST
	/* Unboost self if we were boosted. */
	if (special & RCU_READ_UNLOCK_BOOSTED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
		rt_mutex_unlock(t->rcu_boost_mutex);
		t->rcu_boost_mutex = NULL;
	}
#endif /* #ifdef CONFIG_RCU_BOOST */
	local_irq_restore(flags);
}

/*
 * Tiny-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
	if (t->rcu_read_lock_nesting != 1)
		--t->rcu_read_lock_nesting;
	else {
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
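
/*
 * Illustrative note (not from the original source): the INT_MIN value
 * above flags "outermost unlock in progress".  If this task is
 * preempted between the INT_MIN store and the final store of zero,
 * rcu_preempt_note_context_switch() sees a negative nesting count and
 * finishes the cleanup by calling rcu_read_unlock_special() itself:
 *
 *	nesting >= 1		inside read-side critical section(s)
 *	nesting == INT_MIN	outermost rcu_read_unlock() in progress
 *	nesting == 0		no read-side critical section
 */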

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the rcu_preempt_ctrlblk structure, which is
 * checked elsewhere.  This is called from the scheduling-clock interrupt.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	if (rcu_preempt_gp_in_progress() &&
	    (!rcu_preempt_running_reader() ||
	     !rcu_cpu_blocking_cur_gp()))
		rcu_preempt_cpu_qs();
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)
		invoke_rcu_callbacks();
	if (rcu_preempt_gp_in_progress() &&
	    rcu_cpu_blocking_cur_gp() &&
	    rcu_preempt_running_reader() > 0)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
 * update, so this is invoked from rcu_process_callbacks() to
 * handle that case.  Of course, it is invoked for all flavors of
 * RCU, but RCU callbacks can appear only on one of the lists, and
 * neither ->nexttail nor ->donetail can possibly be NULL, so there
 * is no need for an explicit check.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
		rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++);
	rcu_preempt_start_gp();  /* checks to see if GP needed. */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
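
/*
 * Illustrative usage sketch (hypothetical types and names, not part of
 * this file).  The rcu_head is embedded in the protected structure, and
 * the callback runs after a grace period, when no reader can still hold
 * a reference obtained before the call_rcu():
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void free_foo_rcu(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new);
 *	call_rcu(&old->rcu, free_foo_rcu);
 */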

/*
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!rcu_scheduler_active)
		return;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

	WARN_ON_ONCE(rcu_preempt_running_reader());
	if (!rcu_preempt_blocked_readers_any())
		return;

	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
	rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
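
/*
 * Illustrative usage sketch (hypothetical names, not part of this
 * file): an updater unlinks an element, waits for readers, then frees:
 *
 *	spin_lock(&gp_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, NULL);
 *	spin_unlock(&gp_lock);
 *	synchronize_rcu();	(wait for pre-existing readers to finish)
 *	kfree(old);
 */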

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(void)
{
	return rcu_preempt_ctrlblk.exp_tasks != NULL;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.
 */
static void rcu_report_exp_done(void)
{
	wake_up(&sync_rcu_preempt_exp_wq);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to rely on the fact that there is but one CPU, and that it is
 * illegal for a task to invoke synchronize_rcu_expedited() while in a
 * preemptible-RCU read-side critical section.  Therefore, any such
 * critical sections must correspond to blocked tasks, which must therefore
 * be on the ->blkd_tasks list.  So just record the current head of the
 * list in the ->exp_tasks pointer, and wait for all tasks including and
 * after the task pointed to by ->exp_tasks to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
	unsigned long snap;

	barrier();  /* ensure prior action seen before grace period. */

	WARN_ON_ONCE(rcu_preempt_running_reader());

	/*
	 * Acquire lock so that there is only one preemptible RCU grace
	 * period in flight.  Of course, if someone does the expedited
	 * grace period for us while we are acquiring the lock, just leave.
	 */
	snap = sync_rcu_preempt_exp_count + 1;
	mutex_lock(&sync_rcu_preempt_exp_mutex);
	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
		goto unlock_mb_ret;  /* Others did our work for us. */

	local_irq_save(flags);

	/*
	 * All RCU readers have to already be on blkd_tasks because
	 * we cannot legally be executing in an RCU read-side critical
	 * section.
	 */

	/* Snapshot current head of ->blkd_tasks list. */
	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL;

	/* Wait for tail of ->blkd_tasks list to drain. */
	if (!rcu_preempted_readers_exp())
		local_irq_restore(flags);
	else {
		rcu_initiate_boost();
		local_irq_restore(flags);
		wait_event(sync_rcu_preempt_exp_wq,
			   !rcu_preempted_readers_exp());
	}

	/* Clean up and exit. */
	barrier();  /* ensure expedited GP seen before counter increment. */
	sync_rcu_preempt_exp_count++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
	barrier();  /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
int rcu_preempt_needs_cpu(void)
{
	if (!rcu_preempt_running_reader())
		rcu_preempt_cpu_qs();
	return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_RCU_TRACE

/*
 * Because preemptible RCU does not exist, it is not necessary to
 * dump out its statistics.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
}

#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to remove.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

/*
 * Wake up rcu_kthread() to process callbacks now eligible for invocation
 * or to boost readers.
 */
static void invoke_rcu_callbacks(void)
{
	have_rcu_kthread_work = 1;
	if (rcu_kthread_task != NULL)
		wake_up(&rcu_kthread_wq);
}

#ifdef CONFIG_RCU_TRACE

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return rcu_kthread_task == current;
}

#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * This kthread invokes RCU callbacks whose grace periods have
 * elapsed.  It is awakened as needed, and takes the place of the
 * RCU_SOFTIRQ that is used for this purpose when boosting is disabled.
 * This is a kthread, but it is never stopped, at least not until
 * the system goes down.
 */
static int rcu_kthread(void *arg)
{
	unsigned long work;
	unsigned long morework;
	unsigned long flags;

	for (;;) {
		wait_event_interruptible(rcu_kthread_wq,
					 have_rcu_kthread_work != 0);
		morework = rcu_boost();
		local_irq_save(flags);
		work = have_rcu_kthread_work;
		have_rcu_kthread_work = morework;
		local_irq_restore(flags);
		if (work)
			rcu_process_callbacks(NULL);
		schedule_timeout_interruptible(1); /* Leave CPU for others. */
	}

	return 0;  /* Not reached, but needed to shut gcc up. */
}

/*
 * Spawn the kthread that invokes RCU callbacks.
 */
static int __init rcu_spawn_kthreads(void)
{
	struct sched_param sp;

	rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
	return 0;
}
early_initcall(rcu_spawn_kthreads);

#else /* #ifdef CONFIG_RCU_BOOST */

/* Hold off callback invocation until early_initcall() time. */
static int rcu_scheduler_fully_active __read_mostly;

/*
 * Start up softirq processing of callbacks.
 */
void invoke_rcu_callbacks(void)
{
	if (rcu_scheduler_fully_active)
		raise_softirq(RCU_SOFTIRQ);
}

#ifdef CONFIG_RCU_TRACE

/*
 * There is no callback kthread, so this thread is never it.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

#endif /* #ifdef CONFIG_RCU_TRACE */

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	raise_softirq(RCU_SOFTIRQ);  /* Invoke any callbacks from early boot. */
	return 0;
}
early_initcall(rcu_scheduler_really_started);

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#include <linux/kernel_stat.h>

/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.
 */
void __init rcu_scheduler_starting(void)
{
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST

static void rcu_initiate_boost_trace(void)
{
	if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks))
		rcu_preempt_ctrlblk.n_balk_blkd_tasks++;
	else if (rcu_preempt_ctrlblk.gp_tasks == NULL &&
		 rcu_preempt_ctrlblk.exp_tasks == NULL)
		rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++;
	else if (rcu_preempt_ctrlblk.boost_tasks != NULL)
		rcu_preempt_ctrlblk.n_balk_boost_tasks++;
	else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))
		rcu_preempt_ctrlblk.n_balk_notyet++;
	else
		rcu_preempt_ctrlblk.n_balk_nos++;
}

#endif /* #ifdef CONFIG_RCU_BOOST */

static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	rcp->qlen -= n;
	raw_local_irq_restore(flags);
}

/*
 * Dump statistics for TINY_RCU, such as they are.
 */
static int show_tiny_stats(struct seq_file *m, void *unused)
{
	show_tiny_preempt_stats(m);
	seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
	seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
	return 0;
}

static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_tiny_stats, NULL);
}

static const struct file_operations show_tiny_stats_fops = {
	.owner = THIS_MODULE,
	.open = show_tiny_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
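
/*
 * Illustrative note (assumes debugfs is mounted at /sys/kernel/debug):
 * reading the "rcudata" file created below produces one line per RCU
 * flavor in the formats above, for example:
 *
 *	# cat /sys/kernel/debug/rcu/rcudata
 *	rcu_sched: qlen: 0
 *	rcu_bh: qlen: 0
 */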

static struct dentry *rcudir;

static int __init rcutiny_trace_init(void)
{
	struct dentry *retval;

	rcudir = debugfs_create_dir("rcu", NULL);
	if (!rcudir)
		goto free_out;
	retval = debugfs_create_file("rcudata", 0444, rcudir,
				     NULL, &show_tiny_stats_fops);
	if (!retval)
		goto free_out;
	return 0;
free_out:
	debugfs_remove_recursive(rcudir);
	return 1;
}

static void __exit rcutiny_trace_cleanup(void)
{
	debugfs_remove_recursive(rcudir);
}

module_init(rcutiny_trace_init);
module_exit(rcutiny_trace_cleanup);

MODULE_AUTHOR("Paul E. McKenney");
MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
MODULE_LICENSE("GPL");

#endif /* #ifdef CONFIG_RCU_TRACE */