/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              Documentation/RCU
 */
#include <linux/moduleparam.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;      /* ->next pointer of last CB. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
        .donetail       = &rcu_ctrlblk.rcucblist,
        .curtail        = &rcu_ctrlblk.rcucblist,
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
        .donetail       = &rcu_bh_ctrlblk.rcucblist,
        .curtail        = &rcu_bh_ctrlblk.rcucblist,
};
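
/*
 * Sketch of the callback-list layout (illustrative, inferred from the
 * field comments above): with two "done" callbacks D1, D2 and two
 * still-waiting callbacks W1, W2 queued, the list looks like
 *
 *      rcucblist --> D1 --> D2 --> W1 --> W2 --> NULL
 *                           ^                    ^
 *             donetail = &D2->next    curtail = &W2->next
 *
 * On an empty list, both tail pointers reference rcucblist itself,
 * which is exactly how the two control blocks above are initialized.
 */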

#ifdef CONFIG_NO_HZ

static long rcu_dynticks_nesting = 1;

/*
 * Enter dynticks-idle mode, which is an extended quiescent state
 * if we have fully entered that mode (i.e., if the new value of
 * dynticks_nesting is zero).
 */
void rcu_enter_nohz(void)
{
        if (--rcu_dynticks_nesting == 0)
                rcu_sched_qs(0); /* implies rcu_bh_qs(0) */
}

/*
 * Exit dynticks-idle mode, so that we are no longer in an extended
 * quiescent state.
 */
void rcu_exit_nohz(void)
{
        rcu_dynticks_nesting++;
}

#endif /* #ifdef CONFIG_NO_HZ */
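
/*
 * One plausible sequence through the hooks above (a sketch, assuming
 * the idle loop and irq paths invoke them as their names suggest;
 * not from the original file).  rcu_dynticks_nesting starts at 1,
 * meaning "not idle":
 *
 *      idle loop:  rcu_enter_nohz() -> nesting 0, quiescent state
 *      irq entry:  rcu_exit_nohz()  -> nesting 1, RCU watching again
 *      irq exit:   rcu_enter_nohz() -> nesting 0, quiescent state
 *
 * Only the transition to zero reports a quiescent state, so nested
 * entries and exits do not report spurious quiescent states.
 */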

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Also disable irqs to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcp->rcucblist != NULL &&
            rcp->donetail != rcp->curtail) {
                rcp->donetail = rcp->curtail;
                local_irq_restore(flags);
                return 1;
        }
        local_irq_restore(flags);

        return 0;
}

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
        if (rcu_qsctr_help(&rcu_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk))
                raise_softirq(RCU_SOFTIRQ);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
        if (rcu_qsctr_help(&rcu_bh_ctrlblk))
                raise_softirq(RCU_SOFTIRQ);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.
 */
void rcu_check_callbacks(int cpu, int user)
{
        if (user ||
            (idle_cpu(cpu) &&
             !in_softirq() &&
             hardirq_count() <= (1 << HARDIRQ_SHIFT)))
                rcu_sched_qs(cpu);
        else if (!in_softirq())
                rcu_bh_qs(cpu);
}
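
/*
 * Note on the test above (explanatory, not from the original file):
 * hardirq_count() counts in units of 1 << HARDIRQ_SHIFT, so the
 * comparison means "nested in at most one level of hardirq", namely
 * the scheduling-clock interrupt itself.  Together with idle_cpu()
 * and !in_softirq(), this says the tick interrupted the idle loop
 * rather than some other context, so the CPU was in a quiescent state
 * for both rcu and rcu_bh.  Failing that, if the tick at least did not
 * interrupt softirq context, the CPU cannot have been in an rcu_bh
 * read-side critical section, so an rcu_bh quiescent state is reported.
 */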

/*
 * Helper function for rcu_process_callbacks() that operates on the
 * specified rcu_ctrlblk structure.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
        struct rcu_head *next, *list;
        unsigned long flags;

        /* If no RCU callbacks ready to invoke, just return. */
        if (&rcp->rcucblist == rcp->donetail)
                return;

        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
        list = rcp->rcucblist;
        rcp->rcucblist = *rcp->donetail;
        *rcp->donetail = NULL;
        if (rcp->curtail == rcp->donetail)
                rcp->curtail = &rcp->rcucblist;
        rcp->donetail = &rcp->rcucblist;
        local_irq_restore(flags);

        /* Invoke the callbacks on the local list. */
        while (list) {
                next = list->next;
                prefetch(next);
                list->func(list);
                list = next;
        }
}
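
/*
 * Sketch of the splice step above (illustrative, not from the original
 * file).  Starting from
 *
 *      rcucblist --> D1 --> D2 --> W1 --> NULL
 *                           ^           ^
 *                       donetail     curtail
 *
 * the "done" sublist D1..D2 is detached onto the local list, rcucblist
 * is pointed at W1, and donetail is reset to &rcucblist.  If there were
 * no waiting callbacks (curtail == donetail), curtail is reset as well,
 * leaving the list empty in its initial state.  Interrupts are disabled
 * only for the splice, not while the callbacks are invoked.
 */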

/*
 * Invoke any callbacks whose grace period has completed.
 */
static void rcu_process_callbacks(struct softirq_action *unused)
{
        __rcu_process_callbacks(&rcu_ctrlblk);
        __rcu_process_callbacks(&rcu_bh_ctrlblk);
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.
 */
void synchronize_sched(void)
{
        cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);

void synchronize_rcu_bh(void)
{
        synchronize_sched();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
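
/*
 * Usage sketch (illustrative; global_foo, newp, and oldp are
 * hypothetical names, not part of this file): a typical updater
 * publishes a replacement and waits out pre-existing readers before
 * freeing the old version:
 *
 *      oldp = global_foo;
 *      rcu_assign_pointer(global_foo, newp);
 *      synchronize_sched();     <-- all prior readers have finished
 *      kfree(oldp);
 *
 * On this UP-only implementation the wait degenerates to a single
 * cond_resched(), because merely being able to legally call
 * synchronize_sched() means the caller is already in a quiescent state.
 */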

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu);
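
/*
 * Usage sketch (illustrative; struct foo and foo_rcu_free() are
 * hypothetical, not part of this file): embed an rcu_head in the
 * protected structure and recover the enclosing structure in the
 * callback with container_of():
 *
 *      struct foo {
 *              int data;
 *              struct rcu_head rcu;
 *      };
 *
 *      static void foo_rcu_free(struct rcu_head *head)
 *      {
 *              kfree(container_of(head, struct foo, rcu));
 *      }
 *
 *      call_rcu(&fp->rcu, foo_rcu_free);
 */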

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

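/*
 * Why posting a single callback suffices below (explanatory, not from
 * the original file): callbacks on each list are invoked strictly in
 * posting order, so once the callback posted here has run, every
 * callback posted before it must already have run.  Each rcu_barrier*()
 * variant therefore just queues wakeme_after_rcu() on the appropriate
 * list and sleeps on the completion.  (On this implementation,
 * call_rcu_sched() is expected to map onto call_rcu().)
 */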
void rcu_barrier(void)
{
        struct rcu_synchronize rcu;

        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

void rcu_barrier_bh(void)
{
        struct rcu_synchronize rcu;

        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu_bh(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

void rcu_barrier_sched(void)
{
        struct rcu_synchronize rcu;

        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu_sched(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);

void __init rcu_init(void)
{
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}