/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptable semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *         Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);

/*
 * Tell them what RCU they are running.
 */
static inline void rcu_bootup_announce(void)
{
        printk(KERN_INFO
               "Experimental preemptable hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
        return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Record a preemptable-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 */
static void rcu_preempt_qs_record(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
        rdp->passed_quiesc = 1;
        rdp->passed_quiesc_completed = rdp->completed;
}

/*
 * We have entered the scheduler or are between softirqs in ksoftirqd.
 * If we are in an RCU read-side critical section, we need to reflect
 * that in the state of the rcu_node structure corresponding to this CPU.
 * Caller must disable hardirqs.
 */
static void rcu_preempt_qs(int cpu)
{
        struct task_struct *t = current;
        int phase;
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        if (t->rcu_read_lock_nesting &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
                WARN_ON_ONCE(cpu != smp_processor_id());

                /* Possibly blocking in an RCU read-side critical section. */
                rdp = rcu_preempt_state.rda[cpu];
                rnp = rdp->mynode;
                spin_lock(&rnp->lock);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
                t->rcu_blocked_node = rnp;

                /*
                 * If this CPU has already checked in, then this task
                 * will hold up the next grace period rather than the
                 * current grace period.  Queue the task accordingly.
                 * If the task is queued for the current grace period
                 * (i.e., this CPU has not yet passed through a quiescent
                 * state for the current grace period), then as long
                 * as that task remains queued, the current grace period
                 * cannot end.
                 *
                 * But first, note that the current CPU must still be
                 * on line!
                 */
                WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
                phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1);
                list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
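                /*
                 * Illustrative note on the phase computation above:
                 * blocked_tasks[rnp->gpnum & 0x1] holds the tasks blocking
                 * the current grace period.  If this CPU has not yet passed
                 * through a quiescent state, its bit is still set in
                 * ->qsmask, so !(rnp->qsmask & rdp->grpmask) is 0 and the
                 * task is queued on the current-GP list; if the CPU has
                 * already checked in, the index is flipped and the task is
                 * queued for the next grace period instead.
                 */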
                smp_mb();  /* Ensure later ctxt swtch seen after above. */
                spin_unlock(&rnp->lock);
        }

        /*
         * Either we were not in an RCU read-side critical section to
         * begin with, or we have now recorded that critical section
         * globally.  Either way, we can now note a quiescent state
         * for this CPU.  Again, if we were in an RCU read-side critical
         * section, and if that critical section was blocking the current
         * grace period, then the fact that the task has been enqueued
         * means that we continue to block the current grace period.
         */
        rcu_preempt_qs_record(cpu);
        t->rcu_read_unlock_special &= ~(RCU_READ_UNLOCK_NEED_QS |
                                        RCU_READ_UNLOCK_GOT_QS);
}

/*
 * Tree-preemptable RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
        ACCESS_ONCE(current->rcu_read_lock_nesting)++;
        barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

static void rcu_read_unlock_special(struct task_struct *t)
{
        int empty;
        unsigned long flags;
        unsigned long mask;
        struct rcu_node *rnp;
        int special;

        /* NMI handlers cannot block and cannot safely manipulate state. */
        if (in_nmi())
                return;

        local_irq_save(flags);

        /*
         * If RCU core is waiting for this CPU to exit critical section,
         * let it know that we have done so.
         */
        special = t->rcu_read_unlock_special;
        if (special & RCU_READ_UNLOCK_NEED_QS) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_GOT_QS;
        }

        /* Hardware IRQ handlers cannot block. */
        if (in_irq()) {
                local_irq_restore(flags);
                return;
        }

        /* Clean up if blocked during RCU read-side critical section. */
        if (special & RCU_READ_UNLOCK_BLOCKED) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

                /*
                 * Remove this task from the list it blocked on.  The
                 * task can migrate while we acquire the lock, but at
                 * most one time.  So at most two passes through loop.
                 */
                for (;;) {
                        rnp = t->rcu_blocked_node;
                        spin_lock(&rnp->lock);
                        if (rnp == t->rcu_blocked_node)
                                break;
                        spin_unlock(&rnp->lock);
                }
                empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
                list_del_init(&t->rcu_node_entry);
                t->rcu_blocked_node = NULL;

                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on any CPUs, report the quiescent state.
                 * Note that both cpu_quiet_msk_finish() and cpu_quiet_msk()
                 * drop rnp->lock and restore irq.
                 */
                if (!empty && rnp->qsmask == 0 &&
                    list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
                        t->rcu_read_unlock_special &=
                                ~(RCU_READ_UNLOCK_NEED_QS |
                                  RCU_READ_UNLOCK_GOT_QS);
                        if (rnp->parent == NULL) {
                                /* Only one rcu_node in the tree. */
                                cpu_quiet_msk_finish(&rcu_preempt_state, flags);
                                return;
                        }
                        /* Report up the rest of the hierarchy. */
                        mask = rnp->grpmask;
                        spin_unlock_irqrestore(&rnp->lock, flags);
                        rnp = rnp->parent;
                        spin_lock_irqsave(&rnp->lock, flags);
                        cpu_quiet_msk(mask, &rcu_preempt_state, rnp, flags);
                        return;
                }
                spin_unlock(&rnp->lock);
        }
        local_irq_restore(flags);
}

/*
 * Tree-preemptable RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
        struct task_struct *t = current;

        barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
        if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
            unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                rcu_read_unlock_special(t);
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
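
/*
 * Illustrative reader-side usage (example only; "struct foo" and
 * foo_lookup() are hypothetical): readers bracket their accesses with
 * rcu_read_lock()/rcu_read_unlock(), which map to the functions above
 * under CONFIG_TREE_PREEMPT_RCU.  A reader preempted inside the critical
 * section is queued on the appropriate blocked_tasks[] list and dequeued
 * by rcu_read_unlock_special() at the outermost rcu_read_unlock().
 *
 *      struct foo {
 *              int key;
 *              int data;
 *              struct list_head list;
 *      };
 *
 *      static int foo_lookup(struct list_head *head, int key, int *data)
 *      {
 *              struct foo *p;
 *              int found = 0;
 *
 *              rcu_read_lock();
 *              list_for_each_entry_rcu(p, head, list) {
 *                      if (p->key == key) {
 *                              *data = p->data;
 *                              found = 1;
 *                              break;
 *                      }
 *              }
 *              rcu_read_unlock();
 *              return found;
 *      }
 */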

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
        unsigned long flags;
        struct list_head *lp;
        int phase = rnp->gpnum & 0x1;
        struct task_struct *t;

        if (!list_empty(&rnp->blocked_tasks[phase])) {
                spin_lock_irqsave(&rnp->lock, flags);
                phase = rnp->gpnum & 0x1;  /* re-read under lock. */
                lp = &rnp->blocked_tasks[phase];
                list_for_each_entry(t, lp, rcu_node_entry)
                        printk(" P%d", t->pid);
                spin_unlock_irqrestore(&rnp->lock, flags);
        }
}
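
/*
 * For example (illustrative PIDs): if tasks with PIDs 612 and 1437 are
 * still blocked in RCU read-side critical sections for the current grace
 * period, the printk() above appends " P612 P1437" to the CPU stall
 * warning.
 */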

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]));
}

/*
 * Check for preempted RCU readers for the specified rcu_node structure.
 * If the caller needs a reliable answer, it must hold the rcu_node's
 * ->lock.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
        return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                      struct rcu_node *rnp)
{
        int i;
        struct list_head *lp;
        struct list_head *lp_root;
        struct rcu_node *rnp_root = rcu_get_root(rsp);
        struct task_struct *tp;

        if (rnp == rnp_root) {
                WARN_ONCE(1, "Last CPU thought to be offlined?");
                return;  /* Shouldn't happen: at least one CPU online. */
        }

        /*
         * Move tasks up to root rcu_node.  Rely on the fact that the
         * root rcu_node can be at most one ahead of the rest of the
         * rcu_nodes in terms of ->gpnum value.  This fact allows us to
         * move the blocked_tasks[] array directly, element by element.
         */
        for (i = 0; i < 2; i++) {
                lp = &rnp->blocked_tasks[i];
                lp_root = &rnp_root->blocked_tasks[i];
                while (!list_empty(lp)) {
                        tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
                        spin_lock(&rnp_root->lock);  /* irqs already disabled */
                        list_del(&tp->rcu_node_entry);
                        tp->rcu_blocked_node = rnp_root;
                        list_add(&tp->rcu_node_entry, lp_root);
                        spin_unlock(&rnp_root->lock);  /* irqs remain disabled */
                }
        }
}
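
/*
 * Illustrative scenario (not part of the original source): if the last
 * online CPU of a leaf rcu_node goes offline while tasks T1 and T2 are
 * queued on that node's blocked_tasks[0], the loop above moves them one
 * at a time, under the root node's ->lock, onto the root rcu_node's
 * blocked_tasks[0], and points their ->rcu_blocked_node at the root so
 * that a later rcu_read_unlock_special() acquires the correct lock.
 */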

/*
 * Do CPU-offline processing for preemptable RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
        __rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
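/*
 * Descriptive note (summarizing the code below): when this CPU still owes
 * a quiescent state (->qs_pending) and the current task is inside an RCU
 * read-side critical section, RCU_READ_UNLOCK_NEED_QS is set here.  The
 * outermost rcu_read_unlock() then converts it to RCU_READ_UNLOCK_GOT_QS
 * in rcu_read_unlock_special(), and a later invocation of this function
 * sees GOT_QS and records the quiescent state for this CPU.
 */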
static void rcu_preempt_check_callbacks(int cpu)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0) {
                t->rcu_read_unlock_special &=
                        ~(RCU_READ_UNLOCK_NEED_QS | RCU_READ_UNLOCK_GOT_QS);
                rcu_preempt_qs_record(cpu);
                return;
        }
        if (per_cpu(rcu_preempt_data, cpu).qs_pending) {
                if (t->rcu_read_unlock_special & RCU_READ_UNLOCK_GOT_QS) {
                        rcu_preempt_qs_record(cpu);
                        t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_GOT_QS;
                } else if (!(t->rcu_read_unlock_special &
                             RCU_READ_UNLOCK_NEED_QS)) {
                        t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
                }
        }
}

/*
 * Process callbacks for preemptable RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
        __rcu_process_callbacks(&rcu_preempt_state,
                                &__get_cpu_var(rcu_preempt_data));
}

/*
 * Queue a preemptable-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);
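
/*
 * Illustrative usage of call_rcu() (example only; "struct foo" and its
 * helpers are hypothetical): the callback typically uses container_of()
 * to recover the enclosing structure and frees it once a grace period has
 * elapsed, that is, once all pre-existing readers, including any preempted
 * readers queued on blocked_tasks[], have left their RCU read-side
 * critical sections.
 *
 *      struct foo {
 *              struct rcu_head rcu;
 *              int data;
 *      };
 *
 *      static void foo_reclaim(struct rcu_head *head)
 *      {
 *              struct foo *fp = container_of(head, struct foo, rcu);
 *
 *              kfree(fp);
 *      }
 *
 *      static void foo_release(struct foo *fp)
 *      {
 *              call_rcu(&fp->rcu, foo_reclaim);
 *      }
 */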

/*
 * Check to see if there is any immediate preemptable-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
        return __rcu_pending(&rcu_preempt_state,
                             &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptable RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
        return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/*
 * Initialize preemptable RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
        rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Check for a task exiting while in a preemptable-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0)
                return;
        t->rcu_read_lock_nesting = 1;
        rcu_read_unlock();
}
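
/*
 * Descriptive note (derived from the code above): setting
 * ->rcu_read_lock_nesting to 1 and then calling rcu_read_unlock() forces
 * the outermost-unlock path, so rcu_read_unlock_special() runs and removes
 * the exiting task from any blocked_tasks[] list it may still be queued on.
 */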

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static inline void rcu_bootup_announce(void)
{
        printk(KERN_INFO "Hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Because preemptable RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_qs(int cpu)
{
}

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Because there is no preemptable RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
}

/*
 * Because preemptable RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections.
 */
static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                      struct rcu_node *rnp)
{
}

/*
 * Because preemptable RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to check.
 */
void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to process.
 */
void rcu_preempt_process_callbacks(void)
{
}

/*
 * In classic RCU, call_rcu() is just call_rcu_sched().
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        call_rcu_sched(head, func);
}
EXPORT_SYMBOL_GPL(call_rcu);

/*
 * Because preemptable RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
        return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
        return 0;
}

/*
 * Because preemptable RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */