/*
 * Read-Copy Update mechanism for mutual exclusion (RT implementation)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author:  Paul McKenney <paulmck@us.ibm.com>
 *
 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 *
 */
32 | ||
33 | #ifndef __LINUX_RCUPREEMPT_H | |
34 | #define __LINUX_RCUPREEMPT_H | |
35 | ||
e260be67 PM |
36 | #include <linux/cache.h> |
37 | #include <linux/spinlock.h> | |
38 | #include <linux/threads.h> | |
39 | #include <linux/percpu.h> | |
40 | #include <linux/cpumask.h> | |
41 | #include <linux/seqlock.h> | |
42 | ||
4446a36f PM |
/*
 * Per-CPU state used by preemptible RCU to track dynticks-idle (nohz)
 * transitions and scheduler quiescent states.
 */
struct rcu_dyntick_sched {
	int dynticks;		/* Bumped on each nohz enter/exit; even while */
				/*  in dynticks-idle mode, odd while active */
				/*  (see rcu_enter_nohz()/rcu_exit_nohz()). */
	int dynticks_snap;	/* Snapshot of dynticks — presumably taken by */
				/*  grace-period detection; confirm in the .c */
	int sched_qs;		/* Scheduler quiescent states passed through */
				/*  (bumped by rcu_qsctr_inc()). */
	int sched_qs_snap;	/* Snapshot of sched_qs; consumer not visible */
				/*  in this header — verify against rcupreempt.c */
	int sched_dynticks_snap; /* Snapshot of dynticks for the sched flavor */
				/*  — presumably; verify against rcupreempt.c */
};
50 | ||
51 | DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched); | |
52 | ||
53 | static inline void rcu_qsctr_inc(int cpu) | |
54 | { | |
55 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | |
56 | ||
57 | rdssp->sched_qs++; | |
58 | } | |
e260be67 PM |
59 | #define rcu_bh_qsctr_inc(cpu) |
60 | #define call_rcu_bh(head, rcu) call_rcu(head, rcu) | |
61 | ||
4446a36f PM |
62 | /** |
63 | * call_rcu_sched - Queue RCU callback for invocation after sched grace period. | |
64 | * @head: structure to be used for queueing the RCU updates. | |
65 | * @func: actual update function to be invoked after the grace period | |
66 | * | |
67 | * The update function will be invoked some time after a full | |
68 | * synchronize_sched()-style grace period elapses, in other words after | |
69 | * all currently executing preempt-disabled sections of code (including | |
70 | * hardirq handlers, NMI handlers, and local_irq_save() blocks) have | |
71 | * completed. | |
72 | */ | |
73 | extern void call_rcu_sched(struct rcu_head *head, | |
74 | void (*func)(struct rcu_head *head)); | |
75 | ||
b55ab616 PM |
76 | extern void __rcu_read_lock(void) __acquires(RCU); |
77 | extern void __rcu_read_unlock(void) __releases(RCU); | |
e260be67 PM |
78 | extern int rcu_pending(int cpu); |
79 | extern int rcu_needs_cpu(int cpu); | |
80 | ||
81 | #define __rcu_read_lock_bh() { rcu_read_lock(); local_bh_disable(); } | |
82 | #define __rcu_read_unlock_bh() { local_bh_enable(); rcu_read_unlock(); } | |
83 | ||
84 | extern void __synchronize_sched(void); | |
85 | ||
86 | extern void __rcu_init(void); | |
4446a36f | 87 | extern void rcu_init_sched(void); |
e260be67 PM |
88 | extern void rcu_check_callbacks(int cpu, int user); |
89 | extern void rcu_restart_cpu(int cpu); | |
90 | extern long rcu_batches_completed(void); | |
91 | ||
/*
 * Return the number of RCU batches processed thus far.  Useful for debug
 * and statistics.  The _bh variant is identical to straight RCU, since
 * this implementation maps bh onto normal RCU (see call_rcu_bh above).
 */
static inline long rcu_batches_completed_bh(void)
{
	return rcu_batches_completed();
}
100 | ||
101 | #ifdef CONFIG_RCU_TRACE | |
102 | struct rcupreempt_trace; | |
103 | extern long *rcupreempt_flipctr(int cpu); | |
104 | extern long rcupreempt_data_completed(void); | |
105 | extern int rcupreempt_flip_flag(int cpu); | |
106 | extern int rcupreempt_mb_flag(int cpu); | |
107 | extern char *rcupreempt_try_flip_state_name(void); | |
108 | extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); | |
109 | #endif | |
110 | ||
111 | struct softirq_action; | |
112 | ||
2232c2d8 | 113 | #ifdef CONFIG_NO_HZ |
4446a36f | 114 | DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched); |
2232c2d8 SR |
115 | |
/*
 * Tell RCU that this CPU is entering dynticks-idle (nohz) mode.
 * The barrier must precede the increment so that any CPU observing
 * the new counter value also observes this CPU's prior read-side
 * critical sections.  The counter is even while in dynticks-idle mode.
 */
static inline void rcu_enter_nohz(void)
{
	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
	__get_cpu_var(rcu_dyntick_sched).dynticks++;
	WARN_ON(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1); /* now even */
}
122 | ||
/*
 * Tell RCU that this CPU is leaving dynticks-idle (nohz) mode.
 * The barrier must precede the increment so that any CPU observing
 * the new counter value orders it before this CPU's subsequent
 * read-side critical sections.  The counter is odd while non-idle.
 */
static inline void rcu_exit_nohz(void)
{
	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
	__get_cpu_var(rcu_dyntick_sched).dynticks++;
	WARN_ON(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1)); /* odd */
}
129 | ||
#else /* CONFIG_NO_HZ */
/* Without dynticks there is nothing for RCU to track; these are no-ops. */
#define rcu_enter_nohz()	do { } while (0)
#define rcu_exit_nohz()		do { } while (0)
#endif /* CONFIG_NO_HZ */
134 | ||
e260be67 | 135 | #endif /* __LINUX_RCUPREEMPT_H */ |