/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/ktime.h>

struct rcu_dynticks;
static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
	return 0;
}

/* Never flag non-existent other CPUs! */
static inline bool rcu_eqs_special_set(int cpu) { return false; }

static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

static inline unsigned long get_state_synchronize_sched(void)
{
	return 0;
}

static inline void cond_synchronize_sched(unsigned long oldstate)
{
	might_sleep();
}
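/*
 * Editorial note, not part of the original header: the four stubs above
 * implement the grace-period polling interface for the single-CPU case,
 * so the state "cookie" is always zero and cond_synchronize_rcu() never
 * needs to block.  A caller would typically use the interface as in the
 * sketch below; do_other_work() and free_old_version() are hypothetical
 * names used only for illustration.
 *
 *	unsigned long gp_snap;
 *
 *	gp_snap = get_state_synchronize_rcu();
 *	do_other_work();
 *	cond_synchronize_rcu(gp_snap);
 *	free_old_version();
 */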
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched();  /* Only one CPU, so only one list of callbacks! */
}

static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}
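/*
 * Editorial note, not part of the original header: on a single CPU an
 * RCU reader cannot run concurrently with an updater that is blocked in
 * synchronize_sched(), so mapping the _bh and _expedited flavors above
 * onto synchronize_sched() preserves the usual update-side guarantee.
 * A representative reader/updater pair is sketched below; gp, gp_lock
 * and do_something_with() are hypothetical names used only for
 * illustration.
 *
 * Reader:
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock();
 *
 * Updater (old is unreachable to new readers once gp is updated, and all
 * pre-existing readers are done once synchronize_rcu_bh() returns):
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new);
 *	synchronize_rcu_bh();
 *	kfree(old);
 */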
static inline void kfree_call_rcu(struct rcu_head *head,
				  rcu_callback_t func)
{
	call_rcu(head, func);
}
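/*
 * Editorial note, not part of the original header: kfree_call_rcu() is
 * the hook behind kfree_rcu(); in this one-CPU build it simply queues
 * the callback with call_rcu().  Typical usage looks like the sketch
 * below, where struct foo, foo_head and foo_lock are hypothetical names
 * used only for illustration.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	old = rcu_dereference_protected(foo_head, lockdep_is_held(&foo_lock));
 *	rcu_assign_pointer(foo_head, new);
 *	kfree_rcu(old, rcu);
 */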
#define rcu_note_context_switch(preempt) \
	do { \
		rcu_sched_qs(); \
		rcu_note_voluntary_context_switch_lite(current); \
	} while (0)

static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return 0;
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
static inline bool rcu_irq_enter_disabled(void) { return false; }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
static inline void exit_rcu(void) { }
#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
#else /* #ifndef CONFIG_SRCU */
static inline void rcu_scheduler_starting(void) { }
#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_is_watching(void) { return true; }
/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }

/* RCUtree hotplug events */
#define rcutree_prepare_cpu      NULL
#define rcutree_online_cpu       NULL
#define rcutree_offline_cpu      NULL
#define rcutree_dead_cpu         NULL
#define rcutree_dying_cpu        NULL

#endif /* __LINUX_TINY_H */