]>
Commit | Line | Data |
---|---|---|
fb1c8f93 IM |
#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

/* Nonzero if @addr lies within one of the lock functions (used by backtraces). */
int in_lock_functions(unsigned long addr);

/* BUG if @x is not currently held; only valid on SMP/debug builds. */
#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))

/*
 * Out-of-line lock/unlock entry points, implemented in kernel/spinlock.c.
 * The __acquires()/__releases() annotations are consumed by sparse for
 * static lock-balance checking; they generate no code.
 */
void __lockfunc _spin_lock(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
							__acquires(lock);
void __lockfunc
_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
							__acquires(lock);
void __lockfunc _spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _spin_lock_irq(raw_spinlock_t *lock)	__acquires(lock);

unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc
_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
							__acquires(lock);
int __lockfunc _spin_trylock(raw_spinlock_t *lock);
int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock);
void __lockfunc _spin_unlock(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc
_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
							__releases(lock);
/*
 * Architectures that opt in via CONFIG_INLINE_SPIN_* get the lock ops
 * remapped from the out-of-line _spin_*() entry points above to the
 * inline __spin_*() implementations defined later in this header.
 */
#ifdef CONFIG_INLINE_SPIN_LOCK
#define _spin_lock(lock) __spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _spin_lock_bh(lock) __spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _spin_lock_irq(lock) __spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _spin_trylock(lock) __spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK
#define _spin_unlock(lock) __spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
#endif
/*
 * Attempt to acquire @lock without spinning.  Returns 1 with preemption
 * disabled on success; returns 0 with preemption re-enabled on failure.
 */
static inline int __spin_trylock(raw_spinlock_t *lock)
{
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		/* Record the acquire with lockdep (trylock flag = 1). */
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	/* Lock not taken: undo the preempt_disable() above. */
	preempt_enable();
	return 0;
}
95 | ||
/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

/*
 * Acquire @lock with local interrupts disabled.  Returns the previous
 * interrupt state to be passed back to __spin_unlock_irqrestore().
 * Ordering matters: irqs off first, then preemption off, then acquire.
 */
static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we dont want the hand-coded irq-enable of
	 * do_raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
	do_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
122 | ||
/*
 * Acquire @lock with local interrupts disabled (previous irq state is
 * not saved; pair with __spin_unlock_irq()).
 */
static inline void __spin_lock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
130 | ||
/*
 * Acquire @lock with softirq (bottom-half) processing disabled;
 * pair with __spin_unlock_bh().
 */
static inline void __spin_lock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
138 | ||
/*
 * Acquire @lock with preemption disabled (plain spin_lock flavour;
 * interrupts and softirqs are left as-is).
 */
static inline void __spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
145 | ||
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
147 | ||
/*
 * Release @lock acquired via __spin_lock().  Release order is the
 * mirror of acquire: tell lockdep, drop the lock, re-enable preemption.
 */
static inline void __spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	preempt_enable();
}
154 | ||
/*
 * Release @lock and restore the interrupt state saved by
 * __spin_lock_irqsave() (@flags), then re-enable preemption.
 */
static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
163 | ||
/*
 * Release @lock acquired via __spin_lock_irq(): drop the lock,
 * unconditionally re-enable local interrupts, then preemption.
 */
static inline void __spin_unlock_irq(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
171 | ||
/*
 * Release @lock acquired via __spin_lock_bh().  Preemption is re-enabled
 * without a resched check because local_bh_enable_ip() performs its own
 * preemption check when softirqs come back on.
 */
static inline void __spin_unlock_bh(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	/* Pass the caller's IP so bh-enable shows up correctly in traces. */
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
179 | ||
/*
 * Attempt to acquire @lock with softirqs disabled.  Returns 1 with
 * softirqs and preemption left disabled on success; on failure both are
 * restored and 0 is returned.
 */
static inline int __spin_trylock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		/* Record the acquire with lockdep (trylock flag = 1). */
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	/* Lock not taken: undo preempt/bh disable in reverse order. */
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
192 | ||
6b6b4792 TG |
193 | #include <linux/rwlock_api_smp.h> |
194 | ||
fb1c8f93 | 195 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ |