1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
102fa15c | 6 | * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org) |
1da177e4 LT |
7 | * Copyright (c) 1999, 2000 Silicon Graphics, Inc. |
8 | */ | |
9 | #ifndef _ASM_BITOPS_H | |
10 | #define _ASM_BITOPS_H | |
11 | ||
0624517d JS |
12 | #ifndef _LINUX_BITOPS_H |
13 | #error only <linux/bitops.h> can be included directly | |
14 | #endif | |
15 | ||
1da177e4 LT |
16 | #include <linux/compiler.h> |
17 | #include <linux/types.h> | |
0004a9df | 18 | #include <asm/barrier.h> |
1da177e4 LT |
19 | #include <asm/byteorder.h> /* sigh ... */ |
20 | #include <asm/cpu-features.h> | |
4ffd8b38 RB |
21 | #include <asm/sgidefs.h> |
22 | #include <asm/war.h> | |
1da177e4 | 23 | |
49a89efb | 24 | #if _MIPS_SZLONG == 32 |
1da177e4 LT |
25 | #define SZLONG_LOG 5 |
26 | #define SZLONG_MASK 31UL | |
aac8aa77 MR |
27 | #define __LL "ll " |
28 | #define __SC "sc " | |
70342287 RB |
29 | #define __INS "ins " |
30 | #define __EXT "ext " | |
49a89efb | 31 | #elif _MIPS_SZLONG == 64 |
1da177e4 LT |
32 | #define SZLONG_LOG 6 |
33 | #define SZLONG_MASK 63UL | |
aac8aa77 MR |
34 | #define __LL "lld " |
35 | #define __SC "scd " | |
70342287 RB |
36 | #define __INS "dins " |
37 | #define __EXT "dext " | |
1da177e4 LT |
38 | #endif |
39 | ||
1da177e4 LT |
40 | /* |
41 | * clear_bit() doesn't provide any barrier for the compiler. | |
42 | */ | |
f252ffd5 | 43 | #define smp_mb__before_clear_bit() smp_mb__before_llsc() |
17099b11 | 44 | #define smp_mb__after_clear_bit() smp_llsc_mb() |
1da177e4 | 45 | |
92d11594 JQ |
46 | |
47 | /* | |
48 | * These are the "slower" versions of the functions and are in bitops.c. | |
49 | * These functions call raw_local_irq_{save,restore}(). | |
50 | */ | |
51 | void __mips_set_bit(unsigned long nr, volatile unsigned long *addr); | |
52 | void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr); | |
53 | void __mips_change_bit(unsigned long nr, volatile unsigned long *addr); | |
54 | int __mips_test_and_set_bit(unsigned long nr, | |
55 | volatile unsigned long *addr); | |
56 | int __mips_test_and_set_bit_lock(unsigned long nr, | |
57 | volatile unsigned long *addr); | |
58 | int __mips_test_and_clear_bit(unsigned long nr, | |
59 | volatile unsigned long *addr); | |
60 | int __mips_test_and_change_bit(unsigned long nr, | |
61 | volatile unsigned long *addr); | |
62 | ||
63 | ||
1da177e4 LT |
64 | /* |
65 | * set_bit - Atomically set a bit in memory | |
66 | * @nr: the bit to set | |
67 | * @addr: the address to start counting from | |
68 | * | |
69 | * This function is atomic and may not be reordered. See __set_bit() | |
70 | * if you do not require the atomic guarantees. | |
71 | * Note that @nr may be almost arbitrarily large; this function is not | |
72 | * restricted to acting on a single-word quantity. | |
73 | */ | |
74 | static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | |
75 | { | |
76 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | |
9de79c50 | 77 | int bit = nr & SZLONG_MASK; |
1da177e4 LT |
78 | unsigned long temp; |
79 | ||
b791d119 | 80 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
1da177e4 | 81 | __asm__ __volatile__( |
c4559f67 | 82 | " .set mips3 \n" |
1da177e4 LT |
83 | "1: " __LL "%0, %1 # set_bit \n" |
84 | " or %0, %2 \n" | |
aac8aa77 | 85 | " " __SC "%0, %1 \n" |
1da177e4 | 86 | " beqzl %0, 1b \n" |
aac8aa77 | 87 | " .set mips0 \n" |
1da177e4 | 88 | : "=&r" (temp), "=m" (*m) |
b961153b | 89 | : "ir" (1UL << bit), "m" (*m)); |
102fa15c | 90 | #ifdef CONFIG_CPU_MIPSR2 |
b791d119 | 91 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { |
7837314d RB |
92 | do { |
93 | __asm__ __volatile__( | |
94 | " " __LL "%0, %1 # set_bit \n" | |
95 | " " __INS "%0, %3, %2, 1 \n" | |
96 | " " __SC "%0, %1 \n" | |
97 | : "=&r" (temp), "+m" (*m) | |
98 | : "ir" (bit), "r" (~0)); | |
99 | } while (unlikely(!temp)); | |
102fa15c | 100 | #endif /* CONFIG_CPU_MIPSR2 */ |
b791d119 | 101 | } else if (kernel_uses_llsc) { |
7837314d RB |
102 | do { |
103 | __asm__ __volatile__( | |
104 | " .set mips3 \n" | |
105 | " " __LL "%0, %1 # set_bit \n" | |
106 | " or %0, %2 \n" | |
107 | " " __SC "%0, %1 \n" | |
108 | " .set mips0 \n" | |
109 | : "=&r" (temp), "+m" (*m) | |
110 | : "ir" (1UL << bit)); | |
111 | } while (unlikely(!temp)); | |
92d11594 JQ |
112 | } else |
113 | __mips_set_bit(nr, addr); | |
1da177e4 LT |
114 | } |
115 | ||
1da177e4 LT |
116 | /* |
117 | * clear_bit - Clears a bit in memory | |
118 | * @nr: Bit to clear | |
119 | * @addr: Address to start counting from | |
120 | * | |
121 | * clear_bit() is atomic and may not be reordered. However, it does | |
122 | * not contain a memory barrier, so if it is used for locking purposes, | |
123 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | |
124 | * in order to ensure changes are visible on other processors. | |
125 | */ | |
126 | static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | |
127 | { | |
128 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | |
9de79c50 | 129 | int bit = nr & SZLONG_MASK; |
1da177e4 LT |
130 | unsigned long temp; |
131 | ||
b791d119 | 132 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
1da177e4 | 133 | __asm__ __volatile__( |
c4559f67 | 134 | " .set mips3 \n" |
1da177e4 LT |
135 | "1: " __LL "%0, %1 # clear_bit \n" |
136 | " and %0, %2 \n" | |
137 | " " __SC "%0, %1 \n" | |
138 | " beqzl %0, 1b \n" | |
aac8aa77 | 139 | " .set mips0 \n" |
7837314d RB |
140 | : "=&r" (temp), "+m" (*m) |
141 | : "ir" (~(1UL << bit))); | |
102fa15c | 142 | #ifdef CONFIG_CPU_MIPSR2 |
b791d119 | 143 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { |
7837314d RB |
144 | do { |
145 | __asm__ __volatile__( | |
146 | " " __LL "%0, %1 # clear_bit \n" | |
147 | " " __INS "%0, $0, %2, 1 \n" | |
148 | " " __SC "%0, %1 \n" | |
149 | : "=&r" (temp), "+m" (*m) | |
150 | : "ir" (bit)); | |
151 | } while (unlikely(!temp)); | |
102fa15c | 152 | #endif /* CONFIG_CPU_MIPSR2 */ |
b791d119 | 153 | } else if (kernel_uses_llsc) { |
7837314d RB |
154 | do { |
155 | __asm__ __volatile__( | |
156 | " .set mips3 \n" | |
157 | " " __LL "%0, %1 # clear_bit \n" | |
158 | " and %0, %2 \n" | |
159 | " " __SC "%0, %1 \n" | |
160 | " .set mips0 \n" | |
161 | : "=&r" (temp), "+m" (*m) | |
162 | : "ir" (~(1UL << bit))); | |
163 | } while (unlikely(!temp)); | |
92d11594 JQ |
164 | } else |
165 | __mips_clear_bit(nr, addr); | |
1da177e4 LT |
166 | } |
167 | ||
728697cd NP |
168 | /* |
169 | * clear_bit_unlock - Clears a bit in memory | |
170 | * @nr: Bit to clear | |
171 | * @addr: Address to start counting from | |
172 | * | |
173 | * clear_bit() is atomic and implies release semantics before the memory | |
174 | * operation. It can be used for an unlock. | |
175 | */ | |
176 | static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) | |
177 | { | |
178 | smp_mb__before_clear_bit(); | |
179 | clear_bit(nr, addr); | |
180 | } | |
181 | ||
1da177e4 LT |
182 | /* |
183 | * change_bit - Toggle a bit in memory | |
184 | * @nr: Bit to change | |
185 | * @addr: Address to start counting from | |
186 | * | |
187 | * change_bit() is atomic and may not be reordered. | |
188 | * Note that @nr may be almost arbitrarily large; this function is not | |
189 | * restricted to acting on a single-word quantity. | |
190 | */ | |
191 | static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | |
192 | { | |
9de79c50 | 193 | int bit = nr & SZLONG_MASK; |
b961153b | 194 | |
b791d119 | 195 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
1da177e4 LT |
196 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
197 | unsigned long temp; | |
198 | ||
199 | __asm__ __volatile__( | |
c4559f67 | 200 | " .set mips3 \n" |
1da177e4 LT |
201 | "1: " __LL "%0, %1 # change_bit \n" |
202 | " xor %0, %2 \n" | |
aac8aa77 | 203 | " " __SC "%0, %1 \n" |
1da177e4 | 204 | " beqzl %0, 1b \n" |
aac8aa77 | 205 | " .set mips0 \n" |
7837314d RB |
206 | : "=&r" (temp), "+m" (*m) |
207 | : "ir" (1UL << bit)); | |
b791d119 | 208 | } else if (kernel_uses_llsc) { |
1da177e4 LT |
209 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
210 | unsigned long temp; | |
211 | ||
7837314d RB |
212 | do { |
213 | __asm__ __volatile__( | |
214 | " .set mips3 \n" | |
215 | " " __LL "%0, %1 # change_bit \n" | |
216 | " xor %0, %2 \n" | |
217 | " " __SC "%0, %1 \n" | |
218 | " .set mips0 \n" | |
219 | : "=&r" (temp), "+m" (*m) | |
220 | : "ir" (1UL << bit)); | |
221 | } while (unlikely(!temp)); | |
92d11594 JQ |
222 | } else |
223 | __mips_change_bit(nr, addr); | |
1da177e4 LT |
224 | } |
225 | ||
1da177e4 LT |
226 | /* |
227 | * test_and_set_bit - Set a bit and return its old value | |
228 | * @nr: Bit to set | |
229 | * @addr: Address to count from | |
230 | * | |
231 | * This operation is atomic and cannot be reordered. | |
232 | * It also implies a memory barrier. | |
233 | */ | |
234 | static inline int test_and_set_bit(unsigned long nr, | |
235 | volatile unsigned long *addr) | |
236 | { | |
9de79c50 | 237 | int bit = nr & SZLONG_MASK; |
ff72b7a6 | 238 | unsigned long res; |
b961153b | 239 | |
f252ffd5 | 240 | smp_mb__before_llsc(); |
c8f30ae5 | 241 | |
b791d119 | 242 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
1da177e4 | 243 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
ff72b7a6 | 244 | unsigned long temp; |
1da177e4 LT |
245 | |
246 | __asm__ __volatile__( | |
c4559f67 | 247 | " .set mips3 \n" |
1da177e4 LT |
248 | "1: " __LL "%0, %1 # test_and_set_bit \n" |
249 | " or %2, %0, %3 \n" | |
250 | " " __SC "%2, %1 \n" | |
251 | " beqzl %2, 1b \n" | |
252 | " and %2, %0, %3 \n" | |
aac8aa77 | 253 | " .set mips0 \n" |
7837314d RB |
254 | : "=&r" (temp), "+m" (*m), "=&r" (res) |
255 | : "r" (1UL << bit) | |
1da177e4 | 256 | : "memory"); |
b791d119 | 257 | } else if (kernel_uses_llsc) { |
1da177e4 | 258 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
ff72b7a6 | 259 | unsigned long temp; |
1da177e4 | 260 | |
7837314d RB |
261 | do { |
262 | __asm__ __volatile__( | |
263 | " .set mips3 \n" | |
264 | " " __LL "%0, %1 # test_and_set_bit \n" | |
265 | " or %2, %0, %3 \n" | |
266 | " " __SC "%2, %1 \n" | |
267 | " .set mips0 \n" | |
268 | : "=&r" (temp), "+m" (*m), "=&r" (res) | |
269 | : "r" (1UL << bit) | |
270 | : "memory"); | |
271 | } while (unlikely(!res)); | |
272 | ||
273 | res = temp & (1UL << bit); | |
92d11594 JQ |
274 | } else |
275 | res = __mips_test_and_set_bit(nr, addr); | |
0004a9df | 276 | |
17099b11 | 277 | smp_llsc_mb(); |
ff72b7a6 RB |
278 | |
279 | return res != 0; | |
1da177e4 LT |
280 | } |
281 | ||
728697cd NP |
282 | /* |
283 | * test_and_set_bit_lock - Set a bit and return its old value | |
284 | * @nr: Bit to set | |
285 | * @addr: Address to count from | |
286 | * | |
287 | * This operation is atomic and implies acquire ordering semantics | |
288 | * after the memory operation. | |
289 | */ | |
290 | static inline int test_and_set_bit_lock(unsigned long nr, | |
291 | volatile unsigned long *addr) | |
292 | { | |
9de79c50 | 293 | int bit = nr & SZLONG_MASK; |
728697cd NP |
294 | unsigned long res; |
295 | ||
b791d119 | 296 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
728697cd NP |
297 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
298 | unsigned long temp; | |
299 | ||
300 | __asm__ __volatile__( | |
301 | " .set mips3 \n" | |
302 | "1: " __LL "%0, %1 # test_and_set_bit \n" | |
303 | " or %2, %0, %3 \n" | |
304 | " " __SC "%2, %1 \n" | |
305 | " beqzl %2, 1b \n" | |
306 | " and %2, %0, %3 \n" | |
307 | " .set mips0 \n" | |
7837314d RB |
308 | : "=&r" (temp), "+m" (*m), "=&r" (res) |
309 | : "r" (1UL << bit) | |
728697cd | 310 | : "memory"); |
b791d119 | 311 | } else if (kernel_uses_llsc) { |
728697cd NP |
312 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
313 | unsigned long temp; | |
314 | ||
7837314d RB |
315 | do { |
316 | __asm__ __volatile__( | |
317 | " .set mips3 \n" | |
318 | " " __LL "%0, %1 # test_and_set_bit \n" | |
319 | " or %2, %0, %3 \n" | |
320 | " " __SC "%2, %1 \n" | |
321 | " .set mips0 \n" | |
322 | : "=&r" (temp), "+m" (*m), "=&r" (res) | |
323 | : "r" (1UL << bit) | |
324 | : "memory"); | |
325 | } while (unlikely(!res)); | |
326 | ||
327 | res = temp & (1UL << bit); | |
92d11594 JQ |
328 | } else |
329 | res = __mips_test_and_set_bit_lock(nr, addr); | |
728697cd NP |
330 | |
331 | smp_llsc_mb(); | |
332 | ||
333 | return res != 0; | |
334 | } | |
1da177e4 LT |
335 | /* |
336 | * test_and_clear_bit - Clear a bit and return its old value | |
337 | * @nr: Bit to clear | |
338 | * @addr: Address to count from | |
339 | * | |
340 | * This operation is atomic and cannot be reordered. | |
341 | * It also implies a memory barrier. | |
342 | */ | |
343 | static inline int test_and_clear_bit(unsigned long nr, | |
344 | volatile unsigned long *addr) | |
345 | { | |
9de79c50 | 346 | int bit = nr & SZLONG_MASK; |
ff72b7a6 | 347 | unsigned long res; |
b961153b | 348 | |
f252ffd5 | 349 | smp_mb__before_llsc(); |
c8f30ae5 | 350 | |
b791d119 | 351 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
1da177e4 | 352 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
8e09ffb6 | 353 | unsigned long temp; |
1da177e4 LT |
354 | |
355 | __asm__ __volatile__( | |
c4559f67 | 356 | " .set mips3 \n" |
1da177e4 LT |
357 | "1: " __LL "%0, %1 # test_and_clear_bit \n" |
358 | " or %2, %0, %3 \n" | |
359 | " xor %2, %3 \n" | |
70342287 | 360 | " " __SC "%2, %1 \n" |
1da177e4 LT |
361 | " beqzl %2, 1b \n" |
362 | " and %2, %0, %3 \n" | |
aac8aa77 | 363 | " .set mips0 \n" |
7837314d RB |
364 | : "=&r" (temp), "+m" (*m), "=&r" (res) |
365 | : "r" (1UL << bit) | |
1da177e4 | 366 | : "memory"); |
102fa15c | 367 | #ifdef CONFIG_CPU_MIPSR2 |
b791d119 | 368 | } else if (kernel_uses_llsc && __builtin_constant_p(nr)) { |
102fa15c | 369 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
ff72b7a6 | 370 | unsigned long temp; |
102fa15c | 371 | |
7837314d RB |
372 | do { |
373 | __asm__ __volatile__( | |
70342287 | 374 | " " __LL "%0, %1 # test_and_clear_bit \n" |
7837314d | 375 | " " __EXT "%2, %0, %3, 1 \n" |
70342287 RB |
376 | " " __INS "%0, $0, %3, 1 \n" |
377 | " " __SC "%0, %1 \n" | |
7837314d RB |
378 | : "=&r" (temp), "+m" (*m), "=&r" (res) |
379 | : "ir" (bit) | |
380 | : "memory"); | |
381 | } while (unlikely(!temp)); | |
102fa15c | 382 | #endif |
b791d119 | 383 | } else if (kernel_uses_llsc) { |
1da177e4 | 384 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
ff72b7a6 | 385 | unsigned long temp; |
1da177e4 | 386 | |
7837314d RB |
387 | do { |
388 | __asm__ __volatile__( | |
389 | " .set mips3 \n" | |
70342287 | 390 | " " __LL "%0, %1 # test_and_clear_bit \n" |
7837314d RB |
391 | " or %2, %0, %3 \n" |
392 | " xor %2, %3 \n" | |
70342287 | 393 | " " __SC "%2, %1 \n" |
7837314d RB |
394 | " .set mips0 \n" |
395 | : "=&r" (temp), "+m" (*m), "=&r" (res) | |
396 | : "r" (1UL << bit) | |
397 | : "memory"); | |
398 | } while (unlikely(!res)); | |
399 | ||
400 | res = temp & (1UL << bit); | |
92d11594 JQ |
401 | } else |
402 | res = __mips_test_and_clear_bit(nr, addr); | |
0004a9df | 403 | |
17099b11 | 404 | smp_llsc_mb(); |
ff72b7a6 RB |
405 | |
406 | return res != 0; | |
1da177e4 LT |
407 | } |
408 | ||
1da177e4 LT |
409 | /* |
410 | * test_and_change_bit - Change a bit and return its old value | |
411 | * @nr: Bit to change | |
412 | * @addr: Address to count from | |
413 | * | |
414 | * This operation is atomic and cannot be reordered. | |
415 | * It also implies a memory barrier. | |
416 | */ | |
417 | static inline int test_and_change_bit(unsigned long nr, | |
418 | volatile unsigned long *addr) | |
419 | { | |
9de79c50 | 420 | int bit = nr & SZLONG_MASK; |
ff72b7a6 | 421 | unsigned long res; |
b961153b | 422 | |
f252ffd5 | 423 | smp_mb__before_llsc(); |
c8f30ae5 | 424 | |
b791d119 | 425 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
1da177e4 | 426 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
ff72b7a6 | 427 | unsigned long temp; |
1da177e4 LT |
428 | |
429 | __asm__ __volatile__( | |
c4559f67 | 430 | " .set mips3 \n" |
aac8aa77 | 431 | "1: " __LL "%0, %1 # test_and_change_bit \n" |
1da177e4 | 432 | " xor %2, %0, %3 \n" |
aac8aa77 | 433 | " " __SC "%2, %1 \n" |
1da177e4 LT |
434 | " beqzl %2, 1b \n" |
435 | " and %2, %0, %3 \n" | |
aac8aa77 | 436 | " .set mips0 \n" |
7837314d RB |
437 | : "=&r" (temp), "+m" (*m), "=&r" (res) |
438 | : "r" (1UL << bit) | |
1da177e4 | 439 | : "memory"); |
b791d119 | 440 | } else if (kernel_uses_llsc) { |
1da177e4 | 441 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
ff72b7a6 | 442 | unsigned long temp; |
1da177e4 | 443 | |
7837314d RB |
444 | do { |
445 | __asm__ __volatile__( | |
446 | " .set mips3 \n" | |
70342287 | 447 | " " __LL "%0, %1 # test_and_change_bit \n" |
7837314d RB |
448 | " xor %2, %0, %3 \n" |
449 | " " __SC "\t%2, %1 \n" | |
450 | " .set mips0 \n" | |
451 | : "=&r" (temp), "+m" (*m), "=&r" (res) | |
452 | : "r" (1UL << bit) | |
453 | : "memory"); | |
454 | } while (unlikely(!res)); | |
455 | ||
456 | res = temp & (1UL << bit); | |
92d11594 JQ |
457 | } else |
458 | res = __mips_test_and_change_bit(nr, addr); | |
0004a9df | 459 | |
17099b11 | 460 | smp_llsc_mb(); |
ff72b7a6 RB |
461 | |
462 | return res != 0; | |
1da177e4 LT |
463 | } |
464 | ||
3c9ee7ef | 465 | #include <asm-generic/bitops/non-atomic.h> |
1da177e4 | 466 | |
728697cd NP |
467 | /* |
468 | * __clear_bit_unlock - Clears a bit in memory | |
469 | * @nr: Bit to clear | |
470 | * @addr: Address to start counting from | |
471 | * | |
472 | * __clear_bit() is non-atomic and implies release semantics before the memory | |
473 | * operation. It can be used for an unlock if no other CPUs can concurrently | |
474 | * modify other bits in the word. | |
475 | */ | |
476 | static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) | |
477 | { | |
478 | smp_mb(); | |
479 | __clear_bit(nr, addr); | |
480 | } | |
481 | ||
1da177e4 | 482 | /* |
ec917c2c | 483 | * Return the bit position (0..63) of the most significant 1 bit in a word |
65903265 RB |
484 | * Returns -1 if no 1 bit exists |
485 | */ | |
4816227b | 486 | static inline unsigned long __fls(unsigned long word) |
65903265 | 487 | { |
4816227b | 488 | int num; |
65903265 | 489 | |
4816227b | 490 | if (BITS_PER_LONG == 32 && |
47740eb8 | 491 | __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { |
49a89efb | 492 | __asm__( |
ec917c2c RB |
493 | " .set push \n" |
494 | " .set mips32 \n" | |
495 | " clz %0, %1 \n" | |
496 | " .set pop \n" | |
4816227b RB |
497 | : "=r" (num) |
498 | : "r" (word)); | |
65903265 | 499 | |
4816227b | 500 | return 31 - num; |
ec917c2c RB |
501 | } |
502 | ||
4816227b RB |
503 | if (BITS_PER_LONG == 64 && |
504 | __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) { | |
505 | __asm__( | |
506 | " .set push \n" | |
507 | " .set mips64 \n" | |
508 | " dclz %0, %1 \n" | |
509 | " .set pop \n" | |
510 | : "=r" (num) | |
511 | : "r" (word)); | |
65903265 | 512 | |
4816227b RB |
513 | return 63 - num; |
514 | } | |
515 | ||
516 | num = BITS_PER_LONG - 1; | |
65903265 | 517 | |
4816227b RB |
518 | #if BITS_PER_LONG == 64 |
519 | if (!(word & (~0ul << 32))) { | |
520 | num -= 32; | |
521 | word <<= 32; | |
522 | } | |
523 | #endif | |
524 | if (!(word & (~0ul << (BITS_PER_LONG-16)))) { | |
525 | num -= 16; | |
526 | word <<= 16; | |
527 | } | |
528 | if (!(word & (~0ul << (BITS_PER_LONG-8)))) { | |
529 | num -= 8; | |
530 | word <<= 8; | |
531 | } | |
532 | if (!(word & (~0ul << (BITS_PER_LONG-4)))) { | |
533 | num -= 4; | |
534 | word <<= 4; | |
535 | } | |
536 | if (!(word & (~0ul << (BITS_PER_LONG-2)))) { | |
537 | num -= 2; | |
538 | word <<= 2; | |
539 | } | |
540 | if (!(word & (~0ul << (BITS_PER_LONG-1)))) | |
541 | num -= 1; | |
542 | return num; | |
65903265 | 543 | } |
65903265 RB |
544 | |
/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns the index (0..SZLONG-1) of the least significant set bit.
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	/* word & -word isolates the lowest set bit; its position is then
	 * also the position of the highest set bit, which __fls() finds. */
	unsigned long lowest = word & -word;

	return __fls(lowest);
}
556 | ||
557 | /* | |
bc818247 | 558 | * fls - find last bit set. |
1da177e4 LT |
559 | * @word: The word to search |
560 | * | |
bc818247 AN |
561 | * This is defined the same way as ffs. |
562 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | |
1da177e4 | 563 | */ |
4816227b | 564 | static inline int fls(int x) |
1da177e4 | 565 | { |
4816227b | 566 | int r; |
65903265 | 567 | |
47740eb8 | 568 | if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { |
4816227b | 569 | __asm__("clz %0, %1" : "=r" (x) : "r" (x)); |
1da177e4 | 570 | |
4816227b RB |
571 | return 32 - x; |
572 | } | |
bc818247 | 573 | |
4816227b RB |
574 | r = 32; |
575 | if (!x) | |
576 | return 0; | |
577 | if (!(x & 0xffff0000u)) { | |
578 | x <<= 16; | |
579 | r -= 16; | |
580 | } | |
581 | if (!(x & 0xff000000u)) { | |
582 | x <<= 8; | |
583 | r -= 8; | |
584 | } | |
585 | if (!(x & 0xf0000000u)) { | |
586 | x <<= 4; | |
587 | r -= 4; | |
588 | } | |
589 | if (!(x & 0xc0000000u)) { | |
590 | x <<= 2; | |
591 | r -= 2; | |
592 | } | |
593 | if (!(x & 0x80000000u)) { | |
594 | x <<= 1; | |
595 | r -= 1; | |
596 | } | |
597 | return r; | |
65903265 | 598 | } |
4816227b | 599 | |
bc818247 | 600 | #include <asm-generic/bitops/fls64.h> |
65903265 RB |
601 | |
/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs):
 * bits are numbered from 1 and ffs(0) is 0.
 */
static inline int ffs(int word)
{
	/* Isolate the lowest set bit and let fls() number it. */
	return word ? fls(word & -word) : 0;
}
617 | ||
bc818247 | 618 | #include <asm-generic/bitops/ffz.h> |
3c9ee7ef | 619 | #include <asm-generic/bitops/find.h> |
1da177e4 LT |
620 | |
621 | #ifdef __KERNEL__ | |
622 | ||
3c9ee7ef | 623 | #include <asm-generic/bitops/sched.h> |
1a403d1d DD |
624 | |
625 | #include <asm/arch_hweight.h> | |
626 | #include <asm-generic/bitops/const_hweight.h> | |
627 | ||
861b5ae7 | 628 | #include <asm-generic/bitops/le.h> |
3c9ee7ef | 629 | #include <asm-generic/bitops/ext2-atomic.h> |
1da177e4 LT |
630 | |
631 | #endif /* __KERNEL__ */ | |
632 | ||
633 | #endif /* _ASM_BITOPS_H */ |