#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)			(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations that return a value
 * (the test_and_*() family) report 0 if the bit was cleared before the
 * operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit BITS_PER_LONG is the LSB of (addr+1).
 */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif

#define ADDR				BITOP_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))
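
/*
 * Illustrative sketch (not part of the API): for a constant nr = 13,
 * the macros above resolve as follows, so the locked op can target a
 * single byte instead of a whole word:
 *
 *	CONST_MASK_ADDR(13, addr)  -> the byte at (void *)addr + 1
 *	CONST_MASK(13)             -> 1 << (13 & 7) == 0x20
 *
 * i.e. "lock orb $0x20, 1(addr)" sets bit 13 of the bitmap.
 */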

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX "bts %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}
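
/*
 * Illustrative usage (a sketch, not kernel code): atomically set bit 5
 * in a bitmap that may be touched by other CPUs. DECLARE_BITMAP() comes
 * from <linux/types.h>.
 *
 *	DECLARE_BITMAP(flags, 64);
 *
 *	set_bit(5, flags);	// flags[0] |= (1UL << 5), atomically
 */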

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btr %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}
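
/*
 * Illustrative sketch (WORK_PENDING_BIT and work->flags are placeholder
 * names): when clear_bit() publishes "work done" to other CPUs, pair it
 * with the barrier the comment above asks for:
 *
 *	smp_mb__before_atomic();	// order prior stores before the clear
 *	clear_bit(WORK_PENDING_BIT, &work->flags);
 */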

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}

static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
	bool negative;
	asm volatile(LOCK_PREFIX "andb %2,%1"
		CC_SET(s)
		: CC_OUT(s) (negative), ADDR
		: "ir" ((char) ~(1 << nr)) : "memory");
	return negative;
}

/* Let everybody know we have it */
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
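
/*
 * Illustrative sketch of the intended pattern (bit names and the helper
 * are placeholders): clear a "locked" bit in the low byte and learn,
 * from the same atomic op, whether the byte's sign bit (bit 7) is still
 * set, e.g. a "waiters" bit telling us someone needs a wake-up.
 *
 *	if (clear_bit_unlock_is_negative_byte(LOCK_BIT, &word))
 *		wake_up_waiters(&word);		// hypothetical helper
 */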

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btc %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
}
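
/*
 * Illustrative sketch (bit name and helper are placeholders): claim a
 * resource exactly once across CPUs; only the caller that flips the bit
 * from 0 to 1 sees "false" and proceeds.
 *
 *	if (!test_and_set_bit(INITIALIZED_BIT, &state))
 *		do_one_time_init();	// hypothetical helper
 */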

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit() on x86.
 */
static __always_inline bool
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}
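
/*
 * Illustrative sketch of the acquire/release pairing (MY_LOCK_BIT is a
 * placeholder): test_and_set_bit_lock() takes the lock bit and
 * clear_bit_unlock() releases it with the required ordering.
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &word))
 *		cpu_relax();		// spin until we take the bit
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &word);
 */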

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm("bts %2,%1"
	    CC_SET(c)
	    : CC_OUT(c) (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile("btr %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile("btc %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
}

static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}

static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
	bool oldbit;

	asm volatile("bt %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static bool test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))
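
/*
 * Illustrative sketch: test_bit() picks the constant-folded C path when
 * @nr is a compile-time constant and the "bt" instruction otherwise.
 *
 *	DECLARE_BITMAP(map, 128);
 *
 *	if (test_bit(100, map))		// constant nr: pure C, no asm
 *		...;
 *	if (test_bit(n, map))		// variable nr: "bt" instruction
 *		...;
 */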

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
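
/*
 * Worked examples (zero-based result, unlike ffs() below). The "rep"
 * prefix lets newer CPUs decode this as TZCNT; older CPUs ignore it.
 *
 *	__ffs(0x0001UL) == 0
 *	__ffs(0x0008UL) == 3
 *	__ffs(0) is undefined -- callers must check for 0 first.
 */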

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __always_inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
	    : "=r" (word)
	    : "r" (~word));
	return word;
}
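
/*
 * Worked examples: ffz() is simply __ffs() of the complement.
 *
 *	ffz(0x0000UL) == 0	// bit 0 is the first zero
 *	ffz(0x0003UL) == 2	// bits 0-1 set, bit 2 clear
 *	ffz(~0UL) is undefined -- callers must check against ~0UL first.
 */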

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
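
/*
 * Worked examples (zero-based result, unlike fls() below):
 *
 *	__fls(0x0001UL) == 0
 *	__fls(0x0018UL) == 4	// highest set bit is bit 4
 *	__fls(0) is undefined -- callers must check for 0 first.
 */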

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static __always_inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64
	 * documents the dest reg as undefined if x==0, but their CPU
	 * architect says it is in fact written back with its old value,
	 * except that the top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
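
/*
 * Worked examples (one-based result, 0 means "no bit set"):
 *
 *	ffs(0)    == 0
 *	ffs(1)    == 1
 *	ffs(0x10) == 5
 */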

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64
	 * documents the dest reg as undefined if x==0, but their CPU
	 * architect says it is in fact written back with its old value,
	 * except that the top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
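
/*
 * Worked examples (one-based result, 0 means "no bit set"):
 *
 *	fls(0)    == 0
 *	fls(1)    == 1
 *	fls(0x10) == 5
 *	fls(~0)   == 32
 */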

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64
	 * documents the dest reg as undefined if x==0, but their CPU
	 * architect says it is in fact written back with its old value.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif
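
/*
 * Worked examples (one-based result, 0 means "no bit set"):
 *
 *	fls64(0)         == 0
 *	fls64(1)         == 1
 *	fls64(1ULL << 63) == 64
 */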

#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */