/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>
#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)			(U64_C(1) << (n))
/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif

#define ADDR				BITOP_ADDR(addr)
/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))
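
/*
 * For illustration: with a constant nr == 13, CONST_MASK_ADDR(13, addr)
 * names the byte at ((void *)addr + 1) and CONST_MASK(13) evaluates to
 * (1 << 5) == 0x20, so set_bit() below compiles down to a single
 * "lock orb $0x20,1(%reg)" instead of a full-width bts.
 */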
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX "bts %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btr %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}
/**
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}
static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
static __always_inline bool clear_bit_unlock_is_negative_byte(long nr,
		volatile unsigned long *addr)
{
	bool negative;
	asm volatile(LOCK_PREFIX "andb %2,%1"
		CC_SET(s)
		: CC_OUT(s) (negative), ADDR
		: "ir" ((char) ~(1 << nr)) : "memory");
	return negative;
}
// Let everybody know we have it
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
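
/*
 * The single locked "andb" above both clears bit @nr and reports the
 * resulting sign bit (bit 7) of the addressed byte, so a caller such as
 * the page unlock fast path can drop PG_locked and learn whether
 * PG_waiters is set without a second atomic operation.
 */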
/**
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btc %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
}
/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static __always_inline bool
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}
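
/*
 * Acquire/release sketch built on these helpers (MY_LOCK_BIT and word
 * are hypothetical):
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &word))
 *		cpu_relax();			// spin until the bit was 0
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &word);	// release
 */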
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm("bts %2,%1"
	    CC_SET(c)
	    : CC_OUT(c) (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
}
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile("btr %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}
/* WARNING: non-atomic and it can be reordered! */
static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile("btc %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
}
static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
	bool oldbit;

	asm volatile("bt %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static bool test_bit(int nr, const volatile unsigned long *addr);
#endif
#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))
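
/*
 * Example: test_bit(3, addr) with a literal bit number resolves at
 * compile time to the C expression in constant_test_bit(), while
 * test_bit(i, addr) with a runtime i emits a "bt" instruction via
 * variable_test_bit().
 */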
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}
/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __always_inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}
/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
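
/*
 * Worked values for the helpers above (all results 0-based):
 * __ffs(0x60) == 5, ffz(0x0f) == 4, __fls(0x60) == 6.
 */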
#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static __always_inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
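
/*
 * Example: ffs(0) == 0, ffs(1) == 1, ffs(0x8000) == 16; note the
 * 1-based result, unlike __ffs(), where __ffs(1) == 0.
 */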
/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
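
/*
 * Example: fls(0) == 0, fls(1) == 1, fls(0x80000000) == 32.
 */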
/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
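
/*
 * Example: fls64(0) == 0, fls64(1) == 1, fls64(1ULL << 63) == 64.
 */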
#else
#include <asm-generic/bitops/fls64.h>
#endif

#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */