#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#ifdef __KERNEL__
#define BIT(nr)			(1UL << (nr))
#define BIT_ULL(nr)		(1ULL << (nr))
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE		8
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif

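/*
 * Illustrative sketch, not part of the original header: BIT_MASK() and
 * BIT_WORD() locate a bit inside an array of unsigned long, which is how
 * the bitmap helpers address their storage. 'map' is a stand-in name.
 *
 *	unsigned long map[BITS_TO_LONGS(128)];
 *
 *	map[BIT_WORD(70)] |= BIT_MASK(70);	// set bit 70 by hand
 */
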
/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example
 * GENMASK_ULL(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
 */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

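/*
 * Illustrative example, not part of the original header: GENMASK(7, 4)
 * sets bits 4..7 inclusive, i.e. 0xf0, which is handy for register
 * fields. 'reg' below is a hypothetical register value.
 *
 *	u32 field = (reg & GENMASK(7, 4)) >> 4;	// extract bits 7:4
 */
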
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

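/*
 * Usage note (added): these are the generic software population-count
 * fallbacks; callers normally use the hweight8/16/32/64() wrappers
 * provided via <asm/bitops.h>. E.g. hweight32(0xf0f0) evaluates to 8.
 */
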
/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

/*
 * Provide __deprecated wrappers for the new interface, avoid flag day changes.
 * We need the ugly external functions to break header recursion hell.
 */
#ifndef smp_mb__before_clear_bit
static inline void __deprecated smp_mb__before_clear_bit(void)
{
	extern void __smp_mb__before_atomic(void);
	__smp_mb__before_atomic();
}
#endif

#ifndef smp_mb__after_clear_bit
static inline void __deprecated smp_mb__after_clear_bit(void)
{
	extern void __smp_mb__after_atomic(void);
	__smp_mb__after_atomic();
}
#endif

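/*
 * Migration sketch (added): old callers paired the deprecated barriers
 * with bit operations; new code should use the atomic-op names instead:
 *
 *	clear_bit(0, &word);
 *	smp_mb__after_atomic();		// was: smp_mb__after_clear_bit()
 */
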
#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
	for ((bit) = find_next_zero_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

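/*
 * Illustrative example, not part of the original header: walk every set
 * bit in a small bitmap. 'map' and 'bit' are local stand-ins.
 *
 *	DECLARE_BITMAP(map, 64);
 *	unsigned int bit;
 *
 *	for_each_set_bit(bit, map, 64)
 *		pr_info("bit %u is set\n", bit);
 */
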
static __inline__ int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __inline__ int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

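/*
 * Examples (added): get_count_order() rounds up to the next power-of-two
 * exponent: get_count_order(16) == 4, get_count_order(17) == 5.
 */
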
static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

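/*
 * Example (added): hweight_long(0xffUL) == 8 regardless of whether
 * unsigned long is 32 or 64 bits wide.
 */
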
/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << shift) | (word >> (64 - shift));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> shift) | (word << (64 - shift));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> shift) | (word << (32 - shift));
}

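/*
 * Example (added): rol32(0x80000001, 1) == 0x00000003. Note that a
 * shift of 0 hits "word >> 32", which C leaves undefined, so callers
 * are expected to pass 0 < shift < 32 (and likewise for the other
 * rotate widths).
 */
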
/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << shift) | (word >> (16 - shift));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> shift) | (word << (16 - shift));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << shift) | (word >> (8 - shift));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> shift) | (word << (8 - shift));
}

/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 */
static inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}

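/*
 * Example (added): treating bit 7 as the sign bit,
 * sign_extend32(0x80, 7) == (__s32)0xffffff80 == -128.
 */
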
static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

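/*
 * Example (added): fls_long() returns the 1-based index of the most
 * significant set bit, so fls_long(0x10) == 5 and fls_long(0) == 0.
 */
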
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}

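/*
 * Example (added): __ffs64(0x100) == 8. As the comment above says, the
 * result is undefined for __ffs64(0), so test the word first.
 */
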
#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, _mask, _bits)			\
({								\
	const typeof(*ptr) mask = (_mask), bits = (_bits);	\
	typeof(*ptr) old, new;					\
								\
	do {							\
		old = ACCESS_ONCE(*ptr);			\
		new = (old & ~mask) | bits;			\
	} while (cmpxchg(ptr, old, new) != old);		\
								\
	new;							\
})
#endif

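/*
 * Illustrative use (added), with 'flags' as a stand-in variable:
 * atomically replace the low byte of a word with 0x12, retrying the
 * cmpxchg() until no concurrent update intervenes.
 *
 *	set_mask_bits(&flags, 0xff, 0x12);
 */
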
#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the last set bit, or size.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif

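/*
 * Example (added): for a 64-bit bitmap with only bits 3 and 10 set,
 * find_last_bit(map, 64) returns 10; with no bits set it returns 64.
 */
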
#endif /* __KERNEL__ */
#endif