/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H

#include <asm/types.h>
#include <linux/bits.h>

#include <uapi/linux/kernel.h>

/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
#else
# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
#endif
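
/*
 * Worked example (illustrative, not part of the original header):
 * aligned_byte_mask(2) selects the first two bytes of a long as it sits
 * in memory, so the constant depends on endianness:
 *
 *        little-endian:        (1UL << 16) - 1          == 0x000000000000ffff
 *        big-endian (64-bit):  ~0xffUL << (64 - 8 - 16) == 0xffff000000000000
 */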

#define BITS_PER_TYPE(type)     (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)       __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr)         __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr)         __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr)       __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
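
/*
 * Worked example (illustrative, assuming a 64-bit long): these helpers
 * round up, so a 65-bit bitmap needs two longs while nine bits still
 * fit in two bytes:
 *
 *        BITS_PER_TYPE(u32) == 32
 *        BITS_TO_LONGS(65)  == 2        (ceil(65 / 64))
 *        BITS_TO_BYTES(9)   == 2        (ceil(9 / 8))
 */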

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_set_bit(bit, addr, size) \
        for ((bit) = find_first_bit((addr), (size));            \
             (bit) < (size);                                    \
             (bit) = find_next_bit((addr), (size), (bit) + 1))
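
/*
 * Usage sketch (hypothetical bitmap, not from this file): walk the set
 * bits of a small bitmap. DECLARE_BITMAP() and bitmap_zero() come from
 * the kernel's bitmap API.
 *
 *        DECLARE_BITMAP(map, 16);
 *        unsigned int bit;
 *
 *        bitmap_zero(map, 16);
 *        __set_bit(3, map);
 *        __set_bit(9, map);
 *        for_each_set_bit(bit, map, 16)
 *                pr_info("bit %u set\n", bit);   // visits 3, then 9
 */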

/* same as for_each_set_bit() but start the search at @bit */
#define for_each_set_bit_from(bit, addr, size) \
        for ((bit) = find_next_bit((addr), (size), (bit));      \
             (bit) < (size);                                    \
             (bit) = find_next_bit((addr), (size), (bit) + 1))

#define for_each_clear_bit(bit, addr, size) \
        for ((bit) = find_first_zero_bit((addr), (size));       \
             (bit) < (size);                                    \
             (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but start the search at @bit */
#define for_each_clear_bit_from(bit, addr, size) \
        for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
             (bit) < (size);                                    \
             (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/**
 * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
 * @start: bit offset to start search and to store the current iteration offset
 * @clump: location to store copy of current 8-bit clump
 * @bits: bitmap address to base the search on
 * @size: bitmap size in number of bits
 */
#define for_each_set_clump8(start, clump, bits, size) \
        for ((start) = find_first_clump8(&(clump), (bits), (size)); \
             (start) < (size); \
             (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
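
/*
 * Usage sketch (hypothetical values): with a bitmap whose first 16 bits
 * are 0x3808, each iteration yields one byte-aligned 8-bit window that
 * contains set bits, together with its starting bit offset.
 *
 *        unsigned long map[1] = { 0x3808 };
 *        unsigned long clump, start;
 *
 *        for_each_set_clump8(start, clump, map, 16)
 *                pr_info("clump 0x%02lx at bit %lu\n", clump, start);
 *        // prints "clump 0x08 at bit 0", then "clump 0x38 at bit 8"
 */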

static inline int get_bitmask_order(unsigned int count)
{
        int order;

        order = fls(count);
        return order;   /* We could be slightly more clever with -1 here... */
}

static __always_inline unsigned long hweight_long(unsigned long w)
{
        return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
        return (word << (shift & 63)) | (word >> ((-shift) & 63));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
        return (word >> (shift & 63)) | (word << ((-shift) & 63));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
        return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
        return (word >> (shift & 31)) | (word << ((-shift) & 31));
}
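
/*
 * Worked example (illustrative): the "& 31" masking wraps the shift
 * count, so a rotate by 0 (or by a full 32) is well-defined and every
 * individual shift stays in range:
 *
 *        rol32(0x80000001, 1) == 0x00000003
 *        ror32(0x80000001, 1) == 0xc0000000
 *        rol32(0xdeadbeef, 0) == 0xdeadbeef
 */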

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
        return (word << (shift & 15)) | (word >> ((-shift) & 15));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
        return (word >> (shift & 15)) | (word << ((-shift) & 15));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
        return (word << (shift & 7)) | (word >> ((-shift) & 7));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
        return (word >> (shift & 7)) | (word << ((-shift) & 7));
}

/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
        __u8 shift = 31 - index;
        return (__s32)(value << shift) >> shift;
}
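
/*
 * Worked example (illustrative): treating bit 7 as the sign bit turns
 * the 8-bit two's-complement pattern 0x80 into -128:
 *
 *        sign_extend32(0x80, 7) == (__s32)0xffffff80 == -128
 *        sign_extend32(0x7f, 7) == 127
 */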

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
        __u8 shift = 63 - index;
        return (__s64)(value << shift) >> shift;
}

static inline unsigned fls_long(unsigned long l)
{
        if (sizeof(l) == 4)
                return fls(l);
        return fls64(l);
}

static inline int get_count_order(unsigned int count)
{
        if (count == 0)
                return -1;

        return fls(--count);
}
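
/*
 * Worked example (illustrative): the order is the exponent of the next
 * power of two at or above @count, so exact powers of two are not
 * rounded up:
 *
 *        get_count_order(16) == 4
 *        get_count_order(17) == 5
 *        get_count_order(1)  == 0
 */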

/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: parameter
 *
 * It is the same as get_count_order() but takes a long type parameter.
 */
static inline int get_count_order_long(unsigned long l)
{
        if (l == 0UL)
                return -1;
        return (int)fls_long(--l);
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs().
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
        if (((u32)word) == 0UL)
                return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
        return __ffs((unsigned long)word);
}
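
/*
 * Worked example (illustrative): only bit 32 of the first value is set,
 * so the result is 32 on both 32-bit and 64-bit kernels (on 32-bit, the
 * low word is zero and the search continues in the high word):
 *
 *        __ffs64(0x100000000ULL) == 32
 *        __ffs64(0x18)           == 3
 */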

/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 * @value: the value to assign
 */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
                                       bool value)
{
        if (value)
                set_bit(nr, addr);
        else
                clear_bit(nr, addr);
}

static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
                                         bool value)
{
        if (value)
                __set_bit(nr, addr);
        else
                __clear_bit(nr, addr);
}
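
/*
 * Usage sketch (hypothetical flags word): assign_bit() is the atomic
 * set_bit()/clear_bit() pair behind one interface; __assign_bit() is
 * the non-atomic variant for bitmaps nothing else touches concurrently.
 *
 *        static unsigned long flags;
 *
 *        assign_bit(0, &flags, true);    // atomically sets bit 0
 *        assign_bit(0, &flags, false);   // atomically clears it again
 */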

#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits) \
({                                                              \
        const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);  \
        typeof(*(ptr)) old__, new__;                            \
                                                                \
        do {                                                    \
                old__ = READ_ONCE(*(ptr));                      \
                new__ = (old__ & ~mask__) | bits__;             \
        } while (cmpxchg(ptr, old__, new__) != old__);          \
                                                                \
        old__;                                                  \
})
#endif
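
/*
 * Usage sketch (hypothetical field layout): atomically replace the low
 * 8-bit field of a flags word, retrying via cmpxchg() until no other
 * CPU races the update; the previous value is returned.
 *
 *        unsigned long flags = 0xab12;
 *        unsigned long old;
 *
 *        old = set_mask_bits(&flags, 0xff, 0x34);
 *        // flags is now 0xab34, old is 0xab12
 */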

#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test) \
({                                                              \
        const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
        typeof(*(ptr)) old__, new__;                            \
                                                                \
        do {                                                    \
                old__ = READ_ONCE(*(ptr));                      \
                new__ = old__ & ~clear__;                       \
        } while (!(old__ & test__) &&                           \
                 cmpxchg(ptr, old__, new__) != old__);          \
                                                                \
        !(old__ & test__);                                      \
})
#endif
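
/*
 * Usage sketch (FLAG_PENDING and FLAG_LOCKED are hypothetical names;
 * BIT() comes from <linux/bits.h>, included above): clear FLAG_PENDING
 * only while FLAG_LOCKED is not set; the expression evaluates to true
 * when the guard bits were clear, i.e. the clear actually happened.
 *
 *        #define FLAG_PENDING  BIT(0)
 *        #define FLAG_LOCKED   BIT(1)
 *
 *        unsigned long flags = FLAG_PENDING;
 *
 *        if (bit_clear_unless(&flags, FLAG_PENDING, FLAG_LOCKED))
 *                pr_info("cleared\n");   // taken: FLAG_LOCKED was not set
 */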

#endif /* __KERNEL__ */
#endif /* _LINUX_BITOPS_H */