/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 * David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
 * O(1) scheduler patch
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are drivers (e.g., eepro100) which use these operations to
 * operate on hw-defined data-structures, so we can't easily change these
 * operations to force a bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = 1 << (nr & 31);
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old | bit;
        } while (cmpxchg_acq(m, old, new) != old);
}
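
/*
 * Illustrative usage sketch (added commentary, not from the original
 * header; the bitmap name is hypothetical). The cmpxchg_acq() loop above
 * retries until no other CPU has modified the word between the load and
 * the exchange, so concurrent callers are safe:
 *
 *      static unsigned long irq_pending[2];   // hypothetical 128-bit map
 *
 *      set_bit(69, irq_pending);      // 32-bit word 2, bit 5; atomic
 */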

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
        *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
        __u32 mask, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        mask = ~(1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old & mask;
        } while (cmpxchg_acq(m, old, new) != old);
}
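
/*
 * Illustrative sketch (added commentary; LOCK_BIT and word are
 * hypothetical names). Because clear_bit() itself has no barrier, a
 * caller releasing a lock bit pairs it with the barriers named above:
 *
 *      smp_mb__before_atomic();        // order the critical section...
 *      clear_bit(LOCK_BIT, &word);     // ...before the clear is visible
 */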

/**
 * clear_bit_unlock - Clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and may not be reordered. It does
 * contain a memory barrier suitable for unlock type operations.
 */
static __inline__ void
clear_bit_unlock (int nr, volatile void *addr)
{
        __u32 mask, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        mask = ~(1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old & mask;
        } while (cmpxchg_rel(m, old, new) != old);
}

/**
 * __clear_bit_unlock - Non-atomically clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Similarly to clear_bit_unlock, the implementation uses a store
 * with release semantics. See also arch_spin_unlock().
 */
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
{
        __u32 * const m = (__u32 *) addr + (nr >> 5);
        __u32 const new = *m & ~(1 << (nr & 31));

        ia64_st4_rel_nta(m, new);
}
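
/*
 * Note (added commentary, not from the original header): unlike
 * clear_bit_unlock(), this variant issues a single release store
 * (st4.rel.nta) instead of a cmpxchg loop, so it is only safe when no
 * other CPU can concurrently modify other bits in the same word, e.g.
 * when the word is protected by the very lock bit being released.
 */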

/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
        *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = (1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old ^ bit;
        } while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
        *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = 1 << (nr & 31);
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old | bit;
        } while (cmpxchg_acq(m, old, new) != old);
        return (old & bit) != 0;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on ia64
 */
#define test_and_set_bit_lock test_and_set_bit

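/*
 * Illustrative trylock-style sketch (added commentary; LOCK_BIT and
 * word are hypothetical names). The returned old value tells the
 * caller whether it won the bit:
 *
 *      if (!test_and_set_bit_lock(LOCK_BIT, &word)) {
 *              // bit was clear: we own it now (acquire semantics)
 *              clear_bit_unlock(LOCK_BIT, &word);      // release it
 *      }
 */
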
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
        __u32 *p = (__u32 *) addr + (nr >> 5);
        __u32 m = 1 << (nr & 31);
        int oldbitset = (*p & m) != 0;

        *p |= m;
        return oldbitset;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
        __u32 mask, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        mask = ~(1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old & mask;
        } while (cmpxchg_acq(m, old, new) != old);
        return (old & ~mask) != 0;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_clear_bit(int nr, volatile void * addr)
{
        __u32 *p = (__u32 *) addr + (nr >> 5);
        __u32 m = 1 << (nr & 31);
        int oldbitset = (*p & m) != 0;

        *p &= ~m;
        return oldbitset;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = (1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old ^ bit;
        } while (cmpxchg_acq(m, old, new) != old);
        return (old & bit) != 0;
}

/**
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 */
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
        __u32 old, bit = (1 << (nr & 31));
        __u32 *m = (__u32 *) addr + (nr >> 5);

        old = *m;
        *m = old ^ bit;
        return (old & bit) != 0;
}

static __inline__ int
test_bit (int nr, const volatile void *addr)
{
        return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}

/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit-number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
        unsigned long result;

        result = ia64_popcnt(x & (~x - 1));
        return result;
}
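
/*
 * Worked example (added commentary): for x = 0xb7 (binary 10110111),
 * the lowest zero bit is bit 3. ~x - 1 sets exactly the bits below that
 * zero, so x & (~x - 1) = 00000111, and popcnt of those three ones
 * yields 3.
 */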

/**
 * __ffs - find first set bit in word
 * @x: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
        unsigned long result;

        result = ia64_popcnt((x-1) & ~x);
        return result;
}
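
/*
 * Worked example (added commentary): for x = 0xb8 (binary 10111000),
 * the lowest set bit is bit 3. x - 1 = 10110111 turns the low zeros
 * into ones, (x - 1) & ~x = 00000111 isolates exactly those low bits,
 * and popcnt then yields 3.
 */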

#ifdef __KERNEL__

/*
 * Return bit number of last (most-significant) bit set. Undefined
 * for x==0. Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
        long double d = x;
        long exp;

        exp = ia64_getf_exp(d);
        return exp - 0xffff;
}
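
/*
 * Added commentary: the conversion to long double lets the FPU
 * normalize x, and ia64_getf_exp() reads back the biased exponent.
 * E.g. x = 9 becomes 1.001b * 2^3, whose biased exponent is
 * 0xffff + 3, so subtracting the 0xffff bias returns 3.
 */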

/*
 * Find the last (most significant) bit set. Returns 0 for x==0 and
 * bits are numbered from 1..32 (e.g., fls(9) == 4).
 */
static inline int
fls (int t)
{
        unsigned long x = t & 0xffffffffu;

        if (!x)
                return 0;
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        return ia64_popcnt(x);
}
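
/*
 * Worked example (added commentary): the shift-or cascade smears the
 * most significant set bit into every lower position, e.g. for t = 9:
 * 01001 -> 01101 -> 01111, and popcnt(01111) = 4 = fls(9). Counting
 * the resulting ones gives the 1-based index of the highest set bit.
 */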

/*
 * Find the last (most significant) bit set. Undefined for x==0.
 * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
 */
static inline unsigned long
__fls (unsigned long x)
{
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        x |= x >> 32;
        return ia64_popcnt(x) - 1;
}

#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/builtin-ffs.h>

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
static __inline__ unsigned long __arch_hweight64(unsigned long x)
{
        unsigned long result;
        result = ia64_popcnt(x);
        return result;
}

#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
#define __arch_hweight8(x) ((unsigned int) __arch_hweight64((x) & 0xfful))
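
/*
 * Usage sketch (added commentary): all widths funnel into the single
 * popcnt instruction, e.g. __arch_hweight32(0xf0f0f0f0) evaluates to
 * 16 and __arch_hweight8(0x5a) to 4.
 */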

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */