/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_BITOPS_H
#define _ASM_RISCV_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error "Only <linux/bitops.h> can be included directly"
#endif /* _LINUX_BITOPS_H */

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/bitsperlong.h>

#ifndef smp_mb__before_clear_bit
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
#endif /* smp_mb__before_clear_bit */

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>

#include <asm-generic/bitops/hweight.h>

#if (BITS_PER_LONG == 64)
#define __AMO(op)	"amo" #op ".d"
#elif (BITS_PER_LONG == 32)
#define __AMO(op)	"amo" #op ".w"
#else
#error "Unexpected BITS_PER_LONG"
#endif

#define __test_and_op_bit_ord(op, mod, nr, addr, ord)		\
({								\
	unsigned long __res, __mask;				\
	__mask = BIT_MASK(nr);					\
	__asm__ __volatile__ (					\
		__AMO(op) #ord " %0, %2, %1"			\
		: "=r" (__res), "+A" (addr[BIT_WORD(nr)])	\
		: "r" (mod(__mask))				\
		: "memory");					\
	((__res & __mask) != 0);				\
})

#define __op_bit_ord(op, mod, nr, addr, ord)			\
	__asm__ __volatile__ (					\
		__AMO(op) #ord " zero, %1, %0"			\
		: "+A" (addr[BIT_WORD(nr)])			\
		: "r" (mod(BIT_MASK(nr)))			\
		: "memory");

#define __test_and_op_bit(op, mod, nr, addr)			\
	__test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
#define __op_bit(op, mod, nr, addr)				\
	__op_bit_ord(op, mod, nr, addr, )

/* Bitmask modifiers */
#define __NOP(x)	(x)
#define __NOT(x)	(~(x))

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation may be reordered on architectures other than x86.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(or, __NOP, nr, addr);
}
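
/*
 * Illustrative sketch (not part of the upstream header): a common use of
 * test_and_set_bit() is to let exactly one caller claim ownership of a
 * flag.  The function and bit names below are hypothetical.
 */
static inline int example_claim_flag(volatile unsigned long *flags)
{
	/* Returns nonzero only for the caller that saw the bit still clear. */
	return !test_and_set_bit(0, flags);
}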

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation can be reordered on architectures other than x86.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(and, __NOT, nr, addr);
}
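
/*
 * Illustrative sketch (not part of the upstream header): consuming a
 * pending-work flag with test_and_clear_bit(), so that only one caller
 * handles the event.  The names below are hypothetical.
 */
static inline int example_take_pending(volatile unsigned long *pending, int nr)
{
	/* Returns nonzero if the bit was set and this caller cleared it. */
	return test_and_clear_bit(nr, pending);
}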

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(xor, __NOP, nr, addr);
}
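
/*
 * Illustrative sketch (not part of the upstream header): toggling a bit
 * and learning its previous state in one atomic step with
 * test_and_change_bit().  The names below are hypothetical.
 */
static inline int example_toggle_mode(volatile unsigned long *state, int nr)
{
	/* Returns nonzero if the bit was set before it was flipped. */
	return test_and_change_bit(nr, state);
}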

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * do not rely on any ordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(or, __NOP, nr, addr);
}
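
/*
 * Illustrative sketch (not part of the upstream header): because set_bit()
 * indexes past a single word, a multi-word bitmap can be addressed by bit
 * number alone.  The function and parameter names below are hypothetical.
 */
static inline void example_mark_id(volatile unsigned long *bitmap, int id)
{
	/* Bit 'id' may land in any word of the bitmap array. */
	set_bit(id, bitmap);
}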

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * do not rely on any ordering guarantees.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(and, __NOT, nr, addr);
}
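
/*
 * Illustrative sketch (not part of the upstream header): releasing an ID
 * back to a bitmap with clear_bit().  Since no ordering is implied, a
 * caller that needs a barrier must add one, e.g. the
 * smp_mb__before_clear_bit() defined above.  The names below are
 * hypothetical.
 */
static inline void example_release_id(volatile unsigned long *bitmap, int id)
{
	smp_mb__before_clear_bit();	/* order prior stores before the release */
	clear_bit(id, bitmap);
}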

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() may be reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(xor, __NOP, nr, addr);
}
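
/*
 * Illustrative sketch (not part of the upstream header): flipping a
 * polarity flag with change_bit() when the old value is not needed;
 * test_and_change_bit() above is the variant that returns it.  The names
 * below are hypothetical.
 */
static inline void example_flip_polarity(volatile unsigned long *flags, int nr)
{
	/* Atomic toggle, no return value and no ordering guarantee. */
	change_bit(nr, flags);
}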

/**
 * test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics.
 * It can be used to implement bit locks.
 */
static inline int test_and_set_bit_lock(
	unsigned long nr, volatile unsigned long *addr)
{
	return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
}
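
/*
 * Illustrative sketch (not part of the upstream header): acquiring a bit
 * lock by spinning on test_and_set_bit_lock(); the acquire semantics order
 * the critical section after the successful set.  The names below are
 * hypothetical.
 */
static inline void example_bit_lock(volatile unsigned long *word, int nr)
{
	while (test_and_set_bit_lock(nr, word))
		barrier();	/* bit already held; spin until it is released */
}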

/**
 * clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static inline void clear_bit_unlock(
	unsigned long nr, volatile unsigned long *addr)
{
	__op_bit_ord(and, __NOT, nr, addr, .rl);
}
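
/*
 * Illustrative sketch (not part of the upstream header): pairing
 * clear_bit_unlock() with the example_bit_lock() helper sketched above to
 * bound a critical section.  The names below are hypothetical.
 */
static inline void example_bit_unlock(volatile unsigned long *word, int nr)
{
	/* The release semantics order the critical section before the clear. */
	clear_bit_unlock(nr, word);
}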

/**
 * __clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is like clear_bit_unlock(); however, it is not atomic.
 * It does provide release barrier semantics, so it can be used to unlock
 * a bit lock, but it should only be used if no other CPU can modify
 * any bits in the memory until the lock is released (a good example is
 * if the bit lock itself protects access to the other bits in the word).
 *
 * On RISC-V systems there seems to be no benefit to taking advantage of the
 * non-atomic property here: it's a lot more instructions and we still have to
 * provide release semantics anyway.
 */
static inline void __clear_bit_unlock(
	unsigned long nr, volatile unsigned long *addr)
{
	clear_bit_unlock(nr, addr);
}
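
/*
 * Illustrative sketch (not part of the upstream header): the situation the
 * comment above describes, where bit 0 is a lock protecting the remaining
 * bits of the same word, so __clear_bit_unlock() is safe for the unlock.
 * The layout and names below are hypothetical.
 */
static inline void example_update_then_unlock(volatile unsigned long *word,
					      unsigned long data_bits)
{
	/* Caller holds bit 0; no other CPU may touch *word until it is released. */
	*word = (*word & 1UL) | (data_bits << 1);
	__clear_bit_unlock(0, word);
}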

#undef __test_and_op_bit
#undef __op_bit
#undef __NOP
#undef __NOT
#undef __AMO

#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* _ASM_RISCV_BITOPS_H */