/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>
#ifndef CONFIG_ARC_HAS_LLSC
#include <asm/smp.h>
#endif

#ifdef CONFIG_ARC_HAS_LLSC

/*
 * Hardware assisted Atomic-R-M-W
 */

#define BIT_OP(op, c_op, asm_op)					\
static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned int temp;						\
									\
	m += nr >> 5;							\
									\
	nr &= 0x1f;							\
									\
	__asm__ __volatile__(						\
	"1:	llock	%0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond	%0, [%1]	\n"				\
	"	bnz	1b		\n"				\
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
	: "r"(m),	/* Not "m": llock only supports reg direct addr mode */	\
	  "ir"(nr)							\
	: "cc");							\
}

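/*
 * Illustrative expansion sketch (not a definition; the BIT_OPS() lines
 * further down do the actual instantiation). With BIT_OPS(set, |, bset),
 * set_bit() becomes an LLOCK/SCOND retry loop, roughly:
 *
 *	m += nr >> 5;				// word holding the bit
 *	nr &= 0x1f;				// bit position in that word
 *	do {
 *		temp = *m;			// llock: load, arm lock flag
 *		temp |= 1UL << nr;		// bset, via #asm_op
 *	} while (!store_conditional(m, temp));	// scond + bnz: retry if the
 *						// word changed under us
 *
 * (store_conditional() is a hypothetical name for the scond step.)
 */
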
/*
 * Semantically:
 *    Test the bit
 *	if clear
 *		set it and return 0 (old value)
 *	else
 *		return 1 (old value).
 *
 * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
 * and the old value of the bit is returned
 */
#define TEST_N_BIT_OP(op, c_op, asm_op)					\
static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old, temp;					\
									\
	m += nr >> 5;							\
									\
	nr &= 0x1f;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock	%0, [%2]	\n"				\
	"	" #asm_op " %1, %0, %3	\n"				\
	"	scond	%1, [%2]	\n"				\
	"	bnz	1b		\n"				\
	: "=&r"(old), "=&r"(temp)					\
	: "r"(m), "ir"(nr)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return (old & (1 << nr)) != 0;					\
}

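/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 * the test-and-set flavour is the classic "claim something exactly once"
 * primitive.
 *
 *	static unsigned long claimed[1];
 *
 *	if (!test_and_set_bit(5, claimed)) {
 *		// bit was 0: this caller set it and owns the resource
 *	} else {
 *		// bit was already 1: somebody else claimed it first
 *	}
 */
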
#elif !defined(CONFIG_ARC_PLAT_EZNPS)

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 *
 * There's "significant" micro-optimization in writing our own variants of
 * bitops (over generic variants)
 *
 * (1) The generic APIs have "signed" @nr while we have it "unsigned"
 *     This avoids extra code to be generated for pointer arithmetic, since
 *     the compiler is otherwise "not sure" that the index is not negative
 * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL) etc
 *     only consider bottom 5 bits of @nr, so NO need to mask them off.
 *     (GCC Quirk: however for constant @nr we still need to do the masking
 *     at compile time)
 */

#define BIT_OP(op, c_op, asm_op)					\
static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long temp, flags;					\
	m += nr >> 5;							\
									\
	/*								\
	 * spin lock/unlock provide the needed smp_mb() before/after	\
	 */								\
	bitops_lock(flags);						\
									\
	temp = *m;							\
	*m = temp c_op (1UL << (nr & 0x1f));				\
									\
	bitops_unlock(flags);						\
}

#define TEST_N_BIT_OP(op, c_op, asm_op)					\
static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old, flags;					\
	m += nr >> 5;							\
									\
	bitops_lock(flags);						\
									\
	old = *m;							\
	*m = old c_op (1UL << (nr & 0x1f));				\
									\
	bitops_unlock(flags);						\
									\
	return (old & (1UL << (nr & 0x1f))) != 0;			\
}

#else	/* CONFIG_ARC_PLAT_EZNPS */

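/*
 * Note (editorial, inferred from the code below): in the EZNPS variants,
 * @asm_op is not a mnemonic but a raw instruction encoding (one of the
 * CTOP_INST_* values passed in via BIT_OPS() further down), emitted
 * verbatim with .word. The instruction implicitly operates on r2 (operand)
 * and r3 (address), hence the explicit moves and the r2/r3 clobbers.
 */
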
#define BIT_OP(op, c_op, asm_op)					\
static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	m += nr >> 5;							\
									\
	nr = (1UL << (nr & 0x1f));					\
	if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3)			\
		nr = ~nr;						\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(nr), "r"(m), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
}

#define TEST_N_BIT_OP(op, c_op, asm_op)					\
static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old;						\
									\
	m += nr >> 5;							\
									\
	nr = old = (1UL << (nr & 0x1f));				\
	if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3)			\
		old = ~old;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(old)							\
	: "r"(m), "i"(asm_op)						\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return (old & nr) != 0;						\
}

#endif /* CONFIG_ARC_PLAT_EZNPS */

/***************************************
 * Non atomic variants
 **************************************/

#define __BIT_OP(op, c_op, asm_op)					\
static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long temp;						\
	m += nr >> 5;							\
									\
	temp = *m;							\
	*m = temp c_op (1UL << (nr & 0x1f));				\
}

#define __TEST_N_BIT_OP(op, c_op, asm_op)				\
static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old;						\
	m += nr >> 5;							\
									\
	old = *m;							\
	*m = old c_op (1UL << (nr & 0x1f));				\
									\
	return (old & (1UL << (nr & 0x1f))) != 0;			\
}

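/*
 * Illustrative sketch (hypothetical caller): the double-underscore variants
 * above are plain load/op/store with no locking, so they are only safe when
 * the word cannot change underneath, e.g. data not yet visible to others:
 *
 *	unsigned long map[2] = { 0, 0 };	// still private to this thread
 *
 *	__set_bit(0, map);			// no atomicity needed here
 *	__clear_bit(0, map);
 */
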
#define BIT_OPS(op, c_op, asm_op)					\
									\
	/* set_bit(), clear_bit(), change_bit() */			\
	BIT_OP(op, c_op, asm_op)					\
									\
	/* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
	TEST_N_BIT_OP(op, c_op, asm_op)					\
									\
	/* __set_bit(), __clear_bit(), __change_bit() */		\
	__BIT_OP(op, c_op, asm_op)					\
									\
	/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
	__TEST_N_BIT_OP(op, c_op, asm_op)

#ifndef CONFIG_ARC_PLAT_EZNPS
BIT_OPS(set, |, bset)
BIT_OPS(clear, & ~, bclr)
BIT_OPS(change, ^, bxor)
#else
BIT_OPS(set, |, CTOP_INST_AOR_DI_R2_R2_R3)
BIT_OPS(clear, & ~, CTOP_INST_AAND_DI_R2_R2_R3)
BIT_OPS(change, ^, CTOP_INST_AXOR_DI_R2_R2_R3)
#endif

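/*
 * For reference, each BIT_OPS() line above stamps out all four flavours,
 * e.g. BIT_OPS(set, |, bset) provides:
 *
 *	set_bit(nr, m)			// atomic
 *	test_and_set_bit(nr, m)		// atomic, returns old bit
 *	__set_bit(nr, m)		// non-atomic
 *	__test_and_set_bit(nr, m)	// non-atomic, returns old bit
 */
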
/*
 * This routine doesn't need to be atomic.
 */
static inline int
test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	unsigned long mask;

	addr += nr >> 5;

	mask = 1UL << (nr & 0x1f);

	return ((mask & *addr) != 0);
}

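/*
 * Worked example (illustrative): for nr = 37, addr advances by 37 >> 5 = 1
 * word, and mask = 1UL << (37 & 0x1f) = 1UL << 5, so test_bit(37, map)
 * reports bit 5 of map[1].
 */
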
#ifdef CONFIG_ISA_ARCOMPACT

/*
 * Count the number of zeros, starting from MSB
 * Helper for fls( ) friends
 * This is a pure count, so (1-32) or (0-31) doesn't apply
 * It could be 0 to 32, based on the number of leading 0's
 * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF) = 0, clz(0) = 32, clz(1) = 31
 */
static inline __attribute__ ((const)) int clz(unsigned int x)
{
	unsigned int res;

	__asm__ __volatile__(
	"	norm.f	%0, %1		\n"
	"	mov.n	%0, 0		\n"
	"	add.p	%0, %0, 1	\n"
	: "=r"(res)
	: "r"(x)
	: "cc");

	return res;
}

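/*
 * A minimal C sketch of the same contract (illustrative only; the asm above
 * is what is actually used), written with the GCC builtin and special-casing
 * 0 since __builtin_clz(0) is undefined:
 *
 *	static inline int clz_sketch(unsigned int x)
 *	{
 *		return x ? __builtin_clz(x) : 32;
 *	}
 */
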
static inline int constant_fls(int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

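/*
 * Worked example (illustrative): constant_fls(0x1234)
 *	upper 16 bits clear	-> x <<= 16 (0x12340000), r = 16
 *	0xff000000 overlap	-> no shift (0x12 in top byte)
 *	0xf0000000 overlap	-> no shift (0x1 in top nibble)
 *	0xc0000000 clear	-> x <<= 2, r = 14
 *	0x80000000 clear	-> x <<= 1, r = 13
 * Result: 13, i.e. bit 12 (0x1000) is the highest set bit, 1-based.
 */
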
/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned long x)
{
	if (__builtin_constant_p(x))
		return constant_fls(x);

	return 32 - clz(x);
}

/*
 * __fls: Similar to fls, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	if (!x)
		return 0;
	else
		return fls(x) - 1;
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
#define ffs(x)	({ unsigned long __t = (x); fls(__t & -__t); })

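/*
 * The (__t & -__t) trick isolates the lowest set bit: e.g. for x = 0b0110,
 * -x is ...11111010 in two's complement, so x & -x = 0b0010, and
 * fls(0b0010) = 2 = ffs(0b0110).
 */
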
/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
{
	if (!word)
		return word;

	return ffs(word) - 1;
}

#else	/* CONFIG_ISA_ARCV2 */

/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned long x)
{
	int n;

	asm volatile(
	"	fls.f	%0, %1		\n"	/* 0:31; 0(Z) if src 0 */
	"	add.nz	%0, %0, 1	\n"	/* 0:31 -> 1:32 */
	: "=r"(n)	/* Early clobber not needed */
	: "r"(x)
	: "cc");

	return n;
}

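/*
 * Worked example (illustrative, per the insn comments above): for
 * x = 0x80000000, FLS.F yields 31 with Z clear, so ADD.NZ bumps it to 32;
 * for x = 0 it yields 0 with Z set, the add is skipped, and fls(0) = 0.
 */
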
/*
 * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	/* FLS insn has exactly same semantics as the API */
	return __builtin_arc_fls(x);
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
static inline __attribute__ ((const)) int ffs(unsigned long x)
{
	int n;

	asm volatile(
	"	ffs.f	%0, %1		\n"	/* 0:31; 31(Z) if src 0 */
	"	add.nz	%0, %0, 1	\n"	/* 0:31 -> 1:32 */
	"	mov.z	%0, 0		\n"	/* 31(Z) -> 0 */
	: "=r"(n)	/* Early clobber not needed */
	: "r"(x)
	: "cc");

	return n;
}

/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
{
	unsigned long n;

	asm volatile(
	"	ffs.f	%0, %1		\n"	/* 0:31; 31(Z) if src 0 */
	"	mov.z	%0, 0		\n"	/* 31(Z) -> 0 */
	: "=r"(n)
	: "r"(x)
	: "cc");

	return n;
}

#endif	/* CONFIG_ISA_ARCOMPACT */

/*
 * ffz = Find First Zero in word.
 * @return: [0-31]; for an all-1's word there is no zero to report
 * (note the __ffs() above returns 0 for a 0 input, not 32)
 */
#define ffz(x)	__ffs(~(x))

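/*
 * Worked example (illustrative): ffz(0x000000ff) inverts to 0xffffff00,
 * whose lowest set bit is bit 8, so the first zero of the original word
 * is at position 8.
 */
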
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* !__ASSEMBLY__ */

#endif