/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)  ((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) (((v)->counter) = (i))

#ifdef CONFIG_ARC_STAR_9000923308

#define SCOND_FAIL_RETRY_VAR_DEF                                        \
        unsigned int delay = 1, tmp;                                    \

#define SCOND_FAIL_RETRY_ASM                                            \
        "       bz      4f                      \n"                     \
        "   ; --- scond fail delay ---          \n"                     \
        "       mov     %[tmp], %[delay]        \n" /* tmp = delay */   \
        "2:     brne.d  %[tmp], 0, 2b           \n" /* while (tmp != 0) */ \
        "       sub     %[tmp], %[tmp], 1       \n" /* tmp-- */         \
        "       rol     %[delay], %[delay]      \n" /* delay *= 2 */    \
        "       b       1b                      \n" /* start over */    \
        "4: ; --- success ---                   \n"                     \

#define SCOND_FAIL_RETRY_VARS                                           \
          ,[delay] "+&r" (delay), [tmp] "=&r" (tmp)                     \

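/*
 * Rough C equivalent of the retry path generated above -- an illustrative
 * sketch only, not actual kernel code; LLOCK()/SCOND() stand in for the
 * llock/scond insns and 'op' for whichever asm_op the macro is built with:
 *
 *      unsigned int delay = 1, tmp, val;
 *
 *      for (;;) {
 *              val = LLOCK(&v->counter);
 *              val = op(val, i);
 *              if (SCOND(&v->counter, val))    // Z flag set: store took effect
 *                      break;                  // "bz 4f"
 *              for (tmp = delay; tmp != 0; tmp--)
 *                      ;                       // busy-wait 'delay' iterations
 *              delay *= 2;                     // exponential backoff ("rol")
 *      }
 */
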
#else   /* !CONFIG_ARC_STAR_9000923308 */

#define SCOND_FAIL_RETRY_VAR_DEF

#define SCOND_FAIL_RETRY_ASM                                            \
        "       bnz     1b                      \n"                     \

#define SCOND_FAIL_RETRY_VARS

#endif

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned int val;                                               \
        SCOND_FAIL_RETRY_VAR_DEF                                        \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
        "                                               \n"             \
        SCOND_FAIL_RETRY_ASM                                            \
                                                                        \
        : [val] "=&r"   (val) /* Early clobber to prevent reg reuse */  \
          SCOND_FAIL_RETRY_VARS                                         \
        : [ctr] "r"     (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
          [i]   "ir"    (i)                                             \
        : "cc");                                                        \
}                                                                       \

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned int val;                                               \
        SCOND_FAIL_RETRY_VAR_DEF                                        \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
         * LLOCK/SCOND themselves don't provide any such semantics      \
         */                                                             \
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
        "                                               \n"             \
        SCOND_FAIL_RETRY_ASM                                            \
                                                                        \
        : [val] "=&r"   (val)                                           \
          SCOND_FAIL_RETRY_VARS                                         \
        : [ctr] "r"     (&v->counter),                                  \
          [i]   "ir"    (i)                                             \
        : "cc");                                                        \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        return val;                                                     \
}
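
/*
 * The smp_mb() pair above makes atomic_##op##_return() a full barrier, as
 * the generic atomic API expects.  Purely illustrative (hypothetical
 * 'buf'/'wake_consumer' shown, not part of this file):
 *
 *      buf->data = payload;                    // must be visible before ...
 *      if (atomic_add_return(1, &buf->ready) == 1)
 *              wake_consumer(buf);             // ... the updated count is seen
 */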

#else   /* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

/* violating the atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) (((v)->counter) = (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
        /*
         * Independent of hardware support, all of the atomic_xxx() APIs need
         * to follow the same locking rules to make sure that a "hardware"
         * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
         * sequence.
         *
         * Thus atomic_set(), despite being a single insn (and seemingly
         * atomic), requires the locking.
         */
        unsigned long flags;

        atomic_ops_lock(flags);
        v->counter = i;
        atomic_ops_unlock(flags);
}
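
/*
 * Illustrative interleaving (hypothetical values) showing why the plain
 * store needs the lock: with atomics emulated under atomic_ops_lock(), an
 * unlocked atomic_set() could be silently overwritten by the write-back of
 * a concurrent locked read-modify-write:
 *
 *      CPU0: atomic_add(1, &v)             CPU1: atomic_set(&v, 0)
 *      lock; tmp = v.counter;  // tmp = 5
 *                                          v.counter = 0;  // plain store
 *      v.counter = tmp + 1;    // = 6
 *      unlock;
 *
 * The set is lost: v ends up as 6 instead of 0.
 */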

#endif

/*
 * Non-hardware-assisted atomic R-M-W:
 * locking changes to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        atomic_ops_lock(flags);                                         \
        v->counter c_op i;                                              \
        atomic_ops_unlock(flags);                                       \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned long flags;                                            \
        unsigned long temp;                                             \
                                                                        \
        /*                                                              \
         * spin lock/unlock provides the needed smp_mb() before/after   \
         */                                                             \
        atomic_ops_lock(flags);                                         \
        temp = v->counter;                                              \
        temp c_op i;                                                    \
        v->counter = temp;                                              \
        atomic_ops_unlock(flags);                                       \
                                                                        \
        return temp;                                                    \
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)
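
/*
 * For reference, ATOMIC_OPS(add, +=, add) above expands to both
 * ATOMIC_OP() and ATOMIC_OP_RETURN(), i.e. it generates:
 *
 *      static inline void atomic_add(int i, atomic_t *v);
 *      static inline int  atomic_add_return(int i, atomic_t *v);
 *
 * (likewise for sub), while ATOMIC_OP(and, &=, and) only generates
 * atomic_and() with no *_return variant.
 */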

#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)                                    \
({                                                                      \
        int c, old;                                                     \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
         * LLOCK/SCOND themselves don't provide any such semantics      \
         */                                                             \
        smp_mb();                                                       \
                                                                        \
        c = atomic_read(v);                                             \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
                c = old;                                                \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        c;                                                              \
})
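
/*
 * Illustrative use (hypothetical 'refcnt' shown): the generic
 * atomic_add_unless() in <linux/atomic.h> wraps this and returns non-zero
 * iff the add actually happened, e.g.
 *
 *      if (atomic_add_unless(&refcnt, 1, 0))
 *              ;       // got a reference: refcnt was not zero
 */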

#define atomic_inc_not_zero(v)          atomic_add_unless((v), 1, 0)

#define atomic_inc(v)                   atomic_add(1, v)
#define atomic_dec(v)                   atomic_sub(1, v)

#define atomic_inc_and_test(v)          (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)            atomic_add_return(1, (v))
#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)       (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)       (atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)                  { (i) }

#include <asm-generic/atomic64.h>

#endif

#endif