/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <asm/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <arch/chip.h>

#ifndef __ASSEMBLY__

/* Tile-specific routines to support <asm/atomic.h>. */
int _atomic_xchg(atomic_t *v, int n);
int _atomic_xchg_add(atomic_t *v, int i);
int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
int _atomic_cmpxchg(atomic_t *v, int o, int n);

/**
 * atomic_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline int atomic_xchg(atomic_t *v, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg(v, n);
}
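
/*
 * Illustrative sketch (a hypothetical helper, not part of the original
 * header): atomic_xchg() used as a test-and-clear, so that a pending
 * flag is consumed exactly once even when several CPUs race on it.
 */
static inline int __example_test_and_clear(atomic_t *pending)
{
	/* Returns the prior value and leaves zero behind. */
	return atomic_xchg(pending, 0);
}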

/**
 * atomic_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_cmpxchg(v, o, n);
}

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(v, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns the new value (@v + @i).
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(v, i) + i;
}

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(v, a, u) != u;
}
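
/*
 * Illustrative sketch (a hypothetical helper, not part of the original
 * header): atomic_add_unless() as a reference "tryget" that must not
 * revive an object whose count has already dropped to zero.
 */
static inline int __example_tryget(atomic_t *refcount)
{
	/* Non-zero on success; zero if the count was already zero. */
	return atomic_add_unless(refcount, 1, 0);
}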

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(v, n);
}
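
/*
 * Illustrative interleaving (an assumed scenario, not from the original
 * header) showing why a raw store would be unsafe here:
 *
 *   CPU 0: _atomic_xchg_add()          CPU 1: hypothetical raw set
 *   --------------------------         ---------------------------
 *   acquire hashed lock
 *   tmp = v->counter
 *                                      v->counter = n   (raw store)
 *   v->counter = tmp + i               (CPU 1's store is now lost)
 *   release hashed lock
 *
 * Routing atomic_set() through _atomic_xchg() makes the store take the
 * same lock, so it serializes with the read-modify-write sequences.
 */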

#define xchg(ptr, x) ((typeof(*(ptr))) \
	((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
	 atomic_xchg((atomic_t *)(ptr), (long)(x)) : \
	 __xchg_called_with_bad_pointer()))

#define cmpxchg(ptr, o, n) ((typeof(*(ptr))) \
	((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
	 atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) : \
	 __cmpxchg_called_with_bad_pointer()))
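
/*
 * Illustrative sketch (a hypothetical helper, not part of the original
 * header): the usual compare-and-swap retry loop built on the cmpxchg()
 * macro above, here implementing a saturating increment on a plain int.
 */
static inline int __example_inc_saturating(int *p, int max)
{
	int old, val;
	do {
		old = *p;			/* snapshot current value */
		val = (old < max) ? old + 1 : max;
		if (val == old)
			return old;		/* already saturated */
	} while (cmpxchg(p, old, val) != old);	/* retry if we raced */
	return val;
}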

/* A 64-bit atomic type */

typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

u64 _atomic64_xchg(atomic64_t *v, u64 n);
u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline u64 atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((atomic64_t *)v, 0);
}
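
/*
 * Illustrative note (an assumption about the 32-bit core, not from the
 * original header): a plain 64-bit load here would be two 32-bit loads,
 * so a concurrent update could be observed half-old, half-new; routing
 * the read through the locked xchg_add(v, 0) path avoids that torn read.
 */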

/**
 * atomic64_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic64_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg(v, n);
}

/**
 * atomic64_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic64_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_cmpxchg(v, o, n);
}

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	_atomic64_xchg_add(v, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns the new value (@v + @i).
 */
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(v, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(v, a, u) != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, u64 n)
{
	_atomic64_xchg(v, n);
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
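
/*
 * Illustrative sketch (a hypothetical helper, not from the original
 * header): the classic refcount "put" pattern built from the derived
 * macros above; the caller frees the object on a non-zero return.
 */
static inline int __example_put64(atomic64_t *refcount)
{
	/* Non-zero when the last reference was just dropped. */
	return atomic64_dec_and_test(refcount);
}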

/*
 * We need to barrier before modifying the word, since the _atomic_xxx()
 * routines just acquire the lock with a "tns" and then do the
 * read/modify/write of the word.  But after the word is updated, the
 * routine issues an "mf" before returning, and since it's a function
 * call, we don't even need a compiler barrier.
 */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_dec()	do { } while (0)
#define smp_mb__after_atomic_inc()	do { } while (0)
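
/*
 * Illustrative sketch (an assumption, not from the original header;
 * atomic_dec() itself comes from the generic <asm/atomic.h> layer):
 * the pattern these macros exist for, ordering a plain store before an
 * atomic decrement without paying for a redundant fence afterwards.
 */
static inline void __example_publish_then_dec(int *flag, atomic_t *count)
{
	*flag = 0;			/* must be globally visible first */
	smp_mb__before_atomic_dec();	/* full barrier before the dec */
	atomic_dec(count);		/* its trailing "mf" suffices after */
}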


/*
 * Support "tns" atomic integers.  These are atomic integers that can
 * hold any value but "1".  They are more efficient than regular atomic
 * operations because the "lock" (aka acquire) step is a single "tns"
 * in the uncontended case, and the "unlock" (aka release) step is a
 * single "store" without an mf.  (However, note that on tilepro the
 * "tns" will evict the local cache line, so it's not all upside.)
 *
 * Note that you can ONLY observe the value stored in the pointer
 * using these operations; a direct read of the value may confusingly
 * return the special value "1".
 */

int __tns_atomic_acquire(atomic_t *);
void __tns_atomic_release(atomic_t *p, int v);

static inline void tns_atomic_set(atomic_t *v, int i)
{
	__tns_atomic_acquire(v);
	__tns_atomic_release(v, i);
}

static inline int tns_atomic_cmpxchg(atomic_t *v, int o, int n)
{
	int ret = __tns_atomic_acquire(v);
	__tns_atomic_release(v, (ret == o) ? n : ret);
	return ret;
}

static inline int tns_atomic_xchg(atomic_t *v, int n)
{
	int ret = __tns_atomic_acquire(v);
	__tns_atomic_release(v, n);
	return ret;
}
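
/*
 * Illustrative sketch (a hypothetical helper, not from the original
 * header): a "tns" atomic used as a one-shot state word.  The stored
 * values deliberately avoid 1, which is reserved as the lock sentinel,
 * and the word is only ever inspected through the accessors above.
 */
static inline int __example_tns_claim(atomic_t *state)
{
	/* Move state 0 -> 2 at most once; returns the previous state. */
	return tns_atomic_cmpxchg(state, 0, 2);
}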

#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

#define ATOMIC_LOCKS_FOUND_VIA_TABLE() \
	(!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP))

#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

/* Number of entries in atomic_lock_ptr[]. */
#define ATOMIC_HASH_L1_SHIFT 6
#define ATOMIC_HASH_L1_SIZE (1 << ATOMIC_HASH_L1_SHIFT)

/* Number of locks in each struct pointed to by atomic_lock_ptr[]. */
#define ATOMIC_HASH_L2_SHIFT (CHIP_L2_LOG_LINE_SIZE() - 2)
#define ATOMIC_HASH_L2_SIZE (1 << ATOMIC_HASH_L2_SHIFT)
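
/*
 * Worked example (the figures are an assumption, not from the original
 * header): with 64-byte L2 lines, CHIP_L2_LOG_LINE_SIZE() is 6, so
 * ATOMIC_HASH_L2_SHIFT is 4 and each struct carries 16 locks; combined
 * with the 64-entry L1 table above, that yields 1024 hashed locks.
 */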

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/*
 * Number of atomic locks in atomic_locks[].  Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
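
/*
 * Illustrative sketch (a hypothetical helper; the real hash lives in
 * lib/atomic_32.c and the exact bit range chosen here is an assumption):
 * folding an address into an index into atomic_locks[], using low
 * pointer bits above the word offset so that at most PAGE_SIZE / 8
 * distinct slots are ever generated.
 */
static inline int *__example_hashed_lock(volatile void *v)
{
	unsigned long ptr = (unsigned long)v;

	return &atomic_locks[(ptr >> 3) & (ATOMIC_HASH_SIZE - 1)];
}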
#endif

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to
 * discourage assembly coders from using this register by mistake, so
 * if it is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Private helper routines in lib/atomic_asm_32.S */
extern struct __get_user __atomic_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
				      int *lock, u64 o, u64 n);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */