#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>

/*
 * On IA-64, counter must always be volatile to ensure that the
 * memory accesses are ordered.
 */
typedef struct { volatile __s32 counter; } atomic_t;
typedef struct { volatile __s64 counter; } atomic64_t;

#define ATOMIC_INIT(i)		((atomic_t) { (i) })
#define ATOMIC64_INIT(i)	((atomic64_t) { (i) })

#define atomic_read(v)		((v)->counter)
#define atomic64_read(v)	((v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))
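
/*
 * The ia64_atomic_add/sub helpers below all follow the same pattern: read
 * the counter, compute the new value, and retry the update with
 * ia64_cmpxchg() until no other CPU has changed the counter in the
 * meantime.  CMPXCHG_BUGCHECK_DECL/CMPXCHG_BUGCHECK() are debugging hooks
 * that are only active when cmpxchg debugging is configured.
 */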

static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
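
/*
 * Illustrative sketch, not part of the original header: atomic_cmpxchg()
 * is enough to build conditional updates such as "increment unless the
 * counter is already zero".  The helper name below is arbitrary.
 */
static __inline__ int
example_atomic_inc_not_zero (atomic_t *v)
{
	int old;

	do {
		old = atomic_read(v);
		if (old == 0)
			return 0;	/* counter already hit zero; do not revive it */
	} while (atomic_cmpxchg(v, old, old + 1) != old);
	return 1;
}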
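
/*
 * The *_return() macros below pick between two implementations: when the
 * increment is a compile-time constant that the ia64 fetchadd instruction
 * can encode (+/-1, 4, 8, 16), ia64_fetch_and_add() is used directly;
 * anything else falls back to the cmpxchg-based helpers above.
 */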
#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ int
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))
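
/*
 * Illustrative sketch, not part of the original header: the typical use of
 * this API is reference/resource counting.  A counter is declared with
 * e.g. "static atomic_t example_count = ATOMIC_INIT(0);" and manipulated
 * as below.  The helper names are arbitrary.
 */
static __inline__ void
example_get (atomic_t *refcount)
{
	atomic_inc(refcount);		/* take another reference */
}

static __inline__ int
example_put (atomic_t *refcount)
{
	/* drop a reference; true means the caller released the last one */
	return atomic_dec_and_test(refcount);
}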

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
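
/*
 * Illustrative note, not part of the original header: callers that need a
 * full memory barrier around an atomic_inc()/atomic_dec() write it as
 *
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->count);
 *	smp_mb__after_atomic_dec();
 *
 * Since the atomic operations above already order memory accesses on
 * ia64, these hooks only need to be compiler barriers here.
 */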

#endif /* _ASM_IA64_ATOMIC_H */