include/asm-ia64/atomic.h  (Linux 2.6.12-rc2, mirror_ubuntu-kernels.git)
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 *
 * NOTE: don't mess with the types below! The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>

/*
 * On IA-64, counter must always be volatile to ensure that the
 * memory accesses are ordered.
 */
typedef struct { volatile __s32 counter; } atomic_t;
typedef struct { volatile __s64 counter; } atomic64_t;

#define ATOMIC_INIT(i)          ((atomic_t) { (i) })
#define ATOMIC64_INIT(i)        ((atomic64_t) { (i) })

#define atomic_read(v)          ((v)->counter)
#define atomic64_read(v)        ((v)->counter)

#define atomic_set(v,i)         (((v)->counter) = (i))
#define atomic64_set(v,i)       (((v)->counter) = (i))
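
/*
 * Illustrative usage sketch (not part of the original header): declare a
 * counter as atomic_t, initialize it with ATOMIC_INIT() or atomic_set(),
 * and read it with atomic_read().  The names below are hypothetical.
 *
 *      static atomic_t nr_requests = ATOMIC_INIT(0);
 *
 *      void reset_request_count(void)
 *      {
 *              atomic_set(&nr_requests, 0);
 *      }
 *
 *      int request_count(void)
 *      {
 *              return atomic_read(&nr_requests);
 *      }
 */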
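/*
 * The ia64_atomic_add/sub helpers below implement the operation as a
 * compare-and-exchange retry loop: read the current counter, compute the
 * new value, then try to install it with ia64_cmpxchg() using acquire
 * ("acq") semantics.  If another CPU modified the counter in between, the
 * cmpxchg returns a value different from "old" and the loop retries.
 * CMPXCHG_BUGCHECK() is a debug-only sanity check.
 */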
static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
        __s32 old, new;
        CMPXCHG_BUGCHECK_DECL

        do {
                CMPXCHG_BUGCHECK(v);
                old = atomic_read(v);
                new = old + i;
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
        return new;
}

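/* 64-bit counterpart of ia64_atomic_add(): same retry loop on an atomic64_t. */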
static __inline__ int
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
        __s64 old, new;
        CMPXCHG_BUGCHECK_DECL

        do {
                CMPXCHG_BUGCHECK(v);
                old = atomic64_read(v);
                new = old + i;
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
        return new;
}

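/* Subtraction variant of ia64_atomic_add(): same retry loop, computing old - i. */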
static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
        __s32 old, new;
        CMPXCHG_BUGCHECK_DECL

        do {
                CMPXCHG_BUGCHECK(v);
                old = atomic_read(v);
                new = old - i;
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
        return new;
}

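/* 64-bit counterpart of ia64_atomic_sub(). */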
static __inline__ int
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
        __s64 old, new;
        CMPXCHG_BUGCHECK_DECL

        do {
                CMPXCHG_BUGCHECK(v);
                old = atomic64_read(v);
                new = old - i;
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
        return new;
}

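/*
 * Fast path for the *_return macros below: the ia64 fetchadd instruction
 * only accepts immediate increments of -16, -8, -4, -1, 1, 4, 8 and 16.
 * When the operand is a compile-time constant from that set, a single
 * ia64_fetch_and_add() is used; any other value falls back to the
 * cmpxchg-based helpers above.
 */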
#define atomic_add_return(i,v)                                          \
({                                                                      \
        int __ia64_aar_i = (i);                                         \
        (__builtin_constant_p(i)                                        \
         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)          \
             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)          \
             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)          \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))        \
                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
                : ia64_atomic_add(__ia64_aar_i, v);                     \
})

#define atomic64_add_return(i,v)                                        \
({                                                                      \
        long __ia64_aar_i = (i);                                        \
        (__builtin_constant_p(i)                                        \
         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)          \
             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)          \
             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)          \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))        \
                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
                : ia64_atomic64_add(__ia64_aar_i, v);                   \
})

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
        return atomic_add_return(i, v) < 0;
}

static __inline__ int
atomic64_add_negative (__s64 i, atomic64_t *v)
{
        return atomic64_add_return(i, v) < 0;
}

#define atomic_sub_return(i,v)                                          \
({                                                                      \
        int __ia64_asr_i = (i);                                         \
        (__builtin_constant_p(i)                                        \
         && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)          \
             || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)          \
             || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)          \
             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))        \
                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
                : ia64_atomic_sub(__ia64_asr_i, v);                     \
})

#define atomic64_sub_return(i,v)                                        \
({                                                                      \
        long __ia64_asr_i = (i);                                        \
        (__builtin_constant_p(i)                                        \
         && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)          \
             || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)          \
             || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)          \
             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))        \
                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
                : ia64_atomic64_sub(__ia64_asr_i, v);                   \
})

#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_inc_return(v)            atomic_add_return(1, (v))
#define atomic64_dec_return(v)          atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)          atomic64_add_return(1, (v))
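
/*
 * Illustrative sketch (not part of the original header): atomic_inc_return()
 * as a simple sequence/ID generator; "next_id" and alloc_id() are
 * hypothetical names.
 *
 *      static atomic_t next_id = ATOMIC_INIT(0);
 *
 *      int alloc_id(void)
 *      {
 *              return atomic_inc_return(&next_id);
 *      }
 */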

#define atomic_sub_and_test(i,v)        (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)      (atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)        (atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)        (atomic64_add_return(1, (v)) == 0)
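
/*
 * Illustrative sketch (not part of the original header): the classic
 * reference-counting release idiom built on atomic_dec_and_test().
 * "struct my_object", its "refcount" field and free_object() are
 * hypothetical.
 *
 *      void put_object(struct my_object *obj)
 *      {
 *              if (atomic_dec_and_test(&obj->refcount))
 *                      free_object(obj);
 *      }
 *
 * atomic_dec_and_test() returns true only for the caller that drops the
 * count to zero, so exactly one caller ends up freeing the object.
 */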

#define atomic_add(i,v)                 atomic_add_return((i), (v))
#define atomic_sub(i,v)                 atomic_sub_return((i), (v))
#define atomic_inc(v)                   atomic_add(1, (v))
#define atomic_dec(v)                   atomic_sub(1, (v))

#define atomic64_add(i,v)               atomic64_add_return((i), (v))
#define atomic64_sub(i,v)               atomic64_sub_return((i), (v))
#define atomic64_inc(v)                 atomic64_add(1, (v))
#define atomic64_dec(v)                 atomic64_sub(1, (v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()
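/*
 * barrier() is a compiler-only barrier: since the operations above are
 * built on acquire-semantics cmpxchg/fetchadd and, per the comment above,
 * are already serializing, these macros only need to prevent the compiler
 * from reordering around the atomic operation; no hardware fence is added.
 */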

#endif /* _ASM_IA64_ATOMIC_H */