/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly. prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * A hash function indexes each atomic_t into one of the spinlocks.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
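
/* A worked example, assuming L1_CACHE_BYTES == 32 purely for illustration
 * (the real value comes from <asm/cache.h>): an atomic_t at address 0x10020
 * hashes to (0x10020 / 32) & 3 == 1, i.e. __atomic_hash[1].  atomic_ts that
 * share a cacheline always share a lock; those in different cachelines are
 * spread across the four locks.
 */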

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
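
/* A minimal usage sketch (the real callers are the helpers further down):
 * every read-modify-write on an atomic_t is bracketed by this pair, keyed
 * on the atomic_t's address, so concurrent writers to the same word
 * serialize.  On UP only the irq disabling survives, which is all that is
 * needed there.
 *
 *	unsigned long flags;
 *	_atomic_spin_lock_irqsave(v, flags);
 *	v->counter += i;		// the protected RMW
 *	_atomic_spin_unlock_irqrestore(v, flags);
 */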

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

/* It's possible to reduce all atomic operations to
 * __atomic_add_return, atomic_set and atomic_read (the latter
 * is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return (*(volatile int *)&(v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

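/* Illustrative contrast (hypothetical atomic_t "state" and values): xchg
 * unconditionally stores the new value and returns the old one, while
 * cmpxchg stores only when the old value matches:
 *
 *	old = atomic_xchg(&state, NEW);			// always stores NEW
 *	old = atomic_cmpxchg(&state, IDLE, BUSY);	// stores BUSY only if
 *							// state was IDLE
 */
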
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
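
/* A minimal sketch of how this primitive is typically consumed: the generic
 * atomic_add_unless()/atomic_inc_not_zero() wrappers in <linux/atomic.h>
 * were built on it this way at the time; the helper name below is
 * hypothetical:
 *
 *	// take a reference only if the counter is currently non-zero
 *	static inline int my_inc_not_zero(atomic_t *v)
 *	{
 *		return __atomic_add_unless(v, 1, 0) != 0;
 *	}
 */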


#define atomic_add(i,v)	((void)(__atomic_add_return( (i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-(i),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

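/* Typical (illustrative) use of the *_and_test forms: release-side
 * refcounting, where whoever drops the count to zero does the cleanup.
 * The names below are hypothetical, not part of this header:
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refs))
 *			my_obj_free(obj);
 *	}
 */
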
#define ATOMIC_INIT(i)	{ (i) }

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

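/* An illustrative ordering use (hypothetical fields): make the flag store
 * visible to other CPUs before the decrement they may be polling on:
 *
 *	obj->done = 1;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */
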
#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return (*(volatile long *)&(v)->counter);
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
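
/* atomic64_inc_not_zero() is the usual building block for "take a reference
 * only if the object is still live".  A hypothetical sketch:
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;	// already on its way to being freed
 */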

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */