/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	/* Fold the variable's cacheline address down to a lock index. */
	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

#define ATOMIC64_OP(op, c_op)						\
void atomic64_##op(long long a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
long long atomic64_##op##_return(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_##op##_return);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
ATOMIC64_OP(and, &=)
ATOMIC64_OP(or, |=)
ATOMIC64_OP(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

/*
 * Decrement v only if the result would remain non-negative.  The
 * decremented value is returned either way, so a negative return
 * means v was left unchanged.
 */
long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);

long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);

long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
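
/*
 * Example usage (illustrative): atomic64_add_unless() is the building
 * block for "take a reference unless the count already hit zero".  The
 * generic atomic64 header defines, roughly:
 *
 *	#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
 *
 * which increments v and returns nonzero only if v was not already 0.
 */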