/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable. Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;

static inline spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter;
	spin_unlock_irqrestore(lock, flags);
	return val;
}

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);

	spin_lock_irqsave(lock, flags);
	v->counter = i;
	spin_unlock_irqrestore(lock, flags);
}

void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);

	spin_lock_irqsave(lock, flags);
	v->counter += a;
	spin_unlock_irqrestore(lock, flags);
}

long long atomic64_add_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter += a;
	spin_unlock_irqrestore(lock, flags);
	return val;
}

void atomic64_sub(long long a, atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);

	spin_lock_irqsave(lock, flags);
	v->counter -= a;
	spin_unlock_irqrestore(lock, flags);
}

long long atomic64_sub_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter -= a;
	spin_unlock_irqrestore(lock, flags);
	return val;
}

long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	spin_unlock_irqrestore(lock, flags);
	return val;
}

long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	spin_unlock_irqrestore(lock, flags);
	return val;
}

long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	spin_unlock_irqrestore(lock, flags);
	return val;
}

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	int ret = 0;

	spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	spin_unlock_irqrestore(lock, flags);
	return ret;
}

static int init_atomic64_lock(void)
{
	int i;

	for (i = 0; i < NR_LOCKS; ++i)
		spin_lock_init(&atomic64_lock[i].lock);
	return 0;
}

pure_initcall(init_atomic64_lock);