/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))

/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */

static inline void atomic_add(int i, atomic_t *v)
{
	__insn_fetchadd4((void *)&v->counter, i);
}

/*
 * Note a subtlety of the locking here.  We are required to provide a
 * full memory barrier before and after the operation.  However, we
 * only provide an explicit mb before the operation.  After the
 * operation, we use barrier() to get a full mb for free, because:
 *
 * (1) The barrier directive to the compiler prohibits any instructions
 * being statically hoisted before the barrier;
 * (2) the microarchitecture will not issue any further instructions
 * until the fetchadd result is available for the "+ i" add instruction;
 * (3) the smp_mb before the fetchadd ensures that no other memory
 * operations are in flight at this point.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* equivalent to smp_mb(); see block comment above */
	return val;
}
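
/*
 * Illustrative sketch (not part of the original header): because
 * atomic_add_return() provides a full barrier before and after the
 * fetchadd, a caller needs no extra ordering of its own, e.g. in a
 * simple reference-count "get" that also reports the new count.
 * The helper name below is hypothetical.
 */
static inline int example_ref_get(atomic_t *refs)
{
	/* Full barrier semantics come from atomic_add_return() itself. */
	return atomic_add_return(1, refs);
}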

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}
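
/*
 * For context (a sketch, not part of this file): in this kernel
 * generation the generic <linux/atomic.h> layer builds the boolean
 * variant on top of __atomic_add_unless(), roughly as:
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 *
 * so this routine must return the value it observed, not a flag.
 */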

static inline void atomic_and(int i, atomic_t *v)
{
	__insn_fetchand4((void *)&v->counter, i);
}

static inline void atomic_or(int i, atomic_t *v)
{
	__insn_fetchor4((void *)&v->counter, i);
}

static inline void atomic_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
}
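
/*
 * Explanatory note added here, based on the TILE-Gx compare-and-exchange
 * convention as we understand it: the hardware has no fetch-and-xor, so
 * the expected old value is first placed in the SPR_CMPEXCH_VALUE
 * register and __insn_cmpexch4() then atomically stores "guess ^ i" only
 * if the word in memory still equals that expected value, returning the
 * value actually found.  The loop retries until the exchange succeeds.
 */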

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	READ_ONCE((v)->counter)
#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))

static inline void atomic64_add(long i, atomic64_t *v)
{
	__insn_fetchadd((void *)&v->counter, i);
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;  /* use long, not int, to avoid truncating the 64-bit result */
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* equivalent to smp_mb; see atomic_add_return() */
	return val;
}

static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}
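
/*
 * Added note: unlike __atomic_add_unless() above, which hands back the
 * observed old value for the generic layer to test, this 64-bit variant
 * already returns the boolean result (nonzero if the add happened),
 * which is what atomic64_inc_not_zero() below relies on.
 */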

static inline void atomic64_and(long i, atomic64_t *v)
{
	__insn_fetchand((void *)&v->counter, i);
}

static inline void atomic64_or(long i, atomic64_t *v)
{
	__insn_fetchor((void *)&v->counter, i);
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
}

#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

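/*
 * Illustrative sketch (not from the original header): the classic use
 * of atomic64_inc_not_zero() is taking a reference only while the count
 * is still live, e.g. during a lockless lookup.  The structure and
 * helper name here are hypothetical.
 */
struct example_obj {
	atomic64_t refcnt;
};

static inline bool example_obj_tryget(struct example_obj *obj)
{
	/* Succeeds only if the refcount was not already zero. */
	return atomic64_inc_not_zero(&obj->refcnt) != 0;
}
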
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */