/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) ((v)->counter = (i))

/*
 * The smp_mb() operations throughout exist because Linux requires
 * memory barriers before and after any routine that updates memory
 * and returns a value.
 */

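/*
 * For illustration (an editorial sketch, not part of the original
 * header; the "msg" fields are hypothetical): a caller may rely on
 * that implied full barrier to order plain stores against a
 * value-returning atomic such as atomic_xchg() below:
 *
 *	msg->payload = 42;		// plain store
 *	old = atomic_xchg(&msg->seq, 1);
 *		// the barrier before the exchange makes the payload
 *		// store visible first; the barrier after keeps later
 *		// loads from moving up past the exchange
 */
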
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	int val;
	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
	smp_mb();  /* barrier for proper semantics */
	val = __insn_cmpexch4((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}
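
/*
 * Illustrative use (a sketch, with a hypothetical limit "max"): the
 * classic cmpxchg retry loop, here adding one without passing max:
 *
 *	int old = atomic_read(v);
 *	for (;;) {
 *		int new = (old < max) ? old + 1 : old;
 *		int seen = atomic_cmpxchg(v, old, new);
 *		if (seen == old)
 *			break;		// swap succeeded
 *		old = seen;		// lost a race; retry with new value
 *	}
 */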

static inline int atomic_xchg(atomic_t *v, int n)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_exch4((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}

/* No value is returned, so no barriers are required (see above). */
static inline void atomic_add(int i, atomic_t *v)
{
	__insn_fetchadd4((void *)&v->counter, i);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}
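
/*
 * Editorial note (an assumption drawn from the comment above): the
 * "+ i" makes the result data-dependent on the value returned by the
 * fetchadd, so the core has already waited on memory by the time val
 * is computed; a compiler-only barrier() then suffices in place of a
 * trailing smp_mb().
 */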

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = atomic_cmpxchg(v, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}
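
/*
 * Illustrative use (a sketch, with a hypothetical refcounted object):
 * since the old value is returned, a caller can take a reference only
 * if the count has not already dropped to zero:
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) != 0)
 *		... got a reference ...
 *	else
 *		... object is already on its way out ...
 */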

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v) ((v)->counter)
#define atomic64_set(v, i) ((v)->counter = (i))

static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
	val = __insn_cmpexch((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}

static inline long atomic64_xchg(atomic64_t *v, long n)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_exch((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}

static inline void atomic64_add(long i, atomic64_t *v)
{
	__insn_fetchadd((void *)&v->counter, i);
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}

static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = atomic64_cmpxchg(v, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}

#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc(v) atomic64_add(1, (v))
#define atomic64_dec(v) atomic64_sub(1, (v))

#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
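
/*
 * Illustrative use of atomic64_inc_not_zero() (a sketch, with a
 * hypothetical lookup path): take a reference only while the object
 * is still live, so an object whose count has already hit zero is
 * never resurrected:
 *
 *	rcu_read_lock();
 *	obj = lookup(key);
 *	if (obj && !atomic64_inc_not_zero(&obj->refcount))
 *		obj = NULL;		// lost the race with the last put
 *	rcu_read_unlock();
 */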

/* Atomic dec and inc don't implement barrier, so provide them if needed. */
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
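
/*
 * Illustrative use (a sketch, with hypothetical fields): order prior
 * stores before a plain, non-value-returning decrement:
 *
 *	obj->state = DONE;		// must be visible first
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */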

/* Define this to indicate that cmpxchg is an efficient operation. */
#define __HAVE_ARCH_CMPXCHG

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */