/*
 * S390 version
 *   Copyright IBM Corp. 1999
 *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/barrier.h>
#include <asm/processor.h>

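/*
 * SPINLOCK_LOCKVAL is this CPU's "owner" value, kept in the lowcore.
 * A held lock stores the owner's lockval in its lock word, which lets
 * the out-of-line slow paths and arch_lock_relax() identify (and,
 * under a hypervisor, yield to) the CPU that currently holds the lock.
 */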
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

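/*
 * Number of in-line retries the slow paths make before giving up the
 * CPU (see arch_lock_relax()); tunable via the "spin_retry=" kernel
 * parameter.
 */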
extern int spin_retry;

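/*
 * __sync_bool_compare_and_swap() compiles to the interlocked COMPARE
 * AND SWAP (CS) instruction on s390, which also acts as a full memory
 * barrier; the helper returns nonzero iff the swap succeeded.
 */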
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	return __sync_bool_compare_and_swap(lock, old, new);
}

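/*
 * vcpu_is_preempted() lets lock spinners detect that the CPU they are
 * waiting for is not currently backed by a physical CPU (e.g. its
 * hypervisor vCPU was preempted), so they can stop busy-waiting early.
 */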
#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

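/*
 * Lock word encoding: 0 means unlocked, any other value is the
 * SPINLOCK_LOCKVAL of the owning CPU.  The wait/retry slow paths and
 * arch_lock_relax() live out of line in arch/s390/lib/spinlock.c.
 * Callers normally reach these helpers through the generic API, e.g.
 * spin_lock() -> raw_spin_lock() -> arch_spin_lock().
 */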
void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

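/*
 * ~cpu guarantees a nonzero lockval even for CPU 0, so an owner value
 * can never be mistaken for the "unlocked" value 0.
 */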
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return ACCESS_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

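/*
 * On s390 a plain store is sufficient to release the lock: the
 * architecture's strong memory ordering plus the "memory" clobber
 * (a compiler barrier) provide the required release semantics.
 */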
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(unsigned int, lp->lock);
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (lp->lock)
		: "d" (0)
		: "cc", "memory");
}

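/*
 * Spin until the lock is observed free; smp_acquire__after_ctrl_dep()
 * then upgrades the control dependency on that final read to ACQUIRE
 * ordering for the caller.
 */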
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
	smp_acquire__after_ctrl_dep();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
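
/*
 * rwlock word encoding: bit 31 (0x80000000) is set while a writer
 * holds the lock, the lower bits count the readers.  rw->owner caches
 * the lockval of the current writer (0 if none) so the relax helpers
 * can yield to a preempted writer.
 */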

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x)	((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x)	((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely((int) old >= 0 &&
		      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}

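/*
 * With the z196 interlocked-access facility the rwlock fast paths can
 * use the load-and-or/and/add instructions (lao/lan/laa) directly;
 * older machines fall back to the compare-and-swap loops further down.
 */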
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

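/*
 * __RAW_LOCK performs the interlocked update and then issues
 * "bcr 14,0" (fast-BCR-serialization) as the acquire barrier;
 * __RAW_UNLOCK relies on the architecture's ordering rules and omits
 * the extra serialization.
 */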
#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if ((int) old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

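/*
 * Pre-z196 fallback: the same lock word layout, maintained with
 * compare-and-swap retry loops instead of the load-and-* instructions.
 */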
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(unsigned int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

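/*
 * The trylock helpers first try the inline fast path once and then
 * fall back to the out-of-line retry loops, which spin up to
 * spin_retry times before giving up.
 */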
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

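/*
 * rw->owner is set only by the write-lock paths, so while the lock is
 * held by readers it is 0 and the relax helpers have no specific CPU
 * to yield to.
 */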
static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */