/*
 * S390 version
 *   Copyright IBM Corp. 1999
 *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
        return __sync_bool_compare_and_swap(lock, old, new);
}
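
/*
 * Editorial sketch, not part of the original header: the wrapper above
 * stores "new" into *lock only if *lock still equals "old", and returns
 * nonzero on success. A hypothetical fast-path acquisition looks like:
 *
 *	if (_raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL))
 *		return;		// 0 -> lockval: lock was free, now owned
 *	// otherwise the lock is held; fall back to the wait path
 */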

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations. There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
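
/*
 * Illustrative only, not part of the original header: a minimal sketch of
 * how these arch_ primitives are reached through the generic spinlock API.
 * The lock and counter names are hypothetical.
 *
 *	static DEFINE_SPINLOCK(demo_lock);
 *	static unsigned long demo_events;
 *
 *	static void demo_record_event(void)
 *	{
 *		unsigned long flags;
 *
 *		// the irqsave variant ends up in arch_spin_lock_flags()
 *		spin_lock_irqsave(&demo_lock, flags);
 *		demo_events++;
 *		spin_unlock_irqrestore(&demo_lock, flags);
 *	}
 */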

void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
        arch_lock_relax(lock->lock);
}

static inline u32 arch_spin_lockval(int cpu)
{
        /* ~cpu keeps every owner value distinct from 0, which means unlocked */
        return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
        return ACCESS_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
        barrier();
        return likely(arch_spin_value_unlocked(*lp) &&
                      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
                                        unsigned long flags)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                return arch_spin_trylock_retry(lp);
        return 1;
}
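
/*
 * Illustrative only, not part of the original header: a hypothetical
 * trylock caller. spin_trylock() lands in arch_spin_trylock() above and
 * never spins when the lock is contended:
 *
 *	if (spin_trylock(&demo_lock)) {
 *		// fast path: got the lock without waiting
 *		spin_unlock(&demo_lock);
 *	} else {
 *		// contended: defer or skip the work instead of spinning
 *	}
 */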

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        typecheck(unsigned int, lp->lock);
        asm volatile(
                "st     %1,%0\n"        /* store of 0 releases the lock */
                : "+Q" (lp->lock)
                : "d" (0)
                : "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        while (arch_spin_is_locked(lock))
                arch_spin_relax(lock);
        smp_acquire__after_ctrl_dep();
}
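
/*
 * Editorial note, not in the original header: arch_spin_relax() passes the
 * owner's lock value (~cpu) to arch_lock_relax(), which on virtualized
 * systems may yield the waiting virtual CPU to the hypervisor when the
 * lock holder is preempted (see arch_vcpu_is_preempted above), instead of
 * burning cycles in the busy-wait loop.
 */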

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
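
/*
 * Illustrative only, not part of the original header: a sketch of the
 * "mixed" pattern described above, with hypothetical names. Readers that
 * may run in interrupt context take a plain read_lock(); the writer must
 * disable interrupts so it cannot be interrupted by a reader on the same
 * CPU while it holds the write lock.
 *
 *	static DEFINE_RWLOCK(demo_rwlock);
 *
 *	static void demo_irq_reader(void)	// e.g. from an irq handler
 *	{
 *		read_lock(&demo_rwlock);	// -> arch_read_lock()
 *		// ... read shared state ...
 *		read_unlock(&demo_rwlock);
 *	}
 *
 *	static void demo_writer(void)		// process context
 *	{
 *		write_lock_irq(&demo_rwlock);	// -> arch_write_lock()
 *		// ... update shared state ...
 *		write_unlock_irq(&demo_rwlock);
 *	}
 */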

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
        unsigned int old = ACCESS_ONCE(rw->lock);
        return likely((int) old >= 0 &&
                      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
        unsigned int old = ACCESS_ONCE(rw->lock);
        return likely(old == 0 &&
                      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}
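
/*
 * Editorial note on the lock-word encoding used above, not in the original
 * header: bit 31 of rw->lock is the writer bit and the lower bits count
 * readers. A reader may enter while the word is non-negative
 * ((int) old >= 0, i.e. no writer holds the lock) and bumps the count by 1;
 * a writer needs the whole word to be 0 and sets 0x80000000, making the
 * value negative and thereby excluding both readers and other writers.
 */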

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR     "lao"
#define __RAW_OP_AND    "lan"
#define __RAW_OP_ADD    "laa"

#define __RAW_LOCK(ptr, op_val, op_string)              \
({                                                      \
        unsigned int old_val;                           \
                                                        \
        typecheck(unsigned int *, ptr);                 \
        asm volatile(                                   \
                /* interlocked load-and-op */           \
                op_string "     %0,%2,%1\n"             \
                /* fast serialization (barrier) */      \
                "bcr    14,0\n"                         \
                : "=d" (old_val), "+Q" (*ptr)           \
                : "d" (op_val)                          \
                : "cc", "memory");                      \
        old_val;                                        \
})

#define __RAW_UNLOCK(ptr, op_val, op_string)            \
({                                                      \
        unsigned int old_val;                           \
                                                        \
        typecheck(unsigned int *, ptr);                 \
        asm volatile(                                   \
                op_string "     %0,%2,%1\n"             \
                : "=d" (old_val), "+Q" (*ptr)           \
                : "d" (op_val)                          \
                : "cc", "memory");                      \
        old_val;                                        \
})
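
/*
 * Editorial note, not in the original header: on z196 and later machines
 * these macros use the interlocked-access instructions - "laa" (load and
 * add), "lao" (load and or), "lan" (load and and) - which atomically
 * update the lock word and return its previous value in one instruction,
 * so no compare-and-swap retry loop is needed. For example,
 * __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD) atomically bumps the reader count
 * and yields the old value, whose sign reveals whether a writer held the
 * lock. The "bcr 14,0" in the lock variant is the fast serialization that
 * serves as the acquire barrier; the unlock variant omits it.
 */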

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int old;

        old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
        if ((int) old < 0)
                _raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int old;

        old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
        if (old != 0)
                _raw_write_lock_wait(rw, old);
        rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        rw->owner = 0;
        __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                _raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int old;

        do {
                old = ACCESS_ONCE(rw->lock);
        } while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw))
                _raw_write_lock_wait(rw);
        rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        typecheck(unsigned int, rw->lock);

        rw->owner = 0;
        asm volatile(
                "st     %1,%0\n"
                : "+Q" (rw->lock)
                : "d" (0)
                : "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                return _raw_read_trylock_retry(rw);
        return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
                return 0;
        rw->owner = SPINLOCK_LOCKVAL;
        return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */