/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

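/*
 * vcpu_is_preempted() lets lock spinners check whether the CPU that owns
 * a contended lock is itself preempted by the hypervisor, so they can stop
 * busy-waiting on a vCPU that is not currently running.
 */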
#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

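/*
 * Illustrative sketch (not part of the original header): the arch_spin_*
 * hooks below are normally reached through the generic spinlock API,
 * roughly like this ("my_lock" is a made-up name used only here):
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);	ends up in arch_spin_lock()
 *	... critical section ...
 *	spin_unlock(&my_lock);	ends up in arch_spin_unlock()
 */
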
void arch_lock_relax(int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

/* The lock value is the one's complement of the CPU number, so CPU 0 is non-zero. */
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0070\n"	/* NIAI 7 */
#endif
		"	st	%1,%0\n"
		: "=Q" (lp->lock) : "d" (0) : "cc", "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

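/*
 * Illustrative sketch (not part of the original header) of the mixing
 * described above, with a reader in interrupt context and a writer in
 * process context ("my_rwlock" is a made-up name used only here):
 *
 *	static DEFINE_RWLOCK(my_rwlock);
 *
 *	interrupt handler:	read_lock(&my_rwlock);
 *				... read data ...
 *				read_unlock(&my_rwlock);
 *
 *	writer (process ctx):	write_lock_irqsave(&my_rwlock, flags);
 *				... modify data ...
 *				write_unlock_irqrestore(&my_rwlock, flags);
 */
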
/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

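/*
 * The rwlock word encodes both reader and writer state: 0 means unlocked,
 * a positive value is the number of readers, and the sign bit (0x80000000)
 * set means a writer holds the lock.
 */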
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	int old = ACCESS_ONCE(rw->lock);
	return likely(old >= 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

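/*
 * The mnemonics above are the z196 interlocked-access instructions
 * LOAD AND OR / LOAD AND AND / LOAD AND ADD.  __RAW_LOCK additionally
 * issues a "bcr 14,0" serialization after the update so that taking the
 * lock acts as a full memory barrier; __RAW_UNLOCK omits it.
 */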
#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if (old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */