/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_lock_relax(int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}
#define arch_spin_relax		arch_spin_relax

static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}
#define arch_spin_lock_flags	arch_spin_lock_flags

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0070\n"	/* NIAI 7 */
#endif
		"	st	%1,%0\n"
		: "=Q" (lp->lock) : "d" (0) : "cc", "memory");
}
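
/*
 * Illustrative sketch of the lock value scheme: an unlocked lock holds 0,
 * and a held lock holds SPINLOCK_LOCKVAL, the per-CPU value derived from
 * arch_spin_lockval() above (~cpu, so even CPU 0 yields a non-zero owner
 * value).  The helper below is hypothetical and only shows how the
 * primitives defined above fit together.
 */
static inline void __sketch_spin_lock_unlock(arch_spinlock_t *lp)
{
	arch_spin_lock(lp);	/* lp->lock now holds SPINLOCK_LOCKVAL */
	/* ... critical section ... */
	arch_spin_unlock(lp);	/* plain store of 0 marks the lock free */
}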

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
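
/*
 * Illustrative sketch of the "mixing" described above, written with the
 * generic rwlock wrappers (declared in <linux/rwlock.h>, not in this
 * file); the lock name and the surrounding contexts are hypothetical:
 *
 *	DEFINE_RWLOCK(example_lock);
 *
 *	process context (writer) - must use the irq-safe variant:
 *		unsigned long flags;
 *		write_lock_irqsave(&example_lock, flags);
 *		... update the protected data ...
 *		write_unlock_irqrestore(&example_lock, flags);
 *
 *	interrupt handler or process context (reader) - plain variant is
 *	sufficient, because there are no interrupt writers:
 *		read_lock(&example_lock);
 *		... read the protected data ...
 *		read_unlock(&example_lock);
 */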

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	int old = READ_ONCE(rw->lock);
	return likely(old >= 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	int old = READ_ONCE(rw->lock);
	return likely(old == 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}
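
/*
 * Note on the lock word layout used by the helpers above: rw->lock is 0
 * when the lock is free, a positive value counts the active readers, and
 * the most-significant bit (0x80000000) marks a writer.  Hence
 * arch_read_trylock_once() only succeeds while the old value is >= 0
 * (no writer present) and arch_write_trylock_once() only while it is
 * exactly 0 (no readers and no writer).
 */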

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})
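
/*
 * Illustrative sketch: with the z196 interlocked-access facility the
 * __RAW_LOCK()/__RAW_UNLOCK() macros above expand to a single
 * load-and-or/and/add instruction (lao/lan/laa) that returns the previous
 * value of the lock word; the extra "bcr 14,0" on the lock path acts as a
 * serialization point.  A rough portable equivalent of the add case, using
 * the GCC __atomic builtins (the helper name is hypothetical):
 */
static inline int __sketch_raw_fetch_add(int *ptr, int val)
{
	/* returns the old *ptr, like __RAW_LOCK(ptr, val, __RAW_OP_ADD) */
	return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);
}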

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if (old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int old;

	do {
		old = READ_ONCE(rw->lock);
	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}
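
/*
 * Note: without the interlocked-access facility the reader count is
 * updated with a compare-and-swap retry loop - the cmpxchg above is
 * repeated until no other CPU changed rw->lock between the READ_ONCE()
 * and the swap, at which point the count has been decremented by one.
 */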

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}
#define arch_read_relax		arch_read_relax

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}
#define arch_write_relax	arch_write_relax

#endif /* __ASM_SPINLOCK_H */