/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

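/*
 * Editor's note (sketch, not part of the original file): vcpu_is_preempted()
 * lets spin-wait loops back off when the lock holder's virtual CPU has been
 * preempted by the hypervisor, instead of burning cycles on a lock that
 * cannot currently be released. A hypothetical wait loop could use it as
 * below; "owner_cpu" and the smp_yield_cpu() call are illustrative
 * assumptions, not code from this header.
 */
#if 0
static void example_wait_for_owner(arch_spinlock_t *lp, int owner_cpu)
{
        while (READ_ONCE(lp->lock) != 0) {
                if (vcpu_is_preempted(owner_cpu))
                        smp_yield_cpu(owner_cpu); /* let the owner run */
        }
}
#endif
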
/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_lock_relax(int cpu);
void arch_spin_relax(arch_spinlock_t *lock);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_setup(int cpu);

static inline u32 arch_spin_lockval(int cpu)
{
        return cpu + 1;
}

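/*
 * Editor's note: the lock value is derived from the CPU number rather than
 * being a plain flag, so a held lock identifies its owner. The offset keeps
 * 0 reserved as the "unlocked" value, since CPU numbering starts at 0.
 * SPINLOCK_LOCKVAL above caches this per-CPU value in the lowcore.
 */
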
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
        return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
        barrier();
        return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

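/*
 * Editor's sketch (illustrative, not part of the original file):
 * __atomic_cmpxchg_bool() is a boolean compare-and-swap, i.e. roughly the
 * following, performed atomically as one interlocked operation:
 */
#if 0
static inline bool example_cmpxchg_bool(int *ptr, int old, int new)
{
        if (*ptr != old)
                return false;   /* another CPU got there first */
        *ptr = new;             /* e.g. 0 -> SPINLOCK_LOCKVAL takes the lock */
        return true;
}
#endif
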
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
                                        unsigned long flags)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                return arch_spin_trylock_retry(lp);
        return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        typecheck(int, lp->lock);
        asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
                "       .long   0xb2fa0070\n"   /* NIAI 7 */
#endif
                "       sth     %1,%0\n"
                : "=Q" (((unsigned short *) &lp->lock)[1])
                : "d" (0) : "cc", "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

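/*
 * Editor's sketch (illustrative, not part of the original file): the "mixed"
 * pattern described above. A writer that can race with readers running in
 * interrupt context must take the lock irq-safe; the interrupt-context
 * readers themselves need no irq protection. The lock name is hypothetical.
 */
#if 0
static DEFINE_RWLOCK(example_rwlock);

static void example_writer(void)        /* process context */
{
        write_lock_irq(&example_rwlock);
        /* ... modify the protected data ... */
        write_unlock_irq(&example_rwlock);
}

static void example_reader(void)        /* interrupt context */
{
        read_lock(&example_rwlock);     /* no need to mask interrupts */
        /* ... read the protected data ... */
        read_unlock(&example_rwlock);
}
#endif
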
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
        int old = ACCESS_ONCE(rw->lock);
        return likely(old >= 0 &&
                      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
        int old = ACCESS_ONCE(rw->lock);
        return likely(old == 0 &&
                      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}

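/*
 * Editor's note: the rwlock word encodes both roles in a single int. The
 * most significant bit (0x80000000) marks a writer, the lower 31 bits count
 * readers:
 *
 *      lock == 0               unlocked
 *      lock > 0                held by that many readers
 *      lock & 0x80000000       held (or being acquired) by a writer
 *
 * Hence the "old >= 0" reader test and the 0 -> 0x80000000 writer CAS above.
 */
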
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR     "lao"
#define __RAW_OP_AND    "lan"
#define __RAW_OP_ADD    "laa"

#define __RAW_LOCK(ptr, op_val, op_string)              \
({                                                      \
        int old_val;                                    \
                                                        \
        typecheck(int *, ptr);                          \
        asm volatile(                                   \
                op_string "     %0,%2,%1\n"             \
                "bcr    14,0\n"                         \
                : "=d" (old_val), "+Q" (*ptr)           \
                : "d" (op_val)                          \
                : "cc", "memory");                      \
        old_val;                                        \
})

#define __RAW_UNLOCK(ptr, op_val, op_string)            \
({                                                      \
        int old_val;                                    \
                                                        \
        typecheck(int *, ptr);                          \
        asm volatile(                                   \
                op_string "     %0,%2,%1\n"             \
                : "=d" (old_val), "+Q" (*ptr)           \
                : "d" (op_val)                          \
                : "cc", "memory");                      \
        old_val;                                        \
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        int old;

        old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
        if (old < 0)
                _raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        int old;

        old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
        if (old != 0)
                _raw_write_lock_wait(rw, old);
        rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        rw->owner = 0;
        __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                _raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        int old;

        do {
                old = ACCESS_ONCE(rw->lock);
        } while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}

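/*
 * Editor's note: without the z196 interlocked-access facility there is no
 * atomic fetch-and-add, so dropping a reader has to loop: reload the lock
 * word and retry the compare-and-swap until no other CPU has modified it
 * in between.
 */
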
static inline void arch_write_lock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw))
                _raw_write_lock_wait(rw);
        rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        typecheck(int, rw->lock);

        rw->owner = 0;
        asm volatile(
                "st     %1,%0\n"
                : "+Q" (rw->lock)
                : "d" (0)
                : "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                return _raw_read_trylock_retry(rw);
        return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
                return 0;
        rw->owner = SPINLOCK_LOCKVAL;
        return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */