// SPDX-License-Identifier: GPL-2.0
/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/alternative.h>
#include <asm/io.h>

int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = 1000;
	return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

struct spin_wait {
	struct spin_wait *next, *prev;
	int node_id;
} __aligned(32);

static DEFINE_PER_CPU_ALIGNED(struct spin_wait, spin_wait[4]);
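
/*
 * Note: arch_spin_lock_queued() below picks one of these four nodes via
 * S390_lowcore.spinlock_index++, i.e. one node per nesting level of the
 * lock slow path on a CPU. Four levels are presumably enough to cover
 * nested lock waits from task, softirq, hardirq and one further level
 * of interruption.
 */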

#define _Q_LOCK_CPU_OFFSET	0
#define _Q_LOCK_STEAL_OFFSET	16
#define _Q_TAIL_IDX_OFFSET	18
#define _Q_TAIL_CPU_OFFSET	20

#define _Q_LOCK_CPU_MASK	0x0000ffff
#define _Q_LOCK_STEAL_ADD	0x00010000
#define _Q_LOCK_STEAL_MASK	0x00030000
#define _Q_TAIL_IDX_MASK	0x000c0000
#define _Q_TAIL_CPU_MASK	0xfff00000

#define _Q_LOCK_MASK		(_Q_LOCK_CPU_MASK | _Q_LOCK_STEAL_MASK)
#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
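
/*
 * Layout of the 32-bit lock word, as encoded by the masks above:
 *   bits  0..15 (_Q_LOCK_CPU_MASK)  : owning CPU number + 1, 0 == unlocked
 *   bits 16..17 (_Q_LOCK_STEAL_MASK): lock steal counter
 *   bits 18..19 (_Q_TAIL_IDX_MASK)  : per-CPU index of the tail wait node
 *   bits 20..31 (_Q_TAIL_CPU_MASK)  : CPU number + 1 of the tail wait node
 */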

void arch_spin_lock_setup(int cpu)
{
	struct spin_wait *node;
	int ix;

	node = per_cpu_ptr(&spin_wait[0], cpu);
	for (ix = 0; ix < 4; ix++, node++) {
		memset(node, 0, sizeof(*node));
		node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) +
			(ix << _Q_TAIL_IDX_OFFSET);
	}
}

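/*
 * The two helpers below prefix the lock-word access with a NIAI
 * (next-instruction-access-intent) hint, emitted as the raw opcode
 * 0xb2fa and patched in via ALTERNATIVE() only when facility 49 is
 * installed. Going by the annotations in the asm, NIAI 4 marks the
 * subsequent load as a plain inspection of the lock word, while
 * NIAI 8 marks the subsequent compare-and-swap as an update.
 */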
static inline int arch_load_niai4(int *lock)
{
	int owner;

	asm_inline volatile(
		ALTERNATIVE("", ".long 0xb2fa0040", 49)	/* NIAI 4 */
		"	l	%0,%1\n"
		: "=d" (owner) : "Q" (*lock) : "memory");
	return owner;
}

static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
	int expected = old;

	asm_inline volatile(
		ALTERNATIVE("", ".long 0xb2fa0080", 49)	/* NIAI 8 */
		"	cs	%0,%3,%1\n"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return expected == old;
}

static inline struct spin_wait *arch_spin_decode_tail(int lock)
{
	int ix, cpu;

	ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
	cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
	return per_cpu_ptr(&spin_wait[ix], cpu - 1);
}

static inline int arch_spin_yield_target(int lock, struct spin_wait *node)
{
	if (lock & _Q_LOCK_CPU_MASK)
		return lock & _Q_LOCK_CPU_MASK;
	if (node == NULL || node->prev == NULL)
		return 0;	/* 0 -> no target cpu */
	while (node->prev)
		node = node->prev;
	return node->node_id >> _Q_TAIL_CPU_OFFSET;
}

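/*
 * Queued (MCS-style) slow path: each waiter spins on its own per-CPU
 * spin_wait node, while the lock word itself only carries the owner,
 * the steal counter and an encoding of the current queue tail.
 */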
static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
{
	struct spin_wait *node, *next;
	int lockval, ix, node_id, tail_id, old, new, owner, count;

	ix = S390_lowcore.spinlock_index++;
	barrier();
	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */
	node = this_cpu_ptr(&spin_wait[ix]);
	node->prev = node->next = NULL;
	node_id = node->node_id;

	/* Enqueue the node for this CPU in the spinlock wait queue */
	while (1) {
		old = READ_ONCE(lp->lock);
		if ((old & _Q_LOCK_CPU_MASK) == 0 &&
		    (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
			/*
			 * The lock is free but there may be waiters.
			 * With no waiters simply take the lock, if there
			 * are waiters try to steal the lock. The lock may
			 * be stolen three times before the next queued
			 * waiter will get the lock.
			 */
			new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
			if (__atomic_cmpxchg_bool(&lp->lock, old, new))
				/* Got the lock */
				goto out;
			/* lock passing in progress */
			continue;
		}
		/* Make the node of this CPU the new tail. */
		new = node_id | (old & _Q_LOCK_MASK);
		if (__atomic_cmpxchg_bool(&lp->lock, old, new))
			break;
	}
	/* Set the 'next' pointer of the tail node in the queue */
	tail_id = old & _Q_TAIL_MASK;
	if (tail_id != 0) {
		node->prev = arch_spin_decode_tail(tail_id);
		WRITE_ONCE(node->prev->next, node);
	}

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(old, node);
	if (owner && arch_vcpu_is_preempted(owner - 1))
		smp_yield_cpu(owner - 1);

	/* Spin on the CPU local node->prev pointer */
	if (tail_id != 0) {
		count = spin_retry;
		while (READ_ONCE(node->prev) != NULL) {
			if (count-- >= 0)
				continue;
			count = spin_retry;
			/* Query running state of lock holder again. */
			owner = arch_spin_yield_target(old, node);
			if (owner && arch_vcpu_is_preempted(owner - 1))
				smp_yield_cpu(owner - 1);
		}
	}

	/* Spin on the lock value in the spinlock_t */
	count = spin_retry;
	while (1) {
		old = READ_ONCE(lp->lock);
		owner = old & _Q_LOCK_CPU_MASK;
		if (!owner) {
			tail_id = old & _Q_TAIL_MASK;
			new = ((tail_id != node_id) ? tail_id : 0) | lockval;
			if (__atomic_cmpxchg_bool(&lp->lock, old, new))
				/* Got the lock */
				break;
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}

	/* Pass lock_spin job to next CPU in the queue */
	if (node_id && tail_id != node_id) {
		/* Wait until the next CPU has set up the 'next' pointer */
		while ((next = READ_ONCE(node->next)) == NULL)
			;
		next->prev = NULL;
	}

 out:
	S390_lowcore.spinlock_index--;
}

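/*
 * Classic slow path: spin directly on the lock word with NIAI hints,
 * preserving the tail bits of any queued waiters, and yield to the
 * lock holder's CPU when it does not appear to be running.
 */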
static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
{
	int lockval, old, new, owner, count;

	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
	if (owner && arch_vcpu_is_preempted(owner - 1))
		smp_yield_cpu(owner - 1);

	count = spin_retry;
	while (1) {
		old = arch_load_niai4(&lp->lock);
		owner = old & _Q_LOCK_CPU_MASK;
		/* Try to get the lock if it is free. */
		if (!owner) {
			new = (old & _Q_TAIL_MASK) | lockval;
			if (arch_cmpxchg_niai8(&lp->lock, old, new)) {
				/* Got the lock */
				return;
			}
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}
}

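/*
 * The queued variant is only used on dedicated CPUs. On shared,
 * over-committed CPUs strict FIFO queueing would make every waiter
 * dependent on possibly preempted virtual CPUs ahead of it, so the
 * classic lock with lock stealing is presumably the better choice
 * there.
 */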
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	if (test_cpu_flag(CIF_DEDICATED_CPU))
		arch_spin_lock_queued(lp);
	else
		arch_spin_lock_classic(lp);
}
EXPORT_SYMBOL(arch_spin_lock_wait);

int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

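/*
 * rwlock wait paths. Judging by the constants used below, rw->cnts
 * holds the reader count in its low bits, 0x10000 is the "write
 * locked" bit, and each waiting writer adds 0x20000.
 */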
void arch_read_lock_wait(arch_rwlock_t *rw)
{
	if (unlikely(in_interrupt())) {
		while (READ_ONCE(rw->cnts) & 0x10000)
			barrier();
		return;
	}

	/* Remove this reader again to allow recursive read locking */
	__atomic_add_const(-1, &rw->cnts);
	/* Put the reader into the wait queue */
	arch_spin_lock(&rw->wait);
	/* Now add this reader to the count value again */
	__atomic_add_const(1, &rw->cnts);
	/* Loop until the writer is done */
	while (READ_ONCE(rw->cnts) & 0x10000)
		barrier();
	arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_read_lock_wait);

void arch_write_lock_wait(arch_rwlock_t *rw)
{
	int old;

	/* Add this CPU to the write waiters */
	__atomic_add(0x20000, &rw->cnts);

	/* Put the writer into the wait queue */
	arch_spin_lock(&rw->wait);

	while (1) {
		old = READ_ONCE(rw->cnts);
		if ((old & 0x1ffff) == 0 &&
		    __atomic_cmpxchg_bool(&rw->cnts, old, old | 0x10000))
			/* Got the lock */
			break;
		barrier();
	}

	arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_write_lock_wait);

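/*
 * Directed-yield helper, presumably used by the generic relax/backoff
 * paths: if the lock owner does not appear to be running (or the
 * machine is not running in an LPAR), yield to the owner's CPU instead
 * of burning cycles.
 */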
void arch_spin_relax(arch_spinlock_t *lp)
{
	int cpu;

	cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
		return;
	smp_yield_cpu(cpu - 1);
}
EXPORT_SYMBOL(arch_spin_relax);