// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014,2018 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <longman@redhat.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */

#ifndef _GEN_PV_LOCK_SLOWPATH

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>

/*
 * Include queued spinlock statistics code
 */
#include "qspinlock_stat.h"

/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description for this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock; however, to
 * make it fit the 4 bytes we assume spinlock_t to be, and to preserve its
 * existing API, we must modify it.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these,
 * {tail, next->locked}, into a single u32 value.
 *
 * A spinlock disables recursion of its own context, and there is a limit to
 * the contexts that can nest: task, softirq, hardirq and nmi. As there are
 * at most 4 nesting levels, the nesting level can be encoded in a 2-bit
 * number, and the tail can be encoded by combining that 2-bit nesting level
 * with the cpu number. With one byte for the lock value and 3 bytes for the
 * tail, only a 32-bit word is needed. Even though we only need 1 bit for the
 * lock, we extend it to a full byte to achieve better performance for
 * architectures that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node, thereby avoiding the need to carry a node from lock to unlock and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 * atomic operations on smaller 8-bit and 16-bit data types.
 */
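
/*
 * For illustration only: with the common configuration where
 * CONFIG_NR_CPUS < 16K (so _Q_PENDING_BITS == 8, per the generic
 * qspinlock_types.h layout), the 32-bit lock word breaks down as:
 *
 *      bits  0- 7: locked byte
 *      bits  8-15: pending byte
 *      bits 16-17: tail index (the 2-bit nesting level)
 *      bits 18-31: tail cpu number + 1 (0 means "no tail")
 *
 * For example, a held lock with a second waiter queued from task context
 * on cpu 2 would carry tail == ((2 + 1) << 18) | (0 << 16) alongside
 * locked == 1. The exact offsets are an assumption of this example taken
 * from qspinlock_types.h, not something defined in this file.
 */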

#include "mcs_spinlock.h"
#define MAX_NODES	4

/*
 * On 64-bit architectures, the mcs_spinlock structure will be 16 bytes in
 * size and four of them will fit nicely in one 64-byte cacheline. For
 * pvqspinlock, however, we need more space for extra data. To accommodate
 * that, we insert two more long words to pad it up to 32 bytes. IOW, only
 * two of them can fit in a cacheline in this case. That is OK as it is rare
 * to have more than 2 levels of slowpath nesting in actual use. We don't
 * want to penalize pvqspinlocks to optimize for a rare case in native
 * qspinlocks.
 */
struct qnode {
	struct mcs_spinlock mcs;
#ifdef CONFIG_PARAVIRT_SPINLOCKS
	long reserved[2];
#endif
};
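
/*
 * Quick size check of the above (illustrative; assumes the mcs_spinlock
 * layout from mcs_spinlock.h, i.e. one pointer plus two ints, on a 64-bit
 * kernel):
 *
 *      native:   sizeof(struct qnode) == 16, 4 * 16 == 64 -> one cacheline
 *      paravirt: sizeof(struct qnode) == 32, 2 * 32 == 64 -> two per cacheline
 *
 * which is the arithmetic behind the comment above.
 */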

/*
 * The pending bit spinning loop count.
 * This heuristic is used to limit the number of lockword accesses
 * made by atomic_cond_read_relaxed when waiting for the lock to
 * transition out of the "== _Q_PENDING_VAL" state. We don't spin
 * indefinitely because there's no guarantee that we'll make forward
 * progress.
 */
#ifndef _Q_PENDING_LOOPS
#define _Q_PENDING_LOOPS	1
#endif

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct qnode, qnodes[MAX_NODES]);

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline __pure u32 encode_tail(int cpu, int idx)
{
	u32 tail;

	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return tail;
}

static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
{
	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (tail &  _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

	return per_cpu_ptr(&qnodes[idx].mcs, cpu);
}
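
/*
 * Round-trip example (illustration only, assuming the _Q_PENDING_BITS == 8
 * offsets mentioned earlier, i.e. _Q_TAIL_IDX_OFFSET == 16 and
 * _Q_TAIL_CPU_OFFSET == 18):
 *
 *      encode_tail(2, 1) == (3 << 18) | (1 << 16) == 0xd0000
 *      decode_tail(0xd0000) == per_cpu_ptr(&qnodes[1].mcs, 2)
 *
 * The +1/-1 on the cpu number is what lets tail == 0 mean "queue empty"
 * even for cpu 0, idx 0.
 */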

static inline __pure
struct mcs_spinlock *grab_mcs_node(struct mcs_spinlock *base, int idx)
{
	return &((struct qnode *)base + idx)->mcs;
}

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

#if _Q_PENDING_BITS == 8
/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
	WRITE_ONCE(lock->pending, 0);
}

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
}
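
/*
 * Note (illustrative): with _Q_PENDING_BITS == 8 the locked and pending
 * bytes together form the 16-bit lock->locked_pending field, so the single
 * halfword store above performs the *,1,0 -> *,0,1 transition atomically;
 * writing _Q_LOCKED_VAL (== 1) yields locked == 1 and pending == 0 in one
 * go, with the endian-specific field placement handled by the qspinlock
 * type definitions.
 */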

/*
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail), which heads an address dependency
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	/*
	 * We can use relaxed semantics since the caller ensures that the
	 * MCS node is properly initialized before updating the tail.
	 */
	return (u32)xchg_relaxed(&lock->tail,
				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
	atomic_andnot(_Q_PENDING_VAL, &lock->val);
}

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new, val = atomic_read(&lock->val);

	for (;;) {
		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
		/*
		 * We can use relaxed semantics since the caller ensures that
		 * the MCS node is properly initialized before updating the
		 * tail.
		 */
		old = atomic_cmpxchg_relaxed(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}
	return old;
}
#endif /* _Q_PENDING_BITS == 8 */
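
/*
 * The two xchg_tail() variants above implement the same contract: with an
 * 8-bit pending field the tail lives in its own 16-bit halfword, so a plain
 * xchg() on lock->tail suffices; with a 1-bit pending field the tail shares
 * the word with the locked/pending bits, so a cmpxchg() loop is used that
 * rewrites only the tail while preserving whatever _Q_LOCKED_PENDING_MASK
 * bits happen to be set at that moment.
 */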

/**
 * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
 * @lock : Pointer to queued spinlock structure
 * Return: The previous lock value
 *
 * *,*,* -> *,1,*
 */
#ifndef queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
}
#endif

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
	WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
}


/*
 * Generate the native code for queued_spin_lock_slowpath(); provide NOPs for
 * all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
					   struct mcs_spinlock *prev) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
					   struct mcs_spinlock *node) { }
static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
						   struct mcs_spinlock *node)
						   { return 0; }

#define pv_enabled()		false

#define pv_init_node		__pv_init_node
#define pv_wait_node		__pv_wait_node
#define pv_kick_node		__pv_kick_node
#define pv_wait_head_or_lock	__pv_wait_head_or_lock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
#endif

#endif /* _GEN_PV_LOCK_SLOWPATH */

/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0)   --'          |  :
 *   queue               :       | ^--'                           |  :
 *                       :       v                                |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	struct mcs_spinlock *prev, *next, *node;
	u32 old, tail;
	int idx;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	if (pv_enabled())
		goto pv_queue;

	if (virt_spin_lock(lock))
		return;

	/*
	 * Wait for in-progress pending->locked hand-overs with a bounded
	 * number of spins so that we guarantee forward progress.
	 *
	 * 0,1,0 -> 0,0,1
	 */
	if (val == _Q_PENDING_VAL) {
		int cnt = _Q_PENDING_LOOPS;
		val = atomic_cond_read_relaxed(&lock->val,
					       (VAL != _Q_PENDING_VAL) || !cnt--);
	}

	/*
	 * If we observe any contention; queue.
	 */
	if (val & ~_Q_LOCKED_MASK)
		goto queue;

	/*
	 * trylock || pending
	 *
	 * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock
	 */
	val = queued_fetch_set_pending_acquire(lock);

	/*
	 * If we observe contention, there is a concurrent locker.
	 *
	 * Undo and queue; our setting of PENDING might have made the
	 * n,0,0 -> 0,0,0 transition fail and it will now be waiting
	 * on @next to become !NULL.
	 */
	if (unlikely(val & ~_Q_LOCKED_MASK)) {

		/* Undo PENDING if we set it. */
		if (!(val & _Q_PENDING_MASK))
			clear_pending(lock);

		goto queue;
	}

	/*
	 * We're pending, wait for the owner to go away.
	 *
	 * 0,1,1 -> 0,1,0
	 *
	 * this wait loop must be a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because not all
	 * clear_pending_set_locked() implementations imply full
	 * barriers.
	 */
	if (val & _Q_LOCKED_MASK)
		atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));

	/*
	 * take ownership and clear the pending bit.
	 *
	 * 0,1,0 -> 0,0,1
	 */
	clear_pending_set_locked(lock);
	lockevent_inc(lock_pending);
	return;

	/*
	 * End of pending bit optimistic spinning and beginning of MCS
	 * queuing.
	 */
queue:
	lockevent_inc(lock_slowpath);
pv_queue:
	node = this_cpu_ptr(&qnodes[0].mcs);
	idx = node->count++;
	tail = encode_tail(smp_processor_id(), idx);
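
	/*
	 * Illustration (not from the original comments): the per-CPU index
	 * works because contexts on one CPU only nest. If a task is in this
	 * slowpath with idx == 0 and a hardirq arrives whose handler
	 * contends on another qspinlock, the handler sees node->count == 1
	 * and uses qnodes[1] (idx == 1); its slowpath decrements the count
	 * again before the handler returns, so at most MAX_NODES (4) nodes
	 * are ever live per CPU: task, softirq, hardirq and nmi.
	 */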

	/*
	 * 4 nodes are allocated based on the assumption that there will
	 * not be nested NMIs taking spinlocks. That may not be true in
	 * some architectures even though the chance of needing more than
	 * 4 nodes will still be extremely unlikely. When that happens,
	 * we fall back to spinning on the lock directly without using
	 * any MCS node. This is not the most elegant solution, but is
	 * simple enough.
	 */
	if (unlikely(idx >= MAX_NODES)) {
		lockevent_inc(lock_no_node);
		while (!queued_spin_trylock(lock))
			cpu_relax();
		goto release;
	}

	node = grab_mcs_node(node, idx);

	/*
	 * Keep counts of non-zero index values:
	 */
	lockevent_cond_inc(lock_use_node2 + idx - 1, idx);

	/*
	 * Ensure that we increment the head node->count before initialising
	 * the actual node. If the compiler is kind enough to reorder these
	 * stores, then an IRQ could overwrite our assignments.
	 */
	barrier();

	node->locked = 0;
	node->next = NULL;
	pv_init_node(node);

	/*
	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
	 * attempt the trylock once more in the hope someone let go while we
	 * weren't watching.
	 */
	if (queued_spin_trylock(lock))
		goto release;

	/*
	 * Ensure that the initialisation of @node is complete before we
	 * publish the updated tail via xchg_tail() and potentially link
	 * @node into the waitqueue via WRITE_ONCE(prev->next, node) below.
	 */
	smp_wmb();
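
	/*
	 * Illustrative scenario for the barrier above (a sketch added for
	 * exposition, not from the original comments): without smp_wmb(),
	 * this CPU's node->next = NULL store could become visible after the
	 * tail publication below. A later arrival could then decode our
	 * tail and link itself via WRITE_ONCE(prev->next, node), only for
	 * our late NULL store to wipe that link and strand the new waiter.
	 * Ordering the node initialisation before xchg_tail() rules this
	 * out.
	 */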

	/*
	 * Publish the updated tail.
	 * We have already touched the queueing cacheline; don't bother with
	 * pending stuff.
	 *
	 * p,*,* -> n,*,*
	 */
	old = xchg_tail(lock, tail);
	next = NULL;

	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_MASK) {
		prev = decode_tail(old);

		/* Link @node into the waitqueue. */
		WRITE_ONCE(prev->next, node);

		pv_wait_node(node, prev);
		arch_mcs_spin_lock_contended(&node->locked);

		/*
		 * While waiting for the MCS lock, the next pointer may have
		 * been set by another lock waiter. We optimistically load
		 * the next pointer & prefetch the cacheline for writing
		 * to reduce latency in the upcoming MCS unlock operation.
		 */
		next = READ_ONCE(node->next);
		if (next)
			prefetchw(next);
	}

	/*
	 * we're at the head of the waitqueue, wait for the owner & pending to
	 * go away.
	 *
	 * *,x,y -> *,0,0
	 *
	 * this wait loop must use a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because the set_locked() function below
	 * does not imply a full barrier.
	 *
	 * The PV pv_wait_head_or_lock function, if active, will acquire
	 * the lock and return a non-zero value. So we have to skip the
	 * atomic_cond_read_acquire() call. As the next PV queue head hasn't
	 * been designated yet, there is no way for the locked value to become
	 * _Q_SLOW_VAL. So both the set_locked() and the
	 * atomic_cmpxchg_relaxed() calls will be safe.
	 *
	 * If PV isn't active, 0 will be returned instead.
	 *
	 */
	if ((val = pv_wait_head_or_lock(lock, node)))
		goto locked;

	val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));

locked:
	/*
	 * claim the lock:
	 *
	 * n,0,0 -> 0,0,1 : lock, uncontended
	 * *,*,0 -> *,*,1 : lock, contended
	 *
	 * If the queue head is the only one in the queue (lock value == tail)
	 * and nobody is pending, clear the tail code and grab the lock.
	 * Otherwise, we only need to grab the lock.
	 */

	/*
	 * In the PV case we might already have _Q_LOCKED_VAL set, because
	 * of lock stealing; therefore we must also allow:
	 *
	 * n,0,1 -> 0,0,1
	 *
	 * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
	 * above wait condition, therefore any concurrent setting of
	 * PENDING will make the uncontended transition fail.
	 */
	if ((val & _Q_TAIL_MASK) == tail) {
		if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
			goto release; /* No contention */
	}
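
	/*
	 * Example of how the uncontended claim above can fail (illustration
	 * only, using the same (tail, pending, locked) notation as the
	 * comments): we read val == (our tail, 0, 0), but before the
	 * cmpxchg another CPU runs xchg_tail() and queues behind us, so
	 * lock->val no longer matches and the cmpxchg fails. We then take
	 * the contended path below: set only the locked byte, leave the
	 * tail for the new waiter, and hand the MCS lock on via @next.
	 */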

	/*
	 * Either somebody is queued behind us or _Q_PENDING_VAL got set
	 * which will then detect the remaining tail and queue behind us
	 * ensuring we'll see a @next.
	 */
	set_locked(lock);

	/*
	 * contended path; wait for next if not observed yet, release.
	 */
	if (!next)
		next = smp_cond_load_relaxed(&node->next, (VAL));

	arch_mcs_spin_unlock_contended(&next->locked);
	pv_kick_node(lock, next);

release:
	/*
	 * release the node
	 */
	__this_cpu_dec(qnodes[0].mcs.count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);

/*
 * Generate the paravirt code for the queued spinlock slowpaths.
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef  pv_enabled
#define pv_enabled()	true

#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head_or_lock

#undef  queued_spin_lock_slowpath
#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath

#include "qspinlock_paravirt.h"
#include "qspinlock.c"

#endif