/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014,2018 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <longman@redhat.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */
#ifndef _GEN_PV_LOCK_SLOWPATH

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>

/*
 * Include queued spinlock statistics code
 */
#include "qspinlock_stat.h"
/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description for this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock; however, to
 * make it fit the 4 bytes we assume spinlock_t to be, and preserve its
 * existing API, we must modify it somehow.
 *
 * In particular; where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending (next->locked), we compress both these: {tail,
 * next->locked} into a single u32 value.
 *
 * Since a spinlock disables recursion of its own context and there is a limit
 * to the contexts that can nest (task, softirq, hardirq, nmi), there are at
 * most 4 nesting levels, which can be encoded in a 2-bit number. We can thus
 * encode the tail by combining the 2-bit nesting level with the cpu number.
 * With one byte for the lock value and 3 bytes for the tail, only a 32-bit
 * word is now needed. Even though we only need 1 bit for the lock, we extend
 * it to a full byte to achieve better performance for architectures that
 * support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node; whereby avoiding the need to carry a node from lock to unlock, and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 *      atomic operations on smaller 8-bit and 16-bit data types.
 */
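/*
 * For illustration, with the default configuration (NR_CPUS < 16K) the
 * 32-bit lock word is laid out roughly as follows (see the layout described
 * in asm-generic/qspinlock_types.h):
 *
 *	 0- 7: locked byte
 *	    8: pending bit
 *	 9-15: unused
 *	16-17: tail index (2-bit nesting level)
 *	18-31: tail cpu (+1, so that 0 means "no tail")
 *
 * When NR_CPUS >= 16K the pending field shrinks to a single bit and the
 * tail takes bits 9-31 instead.
 */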
#include "mcs_spinlock.h"
#define MAX_NODES	4

/*
 * On 64-bit architectures, the mcs_spinlock structure will be 16 bytes in
 * size and four of them will fit nicely in one 64-byte cacheline. For
 * pvqspinlock, however, we need more space for extra data. To accommodate
 * that, we insert two more long words to pad it up to 32 bytes. IOW, only
 * two of them can fit in a cacheline in this case. That is OK as it is rare
 * to have more than 2 levels of slowpath nesting in actual use. We don't
 * want to penalize pvqspinlocks to optimize for a rare case in native
 * qspinlocks.
 */
struct qnode {
	struct mcs_spinlock mcs;
#ifdef CONFIG_PARAVIRT_SPINLOCKS
	long reserved[2];
#endif
};
/*
 * The pending bit spinning loop count.
 * This heuristic is used to limit the number of lockword accesses
 * made by atomic_cond_read_relaxed when waiting for the lock to
 * transition out of the "== _Q_PENDING_VAL" state. We don't spin
 * indefinitely because there's no guarantee that we'll make forward
 * progress.
 */
#ifndef _Q_PENDING_LOOPS
#define _Q_PENDING_LOOPS	1
#endif
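/*
 * Architectures may override _Q_PENDING_LOOPS from their <asm/qspinlock.h>;
 * x86, for example, is known to raise this bound (to 1 << 9 iterations at
 * the time of writing) because a pending->locked hand-over there is expected
 * to complete quickly relative to falling back to the MCS queue.
 */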
/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct qnode, qnodes[MAX_NODES]);
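/*
 * Illustrative nesting scenario: a task contends on lock A and occupies
 * qnodes[0] on its CPU; a hardirq arriving on the same CPU while that node
 * is still in use then contends on lock B and uses qnodes[1]. The count
 * field of qnodes[0].mcs tracks how many of the four per-CPU nodes are
 * currently in use.
 */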
/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline __pure u32 encode_tail(int cpu, int idx)
{
	u32 tail;

	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return tail;
}
static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
{
	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

	return per_cpu_ptr(&qnodes[idx].mcs, cpu);
}
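/*
 * Worked example (assuming the default offsets of _Q_TAIL_IDX_OFFSET == 16
 * and _Q_TAIL_CPU_OFFSET == 18, i.e. NR_CPUS < 16K):
 *
 *	encode_tail(2, 1) == ((2 + 1) << 18) | (1 << 16) == 0x000d0000
 *
 * decode_tail(0x000d0000) then recovers cpu = 3 - 1 = 2 and idx = 1,
 * i.e. &qnodes[1].mcs on CPU 2.
 */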
static inline __pure
struct mcs_spinlock *grab_mcs_node(struct mcs_spinlock *base, int idx)
{
	return &((struct qnode *)base + idx)->mcs;
}

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
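/*
 * The field accesses below (lock->pending, lock->locked_pending, lock->tail
 * and lock->locked) rely on struct qspinlock overlaying the 32-bit atomic
 * val with byte/halfword views. On a little-endian machine the layout is
 * roughly (a sketch of asm-generic/qspinlock_types.h, not verbatim):
 *
 *	union {
 *		atomic_t val;
 *		struct { u8  locked; u8  pending; };
 *		struct { u16 locked_pending; u16 tail; };
 *	};
 *
 * Big-endian machines order the sub-fields the other way around so that the
 * same bit positions within val are used.
 */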
#if _Q_PENDING_BITS == 8
/*
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
	WRITE_ONCE(lock->pending, 0);
}

/*
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
}

/*
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail), which heads an address dependency
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	/*
	 * We can use relaxed semantics since the caller ensures that the
	 * MCS node is properly initialized before updating the tail.
	 */
	return (u32)xchg_relaxed(&lock->tail,
				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}
#else /* _Q_PENDING_BITS == 8 */

/*
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
	atomic_andnot(_Q_PENDING_VAL, &lock->val);
}

/*
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}

/*
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new, val = atomic_read(&lock->val);

	for (;;) {
		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
		/*
		 * We can use relaxed semantics since the caller ensures that
		 * the MCS node is properly initialized before updating the
		 * tail.
		 */
		old = atomic_cmpxchg_relaxed(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}
	return old;
}
#endif /* _Q_PENDING_BITS == 8 */
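/*
 * The two variants above differ only in how the sub-fields can be addressed:
 * with an 8-bit pending byte (NR_CPUS < 16K) the tail occupies its own
 * 16-bit halfword and can be written or exchanged directly, whereas with a
 * 1-bit pending field the tail and pending bits share bytes, so every update
 * has to be a read-modify-write (andnot/add/cmpxchg) on the whole 32-bit
 * word.
 */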
/*
 * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
 * @lock : Pointer to queued spinlock structure
 * Return: The previous lock value
 *
 * *,*,* -> *,1,*
 */
#ifndef queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
}
#endif
/*
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
	WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
}
/*
 * Generate the native code for queued_spin_unlock_slowpath(); provide NOPs
 * for all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
					   struct mcs_spinlock *prev) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
					   struct mcs_spinlock *node) { }
static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
						   struct mcs_spinlock *node)
						   { return 0; }

#define pv_enabled()		false

#define pv_init_node		__pv_init_node
#define pv_wait_node		__pv_wait_node
#define pv_kick_node		__pv_kick_node
#define pv_wait_head_or_lock	__pv_wait_head_or_lock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
#endif

#endif /* _GEN_PV_LOCK_SLOWPATH */
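/*
 * Note: when CONFIG_PARAVIRT_SPINLOCKS is enabled, this file is compiled a
 * second time (see the _GEN_PV_LOCK_SLOWPATH block at the bottom, which
 * re-includes "qspinlock.c") with the pv_*() NOPs above replaced by the real
 * paravirt callbacks, producing __pv_queued_spin_lock_slowpath() alongside
 * native_queued_spin_lock_slowpath().
 */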
/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'            |  :
 *   queue               :       | ^--'                           |  :
 *                       :       v                                |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	struct mcs_spinlock *prev, *next, *node;
	u32 old, tail;
	int idx;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	if (pv_enabled())
		goto pv_queue;

	if (virt_spin_lock(lock))
		return;

	/*
	 * Wait for in-progress pending->locked hand-overs with a bounded
	 * number of spins so that we guarantee forward progress.
	 *
	 * 0,1,0 -> 0,0,1
	 */
	if (val == _Q_PENDING_VAL) {
		int cnt = _Q_PENDING_LOOPS;
		val = atomic_cond_read_relaxed(&lock->val,
					       (VAL != _Q_PENDING_VAL) || !cnt--);
	}
	/*
	 * If we observe any contention; queue.
	 */
	if (val & ~_Q_LOCKED_MASK)
		goto queue;

	/*
	 * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock
	 */
	val = queued_fetch_set_pending_acquire(lock);

	/*
	 * If we observe contention, there is a concurrent locker.
	 *
	 * Undo and queue; our setting of PENDING might have made the
	 * n,0,0 -> 0,0,0 transition fail and it will now be waiting
	 * on @next to become !NULL.
	 */
	if (unlikely(val & ~_Q_LOCKED_MASK)) {

		/* Undo PENDING if we set it. */
		if (!(val & _Q_PENDING_MASK))
			clear_pending(lock);

		goto queue;
	}
	/*
	 * We're pending, wait for the owner to go away.
	 *
	 * 0,1,1 -> 0,1,0
	 *
	 * this wait loop must be a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because not all
	 * clear_pending_set_locked() implementations imply full
	 * barriers.
	 */
	if (val & _Q_LOCKED_MASK)
		atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));

	/*
	 * take ownership and clear the pending bit.
	 *
	 * 0,1,0 -> 0,0,1
	 */
	clear_pending_set_locked(lock);
	lockevent_inc(lock_pending);
	return;
	/*
	 * End of pending bit optimistic spinning and beginning of MCS
	 * queueing.
	 */
queue:
	lockevent_inc(lock_slowpath);
pv_queue:
	node = this_cpu_ptr(&qnodes[0].mcs);
	idx = node->count++;
	tail = encode_tail(smp_processor_id(), idx);
	/*
	 * 4 nodes are allocated based on the assumption that there will
	 * not be nested NMIs taking spinlocks. That may not be true in
	 * some architectures even though the chance of needing more than
	 * 4 nodes will still be extremely unlikely. When that happens,
	 * we fall back to spinning on the lock directly without using
	 * any MCS node. This is not the most elegant solution, but is
	 * simple enough.
	 */
	if (unlikely(idx >= MAX_NODES)) {
		lockevent_inc(lock_no_node);
		while (!queued_spin_trylock(lock))
			cpu_relax();
		goto release;
	}

	node = grab_mcs_node(node, idx);

	/*
	 * Keep counts of non-zero index values:
	 */
	lockevent_cond_inc(lock_use_node2 + idx - 1, idx);
	/*
	 * Ensure that we increment the head node->count before initialising
	 * the actual node. If the compiler is kind enough to reorder these
	 * stores, then an IRQ could overwrite our assignments.
	 */
	barrier();

	node->locked = 0;
	node->next = NULL;
	pv_init_node(node);

	/*
	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
	 * attempt the trylock once more in the hope someone let go while we
	 * weren't watching.
	 */
	if (queued_spin_trylock(lock))
		goto release;

	/*
	 * Ensure that the initialisation of @node is complete before we
	 * publish the updated tail via xchg_tail() and potentially link
	 * @node into the waitqueue via WRITE_ONCE(prev->next, node) below.
	 */
	smp_wmb();

	/*
	 * Publish the updated tail.
	 * We have already touched the queueing cacheline; don't bother with
	 * pending stuff.
	 *
	 * p,*,* -> n,*,*
	 */
	old = xchg_tail(lock, tail);
	next = NULL;
	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_MASK) {
		prev = decode_tail(old);

		/* Link @node into the waitqueue. */
		WRITE_ONCE(prev->next, node);

		pv_wait_node(node, prev);
		arch_mcs_spin_lock_contended(&node->locked);

		/*
		 * While waiting for the MCS lock, the next pointer may have
		 * been set by another lock waiter. We optimistically load
		 * the next pointer & prefetch the cacheline for writing
		 * to reduce latency in the upcoming MCS unlock operation.
		 */
		next = READ_ONCE(node->next);
		if (next)
			prefetchw(next);
	}
	/*
	 * we're at the head of the waitqueue, wait for the owner & pending to
	 * go away.
	 *
	 * *,x,y -> *,0,0
	 *
	 * this wait loop must use a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because the set_locked() function below
	 * does not imply a full barrier.
	 *
	 * The PV pv_wait_head_or_lock function, if active, will acquire
	 * the lock and return a non-zero value. So we have to skip the
	 * atomic_cond_read_acquire() call. As the next PV queue head hasn't
	 * been designated yet, there is no way for the locked value to become
	 * _Q_SLOW_VAL. So both the set_locked() and the
	 * atomic_cmpxchg_relaxed() calls will be safe.
	 *
	 * If PV isn't active, 0 will be returned instead.
	 */
	if ((val = pv_wait_head_or_lock(lock, node)))
		goto locked;

	val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));
locked:
	/*
	 * claim the lock:
	 *
	 * n,0,0 -> 0,0,1 : lock, uncontended
	 * *,*,0 -> *,*,1 : lock, contended
	 *
	 * If the queue head is the only one in the queue (lock value == tail)
	 * and nobody is pending, clear the tail code and grab the lock.
	 * Otherwise, we only need to grab the lock.
	 */

	/*
	 * In the PV case we might already have _Q_LOCKED_VAL set, because
	 * of lock stealing; therefore we must also allow:
	 *
	 * n,0,1 -> 0,0,1
	 *
	 * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
	 *       above wait condition, therefore any concurrent setting of
	 *       PENDING will make the uncontended transition fail.
	 */
	if ((val & _Q_TAIL_MASK) == tail) {
		if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
			goto release; /* No contention */
	}

	/*
	 * Either somebody is queued behind us or _Q_PENDING_VAL got set
	 * which will then detect the remaining tail and queue behind us
	 * ensuring we'll see a @next.
	 */
	set_locked(lock);
	/*
	 * contended path; wait for next if not observed yet, release.
	 */
	if (!next)
		next = smp_cond_load_relaxed(&node->next, (VAL));

	arch_mcs_spin_unlock_contended(&next->locked);
	pv_kick_node(lock, next);

release:
	/*
	 * release the node
	 */
	__this_cpu_dec(qnodes[0].mcs.count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
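/*
 * For reference, the slowpath above is only entered from the generic fast
 * path in <asm-generic/qspinlock.h>, which looks roughly like this (a
 * sketch, not verbatim):
 *
 *	static __always_inline void queued_spin_lock(struct qspinlock *lock)
 *	{
 *		int val = 0;
 *
 *		if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val,
 *						      _Q_LOCKED_VAL)))
 *			return;
 *
 *		queued_spin_lock_slowpath(lock, val);
 *	}
 *
 * so the @val passed in is the lock word observed by the failed cmpxchg.
 */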
/*
 * Generate the paravirt code for queued_spin_unlock_slowpath().
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef  pv_enabled
#define pv_enabled()	true

#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head_or_lock

#undef  queued_spin_lock_slowpath
#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath

#include "qspinlock_paravirt.h"
#include "qspinlock.c"

#endif