/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */
#ifndef _GEN_PV_LOCK_SLOWPATH
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>
/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description for this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock; however, to
 * make it fit the 4 bytes we assume spinlock_t to be, and to preserve its
 * existing API, we must modify it somehow.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these:
 * {tail, next->locked} into a single u32 value.
 *
 * A spinlock disables recursion of its own context, and there is a limit to
 * the contexts that can nest; namely: task, softirq, hardirq, nmi. As there
 * are at most 4 nesting levels, the nesting level can be encoded by a 2-bit
 * number, and the tail is encoded by combining that 2-bit nesting level with
 * the cpu number. With one byte for the lock value and 3 bytes for the tail,
 * only a 32-bit word is needed. Even though we only need 1 bit for the lock,
 * we extend it to a full byte to achieve better performance on architectures
 * that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node, thereby avoiding the need to carry a node from lock to unlock and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 *      atomic operations on smaller 8-bit and 16-bit data types.
 */
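
/*
 * Illustrative example (assuming the usual bit layout from
 * qspinlock_types.h: locked byte in bits 0-7, pending bit at bit 8 widened
 * to a full byte, tail index in bits 16-17 and tail cpu in bits 18-31 when
 * NR_CPUS < 16K): a queue tail for cpu 2 at nesting level 1 would be
 *
 *	tail = ((2 + 1) << _Q_TAIL_CPU_OFFSET) | (1 << _Q_TAIL_IDX_OFFSET);
 *
 * leaving the low byte of the word free for the lock value itself.
 */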
#include "mcs_spinlock.h"
#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define MAX_NODES	8
#else
#define MAX_NODES	4
#endif
/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
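
/*
 * A rough sanity check of the "one cacheline" claim above (assuming the
 * mcs_spinlock layout from mcs_spinlock.h: a next pointer plus two ints):
 * 16 bytes per node * 4 nodes = 64 bytes on a 64-bit architecture; the PV
 * case doubles that to two cachelines, as noted above.
 */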
/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline __pure u32 encode_tail(int cpu, int idx)
{
	u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(idx > 3);
#endif
	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return tail;
}
static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
{
	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

	return per_cpu_ptr(&mcs_nodes[idx], cpu);
}
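
/*
 * Illustrative example of the "+1" above: an empty queue is tail == 0, so
 * cpu 0 at idx 0 must not also encode to 0. encode_tail(0, 0) therefore
 * yields 1 << _Q_TAIL_CPU_OFFSET, and decode_tail() undoes the increment to
 * get back to per-CPU node 0 of cpu 0.
 */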
#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
/*
 * By using the whole 2nd least significant byte for the pending bit, we
 * can allow better optimization of the lock acquisition for the pending
 * bit holder.
 *
 * This internal structure is also used by the set_locked function which
 * is not restricted to _Q_PENDING_BITS == 8.
 */
struct __qspinlock {
	union {
		atomic_t val;
#ifdef __LITTLE_ENDIAN
		struct {
			u8	locked;
			u8	pending;
		};
		struct {
			u16	locked_pending;
			u16	tail;
		};
#else
		struct {
			u16	tail;
			u16	locked_pending;
		};
		struct {
			u8	reserved[2];
			u8	pending;
			u8	locked;
		};
#endif
	};
};
#if _Q_PENDING_BITS == 8
/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
}
/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	struct __qspinlock *l = (void *)lock;

	/*
	 * Use release semantics to make sure that the MCS node is properly
	 * initialized before changing the tail code.
	 */
	return (u32)xchg_release(&l->tail,
				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}
#else /* _Q_PENDING_BITS == 8 */
/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}
/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new, val = atomic_read(&lock->val);

	for (;;) {
		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
		/*
		 * Use release semantics to make sure that the MCS node is
		 * properly initialized before changing the tail code.
		 */
		old = atomic_cmpxchg_release(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}
	return old;
}
#endif /* _Q_PENDING_BITS == 8 */
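
/*
 * Worked illustration of the two variants above: with _Q_PENDING_BITS == 8
 * the pending bit owns a whole byte, so the *,1,0 -> *,0,1 hand-over is a
 * single 16-bit store of _Q_LOCKED_VAL over the {locked, pending} halfword,
 * and xchg_tail() is a plain xchg_release of the tail halfword. With a
 * narrower pending bit, the same transition must instead be an atomic add
 * of (-_Q_PENDING_VAL + _Q_LOCKED_VAL) on the full word, and xchg_tail()
 * becomes a cmpxchg loop that preserves the locked and pending bits.
 */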
/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}
/*
 * Generate the native code for queued_spin_lock_slowpath(); provide NOPs for
 * all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
					   struct mcs_spinlock *prev) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
					   struct mcs_spinlock *node) { }
static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
						   struct mcs_spinlock *node)
						   { return 0; }

#define pv_enabled()		false

#define pv_init_node		__pv_init_node
#define pv_wait_node		__pv_wait_node
#define pv_kick_node		__pv_kick_node
#define pv_wait_head_or_lock	__pv_wait_head_or_lock
#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
#endif
/*
 * Various notes on spin_is_locked() and spin_unlock_wait(), which are
 * 'interesting' functions:
 *
 * PROBLEM: some architectures have an interesting issue with atomic ACQUIRE
 * operations in that the ACQUIRE applies to the LOAD _not_ the STORE (ARM64,
 * PPC). Also, qspinlock has a similar issue per construction: the setting of
 * the locked byte can be unordered with respect to acquiring the lock proper.
 *
 * This gets to be 'interesting' in the following cases, where the /should/s
 * end up false because of this issue.
 *
 *
 * CASE 1:
 *
 * The spin_is_locked() correctness issue comes from something like:
 *
 *   CPU0				CPU1
 *
 *   global_lock();			local_lock(i)
 *     spin_lock(&G)			  spin_lock(&L[i])
 *     for (i)				  if (!spin_is_locked(&G)) {
 *       spin_unlock_wait(&L[i]);	    smp_acquire__after_ctrl_dep();
 *					    return;
 *					  }
 *					  // deal with fail
 *
 * Where it is important CPU1 sees G locked or CPU0 sees L[i] locked such
 * that there is exclusion between the two critical sections.
 *
 * The load from spin_is_locked(&G) /should/ be constrained by the ACQUIRE from
 * spin_lock(&L[i]), and similarly the load(s) from spin_unlock_wait(&L[i])
 * /should/ be constrained by the ACQUIRE from spin_lock(&G).
 *
 * Similarly, later accesses are constrained by the ACQUIRE from CTRL+RMB.
 *
 *
 * CASE 2:
 *
 * For spin_unlock_wait() there is a second correctness issue, namely:
 *
 *   CPU0				CPU1
 *
 *   flag = set;
 *   smp_mb();				spin_lock(&l)
 *   spin_unlock_wait(&l);		if (!flag)
 *					  // add to lockless list
 *					spin_unlock(&l);
 *   // iterate lockless list
 *
 * Which wants to ensure that CPU1 will stop adding entries to the list and
 * CPU0 will observe the last entry on the list (if spin_unlock_wait() had
 * ACQUIRE semantics etc..)
 *
 * Where flag /should/ be ordered against the locked store of l.
 */
/*
 * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
 * issuing an _unordered_ store to set _Q_LOCKED_VAL.
 *
 * This means that the store can be delayed, but no later than the
 * store-release from the unlock. This means that simply observing
 * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
 *
 * There are two paths that can issue the unordered store:
 *
 *  (1) clear_pending_set_locked():	*,1,0 -> *,0,1
 *
 *  (2) set_locked():			t,0,0 -> t,0,1 ; t != 0
 *      atomic_cmpxchg_relaxed():	t,0,0 -> 0,0,1
 *
 * However, in both cases we have other !0 state we've set before to queue;
 * the question is whether an observer is guaranteed to see it.
 *
 * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL; our
 * load is constrained by that ACQUIRE to not pass before that, and thus must
 * observe the store.
 *
 * For (2) we have a more interesting scenario. We enqueue ourselves using
 * xchg_tail(), which ends up being a RELEASE. This in itself is not
 * sufficient, however that is followed by an smp_cond_load_acquire() on the
 * same word, giving a RELEASE->ACQUIRE ordering. This again constrains our
 * load and guarantees we must observe that store.
 *
 * Therefore both cases have other !0 state that is observable before the
 * unordered locked byte store comes through. This means we can use that to
 * wait for the lock store, and then wait for an unlock.
 */
#ifndef queued_spin_unlock_wait
void queued_spin_unlock_wait(struct qspinlock *lock)
{
	u32 val;

	for (;;) {
		val = atomic_read(&lock->val);

		if (!val) /* not locked, we're done */
			goto done;

		if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
			break;

		/* not locked, but pending, wait until we observe the lock */
		cpu_relax();
	}

	/* any unlock is good */
	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
		cpu_relax();

done:
	smp_acquire__after_ctrl_dep();
}
EXPORT_SYMBOL(queued_spin_unlock_wait);
#endif
#endif /* _GEN_PV_LOCK_SLOWPATH */
/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'            |  :
 *   queue               :       | ^--'                           |  :
 *                       :       v                                |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	struct mcs_spinlock *prev, *next, *node;
	u32 new, old, tail;
	int idx;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	if (pv_enabled())
		goto queue;

	if (virt_spin_lock(lock))
		return;
	/*
	 * wait for in-progress pending->locked hand-overs
	 *
	 * 0,1,0 -> 0,0,1
	 */
	if (val == _Q_PENDING_VAL) {
		while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
			cpu_relax();
	}
	/*
	 * trylock || pending
	 * 0,0,0 -> 0,0,1 ; trylock
	 * 0,0,1 -> 0,1,1 ; pending
	 */
	for (;;) {
		/*
		 * If we observe any contention; queue.
		 */
		if (val & ~_Q_LOCKED_MASK)
			goto queue;

		new = _Q_LOCKED_VAL;
		if (val == new)
			new |= _Q_PENDING_VAL;

		/*
		 * Acquire semantic is required here as the function may
		 * return immediately if the lock was free.
		 */
		old = atomic_cmpxchg_acquire(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}

	/*
	 * we won the trylock
	 */
	if (new == _Q_LOCKED_VAL)
		return;
	/*
	 * we're pending, wait for the owner to go away.
	 *
	 * *,1,1 -> *,1,0
	 *
	 * this wait loop must be a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because not all clear_pending_set_locked()
	 * implementations imply full barriers.
	 */
	smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));

	/*
	 * take ownership and clear the pending bit.
	 *
	 * *,1,0 -> *,0,1
	 */
	clear_pending_set_locked(lock);
	return;
	/*
	 * End of pending bit optimistic spinning and beginning of MCS
	 * queuing.
	 */
queue:
	node = this_cpu_ptr(&mcs_nodes[0]);
	idx = node->count++;
	tail = encode_tail(smp_processor_id(), idx);

	node += idx;
	node->locked = 0;
	node->next = NULL;
	pv_init_node(node);
	/*
	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
	 * attempt the trylock once more in the hope someone let go while we
	 * weren't watching.
	 */
	if (queued_spin_trylock(lock))
		goto release;
	/*
	 * We have already touched the queueing cacheline; don't bother with
	 * pending stuff.
	 *
	 * p,*,* -> n,*,*
	 *
	 * RELEASE, such that the stores to @node must be complete.
	 */
	old = xchg_tail(lock, tail);
	next = NULL;
	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_MASK) {
		prev = decode_tail(old);
		/*
		 * The above xchg_tail() is also a load of @lock which generates,
		 * through decode_tail(), a pointer.
		 *
		 * The address dependency matches the RELEASE of xchg_tail()
		 * such that the access to @prev must happen after.
		 */
		smp_read_barrier_depends();

		WRITE_ONCE(prev->next, node);

		pv_wait_node(node, prev);
		arch_mcs_spin_lock_contended(&node->locked);

		/*
		 * While waiting for the MCS lock, the next pointer may have
		 * been set by another lock waiter. We optimistically load
		 * the next pointer & prefetch the cacheline for writing
		 * to reduce latency in the upcoming MCS unlock operation.
		 */
		next = READ_ONCE(node->next);
		if (next)
			prefetchw(next);
	}
	/*
	 * we're at the head of the waitqueue, wait for the owner & pending to
	 * go away.
	 *
	 * *,x,y -> *,0,0
	 *
	 * this wait loop must use a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because the set_locked() function below
	 * does not imply a full barrier.
	 *
	 * The PV pv_wait_head_or_lock function, if active, will acquire
	 * the lock and return a non-zero value. So we have to skip the
	 * smp_cond_load_acquire() call. As the next PV queue head hasn't been
	 * designated yet, there is no way for the locked value to become
	 * _Q_SLOW_VAL. So both the set_locked() and the
	 * atomic_cmpxchg_relaxed() calls will be safe.
	 *
	 * If PV isn't active, 0 will be returned instead.
	 */
	if ((val = pv_wait_head_or_lock(lock, node)))
		goto locked;

	val = smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_PENDING_MASK));

locked:
	/*
	 * claim the lock:
	 *
	 * n,0,0 -> 0,0,1 : lock, uncontended
	 * *,0,0 -> *,0,1 : lock, contended
	 *
	 * If the queue head is the only one in the queue (lock value == tail),
	 * clear the tail code and grab the lock. Otherwise, we only need
	 * to grab the lock.
	 */
	for (;;) {
		/* In the PV case we might already have _Q_LOCKED_VAL set */
		if ((val & _Q_TAIL_MASK) != tail) {
			set_locked(lock);
			break;
		}
		/*
		 * The smp_cond_load_acquire() call above has provided the
		 * necessary acquire semantics required for locking. At most
		 * two iterations of this loop may be run.
		 */
		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
		if (old == val)
			goto release;	/* No contention */

		val = old;
	}
	/*
	 * contended path; wait for next if not observed yet, release.
	 */
	if (!next) {
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	arch_mcs_spin_unlock_contended(&next->locked);
	pv_kick_node(lock, next);

release:
	/*
	 * release the node
	 */
	__this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
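
/*
 * For context, a sketch of the generic fast path that hands off to the
 * slowpath above (based on include/asm-generic/qspinlock.h; illustrative,
 * not part of this file). The slowpath only runs once the uncontended
 * cmpxchg on a zero lock word has failed:
 *
 *	static __always_inline void queued_spin_lock(struct qspinlock *lock)
 *	{
 *		u32 val;
 *
 *		val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
 *		if (likely(val == 0))
 *			return;
 *		queued_spin_lock_slowpath(lock, val);
 *	}
 */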
/*
 * Generate the paravirt code for queued_spin_lock_slowpath().
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef  pv_enabled
#define pv_enabled()	true

#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head_or_lock

#undef  queued_spin_lock_slowpath
#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath

#include "qspinlock_paravirt.h"
#include "qspinlock.c"

#endif
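
/*
 * Illustrative summary of the double-compilation trick above: this file is
 * compiled twice. The first pass, with pv_enabled() stubbed to false, emits
 * the native slowpath (renamed to native_queued_spin_lock_slowpath when
 * CONFIG_PARAVIRT_SPINLOCKS is set). The self-#include then re-runs the same
 * code with _GEN_PV_LOCK_SLOWPATH defined and the pv_*() hooks mapped to the
 * real helpers from qspinlock_paravirt.h, emitting
 * __pv_queued_spin_lock_slowpath() as a second, paravirt-aware copy of the
 * same algorithm.
 */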