/* arch/x86/include/asm/qspinlock.h (mirror_ubuntu-bionic-kernel.git) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

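/*
 * Let the generic slowpath spin a bounded number of times waiting for
 * a concurrent pending->locked hand-off before falling back to the MCS
 * queue; x86 overrides the generic default of a single iteration.
 */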
#define _Q_PENDING_LOOPS (1 << 9)

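/*
 * Defining queued_fetch_set_pending_acquire replaces the generic
 * atomic_fetch_or_acquire() based helper in kernel/locking/qspinlock.c
 * with the "lock btsl" based implementation below.
 */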
#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire

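/*
 * Atomically set the pending bit and report its previous value: "lock
 * btsl" leaves the old bit in the carry flag, and GEN_BINARY_RMWcc()
 * turns that flag into the function's boolean return value.
 */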
static __always_inline bool __queued_RMW_btsl(struct qspinlock *lock)
{
        GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter,
                         "I", _Q_PENDING_OFFSET, "%0", c);
}

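/*
 * Reconstruct the "fetch" half of fetch_set_pending: the old pending
 * bit comes from the btsl above, the remaining fields from a plain
 * read of the lock word afterwards.
 */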
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
        u32 val = 0;

        if (__queued_RMW_btsl(lock))
                val |= _Q_PENDING_VAL;

        val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

        return val;
}

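/*
 * Defining queued_spin_unlock tells <asm-generic/qspinlock.h> not to
 * supply its own; that lets the paravirt code below hook the unlock
 * path while the native case stays a single byte store-release.
 */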
#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
        smp_store_release(&lock->locked, 0);
}

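/*
 * With paravirt spinlocks the slowpath and the unlock are routed
 * through pv_ops, so a hypervisor backend can halt a waiting vCPU and
 * kick it on release instead of letting it spin.
 */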
#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);

static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
        pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
        pv_queued_spin_unlock(lock);
}

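/*
 * vcpu_is_preempted() lets optimistic spinners (mutex/rwsem/osq) bail
 * out early when the lock holder's vCPU has been preempted by the
 * hypervisor and cannot make progress.
 */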
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
        return pv_vcpu_is_preempted(cpu);
}
#else
static inline void queued_spin_unlock(struct qspinlock *lock)
{
        native_queued_spin_unlock(lock);
}
#endif

#ifdef CONFIG_PARAVIRT
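/*
 * virt_spin_lock_key is true by default; native_pv_lock_init() disables
 * it on bare metal, and PV backends disable it when they install real
 * paravirt spinlocks, so the fallback below only runs in guests that
 * lack PARAVIRT_SPINLOCKS support.
 */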
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

void native_pv_lock_init(void) __init;

#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
        if (!static_branch_likely(&virt_spin_lock_key))
                return false;

        /*
         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
         * back to a Test-and-Set spinlock, because fair locks have
         * horrible lock 'holder' preemption issues.
         */

        do {
                while (atomic_read(&lock->val) != 0)
                        cpu_relax();
        } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

        return true;
}
#else
static inline void native_pv_lock_init(void)
{
}
#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */
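/*
 * Usage sketch (illustrative, not part of this header): on x86 the
 * kernel's spinlock_t is built on this qspinlock, so callers go through
 * the ordinary spinlock API rather than these helpers directly.
 * Assuming a hypothetical lock in driver context:
 *
 *      static DEFINE_SPINLOCK(demo_lock);
 *
 *      spin_lock(&demo_lock);          // uncontended fast path: one cmpxchg
 *      // ... critical section ...
 *      spin_unlock(&demo_lock);        // queued_spin_unlock(): byte store
 */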