/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>
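
/*
 * Bound on the number of times a CPU re-reads lock->val while waiting
 * for a concurrent locker's pending->locked hand-off before it gives up
 * and queues itself; this overrides the generic default of 1 in
 * kernel/locking/qspinlock.c.
 */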
#define _Q_PENDING_LOOPS	(1 << 9)

#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
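
/*
 * Atomically set the pending bit with a "lock btsl" and report its
 * previous value via the carry flag; GEN_BINARY_RMWcc() expands to the
 * asm plus the return statements, which is why this function body has
 * no explicit return.
 */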
static __always_inline bool __queued_RMW_btsl(struct qspinlock *lock)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter,
			 "I", _Q_PENDING_OFFSET, "%0", c);
}
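
/*
 * Return the old lock value with the pending bit set. Only the pending
 * bit itself is fetched atomically; the remaining bits come from a
 * separate atomic_read(), which is sufficient for how the slowpath
 * consumes this value.
 */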
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	u32 val = 0;

	if (__queued_RMW_btsl(lock))
		val |= _Q_PENDING_VAL;

	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

	return val;
}
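
/*
 * For reference, the generic slowpath in kernel/locking/qspinlock.c
 * consumes this value roughly as follows (a simplified sketch, not the
 * exact upstream code):
 *
 *	val = queued_fetch_set_pending_acquire(lock);
 *	if (val & ~_Q_LOCKED_MASK)
 *		goto queue;
 *
 * i.e. if pending or tail was already set there is a concurrent locker
 * and we fall back to the MCS queue; otherwise we just wait for the
 * current owner to clear _Q_LOCKED_MASK.
 */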

#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);
}
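
/*
 * The single byte store is enough: the locked byte is the least
 * significant byte of lock->val, so clearing it leaves the pending and
 * tail fields untouched, and on x86 smp_store_release() is just a
 * compiler barrier plus a plain store, since TSO stores already have
 * release semantics.
 */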
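
/*
 * With paravirt spinlocks enabled, the slowpath and unlock are routed
 * through pv_ops so a guest vCPU can halt and be kicked by the lock
 * releaser instead of burning cycles spinning on a lock whose holder's
 * vCPU has been scheduled out.
 */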
#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);

static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}
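
/*
 * vcpu_is_preempted() lets optimistic-spinning code elsewhere in the
 * kernel (e.g. mutex/rwsem owner spinning, the scheduler's idle-CPU
 * checks) stop wasting cycles when the CPU being watched is a vCPU
 * that the hypervisor has preempted.
 */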
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
	return pv_vcpu_is_preempted(cpu);
}
#else
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
#endif

#ifdef CONFIG_PARAVIRT
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

void native_pv_lock_init(void) __init;
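
/*
 * virt_spin_lock_key defaults to true; native_pv_lock_init() disables
 * it on bare metal, and hypervisor init code disables it when proper
 * paravirt spinlock support is in use, so the test-and-set fallback
 * below only triggers on hypervisors without PARAVIRT_SPINLOCKS.
 */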
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

	/*
	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
	 * back to a Test-and-Set spinlock, because fair locks have
	 * horrible lock 'holder' preemption issues.
	 */

	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
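
/*
 * Note the shape of the loop above: it spins with plain reads and only
 * issues the atomic cmpxchg() once the lock looks free, so waiters keep
 * the cache line in a shared state instead of bouncing it around with
 * back-to-back locked operations.
 */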
#else
static inline void native_pv_lock_init(void)
{
}
#endif /* CONFIG_PARAVIRT */
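
/*
 * The generic header below supplies queued_spin_lock(),
 * queued_spin_trylock() and friends, honouring the #define overrides
 * above in place of its default implementations.
 */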
#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */