/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queue read/write lock
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

/* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */
/*
 * Writer states & reader shift and bias.
 *
 * Layout of the lock word (lock->cnts):
 *   bits 0-7 : writer-locked byte (_QW_LOCKED fills the whole byte)
 *   bit  8   : a writer is queued and waiting (_QW_WAITING)
 *   bits 9+  : reader count, incremented in units of _QR_BIAS
 */
#define	_QW_WAITING	0x100		/* A writer is waiting	   */
#define	_QW_LOCKED	0x0ff		/* A writer holds the lock */
#define	_QW_WMASK	0x1ff		/* Writer mask		   */
#define	_QR_SHIFT	9		/* Reader count shift	   */
#define _QR_BIAS	(1U << _QR_SHIFT)

/*
 * External function declarations -- contended slowpaths, defined in
 * kernel/locking/qrwlock.c.
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);
70af2f8a | 35 | /** |
f7d71f20 | 36 | * queued_read_trylock - try to acquire read lock of a queue rwlock |
70af2f8a WL |
37 | * @lock : Pointer to queue rwlock structure |
38 | * Return: 1 if lock acquired, 0 if failed | |
39 | */ | |
f7d71f20 | 40 | static inline int queued_read_trylock(struct qrwlock *lock) |
70af2f8a | 41 | { |
f44ca087 | 42 | int cnts; |
70af2f8a WL |
43 | |
44 | cnts = atomic_read(&lock->cnts); | |
45 | if (likely(!(cnts & _QW_WMASK))) { | |
77e430e3 | 46 | cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts); |
70af2f8a WL |
47 | if (likely(!(cnts & _QW_WMASK))) |
48 | return 1; | |
49 | atomic_sub(_QR_BIAS, &lock->cnts); | |
50 | } | |
51 | return 0; | |
52 | } | |
53 | ||
54 | /** | |
f7d71f20 | 55 | * queued_write_trylock - try to acquire write lock of a queue rwlock |
70af2f8a WL |
56 | * @lock : Pointer to queue rwlock structure |
57 | * Return: 1 if lock acquired, 0 if failed | |
58 | */ | |
f7d71f20 | 59 | static inline int queued_write_trylock(struct qrwlock *lock) |
70af2f8a | 60 | { |
f44ca087 | 61 | int cnts; |
70af2f8a WL |
62 | |
63 | cnts = atomic_read(&lock->cnts); | |
64 | if (unlikely(cnts)) | |
65 | return 0; | |
66 | ||
27df8968 MW |
67 | return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, |
68 | _QW_LOCKED)); | |
70af2f8a WL |
69 | } |
70 | /** | |
f7d71f20 | 71 | * queued_read_lock - acquire read lock of a queue rwlock |
70af2f8a WL |
72 | * @lock: Pointer to queue rwlock structure |
73 | */ | |
f7d71f20 | 74 | static inline void queued_read_lock(struct qrwlock *lock) |
70af2f8a | 75 | { |
f44ca087 | 76 | int cnts; |
70af2f8a | 77 | |
77e430e3 | 78 | cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts); |
70af2f8a WL |
79 | if (likely(!(cnts & _QW_WMASK))) |
80 | return; | |
81 | ||
82 | /* The slowpath will decrement the reader count, if necessary. */ | |
b519b56e | 83 | queued_read_lock_slowpath(lock); |
70af2f8a WL |
84 | } |
85 | ||
86 | /** | |
f7d71f20 | 87 | * queued_write_lock - acquire write lock of a queue rwlock |
70af2f8a WL |
88 | * @lock : Pointer to queue rwlock structure |
89 | */ | |
f7d71f20 | 90 | static inline void queued_write_lock(struct qrwlock *lock) |
70af2f8a | 91 | { |
f44ca087 | 92 | int cnts = 0; |
70af2f8a | 93 | /* Optimize for the unfair lock case where the fair flag is 0. */ |
27df8968 | 94 | if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))) |
70af2f8a WL |
95 | return; |
96 | ||
f7d71f20 | 97 | queued_write_lock_slowpath(lock); |
70af2f8a WL |
98 | } |
99 | ||
/**
 * queued_read_unlock - release read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count. Release ordering keeps
	 * the critical section's accesses from leaking past the unlock.
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}
111 | ||
/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
	/*
	 * Release-store of 0 to the writer-locked byte only (wlocked
	 * presumably aliases the low byte of cnts, matching _QW_LOCKED's
	 * 0x0ff -- confirm against qrwlock_types.h), so concurrent updates
	 * to the waiting/reader bits are left undisturbed.
	 */
	smp_store_release(&lock->wlocked, 0);
}
70af2f8a | 120 | |
/**
 * queued_rwlock_is_contended - check if the lock is contended
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static inline int queued_rwlock_is_contended(struct qrwlock *lock)
{
	/* Contenders queue on wait_lock, so a held wait_lock means waiters. */
	return arch_spin_is_locked(&lock->wait_lock);
}
130 | ||
/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queue rwlock functions.
 */
#define arch_read_lock(l)		queued_read_lock(l)
#define arch_write_lock(l)		queued_write_lock(l)
#define arch_read_trylock(l)		queued_read_trylock(l)
#define arch_write_trylock(l)		queued_write_trylock(l)
#define arch_read_unlock(l)		queued_read_unlock(l)
#define arch_write_unlock(l)		queued_write_unlock(l)
#define arch_rwlock_is_contended(l)	queued_rwlock_is_contended(l)

#endif /* __ASM_GENERIC_QRWLOCK_H */