// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

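/*
 * Overview (an illustrative sketch, not a formal contract): each extent
 * buffer is protected by eb->lock. A holder starts out "spinning" (the
 * rwlock is held and the task must not sleep) and converts the lock to
 * "blocking" before doing work that may sleep; the rwlock itself is
 * dropped and per-eb counters pin the lock state instead. A typical
 * write-side sequence might look like:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_write(eb);
 *	... work that may sleep ...
 *	btrfs_tree_unlock(eb);
 *
 * Real call sites (e.g. in ctree.c) interleave these in more ways.
 */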
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

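/*
 * Debug-only sanity checks for the spinning_writers counter. With
 * CONFIG_BTRFS_DEBUG disabled they compile to empty stubs and cost
 * nothing.
 */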
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers) != 1);
	atomic_dec(&eb->spinning_writers);
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
#endif

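/*
 * Convert a held spinning read lock to the blocking variant: account the
 * reader in blocking_readers and drop the underlying rwlock so the holder
 * may sleep. Apart from the nested-owner shortcut, callers must already
 * hold a spinning read lock on @eb.
 */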
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	read_unlock(&eb->lock);
}

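/*
 * Convert a held spinning write lock to the blocking variant: account the
 * writer in blocking_writers and drop the underlying rwlock so the holder
 * may sleep. A no-op if the lock is already blocking.
 */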
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (atomic_read(&eb->blocking_writers) == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		atomic_inc(&eb->blocking_writers);
		write_unlock(&eb->lock);
	}
}

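/*
 * Convert a held blocking read lock back to the spinning variant: retake
 * the rwlock, move the reader back to spinning_readers and wake anybody
 * waiting for the last blocking reader to go away.
 */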
void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_readers) == 0);
	read_lock(&eb->lock);
	atomic_inc(&eb->spinning_readers);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
}

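/*
 * Convert a held blocking write lock back to the spinning variant: retake
 * the rwlock and wake anybody waiting for blocking_writers to reach zero.
 */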
void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
	write_lock(&eb->lock);
	btrfs_assert_spinning_writers_get(eb);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_writers))
		cond_wake_up_nomb(&eb->write_lock_wq);
}

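/*
 * Nested read locking, an illustrative sketch: a thread that owns the
 * write lock on @eb and has made it blocking may take one extra read lock
 * on the same buffer:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_write(eb);
 *	btrfs_tree_read_lock(eb);	effectively just sets eb->lock_nested
 *	btrfs_tree_read_unlock(eb);	clears eb->lock_nested again
 *	btrfs_tree_unlock(eb);
 *
 * Only one level of nesting is supported, enforced by the
 * BUG_ON(eb->lock_nested) in btrfs_tree_read_lock() below.
 */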
/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}

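/*
 * The three trylock-style helpers below share a check/lock/recheck
 * pattern: blocking_writers (and blocking_readers for the write lock) is
 * tested once before taking the rwlock as a cheap early-out, then tested
 * again under the rwlock because it may have changed in between.
 */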
/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) ||
	    atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		atomic_dec(&eb->blocking_writers);
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}

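/*
 * Sanity checks: these assert that @eb is write-locked (respectively
 * read-locked, below) by somebody; they check the counters only and do
 * not verify that the caller is the lock owner.
 */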
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}