]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - fs/btrfs/locking.c
UBUNTU: Ubuntu-5.3.0-29.31
[mirror_ubuntu-eoan-kernel.git] / fs / btrfs / locking.c
CommitLineData
c1d7c514 1// SPDX-License-Identifier: GPL-2.0
925baedd
CM
2/*
3 * Copyright (C) 2008 Oracle. All rights reserved.
925baedd 4 */
c1d7c514 5
925baedd 6#include <linux/sched.h>
925baedd
CM
7#include <linux/pagemap.h>
8#include <linux/spinlock.h>
9#include <linux/page-flags.h>
4881ee5a 10#include <asm/bug.h>
925baedd
CM
11#include "ctree.h"
12#include "extent_io.h"
13#include "locking.h"
14
#ifdef CONFIG_BTRFS_DEBUG
/*
 * Debug-only lock accounting.  These helpers maintain counters on the
 * extent buffer that mirror the lock state, so an imbalanced lock/unlock
 * sequence trips a WARN_ON/BUG_ON immediately instead of corrupting state
 * silently.  With CONFIG_BTRFS_DEBUG disabled they compile to empty stubs.
 */
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	/* There must never be more than one spinning writer */
	WARN_ON(eb->spinning_writers);
	eb->spinning_writers++;
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	/* Dropping the spinning writer requires exactly one to exist */
	WARN_ON(eb->spinning_writers != 1);
	eb->spinning_writers--;
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	/* Catch a put without a matching get */
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->read_locks);
}

static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->read_locks);
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}

static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
	/* write_locks is only modified under the write lock, no atomic needed */
	eb->write_locks++;
}

static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
	eb->write_locks--;
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!eb->write_locks);
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif
87
b95be2d9
DS
88void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
89{
31aab402 90 trace_btrfs_set_lock_blocking_read(eb);
b95be2d9
DS
91 /*
92 * No lock is required. The lock owner may change if we have a read
93 * lock, but it won't change to or away from us. If we have the write
94 * lock, we are the owner and it'll never change.
95 */
96 if (eb->lock_nested && current->pid == eb->lock_owner)
97 return;
98 btrfs_assert_tree_read_locked(eb);
99 atomic_inc(&eb->blocking_readers);
afd495a8 100 btrfs_assert_spinning_readers_put(eb);
b95be2d9
DS
101 read_unlock(&eb->lock);
102}
103
104void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
925baedd 105{
31aab402 106 trace_btrfs_set_lock_blocking_write(eb);
ea4ebde0 107 /*
b95be2d9
DS
108 * No lock is required. The lock owner may change if we have a read
109 * lock, but it won't change to or away from us. If we have the write
110 * lock, we are the owner and it'll never change.
ea4ebde0
CM
111 */
112 if (eb->lock_nested && current->pid == eb->lock_owner)
113 return;
06297d8c 114 if (eb->blocking_writers == 0) {
843ccf9f 115 btrfs_assert_spinning_writers_put(eb);
b95be2d9 116 btrfs_assert_tree_locked(eb);
06297d8c 117 eb->blocking_writers++;
b95be2d9 118 write_unlock(&eb->lock);
b4ce94de 119 }
b4ce94de 120}
f9efa9c7 121
aa12c027
DS
122void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
123{
31aab402 124 trace_btrfs_clear_lock_blocking_read(eb);
aa12c027
DS
125 /*
126 * No lock is required. The lock owner may change if we have a read
127 * lock, but it won't change to or away from us. If we have the write
128 * lock, we are the owner and it'll never change.
129 */
130 if (eb->lock_nested && current->pid == eb->lock_owner)
131 return;
132 BUG_ON(atomic_read(&eb->blocking_readers) == 0);
133 read_lock(&eb->lock);
afd495a8 134 btrfs_assert_spinning_readers_get(eb);
aa12c027
DS
135 /* atomic_dec_and_test implies a barrier */
136 if (atomic_dec_and_test(&eb->blocking_readers))
137 cond_wake_up_nomb(&eb->read_lock_wq);
138}
139
140void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
b4ce94de 141{
31aab402 142 trace_btrfs_clear_lock_blocking_write(eb);
ea4ebde0
CM
143 /*
144 * no lock is required. The lock owner may change if
145 * we have a read lock, but it won't change to or away
146 * from us. If we have the write lock, we are the owner
147 * and it'll never change.
148 */
149 if (eb->lock_nested && current->pid == eb->lock_owner)
150 return;
aa12c027 151 write_lock(&eb->lock);
06297d8c 152 BUG_ON(eb->blocking_writers != 1);
843ccf9f 153 btrfs_assert_spinning_writers_get(eb);
06297d8c
DS
154 if (--eb->blocking_writers == 0)
155 cond_wake_up(&eb->write_lock_wq);
b4ce94de
CM
156}
157
158/*
bd681513
CM
159 * take a spinning read lock. This will wait for any blocking
160 * writers
b4ce94de 161 */
bd681513 162void btrfs_tree_read_lock(struct extent_buffer *eb)
b4ce94de 163{
34e73cc9
QW
164 u64 start_ns = 0;
165
166 if (trace_btrfs_tree_read_lock_enabled())
167 start_ns = ktime_get_ns();
bd681513 168again:
5b25f70f 169 read_lock(&eb->lock);
06297d8c
DS
170 BUG_ON(eb->blocking_writers == 0 &&
171 current->pid == eb->lock_owner);
172 if (eb->blocking_writers && current->pid == eb->lock_owner) {
5b25f70f
AJ
173 /*
174 * This extent is already write-locked by our thread. We allow
175 * an additional read lock to be added because it's for the same
176 * thread. btrfs_find_all_roots() depends on this as it may be
177 * called on a partly (write-)locked tree.
178 */
179 BUG_ON(eb->lock_nested);
ed1b4ed7 180 eb->lock_nested = true;
5b25f70f 181 read_unlock(&eb->lock);
34e73cc9 182 trace_btrfs_tree_read_lock(eb, start_ns);
5b25f70f
AJ
183 return;
184 }
06297d8c 185 if (eb->blocking_writers) {
bd681513 186 read_unlock(&eb->lock);
39f9d028 187 wait_event(eb->write_lock_wq,
06297d8c 188 eb->blocking_writers == 0);
bd681513 189 goto again;
b4ce94de 190 }
5c9c799a 191 btrfs_assert_tree_read_locks_get(eb);
afd495a8 192 btrfs_assert_spinning_readers_get(eb);
34e73cc9 193 trace_btrfs_tree_read_lock(eb, start_ns);
b4ce94de
CM
194}
195
f82c458a
CM
196/*
197 * take a spinning read lock.
198 * returns 1 if we get the read lock and 0 if we don't
199 * this won't wait for blocking writers
200 */
201int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
202{
06297d8c 203 if (eb->blocking_writers)
f82c458a
CM
204 return 0;
205
206 read_lock(&eb->lock);
06297d8c 207 if (eb->blocking_writers) {
f82c458a
CM
208 read_unlock(&eb->lock);
209 return 0;
210 }
5c9c799a 211 btrfs_assert_tree_read_locks_get(eb);
afd495a8 212 btrfs_assert_spinning_readers_get(eb);
31aab402 213 trace_btrfs_tree_read_lock_atomic(eb);
f82c458a
CM
214 return 1;
215}
216
b4ce94de 217/*
bd681513
CM
218 * returns 1 if we get the read lock and 0 if we don't
219 * this won't wait for blocking writers
b4ce94de 220 */
bd681513 221int btrfs_try_tree_read_lock(struct extent_buffer *eb)
b4ce94de 222{
06297d8c 223 if (eb->blocking_writers)
bd681513 224 return 0;
b4ce94de 225
ea4ebde0
CM
226 if (!read_trylock(&eb->lock))
227 return 0;
228
06297d8c 229 if (eb->blocking_writers) {
bd681513
CM
230 read_unlock(&eb->lock);
231 return 0;
b9473439 232 }
5c9c799a 233 btrfs_assert_tree_read_locks_get(eb);
afd495a8 234 btrfs_assert_spinning_readers_get(eb);
31aab402 235 trace_btrfs_try_tree_read_lock(eb);
bd681513 236 return 1;
b4ce94de
CM
237}
238
239/*
bd681513
CM
240 * returns 1 if we get the read lock and 0 if we don't
241 * this won't wait for blocking writers or readers
b4ce94de 242 */
bd681513 243int btrfs_try_tree_write_lock(struct extent_buffer *eb)
b4ce94de 244{
06297d8c 245 if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
bd681513 246 return 0;
ea4ebde0 247
f82c458a 248 write_lock(&eb->lock);
06297d8c 249 if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
bd681513
CM
250 write_unlock(&eb->lock);
251 return 0;
252 }
c79adfc0 253 btrfs_assert_tree_write_locks_get(eb);
843ccf9f 254 btrfs_assert_spinning_writers_get(eb);
5b25f70f 255 eb->lock_owner = current->pid;
31aab402 256 trace_btrfs_try_tree_write_lock(eb);
b4ce94de
CM
257 return 1;
258}
259
260/*
bd681513
CM
261 * drop a spinning read lock
262 */
263void btrfs_tree_read_unlock(struct extent_buffer *eb)
264{
31aab402 265 trace_btrfs_tree_read_unlock(eb);
ea4ebde0
CM
266 /*
267 * if we're nested, we have the write lock. No new locking
268 * is needed as long as we are the lock owner.
269 * The write unlock will do a barrier for us, and the lock_nested
270 * field only matters to the lock owner.
271 */
272 if (eb->lock_nested && current->pid == eb->lock_owner) {
ed1b4ed7 273 eb->lock_nested = false;
ea4ebde0 274 return;
5b25f70f 275 }
bd681513 276 btrfs_assert_tree_read_locked(eb);
afd495a8 277 btrfs_assert_spinning_readers_put(eb);
5c9c799a 278 btrfs_assert_tree_read_locks_put(eb);
bd681513
CM
279 read_unlock(&eb->lock);
280}
281
282/*
283 * drop a blocking read lock
284 */
285void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
286{
31aab402 287 trace_btrfs_tree_read_unlock_blocking(eb);
ea4ebde0
CM
288 /*
289 * if we're nested, we have the write lock. No new locking
290 * is needed as long as we are the lock owner.
291 * The write unlock will do a barrier for us, and the lock_nested
292 * field only matters to the lock owner.
293 */
294 if (eb->lock_nested && current->pid == eb->lock_owner) {
ed1b4ed7 295 eb->lock_nested = false;
ea4ebde0 296 return;
5b25f70f 297 }
bd681513
CM
298 btrfs_assert_tree_read_locked(eb);
299 WARN_ON(atomic_read(&eb->blocking_readers) == 0);
093258e6
DS
300 /* atomic_dec_and_test implies a barrier */
301 if (atomic_dec_and_test(&eb->blocking_readers))
302 cond_wake_up_nomb(&eb->read_lock_wq);
5c9c799a 303 btrfs_assert_tree_read_locks_put(eb);
bd681513
CM
304}
305
306/*
307 * take a spinning write lock. This will wait for both
308 * blocking readers or writers
b4ce94de 309 */
143bede5 310void btrfs_tree_lock(struct extent_buffer *eb)
b4ce94de 311{
34e73cc9
QW
312 u64 start_ns = 0;
313
314 if (trace_btrfs_tree_lock_enabled())
315 start_ns = ktime_get_ns();
316
166f66d0 317 WARN_ON(eb->lock_owner == current->pid);
bd681513
CM
318again:
319 wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
06297d8c 320 wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
bd681513 321 write_lock(&eb->lock);
06297d8c 322 if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
bd681513 323 write_unlock(&eb->lock);
bd681513
CM
324 goto again;
325 }
843ccf9f 326 btrfs_assert_spinning_writers_get(eb);
c79adfc0 327 btrfs_assert_tree_write_locks_get(eb);
5b25f70f 328 eb->lock_owner = current->pid;
34e73cc9 329 trace_btrfs_tree_lock(eb, start_ns);
925baedd
CM
330}
331
bd681513
CM
332/*
333 * drop a spinning or a blocking write lock.
334 */
143bede5 335void btrfs_tree_unlock(struct extent_buffer *eb)
925baedd 336{
06297d8c 337 int blockers = eb->blocking_writers;
bd681513
CM
338
339 BUG_ON(blockers > 1);
340
341 btrfs_assert_tree_locked(eb);
31aab402 342 trace_btrfs_tree_unlock(eb);
ea4ebde0 343 eb->lock_owner = 0;
c79adfc0 344 btrfs_assert_tree_write_locks_put(eb);
bd681513
CM
345
346 if (blockers) {
843ccf9f 347 btrfs_assert_no_spinning_writers(eb);
06297d8c 348 eb->blocking_writers--;
6e7ca09b
NB
349 /*
350 * We need to order modifying blocking_writers above with
351 * actually waking up the sleepers to ensure they see the
352 * updated value of blocking_writers
353 */
354 cond_wake_up(&eb->write_lock_wq);
bd681513 355 } else {
843ccf9f 356 btrfs_assert_spinning_writers_put(eb);
bd681513
CM
357 write_unlock(&eb->lock);
358 }
925baedd 359}