// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

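/*
 * Extent buffer locking (summary of the code below)
 * -------------------------------------------------
 *
 * Each extent_buffer (eb) carries a rwlock, eb->lock, that can be held
 * either spinning or blocking. A spinning lock is taken directly with
 * read_lock()/write_lock(). A holder that may sleep converts it with
 * btrfs_set_lock_blocking_read()/btrfs_set_lock_blocking_write(), which
 * record the blocking state in eb->blocking_readers/eb->blocking_writers
 * and drop the rwlock; waiters then sleep on eb->read_lock_wq or
 * eb->write_lock_wq until those counts return to zero.
 *
 * The assertion helpers below track lock and holder counts for sanity
 * checking and compile away entirely unless CONFIG_BTRFS_DEBUG is set.
 */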
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
	eb->spinning_writers++;
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers != 1);
	eb->spinning_writers--;
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->read_locks);
}

static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->read_locks);
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}

static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
	eb->write_locks++;
}

static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
	eb->write_locks--;
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!eb->write_locks);
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif

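/*
 * Convert a held spinning read lock to the blocking state so the holder
 * can sleep: account the blocking reader and drop the rwlock. If this
 * thread already owns the write lock (nested read), nothing needs to be
 * done.
 */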
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_read(eb);
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	btrfs_assert_spinning_readers_put(eb);
	read_unlock(&eb->lock);
}

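/*
 * Convert a held spinning write lock to the blocking state and drop the
 * rwlock. A nested holder, or a lock that is already blocking, is left
 * untouched.
 */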
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_write(eb);
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (eb->blocking_writers == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		eb->blocking_writers++;
		write_unlock(&eb->lock);
	}
}

/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();
again:
	read_lock(&eb->lock);
	BUG_ON(eb->blocking_writers == 0 &&
	       current->pid == eb->lock_owner);
	if (eb->blocking_writers && current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = true;
		read_unlock(&eb->lock);
		trace_btrfs_tree_read_lock(eb, start_ns);
		return;
	}
	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   eb->blocking_writers == 0);
		goto again;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock(eb, start_ns);
}

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (eb->blocking_writers)
		return 0;

	read_lock(&eb->lock);
	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock_atomic(eb);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (eb->blocking_writers)
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_try_tree_read_lock(eb);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_write_locks_get(eb);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_try_tree_write_lock(eb);
	return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	btrfs_assert_spinning_readers_put(eb);
	btrfs_assert_tree_read_locks_put(eb);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock_blocking(eb);
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	btrfs_assert_tree_read_locks_put(eb);
}

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	btrfs_assert_tree_write_locks_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_lock(eb, start_ns);
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = eb->blocking_writers;

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	trace_btrfs_tree_unlock(eb);
	eb->lock_owner = 0;
	btrfs_assert_tree_write_locks_put(eb);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		eb->blocking_writers--;
		/*
		 * We need to order modifying blocking_writers above with
		 * actually waking up the sleepers to ensure they see the
		 * updated value of blocking_writers
		 */
		cond_wake_up(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}