/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;
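
/*
 * Return the first byte past the end of 'entry', clamped to (u64)-1 so
 * callers don't have to worry about file_offset + len overflowing.
 */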
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
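
/*
 * tree_insert() returning non-NULL means an ordered extent already covers
 * this offset, which is a filesystem inconsistency, so panic.
 */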
static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
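
/*
 * helper to check if [file_offset, file_offset + len) overlaps any part of
 * the given entry
 */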
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
	    !(type == BTRFS_ORDERED_NOCOW))
		entry->csum_bytes_left = disk_len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root,
			      &root->fs_info->ordered_roots);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}
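
/*
 * Public wrappers around __btrfs_add_ordered_extent() for the buffered,
 * direct IO and compressed write paths.
 */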
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	WARN_ON(entry->csum_bytes_left < sum->len);
	entry->csum_bytes_left -= sum->len;
	if (entry->csum_bytes_left == 0)
		wake_up(&entry->wait);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			"bad ordering dec_start %llu end %llu", dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			"bad ordered accounting left %llu size %llu",
			entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct inode *inode,
			      struct list_head *logged_list)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		if (!list_empty(&ordered->log_list))
			continue;
		list_add_tail(&ordered->log_list, logged_list);
		atomic_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}
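
/*
 * Drop the references taken by btrfs_get_logged_extents() and empty the
 * list.
 */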
void btrfs_put_logged_extents(struct list_head *logged_list)
{
	struct btrfs_ordered_extent *ordered;

	while (!list_empty(logged_list)) {
		ordered = list_first_entry(logged_list,
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		btrfs_put_ordered_extent(ordered);
	}
}
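
/*
 * Splice a list of logged extents onto the log root's list for the
 * current log transaction.
 */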
void btrfs_submit_logged_extents(struct list_head *logged_list,
				 struct btrfs_root *log)
{
	int index = log->log_transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	list_splice_tail(logged_list, &log->logged_list[index]);
	spin_unlock_irq(&log->log_extents_lock[index]);
}
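
/*
 * Wait for IO completion of every extent logged in the given log
 * transaction, dropping our reference on each one as it finishes.
 */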
void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}
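
/*
 * Like btrfs_wait_logged_extents(), but drops the references without
 * waiting for the IO, for when the logged extents are being thrown away.
 */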
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		spin_lock(&root->fs_info->ordered_root_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}

	if (!root->nr_ordered_extents) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}
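
/*
 * Worker callback: wait for a single ordered extent to complete, then
 * signal the completion that btrfs_wait_ordered_extents() is blocked on.
 */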
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
{
	struct list_head splice, works;
	struct btrfs_ordered_extent *ordered, *next;
	int count = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);
		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(root->fs_info->flush_workers,
				 &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != -1)
			nr--;
		count++;
	}
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->fs_info->ordered_operations_mutex);

	return count;
}
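
/*
 * Walk every root that currently has ordered extents and flush up to 'nr'
 * of them in total (all of them if nr is -1).
 */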
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_root *root;
	struct list_head splice;
	int done;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != -1) {
			nr -= done;
			WARN_ON(nr < 0);
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct list_head splice;
	struct list_head works;
	struct btrfs_delalloc_work *work, *next;
	int ret = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_extent_flush_mutex);
	spin_lock(&root->fs_info->ordered_root_lock);
	list_splice_init(&cur_trans->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);
		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);
		if (!inode)
			continue;

		if (!wait)
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &cur_trans->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);

		work = btrfs_alloc_delalloc_work(inode, wait, 1);
		if (!work) {
			spin_lock(&root->fs_info->ordered_root_lock);
			if (list_empty(&BTRFS_I(inode)->ordered_operations))
				list_add_tail(&btrfs_inode->ordered_operations,
					      &splice);
			list_splice_tail(&splice,
					 &cur_trans->ordered_operations);
			spin_unlock(&root->fs_info->ordered_root_lock);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_work(root->fs_info->flush_workers,
				 &work->work);

		cond_resched();
		spin_lock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}
	mutex_unlock(&root->fs_info->ordered_extent_flush_mutex);
	return ret;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
	if (ret)
		return ret;
	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags)) {
		ret = filemap_fdatawrite_range(inode->i_mapping, start,
					       orig_end);
		if (ret)
			return ret;
	}
	ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
	if (ret)
		return ret;

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret;
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * not-yet-applied i_size, or we will not know the
			 * real i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the disk_i_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod <= root->fs_info->last_trans_committed)
		return;

	spin_lock(&root->fs_info->ordered_root_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &cur_trans->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
}
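
/*
 * Module init/exit helpers for the slab cache backing
 * struct btrfs_ordered_extent allocations.
 */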
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}