]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - fs/btrfs/extent_io.c
Btrfs: check return value of bio_alloc() properly
[mirror_ubuntu-artful-kernel.git] / fs / btrfs / extent_io.c
CommitLineData
d1310b2e
CM
1#include <linux/bitops.h>
2#include <linux/slab.h>
3#include <linux/bio.h>
4#include <linux/mm.h>
d1310b2e
CM
5#include <linux/pagemap.h>
6#include <linux/page-flags.h>
7#include <linux/module.h>
8#include <linux/spinlock.h>
9#include <linux/blkdev.h>
10#include <linux/swap.h>
d1310b2e
CM
11#include <linux/writeback.h>
12#include <linux/pagevec.h>
268bb0ce 13#include <linux/prefetch.h>
90a887c9 14#include <linux/cleancache.h>
d1310b2e
CM
15#include "extent_io.h"
16#include "extent_map.h"
2db04966 17#include "compat.h"
902b22f3
DW
18#include "ctree.h"
19#include "btrfs_inode.h"
4a54c8c1 20#include "volumes.h"
21adbd5c 21#include "check-integrity.h"
0b32f4bb 22#include "locking.h"
d1310b2e 23
d1310b2e
CM
24static struct kmem_cache *extent_state_cache;
25static struct kmem_cache *extent_buffer_cache;
26
27static LIST_HEAD(buffers);
28static LIST_HEAD(states);
4bef0848 29
b47eda86 30#define LEAK_DEBUG 0
3935127c 31#if LEAK_DEBUG
d397712b 32static DEFINE_SPINLOCK(leak_lock);
4bef0848 33#endif
d1310b2e 34
d1310b2e
CM
35#define BUFFER_LRU_MAX 64
36
37struct tree_entry {
38 u64 start;
39 u64 end;
d1310b2e
CM
40 struct rb_node rb_node;
41};
42
43struct extent_page_data {
44 struct bio *bio;
45 struct extent_io_tree *tree;
46 get_extent_t *get_extent;
771ed689
CM
47
48 /* tells writepage not to lock the state bits for this range
49 * it still does the unlocking
50 */
ffbd517d
CM
51 unsigned int extent_locked:1;
52
53 /* tells the submit_bio code to use a WRITE_SYNC */
54 unsigned int sync_io:1;
d1310b2e
CM
55};
56
0b32f4bb 57static noinline void flush_write_bio(void *data);
c2d904e0
JM
58static inline struct btrfs_fs_info *
59tree_fs_info(struct extent_io_tree *tree)
60{
61 return btrfs_sb(tree->mapping->host->i_sb);
62}
0b32f4bb 63
d1310b2e
CM
64int __init extent_io_init(void)
65{
9601e3f6
CH
66 extent_state_cache = kmem_cache_create("extent_state",
67 sizeof(struct extent_state), 0,
68 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
d1310b2e
CM
69 if (!extent_state_cache)
70 return -ENOMEM;
71
9601e3f6
CH
72 extent_buffer_cache = kmem_cache_create("extent_buffers",
73 sizeof(struct extent_buffer), 0,
74 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
d1310b2e
CM
75 if (!extent_buffer_cache)
76 goto free_state_cache;
77 return 0;
78
79free_state_cache:
80 kmem_cache_destroy(extent_state_cache);
81 return -ENOMEM;
82}
83
84void extent_io_exit(void)
85{
86 struct extent_state *state;
2d2ae547 87 struct extent_buffer *eb;
d1310b2e
CM
88
89 while (!list_empty(&states)) {
2d2ae547 90 state = list_entry(states.next, struct extent_state, leak_list);
d397712b
CM
91 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
92 "state %lu in tree %p refs %d\n",
93 (unsigned long long)state->start,
94 (unsigned long long)state->end,
95 state->state, state->tree, atomic_read(&state->refs));
2d2ae547 96 list_del(&state->leak_list);
d1310b2e
CM
97 kmem_cache_free(extent_state_cache, state);
98
99 }
100
2d2ae547
CM
101 while (!list_empty(&buffers)) {
102 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
d397712b
CM
103 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
104 "refs %d\n", (unsigned long long)eb->start,
105 eb->len, atomic_read(&eb->refs));
2d2ae547
CM
106 list_del(&eb->leak_list);
107 kmem_cache_free(extent_buffer_cache, eb);
108 }
d1310b2e
CM
109 if (extent_state_cache)
110 kmem_cache_destroy(extent_state_cache);
111 if (extent_buffer_cache)
112 kmem_cache_destroy(extent_buffer_cache);
113}
114
115void extent_io_tree_init(struct extent_io_tree *tree,
f993c883 116 struct address_space *mapping)
d1310b2e 117{
6bef4d31 118 tree->state = RB_ROOT;
19fe0a8b 119 INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
d1310b2e
CM
120 tree->ops = NULL;
121 tree->dirty_bytes = 0;
70dec807 122 spin_lock_init(&tree->lock);
6af118ce 123 spin_lock_init(&tree->buffer_lock);
d1310b2e 124 tree->mapping = mapping;
d1310b2e 125}
d1310b2e 126
b2950863 127static struct extent_state *alloc_extent_state(gfp_t mask)
d1310b2e
CM
128{
129 struct extent_state *state;
3935127c 130#if LEAK_DEBUG
2d2ae547 131 unsigned long flags;
4bef0848 132#endif
d1310b2e
CM
133
134 state = kmem_cache_alloc(extent_state_cache, mask);
2b114d1d 135 if (!state)
d1310b2e
CM
136 return state;
137 state->state = 0;
d1310b2e 138 state->private = 0;
70dec807 139 state->tree = NULL;
3935127c 140#if LEAK_DEBUG
2d2ae547
CM
141 spin_lock_irqsave(&leak_lock, flags);
142 list_add(&state->leak_list, &states);
143 spin_unlock_irqrestore(&leak_lock, flags);
4bef0848 144#endif
d1310b2e
CM
145 atomic_set(&state->refs, 1);
146 init_waitqueue_head(&state->wq);
143bede5 147 trace_alloc_extent_state(state, mask, _RET_IP_);
d1310b2e
CM
148 return state;
149}
d1310b2e 150
4845e44f 151void free_extent_state(struct extent_state *state)
d1310b2e 152{
d1310b2e
CM
153 if (!state)
154 return;
155 if (atomic_dec_and_test(&state->refs)) {
3935127c 156#if LEAK_DEBUG
2d2ae547 157 unsigned long flags;
4bef0848 158#endif
70dec807 159 WARN_ON(state->tree);
3935127c 160#if LEAK_DEBUG
2d2ae547
CM
161 spin_lock_irqsave(&leak_lock, flags);
162 list_del(&state->leak_list);
163 spin_unlock_irqrestore(&leak_lock, flags);
4bef0848 164#endif
143bede5 165 trace_free_extent_state(state, _RET_IP_);
d1310b2e
CM
166 kmem_cache_free(extent_state_cache, state);
167 }
168}
d1310b2e
CM
169
170static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
171 struct rb_node *node)
172{
d397712b
CM
173 struct rb_node **p = &root->rb_node;
174 struct rb_node *parent = NULL;
d1310b2e
CM
175 struct tree_entry *entry;
176
d397712b 177 while (*p) {
d1310b2e
CM
178 parent = *p;
179 entry = rb_entry(parent, struct tree_entry, rb_node);
180
181 if (offset < entry->start)
182 p = &(*p)->rb_left;
183 else if (offset > entry->end)
184 p = &(*p)->rb_right;
185 else
186 return parent;
187 }
188
189 entry = rb_entry(node, struct tree_entry, rb_node);
d1310b2e
CM
190 rb_link_node(node, parent, p);
191 rb_insert_color(node, root);
192 return NULL;
193}
194
80ea96b1 195static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
d1310b2e
CM
196 struct rb_node **prev_ret,
197 struct rb_node **next_ret)
198{
80ea96b1 199 struct rb_root *root = &tree->state;
d397712b 200 struct rb_node *n = root->rb_node;
d1310b2e
CM
201 struct rb_node *prev = NULL;
202 struct rb_node *orig_prev = NULL;
203 struct tree_entry *entry;
204 struct tree_entry *prev_entry = NULL;
205
d397712b 206 while (n) {
d1310b2e
CM
207 entry = rb_entry(n, struct tree_entry, rb_node);
208 prev = n;
209 prev_entry = entry;
210
211 if (offset < entry->start)
212 n = n->rb_left;
213 else if (offset > entry->end)
214 n = n->rb_right;
d397712b 215 else
d1310b2e
CM
216 return n;
217 }
218
219 if (prev_ret) {
220 orig_prev = prev;
d397712b 221 while (prev && offset > prev_entry->end) {
d1310b2e
CM
222 prev = rb_next(prev);
223 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
224 }
225 *prev_ret = prev;
226 prev = orig_prev;
227 }
228
229 if (next_ret) {
230 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
d397712b 231 while (prev && offset < prev_entry->start) {
d1310b2e
CM
232 prev = rb_prev(prev);
233 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
234 }
235 *next_ret = prev;
236 }
237 return NULL;
238}
239
80ea96b1
CM
240static inline struct rb_node *tree_search(struct extent_io_tree *tree,
241 u64 offset)
d1310b2e 242{
70dec807 243 struct rb_node *prev = NULL;
d1310b2e 244 struct rb_node *ret;
70dec807 245
80ea96b1 246 ret = __etree_search(tree, offset, &prev, NULL);
d397712b 247 if (!ret)
d1310b2e
CM
248 return prev;
249 return ret;
250}
251
9ed74f2d
JB
252static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
253 struct extent_state *other)
254{
255 if (tree->ops && tree->ops->merge_extent_hook)
256 tree->ops->merge_extent_hook(tree->mapping->host, new,
257 other);
258}
259
d1310b2e
CM
260/*
261 * utility function to look for merge candidates inside a given range.
262 * Any extents with matching state are merged together into a single
263 * extent in the tree. Extents with EXTENT_IO in their state field
264 * are not merged because the end_io handlers need to be able to do
265 * operations on them without sleeping (or doing allocations/splits).
266 *
267 * This should be called with the tree lock held.
268 */
1bf85046
JM
269static void merge_state(struct extent_io_tree *tree,
270 struct extent_state *state)
d1310b2e
CM
271{
272 struct extent_state *other;
273 struct rb_node *other_node;
274
5b21f2ed 275 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
1bf85046 276 return;
d1310b2e
CM
277
278 other_node = rb_prev(&state->rb_node);
279 if (other_node) {
280 other = rb_entry(other_node, struct extent_state, rb_node);
281 if (other->end == state->start - 1 &&
282 other->state == state->state) {
9ed74f2d 283 merge_cb(tree, state, other);
d1310b2e 284 state->start = other->start;
70dec807 285 other->tree = NULL;
d1310b2e
CM
286 rb_erase(&other->rb_node, &tree->state);
287 free_extent_state(other);
288 }
289 }
290 other_node = rb_next(&state->rb_node);
291 if (other_node) {
292 other = rb_entry(other_node, struct extent_state, rb_node);
293 if (other->start == state->end + 1 &&
294 other->state == state->state) {
9ed74f2d 295 merge_cb(tree, state, other);
df98b6e2
JB
296 state->end = other->end;
297 other->tree = NULL;
298 rb_erase(&other->rb_node, &tree->state);
299 free_extent_state(other);
d1310b2e
CM
300 }
301 }
d1310b2e
CM
302}
303
1bf85046 304static void set_state_cb(struct extent_io_tree *tree,
0ca1f7ce 305 struct extent_state *state, int *bits)
291d673e 306{
1bf85046
JM
307 if (tree->ops && tree->ops->set_bit_hook)
308 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
291d673e
CM
309}
310
311static void clear_state_cb(struct extent_io_tree *tree,
0ca1f7ce 312 struct extent_state *state, int *bits)
291d673e 313{
9ed74f2d
JB
314 if (tree->ops && tree->ops->clear_bit_hook)
315 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
291d673e
CM
316}
317
3150b699
XG
318static void set_state_bits(struct extent_io_tree *tree,
319 struct extent_state *state, int *bits);
320
d1310b2e
CM
321/*
322 * insert an extent_state struct into the tree. 'bits' are set on the
323 * struct before it is inserted.
324 *
325 * This may return -EEXIST if the extent is already there, in which case the
326 * state struct is freed.
327 *
328 * The tree lock is not taken internally. This is a utility function and
329 * probably isn't what you want to call (see set/clear_extent_bit).
330 */
331static int insert_state(struct extent_io_tree *tree,
332 struct extent_state *state, u64 start, u64 end,
0ca1f7ce 333 int *bits)
d1310b2e
CM
334{
335 struct rb_node *node;
336
337 if (end < start) {
d397712b
CM
338 printk(KERN_ERR "btrfs end < start %llu %llu\n",
339 (unsigned long long)end,
340 (unsigned long long)start);
d1310b2e
CM
341 WARN_ON(1);
342 }
d1310b2e
CM
343 state->start = start;
344 state->end = end;
9ed74f2d 345
3150b699
XG
346 set_state_bits(tree, state, bits);
347
d1310b2e
CM
348 node = tree_insert(&tree->state, end, &state->rb_node);
349 if (node) {
350 struct extent_state *found;
351 found = rb_entry(node, struct extent_state, rb_node);
d397712b
CM
352 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
353 "%llu %llu\n", (unsigned long long)found->start,
354 (unsigned long long)found->end,
355 (unsigned long long)start, (unsigned long long)end);
d1310b2e
CM
356 return -EEXIST;
357 }
70dec807 358 state->tree = tree;
d1310b2e
CM
359 merge_state(tree, state);
360 return 0;
361}
362
1bf85046 363static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
9ed74f2d
JB
364 u64 split)
365{
366 if (tree->ops && tree->ops->split_extent_hook)
1bf85046 367 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
9ed74f2d
JB
368}
369
d1310b2e
CM
370/*
371 * split a given extent state struct in two, inserting the preallocated
372 * struct 'prealloc' as the newly created second half. 'split' indicates an
373 * offset inside 'orig' where it should be split.
374 *
375 * Before calling,
376 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
377 * are two extent state structs in the tree:
378 * prealloc: [orig->start, split - 1]
379 * orig: [ split, orig->end ]
380 *
381 * The tree locks are not taken by this function. They need to be held
382 * by the caller.
383 */
384static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
385 struct extent_state *prealloc, u64 split)
386{
387 struct rb_node *node;
9ed74f2d
JB
388
389 split_cb(tree, orig, split);
390
d1310b2e
CM
391 prealloc->start = orig->start;
392 prealloc->end = split - 1;
393 prealloc->state = orig->state;
394 orig->start = split;
395
396 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
397 if (node) {
d1310b2e
CM
398 free_extent_state(prealloc);
399 return -EEXIST;
400 }
70dec807 401 prealloc->tree = tree;
d1310b2e
CM
402 return 0;
403}
404
405/*
406 * utility function to clear some bits in an extent state struct.
407 * it will optionally wake up any one waiting on this state (wake == 1), or
408 * forcibly remove the state from the tree (delete == 1).
409 *
410 * If no bits are set on the state struct after clearing things, the
411 * struct is freed and removed from the tree
412 */
413static int clear_state_bit(struct extent_io_tree *tree,
0ca1f7ce
YZ
414 struct extent_state *state,
415 int *bits, int wake)
d1310b2e 416{
0ca1f7ce 417 int bits_to_clear = *bits & ~EXTENT_CTLBITS;
32c00aff 418 int ret = state->state & bits_to_clear;
d1310b2e 419
0ca1f7ce 420 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
d1310b2e
CM
421 u64 range = state->end - state->start + 1;
422 WARN_ON(range > tree->dirty_bytes);
423 tree->dirty_bytes -= range;
424 }
291d673e 425 clear_state_cb(tree, state, bits);
32c00aff 426 state->state &= ~bits_to_clear;
d1310b2e
CM
427 if (wake)
428 wake_up(&state->wq);
0ca1f7ce 429 if (state->state == 0) {
70dec807 430 if (state->tree) {
d1310b2e 431 rb_erase(&state->rb_node, &tree->state);
70dec807 432 state->tree = NULL;
d1310b2e
CM
433 free_extent_state(state);
434 } else {
435 WARN_ON(1);
436 }
437 } else {
438 merge_state(tree, state);
439 }
440 return ret;
441}
442
8233767a
XG
443static struct extent_state *
444alloc_extent_state_atomic(struct extent_state *prealloc)
445{
446 if (!prealloc)
447 prealloc = alloc_extent_state(GFP_ATOMIC);
448
449 return prealloc;
450}
451
c2d904e0
JM
452void extent_io_tree_panic(struct extent_io_tree *tree, int err)
453{
454 btrfs_panic(tree_fs_info(tree), err, "Locking error: "
455 "Extent tree was modified by another "
456 "thread while locked.");
457}
458
d1310b2e
CM
459/*
460 * clear some bits on a range in the tree. This may require splitting
461 * or inserting elements in the tree, so the gfp mask is used to
462 * indicate which allocations or sleeping are allowed.
463 *
464 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
465 * the given range from the tree regardless of state (ie for truncate).
466 *
467 * the range [start, end] is inclusive.
468 *
6763af84 469 * This takes the tree lock, and returns 0 on success and < 0 on error.
d1310b2e
CM
470 */
471int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
2c64c53d
CM
472 int bits, int wake, int delete,
473 struct extent_state **cached_state,
474 gfp_t mask)
d1310b2e
CM
475{
476 struct extent_state *state;
2c64c53d 477 struct extent_state *cached;
d1310b2e 478 struct extent_state *prealloc = NULL;
2c64c53d 479 struct rb_node *next_node;
d1310b2e 480 struct rb_node *node;
5c939df5 481 u64 last_end;
d1310b2e 482 int err;
2ac55d41 483 int clear = 0;
d1310b2e 484
0ca1f7ce
YZ
485 if (delete)
486 bits |= ~EXTENT_CTLBITS;
487 bits |= EXTENT_FIRST_DELALLOC;
488
2ac55d41
JB
489 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
490 clear = 1;
d1310b2e
CM
491again:
492 if (!prealloc && (mask & __GFP_WAIT)) {
493 prealloc = alloc_extent_state(mask);
494 if (!prealloc)
495 return -ENOMEM;
496 }
497
cad321ad 498 spin_lock(&tree->lock);
2c64c53d
CM
499 if (cached_state) {
500 cached = *cached_state;
2ac55d41
JB
501
502 if (clear) {
503 *cached_state = NULL;
504 cached_state = NULL;
505 }
506
df98b6e2
JB
507 if (cached && cached->tree && cached->start <= start &&
508 cached->end > start) {
2ac55d41
JB
509 if (clear)
510 atomic_dec(&cached->refs);
2c64c53d 511 state = cached;
42daec29 512 goto hit_next;
2c64c53d 513 }
2ac55d41
JB
514 if (clear)
515 free_extent_state(cached);
2c64c53d 516 }
d1310b2e
CM
517 /*
518 * this search will find the extents that end after
519 * our range starts
520 */
80ea96b1 521 node = tree_search(tree, start);
d1310b2e
CM
522 if (!node)
523 goto out;
524 state = rb_entry(node, struct extent_state, rb_node);
2c64c53d 525hit_next:
d1310b2e
CM
526 if (state->start > end)
527 goto out;
528 WARN_ON(state->end < start);
5c939df5 529 last_end = state->end;
d1310b2e 530
0449314a
LB
531 if (state->end < end && !need_resched())
532 next_node = rb_next(&state->rb_node);
533 else
534 next_node = NULL;
535
536 /* the state doesn't have the wanted bits, go ahead */
537 if (!(state->state & bits))
538 goto next;
539
d1310b2e
CM
540 /*
541 * | ---- desired range ---- |
542 * | state | or
543 * | ------------- state -------------- |
544 *
545 * We need to split the extent we found, and may flip
546 * bits on second half.
547 *
548 * If the extent we found extends past our range, we
549 * just split and search again. It'll get split again
550 * the next time though.
551 *
552 * If the extent we found is inside our range, we clear
553 * the desired bit on it.
554 */
555
556 if (state->start < start) {
8233767a
XG
557 prealloc = alloc_extent_state_atomic(prealloc);
558 BUG_ON(!prealloc);
d1310b2e 559 err = split_state(tree, state, prealloc, start);
c2d904e0
JM
560 if (err)
561 extent_io_tree_panic(tree, err);
562
d1310b2e
CM
563 prealloc = NULL;
564 if (err)
565 goto out;
566 if (state->end <= end) {
6763af84 567 clear_state_bit(tree, state, &bits, wake);
5c939df5
YZ
568 if (last_end == (u64)-1)
569 goto out;
570 start = last_end + 1;
d1310b2e
CM
571 }
572 goto search_again;
573 }
574 /*
575 * | ---- desired range ---- |
576 * | state |
577 * We need to split the extent, and clear the bit
578 * on the first half
579 */
580 if (state->start <= end && state->end > end) {
8233767a
XG
581 prealloc = alloc_extent_state_atomic(prealloc);
582 BUG_ON(!prealloc);
d1310b2e 583 err = split_state(tree, state, prealloc, end + 1);
c2d904e0
JM
584 if (err)
585 extent_io_tree_panic(tree, err);
586
d1310b2e
CM
587 if (wake)
588 wake_up(&state->wq);
42daec29 589
6763af84 590 clear_state_bit(tree, prealloc, &bits, wake);
9ed74f2d 591
d1310b2e
CM
592 prealloc = NULL;
593 goto out;
594 }
42daec29 595
6763af84 596 clear_state_bit(tree, state, &bits, wake);
0449314a 597next:
5c939df5
YZ
598 if (last_end == (u64)-1)
599 goto out;
600 start = last_end + 1;
2c64c53d
CM
601 if (start <= end && next_node) {
602 state = rb_entry(next_node, struct extent_state,
603 rb_node);
692e5759 604 goto hit_next;
2c64c53d 605 }
d1310b2e
CM
606 goto search_again;
607
608out:
cad321ad 609 spin_unlock(&tree->lock);
d1310b2e
CM
610 if (prealloc)
611 free_extent_state(prealloc);
612
6763af84 613 return 0;
d1310b2e
CM
614
615search_again:
616 if (start > end)
617 goto out;
cad321ad 618 spin_unlock(&tree->lock);
d1310b2e
CM
619 if (mask & __GFP_WAIT)
620 cond_resched();
621 goto again;
622}
d1310b2e 623
143bede5
JM
624static void wait_on_state(struct extent_io_tree *tree,
625 struct extent_state *state)
641f5219
CH
626 __releases(tree->lock)
627 __acquires(tree->lock)
d1310b2e
CM
628{
629 DEFINE_WAIT(wait);
630 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
cad321ad 631 spin_unlock(&tree->lock);
d1310b2e 632 schedule();
cad321ad 633 spin_lock(&tree->lock);
d1310b2e 634 finish_wait(&state->wq, &wait);
d1310b2e
CM
635}
636
637/*
638 * waits for one or more bits to clear on a range in the state tree.
639 * The range [start, end] is inclusive.
640 * The tree lock is taken by this function
641 */
143bede5 642void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
d1310b2e
CM
643{
644 struct extent_state *state;
645 struct rb_node *node;
646
cad321ad 647 spin_lock(&tree->lock);
d1310b2e
CM
648again:
649 while (1) {
650 /*
651 * this search will find all the extents that end after
652 * our range starts
653 */
80ea96b1 654 node = tree_search(tree, start);
d1310b2e
CM
655 if (!node)
656 break;
657
658 state = rb_entry(node, struct extent_state, rb_node);
659
660 if (state->start > end)
661 goto out;
662
663 if (state->state & bits) {
664 start = state->start;
665 atomic_inc(&state->refs);
666 wait_on_state(tree, state);
667 free_extent_state(state);
668 goto again;
669 }
670 start = state->end + 1;
671
672 if (start > end)
673 break;
674
ded91f08 675 cond_resched_lock(&tree->lock);
d1310b2e
CM
676 }
677out:
cad321ad 678 spin_unlock(&tree->lock);
d1310b2e 679}
d1310b2e 680
1bf85046 681static void set_state_bits(struct extent_io_tree *tree,
d1310b2e 682 struct extent_state *state,
0ca1f7ce 683 int *bits)
d1310b2e 684{
0ca1f7ce 685 int bits_to_set = *bits & ~EXTENT_CTLBITS;
9ed74f2d 686
1bf85046 687 set_state_cb(tree, state, bits);
0ca1f7ce 688 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
d1310b2e
CM
689 u64 range = state->end - state->start + 1;
690 tree->dirty_bytes += range;
691 }
0ca1f7ce 692 state->state |= bits_to_set;
d1310b2e
CM
693}
694
2c64c53d
CM
695static void cache_state(struct extent_state *state,
696 struct extent_state **cached_ptr)
697{
698 if (cached_ptr && !(*cached_ptr)) {
699 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
700 *cached_ptr = state;
701 atomic_inc(&state->refs);
702 }
703 }
704}
705
507903b8
AJ
706static void uncache_state(struct extent_state **cached_ptr)
707{
708 if (cached_ptr && (*cached_ptr)) {
709 struct extent_state *state = *cached_ptr;
109b36a2
CM
710 *cached_ptr = NULL;
711 free_extent_state(state);
507903b8
AJ
712 }
713}
714
d1310b2e 715/*
1edbb734
CM
716 * set some bits on a range in the tree. This may require allocations or
717 * sleeping, so the gfp mask is used to indicate what is allowed.
d1310b2e 718 *
1edbb734
CM
719 * If any of the exclusive bits are set, this will fail with -EEXIST if some
720 * part of the range already has the desired bits set. The start of the
721 * existing range is returned in failed_start in this case.
d1310b2e 722 *
1edbb734 723 * [start, end] is inclusive This takes the tree lock.
d1310b2e 724 */
1edbb734 725
3fbe5c02
JM
726static int __must_check
727__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
728 int bits, int exclusive_bits, u64 *failed_start,
729 struct extent_state **cached_state, gfp_t mask)
d1310b2e
CM
730{
731 struct extent_state *state;
732 struct extent_state *prealloc = NULL;
733 struct rb_node *node;
d1310b2e 734 int err = 0;
d1310b2e
CM
735 u64 last_start;
736 u64 last_end;
42daec29 737
0ca1f7ce 738 bits |= EXTENT_FIRST_DELALLOC;
d1310b2e
CM
739again:
740 if (!prealloc && (mask & __GFP_WAIT)) {
741 prealloc = alloc_extent_state(mask);
8233767a 742 BUG_ON(!prealloc);
d1310b2e
CM
743 }
744
cad321ad 745 spin_lock(&tree->lock);
9655d298
CM
746 if (cached_state && *cached_state) {
747 state = *cached_state;
df98b6e2
JB
748 if (state->start <= start && state->end > start &&
749 state->tree) {
9655d298
CM
750 node = &state->rb_node;
751 goto hit_next;
752 }
753 }
d1310b2e
CM
754 /*
755 * this search will find all the extents that end after
756 * our range starts.
757 */
80ea96b1 758 node = tree_search(tree, start);
d1310b2e 759 if (!node) {
8233767a
XG
760 prealloc = alloc_extent_state_atomic(prealloc);
761 BUG_ON(!prealloc);
0ca1f7ce 762 err = insert_state(tree, prealloc, start, end, &bits);
c2d904e0
JM
763 if (err)
764 extent_io_tree_panic(tree, err);
765
d1310b2e 766 prealloc = NULL;
d1310b2e
CM
767 goto out;
768 }
d1310b2e 769 state = rb_entry(node, struct extent_state, rb_node);
40431d6c 770hit_next:
d1310b2e
CM
771 last_start = state->start;
772 last_end = state->end;
773
774 /*
775 * | ---- desired range ---- |
776 * | state |
777 *
778 * Just lock what we found and keep going
779 */
780 if (state->start == start && state->end <= end) {
40431d6c 781 struct rb_node *next_node;
1edbb734 782 if (state->state & exclusive_bits) {
d1310b2e
CM
783 *failed_start = state->start;
784 err = -EEXIST;
785 goto out;
786 }
42daec29 787
1bf85046 788 set_state_bits(tree, state, &bits);
9ed74f2d 789
2c64c53d 790 cache_state(state, cached_state);
d1310b2e 791 merge_state(tree, state);
5c939df5
YZ
792 if (last_end == (u64)-1)
793 goto out;
40431d6c 794
5c939df5 795 start = last_end + 1;
df98b6e2 796 next_node = rb_next(&state->rb_node);
c7f895a2
XG
797 if (next_node && start < end && prealloc && !need_resched()) {
798 state = rb_entry(next_node, struct extent_state,
799 rb_node);
800 if (state->start == start)
801 goto hit_next;
40431d6c 802 }
d1310b2e
CM
803 goto search_again;
804 }
805
806 /*
807 * | ---- desired range ---- |
808 * | state |
809 * or
810 * | ------------- state -------------- |
811 *
812 * We need to split the extent we found, and may flip bits on
813 * second half.
814 *
815 * If the extent we found extends past our
816 * range, we just split and search again. It'll get split
817 * again the next time though.
818 *
819 * If the extent we found is inside our range, we set the
820 * desired bit on it.
821 */
822 if (state->start < start) {
1edbb734 823 if (state->state & exclusive_bits) {
d1310b2e
CM
824 *failed_start = start;
825 err = -EEXIST;
826 goto out;
827 }
8233767a
XG
828
829 prealloc = alloc_extent_state_atomic(prealloc);
830 BUG_ON(!prealloc);
d1310b2e 831 err = split_state(tree, state, prealloc, start);
c2d904e0
JM
832 if (err)
833 extent_io_tree_panic(tree, err);
834
d1310b2e
CM
835 prealloc = NULL;
836 if (err)
837 goto out;
838 if (state->end <= end) {
1bf85046 839 set_state_bits(tree, state, &bits);
2c64c53d 840 cache_state(state, cached_state);
d1310b2e 841 merge_state(tree, state);
5c939df5
YZ
842 if (last_end == (u64)-1)
843 goto out;
844 start = last_end + 1;
d1310b2e
CM
845 }
846 goto search_again;
847 }
848 /*
849 * | ---- desired range ---- |
850 * | state | or | state |
851 *
852 * There's a hole, we need to insert something in it and
853 * ignore the extent we found.
854 */
855 if (state->start > start) {
856 u64 this_end;
857 if (end < last_start)
858 this_end = end;
859 else
d397712b 860 this_end = last_start - 1;
8233767a
XG
861
862 prealloc = alloc_extent_state_atomic(prealloc);
863 BUG_ON(!prealloc);
c7f895a2
XG
864
865 /*
866 * Avoid to free 'prealloc' if it can be merged with
867 * the later extent.
868 */
d1310b2e 869 err = insert_state(tree, prealloc, start, this_end,
0ca1f7ce 870 &bits);
c2d904e0
JM
871 if (err)
872 extent_io_tree_panic(tree, err);
873
9ed74f2d
JB
874 cache_state(prealloc, cached_state);
875 prealloc = NULL;
d1310b2e
CM
876 start = this_end + 1;
877 goto search_again;
878 }
879 /*
880 * | ---- desired range ---- |
881 * | state |
882 * We need to split the extent, and set the bit
883 * on the first half
884 */
885 if (state->start <= end && state->end > end) {
1edbb734 886 if (state->state & exclusive_bits) {
d1310b2e
CM
887 *failed_start = start;
888 err = -EEXIST;
889 goto out;
890 }
8233767a
XG
891
892 prealloc = alloc_extent_state_atomic(prealloc);
893 BUG_ON(!prealloc);
d1310b2e 894 err = split_state(tree, state, prealloc, end + 1);
c2d904e0
JM
895 if (err)
896 extent_io_tree_panic(tree, err);
d1310b2e 897
1bf85046 898 set_state_bits(tree, prealloc, &bits);
2c64c53d 899 cache_state(prealloc, cached_state);
d1310b2e
CM
900 merge_state(tree, prealloc);
901 prealloc = NULL;
902 goto out;
903 }
904
905 goto search_again;
906
907out:
cad321ad 908 spin_unlock(&tree->lock);
d1310b2e
CM
909 if (prealloc)
910 free_extent_state(prealloc);
911
912 return err;
913
914search_again:
915 if (start > end)
916 goto out;
cad321ad 917 spin_unlock(&tree->lock);
d1310b2e
CM
918 if (mask & __GFP_WAIT)
919 cond_resched();
920 goto again;
921}
d1310b2e 922
3fbe5c02
JM
923int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
924 u64 *failed_start, struct extent_state **cached_state,
925 gfp_t mask)
926{
927 return __set_extent_bit(tree, start, end, bits, 0, failed_start,
928 cached_state, mask);
929}
930
931
462d6fac
JB
932/**
933 * convert_extent - convert all bits in a given range from one bit to another
934 * @tree: the io tree to search
935 * @start: the start offset in bytes
936 * @end: the end offset in bytes (inclusive)
937 * @bits: the bits to set in this range
938 * @clear_bits: the bits to clear in this range
939 * @mask: the allocation mask
940 *
941 * This will go through and set bits for the given range. If any states exist
942 * already in this range they are set with the given bit and cleared of the
943 * clear_bits. This is only meant to be used by things that are mergeable, ie
944 * converting from say DELALLOC to DIRTY. This is not meant to be used with
945 * boundary bits like LOCK.
946 */
947int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
948 int bits, int clear_bits, gfp_t mask)
949{
950 struct extent_state *state;
951 struct extent_state *prealloc = NULL;
952 struct rb_node *node;
953 int err = 0;
954 u64 last_start;
955 u64 last_end;
956
957again:
958 if (!prealloc && (mask & __GFP_WAIT)) {
959 prealloc = alloc_extent_state(mask);
960 if (!prealloc)
961 return -ENOMEM;
962 }
963
964 spin_lock(&tree->lock);
965 /*
966 * this search will find all the extents that end after
967 * our range starts.
968 */
969 node = tree_search(tree, start);
970 if (!node) {
971 prealloc = alloc_extent_state_atomic(prealloc);
1cf4ffdb
LB
972 if (!prealloc) {
973 err = -ENOMEM;
974 goto out;
975 }
462d6fac
JB
976 err = insert_state(tree, prealloc, start, end, &bits);
977 prealloc = NULL;
c2d904e0
JM
978 if (err)
979 extent_io_tree_panic(tree, err);
462d6fac
JB
980 goto out;
981 }
982 state = rb_entry(node, struct extent_state, rb_node);
983hit_next:
984 last_start = state->start;
985 last_end = state->end;
986
987 /*
988 * | ---- desired range ---- |
989 * | state |
990 *
991 * Just lock what we found and keep going
992 */
993 if (state->start == start && state->end <= end) {
994 struct rb_node *next_node;
995
996 set_state_bits(tree, state, &bits);
997 clear_state_bit(tree, state, &clear_bits, 0);
462d6fac
JB
998 if (last_end == (u64)-1)
999 goto out;
1000
1001 start = last_end + 1;
1002 next_node = rb_next(&state->rb_node);
1003 if (next_node && start < end && prealloc && !need_resched()) {
1004 state = rb_entry(next_node, struct extent_state,
1005 rb_node);
1006 if (state->start == start)
1007 goto hit_next;
1008 }
1009 goto search_again;
1010 }
1011
1012 /*
1013 * | ---- desired range ---- |
1014 * | state |
1015 * or
1016 * | ------------- state -------------- |
1017 *
1018 * We need to split the extent we found, and may flip bits on
1019 * second half.
1020 *
1021 * If the extent we found extends past our
1022 * range, we just split and search again. It'll get split
1023 * again the next time though.
1024 *
1025 * If the extent we found is inside our range, we set the
1026 * desired bit on it.
1027 */
1028 if (state->start < start) {
1029 prealloc = alloc_extent_state_atomic(prealloc);
1cf4ffdb
LB
1030 if (!prealloc) {
1031 err = -ENOMEM;
1032 goto out;
1033 }
462d6fac 1034 err = split_state(tree, state, prealloc, start);
c2d904e0
JM
1035 if (err)
1036 extent_io_tree_panic(tree, err);
462d6fac
JB
1037 prealloc = NULL;
1038 if (err)
1039 goto out;
1040 if (state->end <= end) {
1041 set_state_bits(tree, state, &bits);
1042 clear_state_bit(tree, state, &clear_bits, 0);
462d6fac
JB
1043 if (last_end == (u64)-1)
1044 goto out;
1045 start = last_end + 1;
1046 }
1047 goto search_again;
1048 }
1049 /*
1050 * | ---- desired range ---- |
1051 * | state | or | state |
1052 *
1053 * There's a hole, we need to insert something in it and
1054 * ignore the extent we found.
1055 */
1056 if (state->start > start) {
1057 u64 this_end;
1058 if (end < last_start)
1059 this_end = end;
1060 else
1061 this_end = last_start - 1;
1062
1063 prealloc = alloc_extent_state_atomic(prealloc);
1cf4ffdb
LB
1064 if (!prealloc) {
1065 err = -ENOMEM;
1066 goto out;
1067 }
462d6fac
JB
1068
1069 /*
1070 * Avoid to free 'prealloc' if it can be merged with
1071 * the later extent.
1072 */
1073 err = insert_state(tree, prealloc, start, this_end,
1074 &bits);
c2d904e0
JM
1075 if (err)
1076 extent_io_tree_panic(tree, err);
462d6fac
JB
1077 prealloc = NULL;
1078 start = this_end + 1;
1079 goto search_again;
1080 }
1081 /*
1082 * | ---- desired range ---- |
1083 * | state |
1084 * We need to split the extent, and set the bit
1085 * on the first half
1086 */
1087 if (state->start <= end && state->end > end) {
1088 prealloc = alloc_extent_state_atomic(prealloc);
1cf4ffdb
LB
1089 if (!prealloc) {
1090 err = -ENOMEM;
1091 goto out;
1092 }
462d6fac
JB
1093
1094 err = split_state(tree, state, prealloc, end + 1);
c2d904e0
JM
1095 if (err)
1096 extent_io_tree_panic(tree, err);
462d6fac
JB
1097
1098 set_state_bits(tree, prealloc, &bits);
1099 clear_state_bit(tree, prealloc, &clear_bits, 0);
462d6fac
JB
1100 prealloc = NULL;
1101 goto out;
1102 }
1103
1104 goto search_again;
1105
1106out:
1107 spin_unlock(&tree->lock);
1108 if (prealloc)
1109 free_extent_state(prealloc);
1110
1111 return err;
1112
1113search_again:
1114 if (start > end)
1115 goto out;
1116 spin_unlock(&tree->lock);
1117 if (mask & __GFP_WAIT)
1118 cond_resched();
1119 goto again;
1120}
1121
d1310b2e
CM
1122/* wrappers around set/clear extent bit */
1123int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1124 gfp_t mask)
1125{
3fbe5c02 1126 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
2c64c53d 1127 NULL, mask);
d1310b2e 1128}
d1310b2e
CM
1129
1130int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1131 int bits, gfp_t mask)
1132{
3fbe5c02 1133 return set_extent_bit(tree, start, end, bits, NULL,
2c64c53d 1134 NULL, mask);
d1310b2e 1135}
d1310b2e
CM
1136
1137int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1138 int bits, gfp_t mask)
1139{
2c64c53d 1140 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
d1310b2e 1141}
d1310b2e
CM
1142
1143int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
2ac55d41 1144 struct extent_state **cached_state, gfp_t mask)
d1310b2e
CM
1145{
1146 return set_extent_bit(tree, start, end,
fee187d9 1147 EXTENT_DELALLOC | EXTENT_UPTODATE,
3fbe5c02 1148 NULL, cached_state, mask);
d1310b2e 1149}
d1310b2e
CM
1150
1151int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1152 gfp_t mask)
1153{
1154 return clear_extent_bit(tree, start, end,
32c00aff 1155 EXTENT_DIRTY | EXTENT_DELALLOC |
0ca1f7ce 1156 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
d1310b2e 1157}
d1310b2e
CM
1158
1159int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1160 gfp_t mask)
1161{
3fbe5c02 1162 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
2c64c53d 1163 NULL, mask);
d1310b2e 1164}
d1310b2e 1165
d1310b2e 1166int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
507903b8 1167 struct extent_state **cached_state, gfp_t mask)
d1310b2e 1168{
507903b8 1169 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
3fbe5c02 1170 cached_state, mask);
d1310b2e 1171}
d1310b2e 1172
d397712b 1173static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
2ac55d41
JB
1174 u64 end, struct extent_state **cached_state,
1175 gfp_t mask)
d1310b2e 1176{
2c64c53d 1177 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
2ac55d41 1178 cached_state, mask);
d1310b2e 1179}
d1310b2e 1180
d352ac68
CM
1181/*
1182 * either insert or lock state struct between start and end use mask to tell
1183 * us if waiting is desired.
1184 */
1edbb734 1185int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
d0082371 1186 int bits, struct extent_state **cached_state)
d1310b2e
CM
1187{
1188 int err;
1189 u64 failed_start;
1190 while (1) {
3fbe5c02
JM
1191 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1192 EXTENT_LOCKED, &failed_start,
1193 cached_state, GFP_NOFS);
d0082371 1194 if (err == -EEXIST) {
d1310b2e
CM
1195 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1196 start = failed_start;
d0082371 1197 } else
d1310b2e 1198 break;
d1310b2e
CM
1199 WARN_ON(start > end);
1200 }
1201 return err;
1202}
d1310b2e 1203
d0082371 1204int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1edbb734 1205{
d0082371 1206 return lock_extent_bits(tree, start, end, 0, NULL);
1edbb734
CM
1207}
1208
d0082371 1209int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
25179201
JB
1210{
1211 int err;
1212 u64 failed_start;
1213
3fbe5c02
JM
1214 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1215 &failed_start, NULL, GFP_NOFS);
6643558d
YZ
1216 if (err == -EEXIST) {
1217 if (failed_start > start)
1218 clear_extent_bit(tree, start, failed_start - 1,
d0082371 1219 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
25179201 1220 return 0;
6643558d 1221 }
25179201
JB
1222 return 1;
1223}
25179201 1224
2c64c53d
CM
1225int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1226 struct extent_state **cached, gfp_t mask)
1227{
1228 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1229 mask);
1230}
1231
d0082371 1232int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
d1310b2e 1233{
2c64c53d 1234 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
d0082371 1235 GFP_NOFS);
d1310b2e 1236}
d1310b2e 1237
d1310b2e
CM
1238/*
1239 * helper function to set both pages and extents in the tree writeback
1240 */
b2950863 1241static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
d1310b2e
CM
1242{
1243 unsigned long index = start >> PAGE_CACHE_SHIFT;
1244 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1245 struct page *page;
1246
1247 while (index <= end_index) {
1248 page = find_get_page(tree->mapping, index);
79787eaa 1249 BUG_ON(!page); /* Pages should be in the extent_io_tree */
d1310b2e
CM
1250 set_page_writeback(page);
1251 page_cache_release(page);
1252 index++;
1253 }
d1310b2e
CM
1254 return 0;
1255}
d1310b2e 1256
d352ac68
CM
1257/* find the first state struct with 'bits' set after 'start', and
1258 * return it. tree->lock must be held. NULL will returned if
1259 * nothing was found after 'start'
1260 */
d7fc640e
CM
1261struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1262 u64 start, int bits)
1263{
1264 struct rb_node *node;
1265 struct extent_state *state;
1266
1267 /*
1268 * this search will find all the extents that end after
1269 * our range starts.
1270 */
1271 node = tree_search(tree, start);
d397712b 1272 if (!node)
d7fc640e 1273 goto out;
d7fc640e 1274
d397712b 1275 while (1) {
d7fc640e 1276 state = rb_entry(node, struct extent_state, rb_node);
d397712b 1277 if (state->end >= start && (state->state & bits))
d7fc640e 1278 return state;
d397712b 1279
d7fc640e
CM
1280 node = rb_next(node);
1281 if (!node)
1282 break;
1283 }
1284out:
1285 return NULL;
1286}
d7fc640e 1287
69261c4b
XG
1288/*
1289 * find the first offset in the io tree with 'bits' set. zero is
1290 * returned if we find something, and *start_ret and *end_ret are
1291 * set to reflect the state struct that was found.
1292 *
1293 * If nothing was found, 1 is returned, < 0 on error
1294 */
1295int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1296 u64 *start_ret, u64 *end_ret, int bits)
1297{
1298 struct extent_state *state;
1299 int ret = 1;
1300
1301 spin_lock(&tree->lock);
1302 state = find_first_extent_bit_state(tree, start, bits);
1303 if (state) {
1304 *start_ret = state->start;
1305 *end_ret = state->end;
1306 ret = 0;
1307 }
1308 spin_unlock(&tree->lock);
1309 return ret;
1310}
1311
d352ac68
CM
1312/*
1313 * find a contiguous range of bytes in the file marked as delalloc, not
1314 * more than 'max_bytes'. start and end are used to return the range,
1315 *
1316 * 1 is returned if we find something, 0 if nothing was in the tree
1317 */
c8b97818 1318static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
c2a128d2
JB
1319 u64 *start, u64 *end, u64 max_bytes,
1320 struct extent_state **cached_state)
d1310b2e
CM
1321{
1322 struct rb_node *node;
1323 struct extent_state *state;
1324 u64 cur_start = *start;
1325 u64 found = 0;
1326 u64 total_bytes = 0;
1327
cad321ad 1328 spin_lock(&tree->lock);
c8b97818 1329
d1310b2e
CM
1330 /*
1331 * this search will find all the extents that end after
1332 * our range starts.
1333 */
80ea96b1 1334 node = tree_search(tree, cur_start);
2b114d1d 1335 if (!node) {
3b951516
CM
1336 if (!found)
1337 *end = (u64)-1;
d1310b2e
CM
1338 goto out;
1339 }
1340
d397712b 1341 while (1) {
d1310b2e 1342 state = rb_entry(node, struct extent_state, rb_node);
5b21f2ed
ZY
1343 if (found && (state->start != cur_start ||
1344 (state->state & EXTENT_BOUNDARY))) {
d1310b2e
CM
1345 goto out;
1346 }
1347 if (!(state->state & EXTENT_DELALLOC)) {
1348 if (!found)
1349 *end = state->end;
1350 goto out;
1351 }
c2a128d2 1352 if (!found) {
d1310b2e 1353 *start = state->start;
c2a128d2
JB
1354 *cached_state = state;
1355 atomic_inc(&state->refs);
1356 }
d1310b2e
CM
1357 found++;
1358 *end = state->end;
1359 cur_start = state->end + 1;
1360 node = rb_next(node);
1361 if (!node)
1362 break;
1363 total_bytes += state->end - state->start + 1;
1364 if (total_bytes >= max_bytes)
1365 break;
1366 }
1367out:
cad321ad 1368 spin_unlock(&tree->lock);
d1310b2e
CM
1369 return found;
1370}
1371
143bede5
JM
1372static noinline void __unlock_for_delalloc(struct inode *inode,
1373 struct page *locked_page,
1374 u64 start, u64 end)
c8b97818
CM
1375{
1376 int ret;
1377 struct page *pages[16];
1378 unsigned long index = start >> PAGE_CACHE_SHIFT;
1379 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1380 unsigned long nr_pages = end_index - index + 1;
1381 int i;
1382
1383 if (index == locked_page->index && end_index == index)
143bede5 1384 return;
c8b97818 1385
d397712b 1386 while (nr_pages > 0) {
c8b97818 1387 ret = find_get_pages_contig(inode->i_mapping, index,
5b050f04
CM
1388 min_t(unsigned long, nr_pages,
1389 ARRAY_SIZE(pages)), pages);
c8b97818
CM
1390 for (i = 0; i < ret; i++) {
1391 if (pages[i] != locked_page)
1392 unlock_page(pages[i]);
1393 page_cache_release(pages[i]);
1394 }
1395 nr_pages -= ret;
1396 index += ret;
1397 cond_resched();
1398 }
c8b97818
CM
1399}
1400
1401static noinline int lock_delalloc_pages(struct inode *inode,
1402 struct page *locked_page,
1403 u64 delalloc_start,
1404 u64 delalloc_end)
1405{
1406 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1407 unsigned long start_index = index;
1408 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1409 unsigned long pages_locked = 0;
1410 struct page *pages[16];
1411 unsigned long nrpages;
1412 int ret;
1413 int i;
1414
1415 /* the caller is responsible for locking the start index */
1416 if (index == locked_page->index && index == end_index)
1417 return 0;
1418
1419 /* skip the page at the start index */
1420 nrpages = end_index - index + 1;
d397712b 1421 while (nrpages > 0) {
c8b97818 1422 ret = find_get_pages_contig(inode->i_mapping, index,
5b050f04
CM
1423 min_t(unsigned long,
1424 nrpages, ARRAY_SIZE(pages)), pages);
c8b97818
CM
1425 if (ret == 0) {
1426 ret = -EAGAIN;
1427 goto done;
1428 }
1429 /* now we have an array of pages, lock them all */
1430 for (i = 0; i < ret; i++) {
1431 /*
1432 * the caller is taking responsibility for
1433 * locked_page
1434 */
771ed689 1435 if (pages[i] != locked_page) {
c8b97818 1436 lock_page(pages[i]);
f2b1c41c
CM
1437 if (!PageDirty(pages[i]) ||
1438 pages[i]->mapping != inode->i_mapping) {
771ed689
CM
1439 ret = -EAGAIN;
1440 unlock_page(pages[i]);
1441 page_cache_release(pages[i]);
1442 goto done;
1443 }
1444 }
c8b97818 1445 page_cache_release(pages[i]);
771ed689 1446 pages_locked++;
c8b97818 1447 }
c8b97818
CM
1448 nrpages -= ret;
1449 index += ret;
1450 cond_resched();
1451 }
1452 ret = 0;
1453done:
1454 if (ret && pages_locked) {
1455 __unlock_for_delalloc(inode, locked_page,
1456 delalloc_start,
1457 ((u64)(start_index + pages_locked - 1)) <<
1458 PAGE_CACHE_SHIFT);
1459 }
1460 return ret;
1461}
1462
1463/*
1464 * find a contiguous range of bytes in the file marked as delalloc, not
1465 * more than 'max_bytes'. start and end are used to return the range,
1466 *
1467 * 1 is returned if we find something, 0 if nothing was in the tree
1468 */
1469static noinline u64 find_lock_delalloc_range(struct inode *inode,
1470 struct extent_io_tree *tree,
1471 struct page *locked_page,
1472 u64 *start, u64 *end,
1473 u64 max_bytes)
1474{
1475 u64 delalloc_start;
1476 u64 delalloc_end;
1477 u64 found;
9655d298 1478 struct extent_state *cached_state = NULL;
c8b97818
CM
1479 int ret;
1480 int loops = 0;
1481
1482again:
1483 /* step one, find a bunch of delalloc bytes starting at start */
1484 delalloc_start = *start;
1485 delalloc_end = 0;
1486 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
c2a128d2 1487 max_bytes, &cached_state);
70b99e69 1488 if (!found || delalloc_end <= *start) {
c8b97818
CM
1489 *start = delalloc_start;
1490 *end = delalloc_end;
c2a128d2 1491 free_extent_state(cached_state);
c8b97818
CM
1492 return found;
1493 }
1494
70b99e69
CM
1495 /*
1496 * start comes from the offset of locked_page. We have to lock
1497 * pages in order, so we can't process delalloc bytes before
1498 * locked_page
1499 */
d397712b 1500 if (delalloc_start < *start)
70b99e69 1501 delalloc_start = *start;
70b99e69 1502
c8b97818
CM
1503 /*
1504 * make sure to limit the number of pages we try to lock down
1505 * if we're looping.
1506 */
d397712b 1507 if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
771ed689 1508 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
d397712b 1509
c8b97818
CM
1510 /* step two, lock all the pages after the page that has start */
1511 ret = lock_delalloc_pages(inode, locked_page,
1512 delalloc_start, delalloc_end);
1513 if (ret == -EAGAIN) {
1514 /* some of the pages are gone, lets avoid looping by
1515 * shortening the size of the delalloc range we're searching
1516 */
9655d298 1517 free_extent_state(cached_state);
c8b97818
CM
1518 if (!loops) {
1519 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1520 max_bytes = PAGE_CACHE_SIZE - offset;
1521 loops = 1;
1522 goto again;
1523 } else {
1524 found = 0;
1525 goto out_failed;
1526 }
1527 }
79787eaa 1528 BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
c8b97818
CM
1529
1530 /* step three, lock the state bits for the whole range */
d0082371 1531 lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
c8b97818
CM
1532
1533 /* then test to make sure it is all still delalloc */
1534 ret = test_range_bit(tree, delalloc_start, delalloc_end,
9655d298 1535 EXTENT_DELALLOC, 1, cached_state);
c8b97818 1536 if (!ret) {
9655d298
CM
1537 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1538 &cached_state, GFP_NOFS);
c8b97818
CM
1539 __unlock_for_delalloc(inode, locked_page,
1540 delalloc_start, delalloc_end);
1541 cond_resched();
1542 goto again;
1543 }
9655d298 1544 free_extent_state(cached_state);
c8b97818
CM
1545 *start = delalloc_start;
1546 *end = delalloc_end;
1547out_failed:
1548 return found;
1549}
1550
1551int extent_clear_unlock_delalloc(struct inode *inode,
1552 struct extent_io_tree *tree,
1553 u64 start, u64 end, struct page *locked_page,
a791e35e 1554 unsigned long op)
c8b97818
CM
1555{
1556 int ret;
1557 struct page *pages[16];
1558 unsigned long index = start >> PAGE_CACHE_SHIFT;
1559 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1560 unsigned long nr_pages = end_index - index + 1;
1561 int i;
771ed689 1562 int clear_bits = 0;
c8b97818 1563
a791e35e 1564 if (op & EXTENT_CLEAR_UNLOCK)
771ed689 1565 clear_bits |= EXTENT_LOCKED;
a791e35e 1566 if (op & EXTENT_CLEAR_DIRTY)
c8b97818
CM
1567 clear_bits |= EXTENT_DIRTY;
1568
a791e35e 1569 if (op & EXTENT_CLEAR_DELALLOC)
771ed689
CM
1570 clear_bits |= EXTENT_DELALLOC;
1571
2c64c53d 1572 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
32c00aff
JB
1573 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1574 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1575 EXTENT_SET_PRIVATE2)))
771ed689 1576 return 0;
c8b97818 1577
d397712b 1578 while (nr_pages > 0) {
c8b97818 1579 ret = find_get_pages_contig(inode->i_mapping, index,
5b050f04
CM
1580 min_t(unsigned long,
1581 nr_pages, ARRAY_SIZE(pages)), pages);
c8b97818 1582 for (i = 0; i < ret; i++) {
8b62b72b 1583
a791e35e 1584 if (op & EXTENT_SET_PRIVATE2)
8b62b72b
CM
1585 SetPagePrivate2(pages[i]);
1586
c8b97818
CM
1587 if (pages[i] == locked_page) {
1588 page_cache_release(pages[i]);
1589 continue;
1590 }
a791e35e 1591 if (op & EXTENT_CLEAR_DIRTY)
c8b97818 1592 clear_page_dirty_for_io(pages[i]);
a791e35e 1593 if (op & EXTENT_SET_WRITEBACK)
c8b97818 1594 set_page_writeback(pages[i]);
a791e35e 1595 if (op & EXTENT_END_WRITEBACK)
c8b97818 1596 end_page_writeback(pages[i]);
a791e35e 1597 if (op & EXTENT_CLEAR_UNLOCK_PAGE)
771ed689 1598 unlock_page(pages[i]);
c8b97818
CM
1599 page_cache_release(pages[i]);
1600 }
1601 nr_pages -= ret;
1602 index += ret;
1603 cond_resched();
1604 }
1605 return 0;
1606}
c8b97818 1607
d352ac68
CM
1608/*
1609 * count the number of bytes in the tree that have a given bit(s)
1610 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1611 * cached. The total number found is returned.
1612 */
d1310b2e
CM
1613u64 count_range_bits(struct extent_io_tree *tree,
1614 u64 *start, u64 search_end, u64 max_bytes,
ec29ed5b 1615 unsigned long bits, int contig)
d1310b2e
CM
1616{
1617 struct rb_node *node;
1618 struct extent_state *state;
1619 u64 cur_start = *start;
1620 u64 total_bytes = 0;
ec29ed5b 1621 u64 last = 0;
d1310b2e
CM
1622 int found = 0;
1623
1624 if (search_end <= cur_start) {
d1310b2e
CM
1625 WARN_ON(1);
1626 return 0;
1627 }
1628
cad321ad 1629 spin_lock(&tree->lock);
d1310b2e
CM
1630 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1631 total_bytes = tree->dirty_bytes;
1632 goto out;
1633 }
1634 /*
1635 * this search will find all the extents that end after
1636 * our range starts.
1637 */
80ea96b1 1638 node = tree_search(tree, cur_start);
d397712b 1639 if (!node)
d1310b2e 1640 goto out;
d1310b2e 1641
d397712b 1642 while (1) {
d1310b2e
CM
1643 state = rb_entry(node, struct extent_state, rb_node);
1644 if (state->start > search_end)
1645 break;
ec29ed5b
CM
1646 if (contig && found && state->start > last + 1)
1647 break;
1648 if (state->end >= cur_start && (state->state & bits) == bits) {
d1310b2e
CM
1649 total_bytes += min(search_end, state->end) + 1 -
1650 max(cur_start, state->start);
1651 if (total_bytes >= max_bytes)
1652 break;
1653 if (!found) {
af60bed2 1654 *start = max(cur_start, state->start);
d1310b2e
CM
1655 found = 1;
1656 }
ec29ed5b
CM
1657 last = state->end;
1658 } else if (contig && found) {
1659 break;
d1310b2e
CM
1660 }
1661 node = rb_next(node);
1662 if (!node)
1663 break;
1664 }
1665out:
cad321ad 1666 spin_unlock(&tree->lock);
d1310b2e
CM
1667 return total_bytes;
1668}
b2950863 1669
d352ac68
CM
1670/*
1671 * set the private field for a given byte offset in the tree. If there isn't
1672 * an extent_state there already, this does nothing.
1673 */
d1310b2e
CM
1674int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1675{
1676 struct rb_node *node;
1677 struct extent_state *state;
1678 int ret = 0;
1679
cad321ad 1680 spin_lock(&tree->lock);
d1310b2e
CM
1681 /*
1682 * this search will find all the extents that end after
1683 * our range starts.
1684 */
80ea96b1 1685 node = tree_search(tree, start);
2b114d1d 1686 if (!node) {
d1310b2e
CM
1687 ret = -ENOENT;
1688 goto out;
1689 }
1690 state = rb_entry(node, struct extent_state, rb_node);
1691 if (state->start != start) {
1692 ret = -ENOENT;
1693 goto out;
1694 }
1695 state->private = private;
1696out:
cad321ad 1697 spin_unlock(&tree->lock);
d1310b2e
CM
1698 return ret;
1699}
1700
1701int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1702{
1703 struct rb_node *node;
1704 struct extent_state *state;
1705 int ret = 0;
1706
cad321ad 1707 spin_lock(&tree->lock);
d1310b2e
CM
1708 /*
1709 * this search will find all the extents that end after
1710 * our range starts.
1711 */
80ea96b1 1712 node = tree_search(tree, start);
2b114d1d 1713 if (!node) {
d1310b2e
CM
1714 ret = -ENOENT;
1715 goto out;
1716 }
1717 state = rb_entry(node, struct extent_state, rb_node);
1718 if (state->start != start) {
1719 ret = -ENOENT;
1720 goto out;
1721 }
1722 *private = state->private;
1723out:
cad321ad 1724 spin_unlock(&tree->lock);
d1310b2e
CM
1725 return ret;
1726}
1727
1728/*
1729 * searches a range in the state tree for a given mask.
70dec807 1730 * If 'filled' == 1, this returns 1 only if every extent in the tree
d1310b2e
CM
1731 * has the bits set. Otherwise, 1 is returned if any bit in the
1732 * range is found set.
1733 */
1734int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
9655d298 1735 int bits, int filled, struct extent_state *cached)
d1310b2e
CM
1736{
1737 struct extent_state *state = NULL;
1738 struct rb_node *node;
1739 int bitset = 0;
d1310b2e 1740
cad321ad 1741 spin_lock(&tree->lock);
df98b6e2
JB
1742 if (cached && cached->tree && cached->start <= start &&
1743 cached->end > start)
9655d298
CM
1744 node = &cached->rb_node;
1745 else
1746 node = tree_search(tree, start);
d1310b2e
CM
1747 while (node && start <= end) {
1748 state = rb_entry(node, struct extent_state, rb_node);
1749
1750 if (filled && state->start > start) {
1751 bitset = 0;
1752 break;
1753 }
1754
1755 if (state->start > end)
1756 break;
1757
1758 if (state->state & bits) {
1759 bitset = 1;
1760 if (!filled)
1761 break;
1762 } else if (filled) {
1763 bitset = 0;
1764 break;
1765 }
46562cec
CM
1766
1767 if (state->end == (u64)-1)
1768 break;
1769
d1310b2e
CM
1770 start = state->end + 1;
1771 if (start > end)
1772 break;
1773 node = rb_next(node);
1774 if (!node) {
1775 if (filled)
1776 bitset = 0;
1777 break;
1778 }
1779 }
cad321ad 1780 spin_unlock(&tree->lock);
d1310b2e
CM
1781 return bitset;
1782}
d1310b2e
CM
1783
1784/*
1785 * helper function to set a given page up to date if all the
1786 * extents in the tree for that page are up to date
1787 */
143bede5 1788static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
d1310b2e
CM
1789{
1790 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1791 u64 end = start + PAGE_CACHE_SIZE - 1;
9655d298 1792 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
d1310b2e 1793 SetPageUptodate(page);
d1310b2e
CM
1794}
1795
1796/*
1797 * helper function to unlock a page if all the extents in the tree
1798 * for that page are unlocked
1799 */
143bede5 1800static void check_page_locked(struct extent_io_tree *tree, struct page *page)
d1310b2e
CM
1801{
1802 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1803 u64 end = start + PAGE_CACHE_SIZE - 1;
9655d298 1804 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
d1310b2e 1805 unlock_page(page);
d1310b2e
CM
1806}
1807
1808/*
1809 * helper function to end page writeback if all the extents
1810 * in the tree for that page are done with writeback
1811 */
143bede5
JM
1812static void check_page_writeback(struct extent_io_tree *tree,
1813 struct page *page)
d1310b2e 1814{
1edbb734 1815 end_page_writeback(page);
d1310b2e
CM
1816}
1817
4a54c8c1
JS
1818/*
1819 * When IO fails, either with EIO or csum verification fails, we
1820 * try other mirrors that might have a good copy of the data. This
1821 * io_failure_record is used to record state as we go through all the
1822 * mirrors. If another mirror has good data, the page is set up to date
1823 * and things continue. If a good mirror can't be found, the original
1824 * bio end_io callback is called to indicate things have failed.
1825 */
1826struct io_failure_record {
1827 struct page *page;
1828 u64 start;
1829 u64 len;
1830 u64 logical;
1831 unsigned long bio_flags;
1832 int this_mirror;
1833 int failed_mirror;
1834 int in_validation;
1835};
1836
1837static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1838 int did_repair)
1839{
1840 int ret;
1841 int err = 0;
1842 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1843
1844 set_state_private(failure_tree, rec->start, 0);
1845 ret = clear_extent_bits(failure_tree, rec->start,
1846 rec->start + rec->len - 1,
1847 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1848 if (ret)
1849 err = ret;
1850
1851 if (did_repair) {
1852 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1853 rec->start + rec->len - 1,
1854 EXTENT_DAMAGED, GFP_NOFS);
1855 if (ret && !err)
1856 err = ret;
1857 }
1858
1859 kfree(rec);
1860 return err;
1861}
1862
1863static void repair_io_failure_callback(struct bio *bio, int err)
1864{
1865 complete(bio->bi_private);
1866}
1867
1868/*
1869 * this bypasses the standard btrfs submit functions deliberately, as
1870 * the standard behavior is to write all copies in a raid setup. here we only
1871 * want to write the one bad copy. so we do the mapping for ourselves and issue
1872 * submit_bio directly.
1873 * to avoid any synchonization issues, wait for the data after writing, which
1874 * actually prevents the read that triggered the error from finishing.
1875 * currently, there can be no more than two copies of every data bit. thus,
1876 * exactly one rewrite is required.
1877 */
1878int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
1879 u64 length, u64 logical, struct page *page,
1880 int mirror_num)
1881{
1882 struct bio *bio;
1883 struct btrfs_device *dev;
1884 DECLARE_COMPLETION_ONSTACK(compl);
1885 u64 map_length = 0;
1886 u64 sector;
1887 struct btrfs_bio *bbio = NULL;
1888 int ret;
1889
1890 BUG_ON(!mirror_num);
1891
1892 bio = bio_alloc(GFP_NOFS, 1);
1893 if (!bio)
1894 return -EIO;
1895 bio->bi_private = &compl;
1896 bio->bi_end_io = repair_io_failure_callback;
1897 bio->bi_size = 0;
1898 map_length = length;
1899
1900 ret = btrfs_map_block(map_tree, WRITE, logical,
1901 &map_length, &bbio, mirror_num);
1902 if (ret) {
1903 bio_put(bio);
1904 return -EIO;
1905 }
1906 BUG_ON(mirror_num != bbio->mirror_num);
1907 sector = bbio->stripes[mirror_num-1].physical >> 9;
1908 bio->bi_sector = sector;
1909 dev = bbio->stripes[mirror_num-1].dev;
1910 kfree(bbio);
1911 if (!dev || !dev->bdev || !dev->writeable) {
1912 bio_put(bio);
1913 return -EIO;
1914 }
1915 bio->bi_bdev = dev->bdev;
1916 bio_add_page(bio, page, length, start-page_offset(page));
21adbd5c 1917 btrfsic_submit_bio(WRITE_SYNC, bio);
4a54c8c1
JS
1918 wait_for_completion(&compl);
1919
1920 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1921 /* try to remap that extent elsewhere? */
1922 bio_put(bio);
1923 return -EIO;
1924 }
1925
1926 printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s "
1927 "sector %llu)\n", page->mapping->host->i_ino, start,
1928 dev->name, sector);
1929
1930 bio_put(bio);
1931 return 0;
1932}
1933
ea466794
JB
1934int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
1935 int mirror_num)
1936{
1937 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
1938 u64 start = eb->start;
1939 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
d95603b2 1940 int ret = 0;
ea466794
JB
1941
1942 for (i = 0; i < num_pages; i++) {
1943 struct page *p = extent_buffer_page(eb, i);
1944 ret = repair_io_failure(map_tree, start, PAGE_CACHE_SIZE,
1945 start, p, mirror_num);
1946 if (ret)
1947 break;
1948 start += PAGE_CACHE_SIZE;
1949 }
1950
1951 return ret;
1952}
1953
4a54c8c1
JS
1954/*
1955 * each time an IO finishes, we do a fast check in the IO failure tree
1956 * to see if we need to process or clean up an io_failure_record
1957 */
1958static int clean_io_failure(u64 start, struct page *page)
1959{
1960 u64 private;
1961 u64 private_failure;
1962 struct io_failure_record *failrec;
1963 struct btrfs_mapping_tree *map_tree;
1964 struct extent_state *state;
1965 int num_copies;
1966 int did_repair = 0;
1967 int ret;
1968 struct inode *inode = page->mapping->host;
1969
1970 private = 0;
1971 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1972 (u64)-1, 1, EXTENT_DIRTY, 0);
1973 if (!ret)
1974 return 0;
1975
1976 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
1977 &private_failure);
1978 if (ret)
1979 return 0;
1980
1981 failrec = (struct io_failure_record *)(unsigned long) private_failure;
1982 BUG_ON(!failrec->this_mirror);
1983
1984 if (failrec->in_validation) {
1985 /* there was no real error, just free the record */
1986 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
1987 failrec->start);
1988 did_repair = 1;
1989 goto out;
1990 }
1991
1992 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1993 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1994 failrec->start,
1995 EXTENT_LOCKED);
1996 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1997
1998 if (state && state->start == failrec->start) {
1999 map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
2000 num_copies = btrfs_num_copies(map_tree, failrec->logical,
2001 failrec->len);
2002 if (num_copies > 1) {
2003 ret = repair_io_failure(map_tree, start, failrec->len,
2004 failrec->logical, page,
2005 failrec->failed_mirror);
2006 did_repair = !ret;
2007 }
2008 }
2009
2010out:
2011 if (!ret)
2012 ret = free_io_failure(inode, failrec, did_repair);
2013
2014 return ret;
2015}
2016
2017/*
2018 * this is a generic handler for readpage errors (default
2019 * readpage_io_failed_hook). if other copies exist, read those and write back
2020 * good data to the failed position. does not investigate in remapping the
2021 * failed extent elsewhere, hoping the device will be smart enough to do this as
2022 * needed
2023 */
2024
2025static int bio_readpage_error(struct bio *failed_bio, struct page *page,
2026 u64 start, u64 end, int failed_mirror,
2027 struct extent_state *state)
2028{
2029 struct io_failure_record *failrec = NULL;
2030 u64 private;
2031 struct extent_map *em;
2032 struct inode *inode = page->mapping->host;
2033 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2034 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2035 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2036 struct bio *bio;
2037 int num_copies;
2038 int ret;
2039 int read_mode;
2040 u64 logical;
2041
2042 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2043
2044 ret = get_state_private(failure_tree, start, &private);
2045 if (ret) {
2046 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2047 if (!failrec)
2048 return -ENOMEM;
2049 failrec->start = start;
2050 failrec->len = end - start + 1;
2051 failrec->this_mirror = 0;
2052 failrec->bio_flags = 0;
2053 failrec->in_validation = 0;
2054
2055 read_lock(&em_tree->lock);
2056 em = lookup_extent_mapping(em_tree, start, failrec->len);
2057 if (!em) {
2058 read_unlock(&em_tree->lock);
2059 kfree(failrec);
2060 return -EIO;
2061 }
2062
2063 if (em->start > start || em->start + em->len < start) {
2064 free_extent_map(em);
2065 em = NULL;
2066 }
2067 read_unlock(&em_tree->lock);
2068
2069 if (!em || IS_ERR(em)) {
2070 kfree(failrec);
2071 return -EIO;
2072 }
2073 logical = start - em->start;
2074 logical = em->block_start + logical;
2075 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2076 logical = em->block_start;
2077 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2078 extent_set_compress_type(&failrec->bio_flags,
2079 em->compress_type);
2080 }
2081 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2082 "len=%llu\n", logical, start, failrec->len);
2083 failrec->logical = logical;
2084 free_extent_map(em);
2085
2086 /* set the bits in the private failure tree */
2087 ret = set_extent_bits(failure_tree, start, end,
2088 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2089 if (ret >= 0)
2090 ret = set_state_private(failure_tree, start,
2091 (u64)(unsigned long)failrec);
2092 /* set the bits in the inode's tree */
2093 if (ret >= 0)
2094 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2095 GFP_NOFS);
2096 if (ret < 0) {
2097 kfree(failrec);
2098 return ret;
2099 }
2100 } else {
2101 failrec = (struct io_failure_record *)(unsigned long)private;
2102 pr_debug("bio_readpage_error: (found) logical=%llu, "
2103 "start=%llu, len=%llu, validation=%d\n",
2104 failrec->logical, failrec->start, failrec->len,
2105 failrec->in_validation);
2106 /*
2107 * when data can be on disk more than twice, add to failrec here
2108 * (e.g. with a list for failed_mirror) to make
2109 * clean_io_failure() clean all those errors at once.
2110 */
2111 }
2112 num_copies = btrfs_num_copies(
2113 &BTRFS_I(inode)->root->fs_info->mapping_tree,
2114 failrec->logical, failrec->len);
2115 if (num_copies == 1) {
2116 /*
2117 * we only have a single copy of the data, so don't bother with
2118 * all the retry and error correction code that follows. no
2119 * matter what the error is, it is very likely to persist.
2120 */
2121 pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
2122 "state=%p, num_copies=%d, next_mirror %d, "
2123 "failed_mirror %d\n", state, num_copies,
2124 failrec->this_mirror, failed_mirror);
2125 free_io_failure(inode, failrec, 0);
2126 return -EIO;
2127 }
2128
2129 if (!state) {
2130 spin_lock(&tree->lock);
2131 state = find_first_extent_bit_state(tree, failrec->start,
2132 EXTENT_LOCKED);
2133 if (state && state->start != failrec->start)
2134 state = NULL;
2135 spin_unlock(&tree->lock);
2136 }
2137
2138 /*
2139 * there are two premises:
2140 * a) deliver good data to the caller
2141 * b) correct the bad sectors on disk
2142 */
2143 if (failed_bio->bi_vcnt > 1) {
2144 /*
2145 * to fulfill b), we need to know the exact failing sectors, as
2146 * we don't want to rewrite any more than the failed ones. thus,
2147 * we need separate read requests for the failed bio
2148 *
2149 * if the following BUG_ON triggers, our validation request got
2150 * merged. we need separate requests for our algorithm to work.
2151 */
2152 BUG_ON(failrec->in_validation);
2153 failrec->in_validation = 1;
2154 failrec->this_mirror = failed_mirror;
2155 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2156 } else {
2157 /*
2158 * we're ready to fulfill a) and b) alongside. get a good copy
2159 * of the failed sector and if we succeed, we have setup
2160 * everything for repair_io_failure to do the rest for us.
2161 */
2162 if (failrec->in_validation) {
2163 BUG_ON(failrec->this_mirror != failed_mirror);
2164 failrec->in_validation = 0;
2165 failrec->this_mirror = 0;
2166 }
2167 failrec->failed_mirror = failed_mirror;
2168 failrec->this_mirror++;
2169 if (failrec->this_mirror == failed_mirror)
2170 failrec->this_mirror++;
2171 read_mode = READ_SYNC;
2172 }
2173
2174 if (!state || failrec->this_mirror > num_copies) {
2175 pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
2176 "next_mirror %d, failed_mirror %d\n", state,
2177 num_copies, failrec->this_mirror, failed_mirror);
2178 free_io_failure(inode, failrec, 0);
2179 return -EIO;
2180 }
2181
2182 bio = bio_alloc(GFP_NOFS, 1);
e627ee7b
TI
2183 if (!bio) {
2184 free_io_failure(inode, failrec, 0);
2185 return -EIO;
2186 }
4a54c8c1
JS
2187 bio->bi_private = state;
2188 bio->bi_end_io = failed_bio->bi_end_io;
2189 bio->bi_sector = failrec->logical >> 9;
2190 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2191 bio->bi_size = 0;
2192
2193 bio_add_page(bio, page, failrec->len, start - page_offset(page));
2194
2195 pr_debug("bio_readpage_error: submitting new read[%#x] to "
2196 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2197 failrec->this_mirror, num_copies, failrec->in_validation);
2198
013bd4c3
TI
2199 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2200 failrec->this_mirror,
2201 failrec->bio_flags, 0);
2202 return ret;
4a54c8c1
JS
2203}
2204
d1310b2e
CM
2205/* lots and lots of room for performance fixes in the end_bio funcs */
2206
87826df0
JM
2207int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2208{
2209 int uptodate = (err == 0);
2210 struct extent_io_tree *tree;
2211 int ret;
2212
2213 tree = &BTRFS_I(page->mapping->host)->io_tree;
2214
2215 if (tree->ops && tree->ops->writepage_end_io_hook) {
2216 ret = tree->ops->writepage_end_io_hook(page, start,
2217 end, NULL, uptodate);
2218 if (ret)
2219 uptodate = 0;
2220 }
2221
2222 if (!uptodate && tree->ops &&
2223 tree->ops->writepage_io_failed_hook) {
2224 ret = tree->ops->writepage_io_failed_hook(NULL, page,
2225 start, end, NULL);
2226 /* Writeback already completed */
2227 if (ret == 0)
2228 return 1;
2229 }
2230
2231 if (!uptodate) {
2232 clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
2233 ClearPageUptodate(page);
2234 SetPageError(page);
2235 }
2236 return 0;
2237}
2238
d1310b2e
CM
2239/*
2240 * after a writepage IO is done, we need to:
2241 * clear the uptodate bits on error
2242 * clear the writeback bits in the extent tree for this IO
2243 * end_page_writeback if the page has no more pending IO
2244 *
2245 * Scheduling is not allowed, so the extent state tree is expected
2246 * to have one and only one object corresponding to this IO.
2247 */
d1310b2e 2248static void end_bio_extent_writepage(struct bio *bio, int err)
d1310b2e 2249{
d1310b2e 2250 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
902b22f3 2251 struct extent_io_tree *tree;
d1310b2e
CM
2252 u64 start;
2253 u64 end;
2254 int whole_page;
2255
d1310b2e
CM
2256 do {
2257 struct page *page = bvec->bv_page;
902b22f3
DW
2258 tree = &BTRFS_I(page->mapping->host)->io_tree;
2259
d1310b2e
CM
2260 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2261 bvec->bv_offset;
2262 end = start + bvec->bv_len - 1;
2263
2264 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2265 whole_page = 1;
2266 else
2267 whole_page = 0;
2268
2269 if (--bvec >= bio->bi_io_vec)
2270 prefetchw(&bvec->bv_page->flags);
1259ab75 2271
87826df0
JM
2272 if (end_extent_writepage(page, err, start, end))
2273 continue;
70dec807 2274
d1310b2e
CM
2275 if (whole_page)
2276 end_page_writeback(page);
2277 else
2278 check_page_writeback(tree, page);
d1310b2e 2279 } while (bvec >= bio->bi_io_vec);
2b1f55b0 2280
d1310b2e 2281 bio_put(bio);
d1310b2e
CM
2282}
2283
2284/*
2285 * after a readpage IO is done, we need to:
2286 * clear the uptodate bits on error
2287 * set the uptodate bits if things worked
2288 * set the page up to date if all extents in the tree are uptodate
2289 * clear the lock bit in the extent tree
2290 * unlock the page if there are no other extents locked for it
2291 *
2292 * Scheduling is not allowed, so the extent state tree is expected
2293 * to have one and only one object corresponding to this IO.
2294 */
d1310b2e 2295static void end_bio_extent_readpage(struct bio *bio, int err)
d1310b2e
CM
2296{
2297 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
4125bf76
CM
2298 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2299 struct bio_vec *bvec = bio->bi_io_vec;
902b22f3 2300 struct extent_io_tree *tree;
d1310b2e
CM
2301 u64 start;
2302 u64 end;
2303 int whole_page;
ea466794 2304 int failed_mirror;
d1310b2e
CM
2305 int ret;
2306
d20f7043
CM
2307 if (err)
2308 uptodate = 0;
2309
d1310b2e
CM
2310 do {
2311 struct page *page = bvec->bv_page;
507903b8
AJ
2312 struct extent_state *cached = NULL;
2313 struct extent_state *state;
2314
4a54c8c1
JS
2315 pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
2316 "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
2317 (long int)bio->bi_bdev);
902b22f3
DW
2318 tree = &BTRFS_I(page->mapping->host)->io_tree;
2319
d1310b2e
CM
2320 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2321 bvec->bv_offset;
2322 end = start + bvec->bv_len - 1;
2323
2324 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2325 whole_page = 1;
2326 else
2327 whole_page = 0;
2328
4125bf76 2329 if (++bvec <= bvec_end)
d1310b2e
CM
2330 prefetchw(&bvec->bv_page->flags);
2331
507903b8 2332 spin_lock(&tree->lock);
0d399205 2333 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
109b36a2 2334 if (state && state->start == start) {
507903b8
AJ
2335 /*
2336 * take a reference on the state, unlock will drop
2337 * the ref
2338 */
2339 cache_state(state, &cached);
2340 }
2341 spin_unlock(&tree->lock);
2342
d1310b2e 2343 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
70dec807 2344 ret = tree->ops->readpage_end_io_hook(page, start, end,
507903b8 2345 state);
d1310b2e
CM
2346 if (ret)
2347 uptodate = 0;
4a54c8c1
JS
2348 else
2349 clean_io_failure(start, page);
d1310b2e 2350 }
ea466794
JB
2351
2352 if (!uptodate)
32240a91 2353 failed_mirror = (int)(unsigned long)bio->bi_bdev;
ea466794
JB
2354
2355 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
2356 ret = tree->ops->readpage_io_failed_hook(page, failed_mirror);
2357 if (!ret && !err &&
2358 test_bit(BIO_UPTODATE, &bio->bi_flags))
2359 uptodate = 1;
2360 } else if (!uptodate) {
f4a8e656
JS
2361 /*
2362 * The generic bio_readpage_error handles errors the
2363 * following way: If possible, new read requests are
2364 * created and submitted and will end up in
2365 * end_bio_extent_readpage as well (if we're lucky, not
2366 * in the !uptodate case). In that case it returns 0 and
2367 * we just go on with the next page in our bio. If it
2368 * can't handle the error it will return -EIO and we
2369 * remain responsible for that page.
2370 */
2371 ret = bio_readpage_error(bio, page, start, end,
2372 failed_mirror, NULL);
7e38326f 2373 if (ret == 0) {
3b951516
CM
2374 uptodate =
2375 test_bit(BIO_UPTODATE, &bio->bi_flags);
d20f7043
CM
2376 if (err)
2377 uptodate = 0;
507903b8 2378 uncache_state(&cached);
7e38326f
CM
2379 continue;
2380 }
2381 }
d1310b2e 2382
0b32f4bb 2383 if (uptodate && tree->track_uptodate) {
507903b8 2384 set_extent_uptodate(tree, start, end, &cached,
902b22f3 2385 GFP_ATOMIC);
771ed689 2386 }
507903b8 2387 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
d1310b2e 2388
70dec807
CM
2389 if (whole_page) {
2390 if (uptodate) {
2391 SetPageUptodate(page);
2392 } else {
2393 ClearPageUptodate(page);
2394 SetPageError(page);
2395 }
d1310b2e 2396 unlock_page(page);
70dec807
CM
2397 } else {
2398 if (uptodate) {
2399 check_page_uptodate(tree, page);
2400 } else {
2401 ClearPageUptodate(page);
2402 SetPageError(page);
2403 }
d1310b2e 2404 check_page_locked(tree, page);
70dec807 2405 }
4125bf76 2406 } while (bvec <= bvec_end);
d1310b2e
CM
2407
2408 bio_put(bio);
d1310b2e
CM
2409}
2410
88f794ed
MX
2411struct bio *
2412btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2413 gfp_t gfp_flags)
d1310b2e
CM
2414{
2415 struct bio *bio;
2416
2417 bio = bio_alloc(gfp_flags, nr_vecs);
2418
2419 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2420 while (!bio && (nr_vecs /= 2))
2421 bio = bio_alloc(gfp_flags, nr_vecs);
2422 }
2423
2424 if (bio) {
e1c4b745 2425 bio->bi_size = 0;
d1310b2e
CM
2426 bio->bi_bdev = bdev;
2427 bio->bi_sector = first_sector;
2428 }
2429 return bio;
2430}
2431
79787eaa
JM
2432/*
2433 * Since writes are async, they will only return -ENOMEM.
2434 * Reads can return the full range of I/O error conditions.
2435 */
355808c2
JM
2436static int __must_check submit_one_bio(int rw, struct bio *bio,
2437 int mirror_num, unsigned long bio_flags)
d1310b2e 2438{
d1310b2e 2439 int ret = 0;
70dec807
CM
2440 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2441 struct page *page = bvec->bv_page;
2442 struct extent_io_tree *tree = bio->bi_private;
70dec807 2443 u64 start;
70dec807
CM
2444
2445 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
70dec807 2446
902b22f3 2447 bio->bi_private = NULL;
d1310b2e
CM
2448
2449 bio_get(bio);
2450
065631f6 2451 if (tree->ops && tree->ops->submit_bio_hook)
6b82ce8d 2452 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
eaf25d93 2453 mirror_num, bio_flags, start);
0b86a832 2454 else
21adbd5c 2455 btrfsic_submit_bio(rw, bio);
4a54c8c1 2456
d1310b2e
CM
2457 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2458 ret = -EOPNOTSUPP;
2459 bio_put(bio);
2460 return ret;
2461}
2462
3444a972
JM
2463static int merge_bio(struct extent_io_tree *tree, struct page *page,
2464 unsigned long offset, size_t size, struct bio *bio,
2465 unsigned long bio_flags)
2466{
2467 int ret = 0;
2468 if (tree->ops && tree->ops->merge_bio_hook)
2469 ret = tree->ops->merge_bio_hook(page, offset, size, bio,
2470 bio_flags);
2471 BUG_ON(ret < 0);
2472 return ret;
2473
2474}
2475
d1310b2e
CM
2476static int submit_extent_page(int rw, struct extent_io_tree *tree,
2477 struct page *page, sector_t sector,
2478 size_t size, unsigned long offset,
2479 struct block_device *bdev,
2480 struct bio **bio_ret,
2481 unsigned long max_pages,
f188591e 2482 bio_end_io_t end_io_func,
c8b97818
CM
2483 int mirror_num,
2484 unsigned long prev_bio_flags,
2485 unsigned long bio_flags)
d1310b2e
CM
2486{
2487 int ret = 0;
2488 struct bio *bio;
2489 int nr;
c8b97818
CM
2490 int contig = 0;
2491 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2492 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
5b050f04 2493 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
d1310b2e
CM
2494
2495 if (bio_ret && *bio_ret) {
2496 bio = *bio_ret;
c8b97818
CM
2497 if (old_compressed)
2498 contig = bio->bi_sector == sector;
2499 else
2500 contig = bio->bi_sector + (bio->bi_size >> 9) ==
2501 sector;
2502
2503 if (prev_bio_flags != bio_flags || !contig ||
3444a972 2504 merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
c8b97818
CM
2505 bio_add_page(bio, page, page_size, offset) < page_size) {
2506 ret = submit_one_bio(rw, bio, mirror_num,
2507 prev_bio_flags);
79787eaa
JM
2508 if (ret < 0)
2509 return ret;
d1310b2e
CM
2510 bio = NULL;
2511 } else {
2512 return 0;
2513 }
2514 }
c8b97818
CM
2515 if (this_compressed)
2516 nr = BIO_MAX_PAGES;
2517 else
2518 nr = bio_get_nr_vecs(bdev);
2519
88f794ed 2520 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
5df67083
TI
2521 if (!bio)
2522 return -ENOMEM;
70dec807 2523
c8b97818 2524 bio_add_page(bio, page, page_size, offset);
d1310b2e
CM
2525 bio->bi_end_io = end_io_func;
2526 bio->bi_private = tree;
70dec807 2527
d397712b 2528 if (bio_ret)
d1310b2e 2529 *bio_ret = bio;
d397712b 2530 else
c8b97818 2531 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
d1310b2e
CM
2532
2533 return ret;
2534}
2535
4f2de97a 2536void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
d1310b2e
CM
2537{
2538 if (!PagePrivate(page)) {
2539 SetPagePrivate(page);
d1310b2e 2540 page_cache_get(page);
4f2de97a
JB
2541 set_page_private(page, (unsigned long)eb);
2542 } else {
2543 WARN_ON(page->private != (unsigned long)eb);
d1310b2e
CM
2544 }
2545}
2546
4f2de97a 2547void set_page_extent_mapped(struct page *page)
d1310b2e 2548{
4f2de97a
JB
2549 if (!PagePrivate(page)) {
2550 SetPagePrivate(page);
2551 page_cache_get(page);
2552 set_page_private(page, EXTENT_PAGE_PRIVATE);
2553 }
d1310b2e
CM
2554}
2555
2556/*
2557 * basic readpage implementation. Locked extent state structs are inserted
2558 * into the tree that are removed when the IO is done (by the end_io
2559 * handlers)
79787eaa 2560 * XXX JDM: This needs looking at to ensure proper page locking
d1310b2e
CM
2561 */
2562static int __extent_read_full_page(struct extent_io_tree *tree,
2563 struct page *page,
2564 get_extent_t *get_extent,
c8b97818
CM
2565 struct bio **bio, int mirror_num,
2566 unsigned long *bio_flags)
d1310b2e
CM
2567{
2568 struct inode *inode = page->mapping->host;
2569 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2570 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2571 u64 end;
2572 u64 cur = start;
2573 u64 extent_offset;
2574 u64 last_byte = i_size_read(inode);
2575 u64 block_start;
2576 u64 cur_end;
2577 sector_t sector;
2578 struct extent_map *em;
2579 struct block_device *bdev;
11c65dcc 2580 struct btrfs_ordered_extent *ordered;
d1310b2e
CM
2581 int ret;
2582 int nr = 0;
306e16ce 2583 size_t pg_offset = 0;
d1310b2e 2584 size_t iosize;
c8b97818 2585 size_t disk_io_size;
d1310b2e 2586 size_t blocksize = inode->i_sb->s_blocksize;
c8b97818 2587 unsigned long this_bio_flag = 0;
d1310b2e
CM
2588
2589 set_page_extent_mapped(page);
2590
90a887c9
DM
2591 if (!PageUptodate(page)) {
2592 if (cleancache_get_page(page) == 0) {
2593 BUG_ON(blocksize != PAGE_SIZE);
2594 goto out;
2595 }
2596 }
2597
d1310b2e 2598 end = page_end;
11c65dcc 2599 while (1) {
d0082371 2600 lock_extent(tree, start, end);
11c65dcc
JB
2601 ordered = btrfs_lookup_ordered_extent(inode, start);
2602 if (!ordered)
2603 break;
d0082371 2604 unlock_extent(tree, start, end);
11c65dcc
JB
2605 btrfs_start_ordered_extent(inode, ordered, 1);
2606 btrfs_put_ordered_extent(ordered);
2607 }
d1310b2e 2608
c8b97818
CM
2609 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2610 char *userpage;
2611 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2612
2613 if (zero_offset) {
2614 iosize = PAGE_CACHE_SIZE - zero_offset;
2615 userpage = kmap_atomic(page, KM_USER0);
2616 memset(userpage + zero_offset, 0, iosize);
2617 flush_dcache_page(page);
2618 kunmap_atomic(userpage, KM_USER0);
2619 }
2620 }
d1310b2e
CM
2621 while (cur <= end) {
2622 if (cur >= last_byte) {
2623 char *userpage;
507903b8
AJ
2624 struct extent_state *cached = NULL;
2625
306e16ce 2626 iosize = PAGE_CACHE_SIZE - pg_offset;
d1310b2e 2627 userpage = kmap_atomic(page, KM_USER0);
306e16ce 2628 memset(userpage + pg_offset, 0, iosize);
d1310b2e
CM
2629 flush_dcache_page(page);
2630 kunmap_atomic(userpage, KM_USER0);
2631 set_extent_uptodate(tree, cur, cur + iosize - 1,
507903b8
AJ
2632 &cached, GFP_NOFS);
2633 unlock_extent_cached(tree, cur, cur + iosize - 1,
2634 &cached, GFP_NOFS);
d1310b2e
CM
2635 break;
2636 }
306e16ce 2637 em = get_extent(inode, page, pg_offset, cur,
d1310b2e 2638 end - cur + 1, 0);
c704005d 2639 if (IS_ERR_OR_NULL(em)) {
d1310b2e 2640 SetPageError(page);
d0082371 2641 unlock_extent(tree, cur, end);
d1310b2e
CM
2642 break;
2643 }
d1310b2e
CM
2644 extent_offset = cur - em->start;
2645 BUG_ON(extent_map_end(em) <= cur);
2646 BUG_ON(end < cur);
2647
261507a0 2648 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
c8b97818 2649 this_bio_flag = EXTENT_BIO_COMPRESSED;
261507a0
LZ
2650 extent_set_compress_type(&this_bio_flag,
2651 em->compress_type);
2652 }
c8b97818 2653
d1310b2e
CM
2654 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2655 cur_end = min(extent_map_end(em) - 1, end);
2656 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
c8b97818
CM
2657 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2658 disk_io_size = em->block_len;
2659 sector = em->block_start >> 9;
2660 } else {
2661 sector = (em->block_start + extent_offset) >> 9;
2662 disk_io_size = iosize;
2663 }
d1310b2e
CM
2664 bdev = em->bdev;
2665 block_start = em->block_start;
d899e052
YZ
2666 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2667 block_start = EXTENT_MAP_HOLE;
d1310b2e
CM
2668 free_extent_map(em);
2669 em = NULL;
2670
2671 /* we've found a hole, just zero and go on */
2672 if (block_start == EXTENT_MAP_HOLE) {
2673 char *userpage;
507903b8
AJ
2674 struct extent_state *cached = NULL;
2675
d1310b2e 2676 userpage = kmap_atomic(page, KM_USER0);
306e16ce 2677 memset(userpage + pg_offset, 0, iosize);
d1310b2e
CM
2678 flush_dcache_page(page);
2679 kunmap_atomic(userpage, KM_USER0);
2680
2681 set_extent_uptodate(tree, cur, cur + iosize - 1,
507903b8
AJ
2682 &cached, GFP_NOFS);
2683 unlock_extent_cached(tree, cur, cur + iosize - 1,
2684 &cached, GFP_NOFS);
d1310b2e 2685 cur = cur + iosize;
306e16ce 2686 pg_offset += iosize;
d1310b2e
CM
2687 continue;
2688 }
2689 /* the get_extent function already copied into the page */
9655d298
CM
2690 if (test_range_bit(tree, cur, cur_end,
2691 EXTENT_UPTODATE, 1, NULL)) {
a1b32a59 2692 check_page_uptodate(tree, page);
d0082371 2693 unlock_extent(tree, cur, cur + iosize - 1);
d1310b2e 2694 cur = cur + iosize;
306e16ce 2695 pg_offset += iosize;
d1310b2e
CM
2696 continue;
2697 }
70dec807
CM
2698 /* we have an inline extent but it didn't get marked up
2699 * to date. Error out
2700 */
2701 if (block_start == EXTENT_MAP_INLINE) {
2702 SetPageError(page);
d0082371 2703 unlock_extent(tree, cur, cur + iosize - 1);
70dec807 2704 cur = cur + iosize;
306e16ce 2705 pg_offset += iosize;
70dec807
CM
2706 continue;
2707 }
d1310b2e
CM
2708
2709 ret = 0;
2710 if (tree->ops && tree->ops->readpage_io_hook) {
2711 ret = tree->ops->readpage_io_hook(page, cur,
2712 cur + iosize - 1);
2713 }
2714 if (!ret) {
89642229
CM
2715 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2716 pnr -= page->index;
d1310b2e 2717 ret = submit_extent_page(READ, tree, page,
306e16ce 2718 sector, disk_io_size, pg_offset,
89642229 2719 bdev, bio, pnr,
c8b97818
CM
2720 end_bio_extent_readpage, mirror_num,
2721 *bio_flags,
2722 this_bio_flag);
79787eaa 2723 BUG_ON(ret == -ENOMEM);
89642229 2724 nr++;
c8b97818 2725 *bio_flags = this_bio_flag;
d1310b2e
CM
2726 }
2727 if (ret)
2728 SetPageError(page);
2729 cur = cur + iosize;
306e16ce 2730 pg_offset += iosize;
d1310b2e 2731 }
90a887c9 2732out:
d1310b2e
CM
2733 if (!nr) {
2734 if (!PageError(page))
2735 SetPageUptodate(page);
2736 unlock_page(page);
2737 }
2738 return 0;
2739}
2740
2741int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
8ddc7d9c 2742 get_extent_t *get_extent, int mirror_num)
d1310b2e
CM
2743{
2744 struct bio *bio = NULL;
c8b97818 2745 unsigned long bio_flags = 0;
d1310b2e
CM
2746 int ret;
2747
8ddc7d9c 2748 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
c8b97818 2749 &bio_flags);
d1310b2e 2750 if (bio)
8ddc7d9c 2751 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
d1310b2e
CM
2752 return ret;
2753}
d1310b2e 2754
11c8349b
CM
2755static noinline void update_nr_written(struct page *page,
2756 struct writeback_control *wbc,
2757 unsigned long nr_written)
2758{
2759 wbc->nr_to_write -= nr_written;
2760 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2761 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2762 page->mapping->writeback_index = page->index + nr_written;
2763}
2764
d1310b2e
CM
2765/*
2766 * the writepage semantics are similar to regular writepage. extent
2767 * records are inserted to lock ranges in the tree, and as dirty areas
2768 * are found, they are marked writeback. Then the lock bits are removed
2769 * and the end_io handler clears the writeback ranges
2770 */
2771static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2772 void *data)
2773{
2774 struct inode *inode = page->mapping->host;
2775 struct extent_page_data *epd = data;
2776 struct extent_io_tree *tree = epd->tree;
2777 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2778 u64 delalloc_start;
2779 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2780 u64 end;
2781 u64 cur = start;
2782 u64 extent_offset;
2783 u64 last_byte = i_size_read(inode);
2784 u64 block_start;
2785 u64 iosize;
2786 sector_t sector;
2c64c53d 2787 struct extent_state *cached_state = NULL;
d1310b2e
CM
2788 struct extent_map *em;
2789 struct block_device *bdev;
2790 int ret;
2791 int nr = 0;
7f3c74fb 2792 size_t pg_offset = 0;
d1310b2e
CM
2793 size_t blocksize;
2794 loff_t i_size = i_size_read(inode);
2795 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2796 u64 nr_delalloc;
2797 u64 delalloc_end;
c8b97818
CM
2798 int page_started;
2799 int compressed;
ffbd517d 2800 int write_flags;
771ed689 2801 unsigned long nr_written = 0;
9e487107 2802 bool fill_delalloc = true;
d1310b2e 2803
ffbd517d 2804 if (wbc->sync_mode == WB_SYNC_ALL)
721a9602 2805 write_flags = WRITE_SYNC;
ffbd517d
CM
2806 else
2807 write_flags = WRITE;
2808
1abe9b8a 2809 trace___extent_writepage(page, inode, wbc);
2810
d1310b2e 2811 WARN_ON(!PageLocked(page));
bf0da8c1
CM
2812
2813 ClearPageError(page);
2814
7f3c74fb 2815 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
211c17f5 2816 if (page->index > end_index ||
7f3c74fb 2817 (page->index == end_index && !pg_offset)) {
39be25cd 2818 page->mapping->a_ops->invalidatepage(page, 0);
d1310b2e
CM
2819 unlock_page(page);
2820 return 0;
2821 }
2822
2823 if (page->index == end_index) {
2824 char *userpage;
2825
d1310b2e 2826 userpage = kmap_atomic(page, KM_USER0);
7f3c74fb
CM
2827 memset(userpage + pg_offset, 0,
2828 PAGE_CACHE_SIZE - pg_offset);
d1310b2e 2829 kunmap_atomic(userpage, KM_USER0);
211c17f5 2830 flush_dcache_page(page);
d1310b2e 2831 }
7f3c74fb 2832 pg_offset = 0;
d1310b2e
CM
2833
2834 set_page_extent_mapped(page);
2835
9e487107
JB
2836 if (!tree->ops || !tree->ops->fill_delalloc)
2837 fill_delalloc = false;
2838
d1310b2e
CM
2839 delalloc_start = start;
2840 delalloc_end = 0;
c8b97818 2841 page_started = 0;
9e487107 2842 if (!epd->extent_locked && fill_delalloc) {
f85d7d6c 2843 u64 delalloc_to_write = 0;
11c8349b
CM
2844 /*
2845 * make sure the wbc mapping index is at least updated
2846 * to this page.
2847 */
2848 update_nr_written(page, wbc, 0);
2849
d397712b 2850 while (delalloc_end < page_end) {
771ed689 2851 nr_delalloc = find_lock_delalloc_range(inode, tree,
c8b97818
CM
2852 page,
2853 &delalloc_start,
d1310b2e
CM
2854 &delalloc_end,
2855 128 * 1024 * 1024);
771ed689
CM
2856 if (nr_delalloc == 0) {
2857 delalloc_start = delalloc_end + 1;
2858 continue;
2859 }
013bd4c3
TI
2860 ret = tree->ops->fill_delalloc(inode, page,
2861 delalloc_start,
2862 delalloc_end,
2863 &page_started,
2864 &nr_written);
79787eaa
JM
2865 /* File system has been set read-only */
2866 if (ret) {
2867 SetPageError(page);
2868 goto done;
2869 }
f85d7d6c
CM
2870 /*
2871 * delalloc_end is already one less than the total
2872 * length, so we don't subtract one from
2873 * PAGE_CACHE_SIZE
2874 */
2875 delalloc_to_write += (delalloc_end - delalloc_start +
2876 PAGE_CACHE_SIZE) >>
2877 PAGE_CACHE_SHIFT;
d1310b2e 2878 delalloc_start = delalloc_end + 1;
d1310b2e 2879 }
f85d7d6c
CM
2880 if (wbc->nr_to_write < delalloc_to_write) {
2881 int thresh = 8192;
2882
2883 if (delalloc_to_write < thresh * 2)
2884 thresh = delalloc_to_write;
2885 wbc->nr_to_write = min_t(u64, delalloc_to_write,
2886 thresh);
2887 }
c8b97818 2888
771ed689
CM
2889 /* did the fill delalloc function already unlock and start
2890 * the IO?
2891 */
2892 if (page_started) {
2893 ret = 0;
11c8349b
CM
2894 /*
2895 * we've unlocked the page, so we can't update
2896 * the mapping's writeback index, just update
2897 * nr_to_write.
2898 */
2899 wbc->nr_to_write -= nr_written;
2900 goto done_unlocked;
771ed689 2901 }
c8b97818 2902 }
247e743c 2903 if (tree->ops && tree->ops->writepage_start_hook) {
c8b97818
CM
2904 ret = tree->ops->writepage_start_hook(page, start,
2905 page_end);
87826df0
JM
2906 if (ret) {
2907 /* Fixup worker will requeue */
2908 if (ret == -EBUSY)
2909 wbc->pages_skipped++;
2910 else
2911 redirty_page_for_writepage(wbc, page);
11c8349b 2912 update_nr_written(page, wbc, nr_written);
247e743c 2913 unlock_page(page);
771ed689 2914 ret = 0;
11c8349b 2915 goto done_unlocked;
247e743c
CM
2916 }
2917 }
2918
11c8349b
CM
2919 /*
2920 * we don't want to touch the inode after unlocking the page,
2921 * so we update the mapping writeback index now
2922 */
2923 update_nr_written(page, wbc, nr_written + 1);
771ed689 2924
d1310b2e 2925 end = page_end;
d1310b2e 2926 if (last_byte <= start) {
e6dcd2dc
CM
2927 if (tree->ops && tree->ops->writepage_end_io_hook)
2928 tree->ops->writepage_end_io_hook(page, start,
2929 page_end, NULL, 1);
d1310b2e
CM
2930 goto done;
2931 }
2932
d1310b2e
CM
2933 blocksize = inode->i_sb->s_blocksize;
2934
2935 while (cur <= end) {
2936 if (cur >= last_byte) {
e6dcd2dc
CM
2937 if (tree->ops && tree->ops->writepage_end_io_hook)
2938 tree->ops->writepage_end_io_hook(page, cur,
2939 page_end, NULL, 1);
d1310b2e
CM
2940 break;
2941 }
7f3c74fb 2942 em = epd->get_extent(inode, page, pg_offset, cur,
d1310b2e 2943 end - cur + 1, 1);
c704005d 2944 if (IS_ERR_OR_NULL(em)) {
d1310b2e
CM
2945 SetPageError(page);
2946 break;
2947 }
2948
2949 extent_offset = cur - em->start;
2950 BUG_ON(extent_map_end(em) <= cur);
2951 BUG_ON(end < cur);
2952 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2953 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2954 sector = (em->block_start + extent_offset) >> 9;
2955 bdev = em->bdev;
2956 block_start = em->block_start;
c8b97818 2957 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
d1310b2e
CM
2958 free_extent_map(em);
2959 em = NULL;
2960
c8b97818
CM
2961 /*
2962 * compressed and inline extents are written through other
2963 * paths in the FS
2964 */
2965 if (compressed || block_start == EXTENT_MAP_HOLE ||
d1310b2e 2966 block_start == EXTENT_MAP_INLINE) {
c8b97818
CM
2967 /*
2968 * end_io notification does not happen here for
2969 * compressed extents
2970 */
2971 if (!compressed && tree->ops &&
2972 tree->ops->writepage_end_io_hook)
e6dcd2dc
CM
2973 tree->ops->writepage_end_io_hook(page, cur,
2974 cur + iosize - 1,
2975 NULL, 1);
c8b97818
CM
2976 else if (compressed) {
2977 /* we don't want to end_page_writeback on
2978 * a compressed extent. this happens
2979 * elsewhere
2980 */
2981 nr++;
2982 }
2983
2984 cur += iosize;
7f3c74fb 2985 pg_offset += iosize;
d1310b2e
CM
2986 continue;
2987 }
d1310b2e
CM
2988 /* leave this out until we have a page_mkwrite call */
2989 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
9655d298 2990 EXTENT_DIRTY, 0, NULL)) {
d1310b2e 2991 cur = cur + iosize;
7f3c74fb 2992 pg_offset += iosize;
d1310b2e
CM
2993 continue;
2994 }
c8b97818 2995
d1310b2e
CM
2996 if (tree->ops && tree->ops->writepage_io_hook) {
2997 ret = tree->ops->writepage_io_hook(page, cur,
2998 cur + iosize - 1);
2999 } else {
3000 ret = 0;
3001 }
1259ab75 3002 if (ret) {
d1310b2e 3003 SetPageError(page);
1259ab75 3004 } else {
d1310b2e 3005 unsigned long max_nr = end_index + 1;
7f3c74fb 3006
d1310b2e
CM
3007 set_range_writeback(tree, cur, cur + iosize - 1);
3008 if (!PageWriteback(page)) {
d397712b
CM
3009 printk(KERN_ERR "btrfs warning page %lu not "
3010 "writeback, cur %llu end %llu\n",
3011 page->index, (unsigned long long)cur,
d1310b2e
CM
3012 (unsigned long long)end);
3013 }
3014
ffbd517d
CM
3015 ret = submit_extent_page(write_flags, tree, page,
3016 sector, iosize, pg_offset,
3017 bdev, &epd->bio, max_nr,
c8b97818
CM
3018 end_bio_extent_writepage,
3019 0, 0, 0);
d1310b2e
CM
3020 if (ret)
3021 SetPageError(page);
3022 }
3023 cur = cur + iosize;
7f3c74fb 3024 pg_offset += iosize;
d1310b2e
CM
3025 nr++;
3026 }
3027done:
3028 if (nr == 0) {
3029 /* make sure the mapping tag for page dirty gets cleared */
3030 set_page_writeback(page);
3031 end_page_writeback(page);
3032 }
d1310b2e 3033 unlock_page(page);
771ed689 3034
11c8349b
CM
3035done_unlocked:
3036
2c64c53d
CM
3037 /* drop our reference on any cached states */
3038 free_extent_state(cached_state);
d1310b2e
CM
3039 return 0;
3040}
3041
0b32f4bb
JB
3042static int eb_wait(void *word)
3043{
3044 io_schedule();
3045 return 0;
3046}
3047
3048static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3049{
3050 wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3051 TASK_UNINTERRUPTIBLE);
3052}
3053
3054static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3055 struct btrfs_fs_info *fs_info,
3056 struct extent_page_data *epd)
3057{
3058 unsigned long i, num_pages;
3059 int flush = 0;
3060 int ret = 0;
3061
3062 if (!btrfs_try_tree_write_lock(eb)) {
3063 flush = 1;
3064 flush_write_bio(epd);
3065 btrfs_tree_lock(eb);
3066 }
3067
3068 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3069 btrfs_tree_unlock(eb);
3070 if (!epd->sync_io)
3071 return 0;
3072 if (!flush) {
3073 flush_write_bio(epd);
3074 flush = 1;
3075 }
a098d8e8
CM
3076 while (1) {
3077 wait_on_extent_buffer_writeback(eb);
3078 btrfs_tree_lock(eb);
3079 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3080 break;
0b32f4bb 3081 btrfs_tree_unlock(eb);
0b32f4bb
JB
3082 }
3083 }
3084
3085 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3086 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3087 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3088 spin_lock(&fs_info->delalloc_lock);
3089 if (fs_info->dirty_metadata_bytes >= eb->len)
3090 fs_info->dirty_metadata_bytes -= eb->len;
3091 else
3092 WARN_ON(1);
3093 spin_unlock(&fs_info->delalloc_lock);
3094 ret = 1;
3095 }
3096
3097 btrfs_tree_unlock(eb);
3098
3099 if (!ret)
3100 return ret;
3101
3102 num_pages = num_extent_pages(eb->start, eb->len);
3103 for (i = 0; i < num_pages; i++) {
3104 struct page *p = extent_buffer_page(eb, i);
3105
3106 if (!trylock_page(p)) {
3107 if (!flush) {
3108 flush_write_bio(epd);
3109 flush = 1;
3110 }
3111 lock_page(p);
3112 }
3113 }
3114
3115 return ret;
3116}
3117
3118static void end_extent_buffer_writeback(struct extent_buffer *eb)
3119{
3120 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3121 smp_mb__after_clear_bit();
3122 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3123}
3124
3125static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3126{
3127 int uptodate = err == 0;
3128 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3129 struct extent_buffer *eb;
3130 int done;
3131
3132 do {
3133 struct page *page = bvec->bv_page;
3134
3135 bvec--;
3136 eb = (struct extent_buffer *)page->private;
3137 BUG_ON(!eb);
3138 done = atomic_dec_and_test(&eb->io_pages);
3139
3140 if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3141 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3142 ClearPageUptodate(page);
3143 SetPageError(page);
3144 }
3145
3146 end_page_writeback(page);
3147
3148 if (!done)
3149 continue;
3150
3151 end_extent_buffer_writeback(eb);
3152 } while (bvec >= bio->bi_io_vec);
3153
3154 bio_put(bio);
3155
3156}
3157
3158static int write_one_eb(struct extent_buffer *eb,
3159 struct btrfs_fs_info *fs_info,
3160 struct writeback_control *wbc,
3161 struct extent_page_data *epd)
3162{
3163 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3164 u64 offset = eb->start;
3165 unsigned long i, num_pages;
3166 int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
3167 int ret;
3168
3169 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3170 num_pages = num_extent_pages(eb->start, eb->len);
3171 atomic_set(&eb->io_pages, num_pages);
3172 for (i = 0; i < num_pages; i++) {
3173 struct page *p = extent_buffer_page(eb, i);
3174
3175 clear_page_dirty_for_io(p);
3176 set_page_writeback(p);
3177 ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3178 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3179 -1, end_bio_extent_buffer_writepage,
3180 0, 0, 0);
3181 if (ret) {
3182 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3183 SetPageError(p);
3184 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3185 end_extent_buffer_writeback(eb);
3186 ret = -EIO;
3187 break;
3188 }
3189 offset += PAGE_CACHE_SIZE;
3190 update_nr_written(p, wbc, 1);
3191 unlock_page(p);
3192 }
3193
3194 if (unlikely(ret)) {
3195 for (; i < num_pages; i++) {
3196 struct page *p = extent_buffer_page(eb, i);
3197 unlock_page(p);
3198 }
3199 }
3200
3201 return ret;
3202}
3203
3204int btree_write_cache_pages(struct address_space *mapping,
3205 struct writeback_control *wbc)
3206{
3207 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3208 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3209 struct extent_buffer *eb, *prev_eb = NULL;
3210 struct extent_page_data epd = {
3211 .bio = NULL,
3212 .tree = tree,
3213 .extent_locked = 0,
3214 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3215 };
3216 int ret = 0;
3217 int done = 0;
3218 int nr_to_write_done = 0;
3219 struct pagevec pvec;
3220 int nr_pages;
3221 pgoff_t index;
3222 pgoff_t end; /* Inclusive */
3223 int scanned = 0;
3224 int tag;
3225
3226 pagevec_init(&pvec, 0);
3227 if (wbc->range_cyclic) {
3228 index = mapping->writeback_index; /* Start from prev offset */
3229 end = -1;
3230 } else {
3231 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3232 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3233 scanned = 1;
3234 }
3235 if (wbc->sync_mode == WB_SYNC_ALL)
3236 tag = PAGECACHE_TAG_TOWRITE;
3237 else
3238 tag = PAGECACHE_TAG_DIRTY;
3239retry:
3240 if (wbc->sync_mode == WB_SYNC_ALL)
3241 tag_pages_for_writeback(mapping, index, end);
3242 while (!done && !nr_to_write_done && (index <= end) &&
3243 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3244 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3245 unsigned i;
3246
3247 scanned = 1;
3248 for (i = 0; i < nr_pages; i++) {
3249 struct page *page = pvec.pages[i];
3250
3251 if (!PagePrivate(page))
3252 continue;
3253
3254 if (!wbc->range_cyclic && page->index > end) {
3255 done = 1;
3256 break;
3257 }
3258
3259 eb = (struct extent_buffer *)page->private;
3260 if (!eb) {
3261 WARN_ON(1);
3262 continue;
3263 }
3264
3265 if (eb == prev_eb)
3266 continue;
3267
3268 if (!atomic_inc_not_zero(&eb->refs)) {
3269 WARN_ON(1);
3270 continue;
3271 }
3272
3273 prev_eb = eb;
3274 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3275 if (!ret) {
3276 free_extent_buffer(eb);
3277 continue;
3278 }
3279
3280 ret = write_one_eb(eb, fs_info, wbc, &epd);
3281 if (ret) {
3282 done = 1;
3283 free_extent_buffer(eb);
3284 break;
3285 }
3286 free_extent_buffer(eb);
3287
3288 /*
3289 * the filesystem may choose to bump up nr_to_write.
3290 * We have to make sure to honor the new nr_to_write
3291 * at any time
3292 */
3293 nr_to_write_done = wbc->nr_to_write <= 0;
3294 }
3295 pagevec_release(&pvec);
3296 cond_resched();
3297 }
3298 if (!scanned && !done) {
3299 /*
3300 * We hit the last page and there is more work to be done: wrap
3301 * back to the start of the file
3302 */
3303 scanned = 1;
3304 index = 0;
3305 goto retry;
3306 }
3307 flush_write_bio(&epd);
3308 return ret;
3309}
3310
d1310b2e 3311/**
4bef0848 3312 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
d1310b2e
CM
3313 * @mapping: address space structure to write
3314 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3315 * @writepage: function called for each page
3316 * @data: data passed to writepage function
3317 *
3318 * If a page is already under I/O, write_cache_pages() skips it, even
3319 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3320 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3321 * and msync() need to guarantee that all the data which was dirty at the time
3322 * the call was made get new I/O started against them. If wbc->sync_mode is
3323 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3324 * existing IO to complete.
3325 */
b2950863 3326static int extent_write_cache_pages(struct extent_io_tree *tree,
4bef0848
CM
3327 struct address_space *mapping,
3328 struct writeback_control *wbc,
d2c3f4f6
CM
3329 writepage_t writepage, void *data,
3330 void (*flush_fn)(void *))
d1310b2e 3331{
d1310b2e
CM
3332 int ret = 0;
3333 int done = 0;
f85d7d6c 3334 int nr_to_write_done = 0;
d1310b2e
CM
3335 struct pagevec pvec;
3336 int nr_pages;
3337 pgoff_t index;
3338 pgoff_t end; /* Inclusive */
3339 int scanned = 0;
f7aaa06b 3340 int tag;
d1310b2e 3341
d1310b2e
CM
3342 pagevec_init(&pvec, 0);
3343 if (wbc->range_cyclic) {
3344 index = mapping->writeback_index; /* Start from prev offset */
3345 end = -1;
3346 } else {
3347 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3348 end = wbc->range_end >> PAGE_CACHE_SHIFT;
d1310b2e
CM
3349 scanned = 1;
3350 }
f7aaa06b
JB
3351 if (wbc->sync_mode == WB_SYNC_ALL)
3352 tag = PAGECACHE_TAG_TOWRITE;
3353 else
3354 tag = PAGECACHE_TAG_DIRTY;
d1310b2e 3355retry:
f7aaa06b
JB
3356 if (wbc->sync_mode == WB_SYNC_ALL)
3357 tag_pages_for_writeback(mapping, index, end);
f85d7d6c 3358 while (!done && !nr_to_write_done && (index <= end) &&
f7aaa06b
JB
3359 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3360 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
d1310b2e
CM
3361 unsigned i;
3362
3363 scanned = 1;
3364 for (i = 0; i < nr_pages; i++) {
3365 struct page *page = pvec.pages[i];
3366
3367 /*
3368 * At this point we hold neither mapping->tree_lock nor
3369 * lock on the page itself: the page may be truncated or
3370 * invalidated (changing page->mapping to NULL), or even
3371 * swizzled back from swapper_space to tmpfs file
3372 * mapping
3373 */
01d658f2
CM
3374 if (tree->ops &&
3375 tree->ops->write_cache_pages_lock_hook) {
3376 tree->ops->write_cache_pages_lock_hook(page,
3377 data, flush_fn);
3378 } else {
3379 if (!trylock_page(page)) {
3380 flush_fn(data);
3381 lock_page(page);
3382 }
3383 }
d1310b2e
CM
3384
3385 if (unlikely(page->mapping != mapping)) {
3386 unlock_page(page);
3387 continue;
3388 }
3389
3390 if (!wbc->range_cyclic && page->index > end) {
3391 done = 1;
3392 unlock_page(page);
3393 continue;
3394 }
3395
d2c3f4f6 3396 if (wbc->sync_mode != WB_SYNC_NONE) {
0e6bd956
CM
3397 if (PageWriteback(page))
3398 flush_fn(data);
d1310b2e 3399 wait_on_page_writeback(page);
d2c3f4f6 3400 }
d1310b2e
CM
3401
3402 if (PageWriteback(page) ||
3403 !clear_page_dirty_for_io(page)) {
3404 unlock_page(page);
3405 continue;
3406 }
3407
3408 ret = (*writepage)(page, wbc, data);
3409
3410 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3411 unlock_page(page);
3412 ret = 0;
3413 }
f85d7d6c 3414 if (ret)
d1310b2e 3415 done = 1;
f85d7d6c
CM
3416
3417 /*
3418 * the filesystem may choose to bump up nr_to_write.
3419 * We have to make sure to honor the new nr_to_write
3420 * at any time
3421 */
3422 nr_to_write_done = wbc->nr_to_write <= 0;
d1310b2e
CM
3423 }
3424 pagevec_release(&pvec);
3425 cond_resched();
3426 }
3427 if (!scanned && !done) {
3428 /*
3429 * We hit the last page and there is more work to be done: wrap
3430 * back to the start of the file
3431 */
3432 scanned = 1;
3433 index = 0;
3434 goto retry;
3435 }
d1310b2e
CM
3436 return ret;
3437}
d1310b2e 3438
ffbd517d 3439static void flush_epd_write_bio(struct extent_page_data *epd)
d2c3f4f6 3440{
d2c3f4f6 3441 if (epd->bio) {
355808c2
JM
3442 int rw = WRITE;
3443 int ret;
3444
ffbd517d 3445 if (epd->sync_io)
355808c2
JM
3446 rw = WRITE_SYNC;
3447
3448 ret = submit_one_bio(rw, epd->bio, 0, 0);
79787eaa 3449 BUG_ON(ret < 0); /* -ENOMEM */
d2c3f4f6
CM
3450 epd->bio = NULL;
3451 }
3452}
3453
ffbd517d
CM
3454static noinline void flush_write_bio(void *data)
3455{
3456 struct extent_page_data *epd = data;
3457 flush_epd_write_bio(epd);
3458}
3459
d1310b2e
CM
3460int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3461 get_extent_t *get_extent,
3462 struct writeback_control *wbc)
3463{
3464 int ret;
d1310b2e
CM
3465 struct extent_page_data epd = {
3466 .bio = NULL,
3467 .tree = tree,
3468 .get_extent = get_extent,
771ed689 3469 .extent_locked = 0,
ffbd517d 3470 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
d1310b2e 3471 };
d1310b2e 3472
d1310b2e
CM
3473 ret = __extent_writepage(page, wbc, &epd);
3474
ffbd517d 3475 flush_epd_write_bio(&epd);
d1310b2e
CM
3476 return ret;
3477}
d1310b2e 3478
771ed689
CM
3479int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3480 u64 start, u64 end, get_extent_t *get_extent,
3481 int mode)
3482{
3483 int ret = 0;
3484 struct address_space *mapping = inode->i_mapping;
3485 struct page *page;
3486 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3487 PAGE_CACHE_SHIFT;
3488
3489 struct extent_page_data epd = {
3490 .bio = NULL,
3491 .tree = tree,
3492 .get_extent = get_extent,
3493 .extent_locked = 1,
ffbd517d 3494 .sync_io = mode == WB_SYNC_ALL,
771ed689
CM
3495 };
3496 struct writeback_control wbc_writepages = {
771ed689 3497 .sync_mode = mode,
771ed689
CM
3498 .nr_to_write = nr_pages * 2,
3499 .range_start = start,
3500 .range_end = end + 1,
3501 };
3502
d397712b 3503 while (start <= end) {
771ed689
CM
3504 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3505 if (clear_page_dirty_for_io(page))
3506 ret = __extent_writepage(page, &wbc_writepages, &epd);
3507 else {
3508 if (tree->ops && tree->ops->writepage_end_io_hook)
3509 tree->ops->writepage_end_io_hook(page, start,
3510 start + PAGE_CACHE_SIZE - 1,
3511 NULL, 1);
3512 unlock_page(page);
3513 }
3514 page_cache_release(page);
3515 start += PAGE_CACHE_SIZE;
3516 }
3517
ffbd517d 3518 flush_epd_write_bio(&epd);
771ed689
CM
3519 return ret;
3520}
d1310b2e
CM
3521
3522int extent_writepages(struct extent_io_tree *tree,
3523 struct address_space *mapping,
3524 get_extent_t *get_extent,
3525 struct writeback_control *wbc)
3526{
3527 int ret = 0;
3528 struct extent_page_data epd = {
3529 .bio = NULL,
3530 .tree = tree,
3531 .get_extent = get_extent,
771ed689 3532 .extent_locked = 0,
ffbd517d 3533 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
d1310b2e
CM
3534 };
3535
4bef0848 3536 ret = extent_write_cache_pages(tree, mapping, wbc,
d2c3f4f6
CM
3537 __extent_writepage, &epd,
3538 flush_write_bio);
ffbd517d 3539 flush_epd_write_bio(&epd);
d1310b2e
CM
3540 return ret;
3541}
d1310b2e
CM
3542
3543int extent_readpages(struct extent_io_tree *tree,
3544 struct address_space *mapping,
3545 struct list_head *pages, unsigned nr_pages,
3546 get_extent_t get_extent)
3547{
3548 struct bio *bio = NULL;
3549 unsigned page_idx;
c8b97818 3550 unsigned long bio_flags = 0;
d1310b2e 3551
d1310b2e
CM
3552 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3553 struct page *page = list_entry(pages->prev, struct page, lru);
3554
3555 prefetchw(&page->flags);
3556 list_del(&page->lru);
28ecb609 3557 if (!add_to_page_cache_lru(page, mapping,
43e817a1 3558 page->index, GFP_NOFS)) {
f188591e 3559 __extent_read_full_page(tree, page, get_extent,
c8b97818 3560 &bio, 0, &bio_flags);
d1310b2e
CM
3561 }
3562 page_cache_release(page);
3563 }
d1310b2e
CM
3564 BUG_ON(!list_empty(pages));
3565 if (bio)
79787eaa 3566 return submit_one_bio(READ, bio, 0, bio_flags);
d1310b2e
CM
3567 return 0;
3568}
d1310b2e
CM
3569
3570/*
3571 * basic invalidatepage code, this waits on any locked or writeback
3572 * ranges corresponding to the page, and then deletes any extent state
3573 * records from the tree
3574 */
3575int extent_invalidatepage(struct extent_io_tree *tree,
3576 struct page *page, unsigned long offset)
3577{
2ac55d41 3578 struct extent_state *cached_state = NULL;
d1310b2e
CM
3579 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
3580 u64 end = start + PAGE_CACHE_SIZE - 1;
3581 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3582
d397712b 3583 start += (offset + blocksize - 1) & ~(blocksize - 1);
d1310b2e
CM
3584 if (start > end)
3585 return 0;
3586
d0082371 3587 lock_extent_bits(tree, start, end, 0, &cached_state);
1edbb734 3588 wait_on_page_writeback(page);
d1310b2e 3589 clear_extent_bit(tree, start, end,
32c00aff
JB
3590 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3591 EXTENT_DO_ACCOUNTING,
2ac55d41 3592 1, 1, &cached_state, GFP_NOFS);
d1310b2e
CM
3593 return 0;
3594}
d1310b2e 3595
7b13b7b1
CM
3596/*
3597 * a helper for releasepage, this tests for areas of the page that
3598 * are locked or under IO and drops the related state bits if it is safe
3599 * to drop the page.
3600 */
3601int try_release_extent_state(struct extent_map_tree *map,
3602 struct extent_io_tree *tree, struct page *page,
3603 gfp_t mask)
3604{
3605 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3606 u64 end = start + PAGE_CACHE_SIZE - 1;
3607 int ret = 1;
3608
211f90e6 3609 if (test_range_bit(tree, start, end,
8b62b72b 3610 EXTENT_IOBITS, 0, NULL))
7b13b7b1
CM
3611 ret = 0;
3612 else {
3613 if ((mask & GFP_NOFS) == GFP_NOFS)
3614 mask = GFP_NOFS;
11ef160f
CM
3615 /*
3616 * at this point we can safely clear everything except the
3617 * locked bit and the nodatasum bit
3618 */
e3f24cc5 3619 ret = clear_extent_bit(tree, start, end,
11ef160f
CM
3620 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3621 0, 0, NULL, mask);
e3f24cc5
CM
3622
3623 /* if clear_extent_bit failed for enomem reasons,
3624 * we can't allow the release to continue.
3625 */
3626 if (ret < 0)
3627 ret = 0;
3628 else
3629 ret = 1;
7b13b7b1
CM
3630 }
3631 return ret;
3632}
7b13b7b1 3633
d1310b2e
CM
3634/*
3635 * a helper for releasepage. As long as there are no locked extents
3636 * in the range corresponding to the page, both state records and extent
3637 * map records are removed
3638 */
3639int try_release_extent_mapping(struct extent_map_tree *map,
70dec807
CM
3640 struct extent_io_tree *tree, struct page *page,
3641 gfp_t mask)
d1310b2e
CM
3642{
3643 struct extent_map *em;
3644 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3645 u64 end = start + PAGE_CACHE_SIZE - 1;
7b13b7b1 3646
70dec807
CM
3647 if ((mask & __GFP_WAIT) &&
3648 page->mapping->host->i_size > 16 * 1024 * 1024) {
39b5637f 3649 u64 len;
70dec807 3650 while (start <= end) {
39b5637f 3651 len = end - start + 1;
890871be 3652 write_lock(&map->lock);
39b5637f 3653 em = lookup_extent_mapping(map, start, len);
285190d9 3654 if (!em) {
890871be 3655 write_unlock(&map->lock);
70dec807
CM
3656 break;
3657 }
7f3c74fb
CM
3658 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3659 em->start != start) {
890871be 3660 write_unlock(&map->lock);
70dec807
CM
3661 free_extent_map(em);
3662 break;
3663 }
3664 if (!test_range_bit(tree, em->start,
3665 extent_map_end(em) - 1,
8b62b72b 3666 EXTENT_LOCKED | EXTENT_WRITEBACK,
9655d298 3667 0, NULL)) {
70dec807
CM
3668 remove_extent_mapping(map, em);
3669 /* once for the rb tree */
3670 free_extent_map(em);
3671 }
3672 start = extent_map_end(em);
890871be 3673 write_unlock(&map->lock);
70dec807
CM
3674
3675 /* once for us */
d1310b2e
CM
3676 free_extent_map(em);
3677 }
d1310b2e 3678 }
7b13b7b1 3679 return try_release_extent_state(map, tree, page, mask);
d1310b2e 3680}
d1310b2e 3681
ec29ed5b
CM
3682/*
3683 * helper function for fiemap, which doesn't want to see any holes.
3684 * This maps until we find something past 'last'
3685 */
3686static struct extent_map *get_extent_skip_holes(struct inode *inode,
3687 u64 offset,
3688 u64 last,
3689 get_extent_t *get_extent)
3690{
3691 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3692 struct extent_map *em;
3693 u64 len;
3694
3695 if (offset >= last)
3696 return NULL;
3697
3698 while(1) {
3699 len = last - offset;
3700 if (len == 0)
3701 break;
3702 len = (len + sectorsize - 1) & ~(sectorsize - 1);
3703 em = get_extent(inode, NULL, 0, offset, len, 0);
c704005d 3704 if (IS_ERR_OR_NULL(em))
ec29ed5b
CM
3705 return em;
3706
3707 /* if this isn't a hole return it */
3708 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3709 em->block_start != EXTENT_MAP_HOLE) {
3710 return em;
3711 }
3712
3713 /* this is a hole, advance to the next extent */
3714 offset = extent_map_end(em);
3715 free_extent_map(em);
3716 if (offset >= last)
3717 break;
3718 }
3719 return NULL;
3720}
3721
1506fcc8
YS
3722int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3723 __u64 start, __u64 len, get_extent_t *get_extent)
3724{
975f84fe 3725 int ret = 0;
1506fcc8
YS
3726 u64 off = start;
3727 u64 max = start + len;
3728 u32 flags = 0;
975f84fe
JB
3729 u32 found_type;
3730 u64 last;
ec29ed5b 3731 u64 last_for_get_extent = 0;
1506fcc8 3732 u64 disko = 0;
ec29ed5b 3733 u64 isize = i_size_read(inode);
975f84fe 3734 struct btrfs_key found_key;
1506fcc8 3735 struct extent_map *em = NULL;
2ac55d41 3736 struct extent_state *cached_state = NULL;
975f84fe
JB
3737 struct btrfs_path *path;
3738 struct btrfs_file_extent_item *item;
1506fcc8 3739 int end = 0;
ec29ed5b
CM
3740 u64 em_start = 0;
3741 u64 em_len = 0;
3742 u64 em_end = 0;
1506fcc8 3743 unsigned long emflags;
1506fcc8
YS
3744
3745 if (len == 0)
3746 return -EINVAL;
3747
975f84fe
JB
3748 path = btrfs_alloc_path();
3749 if (!path)
3750 return -ENOMEM;
3751 path->leave_spinning = 1;
3752
4d479cf0
JB
3753 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3754 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3755
ec29ed5b
CM
3756 /*
3757 * lookup the last file extent. We're not using i_size here
3758 * because there might be preallocation past i_size
3759 */
975f84fe 3760 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
33345d01 3761 path, btrfs_ino(inode), -1, 0);
975f84fe
JB
3762 if (ret < 0) {
3763 btrfs_free_path(path);
3764 return ret;
3765 }
3766 WARN_ON(!ret);
3767 path->slots[0]--;
3768 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3769 struct btrfs_file_extent_item);
3770 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3771 found_type = btrfs_key_type(&found_key);
3772
ec29ed5b 3773 /* No extents, but there might be delalloc bits */
33345d01 3774 if (found_key.objectid != btrfs_ino(inode) ||
975f84fe 3775 found_type != BTRFS_EXTENT_DATA_KEY) {
ec29ed5b
CM
3776 /* have to trust i_size as the end */
3777 last = (u64)-1;
3778 last_for_get_extent = isize;
3779 } else {
3780 /*
3781 * remember the start of the last extent. There are a
3782 * bunch of different factors that go into the length of the
3783 * extent, so its much less complex to remember where it started
3784 */
3785 last = found_key.offset;
3786 last_for_get_extent = last + 1;
975f84fe 3787 }
975f84fe
JB
3788 btrfs_free_path(path);
3789
ec29ed5b
CM
3790 /*
3791 * we might have some extents allocated but more delalloc past those
3792 * extents. so, we trust isize unless the start of the last extent is
3793 * beyond isize
3794 */
3795 if (last < isize) {
3796 last = (u64)-1;
3797 last_for_get_extent = isize;
3798 }
3799
2ac55d41 3800 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
d0082371 3801 &cached_state);
ec29ed5b 3802
4d479cf0 3803 em = get_extent_skip_holes(inode, start, last_for_get_extent,
ec29ed5b 3804 get_extent);
1506fcc8
YS
3805 if (!em)
3806 goto out;
3807 if (IS_ERR(em)) {
3808 ret = PTR_ERR(em);
3809 goto out;
3810 }
975f84fe 3811
1506fcc8 3812 while (!end) {
ea8efc74
CM
3813 u64 offset_in_extent;
3814
3815 /* break if the extent we found is outside the range */
3816 if (em->start >= max || extent_map_end(em) < off)
3817 break;
3818
3819 /*
3820 * get_extent may return an extent that starts before our
3821 * requested range. We have to make sure the ranges
3822 * we return to fiemap always move forward and don't
3823 * overlap, so adjust the offsets here
3824 */
3825 em_start = max(em->start, off);
1506fcc8 3826
ea8efc74
CM
3827 /*
3828 * record the offset from the start of the extent
3829 * for adjusting the disk offset below
3830 */
3831 offset_in_extent = em_start - em->start;
ec29ed5b 3832 em_end = extent_map_end(em);
ea8efc74 3833 em_len = em_end - em_start;
ec29ed5b 3834 emflags = em->flags;
1506fcc8
YS
3835 disko = 0;
3836 flags = 0;
3837
ea8efc74
CM
3838 /*
3839 * bump off for our next call to get_extent
3840 */
3841 off = extent_map_end(em);
3842 if (off >= max)
3843 end = 1;
3844
93dbfad7 3845 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
1506fcc8
YS
3846 end = 1;
3847 flags |= FIEMAP_EXTENT_LAST;
93dbfad7 3848 } else if (em->block_start == EXTENT_MAP_INLINE) {
1506fcc8
YS
3849 flags |= (FIEMAP_EXTENT_DATA_INLINE |
3850 FIEMAP_EXTENT_NOT_ALIGNED);
93dbfad7 3851 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
1506fcc8
YS
3852 flags |= (FIEMAP_EXTENT_DELALLOC |
3853 FIEMAP_EXTENT_UNKNOWN);
93dbfad7 3854 } else {
ea8efc74 3855 disko = em->block_start + offset_in_extent;
1506fcc8
YS
3856 }
3857 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3858 flags |= FIEMAP_EXTENT_ENCODED;
3859
1506fcc8
YS
3860 free_extent_map(em);
3861 em = NULL;
ec29ed5b
CM
3862 if ((em_start >= last) || em_len == (u64)-1 ||
3863 (last == (u64)-1 && isize <= em_end)) {
1506fcc8
YS
3864 flags |= FIEMAP_EXTENT_LAST;
3865 end = 1;
3866 }
3867
ec29ed5b
CM
3868 /* now scan forward to see if this is really the last extent. */
3869 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3870 get_extent);
3871 if (IS_ERR(em)) {
3872 ret = PTR_ERR(em);
3873 goto out;
3874 }
3875 if (!em) {
975f84fe
JB
3876 flags |= FIEMAP_EXTENT_LAST;
3877 end = 1;
3878 }
ec29ed5b
CM
3879 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3880 em_len, flags);
3881 if (ret)
3882 goto out_free;
1506fcc8
YS
3883 }
3884out_free:
3885 free_extent_map(em);
3886out:
2ac55d41
JB
3887 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3888 &cached_state, GFP_NOFS);
1506fcc8
YS
3889 return ret;
3890}
3891
4a54c8c1 3892inline struct page *extent_buffer_page(struct extent_buffer *eb,
d1310b2e
CM
3893 unsigned long i)
3894{
727011e0 3895 return eb->pages[i];
d1310b2e
CM
3896}
3897
4a54c8c1 3898inline unsigned long num_extent_pages(u64 start, u64 len)
728131d8 3899{
6af118ce
CM
3900 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3901 (start >> PAGE_CACHE_SHIFT);
728131d8
CM
3902}
3903
727011e0
CM
3904static void __free_extent_buffer(struct extent_buffer *eb)
3905{
3906#if LEAK_DEBUG
3907 unsigned long flags;
3908 spin_lock_irqsave(&leak_lock, flags);
3909 list_del(&eb->leak_list);
3910 spin_unlock_irqrestore(&leak_lock, flags);
3911#endif
3912 if (eb->pages && eb->pages != eb->inline_pages)
3913 kfree(eb->pages);
3914 kmem_cache_free(extent_buffer_cache, eb);
3915}
3916
d1310b2e
CM
3917static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3918 u64 start,
3919 unsigned long len,
3920 gfp_t mask)
3921{
3922 struct extent_buffer *eb = NULL;
3935127c 3923#if LEAK_DEBUG
2d2ae547 3924 unsigned long flags;
4bef0848 3925#endif
d1310b2e 3926
d1310b2e 3927 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
91ca338d
TI
3928 if (eb == NULL)
3929 return NULL;
d1310b2e
CM
3930 eb->start = start;
3931 eb->len = len;
4f2de97a 3932 eb->tree = tree;
bd681513
CM
3933 rwlock_init(&eb->lock);
3934 atomic_set(&eb->write_locks, 0);
3935 atomic_set(&eb->read_locks, 0);
3936 atomic_set(&eb->blocking_readers, 0);
3937 atomic_set(&eb->blocking_writers, 0);
3938 atomic_set(&eb->spinning_readers, 0);
3939 atomic_set(&eb->spinning_writers, 0);
5b25f70f 3940 eb->lock_nested = 0;
bd681513
CM
3941 init_waitqueue_head(&eb->write_lock_wq);
3942 init_waitqueue_head(&eb->read_lock_wq);
b4ce94de 3943
3935127c 3944#if LEAK_DEBUG
2d2ae547
CM
3945 spin_lock_irqsave(&leak_lock, flags);
3946 list_add(&eb->leak_list, &buffers);
3947 spin_unlock_irqrestore(&leak_lock, flags);
4bef0848 3948#endif
3083ee2e 3949 spin_lock_init(&eb->refs_lock);
d1310b2e 3950 atomic_set(&eb->refs, 1);
0b32f4bb 3951 atomic_set(&eb->io_pages, 0);
727011e0
CM
3952
3953 if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
3954 struct page **pages;
3955 int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
3956 PAGE_CACHE_SHIFT;
3957 pages = kzalloc(num_pages, mask);
3958 if (!pages) {
3959 __free_extent_buffer(eb);
3960 return NULL;
3961 }
3962 eb->pages = pages;
3963 } else {
3964 eb->pages = eb->inline_pages;
3965 }
d1310b2e
CM
3966
3967 return eb;
3968}
3969
0b32f4bb
JB
3970static int extent_buffer_under_io(struct extent_buffer *eb)
3971{
3972 return (atomic_read(&eb->io_pages) ||
3973 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
3974 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3975}
3976
897ca6e9
MX
3977/*
3978 * Helper for releasing extent buffer page.
3979 */
3980static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3981 unsigned long start_idx)
3982{
3983 unsigned long index;
3984 struct page *page;
3985
0b32f4bb
JB
3986 BUG_ON(extent_buffer_under_io(eb));
3987
897ca6e9
MX
3988 index = num_extent_pages(eb->start, eb->len);
3989 if (start_idx >= index)
3990 return;
3991
3992 do {
3993 index--;
3994 page = extent_buffer_page(eb, index);
4f2de97a
JB
3995 if (page) {
3996 spin_lock(&page->mapping->private_lock);
3997 /*
3998 * We do this since we'll remove the pages after we've
3999 * removed the eb from the radix tree, so we could race
4000 * and have this page now attached to the new eb. So
4001 * only clear page_private if it's still connected to
4002 * this eb.
4003 */
4004 if (PagePrivate(page) &&
4005 page->private == (unsigned long)eb) {
0b32f4bb 4006 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3083ee2e
JB
4007 BUG_ON(PageDirty(page));
4008 BUG_ON(PageWriteback(page));
4f2de97a
JB
4009 /*
4010 * We need to make sure we haven't be attached
4011 * to a new eb.
4012 */
4013 ClearPagePrivate(page);
4014 set_page_private(page, 0);
4015 /* One for the page private */
4016 page_cache_release(page);
4017 }
4018 spin_unlock(&page->mapping->private_lock);
4019
4020 /* One for when we alloced the page */
897ca6e9 4021 page_cache_release(page);
4f2de97a 4022 }
897ca6e9
MX
4023 } while (index != start_idx);
4024}
4025
4026/*
4027 * Helper for releasing the extent buffer.
4028 */
4029static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4030{
4031 btrfs_release_extent_buffer_page(eb, 0);
4032 __free_extent_buffer(eb);
4033}
4034
0b32f4bb
JB
4035static void check_buffer_tree_ref(struct extent_buffer *eb)
4036{
4037 /* the ref bit is tricky. We have to make sure it is set
4038 * if we have the buffer dirty. Otherwise the
4039 * code to free a buffer can end up dropping a dirty
4040 * page
4041 *
4042 * Once the ref bit is set, it won't go away while the
4043 * buffer is dirty or in writeback, and it also won't
4044 * go away while we have the reference count on the
4045 * eb bumped.
4046 *
4047 * We can't just set the ref bit without bumping the
4048 * ref on the eb because free_extent_buffer might
4049 * see the ref bit and try to clear it. If this happens
4050 * free_extent_buffer might end up dropping our original
4051 * ref by mistake and freeing the page before we are able
4052 * to add one more ref.
4053 *
4054 * So bump the ref count first, then set the bit. If someone
4055 * beat us to it, drop the ref we added.
4056 */
4057 if (!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4058 atomic_inc(&eb->refs);
4059 if (test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4060 atomic_dec(&eb->refs);
4061 }
4062}
4063
5df4235e
JB
4064static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4065{
4066 unsigned long num_pages, i;
4067
0b32f4bb
JB
4068 check_buffer_tree_ref(eb);
4069
5df4235e
JB
4070 num_pages = num_extent_pages(eb->start, eb->len);
4071 for (i = 0; i < num_pages; i++) {
4072 struct page *p = extent_buffer_page(eb, i);
4073 mark_page_accessed(p);
4074 }
4075}
4076
d1310b2e 4077struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
727011e0 4078 u64 start, unsigned long len)
d1310b2e
CM
4079{
4080 unsigned long num_pages = num_extent_pages(start, len);
4081 unsigned long i;
4082 unsigned long index = start >> PAGE_CACHE_SHIFT;
4083 struct extent_buffer *eb;
6af118ce 4084 struct extent_buffer *exists = NULL;
d1310b2e
CM
4085 struct page *p;
4086 struct address_space *mapping = tree->mapping;
4087 int uptodate = 1;
19fe0a8b 4088 int ret;
d1310b2e 4089
19fe0a8b
MX
4090 rcu_read_lock();
4091 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4092 if (eb && atomic_inc_not_zero(&eb->refs)) {
4093 rcu_read_unlock();
5df4235e 4094 mark_extent_buffer_accessed(eb);
6af118ce
CM
4095 return eb;
4096 }
19fe0a8b 4097 rcu_read_unlock();
6af118ce 4098
ba144192 4099 eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
2b114d1d 4100 if (!eb)
d1310b2e
CM
4101 return NULL;
4102
727011e0 4103 for (i = 0; i < num_pages; i++, index++) {
a6591715 4104 p = find_or_create_page(mapping, index, GFP_NOFS);
d1310b2e
CM
4105 if (!p) {
4106 WARN_ON(1);
6af118ce 4107 goto free_eb;
d1310b2e 4108 }
4f2de97a
JB
4109
4110 spin_lock(&mapping->private_lock);
4111 if (PagePrivate(p)) {
4112 /*
4113 * We could have already allocated an eb for this page
4114 * and attached one so lets see if we can get a ref on
4115 * the existing eb, and if we can we know it's good and
4116 * we can just return that one, else we know we can just
4117 * overwrite page->private.
4118 */
4119 exists = (struct extent_buffer *)p->private;
4120 if (atomic_inc_not_zero(&exists->refs)) {
4121 spin_unlock(&mapping->private_lock);
4122 unlock_page(p);
5df4235e 4123 mark_extent_buffer_accessed(exists);
4f2de97a
JB
4124 goto free_eb;
4125 }
4126
0b32f4bb 4127 /*
4f2de97a
JB
4128 * Do this so attach doesn't complain and we need to
4129 * drop the ref the old guy had.
4130 */
4131 ClearPagePrivate(p);
0b32f4bb 4132 WARN_ON(PageDirty(p));
4f2de97a
JB
4133 page_cache_release(p);
4134 }
4135 attach_extent_buffer_page(eb, p);
4136 spin_unlock(&mapping->private_lock);
0b32f4bb 4137 WARN_ON(PageDirty(p));
d1310b2e 4138 mark_page_accessed(p);
727011e0 4139 eb->pages[i] = p;
d1310b2e
CM
4140 if (!PageUptodate(p))
4141 uptodate = 0;
eb14ab8e
CM
4142
4143 /*
4144 * see below about how we avoid a nasty race with release page
4145 * and why we unlock later
4146 */
d1310b2e
CM
4147 }
4148 if (uptodate)
b4ce94de 4149 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
115391d2 4150again:
19fe0a8b
MX
4151 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4152 if (ret)
4153 goto free_eb;
4154
6af118ce 4155 spin_lock(&tree->buffer_lock);
19fe0a8b
MX
4156 ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
4157 if (ret == -EEXIST) {
4158 exists = radix_tree_lookup(&tree->buffer,
4159 start >> PAGE_CACHE_SHIFT);
115391d2
JB
4160 if (!atomic_inc_not_zero(&exists->refs)) {
4161 spin_unlock(&tree->buffer_lock);
4162 radix_tree_preload_end();
115391d2
JB
4163 exists = NULL;
4164 goto again;
4165 }
6af118ce 4166 spin_unlock(&tree->buffer_lock);
19fe0a8b 4167 radix_tree_preload_end();
5df4235e 4168 mark_extent_buffer_accessed(exists);
6af118ce
CM
4169 goto free_eb;
4170 }
6af118ce 4171 /* add one reference for the tree */
3083ee2e 4172 spin_lock(&eb->refs_lock);
0b32f4bb 4173 check_buffer_tree_ref(eb);
3083ee2e 4174 spin_unlock(&eb->refs_lock);
f044ba78 4175 spin_unlock(&tree->buffer_lock);
19fe0a8b 4176 radix_tree_preload_end();
eb14ab8e
CM
4177
4178 /*
4179 * there is a race where release page may have
4180 * tried to find this extent buffer in the radix
4181 * but failed. It will tell the VM it is safe to
4182 * reclaim the, and it will clear the page private bit.
4183 * We must make sure to set the page private bit properly
4184 * after the extent buffer is in the radix tree so
4185 * it doesn't get lost
4186 */
727011e0
CM
4187 SetPageChecked(eb->pages[0]);
4188 for (i = 1; i < num_pages; i++) {
4189 p = extent_buffer_page(eb, i);
727011e0
CM
4190 ClearPageChecked(p);
4191 unlock_page(p);
4192 }
4193 unlock_page(eb->pages[0]);
d1310b2e
CM
4194 return eb;
4195
6af118ce 4196free_eb:
727011e0
CM
4197 for (i = 0; i < num_pages; i++) {
4198 if (eb->pages[i])
4199 unlock_page(eb->pages[i]);
4200 }
eb14ab8e 4201
d1310b2e 4202 if (!atomic_dec_and_test(&eb->refs))
6af118ce 4203 return exists;
897ca6e9 4204 btrfs_release_extent_buffer(eb);
6af118ce 4205 return exists;
d1310b2e 4206}
d1310b2e
CM
4207
4208struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
f09d1f60 4209 u64 start, unsigned long len)
d1310b2e 4210{
d1310b2e 4211 struct extent_buffer *eb;
d1310b2e 4212
19fe0a8b
MX
4213 rcu_read_lock();
4214 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4215 if (eb && atomic_inc_not_zero(&eb->refs)) {
4216 rcu_read_unlock();
5df4235e 4217 mark_extent_buffer_accessed(eb);
19fe0a8b
MX
4218 return eb;
4219 }
4220 rcu_read_unlock();
0f9dd46c 4221
19fe0a8b 4222 return NULL;
d1310b2e 4223}
d1310b2e 4224
3083ee2e
JB
4225static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4226{
4227 struct extent_buffer *eb =
4228 container_of(head, struct extent_buffer, rcu_head);
4229
4230 __free_extent_buffer(eb);
4231}
4232
3083ee2e
JB
4233/* Expects to have eb->eb_lock already held */
4234static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
4235{
4236 WARN_ON(atomic_read(&eb->refs) == 0);
4237 if (atomic_dec_and_test(&eb->refs)) {
4238 struct extent_io_tree *tree = eb->tree;
3083ee2e
JB
4239
4240 spin_unlock(&eb->refs_lock);
4241
3083ee2e
JB
4242 spin_lock(&tree->buffer_lock);
4243 radix_tree_delete(&tree->buffer,
4244 eb->start >> PAGE_CACHE_SHIFT);
4245 spin_unlock(&tree->buffer_lock);
4246
4247 /* Should be safe to release our pages at this point */
4248 btrfs_release_extent_buffer_page(eb, 0);
4249
4250 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4251 return;
4252 }
4253 spin_unlock(&eb->refs_lock);
4254}
4255
d1310b2e
CM
4256void free_extent_buffer(struct extent_buffer *eb)
4257{
d1310b2e
CM
4258 if (!eb)
4259 return;
4260
3083ee2e
JB
4261 spin_lock(&eb->refs_lock);
4262 if (atomic_read(&eb->refs) == 2 &&
4263 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
0b32f4bb 4264 !extent_buffer_under_io(eb) &&
3083ee2e
JB
4265 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4266 atomic_dec(&eb->refs);
4267
4268 /*
4269 * I know this is terrible, but it's temporary until we stop tracking
4270 * the uptodate bits and such for the extent buffers.
4271 */
4272 release_extent_buffer(eb, GFP_ATOMIC);
4273}
4274
4275void free_extent_buffer_stale(struct extent_buffer *eb)
4276{
4277 if (!eb)
d1310b2e
CM
4278 return;
4279
3083ee2e
JB
4280 spin_lock(&eb->refs_lock);
4281 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4282
0b32f4bb 4283 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3083ee2e
JB
4284 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4285 atomic_dec(&eb->refs);
4286 release_extent_buffer(eb, GFP_NOFS);
d1310b2e 4287}
d1310b2e 4288
1d4284bd 4289void clear_extent_buffer_dirty(struct extent_buffer *eb)
d1310b2e 4290{
d1310b2e
CM
4291 unsigned long i;
4292 unsigned long num_pages;
4293 struct page *page;
4294
d1310b2e
CM
4295 num_pages = num_extent_pages(eb->start, eb->len);
4296
4297 for (i = 0; i < num_pages; i++) {
4298 page = extent_buffer_page(eb, i);
b9473439 4299 if (!PageDirty(page))
d2c3f4f6
CM
4300 continue;
4301
a61e6f29 4302 lock_page(page);
eb14ab8e
CM
4303 WARN_ON(!PagePrivate(page));
4304
d1310b2e 4305 clear_page_dirty_for_io(page);
0ee0fda0 4306 spin_lock_irq(&page->mapping->tree_lock);
d1310b2e
CM
4307 if (!PageDirty(page)) {
4308 radix_tree_tag_clear(&page->mapping->page_tree,
4309 page_index(page),
4310 PAGECACHE_TAG_DIRTY);
4311 }
0ee0fda0 4312 spin_unlock_irq(&page->mapping->tree_lock);
bf0da8c1 4313 ClearPageError(page);
a61e6f29 4314 unlock_page(page);
d1310b2e 4315 }
0b32f4bb 4316 WARN_ON(atomic_read(&eb->refs) == 0);
d1310b2e 4317}
d1310b2e 4318
0b32f4bb 4319int set_extent_buffer_dirty(struct extent_buffer *eb)
d1310b2e
CM
4320{
4321 unsigned long i;
4322 unsigned long num_pages;
b9473439 4323 int was_dirty = 0;
d1310b2e 4324
0b32f4bb
JB
4325 check_buffer_tree_ref(eb);
4326
b9473439 4327 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
0b32f4bb 4328
d1310b2e 4329 num_pages = num_extent_pages(eb->start, eb->len);
3083ee2e 4330 WARN_ON(atomic_read(&eb->refs) == 0);
0b32f4bb
JB
4331 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4332
b9473439 4333 for (i = 0; i < num_pages; i++)
0b32f4bb 4334 set_page_dirty(extent_buffer_page(eb, i));
b9473439 4335 return was_dirty;
d1310b2e 4336}
d1310b2e 4337
0b32f4bb 4338static int range_straddles_pages(u64 start, u64 len)
19b6caf4
CM
4339{
4340 if (len < PAGE_CACHE_SIZE)
4341 return 1;
4342 if (start & (PAGE_CACHE_SIZE - 1))
4343 return 1;
4344 if ((start + len) & (PAGE_CACHE_SIZE - 1))
4345 return 1;
4346 return 0;
4347}
4348
0b32f4bb 4349int clear_extent_buffer_uptodate(struct extent_buffer *eb)
1259ab75
CM
4350{
4351 unsigned long i;
4352 struct page *page;
4353 unsigned long num_pages;
4354
b4ce94de 4355 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
0b32f4bb 4356 num_pages = num_extent_pages(eb->start, eb->len);
1259ab75
CM
4357 for (i = 0; i < num_pages; i++) {
4358 page = extent_buffer_page(eb, i);
33958dc6
CM
4359 if (page)
4360 ClearPageUptodate(page);
1259ab75
CM
4361 }
4362 return 0;
4363}
4364
0b32f4bb 4365int set_extent_buffer_uptodate(struct extent_buffer *eb)
d1310b2e
CM
4366{
4367 unsigned long i;
4368 struct page *page;
4369 unsigned long num_pages;
4370
0b32f4bb 4371 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
d1310b2e 4372 num_pages = num_extent_pages(eb->start, eb->len);
d1310b2e
CM
4373 for (i = 0; i < num_pages; i++) {
4374 page = extent_buffer_page(eb, i);
d1310b2e
CM
4375 SetPageUptodate(page);
4376 }
4377 return 0;
4378}
d1310b2e 4379
ce9adaa5
CM
4380int extent_range_uptodate(struct extent_io_tree *tree,
4381 u64 start, u64 end)
4382{
4383 struct page *page;
4384 int ret;
4385 int pg_uptodate = 1;
4386 int uptodate;
4387 unsigned long index;
4388
0b32f4bb 4389 if (range_straddles_pages(start, end - start + 1)) {
19b6caf4
CM
4390 ret = test_range_bit(tree, start, end,
4391 EXTENT_UPTODATE, 1, NULL);
4392 if (ret)
4393 return 1;
4394 }
d397712b 4395 while (start <= end) {
ce9adaa5
CM
4396 index = start >> PAGE_CACHE_SHIFT;
4397 page = find_get_page(tree->mapping, index);
8bedd51b
MH
4398 if (!page)
4399 return 1;
ce9adaa5
CM
4400 uptodate = PageUptodate(page);
4401 page_cache_release(page);
4402 if (!uptodate) {
4403 pg_uptodate = 0;
4404 break;
4405 }
4406 start += PAGE_CACHE_SIZE;
4407 }
4408 return pg_uptodate;
4409}
4410
0b32f4bb 4411int extent_buffer_uptodate(struct extent_buffer *eb)
d1310b2e 4412{
0b32f4bb 4413 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
d1310b2e 4414}
d1310b2e
CM
4415
4416int read_extent_buffer_pages(struct extent_io_tree *tree,
bb82ab88 4417 struct extent_buffer *eb, u64 start, int wait,
f188591e 4418 get_extent_t *get_extent, int mirror_num)
d1310b2e
CM
4419{
4420 unsigned long i;
4421 unsigned long start_i;
4422 struct page *page;
4423 int err;
4424 int ret = 0;
ce9adaa5
CM
4425 int locked_pages = 0;
4426 int all_uptodate = 1;
d1310b2e 4427 unsigned long num_pages;
727011e0 4428 unsigned long num_reads = 0;
a86c12c7 4429 struct bio *bio = NULL;
c8b97818 4430 unsigned long bio_flags = 0;
a86c12c7 4431
b4ce94de 4432 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
d1310b2e
CM
4433 return 0;
4434
d1310b2e
CM
4435 if (start) {
4436 WARN_ON(start < eb->start);
4437 start_i = (start >> PAGE_CACHE_SHIFT) -
4438 (eb->start >> PAGE_CACHE_SHIFT);
4439 } else {
4440 start_i = 0;
4441 }
4442
4443 num_pages = num_extent_pages(eb->start, eb->len);
4444 for (i = start_i; i < num_pages; i++) {
4445 page = extent_buffer_page(eb, i);
bb82ab88 4446 if (wait == WAIT_NONE) {
2db04966 4447 if (!trylock_page(page))
ce9adaa5 4448 goto unlock_exit;
d1310b2e
CM
4449 } else {
4450 lock_page(page);
4451 }
ce9adaa5 4452 locked_pages++;
727011e0
CM
4453 if (!PageUptodate(page)) {
4454 num_reads++;
ce9adaa5 4455 all_uptodate = 0;
727011e0 4456 }
ce9adaa5
CM
4457 }
4458 if (all_uptodate) {
4459 if (start_i == 0)
b4ce94de 4460 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
ce9adaa5
CM
4461 goto unlock_exit;
4462 }
4463
ea466794
JB
4464 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
4465 eb->failed_mirror = 0;
0b32f4bb 4466 atomic_set(&eb->io_pages, num_reads);
ce9adaa5
CM
4467 for (i = start_i; i < num_pages; i++) {
4468 page = extent_buffer_page(eb, i);
ce9adaa5 4469 if (!PageUptodate(page)) {
f188591e 4470 ClearPageError(page);
a86c12c7 4471 err = __extent_read_full_page(tree, page,
f188591e 4472 get_extent, &bio,
c8b97818 4473 mirror_num, &bio_flags);
d397712b 4474 if (err)
d1310b2e 4475 ret = err;
d1310b2e
CM
4476 } else {
4477 unlock_page(page);
4478 }
4479 }
4480
355808c2
JM
4481 if (bio) {
4482 err = submit_one_bio(READ, bio, mirror_num, bio_flags);
79787eaa
JM
4483 if (err)
4484 return err;
355808c2 4485 }
a86c12c7 4486
bb82ab88 4487 if (ret || wait != WAIT_COMPLETE)
d1310b2e 4488 return ret;
d397712b 4489
d1310b2e
CM
4490 for (i = start_i; i < num_pages; i++) {
4491 page = extent_buffer_page(eb, i);
4492 wait_on_page_locked(page);
d397712b 4493 if (!PageUptodate(page))
d1310b2e 4494 ret = -EIO;
d1310b2e 4495 }
d397712b 4496
d1310b2e 4497 return ret;
ce9adaa5
CM
4498
4499unlock_exit:
4500 i = start_i;
d397712b 4501 while (locked_pages > 0) {
ce9adaa5
CM
4502 page = extent_buffer_page(eb, i);
4503 i++;
4504 unlock_page(page);
4505 locked_pages--;
4506 }
4507 return ret;
d1310b2e 4508}
d1310b2e
CM
4509
4510void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4511 unsigned long start,
4512 unsigned long len)
4513{
4514 size_t cur;
4515 size_t offset;
4516 struct page *page;
4517 char *kaddr;
4518 char *dst = (char *)dstv;
4519 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4520 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
d1310b2e
CM
4521
4522 WARN_ON(start > eb->len);
4523 WARN_ON(start + len > eb->start + eb->len);
4524
4525 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4526
d397712b 4527 while (len > 0) {
d1310b2e 4528 page = extent_buffer_page(eb, i);
d1310b2e
CM
4529
4530 cur = min(len, (PAGE_CACHE_SIZE - offset));
a6591715 4531 kaddr = page_address(page);
d1310b2e 4532 memcpy(dst, kaddr + offset, cur);
d1310b2e
CM
4533
4534 dst += cur;
4535 len -= cur;
4536 offset = 0;
4537 i++;
4538 }
4539}
d1310b2e
CM
4540
4541int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
a6591715 4542 unsigned long min_len, char **map,
d1310b2e 4543 unsigned long *map_start,
a6591715 4544 unsigned long *map_len)
d1310b2e
CM
4545{
4546 size_t offset = start & (PAGE_CACHE_SIZE - 1);
4547 char *kaddr;
4548 struct page *p;
4549 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4550 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4551 unsigned long end_i = (start_offset + start + min_len - 1) >>
4552 PAGE_CACHE_SHIFT;
4553
4554 if (i != end_i)
4555 return -EINVAL;
4556
4557 if (i == 0) {
4558 offset = start_offset;
4559 *map_start = 0;
4560 } else {
4561 offset = 0;
4562 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4563 }
d397712b 4564
d1310b2e 4565 if (start + min_len > eb->len) {
d397712b
CM
4566 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4567 "wanted %lu %lu\n", (unsigned long long)eb->start,
4568 eb->len, start, min_len);
d1310b2e 4569 WARN_ON(1);
85026533 4570 return -EINVAL;
d1310b2e
CM
4571 }
4572
4573 p = extent_buffer_page(eb, i);
a6591715 4574 kaddr = page_address(p);
d1310b2e
CM
4575 *map = kaddr + offset;
4576 *map_len = PAGE_CACHE_SIZE - offset;
4577 return 0;
4578}
d1310b2e 4579
d1310b2e
CM
4580int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4581 unsigned long start,
4582 unsigned long len)
4583{
4584 size_t cur;
4585 size_t offset;
4586 struct page *page;
4587 char *kaddr;
4588 char *ptr = (char *)ptrv;
4589 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4590 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4591 int ret = 0;
4592
4593 WARN_ON(start > eb->len);
4594 WARN_ON(start + len > eb->start + eb->len);
4595
4596 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4597
d397712b 4598 while (len > 0) {
d1310b2e 4599 page = extent_buffer_page(eb, i);
d1310b2e
CM
4600
4601 cur = min(len, (PAGE_CACHE_SIZE - offset));
4602
a6591715 4603 kaddr = page_address(page);
d1310b2e 4604 ret = memcmp(ptr, kaddr + offset, cur);
d1310b2e
CM
4605 if (ret)
4606 break;
4607
4608 ptr += cur;
4609 len -= cur;
4610 offset = 0;
4611 i++;
4612 }
4613 return ret;
4614}
d1310b2e
CM
4615
4616void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
4617 unsigned long start, unsigned long len)
4618{
4619 size_t cur;
4620 size_t offset;
4621 struct page *page;
4622 char *kaddr;
4623 char *src = (char *)srcv;
4624 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4625 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4626
4627 WARN_ON(start > eb->len);
4628 WARN_ON(start + len > eb->start + eb->len);
4629
4630 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4631
d397712b 4632 while (len > 0) {
d1310b2e
CM
4633 page = extent_buffer_page(eb, i);
4634 WARN_ON(!PageUptodate(page));
4635
4636 cur = min(len, PAGE_CACHE_SIZE - offset);
a6591715 4637 kaddr = page_address(page);
d1310b2e 4638 memcpy(kaddr + offset, src, cur);
d1310b2e
CM
4639
4640 src += cur;
4641 len -= cur;
4642 offset = 0;
4643 i++;
4644 }
4645}
d1310b2e
CM
4646
4647void memset_extent_buffer(struct extent_buffer *eb, char c,
4648 unsigned long start, unsigned long len)
4649{
4650 size_t cur;
4651 size_t offset;
4652 struct page *page;
4653 char *kaddr;
4654 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4655 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4656
4657 WARN_ON(start > eb->len);
4658 WARN_ON(start + len > eb->start + eb->len);
4659
4660 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4661
d397712b 4662 while (len > 0) {
d1310b2e
CM
4663 page = extent_buffer_page(eb, i);
4664 WARN_ON(!PageUptodate(page));
4665
4666 cur = min(len, PAGE_CACHE_SIZE - offset);
a6591715 4667 kaddr = page_address(page);
d1310b2e 4668 memset(kaddr + offset, c, cur);
d1310b2e
CM
4669
4670 len -= cur;
4671 offset = 0;
4672 i++;
4673 }
4674}
d1310b2e
CM
4675
4676void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
4677 unsigned long dst_offset, unsigned long src_offset,
4678 unsigned long len)
4679{
4680 u64 dst_len = dst->len;
4681 size_t cur;
4682 size_t offset;
4683 struct page *page;
4684 char *kaddr;
4685 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4686 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4687
4688 WARN_ON(src->len != dst_len);
4689
4690 offset = (start_offset + dst_offset) &
4691 ((unsigned long)PAGE_CACHE_SIZE - 1);
4692
d397712b 4693 while (len > 0) {
d1310b2e
CM
4694 page = extent_buffer_page(dst, i);
4695 WARN_ON(!PageUptodate(page));
4696
4697 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
4698
a6591715 4699 kaddr = page_address(page);
d1310b2e 4700 read_extent_buffer(src, kaddr + offset, src_offset, cur);
d1310b2e
CM
4701
4702 src_offset += cur;
4703 len -= cur;
4704 offset = 0;
4705 i++;
4706 }
4707}
d1310b2e
CM
4708
4709static void move_pages(struct page *dst_page, struct page *src_page,
4710 unsigned long dst_off, unsigned long src_off,
4711 unsigned long len)
4712{
a6591715 4713 char *dst_kaddr = page_address(dst_page);
d1310b2e
CM
4714 if (dst_page == src_page) {
4715 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
4716 } else {
a6591715 4717 char *src_kaddr = page_address(src_page);
d1310b2e
CM
4718 char *p = dst_kaddr + dst_off + len;
4719 char *s = src_kaddr + src_off + len;
4720
4721 while (len--)
4722 *--p = *--s;
d1310b2e 4723 }
d1310b2e
CM
4724}
4725
3387206f
ST
4726static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4727{
4728 unsigned long distance = (src > dst) ? src - dst : dst - src;
4729 return distance < len;
4730}
4731
d1310b2e
CM
4732static void copy_pages(struct page *dst_page, struct page *src_page,
4733 unsigned long dst_off, unsigned long src_off,
4734 unsigned long len)
4735{
a6591715 4736 char *dst_kaddr = page_address(dst_page);
d1310b2e 4737 char *src_kaddr;
727011e0 4738 int must_memmove = 0;
d1310b2e 4739
3387206f 4740 if (dst_page != src_page) {
a6591715 4741 src_kaddr = page_address(src_page);
3387206f 4742 } else {
d1310b2e 4743 src_kaddr = dst_kaddr;
727011e0
CM
4744 if (areas_overlap(src_off, dst_off, len))
4745 must_memmove = 1;
3387206f 4746 }
d1310b2e 4747
727011e0
CM
4748 if (must_memmove)
4749 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
4750 else
4751 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
d1310b2e
CM
4752}
4753
4754void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4755 unsigned long src_offset, unsigned long len)
4756{
4757 size_t cur;
4758 size_t dst_off_in_page;
4759 size_t src_off_in_page;
4760 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4761 unsigned long dst_i;
4762 unsigned long src_i;
4763
4764 if (src_offset + len > dst->len) {
d397712b
CM
4765 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4766 "len %lu dst len %lu\n", src_offset, len, dst->len);
d1310b2e
CM
4767 BUG_ON(1);
4768 }
4769 if (dst_offset + len > dst->len) {
d397712b
CM
4770 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4771 "len %lu dst len %lu\n", dst_offset, len, dst->len);
d1310b2e
CM
4772 BUG_ON(1);
4773 }
4774
d397712b 4775 while (len > 0) {
d1310b2e
CM
4776 dst_off_in_page = (start_offset + dst_offset) &
4777 ((unsigned long)PAGE_CACHE_SIZE - 1);
4778 src_off_in_page = (start_offset + src_offset) &
4779 ((unsigned long)PAGE_CACHE_SIZE - 1);
4780
4781 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4782 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
4783
4784 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
4785 src_off_in_page));
4786 cur = min_t(unsigned long, cur,
4787 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
4788
4789 copy_pages(extent_buffer_page(dst, dst_i),
4790 extent_buffer_page(dst, src_i),
4791 dst_off_in_page, src_off_in_page, cur);
4792
4793 src_offset += cur;
4794 dst_offset += cur;
4795 len -= cur;
4796 }
4797}
d1310b2e
CM
4798
4799void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4800 unsigned long src_offset, unsigned long len)
4801{
4802 size_t cur;
4803 size_t dst_off_in_page;
4804 size_t src_off_in_page;
4805 unsigned long dst_end = dst_offset + len - 1;
4806 unsigned long src_end = src_offset + len - 1;
4807 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4808 unsigned long dst_i;
4809 unsigned long src_i;
4810
4811 if (src_offset + len > dst->len) {
d397712b
CM
4812 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4813 "len %lu len %lu\n", src_offset, len, dst->len);
d1310b2e
CM
4814 BUG_ON(1);
4815 }
4816 if (dst_offset + len > dst->len) {
d397712b
CM
4817 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4818 "len %lu len %lu\n", dst_offset, len, dst->len);
d1310b2e
CM
4819 BUG_ON(1);
4820 }
727011e0 4821 if (dst_offset < src_offset) {
d1310b2e
CM
4822 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4823 return;
4824 }
d397712b 4825 while (len > 0) {
d1310b2e
CM
4826 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
4827 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
4828
4829 dst_off_in_page = (start_offset + dst_end) &
4830 ((unsigned long)PAGE_CACHE_SIZE - 1);
4831 src_off_in_page = (start_offset + src_end) &
4832 ((unsigned long)PAGE_CACHE_SIZE - 1);
4833
4834 cur = min_t(unsigned long, len, src_off_in_page + 1);
4835 cur = min(cur, dst_off_in_page + 1);
4836 move_pages(extent_buffer_page(dst, dst_i),
4837 extent_buffer_page(dst, src_i),
4838 dst_off_in_page - cur + 1,
4839 src_off_in_page - cur + 1, cur);
4840
4841 dst_end -= cur;
4842 src_end -= cur;
4843 len -= cur;
4844 }
4845}
6af118ce 4846
3083ee2e 4847int try_release_extent_buffer(struct page *page, gfp_t mask)
6af118ce 4848{
3083ee2e 4849 struct extent_buffer *eb;
6af118ce 4850
3083ee2e
JB
4851 /*
4852 * We need to make sure noboody is attaching this page to an eb right
4853 * now.
4854 */
4855 spin_lock(&page->mapping->private_lock);
4856 if (!PagePrivate(page)) {
4857 spin_unlock(&page->mapping->private_lock);
4f2de97a 4858 return 1;
3083ee2e 4859 }
6af118ce 4860
3083ee2e
JB
4861 eb = (struct extent_buffer *)page->private;
4862 BUG_ON(!eb);
4863
0b32f4bb 4864 /*
3083ee2e
JB
4865 * This is a little awful but should be ok, we need to make sure that
4866 * the eb doesn't disappear out from under us while we're looking at
4867 * this page.
4868 */
4869 spin_lock(&eb->refs_lock);
0b32f4bb 4870 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
3083ee2e
JB
4871 spin_unlock(&eb->refs_lock);
4872 spin_unlock(&page->mapping->private_lock);
4873 return 0;
6af118ce 4874 }
3083ee2e
JB
4875 spin_unlock(&page->mapping->private_lock);
4876
4877 if ((mask & GFP_NOFS) == GFP_NOFS)
4878 mask = GFP_NOFS;
19fe0a8b
MX
4879
4880 /*
3083ee2e
JB
4881 * If tree ref isn't set then we know the ref on this eb is a real ref,
4882 * so just return, this page will likely be freed soon anyway.
19fe0a8b 4883 */
3083ee2e
JB
4884 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4885 spin_unlock(&eb->refs_lock);
4886 return 0;
b9473439 4887 }
3083ee2e 4888 release_extent_buffer(eb, mask);
19fe0a8b 4889
3083ee2e 4890 return 1;
6af118ce 4891}