]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - fs/btrfs/extent_io.c
Btrfs: change the ordered tree to use a spinlock instead of a mutex
[mirror_ubuntu-hirsute-kernel.git] / fs / btrfs / extent_io.c
CommitLineData
d1310b2e
CM
1#include <linux/bitops.h>
2#include <linux/slab.h>
3#include <linux/bio.h>
4#include <linux/mm.h>
5#include <linux/gfp.h>
6#include <linux/pagemap.h>
7#include <linux/page-flags.h>
8#include <linux/module.h>
9#include <linux/spinlock.h>
10#include <linux/blkdev.h>
11#include <linux/swap.h>
d1310b2e
CM
12#include <linux/writeback.h>
13#include <linux/pagevec.h>
14#include "extent_io.h"
15#include "extent_map.h"
2db04966 16#include "compat.h"
902b22f3
DW
17#include "ctree.h"
18#include "btrfs_inode.h"
d1310b2e 19
d1310b2e
CM
20static struct kmem_cache *extent_state_cache;
21static struct kmem_cache *extent_buffer_cache;
22
23static LIST_HEAD(buffers);
24static LIST_HEAD(states);
4bef0848 25
b47eda86 26#define LEAK_DEBUG 0
3935127c 27#if LEAK_DEBUG
d397712b 28static DEFINE_SPINLOCK(leak_lock);
4bef0848 29#endif
d1310b2e 30
d1310b2e
CM
31#define BUFFER_LRU_MAX 64
32
33struct tree_entry {
34 u64 start;
35 u64 end;
d1310b2e
CM
36 struct rb_node rb_node;
37};
38
39struct extent_page_data {
40 struct bio *bio;
41 struct extent_io_tree *tree;
42 get_extent_t *get_extent;
771ed689
CM
43
44 /* tells writepage not to lock the state bits for this range
45 * it still does the unlocking
46 */
ffbd517d
CM
47 unsigned int extent_locked:1;
48
49 /* tells the submit_bio code to use a WRITE_SYNC */
50 unsigned int sync_io:1;
d1310b2e
CM
51};
52
53int __init extent_io_init(void)
54{
9601e3f6
CH
55 extent_state_cache = kmem_cache_create("extent_state",
56 sizeof(struct extent_state), 0,
57 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
d1310b2e
CM
58 if (!extent_state_cache)
59 return -ENOMEM;
60
9601e3f6
CH
61 extent_buffer_cache = kmem_cache_create("extent_buffers",
62 sizeof(struct extent_buffer), 0,
63 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
d1310b2e
CM
64 if (!extent_buffer_cache)
65 goto free_state_cache;
66 return 0;
67
68free_state_cache:
69 kmem_cache_destroy(extent_state_cache);
70 return -ENOMEM;
71}
72
73void extent_io_exit(void)
74{
75 struct extent_state *state;
2d2ae547 76 struct extent_buffer *eb;
d1310b2e
CM
77
78 while (!list_empty(&states)) {
2d2ae547 79 state = list_entry(states.next, struct extent_state, leak_list);
d397712b
CM
80 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
81 "state %lu in tree %p refs %d\n",
82 (unsigned long long)state->start,
83 (unsigned long long)state->end,
84 state->state, state->tree, atomic_read(&state->refs));
2d2ae547 85 list_del(&state->leak_list);
d1310b2e
CM
86 kmem_cache_free(extent_state_cache, state);
87
88 }
89
2d2ae547
CM
90 while (!list_empty(&buffers)) {
91 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
d397712b
CM
92 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
93 "refs %d\n", (unsigned long long)eb->start,
94 eb->len, atomic_read(&eb->refs));
2d2ae547
CM
95 list_del(&eb->leak_list);
96 kmem_cache_free(extent_buffer_cache, eb);
97 }
d1310b2e
CM
98 if (extent_state_cache)
99 kmem_cache_destroy(extent_state_cache);
100 if (extent_buffer_cache)
101 kmem_cache_destroy(extent_buffer_cache);
102}
103
104void extent_io_tree_init(struct extent_io_tree *tree,
105 struct address_space *mapping, gfp_t mask)
106{
6bef4d31
EP
107 tree->state = RB_ROOT;
108 tree->buffer = RB_ROOT;
d1310b2e
CM
109 tree->ops = NULL;
110 tree->dirty_bytes = 0;
70dec807 111 spin_lock_init(&tree->lock);
6af118ce 112 spin_lock_init(&tree->buffer_lock);
d1310b2e 113 tree->mapping = mapping;
d1310b2e 114}
d1310b2e 115
b2950863 116static struct extent_state *alloc_extent_state(gfp_t mask)
d1310b2e
CM
117{
118 struct extent_state *state;
3935127c 119#if LEAK_DEBUG
2d2ae547 120 unsigned long flags;
4bef0848 121#endif
d1310b2e
CM
122
123 state = kmem_cache_alloc(extent_state_cache, mask);
2b114d1d 124 if (!state)
d1310b2e
CM
125 return state;
126 state->state = 0;
d1310b2e 127 state->private = 0;
70dec807 128 state->tree = NULL;
3935127c 129#if LEAK_DEBUG
2d2ae547
CM
130 spin_lock_irqsave(&leak_lock, flags);
131 list_add(&state->leak_list, &states);
132 spin_unlock_irqrestore(&leak_lock, flags);
4bef0848 133#endif
d1310b2e
CM
134 atomic_set(&state->refs, 1);
135 init_waitqueue_head(&state->wq);
136 return state;
137}
d1310b2e 138
b2950863 139static void free_extent_state(struct extent_state *state)
d1310b2e 140{
d1310b2e
CM
141 if (!state)
142 return;
143 if (atomic_dec_and_test(&state->refs)) {
3935127c 144#if LEAK_DEBUG
2d2ae547 145 unsigned long flags;
4bef0848 146#endif
70dec807 147 WARN_ON(state->tree);
3935127c 148#if LEAK_DEBUG
2d2ae547
CM
149 spin_lock_irqsave(&leak_lock, flags);
150 list_del(&state->leak_list);
151 spin_unlock_irqrestore(&leak_lock, flags);
4bef0848 152#endif
d1310b2e
CM
153 kmem_cache_free(extent_state_cache, state);
154 }
155}
d1310b2e
CM
156
157static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
158 struct rb_node *node)
159{
d397712b
CM
160 struct rb_node **p = &root->rb_node;
161 struct rb_node *parent = NULL;
d1310b2e
CM
162 struct tree_entry *entry;
163
d397712b 164 while (*p) {
d1310b2e
CM
165 parent = *p;
166 entry = rb_entry(parent, struct tree_entry, rb_node);
167
168 if (offset < entry->start)
169 p = &(*p)->rb_left;
170 else if (offset > entry->end)
171 p = &(*p)->rb_right;
172 else
173 return parent;
174 }
175
176 entry = rb_entry(node, struct tree_entry, rb_node);
d1310b2e
CM
177 rb_link_node(node, parent, p);
178 rb_insert_color(node, root);
179 return NULL;
180}
181
80ea96b1 182static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
d1310b2e
CM
183 struct rb_node **prev_ret,
184 struct rb_node **next_ret)
185{
80ea96b1 186 struct rb_root *root = &tree->state;
d397712b 187 struct rb_node *n = root->rb_node;
d1310b2e
CM
188 struct rb_node *prev = NULL;
189 struct rb_node *orig_prev = NULL;
190 struct tree_entry *entry;
191 struct tree_entry *prev_entry = NULL;
192
d397712b 193 while (n) {
d1310b2e
CM
194 entry = rb_entry(n, struct tree_entry, rb_node);
195 prev = n;
196 prev_entry = entry;
197
198 if (offset < entry->start)
199 n = n->rb_left;
200 else if (offset > entry->end)
201 n = n->rb_right;
d397712b 202 else
d1310b2e
CM
203 return n;
204 }
205
206 if (prev_ret) {
207 orig_prev = prev;
d397712b 208 while (prev && offset > prev_entry->end) {
d1310b2e
CM
209 prev = rb_next(prev);
210 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
211 }
212 *prev_ret = prev;
213 prev = orig_prev;
214 }
215
216 if (next_ret) {
217 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
d397712b 218 while (prev && offset < prev_entry->start) {
d1310b2e
CM
219 prev = rb_prev(prev);
220 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
221 }
222 *next_ret = prev;
223 }
224 return NULL;
225}
226
80ea96b1
CM
227static inline struct rb_node *tree_search(struct extent_io_tree *tree,
228 u64 offset)
d1310b2e 229{
70dec807 230 struct rb_node *prev = NULL;
d1310b2e 231 struct rb_node *ret;
70dec807 232
80ea96b1 233 ret = __etree_search(tree, offset, &prev, NULL);
d397712b 234 if (!ret)
d1310b2e
CM
235 return prev;
236 return ret;
237}
238
6af118ce
CM
239static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
240 u64 offset, struct rb_node *node)
241{
242 struct rb_root *root = &tree->buffer;
d397712b
CM
243 struct rb_node **p = &root->rb_node;
244 struct rb_node *parent = NULL;
6af118ce
CM
245 struct extent_buffer *eb;
246
d397712b 247 while (*p) {
6af118ce
CM
248 parent = *p;
249 eb = rb_entry(parent, struct extent_buffer, rb_node);
250
251 if (offset < eb->start)
252 p = &(*p)->rb_left;
253 else if (offset > eb->start)
254 p = &(*p)->rb_right;
255 else
256 return eb;
257 }
258
259 rb_link_node(node, parent, p);
260 rb_insert_color(node, root);
261 return NULL;
262}
263
264static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
265 u64 offset)
266{
267 struct rb_root *root = &tree->buffer;
d397712b 268 struct rb_node *n = root->rb_node;
6af118ce
CM
269 struct extent_buffer *eb;
270
d397712b 271 while (n) {
6af118ce
CM
272 eb = rb_entry(n, struct extent_buffer, rb_node);
273 if (offset < eb->start)
274 n = n->rb_left;
275 else if (offset > eb->start)
276 n = n->rb_right;
277 else
278 return eb;
279 }
280 return NULL;
281}
282
9ed74f2d
JB
283static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
284 struct extent_state *other)
285{
286 if (tree->ops && tree->ops->merge_extent_hook)
287 tree->ops->merge_extent_hook(tree->mapping->host, new,
288 other);
289}
290
d1310b2e
CM
291/*
292 * utility function to look for merge candidates inside a given range.
293 * Any extents with matching state are merged together into a single
294 * extent in the tree. Extents with EXTENT_IO in their state field
295 * are not merged because the end_io handlers need to be able to do
296 * operations on them without sleeping (or doing allocations/splits).
297 *
298 * This should be called with the tree lock held.
299 */
300static int merge_state(struct extent_io_tree *tree,
301 struct extent_state *state)
302{
303 struct extent_state *other;
304 struct rb_node *other_node;
305
5b21f2ed 306 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
d1310b2e
CM
307 return 0;
308
309 other_node = rb_prev(&state->rb_node);
310 if (other_node) {
311 other = rb_entry(other_node, struct extent_state, rb_node);
312 if (other->end == state->start - 1 &&
313 other->state == state->state) {
9ed74f2d 314 merge_cb(tree, state, other);
d1310b2e 315 state->start = other->start;
70dec807 316 other->tree = NULL;
d1310b2e
CM
317 rb_erase(&other->rb_node, &tree->state);
318 free_extent_state(other);
319 }
320 }
321 other_node = rb_next(&state->rb_node);
322 if (other_node) {
323 other = rb_entry(other_node, struct extent_state, rb_node);
324 if (other->start == state->end + 1 &&
325 other->state == state->state) {
9ed74f2d 326 merge_cb(tree, state, other);
d1310b2e 327 other->start = state->start;
70dec807 328 state->tree = NULL;
d1310b2e
CM
329 rb_erase(&state->rb_node, &tree->state);
330 free_extent_state(state);
9ed74f2d 331 state = NULL;
d1310b2e
CM
332 }
333 }
9ed74f2d 334
d1310b2e
CM
335 return 0;
336}
337
9ed74f2d 338static int set_state_cb(struct extent_io_tree *tree,
291d673e
CM
339 struct extent_state *state,
340 unsigned long bits)
341{
342 if (tree->ops && tree->ops->set_bit_hook) {
9ed74f2d
JB
343 return tree->ops->set_bit_hook(tree->mapping->host,
344 state->start, state->end,
345 state->state, bits);
291d673e 346 }
9ed74f2d
JB
347
348 return 0;
291d673e
CM
349}
350
351static void clear_state_cb(struct extent_io_tree *tree,
352 struct extent_state *state,
353 unsigned long bits)
354{
9ed74f2d
JB
355 if (tree->ops && tree->ops->clear_bit_hook)
356 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
291d673e
CM
357}
358
d1310b2e
CM
359/*
360 * insert an extent_state struct into the tree. 'bits' are set on the
361 * struct before it is inserted.
362 *
363 * This may return -EEXIST if the extent is already there, in which case the
364 * state struct is freed.
365 *
366 * The tree lock is not taken internally. This is a utility function and
367 * probably isn't what you want to call (see set/clear_extent_bit).
368 */
369static int insert_state(struct extent_io_tree *tree,
370 struct extent_state *state, u64 start, u64 end,
371 int bits)
372{
373 struct rb_node *node;
9ed74f2d 374 int ret;
d1310b2e
CM
375
376 if (end < start) {
d397712b
CM
377 printk(KERN_ERR "btrfs end < start %llu %llu\n",
378 (unsigned long long)end,
379 (unsigned long long)start);
d1310b2e
CM
380 WARN_ON(1);
381 }
d1310b2e
CM
382 state->start = start;
383 state->end = end;
9ed74f2d
JB
384 ret = set_state_cb(tree, state, bits);
385 if (ret)
386 return ret;
387
388 if (bits & EXTENT_DIRTY)
389 tree->dirty_bytes += end - start + 1;
e48c465b 390 state->state |= bits;
d1310b2e
CM
391 node = tree_insert(&tree->state, end, &state->rb_node);
392 if (node) {
393 struct extent_state *found;
394 found = rb_entry(node, struct extent_state, rb_node);
d397712b
CM
395 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
396 "%llu %llu\n", (unsigned long long)found->start,
397 (unsigned long long)found->end,
398 (unsigned long long)start, (unsigned long long)end);
d1310b2e
CM
399 free_extent_state(state);
400 return -EEXIST;
401 }
70dec807 402 state->tree = tree;
d1310b2e
CM
403 merge_state(tree, state);
404 return 0;
405}
406
9ed74f2d
JB
407static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
408 u64 split)
409{
410 if (tree->ops && tree->ops->split_extent_hook)
411 return tree->ops->split_extent_hook(tree->mapping->host,
412 orig, split);
413 return 0;
414}
415
d1310b2e
CM
416/*
417 * split a given extent state struct in two, inserting the preallocated
418 * struct 'prealloc' as the newly created second half. 'split' indicates an
419 * offset inside 'orig' where it should be split.
420 *
421 * Before calling,
422 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
423 * are two extent state structs in the tree:
424 * prealloc: [orig->start, split - 1]
425 * orig: [ split, orig->end ]
426 *
427 * The tree locks are not taken by this function. They need to be held
428 * by the caller.
429 */
430static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
431 struct extent_state *prealloc, u64 split)
432{
433 struct rb_node *node;
9ed74f2d
JB
434
435 split_cb(tree, orig, split);
436
d1310b2e
CM
437 prealloc->start = orig->start;
438 prealloc->end = split - 1;
439 prealloc->state = orig->state;
440 orig->start = split;
441
442 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
443 if (node) {
d1310b2e
CM
444 free_extent_state(prealloc);
445 return -EEXIST;
446 }
70dec807 447 prealloc->tree = tree;
d1310b2e
CM
448 return 0;
449}
450
451/*
452 * utility function to clear some bits in an extent state struct.
453 * it will optionally wake up any one waiting on this state (wake == 1), or
454 * forcibly remove the state from the tree (delete == 1).
455 *
456 * If no bits are set on the state struct after clearing things, the
457 * struct is freed and removed from the tree
458 */
459static int clear_state_bit(struct extent_io_tree *tree,
460 struct extent_state *state, int bits, int wake,
461 int delete)
462{
32c00aff
JB
463 int bits_to_clear = bits & ~EXTENT_DO_ACCOUNTING;
464 int ret = state->state & bits_to_clear;
d1310b2e
CM
465
466 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
467 u64 range = state->end - state->start + 1;
468 WARN_ON(range > tree->dirty_bytes);
469 tree->dirty_bytes -= range;
470 }
291d673e 471 clear_state_cb(tree, state, bits);
32c00aff 472 state->state &= ~bits_to_clear;
d1310b2e
CM
473 if (wake)
474 wake_up(&state->wq);
475 if (delete || state->state == 0) {
70dec807 476 if (state->tree) {
ae9d1285 477 clear_state_cb(tree, state, state->state);
d1310b2e 478 rb_erase(&state->rb_node, &tree->state);
70dec807 479 state->tree = NULL;
d1310b2e
CM
480 free_extent_state(state);
481 } else {
482 WARN_ON(1);
483 }
484 } else {
485 merge_state(tree, state);
486 }
487 return ret;
488}
489
490/*
491 * clear some bits on a range in the tree. This may require splitting
492 * or inserting elements in the tree, so the gfp mask is used to
493 * indicate which allocations or sleeping are allowed.
494 *
495 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
496 * the given range from the tree regardless of state (ie for truncate).
497 *
498 * the range [start, end] is inclusive.
499 *
500 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
501 * bits were already set, or zero if none of the bits were already set.
502 */
503int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
2c64c53d
CM
504 int bits, int wake, int delete,
505 struct extent_state **cached_state,
506 gfp_t mask)
d1310b2e
CM
507{
508 struct extent_state *state;
2c64c53d 509 struct extent_state *cached;
d1310b2e 510 struct extent_state *prealloc = NULL;
2c64c53d 511 struct rb_node *next_node;
d1310b2e 512 struct rb_node *node;
5c939df5 513 u64 last_end;
d1310b2e
CM
514 int err;
515 int set = 0;
516
517again:
518 if (!prealloc && (mask & __GFP_WAIT)) {
519 prealloc = alloc_extent_state(mask);
520 if (!prealloc)
521 return -ENOMEM;
522 }
523
cad321ad 524 spin_lock(&tree->lock);
2c64c53d
CM
525 if (cached_state) {
526 cached = *cached_state;
527 *cached_state = NULL;
42daec29
CM
528 cached_state = NULL;
529 if (cached && cached->tree && cached->start == start) {
2c64c53d
CM
530 atomic_dec(&cached->refs);
531 state = cached;
42daec29 532 goto hit_next;
2c64c53d
CM
533 }
534 free_extent_state(cached);
535 }
d1310b2e
CM
536 /*
537 * this search will find the extents that end after
538 * our range starts
539 */
80ea96b1 540 node = tree_search(tree, start);
d1310b2e
CM
541 if (!node)
542 goto out;
543 state = rb_entry(node, struct extent_state, rb_node);
2c64c53d 544hit_next:
d1310b2e
CM
545 if (state->start > end)
546 goto out;
547 WARN_ON(state->end < start);
5c939df5 548 last_end = state->end;
d1310b2e
CM
549
550 /*
551 * | ---- desired range ---- |
552 * | state | or
553 * | ------------- state -------------- |
554 *
555 * We need to split the extent we found, and may flip
556 * bits on second half.
557 *
558 * If the extent we found extends past our range, we
559 * just split and search again. It'll get split again
560 * the next time though.
561 *
562 * If the extent we found is inside our range, we clear
563 * the desired bit on it.
564 */
565
566 if (state->start < start) {
70dec807
CM
567 if (!prealloc)
568 prealloc = alloc_extent_state(GFP_ATOMIC);
d1310b2e
CM
569 err = split_state(tree, state, prealloc, start);
570 BUG_ON(err == -EEXIST);
571 prealloc = NULL;
572 if (err)
573 goto out;
574 if (state->end <= end) {
9ed74f2d
JB
575 set |= clear_state_bit(tree, state, bits, wake,
576 delete);
5c939df5
YZ
577 if (last_end == (u64)-1)
578 goto out;
579 start = last_end + 1;
d1310b2e
CM
580 }
581 goto search_again;
582 }
583 /*
584 * | ---- desired range ---- |
585 * | state |
586 * We need to split the extent, and clear the bit
587 * on the first half
588 */
589 if (state->start <= end && state->end > end) {
70dec807
CM
590 if (!prealloc)
591 prealloc = alloc_extent_state(GFP_ATOMIC);
d1310b2e
CM
592 err = split_state(tree, state, prealloc, end + 1);
593 BUG_ON(err == -EEXIST);
d1310b2e
CM
594 if (wake)
595 wake_up(&state->wq);
42daec29 596
9ed74f2d
JB
597 set |= clear_state_bit(tree, prealloc, bits, wake, delete);
598
d1310b2e
CM
599 prealloc = NULL;
600 goto out;
601 }
42daec29 602
2c64c53d
CM
603 if (state->end < end && prealloc && !need_resched())
604 next_node = rb_next(&state->rb_node);
605 else
606 next_node = NULL;
42daec29 607
d1310b2e 608 set |= clear_state_bit(tree, state, bits, wake, delete);
5c939df5
YZ
609 if (last_end == (u64)-1)
610 goto out;
611 start = last_end + 1;
2c64c53d
CM
612 if (start <= end && next_node) {
613 state = rb_entry(next_node, struct extent_state,
614 rb_node);
615 if (state->start == start)
616 goto hit_next;
617 }
d1310b2e
CM
618 goto search_again;
619
620out:
cad321ad 621 spin_unlock(&tree->lock);
d1310b2e
CM
622 if (prealloc)
623 free_extent_state(prealloc);
624
625 return set;
626
627search_again:
628 if (start > end)
629 goto out;
cad321ad 630 spin_unlock(&tree->lock);
d1310b2e
CM
631 if (mask & __GFP_WAIT)
632 cond_resched();
633 goto again;
634}
d1310b2e
CM
635
636static int wait_on_state(struct extent_io_tree *tree,
637 struct extent_state *state)
641f5219
CH
638 __releases(tree->lock)
639 __acquires(tree->lock)
d1310b2e
CM
640{
641 DEFINE_WAIT(wait);
642 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
cad321ad 643 spin_unlock(&tree->lock);
d1310b2e 644 schedule();
cad321ad 645 spin_lock(&tree->lock);
d1310b2e
CM
646 finish_wait(&state->wq, &wait);
647 return 0;
648}
649
650/*
651 * waits for one or more bits to clear on a range in the state tree.
652 * The range [start, end] is inclusive.
653 * The tree lock is taken by this function
654 */
655int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
656{
657 struct extent_state *state;
658 struct rb_node *node;
659
cad321ad 660 spin_lock(&tree->lock);
d1310b2e
CM
661again:
662 while (1) {
663 /*
664 * this search will find all the extents that end after
665 * our range starts
666 */
80ea96b1 667 node = tree_search(tree, start);
d1310b2e
CM
668 if (!node)
669 break;
670
671 state = rb_entry(node, struct extent_state, rb_node);
672
673 if (state->start > end)
674 goto out;
675
676 if (state->state & bits) {
677 start = state->start;
678 atomic_inc(&state->refs);
679 wait_on_state(tree, state);
680 free_extent_state(state);
681 goto again;
682 }
683 start = state->end + 1;
684
685 if (start > end)
686 break;
687
688 if (need_resched()) {
cad321ad 689 spin_unlock(&tree->lock);
d1310b2e 690 cond_resched();
cad321ad 691 spin_lock(&tree->lock);
d1310b2e
CM
692 }
693 }
694out:
cad321ad 695 spin_unlock(&tree->lock);
d1310b2e
CM
696 return 0;
697}
d1310b2e 698
9ed74f2d 699static int set_state_bits(struct extent_io_tree *tree,
d1310b2e
CM
700 struct extent_state *state,
701 int bits)
702{
9ed74f2d
JB
703 int ret;
704
705 ret = set_state_cb(tree, state, bits);
706 if (ret)
707 return ret;
708
d1310b2e
CM
709 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
710 u64 range = state->end - state->start + 1;
711 tree->dirty_bytes += range;
712 }
b0c68f8b 713 state->state |= bits;
9ed74f2d
JB
714
715 return 0;
d1310b2e
CM
716}
717
2c64c53d
CM
718static void cache_state(struct extent_state *state,
719 struct extent_state **cached_ptr)
720{
721 if (cached_ptr && !(*cached_ptr)) {
722 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
723 *cached_ptr = state;
724 atomic_inc(&state->refs);
725 }
726 }
727}
728
d1310b2e 729/*
1edbb734
CM
730 * set some bits on a range in the tree. This may require allocations or
731 * sleeping, so the gfp mask is used to indicate what is allowed.
d1310b2e 732 *
1edbb734
CM
733 * If any of the exclusive bits are set, this will fail with -EEXIST if some
734 * part of the range already has the desired bits set. The start of the
735 * existing range is returned in failed_start in this case.
d1310b2e 736 *
1edbb734 737 * [start, end] is inclusive This takes the tree lock.
d1310b2e 738 */
1edbb734 739
d397712b 740static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1edbb734 741 int bits, int exclusive_bits, u64 *failed_start,
2c64c53d 742 struct extent_state **cached_state,
d397712b 743 gfp_t mask)
d1310b2e
CM
744{
745 struct extent_state *state;
746 struct extent_state *prealloc = NULL;
747 struct rb_node *node;
d1310b2e 748 int err = 0;
d1310b2e
CM
749 u64 last_start;
750 u64 last_end;
42daec29 751
d1310b2e
CM
752again:
753 if (!prealloc && (mask & __GFP_WAIT)) {
754 prealloc = alloc_extent_state(mask);
755 if (!prealloc)
756 return -ENOMEM;
757 }
758
cad321ad 759 spin_lock(&tree->lock);
9655d298
CM
760 if (cached_state && *cached_state) {
761 state = *cached_state;
762 if (state->start == start && state->tree) {
763 node = &state->rb_node;
764 goto hit_next;
765 }
766 }
d1310b2e
CM
767 /*
768 * this search will find all the extents that end after
769 * our range starts.
770 */
80ea96b1 771 node = tree_search(tree, start);
d1310b2e
CM
772 if (!node) {
773 err = insert_state(tree, prealloc, start, end, bits);
774 prealloc = NULL;
775 BUG_ON(err == -EEXIST);
776 goto out;
777 }
d1310b2e 778 state = rb_entry(node, struct extent_state, rb_node);
40431d6c 779hit_next:
d1310b2e
CM
780 last_start = state->start;
781 last_end = state->end;
782
783 /*
784 * | ---- desired range ---- |
785 * | state |
786 *
787 * Just lock what we found and keep going
788 */
789 if (state->start == start && state->end <= end) {
40431d6c 790 struct rb_node *next_node;
1edbb734 791 if (state->state & exclusive_bits) {
d1310b2e
CM
792 *failed_start = state->start;
793 err = -EEXIST;
794 goto out;
795 }
42daec29 796
9ed74f2d
JB
797 err = set_state_bits(tree, state, bits);
798 if (err)
799 goto out;
800
2c64c53d 801 cache_state(state, cached_state);
d1310b2e 802 merge_state(tree, state);
5c939df5
YZ
803 if (last_end == (u64)-1)
804 goto out;
40431d6c 805
5c939df5 806 start = last_end + 1;
40431d6c
CM
807 if (start < end && prealloc && !need_resched()) {
808 next_node = rb_next(node);
809 if (next_node) {
810 state = rb_entry(next_node, struct extent_state,
811 rb_node);
812 if (state->start == start)
813 goto hit_next;
814 }
815 }
d1310b2e
CM
816 goto search_again;
817 }
818
819 /*
820 * | ---- desired range ---- |
821 * | state |
822 * or
823 * | ------------- state -------------- |
824 *
825 * We need to split the extent we found, and may flip bits on
826 * second half.
827 *
828 * If the extent we found extends past our
829 * range, we just split and search again. It'll get split
830 * again the next time though.
831 *
832 * If the extent we found is inside our range, we set the
833 * desired bit on it.
834 */
835 if (state->start < start) {
1edbb734 836 if (state->state & exclusive_bits) {
d1310b2e
CM
837 *failed_start = start;
838 err = -EEXIST;
839 goto out;
840 }
841 err = split_state(tree, state, prealloc, start);
842 BUG_ON(err == -EEXIST);
843 prealloc = NULL;
844 if (err)
845 goto out;
846 if (state->end <= end) {
9ed74f2d
JB
847 err = set_state_bits(tree, state, bits);
848 if (err)
849 goto out;
2c64c53d 850 cache_state(state, cached_state);
d1310b2e 851 merge_state(tree, state);
5c939df5
YZ
852 if (last_end == (u64)-1)
853 goto out;
854 start = last_end + 1;
d1310b2e
CM
855 }
856 goto search_again;
857 }
858 /*
859 * | ---- desired range ---- |
860 * | state | or | state |
861 *
862 * There's a hole, we need to insert something in it and
863 * ignore the extent we found.
864 */
865 if (state->start > start) {
866 u64 this_end;
867 if (end < last_start)
868 this_end = end;
869 else
d397712b 870 this_end = last_start - 1;
d1310b2e
CM
871 err = insert_state(tree, prealloc, start, this_end,
872 bits);
d1310b2e 873 BUG_ON(err == -EEXIST);
9ed74f2d
JB
874 if (err) {
875 prealloc = NULL;
d1310b2e 876 goto out;
9ed74f2d
JB
877 }
878 cache_state(prealloc, cached_state);
879 prealloc = NULL;
d1310b2e
CM
880 start = this_end + 1;
881 goto search_again;
882 }
883 /*
884 * | ---- desired range ---- |
885 * | state |
886 * We need to split the extent, and set the bit
887 * on the first half
888 */
889 if (state->start <= end && state->end > end) {
1edbb734 890 if (state->state & exclusive_bits) {
d1310b2e
CM
891 *failed_start = start;
892 err = -EEXIST;
893 goto out;
894 }
895 err = split_state(tree, state, prealloc, end + 1);
896 BUG_ON(err == -EEXIST);
897
9ed74f2d
JB
898 err = set_state_bits(tree, prealloc, bits);
899 if (err) {
900 prealloc = NULL;
901 goto out;
902 }
2c64c53d 903 cache_state(prealloc, cached_state);
d1310b2e
CM
904 merge_state(tree, prealloc);
905 prealloc = NULL;
906 goto out;
907 }
908
909 goto search_again;
910
911out:
cad321ad 912 spin_unlock(&tree->lock);
d1310b2e
CM
913 if (prealloc)
914 free_extent_state(prealloc);
915
916 return err;
917
918search_again:
919 if (start > end)
920 goto out;
cad321ad 921 spin_unlock(&tree->lock);
d1310b2e
CM
922 if (mask & __GFP_WAIT)
923 cond_resched();
924 goto again;
925}
d1310b2e
CM
926
927/* wrappers around set/clear extent bit */
928int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
929 gfp_t mask)
930{
931 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
2c64c53d 932 NULL, mask);
d1310b2e 933}
d1310b2e
CM
934
935int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
936 int bits, gfp_t mask)
937{
938 return set_extent_bit(tree, start, end, bits, 0, NULL,
2c64c53d 939 NULL, mask);
d1310b2e 940}
d1310b2e
CM
941
942int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
943 int bits, gfp_t mask)
944{
2c64c53d 945 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
d1310b2e 946}
d1310b2e
CM
947
948int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
949 gfp_t mask)
950{
951 return set_extent_bit(tree, start, end,
40431d6c 952 EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
2c64c53d 953 0, NULL, NULL, mask);
d1310b2e 954}
d1310b2e
CM
955
956int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
957 gfp_t mask)
958{
959 return clear_extent_bit(tree, start, end,
32c00aff
JB
960 EXTENT_DIRTY | EXTENT_DELALLOC |
961 EXTENT_DO_ACCOUNTING, 0, 0,
2c64c53d 962 NULL, mask);
d1310b2e 963}
d1310b2e
CM
964
965int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
966 gfp_t mask)
967{
968 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
2c64c53d 969 NULL, mask);
d1310b2e 970}
d1310b2e 971
b2950863 972static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
d1310b2e
CM
973 gfp_t mask)
974{
2c64c53d
CM
975 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
976 NULL, mask);
d1310b2e 977}
d1310b2e
CM
978
979int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
980 gfp_t mask)
981{
982 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
2c64c53d 983 NULL, mask);
d1310b2e 984}
d1310b2e 985
d397712b
CM
986static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
987 u64 end, gfp_t mask)
d1310b2e 988{
2c64c53d
CM
989 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
990 NULL, mask);
d1310b2e 991}
d1310b2e 992
d1310b2e
CM
993int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
994{
995 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
996}
d1310b2e 997
d352ac68
CM
998/*
999 * either insert or lock state struct between start and end use mask to tell
1000 * us if waiting is desired.
1001 */
1edbb734 1002int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
2c64c53d 1003 int bits, struct extent_state **cached_state, gfp_t mask)
d1310b2e
CM
1004{
1005 int err;
1006 u64 failed_start;
1007 while (1) {
1edbb734 1008 err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
2c64c53d
CM
1009 EXTENT_LOCKED, &failed_start,
1010 cached_state, mask);
d1310b2e
CM
1011 if (err == -EEXIST && (mask & __GFP_WAIT)) {
1012 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1013 start = failed_start;
1014 } else {
1015 break;
1016 }
1017 WARN_ON(start > end);
1018 }
1019 return err;
1020}
d1310b2e 1021
1edbb734
CM
1022int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1023{
2c64c53d 1024 return lock_extent_bits(tree, start, end, 0, NULL, mask);
1edbb734
CM
1025}
1026
25179201
JB
1027int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
1028 gfp_t mask)
1029{
1030 int err;
1031 u64 failed_start;
1032
2c64c53d
CM
1033 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1034 &failed_start, NULL, mask);
6643558d
YZ
1035 if (err == -EEXIST) {
1036 if (failed_start > start)
1037 clear_extent_bit(tree, start, failed_start - 1,
2c64c53d 1038 EXTENT_LOCKED, 1, 0, NULL, mask);
25179201 1039 return 0;
6643558d 1040 }
25179201
JB
1041 return 1;
1042}
25179201 1043
2c64c53d
CM
1044int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1045 struct extent_state **cached, gfp_t mask)
1046{
1047 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1048 mask);
1049}
1050
d1310b2e
CM
1051int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
1052 gfp_t mask)
1053{
2c64c53d
CM
1054 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1055 mask);
d1310b2e 1056}
d1310b2e
CM
1057
1058/*
1059 * helper function to set pages and extents in the tree dirty
1060 */
1061int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
1062{
1063 unsigned long index = start >> PAGE_CACHE_SHIFT;
1064 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1065 struct page *page;
1066
1067 while (index <= end_index) {
1068 page = find_get_page(tree->mapping, index);
1069 BUG_ON(!page);
1070 __set_page_dirty_nobuffers(page);
1071 page_cache_release(page);
1072 index++;
1073 }
d1310b2e
CM
1074 return 0;
1075}
d1310b2e
CM
1076
1077/*
1078 * helper function to set both pages and extents in the tree writeback
1079 */
b2950863 1080static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
d1310b2e
CM
1081{
1082 unsigned long index = start >> PAGE_CACHE_SHIFT;
1083 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1084 struct page *page;
1085
1086 while (index <= end_index) {
1087 page = find_get_page(tree->mapping, index);
1088 BUG_ON(!page);
1089 set_page_writeback(page);
1090 page_cache_release(page);
1091 index++;
1092 }
d1310b2e
CM
1093 return 0;
1094}
d1310b2e 1095
d352ac68
CM
1096/*
1097 * find the first offset in the io tree with 'bits' set. zero is
1098 * returned if we find something, and *start_ret and *end_ret are
1099 * set to reflect the state struct that was found.
1100 *
1101 * If nothing was found, 1 is returned, < 0 on error
1102 */
d1310b2e
CM
1103int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1104 u64 *start_ret, u64 *end_ret, int bits)
1105{
1106 struct rb_node *node;
1107 struct extent_state *state;
1108 int ret = 1;
1109
cad321ad 1110 spin_lock(&tree->lock);
d1310b2e
CM
1111 /*
1112 * this search will find all the extents that end after
1113 * our range starts.
1114 */
80ea96b1 1115 node = tree_search(tree, start);
d397712b 1116 if (!node)
d1310b2e 1117 goto out;
d1310b2e 1118
d397712b 1119 while (1) {
d1310b2e
CM
1120 state = rb_entry(node, struct extent_state, rb_node);
1121 if (state->end >= start && (state->state & bits)) {
1122 *start_ret = state->start;
1123 *end_ret = state->end;
1124 ret = 0;
1125 break;
1126 }
1127 node = rb_next(node);
1128 if (!node)
1129 break;
1130 }
1131out:
cad321ad 1132 spin_unlock(&tree->lock);
d1310b2e
CM
1133 return ret;
1134}
d1310b2e 1135
d352ac68
CM
1136/* find the first state struct with 'bits' set after 'start', and
1137 * return it. tree->lock must be held. NULL will returned if
1138 * nothing was found after 'start'
1139 */
d7fc640e
CM
1140struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1141 u64 start, int bits)
1142{
1143 struct rb_node *node;
1144 struct extent_state *state;
1145
1146 /*
1147 * this search will find all the extents that end after
1148 * our range starts.
1149 */
1150 node = tree_search(tree, start);
d397712b 1151 if (!node)
d7fc640e 1152 goto out;
d7fc640e 1153
d397712b 1154 while (1) {
d7fc640e 1155 state = rb_entry(node, struct extent_state, rb_node);
d397712b 1156 if (state->end >= start && (state->state & bits))
d7fc640e 1157 return state;
d397712b 1158
d7fc640e
CM
1159 node = rb_next(node);
1160 if (!node)
1161 break;
1162 }
1163out:
1164 return NULL;
1165}
d7fc640e 1166
d352ac68
CM
1167/*
1168 * find a contiguous range of bytes in the file marked as delalloc, not
1169 * more than 'max_bytes'. start and end are used to return the range,
1170 *
1171 * 1 is returned if we find something, 0 if nothing was in the tree
1172 */
c8b97818
CM
1173static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1174 u64 *start, u64 *end, u64 max_bytes)
d1310b2e
CM
1175{
1176 struct rb_node *node;
1177 struct extent_state *state;
1178 u64 cur_start = *start;
1179 u64 found = 0;
1180 u64 total_bytes = 0;
1181
cad321ad 1182 spin_lock(&tree->lock);
c8b97818 1183
d1310b2e
CM
1184 /*
1185 * this search will find all the extents that end after
1186 * our range starts.
1187 */
80ea96b1 1188 node = tree_search(tree, cur_start);
2b114d1d 1189 if (!node) {
3b951516
CM
1190 if (!found)
1191 *end = (u64)-1;
d1310b2e
CM
1192 goto out;
1193 }
1194
d397712b 1195 while (1) {
d1310b2e 1196 state = rb_entry(node, struct extent_state, rb_node);
5b21f2ed
ZY
1197 if (found && (state->start != cur_start ||
1198 (state->state & EXTENT_BOUNDARY))) {
d1310b2e
CM
1199 goto out;
1200 }
1201 if (!(state->state & EXTENT_DELALLOC)) {
1202 if (!found)
1203 *end = state->end;
1204 goto out;
1205 }
d1310b2e
CM
1206 if (!found)
1207 *start = state->start;
1208 found++;
1209 *end = state->end;
1210 cur_start = state->end + 1;
1211 node = rb_next(node);
1212 if (!node)
1213 break;
1214 total_bytes += state->end - state->start + 1;
1215 if (total_bytes >= max_bytes)
1216 break;
1217 }
1218out:
cad321ad 1219 spin_unlock(&tree->lock);
d1310b2e
CM
1220 return found;
1221}
1222
c8b97818
CM
1223static noinline int __unlock_for_delalloc(struct inode *inode,
1224 struct page *locked_page,
1225 u64 start, u64 end)
1226{
1227 int ret;
1228 struct page *pages[16];
1229 unsigned long index = start >> PAGE_CACHE_SHIFT;
1230 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1231 unsigned long nr_pages = end_index - index + 1;
1232 int i;
1233
1234 if (index == locked_page->index && end_index == index)
1235 return 0;
1236
d397712b 1237 while (nr_pages > 0) {
c8b97818 1238 ret = find_get_pages_contig(inode->i_mapping, index,
5b050f04
CM
1239 min_t(unsigned long, nr_pages,
1240 ARRAY_SIZE(pages)), pages);
c8b97818
CM
1241 for (i = 0; i < ret; i++) {
1242 if (pages[i] != locked_page)
1243 unlock_page(pages[i]);
1244 page_cache_release(pages[i]);
1245 }
1246 nr_pages -= ret;
1247 index += ret;
1248 cond_resched();
1249 }
1250 return 0;
1251}
1252
1253static noinline int lock_delalloc_pages(struct inode *inode,
1254 struct page *locked_page,
1255 u64 delalloc_start,
1256 u64 delalloc_end)
1257{
1258 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1259 unsigned long start_index = index;
1260 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1261 unsigned long pages_locked = 0;
1262 struct page *pages[16];
1263 unsigned long nrpages;
1264 int ret;
1265 int i;
1266
1267 /* the caller is responsible for locking the start index */
1268 if (index == locked_page->index && index == end_index)
1269 return 0;
1270
1271 /* skip the page at the start index */
1272 nrpages = end_index - index + 1;
d397712b 1273 while (nrpages > 0) {
c8b97818 1274 ret = find_get_pages_contig(inode->i_mapping, index,
5b050f04
CM
1275 min_t(unsigned long,
1276 nrpages, ARRAY_SIZE(pages)), pages);
c8b97818
CM
1277 if (ret == 0) {
1278 ret = -EAGAIN;
1279 goto done;
1280 }
1281 /* now we have an array of pages, lock them all */
1282 for (i = 0; i < ret; i++) {
1283 /*
1284 * the caller is taking responsibility for
1285 * locked_page
1286 */
771ed689 1287 if (pages[i] != locked_page) {
c8b97818 1288 lock_page(pages[i]);
f2b1c41c
CM
1289 if (!PageDirty(pages[i]) ||
1290 pages[i]->mapping != inode->i_mapping) {
771ed689
CM
1291 ret = -EAGAIN;
1292 unlock_page(pages[i]);
1293 page_cache_release(pages[i]);
1294 goto done;
1295 }
1296 }
c8b97818 1297 page_cache_release(pages[i]);
771ed689 1298 pages_locked++;
c8b97818 1299 }
c8b97818
CM
1300 nrpages -= ret;
1301 index += ret;
1302 cond_resched();
1303 }
1304 ret = 0;
1305done:
1306 if (ret && pages_locked) {
1307 __unlock_for_delalloc(inode, locked_page,
1308 delalloc_start,
1309 ((u64)(start_index + pages_locked - 1)) <<
1310 PAGE_CACHE_SHIFT);
1311 }
1312 return ret;
1313}
1314
1315/*
1316 * find a contiguous range of bytes in the file marked as delalloc, not
1317 * more than 'max_bytes'. start and end are used to return the range,
1318 *
1319 * 1 is returned if we find something, 0 if nothing was in the tree
1320 */
1321static noinline u64 find_lock_delalloc_range(struct inode *inode,
1322 struct extent_io_tree *tree,
1323 struct page *locked_page,
1324 u64 *start, u64 *end,
1325 u64 max_bytes)
1326{
1327 u64 delalloc_start;
1328 u64 delalloc_end;
1329 u64 found;
9655d298 1330 struct extent_state *cached_state = NULL;
c8b97818
CM
1331 int ret;
1332 int loops = 0;
1333
1334again:
1335 /* step one, find a bunch of delalloc bytes starting at start */
1336 delalloc_start = *start;
1337 delalloc_end = 0;
1338 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1339 max_bytes);
70b99e69 1340 if (!found || delalloc_end <= *start) {
c8b97818
CM
1341 *start = delalloc_start;
1342 *end = delalloc_end;
1343 return found;
1344 }
1345
70b99e69
CM
1346 /*
1347 * start comes from the offset of locked_page. We have to lock
1348 * pages in order, so we can't process delalloc bytes before
1349 * locked_page
1350 */
d397712b 1351 if (delalloc_start < *start)
70b99e69 1352 delalloc_start = *start;
70b99e69 1353
c8b97818
CM
1354 /*
1355 * make sure to limit the number of pages we try to lock down
1356 * if we're looping.
1357 */
d397712b 1358 if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
771ed689 1359 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
d397712b 1360
c8b97818
CM
1361 /* step two, lock all the pages after the page that has start */
1362 ret = lock_delalloc_pages(inode, locked_page,
1363 delalloc_start, delalloc_end);
1364 if (ret == -EAGAIN) {
1365 /* some of the pages are gone, lets avoid looping by
1366 * shortening the size of the delalloc range we're searching
1367 */
9655d298 1368 free_extent_state(cached_state);
c8b97818
CM
1369 if (!loops) {
1370 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1371 max_bytes = PAGE_CACHE_SIZE - offset;
1372 loops = 1;
1373 goto again;
1374 } else {
1375 found = 0;
1376 goto out_failed;
1377 }
1378 }
1379 BUG_ON(ret);
1380
1381 /* step three, lock the state bits for the whole range */
9655d298
CM
1382 lock_extent_bits(tree, delalloc_start, delalloc_end,
1383 0, &cached_state, GFP_NOFS);
c8b97818
CM
1384
1385 /* then test to make sure it is all still delalloc */
1386 ret = test_range_bit(tree, delalloc_start, delalloc_end,
9655d298 1387 EXTENT_DELALLOC, 1, cached_state);
c8b97818 1388 if (!ret) {
9655d298
CM
1389 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1390 &cached_state, GFP_NOFS);
c8b97818
CM
1391 __unlock_for_delalloc(inode, locked_page,
1392 delalloc_start, delalloc_end);
1393 cond_resched();
1394 goto again;
1395 }
9655d298 1396 free_extent_state(cached_state);
c8b97818
CM
1397 *start = delalloc_start;
1398 *end = delalloc_end;
1399out_failed:
1400 return found;
1401}
1402
1403int extent_clear_unlock_delalloc(struct inode *inode,
1404 struct extent_io_tree *tree,
1405 u64 start, u64 end, struct page *locked_page,
a791e35e 1406 unsigned long op)
c8b97818
CM
1407{
1408 int ret;
1409 struct page *pages[16];
1410 unsigned long index = start >> PAGE_CACHE_SHIFT;
1411 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1412 unsigned long nr_pages = end_index - index + 1;
1413 int i;
771ed689 1414 int clear_bits = 0;
c8b97818 1415
a791e35e 1416 if (op & EXTENT_CLEAR_UNLOCK)
771ed689 1417 clear_bits |= EXTENT_LOCKED;
a791e35e 1418 if (op & EXTENT_CLEAR_DIRTY)
c8b97818
CM
1419 clear_bits |= EXTENT_DIRTY;
1420
a791e35e 1421 if (op & EXTENT_CLEAR_DELALLOC)
771ed689
CM
1422 clear_bits |= EXTENT_DELALLOC;
1423
32c00aff
JB
1424 if (op & EXTENT_CLEAR_ACCOUNTING)
1425 clear_bits |= EXTENT_DO_ACCOUNTING;
1426
2c64c53d 1427 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
32c00aff
JB
1428 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1429 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1430 EXTENT_SET_PRIVATE2)))
771ed689 1431 return 0;
c8b97818 1432
d397712b 1433 while (nr_pages > 0) {
c8b97818 1434 ret = find_get_pages_contig(inode->i_mapping, index,
5b050f04
CM
1435 min_t(unsigned long,
1436 nr_pages, ARRAY_SIZE(pages)), pages);
c8b97818 1437 for (i = 0; i < ret; i++) {
8b62b72b 1438
a791e35e 1439 if (op & EXTENT_SET_PRIVATE2)
8b62b72b
CM
1440 SetPagePrivate2(pages[i]);
1441
c8b97818
CM
1442 if (pages[i] == locked_page) {
1443 page_cache_release(pages[i]);
1444 continue;
1445 }
a791e35e 1446 if (op & EXTENT_CLEAR_DIRTY)
c8b97818 1447 clear_page_dirty_for_io(pages[i]);
a791e35e 1448 if (op & EXTENT_SET_WRITEBACK)
c8b97818 1449 set_page_writeback(pages[i]);
a791e35e 1450 if (op & EXTENT_END_WRITEBACK)
c8b97818 1451 end_page_writeback(pages[i]);
a791e35e 1452 if (op & EXTENT_CLEAR_UNLOCK_PAGE)
771ed689 1453 unlock_page(pages[i]);
c8b97818
CM
1454 page_cache_release(pages[i]);
1455 }
1456 nr_pages -= ret;
1457 index += ret;
1458 cond_resched();
1459 }
1460 return 0;
1461}
c8b97818 1462
d352ac68
CM
1463/*
1464 * count the number of bytes in the tree that have a given bit(s)
1465 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1466 * cached. The total number found is returned.
1467 */
d1310b2e
CM
1468u64 count_range_bits(struct extent_io_tree *tree,
1469 u64 *start, u64 search_end, u64 max_bytes,
1470 unsigned long bits)
1471{
1472 struct rb_node *node;
1473 struct extent_state *state;
1474 u64 cur_start = *start;
1475 u64 total_bytes = 0;
1476 int found = 0;
1477
1478 if (search_end <= cur_start) {
d1310b2e
CM
1479 WARN_ON(1);
1480 return 0;
1481 }
1482
cad321ad 1483 spin_lock(&tree->lock);
d1310b2e
CM
1484 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1485 total_bytes = tree->dirty_bytes;
1486 goto out;
1487 }
1488 /*
1489 * this search will find all the extents that end after
1490 * our range starts.
1491 */
80ea96b1 1492 node = tree_search(tree, cur_start);
d397712b 1493 if (!node)
d1310b2e 1494 goto out;
d1310b2e 1495
d397712b 1496 while (1) {
d1310b2e
CM
1497 state = rb_entry(node, struct extent_state, rb_node);
1498 if (state->start > search_end)
1499 break;
1500 if (state->end >= cur_start && (state->state & bits)) {
1501 total_bytes += min(search_end, state->end) + 1 -
1502 max(cur_start, state->start);
1503 if (total_bytes >= max_bytes)
1504 break;
1505 if (!found) {
1506 *start = state->start;
1507 found = 1;
1508 }
1509 }
1510 node = rb_next(node);
1511 if (!node)
1512 break;
1513 }
1514out:
cad321ad 1515 spin_unlock(&tree->lock);
d1310b2e
CM
1516 return total_bytes;
1517}
b2950863 1518
d352ac68
CM
1519/*
1520 * set the private field for a given byte offset in the tree. If there isn't
1521 * an extent_state there already, this does nothing.
1522 */
d1310b2e
CM
1523int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1524{
1525 struct rb_node *node;
1526 struct extent_state *state;
1527 int ret = 0;
1528
cad321ad 1529 spin_lock(&tree->lock);
d1310b2e
CM
1530 /*
1531 * this search will find all the extents that end after
1532 * our range starts.
1533 */
80ea96b1 1534 node = tree_search(tree, start);
2b114d1d 1535 if (!node) {
d1310b2e
CM
1536 ret = -ENOENT;
1537 goto out;
1538 }
1539 state = rb_entry(node, struct extent_state, rb_node);
1540 if (state->start != start) {
1541 ret = -ENOENT;
1542 goto out;
1543 }
1544 state->private = private;
1545out:
cad321ad 1546 spin_unlock(&tree->lock);
d1310b2e
CM
1547 return ret;
1548}
1549
1550int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1551{
1552 struct rb_node *node;
1553 struct extent_state *state;
1554 int ret = 0;
1555
cad321ad 1556 spin_lock(&tree->lock);
d1310b2e
CM
1557 /*
1558 * this search will find all the extents that end after
1559 * our range starts.
1560 */
80ea96b1 1561 node = tree_search(tree, start);
2b114d1d 1562 if (!node) {
d1310b2e
CM
1563 ret = -ENOENT;
1564 goto out;
1565 }
1566 state = rb_entry(node, struct extent_state, rb_node);
1567 if (state->start != start) {
1568 ret = -ENOENT;
1569 goto out;
1570 }
1571 *private = state->private;
1572out:
cad321ad 1573 spin_unlock(&tree->lock);
d1310b2e
CM
1574 return ret;
1575}
1576
1577/*
1578 * searches a range in the state tree for a given mask.
70dec807 1579 * If 'filled' == 1, this returns 1 only if every extent in the tree
d1310b2e
CM
1580 * has the bits set. Otherwise, 1 is returned if any bit in the
1581 * range is found set.
1582 */
1583int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
9655d298 1584 int bits, int filled, struct extent_state *cached)
d1310b2e
CM
1585{
1586 struct extent_state *state = NULL;
1587 struct rb_node *node;
1588 int bitset = 0;
d1310b2e 1589
cad321ad 1590 spin_lock(&tree->lock);
9655d298
CM
1591 if (cached && cached->tree && cached->start == start)
1592 node = &cached->rb_node;
1593 else
1594 node = tree_search(tree, start);
d1310b2e
CM
1595 while (node && start <= end) {
1596 state = rb_entry(node, struct extent_state, rb_node);
1597
1598 if (filled && state->start > start) {
1599 bitset = 0;
1600 break;
1601 }
1602
1603 if (state->start > end)
1604 break;
1605
1606 if (state->state & bits) {
1607 bitset = 1;
1608 if (!filled)
1609 break;
1610 } else if (filled) {
1611 bitset = 0;
1612 break;
1613 }
46562cec
CM
1614
1615 if (state->end == (u64)-1)
1616 break;
1617
d1310b2e
CM
1618 start = state->end + 1;
1619 if (start > end)
1620 break;
1621 node = rb_next(node);
1622 if (!node) {
1623 if (filled)
1624 bitset = 0;
1625 break;
1626 }
1627 }
cad321ad 1628 spin_unlock(&tree->lock);
d1310b2e
CM
1629 return bitset;
1630}
d1310b2e
CM
1631
1632/*
1633 * helper function to set a given page up to date if all the
1634 * extents in the tree for that page are up to date
1635 */
1636static int check_page_uptodate(struct extent_io_tree *tree,
1637 struct page *page)
1638{
1639 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1640 u64 end = start + PAGE_CACHE_SIZE - 1;
9655d298 1641 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
d1310b2e
CM
1642 SetPageUptodate(page);
1643 return 0;
1644}
1645
1646/*
1647 * helper function to unlock a page if all the extents in the tree
1648 * for that page are unlocked
1649 */
1650static int check_page_locked(struct extent_io_tree *tree,
1651 struct page *page)
1652{
1653 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1654 u64 end = start + PAGE_CACHE_SIZE - 1;
9655d298 1655 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
d1310b2e
CM
1656 unlock_page(page);
1657 return 0;
1658}
1659
1660/*
1661 * helper function to end page writeback if all the extents
1662 * in the tree for that page are done with writeback
1663 */
1664static int check_page_writeback(struct extent_io_tree *tree,
1665 struct page *page)
1666{
1edbb734 1667 end_page_writeback(page);
d1310b2e
CM
1668 return 0;
1669}
1670
1671/* lots and lots of room for performance fixes in the end_bio funcs */
1672
1673/*
1674 * after a writepage IO is done, we need to:
1675 * clear the uptodate bits on error
1676 * clear the writeback bits in the extent tree for this IO
1677 * end_page_writeback if the page has no more pending IO
1678 *
1679 * Scheduling is not allowed, so the extent state tree is expected
1680 * to have one and only one object corresponding to this IO.
1681 */
d1310b2e 1682static void end_bio_extent_writepage(struct bio *bio, int err)
d1310b2e 1683{
1259ab75 1684 int uptodate = err == 0;
d1310b2e 1685 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
902b22f3 1686 struct extent_io_tree *tree;
d1310b2e
CM
1687 u64 start;
1688 u64 end;
1689 int whole_page;
1259ab75 1690 int ret;
d1310b2e 1691
d1310b2e
CM
1692 do {
1693 struct page *page = bvec->bv_page;
902b22f3
DW
1694 tree = &BTRFS_I(page->mapping->host)->io_tree;
1695
d1310b2e
CM
1696 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1697 bvec->bv_offset;
1698 end = start + bvec->bv_len - 1;
1699
1700 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1701 whole_page = 1;
1702 else
1703 whole_page = 0;
1704
1705 if (--bvec >= bio->bi_io_vec)
1706 prefetchw(&bvec->bv_page->flags);
1259ab75
CM
1707 if (tree->ops && tree->ops->writepage_end_io_hook) {
1708 ret = tree->ops->writepage_end_io_hook(page, start,
902b22f3 1709 end, NULL, uptodate);
1259ab75
CM
1710 if (ret)
1711 uptodate = 0;
1712 }
1713
1714 if (!uptodate && tree->ops &&
1715 tree->ops->writepage_io_failed_hook) {
1716 ret = tree->ops->writepage_io_failed_hook(bio, page,
902b22f3 1717 start, end, NULL);
1259ab75 1718 if (ret == 0) {
1259ab75
CM
1719 uptodate = (err == 0);
1720 continue;
1721 }
1722 }
1723
d1310b2e 1724 if (!uptodate) {
1edbb734 1725 clear_extent_uptodate(tree, start, end, GFP_NOFS);
d1310b2e
CM
1726 ClearPageUptodate(page);
1727 SetPageError(page);
1728 }
70dec807 1729
d1310b2e
CM
1730 if (whole_page)
1731 end_page_writeback(page);
1732 else
1733 check_page_writeback(tree, page);
d1310b2e 1734 } while (bvec >= bio->bi_io_vec);
2b1f55b0 1735
d1310b2e 1736 bio_put(bio);
d1310b2e
CM
1737}
1738
1739/*
1740 * after a readpage IO is done, we need to:
1741 * clear the uptodate bits on error
1742 * set the uptodate bits if things worked
1743 * set the page up to date if all extents in the tree are uptodate
1744 * clear the lock bit in the extent tree
1745 * unlock the page if there are no other extents locked for it
1746 *
1747 * Scheduling is not allowed, so the extent state tree is expected
1748 * to have one and only one object corresponding to this IO.
1749 */
d1310b2e 1750static void end_bio_extent_readpage(struct bio *bio, int err)
d1310b2e
CM
1751{
1752 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
4125bf76
CM
1753 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
1754 struct bio_vec *bvec = bio->bi_io_vec;
902b22f3 1755 struct extent_io_tree *tree;
d1310b2e
CM
1756 u64 start;
1757 u64 end;
1758 int whole_page;
1759 int ret;
1760
d20f7043
CM
1761 if (err)
1762 uptodate = 0;
1763
d1310b2e
CM
1764 do {
1765 struct page *page = bvec->bv_page;
902b22f3
DW
1766 tree = &BTRFS_I(page->mapping->host)->io_tree;
1767
d1310b2e
CM
1768 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1769 bvec->bv_offset;
1770 end = start + bvec->bv_len - 1;
1771
1772 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1773 whole_page = 1;
1774 else
1775 whole_page = 0;
1776
4125bf76 1777 if (++bvec <= bvec_end)
d1310b2e
CM
1778 prefetchw(&bvec->bv_page->flags);
1779
1780 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
70dec807 1781 ret = tree->ops->readpage_end_io_hook(page, start, end,
902b22f3 1782 NULL);
d1310b2e
CM
1783 if (ret)
1784 uptodate = 0;
1785 }
7e38326f
CM
1786 if (!uptodate && tree->ops &&
1787 tree->ops->readpage_io_failed_hook) {
1788 ret = tree->ops->readpage_io_failed_hook(bio, page,
902b22f3 1789 start, end, NULL);
7e38326f 1790 if (ret == 0) {
3b951516
CM
1791 uptodate =
1792 test_bit(BIO_UPTODATE, &bio->bi_flags);
d20f7043
CM
1793 if (err)
1794 uptodate = 0;
7e38326f
CM
1795 continue;
1796 }
1797 }
d1310b2e 1798
771ed689 1799 if (uptodate) {
902b22f3
DW
1800 set_extent_uptodate(tree, start, end,
1801 GFP_ATOMIC);
771ed689 1802 }
902b22f3 1803 unlock_extent(tree, start, end, GFP_ATOMIC);
d1310b2e 1804
70dec807
CM
1805 if (whole_page) {
1806 if (uptodate) {
1807 SetPageUptodate(page);
1808 } else {
1809 ClearPageUptodate(page);
1810 SetPageError(page);
1811 }
d1310b2e 1812 unlock_page(page);
70dec807
CM
1813 } else {
1814 if (uptodate) {
1815 check_page_uptodate(tree, page);
1816 } else {
1817 ClearPageUptodate(page);
1818 SetPageError(page);
1819 }
d1310b2e 1820 check_page_locked(tree, page);
70dec807 1821 }
4125bf76 1822 } while (bvec <= bvec_end);
d1310b2e
CM
1823
1824 bio_put(bio);
d1310b2e
CM
1825}
1826
1827/*
1828 * IO done from prepare_write is pretty simple, we just unlock
1829 * the structs in the extent tree when done, and set the uptodate bits
1830 * as appropriate.
1831 */
d1310b2e 1832static void end_bio_extent_preparewrite(struct bio *bio, int err)
d1310b2e
CM
1833{
1834 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1835 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
902b22f3 1836 struct extent_io_tree *tree;
d1310b2e
CM
1837 u64 start;
1838 u64 end;
1839
d1310b2e
CM
1840 do {
1841 struct page *page = bvec->bv_page;
902b22f3
DW
1842 tree = &BTRFS_I(page->mapping->host)->io_tree;
1843
d1310b2e
CM
1844 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1845 bvec->bv_offset;
1846 end = start + bvec->bv_len - 1;
1847
1848 if (--bvec >= bio->bi_io_vec)
1849 prefetchw(&bvec->bv_page->flags);
1850
1851 if (uptodate) {
1852 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1853 } else {
1854 ClearPageUptodate(page);
1855 SetPageError(page);
1856 }
1857
1858 unlock_extent(tree, start, end, GFP_ATOMIC);
1859
1860 } while (bvec >= bio->bi_io_vec);
1861
1862 bio_put(bio);
d1310b2e
CM
1863}
1864
1865static struct bio *
1866extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1867 gfp_t gfp_flags)
1868{
1869 struct bio *bio;
1870
1871 bio = bio_alloc(gfp_flags, nr_vecs);
1872
1873 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1874 while (!bio && (nr_vecs /= 2))
1875 bio = bio_alloc(gfp_flags, nr_vecs);
1876 }
1877
1878 if (bio) {
e1c4b745 1879 bio->bi_size = 0;
d1310b2e
CM
1880 bio->bi_bdev = bdev;
1881 bio->bi_sector = first_sector;
1882 }
1883 return bio;
1884}
1885
c8b97818
CM
1886static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1887 unsigned long bio_flags)
d1310b2e 1888{
d1310b2e 1889 int ret = 0;
70dec807
CM
1890 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1891 struct page *page = bvec->bv_page;
1892 struct extent_io_tree *tree = bio->bi_private;
70dec807
CM
1893 u64 start;
1894 u64 end;
1895
1896 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1897 end = start + bvec->bv_len - 1;
1898
902b22f3 1899 bio->bi_private = NULL;
d1310b2e
CM
1900
1901 bio_get(bio);
1902
065631f6 1903 if (tree->ops && tree->ops->submit_bio_hook)
f188591e 1904 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
c8b97818 1905 mirror_num, bio_flags);
0b86a832
CM
1906 else
1907 submit_bio(rw, bio);
d1310b2e
CM
1908 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1909 ret = -EOPNOTSUPP;
1910 bio_put(bio);
1911 return ret;
1912}
1913
1914static int submit_extent_page(int rw, struct extent_io_tree *tree,
1915 struct page *page, sector_t sector,
1916 size_t size, unsigned long offset,
1917 struct block_device *bdev,
1918 struct bio **bio_ret,
1919 unsigned long max_pages,
f188591e 1920 bio_end_io_t end_io_func,
c8b97818
CM
1921 int mirror_num,
1922 unsigned long prev_bio_flags,
1923 unsigned long bio_flags)
d1310b2e
CM
1924{
1925 int ret = 0;
1926 struct bio *bio;
1927 int nr;
c8b97818
CM
1928 int contig = 0;
1929 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1930 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
5b050f04 1931 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
d1310b2e
CM
1932
1933 if (bio_ret && *bio_ret) {
1934 bio = *bio_ret;
c8b97818
CM
1935 if (old_compressed)
1936 contig = bio->bi_sector == sector;
1937 else
1938 contig = bio->bi_sector + (bio->bi_size >> 9) ==
1939 sector;
1940
1941 if (prev_bio_flags != bio_flags || !contig ||
239b14b3 1942 (tree->ops && tree->ops->merge_bio_hook &&
c8b97818
CM
1943 tree->ops->merge_bio_hook(page, offset, page_size, bio,
1944 bio_flags)) ||
1945 bio_add_page(bio, page, page_size, offset) < page_size) {
1946 ret = submit_one_bio(rw, bio, mirror_num,
1947 prev_bio_flags);
d1310b2e
CM
1948 bio = NULL;
1949 } else {
1950 return 0;
1951 }
1952 }
c8b97818
CM
1953 if (this_compressed)
1954 nr = BIO_MAX_PAGES;
1955 else
1956 nr = bio_get_nr_vecs(bdev);
1957
d1310b2e 1958 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
70dec807 1959
c8b97818 1960 bio_add_page(bio, page, page_size, offset);
d1310b2e
CM
1961 bio->bi_end_io = end_io_func;
1962 bio->bi_private = tree;
70dec807 1963
d397712b 1964 if (bio_ret)
d1310b2e 1965 *bio_ret = bio;
d397712b 1966 else
c8b97818 1967 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
d1310b2e
CM
1968
1969 return ret;
1970}
1971
1972void set_page_extent_mapped(struct page *page)
1973{
1974 if (!PagePrivate(page)) {
1975 SetPagePrivate(page);
d1310b2e 1976 page_cache_get(page);
6af118ce 1977 set_page_private(page, EXTENT_PAGE_PRIVATE);
d1310b2e
CM
1978 }
1979}
1980
b2950863 1981static void set_page_extent_head(struct page *page, unsigned long len)
d1310b2e
CM
1982{
1983 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1984}
1985
1986/*
1987 * basic readpage implementation. Locked extent state structs are inserted
1988 * into the tree and are removed when the IO is done (by the end_io
1989 * handlers)
1990 */
1991static int __extent_read_full_page(struct extent_io_tree *tree,
1992 struct page *page,
1993 get_extent_t *get_extent,
c8b97818
CM
1994 struct bio **bio, int mirror_num,
1995 unsigned long *bio_flags)
d1310b2e
CM
1996{
1997 struct inode *inode = page->mapping->host;
1998 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1999 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2000 u64 end;
2001 u64 cur = start;
2002 u64 extent_offset;
2003 u64 last_byte = i_size_read(inode);
2004 u64 block_start;
2005 u64 cur_end;
2006 sector_t sector;
2007 struct extent_map *em;
2008 struct block_device *bdev;
2009 int ret;
2010 int nr = 0;
2011 size_t page_offset = 0;
2012 size_t iosize;
c8b97818 2013 size_t disk_io_size;
d1310b2e 2014 size_t blocksize = inode->i_sb->s_blocksize;
c8b97818 2015 unsigned long this_bio_flag = 0;
d1310b2e
CM
2016
2017 set_page_extent_mapped(page);
2018
2019 end = page_end;
2020 lock_extent(tree, start, end, GFP_NOFS);
2021
c8b97818
CM
2022 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2023 char *userpage;
2024 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2025
2026 if (zero_offset) {
2027 iosize = PAGE_CACHE_SIZE - zero_offset;
2028 userpage = kmap_atomic(page, KM_USER0);
2029 memset(userpage + zero_offset, 0, iosize);
2030 flush_dcache_page(page);
2031 kunmap_atomic(userpage, KM_USER0);
2032 }
2033 }
d1310b2e
CM
2034 while (cur <= end) {
2035 if (cur >= last_byte) {
2036 char *userpage;
2037 iosize = PAGE_CACHE_SIZE - page_offset;
2038 userpage = kmap_atomic(page, KM_USER0);
2039 memset(userpage + page_offset, 0, iosize);
2040 flush_dcache_page(page);
2041 kunmap_atomic(userpage, KM_USER0);
2042 set_extent_uptodate(tree, cur, cur + iosize - 1,
2043 GFP_NOFS);
2044 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2045 break;
2046 }
2047 em = get_extent(inode, page, page_offset, cur,
2048 end - cur + 1, 0);
2049 if (IS_ERR(em) || !em) {
2050 SetPageError(page);
2051 unlock_extent(tree, cur, end, GFP_NOFS);
2052 break;
2053 }
d1310b2e
CM
2054 extent_offset = cur - em->start;
2055 BUG_ON(extent_map_end(em) <= cur);
2056 BUG_ON(end < cur);
2057
c8b97818
CM
2058 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2059 this_bio_flag = EXTENT_BIO_COMPRESSED;
2060
d1310b2e
CM
2061 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2062 cur_end = min(extent_map_end(em) - 1, end);
2063 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
c8b97818
CM
2064 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2065 disk_io_size = em->block_len;
2066 sector = em->block_start >> 9;
2067 } else {
2068 sector = (em->block_start + extent_offset) >> 9;
2069 disk_io_size = iosize;
2070 }
d1310b2e
CM
2071 bdev = em->bdev;
2072 block_start = em->block_start;
d899e052
YZ
2073 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2074 block_start = EXTENT_MAP_HOLE;
d1310b2e
CM
2075 free_extent_map(em);
2076 em = NULL;
2077
2078 /* we've found a hole, just zero and go on */
2079 if (block_start == EXTENT_MAP_HOLE) {
2080 char *userpage;
2081 userpage = kmap_atomic(page, KM_USER0);
2082 memset(userpage + page_offset, 0, iosize);
2083 flush_dcache_page(page);
2084 kunmap_atomic(userpage, KM_USER0);
2085
2086 set_extent_uptodate(tree, cur, cur + iosize - 1,
2087 GFP_NOFS);
2088 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2089 cur = cur + iosize;
2090 page_offset += iosize;
2091 continue;
2092 }
2093 /* the get_extent function already copied into the page */
9655d298
CM
2094 if (test_range_bit(tree, cur, cur_end,
2095 EXTENT_UPTODATE, 1, NULL)) {
a1b32a59 2096 check_page_uptodate(tree, page);
d1310b2e
CM
2097 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2098 cur = cur + iosize;
2099 page_offset += iosize;
2100 continue;
2101 }
70dec807
CM
2102 /* we have an inline extent but it didn't get marked up
2103 * to date. Error out
2104 */
2105 if (block_start == EXTENT_MAP_INLINE) {
2106 SetPageError(page);
2107 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2108 cur = cur + iosize;
2109 page_offset += iosize;
2110 continue;
2111 }
d1310b2e
CM
2112
2113 ret = 0;
2114 if (tree->ops && tree->ops->readpage_io_hook) {
2115 ret = tree->ops->readpage_io_hook(page, cur,
2116 cur + iosize - 1);
2117 }
2118 if (!ret) {
89642229
CM
2119 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2120 pnr -= page->index;
d1310b2e 2121 ret = submit_extent_page(READ, tree, page,
c8b97818 2122 sector, disk_io_size, page_offset,
89642229 2123 bdev, bio, pnr,
c8b97818
CM
2124 end_bio_extent_readpage, mirror_num,
2125 *bio_flags,
2126 this_bio_flag);
89642229 2127 nr++;
c8b97818 2128 *bio_flags = this_bio_flag;
d1310b2e
CM
2129 }
2130 if (ret)
2131 SetPageError(page);
2132 cur = cur + iosize;
2133 page_offset += iosize;
d1310b2e
CM
2134 }
2135 if (!nr) {
2136 if (!PageError(page))
2137 SetPageUptodate(page);
2138 unlock_page(page);
2139 }
2140 return 0;
2141}
2142
2143int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2144 get_extent_t *get_extent)
2145{
2146 struct bio *bio = NULL;
c8b97818 2147 unsigned long bio_flags = 0;
d1310b2e
CM
2148 int ret;
2149
c8b97818
CM
2150 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2151 &bio_flags);
d1310b2e 2152 if (bio)
c8b97818 2153 submit_one_bio(READ, bio, 0, bio_flags);
d1310b2e
CM
2154 return ret;
2155}
d1310b2e 2156
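/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * ->readpage method typically just forwards to extent_read_full_page()
 * with its own get_extent callback.  my_readpage and my_get_extent are
 * assumed names for illustration only; the block is wrapped in #if 0 to
 * make clear it is an uncompiled example, not btrfs's actual code.
 */
#if 0
static int my_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	/* submits any bio built up while reading this page before returning */
	return extent_read_full_page(tree, page, my_get_extent);
}
#endif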
11c8349b
CM
2157static noinline void update_nr_written(struct page *page,
2158 struct writeback_control *wbc,
2159 unsigned long nr_written)
2160{
2161 wbc->nr_to_write -= nr_written;
2162 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2163 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2164 page->mapping->writeback_index = page->index + nr_written;
2165}
2166
d1310b2e
CM
2167/*
2168 * the writepage semantics are similar to regular writepage. extent
2169 * records are inserted to lock ranges in the tree, and as dirty areas
2170 * are found, they are marked writeback. Then the lock bits are removed
2171 * and the end_io handler clears the writeback ranges
2172 */
2173static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2174 void *data)
2175{
2176 struct inode *inode = page->mapping->host;
2177 struct extent_page_data *epd = data;
2178 struct extent_io_tree *tree = epd->tree;
2179 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2180 u64 delalloc_start;
2181 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2182 u64 end;
2183 u64 cur = start;
2184 u64 extent_offset;
2185 u64 last_byte = i_size_read(inode);
2186 u64 block_start;
2187 u64 iosize;
e6dcd2dc 2188 u64 unlock_start;
d1310b2e 2189 sector_t sector;
2c64c53d 2190 struct extent_state *cached_state = NULL;
d1310b2e
CM
2191 struct extent_map *em;
2192 struct block_device *bdev;
2193 int ret;
2194 int nr = 0;
7f3c74fb 2195 size_t pg_offset = 0;
d1310b2e
CM
2196 size_t blocksize;
2197 loff_t i_size = i_size_read(inode);
2198 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2199 u64 nr_delalloc;
2200 u64 delalloc_end;
c8b97818
CM
2201 int page_started;
2202 int compressed;
ffbd517d 2203 int write_flags;
771ed689 2204 unsigned long nr_written = 0;
d1310b2e 2205
ffbd517d
CM
2206 if (wbc->sync_mode == WB_SYNC_ALL)
2207 write_flags = WRITE_SYNC_PLUG;
2208 else
2209 write_flags = WRITE;
2210
d1310b2e 2211 WARN_ON(!PageLocked(page));
7f3c74fb 2212 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
211c17f5 2213 if (page->index > end_index ||
7f3c74fb 2214 (page->index == end_index && !pg_offset)) {
39be25cd 2215 page->mapping->a_ops->invalidatepage(page, 0);
d1310b2e
CM
2216 unlock_page(page);
2217 return 0;
2218 }
2219
2220 if (page->index == end_index) {
2221 char *userpage;
2222
d1310b2e 2223 userpage = kmap_atomic(page, KM_USER0);
7f3c74fb
CM
2224 memset(userpage + pg_offset, 0,
2225 PAGE_CACHE_SIZE - pg_offset);
d1310b2e 2226 kunmap_atomic(userpage, KM_USER0);
211c17f5 2227 flush_dcache_page(page);
d1310b2e 2228 }
7f3c74fb 2229 pg_offset = 0;
d1310b2e
CM
2230
2231 set_page_extent_mapped(page);
2232
2233 delalloc_start = start;
2234 delalloc_end = 0;
c8b97818 2235 page_started = 0;
771ed689 2236 if (!epd->extent_locked) {
f85d7d6c 2237 u64 delalloc_to_write = 0;
11c8349b
CM
2238 /*
2239 * make sure the wbc mapping index is at least updated
2240 * to this page.
2241 */
2242 update_nr_written(page, wbc, 0);
2243
d397712b 2244 while (delalloc_end < page_end) {
771ed689 2245 nr_delalloc = find_lock_delalloc_range(inode, tree,
c8b97818
CM
2246 page,
2247 &delalloc_start,
d1310b2e
CM
2248 &delalloc_end,
2249 128 * 1024 * 1024);
771ed689
CM
2250 if (nr_delalloc == 0) {
2251 delalloc_start = delalloc_end + 1;
2252 continue;
2253 }
2254 tree->ops->fill_delalloc(inode, page, delalloc_start,
2255 delalloc_end, &page_started,
2256 &nr_written);
f85d7d6c
CM
2257 /*
2258 * delalloc_end is already one less than the total
2259 * length, so we don't subtract one from
2260 * PAGE_CACHE_SIZE
2261 */
2262 delalloc_to_write += (delalloc_end - delalloc_start +
2263 PAGE_CACHE_SIZE) >>
2264 PAGE_CACHE_SHIFT;
d1310b2e 2265 delalloc_start = delalloc_end + 1;
d1310b2e 2266 }
f85d7d6c
CM
2267 if (wbc->nr_to_write < delalloc_to_write) {
2268 int thresh = 8192;
2269
2270 if (delalloc_to_write < thresh * 2)
2271 thresh = delalloc_to_write;
2272 wbc->nr_to_write = min_t(u64, delalloc_to_write,
2273 thresh);
2274 }
c8b97818 2275
771ed689
CM
2276 /* did the fill delalloc function already unlock and start
2277 * the IO?
2278 */
2279 if (page_started) {
2280 ret = 0;
11c8349b
CM
2281 /*
2282 * we've unlocked the page, so we can't update
2283 * the mapping's writeback index, just update
2284 * nr_to_write.
2285 */
2286 wbc->nr_to_write -= nr_written;
2287 goto done_unlocked;
771ed689 2288 }
c8b97818 2289 }
247e743c 2290 if (tree->ops && tree->ops->writepage_start_hook) {
c8b97818
CM
2291 ret = tree->ops->writepage_start_hook(page, start,
2292 page_end);
247e743c 2293 if (ret == -EAGAIN) {
247e743c 2294 redirty_page_for_writepage(wbc, page);
11c8349b 2295 update_nr_written(page, wbc, nr_written);
247e743c 2296 unlock_page(page);
771ed689 2297 ret = 0;
11c8349b 2298 goto done_unlocked;
247e743c
CM
2299 }
2300 }
2301
11c8349b
CM
2302 /*
2303 * we don't want to touch the inode after unlocking the page,
2304 * so we update the mapping writeback index now
2305 */
2306 update_nr_written(page, wbc, nr_written + 1);
771ed689 2307
d1310b2e 2308 end = page_end;
d1310b2e 2309 if (last_byte <= start) {
e6dcd2dc
CM
2310 if (tree->ops && tree->ops->writepage_end_io_hook)
2311 tree->ops->writepage_end_io_hook(page, start,
2312 page_end, NULL, 1);
2313 unlock_start = page_end + 1;
d1310b2e
CM
2314 goto done;
2315 }
2316
d1310b2e
CM
2317 blocksize = inode->i_sb->s_blocksize;
2318
2319 while (cur <= end) {
2320 if (cur >= last_byte) {
e6dcd2dc
CM
2321 if (tree->ops && tree->ops->writepage_end_io_hook)
2322 tree->ops->writepage_end_io_hook(page, cur,
2323 page_end, NULL, 1);
2324 unlock_start = page_end + 1;
d1310b2e
CM
2325 break;
2326 }
7f3c74fb 2327 em = epd->get_extent(inode, page, pg_offset, cur,
d1310b2e
CM
2328 end - cur + 1, 1);
2329 if (IS_ERR(em) || !em) {
2330 SetPageError(page);
2331 break;
2332 }
2333
2334 extent_offset = cur - em->start;
2335 BUG_ON(extent_map_end(em) <= cur);
2336 BUG_ON(end < cur);
2337 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2338 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2339 sector = (em->block_start + extent_offset) >> 9;
2340 bdev = em->bdev;
2341 block_start = em->block_start;
c8b97818 2342 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
d1310b2e
CM
2343 free_extent_map(em);
2344 em = NULL;
2345
c8b97818
CM
2346 /*
2347 * compressed and inline extents are written through other
2348 * paths in the FS
2349 */
2350 if (compressed || block_start == EXTENT_MAP_HOLE ||
d1310b2e 2351 block_start == EXTENT_MAP_INLINE) {
c8b97818
CM
2352 /*
2353 * end_io notification does not happen here for
2354 * compressed extents
2355 */
2356 if (!compressed && tree->ops &&
2357 tree->ops->writepage_end_io_hook)
e6dcd2dc
CM
2358 tree->ops->writepage_end_io_hook(page, cur,
2359 cur + iosize - 1,
2360 NULL, 1);
c8b97818
CM
2361 else if (compressed) {
2362 /* we don't want to end_page_writeback on
2363 * a compressed extent. this happens
2364 * elsewhere
2365 */
2366 nr++;
2367 }
2368
2369 cur += iosize;
7f3c74fb 2370 pg_offset += iosize;
e6dcd2dc 2371 unlock_start = cur;
d1310b2e
CM
2372 continue;
2373 }
d1310b2e
CM
2374 /* leave this out until we have a page_mkwrite call */
2375 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
9655d298 2376 EXTENT_DIRTY, 0, NULL)) {
d1310b2e 2377 cur = cur + iosize;
7f3c74fb 2378 pg_offset += iosize;
d1310b2e
CM
2379 continue;
2380 }
c8b97818 2381
d1310b2e
CM
2382 if (tree->ops && tree->ops->writepage_io_hook) {
2383 ret = tree->ops->writepage_io_hook(page, cur,
2384 cur + iosize - 1);
2385 } else {
2386 ret = 0;
2387 }
1259ab75 2388 if (ret) {
d1310b2e 2389 SetPageError(page);
1259ab75 2390 } else {
d1310b2e 2391 unsigned long max_nr = end_index + 1;
7f3c74fb 2392
d1310b2e
CM
2393 set_range_writeback(tree, cur, cur + iosize - 1);
2394 if (!PageWriteback(page)) {
d397712b
CM
2395 printk(KERN_ERR "btrfs warning page %lu not "
2396 "writeback, cur %llu end %llu\n",
2397 page->index, (unsigned long long)cur,
d1310b2e
CM
2398 (unsigned long long)end);
2399 }
2400
ffbd517d
CM
2401 ret = submit_extent_page(write_flags, tree, page,
2402 sector, iosize, pg_offset,
2403 bdev, &epd->bio, max_nr,
c8b97818
CM
2404 end_bio_extent_writepage,
2405 0, 0, 0);
d1310b2e
CM
2406 if (ret)
2407 SetPageError(page);
2408 }
2409 cur = cur + iosize;
7f3c74fb 2410 pg_offset += iosize;
d1310b2e
CM
2411 nr++;
2412 }
2413done:
2414 if (nr == 0) {
2415 /* make sure the mapping tag for page dirty gets cleared */
2416 set_page_writeback(page);
2417 end_page_writeback(page);
2418 }
d1310b2e 2419 unlock_page(page);
771ed689 2420
11c8349b
CM
2421done_unlocked:
2422
2c64c53d
CM
2423 /* drop our reference on any cached states */
2424 free_extent_state(cached_state);
d1310b2e
CM
2425 return 0;
2426}
2427
d1310b2e 2428/**
4bef0848 2429 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
d1310b2e
CM
2430 * @mapping: address space structure to write
2431 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2432 * @writepage: function called for each page
2433 * @data: data passed to writepage function
2434 *
2435 * If a page is already under I/O, write_cache_pages() skips it, even
2436 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2437 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2438 * and msync() need to guarantee that all the data which was dirty at the time
2439 * the call was made get new I/O started against them. If wbc->sync_mode is
2440 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2441 * existing IO to complete.
2442 */
b2950863 2443static int extent_write_cache_pages(struct extent_io_tree *tree,
4bef0848
CM
2444 struct address_space *mapping,
2445 struct writeback_control *wbc,
d2c3f4f6
CM
2446 writepage_t writepage, void *data,
2447 void (*flush_fn)(void *))
d1310b2e 2448{
d1310b2e
CM
2449 int ret = 0;
2450 int done = 0;
f85d7d6c 2451 int nr_to_write_done = 0;
d1310b2e
CM
2452 struct pagevec pvec;
2453 int nr_pages;
2454 pgoff_t index;
2455 pgoff_t end; /* Inclusive */
2456 int scanned = 0;
2457 int range_whole = 0;
2458
d1310b2e
CM
2459 pagevec_init(&pvec, 0);
2460 if (wbc->range_cyclic) {
2461 index = mapping->writeback_index; /* Start from prev offset */
2462 end = -1;
2463 } else {
2464 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2465 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2466 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2467 range_whole = 1;
2468 scanned = 1;
2469 }
2470retry:
f85d7d6c 2471 while (!done && !nr_to_write_done && (index <= end) &&
d1310b2e 2472 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
d397712b
CM
2473 PAGECACHE_TAG_DIRTY, min(end - index,
2474 (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
d1310b2e
CM
2475 unsigned i;
2476
2477 scanned = 1;
2478 for (i = 0; i < nr_pages; i++) {
2479 struct page *page = pvec.pages[i];
2480
2481 /*
2482 * At this point we hold neither mapping->tree_lock nor
2483 * lock on the page itself: the page may be truncated or
2484 * invalidated (changing page->mapping to NULL), or even
2485 * swizzled back from swapper_space to tmpfs file
2486 * mapping
2487 */
4bef0848
CM
2488 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2489 tree->ops->write_cache_pages_lock_hook(page);
2490 else
2491 lock_page(page);
d1310b2e
CM
2492
2493 if (unlikely(page->mapping != mapping)) {
2494 unlock_page(page);
2495 continue;
2496 }
2497
2498 if (!wbc->range_cyclic && page->index > end) {
2499 done = 1;
2500 unlock_page(page);
2501 continue;
2502 }
2503
d2c3f4f6 2504 if (wbc->sync_mode != WB_SYNC_NONE) {
0e6bd956
CM
2505 if (PageWriteback(page))
2506 flush_fn(data);
d1310b2e 2507 wait_on_page_writeback(page);
d2c3f4f6 2508 }
d1310b2e
CM
2509
2510 if (PageWriteback(page) ||
2511 !clear_page_dirty_for_io(page)) {
2512 unlock_page(page);
2513 continue;
2514 }
2515
2516 ret = (*writepage)(page, wbc, data);
2517
2518 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2519 unlock_page(page);
2520 ret = 0;
2521 }
f85d7d6c 2522 if (ret)
d1310b2e 2523 done = 1;
f85d7d6c
CM
2524
2525 /*
2526 * the filesystem may choose to bump up nr_to_write.
2527 * We have to make sure to honor the new nr_to_write
2528 * at any time
2529 */
2530 nr_to_write_done = wbc->nr_to_write <= 0;
d1310b2e
CM
2531 }
2532 pagevec_release(&pvec);
2533 cond_resched();
2534 }
2535 if (!scanned && !done) {
2536 /*
2537 * We hit the last page and there is more work to be done: wrap
2538 * back to the start of the file
2539 */
2540 scanned = 1;
2541 index = 0;
2542 goto retry;
2543 }
d1310b2e
CM
2544 return ret;
2545}
d1310b2e 2546
ffbd517d 2547static void flush_epd_write_bio(struct extent_page_data *epd)
d2c3f4f6 2548{
d2c3f4f6 2549 if (epd->bio) {
ffbd517d
CM
2550 if (epd->sync_io)
2551 submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
2552 else
2553 submit_one_bio(WRITE, epd->bio, 0, 0);
d2c3f4f6
CM
2554 epd->bio = NULL;
2555 }
2556}
2557
ffbd517d
CM
2558static noinline void flush_write_bio(void *data)
2559{
2560 struct extent_page_data *epd = data;
2561 flush_epd_write_bio(epd);
2562}
2563
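/*
 * Illustrative sketch, not part of the original file: how a data-integrity
 * flush over a whole mapping could drive extent_write_cache_pages()
 * directly.  With WB_SYNC_ALL the loop above waits on pages already under
 * writeback instead of skipping them, which is the guarantee fsync()-style
 * callers need.  extent_write_full_page()/extent_writepages() below are the
 * real in-tree users; my_get_extent is an assumed get_extent_t callback.
 */
#if 0
static int my_sync_mapping(struct extent_io_tree *tree,
			   struct address_space *mapping)
{
	int ret;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = my_get_extent,	/* assumed callback */
		.extent_locked = 0,
		.sync_io = 1,		/* bios go out with WRITE_SYNC */
	};
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	ret = extent_write_cache_pages(tree, mapping, &wbc,
				       __extent_writepage, &epd,
				       flush_write_bio);
	flush_epd_write_bio(&epd);
	return ret;
}
#endif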
d1310b2e
CM
2564int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2565 get_extent_t *get_extent,
2566 struct writeback_control *wbc)
2567{
2568 int ret;
2569 struct address_space *mapping = page->mapping;
2570 struct extent_page_data epd = {
2571 .bio = NULL,
2572 .tree = tree,
2573 .get_extent = get_extent,
771ed689 2574 .extent_locked = 0,
ffbd517d 2575 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
d1310b2e
CM
2576 };
2577 struct writeback_control wbc_writepages = {
2578 .bdi = wbc->bdi,
d313d7a3 2579 .sync_mode = wbc->sync_mode,
d1310b2e
CM
2580 .older_than_this = NULL,
2581 .nr_to_write = 64,
2582 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2583 .range_end = (loff_t)-1,
2584 };
2585
d1310b2e
CM
2586 ret = __extent_writepage(page, wbc, &epd);
2587
4bef0848 2588 extent_write_cache_pages(tree, mapping, &wbc_writepages,
d2c3f4f6 2589 __extent_writepage, &epd, flush_write_bio);
ffbd517d 2590 flush_epd_write_bio(&epd);
d1310b2e
CM
2591 return ret;
2592}
d1310b2e 2593
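/*
 * Illustrative sketch, not part of the original file: the usual way the VM
 * reaches extent_write_full_page() is through an address_space ->writepage
 * method that simply forwards the page and writeback_control.  my_writepage
 * and my_get_extent are assumed names for illustration only.
 */
#if 0
static int my_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_write_full_page(tree, page, my_get_extent, wbc);
}
#endif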
771ed689
CM
2594int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2595 u64 start, u64 end, get_extent_t *get_extent,
2596 int mode)
2597{
2598 int ret = 0;
2599 struct address_space *mapping = inode->i_mapping;
2600 struct page *page;
2601 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
2602 PAGE_CACHE_SHIFT;
2603
2604 struct extent_page_data epd = {
2605 .bio = NULL,
2606 .tree = tree,
2607 .get_extent = get_extent,
2608 .extent_locked = 1,
ffbd517d 2609 .sync_io = mode == WB_SYNC_ALL,
771ed689
CM
2610 };
2611 struct writeback_control wbc_writepages = {
2612 .bdi = inode->i_mapping->backing_dev_info,
2613 .sync_mode = mode,
2614 .older_than_this = NULL,
2615 .nr_to_write = nr_pages * 2,
2616 .range_start = start,
2617 .range_end = end + 1,
2618 };
2619
d397712b 2620 while (start <= end) {
771ed689
CM
2621 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2622 if (clear_page_dirty_for_io(page))
2623 ret = __extent_writepage(page, &wbc_writepages, &epd);
2624 else {
2625 if (tree->ops && tree->ops->writepage_end_io_hook)
2626 tree->ops->writepage_end_io_hook(page, start,
2627 start + PAGE_CACHE_SIZE - 1,
2628 NULL, 1);
2629 unlock_page(page);
2630 }
2631 page_cache_release(page);
2632 start += PAGE_CACHE_SIZE;
2633 }
2634
ffbd517d 2635 flush_epd_write_bio(&epd);
771ed689
CM
2636 return ret;
2637}
d1310b2e
CM
2638
2639int extent_writepages(struct extent_io_tree *tree,
2640 struct address_space *mapping,
2641 get_extent_t *get_extent,
2642 struct writeback_control *wbc)
2643{
2644 int ret = 0;
2645 struct extent_page_data epd = {
2646 .bio = NULL,
2647 .tree = tree,
2648 .get_extent = get_extent,
771ed689 2649 .extent_locked = 0,
ffbd517d 2650 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
d1310b2e
CM
2651 };
2652
4bef0848 2653 ret = extent_write_cache_pages(tree, mapping, wbc,
d2c3f4f6
CM
2654 __extent_writepage, &epd,
2655 flush_write_bio);
ffbd517d 2656 flush_epd_write_bio(&epd);
d1310b2e
CM
2657 return ret;
2658}
d1310b2e
CM
2659
2660int extent_readpages(struct extent_io_tree *tree,
2661 struct address_space *mapping,
2662 struct list_head *pages, unsigned nr_pages,
2663 get_extent_t get_extent)
2664{
2665 struct bio *bio = NULL;
2666 unsigned page_idx;
2667 struct pagevec pvec;
c8b97818 2668 unsigned long bio_flags = 0;
d1310b2e
CM
2669
2670 pagevec_init(&pvec, 0);
2671 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2672 struct page *page = list_entry(pages->prev, struct page, lru);
2673
2674 prefetchw(&page->flags);
2675 list_del(&page->lru);
2676 /*
2677 * what we want to do here is call add_to_page_cache_lru,
2678 * but that isn't exported, so we reproduce it here
2679 */
2680 if (!add_to_page_cache(page, mapping,
2681 page->index, GFP_KERNEL)) {
2682
2683 /* open coding of lru_cache_add, also not exported */
2684 page_cache_get(page);
2685 if (!pagevec_add(&pvec, page))
15916de8 2686 __pagevec_lru_add_file(&pvec);
f188591e 2687 __extent_read_full_page(tree, page, get_extent,
c8b97818 2688 &bio, 0, &bio_flags);
d1310b2e
CM
2689 }
2690 page_cache_release(page);
2691 }
2692 if (pagevec_count(&pvec))
15916de8 2693 __pagevec_lru_add_file(&pvec);
d1310b2e
CM
2694 BUG_ON(!list_empty(pages));
2695 if (bio)
c8b97818 2696 submit_one_bio(READ, bio, 0, bio_flags);
d1310b2e
CM
2697 return 0;
2698}
d1310b2e
CM
2699
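/*
 * Illustrative sketch, not part of the original file: extent_readpages()
 * above is the readahead entry point, so a filesystem's ->readpages method
 * can hand the page list straight through.  my_readpages and my_get_extent
 * are assumed names for illustration only.
 */
#if 0
static int my_readpages(struct file *file, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_readpages(tree, mapping, pages, nr_pages, my_get_extent);
}
#endif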
2700/*
2701 * basic invalidatepage code: this waits on any locked or writeback
2702 * ranges corresponding to the page, and then deletes any extent state
2703 * records from the tree
2704 */
2705int extent_invalidatepage(struct extent_io_tree *tree,
2706 struct page *page, unsigned long offset)
2707{
2708 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2709 u64 end = start + PAGE_CACHE_SIZE - 1;
2710 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2711
d397712b 2712 start += (offset + blocksize - 1) & ~(blocksize - 1);
d1310b2e
CM
2713 if (start > end)
2714 return 0;
2715
2716 lock_extent(tree, start, end, GFP_NOFS);
1edbb734 2717 wait_on_page_writeback(page);
d1310b2e 2718 clear_extent_bit(tree, start, end,
32c00aff
JB
2719 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
2720 EXTENT_DO_ACCOUNTING,
2c64c53d 2721 1, 1, NULL, GFP_NOFS);
d1310b2e
CM
2722 return 0;
2723}
d1310b2e
CM
2724
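/*
 * Illustrative sketch, not part of the original file: an ->invalidatepage
 * method built on extent_invalidatepage() above.  A real filesystem (btrfs
 * included) usually also cleans up its own ordered/private state here;
 * my_invalidatepage is an invented name for illustration.
 */
#if 0
static void my_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
}
#endif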
2725/*
2726 * simple commit_write call: the page is marked dirty and i_size is
2727 * updated if the write extended past the old end of the file
2728 */
2729int extent_commit_write(struct extent_io_tree *tree,
2730 struct inode *inode, struct page *page,
2731 unsigned from, unsigned to)
2732{
2733 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2734
2735 set_page_extent_mapped(page);
2736 set_page_dirty(page);
2737
2738 if (pos > inode->i_size) {
2739 i_size_write(inode, pos);
2740 mark_inode_dirty(inode);
2741 }
2742 return 0;
2743}
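/*
 * Worked example for extent_commit_write() above (added for illustration,
 * not part of the original file): assuming PAGE_CACHE_SIZE == 4096, a
 * commit into page index 3 ending at byte offset to == 100 gives
 *   pos = (3 << 12) + 100 = 12388,
 * so an inode whose i_size was 12288 grows to 12388 and is marked dirty,
 * while a larger i_size is left untouched.
 */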
d1310b2e
CM
2744
2745int extent_prepare_write(struct extent_io_tree *tree,
2746 struct inode *inode, struct page *page,
2747 unsigned from, unsigned to, get_extent_t *get_extent)
2748{
2749 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2750 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2751 u64 block_start;
2752 u64 orig_block_start;
2753 u64 block_end;
2754 u64 cur_end;
2755 struct extent_map *em;
2756 unsigned blocksize = 1 << inode->i_blkbits;
2757 size_t page_offset = 0;
2758 size_t block_off_start;
2759 size_t block_off_end;
2760 int err = 0;
2761 int iocount = 0;
2762 int ret = 0;
2763 int isnew;
2764
2765 set_page_extent_mapped(page);
2766
2767 block_start = (page_start + from) & ~((u64)blocksize - 1);
2768 block_end = (page_start + to - 1) | (blocksize - 1);
2769 orig_block_start = block_start;
2770
2771 lock_extent(tree, page_start, page_end, GFP_NOFS);
d397712b 2772 while (block_start <= block_end) {
d1310b2e
CM
2773 em = get_extent(inode, page, page_offset, block_start,
2774 block_end - block_start + 1, 1);
d397712b 2775 if (IS_ERR(em) || !em)
d1310b2e 2776 goto err;
d397712b 2777
d1310b2e
CM
2778 cur_end = min(block_end, extent_map_end(em) - 1);
2779 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2780 block_off_end = block_off_start + blocksize;
2781 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2782
2783 if (!PageUptodate(page) && isnew &&
2784 (block_off_end > to || block_off_start < from)) {
2785 void *kaddr;
2786
2787 kaddr = kmap_atomic(page, KM_USER0);
2788 if (block_off_end > to)
2789 memset(kaddr + to, 0, block_off_end - to);
2790 if (block_off_start < from)
2791 memset(kaddr + block_off_start, 0,
2792 from - block_off_start);
2793 flush_dcache_page(page);
2794 kunmap_atomic(kaddr, KM_USER0);
2795 }
2796 if ((em->block_start != EXTENT_MAP_HOLE &&
2797 em->block_start != EXTENT_MAP_INLINE) &&
2798 !isnew && !PageUptodate(page) &&
2799 (block_off_end > to || block_off_start < from) &&
2800 !test_range_bit(tree, block_start, cur_end,
9655d298 2801 EXTENT_UPTODATE, 1, NULL)) {
d1310b2e
CM
2802 u64 sector;
2803 u64 extent_offset = block_start - em->start;
2804 size_t iosize;
2805 sector = (em->block_start + extent_offset) >> 9;
2806 iosize = (cur_end - block_start + blocksize) &
2807 ~((u64)blocksize - 1);
2808 /*
2809 * we've already got the extent locked, but we
2810 * need to split the state such that our end_bio
2811 * handler can clear the lock.
2812 */
2813 set_extent_bit(tree, block_start,
2814 block_start + iosize - 1,
2c64c53d 2815 EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
d1310b2e
CM
2816 ret = submit_extent_page(READ, tree, page,
2817 sector, iosize, page_offset, em->bdev,
2818 NULL, 1,
c8b97818
CM
2819 end_bio_extent_preparewrite, 0,
2820 0, 0);
d1310b2e
CM
2821 iocount++;
2822 block_start = block_start + iosize;
2823 } else {
2824 set_extent_uptodate(tree, block_start, cur_end,
2825 GFP_NOFS);
2826 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2827 block_start = cur_end + 1;
2828 }
2829 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2830 free_extent_map(em);
2831 }
2832 if (iocount) {
2833 wait_extent_bit(tree, orig_block_start,
2834 block_end, EXTENT_LOCKED);
2835 }
2836 check_page_uptodate(tree, page);
2837err:
2838 /* FIXME, zero out newly allocated blocks on error */
2839 return err;
2840}
d1310b2e 2841
7b13b7b1
CM
2842/*
2843 * a helper for releasepage: this tests for areas of the page that
2844 * are locked or under IO and drops the related state bits if it is safe
2845 * to drop the page.
2846 */
2847int try_release_extent_state(struct extent_map_tree *map,
2848 struct extent_io_tree *tree, struct page *page,
2849 gfp_t mask)
2850{
2851 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2852 u64 end = start + PAGE_CACHE_SIZE - 1;
2853 int ret = 1;
2854
211f90e6 2855 if (test_range_bit(tree, start, end,
8b62b72b 2856 EXTENT_IOBITS, 0, NULL))
7b13b7b1
CM
2857 ret = 0;
2858 else {
2859 if ((mask & GFP_NOFS) == GFP_NOFS)
2860 mask = GFP_NOFS;
11ef160f
CM
2861 /*
2862 * at this point we can safely clear everything except the
2863 * locked bit and the nodatasum bit
2864 */
2865 clear_extent_bit(tree, start, end,
2866 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
2867 0, 0, NULL, mask);
7b13b7b1
CM
2868 }
2869 return ret;
2870}
7b13b7b1 2871
d1310b2e
CM
2872/*
2873 * a helper for releasepage. As long as there are no locked extents
2874 * in the range corresponding to the page, both state records and extent
2875 * map records are removed
2876 */
2877int try_release_extent_mapping(struct extent_map_tree *map,
70dec807
CM
2878 struct extent_io_tree *tree, struct page *page,
2879 gfp_t mask)
d1310b2e
CM
2880{
2881 struct extent_map *em;
2882 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2883 u64 end = start + PAGE_CACHE_SIZE - 1;
7b13b7b1 2884
70dec807
CM
2885 if ((mask & __GFP_WAIT) &&
2886 page->mapping->host->i_size > 16 * 1024 * 1024) {
39b5637f 2887 u64 len;
70dec807 2888 while (start <= end) {
39b5637f 2889 len = end - start + 1;
890871be 2890 write_lock(&map->lock);
39b5637f 2891 em = lookup_extent_mapping(map, start, len);
70dec807 2892 if (!em || IS_ERR(em)) {
890871be 2893 write_unlock(&map->lock);
70dec807
CM
2894 break;
2895 }
7f3c74fb
CM
2896 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2897 em->start != start) {
890871be 2898 write_unlock(&map->lock);
70dec807
CM
2899 free_extent_map(em);
2900 break;
2901 }
2902 if (!test_range_bit(tree, em->start,
2903 extent_map_end(em) - 1,
8b62b72b 2904 EXTENT_LOCKED | EXTENT_WRITEBACK,
9655d298 2905 0, NULL)) {
70dec807
CM
2906 remove_extent_mapping(map, em);
2907 /* once for the rb tree */
2908 free_extent_map(em);
2909 }
2910 start = extent_map_end(em);
890871be 2911 write_unlock(&map->lock);
70dec807
CM
2912
2913 /* once for us */
d1310b2e
CM
2914 free_extent_map(em);
2915 }
d1310b2e 2916 }
7b13b7b1 2917 return try_release_extent_state(map, tree, page, mask);
d1310b2e 2918}
d1310b2e
CM
2919
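/*
 * Illustrative sketch, not part of the original file: a ->releasepage
 * method built on the two helpers above.  The io_tree / extent_tree field
 * names follow the btrfs inode layout but should be treated as an
 * assumption here; my_releasepage is an invented name.
 */
#if 0
static int my_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct inode *inode = page->mapping->host;

	return try_release_extent_mapping(&BTRFS_I(inode)->extent_tree,
					  &BTRFS_I(inode)->io_tree,
					  page, gfp_flags);
}
#endif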
2920sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2921 get_extent_t *get_extent)
2922{
2923 struct inode *inode = mapping->host;
2924 u64 start = iblock << inode->i_blkbits;
2925 sector_t sector = 0;
d899e052 2926 size_t blksize = (1 << inode->i_blkbits);
d1310b2e
CM
2927 struct extent_map *em;
2928
d899e052
YZ
2929 lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2930 GFP_NOFS);
2931 em = get_extent(inode, NULL, 0, start, blksize, 0);
2932 unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2933 GFP_NOFS);
d1310b2e
CM
2934 if (!em || IS_ERR(em))
2935 return 0;
2936
d899e052 2937 if (em->block_start > EXTENT_MAP_LAST_BYTE)
d1310b2e
CM
2938 goto out;
2939
2940 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
d1310b2e
CM
2941out:
2942 free_extent_map(em);
2943 return sector;
2944}
2945
1506fcc8
YS
2946int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2947 __u64 start, __u64 len, get_extent_t *get_extent)
2948{
2949 int ret;
2950 u64 off = start;
2951 u64 max = start + len;
2952 u32 flags = 0;
2953 u64 disko = 0;
2954 struct extent_map *em = NULL;
2955 int end = 0;
2956 u64 em_start = 0, em_len = 0;
2957 unsigned long emflags;
2958 ret = 0;
2959
2960 if (len == 0)
2961 return -EINVAL;
2962
2963 lock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
2964 GFP_NOFS);
2965 em = get_extent(inode, NULL, 0, off, max - off, 0);
2966 if (!em)
2967 goto out;
2968 if (IS_ERR(em)) {
2969 ret = PTR_ERR(em);
2970 goto out;
2971 }
2972 while (!end) {
2973 off = em->start + em->len;
2974 if (off >= max)
2975 end = 1;
2976
2977 em_start = em->start;
2978 em_len = em->len;
2979
2980 disko = 0;
2981 flags = 0;
2982
93dbfad7 2983 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
1506fcc8
YS
2984 end = 1;
2985 flags |= FIEMAP_EXTENT_LAST;
93dbfad7 2986 } else if (em->block_start == EXTENT_MAP_HOLE) {
1506fcc8 2987 flags |= FIEMAP_EXTENT_UNWRITTEN;
93dbfad7 2988 } else if (em->block_start == EXTENT_MAP_INLINE) {
1506fcc8
YS
2989 flags |= (FIEMAP_EXTENT_DATA_INLINE |
2990 FIEMAP_EXTENT_NOT_ALIGNED);
93dbfad7 2991 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
1506fcc8
YS
2992 flags |= (FIEMAP_EXTENT_DELALLOC |
2993 FIEMAP_EXTENT_UNKNOWN);
93dbfad7 2994 } else {
1506fcc8 2995 disko = em->block_start;
1506fcc8
YS
2996 }
2997 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2998 flags |= FIEMAP_EXTENT_ENCODED;
2999
3000 emflags = em->flags;
3001 free_extent_map(em);
3002 em = NULL;
3003
3004 if (!end) {
3005 em = get_extent(inode, NULL, 0, off, max - off, 0);
3006 if (!em)
3007 goto out;
3008 if (IS_ERR(em)) {
3009 ret = PTR_ERR(em);
3010 goto out;
3011 }
3012 emflags = em->flags;
3013 }
3014 if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
3015 flags |= FIEMAP_EXTENT_LAST;
3016 end = 1;
3017 }
3018
3019 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3020 em_len, flags);
3021 if (ret)
3022 goto out_free;
3023 }
3024out_free:
3025 free_extent_map(em);
3026out:
3027 unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
3028 GFP_NOFS);
3029 return ret;
3030}
3031
d1310b2e
CM
3032static inline struct page *extent_buffer_page(struct extent_buffer *eb,
3033 unsigned long i)
3034{
3035 struct page *p;
3036 struct address_space *mapping;
3037
3038 if (i == 0)
3039 return eb->first_page;
3040 i += eb->start >> PAGE_CACHE_SHIFT;
3041 mapping = eb->first_page->mapping;
33958dc6
CM
3042 if (!mapping)
3043 return NULL;
0ee0fda0
SW
3044
3045 /*
3046 * extent_buffer_page is only called after pinning the page
3047 * by increasing the reference count. So we know the page must
3048 * be in the radix tree.
3049 */
0ee0fda0 3050 rcu_read_lock();
d1310b2e 3051 p = radix_tree_lookup(&mapping->page_tree, i);
0ee0fda0 3052 rcu_read_unlock();
2b1f55b0 3053
d1310b2e
CM
3054 return p;
3055}
3056
6af118ce 3057static inline unsigned long num_extent_pages(u64 start, u64 len)
728131d8 3058{
6af118ce
CM
3059 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3060 (start >> PAGE_CACHE_SHIFT);
728131d8
CM
3061}
3062
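/*
 * Worked example and usage sketch (added for illustration, not part of the
 * original file).  Extent buffers may start at an arbitrary offset inside a
 * page, so num_extent_pages() counts every page the range [start, start+len)
 * touches.  Assuming PAGE_CACHE_SIZE == 4096, start == 4092 and len == 16384:
 *   ((4092 + 16384 + 4095) >> 12) - (4092 >> 12) = 5 - 0 = 5 pages.
 * A typical walk over those pages looks like the uncompiled fragment below;
 * the pinning described in extent_buffer_page() is what guarantees each
 * lookup finds the page in the radix tree.
 */
#if 0
	unsigned long i;
	unsigned long num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		struct page *p = extent_buffer_page(eb, i);

		/* eb->refs pins the backing pages, so p is expected non-NULL */
		flush_dcache_page(p);
	}
#endif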
d1310b2e
CM
3063static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3064 u64 start,
3065 unsigned long len,
3066 gfp_t mask)
3067{
3068 struct extent_buffer *eb = NULL;
3935127c 3069#if LEAK_DEBUG
2d2ae547 3070 unsigned long flags;
4bef0848 3071#endif
d1310b2e 3072
d1310b2e 3073 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
d1310b2e
CM
3074 eb->start = start;
3075 eb->len = len;
b4ce94de
CM
3076 spin_lock_init(&eb->lock);
3077 init_waitqueue_head(&eb->lock_wq);
3078
3935127c 3079#if LEAK_DEBUG
2d2ae547
CM
3080 spin_lock_irqsave(&leak_lock, flags);
3081 list_add(&eb->leak_list, &buffers);
3082 spin_unlock_irqrestore(&leak_lock, flags);
4bef0848 3083#endif
d1310b2e
CM
3084 atomic_set(&eb->refs, 1);
3085
3086 return eb;
3087}
3088
3089static void __free_extent_buffer(struct extent_buffer *eb)
3090{
3935127c 3091#if LEAK_DEBUG
2d2ae547
CM
3092 unsigned long flags;
3093 spin_lock_irqsave(&leak_lock, flags);
3094 list_del(&eb->leak_list);
3095 spin_unlock_irqrestore(&leak_lock, flags);
4bef0848 3096#endif
d1310b2e
CM
3097 kmem_cache_free(extent_buffer_cache, eb);
3098}
3099
3100struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3101 u64 start, unsigned long len,
3102 struct page *page0,
3103 gfp_t mask)
3104{
3105 unsigned long num_pages = num_extent_pages(start, len);
3106 unsigned long i;
3107 unsigned long index = start >> PAGE_CACHE_SHIFT;
3108 struct extent_buffer *eb;
6af118ce 3109 struct extent_buffer *exists = NULL;
d1310b2e
CM
3110 struct page *p;
3111 struct address_space *mapping = tree->mapping;
3112 int uptodate = 1;
3113
6af118ce
CM
3114 spin_lock(&tree->buffer_lock);
3115 eb = buffer_search(tree, start);
3116 if (eb) {
3117 atomic_inc(&eb->refs);
3118 spin_unlock(&tree->buffer_lock);
0f9dd46c 3119 mark_page_accessed(eb->first_page);
6af118ce
CM
3120 return eb;
3121 }
3122 spin_unlock(&tree->buffer_lock);
3123
d1310b2e 3124 eb = __alloc_extent_buffer(tree, start, len, mask);
2b114d1d 3125 if (!eb)
d1310b2e
CM
3126 return NULL;
3127
d1310b2e
CM
3128 if (page0) {
3129 eb->first_page = page0;
3130 i = 1;
3131 index++;
3132 page_cache_get(page0);
3133 mark_page_accessed(page0);
3134 set_page_extent_mapped(page0);
d1310b2e 3135 set_page_extent_head(page0, len);
f188591e 3136 uptodate = PageUptodate(page0);
d1310b2e
CM
3137 } else {
3138 i = 0;
3139 }
3140 for (; i < num_pages; i++, index++) {
3141 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
3142 if (!p) {
3143 WARN_ON(1);
6af118ce 3144 goto free_eb;
d1310b2e
CM
3145 }
3146 set_page_extent_mapped(p);
3147 mark_page_accessed(p);
3148 if (i == 0) {
3149 eb->first_page = p;
3150 set_page_extent_head(p, len);
3151 } else {
3152 set_page_private(p, EXTENT_PAGE_PRIVATE);
3153 }
3154 if (!PageUptodate(p))
3155 uptodate = 0;
3156 unlock_page(p);
3157 }
3158 if (uptodate)
b4ce94de 3159 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
d1310b2e 3160
6af118ce
CM
3161 spin_lock(&tree->buffer_lock);
3162 exists = buffer_tree_insert(tree, start, &eb->rb_node);
3163 if (exists) {
3164 /* add one reference for the caller */
3165 atomic_inc(&exists->refs);
3166 spin_unlock(&tree->buffer_lock);
3167 goto free_eb;
3168 }
6af118ce
CM
3169 /* add one reference for the tree */
3170 atomic_inc(&eb->refs);
f044ba78 3171 spin_unlock(&tree->buffer_lock);
d1310b2e
CM
3172 return eb;
3173
6af118ce 3174free_eb:
d1310b2e 3175 if (!atomic_dec_and_test(&eb->refs))
6af118ce
CM
3176 return exists;
3177 for (index = 1; index < i; index++)
d1310b2e 3178 page_cache_release(extent_buffer_page(eb, index));
6af118ce 3179 page_cache_release(extent_buffer_page(eb, 0));
d1310b2e 3180 __free_extent_buffer(eb);
6af118ce 3181 return exists;
d1310b2e 3182}
d1310b2e
CM
3183
3184struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3185 u64 start, unsigned long len,
3186 gfp_t mask)
3187{
d1310b2e 3188 struct extent_buffer *eb;
d1310b2e 3189
6af118ce
CM
3190 spin_lock(&tree->buffer_lock);
3191 eb = buffer_search(tree, start);
3192 if (eb)
3193 atomic_inc(&eb->refs);
3194 spin_unlock(&tree->buffer_lock);
d1310b2e 3195
0f9dd46c
JB
3196 if (eb)
3197 mark_page_accessed(eb->first_page);
3198
d1310b2e 3199 return eb;
d1310b2e 3200}
d1310b2e
CM
3201
3202void free_extent_buffer(struct extent_buffer *eb)
3203{
d1310b2e
CM
3204 if (!eb)
3205 return;
3206
3207 if (!atomic_dec_and_test(&eb->refs))
3208 return;
3209
6af118ce 3210 WARN_ON(1);
d1310b2e 3211}
d1310b2e
CM
3212
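/*
 * Illustrative sketch, not part of the original file: the reference counting
 * above means the tree keeps one reference and the caller keeps another, so
 * every successful alloc_extent_buffer()/find_extent_buffer() must be paired
 * with a free_extent_buffer().  my_read_tree_block and my_get_extent are
 * assumed names; read_extent_buffer_pages() is defined later in this file.
 */
#if 0
static struct extent_buffer *my_read_tree_block(struct extent_io_tree *tree,
						u64 start, unsigned long len)
{
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(tree, start, len, NULL, GFP_NOFS);
	if (!eb)
		return NULL;

	if (read_extent_buffer_pages(tree, eb, 0, 1, my_get_extent, 0)) {
		/* drop the caller's reference; the tree still holds its own */
		free_extent_buffer(eb);
		return NULL;
	}
	return eb;
}
#endif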
3213int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3214 struct extent_buffer *eb)
3215{
d1310b2e
CM
3216 unsigned long i;
3217 unsigned long num_pages;
3218 struct page *page;
3219
d1310b2e
CM
3220 num_pages = num_extent_pages(eb->start, eb->len);
3221
3222 for (i = 0; i < num_pages; i++) {
3223 page = extent_buffer_page(eb, i);
b9473439 3224 if (!PageDirty(page))
d2c3f4f6
CM
3225 continue;
3226
a61e6f29 3227 lock_page(page);
d1310b2e
CM
3228 if (i == 0)
3229 set_page_extent_head(page, eb->len);
3230 else
3231 set_page_private(page, EXTENT_PAGE_PRIVATE);
3232
d1310b2e 3233 clear_page_dirty_for_io(page);
0ee0fda0 3234 spin_lock_irq(&page->mapping->tree_lock);
d1310b2e
CM
3235 if (!PageDirty(page)) {
3236 radix_tree_tag_clear(&page->mapping->page_tree,
3237 page_index(page),
3238 PAGECACHE_TAG_DIRTY);
3239 }
0ee0fda0 3240 spin_unlock_irq(&page->mapping->tree_lock);
a61e6f29 3241 unlock_page(page);
d1310b2e
CM
3242 }
3243 return 0;
3244}
d1310b2e
CM
3245
3246int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3247 struct extent_buffer *eb)
3248{
3249 return wait_on_extent_writeback(tree, eb->start,
3250 eb->start + eb->len - 1);
3251}
d1310b2e
CM
3252
3253int set_extent_buffer_dirty(struct extent_io_tree *tree,
3254 struct extent_buffer *eb)
3255{
3256 unsigned long i;
3257 unsigned long num_pages;
b9473439 3258 int was_dirty = 0;
d1310b2e 3259
b9473439 3260 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
d1310b2e 3261 num_pages = num_extent_pages(eb->start, eb->len);
b9473439 3262 for (i = 0; i < num_pages; i++)
d1310b2e 3263 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
b9473439 3264 return was_dirty;
d1310b2e 3265}
d1310b2e 3266
1259ab75
CM
3267int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3268 struct extent_buffer *eb)
3269{
3270 unsigned long i;
3271 struct page *page;
3272 unsigned long num_pages;
3273
3274 num_pages = num_extent_pages(eb->start, eb->len);
b4ce94de 3275 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1259ab75
CM
3276
3277 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3278 GFP_NOFS);
3279 for (i = 0; i < num_pages; i++) {
3280 page = extent_buffer_page(eb, i);
33958dc6
CM
3281 if (page)
3282 ClearPageUptodate(page);
1259ab75
CM
3283 }
3284 return 0;
3285}
3286
d1310b2e
CM
3287int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3288 struct extent_buffer *eb)
3289{
3290 unsigned long i;
3291 struct page *page;
3292 unsigned long num_pages;
3293
3294 num_pages = num_extent_pages(eb->start, eb->len);
3295
3296 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3297 GFP_NOFS);
3298 for (i = 0; i < num_pages; i++) {
3299 page = extent_buffer_page(eb, i);
3300 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3301 ((i == num_pages - 1) &&
3302 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3303 check_page_uptodate(tree, page);
3304 continue;
3305 }
3306 SetPageUptodate(page);
3307 }
3308 return 0;
3309}
d1310b2e 3310
ce9adaa5
CM
3311int extent_range_uptodate(struct extent_io_tree *tree,
3312 u64 start, u64 end)
3313{
3314 struct page *page;
3315 int ret;
3316 int pg_uptodate = 1;
3317 int uptodate;
3318 unsigned long index;
3319
9655d298 3320 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
ce9adaa5
CM
3321 if (ret)
3322 return 1;
d397712b 3323 while (start <= end) {
ce9adaa5
CM
3324 index = start >> PAGE_CACHE_SHIFT;
3325 page = find_get_page(tree->mapping, index);
3326 uptodate = PageUptodate(page);
3327 page_cache_release(page);
3328 if (!uptodate) {
3329 pg_uptodate = 0;
3330 break;
3331 }
3332 start += PAGE_CACHE_SIZE;
3333 }
3334 return pg_uptodate;
3335}
3336
d1310b2e 3337int extent_buffer_uptodate(struct extent_io_tree *tree,
ce9adaa5 3338 struct extent_buffer *eb)
d1310b2e 3339{
728131d8 3340 int ret = 0;
ce9adaa5
CM
3341 unsigned long num_pages;
3342 unsigned long i;
728131d8
CM
3343 struct page *page;
3344 int pg_uptodate = 1;
3345
b4ce94de 3346 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4235298e 3347 return 1;
728131d8 3348
4235298e 3349 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
9655d298 3350 EXTENT_UPTODATE, 1, NULL);
4235298e
CM
3351 if (ret)
3352 return ret;
728131d8
CM
3353
3354 num_pages = num_extent_pages(eb->start, eb->len);
3355 for (i = 0; i < num_pages; i++) {
3356 page = extent_buffer_page(eb, i);
3357 if (!PageUptodate(page)) {
3358 pg_uptodate = 0;
3359 break;
3360 }
3361 }
4235298e 3362 return pg_uptodate;
d1310b2e 3363}
d1310b2e
CM
3364
3365int read_extent_buffer_pages(struct extent_io_tree *tree,
3366 struct extent_buffer *eb,
a86c12c7 3367 u64 start, int wait,
f188591e 3368 get_extent_t *get_extent, int mirror_num)
d1310b2e
CM
3369{
3370 unsigned long i;
3371 unsigned long start_i;
3372 struct page *page;
3373 int err;
3374 int ret = 0;
ce9adaa5
CM
3375 int locked_pages = 0;
3376 int all_uptodate = 1;
3377 int inc_all_pages = 0;
d1310b2e 3378 unsigned long num_pages;
a86c12c7 3379 struct bio *bio = NULL;
c8b97818 3380 unsigned long bio_flags = 0;
a86c12c7 3381
b4ce94de 3382 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
d1310b2e
CM
3383 return 0;
3384
ce9adaa5 3385 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
9655d298 3386 EXTENT_UPTODATE, 1, NULL)) {
d1310b2e
CM
3387 return 0;
3388 }
3389
3390 if (start) {
3391 WARN_ON(start < eb->start);
3392 start_i = (start >> PAGE_CACHE_SHIFT) -
3393 (eb->start >> PAGE_CACHE_SHIFT);
3394 } else {
3395 start_i = 0;
3396 }
3397
3398 num_pages = num_extent_pages(eb->start, eb->len);
3399 for (i = start_i; i < num_pages; i++) {
3400 page = extent_buffer_page(eb, i);
d1310b2e 3401 if (!wait) {
2db04966 3402 if (!trylock_page(page))
ce9adaa5 3403 goto unlock_exit;
d1310b2e
CM
3404 } else {
3405 lock_page(page);
3406 }
ce9adaa5 3407 locked_pages++;
d397712b 3408 if (!PageUptodate(page))
ce9adaa5 3409 all_uptodate = 0;
ce9adaa5
CM
3410 }
3411 if (all_uptodate) {
3412 if (start_i == 0)
b4ce94de 3413 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
ce9adaa5
CM
3414 goto unlock_exit;
3415 }
3416
3417 for (i = start_i; i < num_pages; i++) {
3418 page = extent_buffer_page(eb, i);
3419 if (inc_all_pages)
3420 page_cache_get(page);
3421 if (!PageUptodate(page)) {
3422 if (start_i == 0)
3423 inc_all_pages = 1;
f188591e 3424 ClearPageError(page);
a86c12c7 3425 err = __extent_read_full_page(tree, page,
f188591e 3426 get_extent, &bio,
c8b97818 3427 mirror_num, &bio_flags);
d397712b 3428 if (err)
d1310b2e 3429 ret = err;
d1310b2e
CM
3430 } else {
3431 unlock_page(page);
3432 }
3433 }
3434
a86c12c7 3435 if (bio)
c8b97818 3436 submit_one_bio(READ, bio, mirror_num, bio_flags);
a86c12c7 3437
d397712b 3438 if (ret || !wait)
d1310b2e 3439 return ret;
d397712b 3440
d1310b2e
CM
3441 for (i = start_i; i < num_pages; i++) {
3442 page = extent_buffer_page(eb, i);
3443 wait_on_page_locked(page);
d397712b 3444 if (!PageUptodate(page))
d1310b2e 3445 ret = -EIO;
d1310b2e 3446 }
d397712b 3447
d1310b2e 3448 if (!ret)
b4ce94de 3449 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
d1310b2e 3450 return ret;
ce9adaa5
CM
3451
3452unlock_exit:
3453 i = start_i;
d397712b 3454 while (locked_pages > 0) {
ce9adaa5
CM
3455 page = extent_buffer_page(eb, i);
3456 i++;
3457 unlock_page(page);
3458 locked_pages--;
3459 }
3460 return ret;
d1310b2e 3461}
d1310b2e
CM
3462
3463void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3464 unsigned long start,
3465 unsigned long len)
3466{
3467 size_t cur;
3468 size_t offset;
3469 struct page *page;
3470 char *kaddr;
3471 char *dst = (char *)dstv;
3472 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3473 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
d1310b2e
CM
3474
3475 WARN_ON(start > eb->len);
3476 WARN_ON(start + len > eb->start + eb->len);
3477
3478 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3479
d397712b 3480 while (len > 0) {
d1310b2e 3481 page = extent_buffer_page(eb, i);
d1310b2e
CM
3482
3483 cur = min(len, (PAGE_CACHE_SIZE - offset));
3484 kaddr = kmap_atomic(page, KM_USER1);
3485 memcpy(dst, kaddr + offset, cur);
3486 kunmap_atomic(kaddr, KM_USER1);
3487
3488 dst += cur;
3489 len -= cur;
3490 offset = 0;
3491 i++;
3492 }
3493}
d1310b2e
CM
3494
3495int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3496 unsigned long min_len, char **token, char **map,
3497 unsigned long *map_start,
3498 unsigned long *map_len, int km)
3499{
3500 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3501 char *kaddr;
3502 struct page *p;
3503 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3504 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3505 unsigned long end_i = (start_offset + start + min_len - 1) >>
3506 PAGE_CACHE_SHIFT;
3507
3508 if (i != end_i)
3509 return -EINVAL;
3510
3511 if (i == 0) {
3512 offset = start_offset;
3513 *map_start = 0;
3514 } else {
3515 offset = 0;
3516 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3517 }
d397712b 3518
d1310b2e 3519 if (start + min_len > eb->len) {
d397712b
CM
3520 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
3521 "wanted %lu %lu\n", (unsigned long long)eb->start,
3522 eb->len, start, min_len);
d1310b2e
CM
3523 WARN_ON(1);
3524 }
3525
3526 p = extent_buffer_page(eb, i);
d1310b2e
CM
3527 kaddr = kmap_atomic(p, km);
3528 *token = kaddr;
3529 *map = kaddr + offset;
3530 *map_len = PAGE_CACHE_SIZE - offset;
3531 return 0;
3532}
d1310b2e
CM
3533
3534int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3535 unsigned long min_len,
3536 char **token, char **map,
3537 unsigned long *map_start,
3538 unsigned long *map_len, int km)
3539{
3540 int err;
3541 int save = 0;
3542 if (eb->map_token) {
3543 unmap_extent_buffer(eb, eb->map_token, km);
3544 eb->map_token = NULL;
3545 save = 1;
3546 }
3547 err = map_private_extent_buffer(eb, start, min_len, token, map,
3548 map_start, map_len, km);
3549 if (!err && save) {
3550 eb->map_token = *token;
3551 eb->kaddr = *map;
3552 eb->map_start = *map_start;
3553 eb->map_len = *map_len;
3554 }
3555 return err;
3556}
d1310b2e
CM
3557
3558void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3559{
3560 kunmap_atomic(token, km);
3561}
d1310b2e
CM
3562
3563int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3564 unsigned long start,
3565 unsigned long len)
3566{
3567 size_t cur;
3568 size_t offset;
3569 struct page *page;
3570 char *kaddr;
3571 char *ptr = (char *)ptrv;
3572 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3573 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3574 int ret = 0;
3575
3576 WARN_ON(start > eb->len);
3577 WARN_ON(start + len > eb->start + eb->len);
3578
3579 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3580
d397712b 3581 while (len > 0) {
d1310b2e 3582 page = extent_buffer_page(eb, i);
d1310b2e
CM
3583
3584 cur = min(len, (PAGE_CACHE_SIZE - offset));
3585
3586 kaddr = kmap_atomic(page, KM_USER0);
3587 ret = memcmp(ptr, kaddr + offset, cur);
3588 kunmap_atomic(kaddr, KM_USER0);
3589 if (ret)
3590 break;
3591
3592 ptr += cur;
3593 len -= cur;
3594 offset = 0;
3595 i++;
3596 }
3597 return ret;
3598}
d1310b2e
CM
3599
3600void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3601 unsigned long start, unsigned long len)
3602{
3603 size_t cur;
3604 size_t offset;
3605 struct page *page;
3606 char *kaddr;
3607 char *src = (char *)srcv;
3608 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3609 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3610
3611 WARN_ON(start > eb->len);
3612 WARN_ON(start + len > eb->start + eb->len);
3613
3614 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3615
d397712b 3616 while (len > 0) {
d1310b2e
CM
3617 page = extent_buffer_page(eb, i);
3618 WARN_ON(!PageUptodate(page));
3619
3620 cur = min(len, PAGE_CACHE_SIZE - offset);
3621 kaddr = kmap_atomic(page, KM_USER1);
3622 memcpy(kaddr + offset, src, cur);
3623 kunmap_atomic(kaddr, KM_USER1);
3624
3625 src += cur;
3626 len -= cur;
3627 offset = 0;
3628 i++;
3629 }
3630}
d1310b2e
CM
3631
3632void memset_extent_buffer(struct extent_buffer *eb, char c,
3633 unsigned long start, unsigned long len)
3634{
3635 size_t cur;
3636 size_t offset;
3637 struct page *page;
3638 char *kaddr;
3639 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3640 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3641
3642 WARN_ON(start > eb->len);
3643 WARN_ON(start + len > eb->start + eb->len);
3644
3645 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3646
d397712b 3647 while (len > 0) {
d1310b2e
CM
3648 page = extent_buffer_page(eb, i);
3649 WARN_ON(!PageUptodate(page));
3650
3651 cur = min(len, PAGE_CACHE_SIZE - offset);
3652 kaddr = kmap_atomic(page, KM_USER0);
3653 memset(kaddr + offset, c, cur);
3654 kunmap_atomic(kaddr, KM_USER0);
3655
3656 len -= cur;
3657 offset = 0;
3658 i++;
3659 }
3660}
d1310b2e
CM
3661
3662void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3663 unsigned long dst_offset, unsigned long src_offset,
3664 unsigned long len)
3665{
3666 u64 dst_len = dst->len;
3667 size_t cur;
3668 size_t offset;
3669 struct page *page;
3670 char *kaddr;
3671 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3672 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3673
3674 WARN_ON(src->len != dst_len);
3675
3676 offset = (start_offset + dst_offset) &
3677 ((unsigned long)PAGE_CACHE_SIZE - 1);
3678
d397712b 3679 while (len > 0) {
d1310b2e
CM
3680 page = extent_buffer_page(dst, i);
3681 WARN_ON(!PageUptodate(page));
3682
3683 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3684
3685 kaddr = kmap_atomic(page, KM_USER0);
3686 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3687 kunmap_atomic(kaddr, KM_USER0);
3688
3689 src_offset += cur;
3690 len -= cur;
3691 offset = 0;
3692 i++;
3693 }
3694}
d1310b2e
CM
3695
3696static void move_pages(struct page *dst_page, struct page *src_page,
3697 unsigned long dst_off, unsigned long src_off,
3698 unsigned long len)
3699{
3700 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3701 if (dst_page == src_page) {
3702 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3703 } else {
3704 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3705 char *p = dst_kaddr + dst_off + len;
3706 char *s = src_kaddr + src_off + len;
3707
3708 while (len--)
3709 *--p = *--s;
3710
3711 kunmap_atomic(src_kaddr, KM_USER1);
3712 }
3713 kunmap_atomic(dst_kaddr, KM_USER0);
3714}
3715
3716static void copy_pages(struct page *dst_page, struct page *src_page,
3717 unsigned long dst_off, unsigned long src_off,
3718 unsigned long len)
3719{
3720 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3721 char *src_kaddr;
3722
3723 if (dst_page != src_page)
3724 src_kaddr = kmap_atomic(src_page, KM_USER1);
3725 else
3726 src_kaddr = dst_kaddr;
3727
3728 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3729 kunmap_atomic(dst_kaddr, KM_USER0);
3730 if (dst_page != src_page)
3731 kunmap_atomic(src_kaddr, KM_USER1);
3732}
3733
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
		       "len %lu dst len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}

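/*
 * Overlap-safe counterpart of memcpy_extent_buffer().  When the destination
 * starts below the source the copy can run forwards and is handed off to
 * memcpy_extent_buffer(); otherwise the range is walked backwards, page by
 * page, through move_pages().
 *
 * Illustrative call, e.g. shifting data down inside a leaf (hypothetical
 * names and offsets, not taken from this file):
 *
 *	memmove_extent_buffer(leaf, dst_off, src_off, nr_bytes);
 */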
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
		       "len %lu dst len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}

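/*
 * Called when the page cache wants to drop @page: look up the extent buffer
 * that starts at this page and free it if nothing else is using it.
 * Returns 1 if the buffer was released (or no buffer was found), 0 if it is
 * still referenced or dirty and must be kept.
 */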
int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
{
	u64 start = page_offset(page);
	struct extent_buffer *eb;
	int ret = 1;
	unsigned long i;
	unsigned long num_pages;

	spin_lock(&tree->buffer_lock);
	eb = buffer_search(tree, start);
	if (!eb)
		goto out;

	if (atomic_read(&eb->refs) > 1) {
		ret = 0;
		goto out;
	}
	if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
		ret = 0;
		goto out;
	}
	/* at this point we can safely release the extent buffer */
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++)
		page_cache_release(extent_buffer_page(eb, i));
	rb_erase(&eb->rb_node, &tree->buffer);
	__free_extent_buffer(eb);
out:
	spin_unlock(&tree->buffer_lock);
	return ret;
}