/* fs/btrfs/extent_io.c */
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#ifdef LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;
};

int __init extent_io_init(void)
{
	extent_state_cache = btrfs_cache_create("extent_state",
					    sizeof(struct extent_state), 0,
					    NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = btrfs_cache_create("extent_buffers",
					    sizeof(struct extent_buffer), 0,
					    NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n",
		       state->start, state->end, state->state, state->tree,
		       atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk("buffer leak start %Lu len %lu refs %d\n",
		       eb->start, eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping, gfp_t mask)
{
	tree->state.rb_node = NULL;
	tree->buffer.rb_node = NULL;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}
EXPORT_SYMBOL(extent_io_tree_init);
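
/*
 * Illustrative sketch (not part of the original file): how a filesystem
 * typically initializes an extent_io_tree at inode setup time.  In btrfs
 * this happens in inode.c against BTRFS_I(inode)->io_tree; the function
 * name example_init_io_tree() below is hypothetical.
 */
#if 0
static void example_init_io_tree(struct inode *inode)
{
	/* the tree tracks per-byte-range state for this mapping */
	extent_io_tree_init(&BTRFS_I(inode)->io_tree,
			    inode->i_mapping, GFP_NOFS);
}
#endif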

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#ifdef LEAK_DEBUG
	unsigned long flags;
#endif

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
#ifdef LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
#ifdef LEAK_DEBUG
		unsigned long flags;
#endif
		WARN_ON(state->tree);
#ifdef LEAK_DEBUG
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
#endif
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);
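
/*
 * Illustrative sketch: extent_state objects are reference counted, so a
 * caller that wants to touch a state after dropping the tree lock takes a
 * reference first and releases it with free_extent_state(), which only
 * frees the struct when the last reference goes away.  This mirrors the
 * pattern wait_extent_bit() uses later in this file;
 * example_hold_state() is a hypothetical name.
 */
#if 0
static void example_hold_state(struct extent_state *state)
{
	atomic_inc(&state->refs);	/* pin while the lock is dropped */
	/* ... drop tree->lock, sleep or work on the state ... */
	free_extent_state(state);	/* drop our reference */
}
#endif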

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}

static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
						u64 offset,
						struct rb_node *node)
{
	struct rb_root *root = &tree->buffer;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_buffer *eb;

	while (*p) {
		parent = *p;
		eb = rb_entry(parent, struct extent_buffer, rb_node);

		if (offset < eb->start)
			p = &(*p)->rb_left;
		else if (offset > eb->start)
			p = &(*p)->rb_right;
		else
			return eb;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
					   u64 offset)
{
	struct rb_root *root = &tree->buffer;
	struct rb_node *n = root->rb_node;
	struct extent_buffer *eb;

	while (n) {
		eb = rb_entry(n, struct extent_buffer, rb_node);
		if (offset < eb->start)
			n = n->rb_left;
		else if (offset > eb->start)
			n = n->rb_right;
		else
			return eb;
	}
	return NULL;
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set in their state
 * field are not merged, because the end_io handlers need to be able to
 * do operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->tree = NULL;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}
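
/*
 * Worked example (illustrative): with 4K pages, if the tree holds
 * [0, 4095] and [4096, 8191] and both records carry exactly
 * EXTENT_UPTODATE, merging leaves a single [0, 8191] record.  If either
 * record also carried a bit in EXTENT_IOBITS (e.g. EXTENT_LOCKED or
 * EXTENT_WRITEBACK), no merge would happen, so an in-flight end_io
 * handler keeps pointing at a stable record.
 */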

static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state,
			 unsigned long bits)
{
	if (tree->ops && tree->ops->set_bit_hook) {
		tree->ops->set_bit_hook(tree->mapping->host, state->start,
					state->end, state->state, bits);
	}
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned long bits)
{
	if (tree->ops && tree->ops->clear_bit_hook) {
		tree->ops->clear_bit_hook(tree->mapping->host, state->start,
					  state->end, state->state, bits);
	}
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	if (bits & EXTENT_DIRTY)
		tree->dirty_bytes += end - start + 1;
	set_state_cb(tree, state, bits);
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end,
		       prealloc->start, prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}
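
/*
 * Worked example (illustrative): splitting a record [0, 8191] at
 * split == 4096 leaves 'prealloc' owning [0, 4095] and 'orig' owning
 * [4096, 8191], both carrying the original state bits.  The caller
 * allocated 'prealloc' up front precisely so the split cannot fail
 * with -ENOMEM while the tree lock is held.
 */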

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;

	if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->tree) {
			clear_state_cb(tree, state, state->state);
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	spin_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	spin_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);
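
/*
 * Illustrative sketch: invalidate/truncate-style callers pass delete == 1
 * so the whole range is dropped from the tree no matter which bits are
 * set, and wake == 1 so anyone sleeping on a state in the range is
 * kicked.  example_drop_range() is a hypothetical name; the bit mask is
 * one plausible combination, not a fixed API.
 */
#if 0
static void example_drop_range(struct extent_io_tree *tree,
			       u64 start, u64 end)
{
	/* clear the bits, wake sleepers, remove the records entirely */
	clear_extent_bit(tree, start, end,
			 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_LOCKED,
			 1, 1, GFP_NOFS);
}
#endif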

static int wait_on_state(struct extent_io_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&tree->lock);
	schedule();
	spin_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			spin_unlock_irq(&tree->lock);
			cond_resched();
			spin_lock_irq(&tree->lock);
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int bits)
{
	if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	set_state_cb(tree, state, bits);
	state->state |= bits;
}

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		set_state_bits(tree, state, bits);
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, bits);
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set_state_bits(tree, prealloc, bits);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);
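
/*
 * Illustrative sketch: exclusive == 1 turns set_extent_bit() into a
 * try-lock.  If any part of the range already has the bit, -EEXIST comes
 * back along with the start of the conflicting range, which is exactly
 * how lock_extent() below loops until the range is free.
 * example_try_lock_range() is a hypothetical name.
 */
#if 0
static int example_try_lock_range(struct extent_io_tree *tree,
				  u64 start, u64 end)
{
	u64 failed_start;
	int err;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
			     &failed_start, GFP_NOFS);
	if (err == -EEXIST) {
		/* someone holds a range starting at failed_start */
		return -EAGAIN;
	}
	return err;
}
#endif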

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_ordered);

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_ordered);

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;

	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end,
					EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);
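
/*
 * Illustrative sketch: a typical read path locks the byte range covering
 * one page, does its work, and unlocks; compare __extent_read_full_page()
 * later in this file.  example_with_locked_page() is a hypothetical name.
 */
#if 0
static void example_with_locked_page(struct extent_io_tree *tree,
				     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	lock_extent(tree, start, end, GFP_NOFS);	/* may sleep */
	/* ... read or update the range ... */
	unlock_extent(tree, start, end, GFP_NOFS);	/* wakes waiters */
}
#endif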

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);
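
/*
 * Illustrative sketch: walking every dirty range starting from offset 0
 * by repeatedly asking for the next matching range at end + 1, the same
 * loop shape used by callers elsewhere in btrfs.  example_walk_dirty()
 * is a hypothetical name.
 */
#if 0
static void example_walk_dirty(struct extent_io_tree *tree)
{
	u64 start = 0;
	u64 found_start;
	u64 found_end;

	while (!find_first_extent_bit(tree, start, &found_start,
				      &found_end, EXTENT_DIRTY)) {
		/* ... process [found_start, found_end] ... */
		start = found_end + 1;
	}
}
#endif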

struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
						 u64 start, int bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}
EXPORT_SYMBOL(find_first_extent_bit_state);

u64 find_lock_delalloc_range(struct extent_io_tree *tree,
			     u64 *start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found && !(state->state & EXTENT_BOUNDARY)) {
			struct extent_state *prev_state;
			struct rb_node *prev_node = node;
			while (1) {
				prev_node = rb_prev(prev_node);
				if (!prev_node)
					break;
				prev_state = rb_entry(prev_node,
						      struct extent_state,
						      rb_node);
				if ((prev_state->end + 1 != state->start) ||
				    !(prev_state->state & EXTENT_DELALLOC))
					break;
				if ((cur_start - prev_state->start) * 2 >
				     max_bytes)
					break;
				state = prev_state;
				node = prev_node;
			}
		}
		if (state->state & EXTENT_LOCKED) {
			DEFINE_WAIT(wait);
			atomic_inc(&state->refs);
			prepare_to_wait(&state->wq, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&tree->lock);
			schedule();
			spin_lock_irq(&tree->lock);
			finish_wait(&state->wq, &wait);
			free_extent_state(state);
			goto search_again;
		}
		set_state_cb(tree, state, EXTENT_LOCKED);
		state->state |= EXTENT_LOCKED;
		if (!found)
			*start = state->start;
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return found;
}

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	int found = 0;

	if (search_end <= cur_start) {
		printk("search_end %Lu start %Lu\n", search_end, cur_start);
		WARN_ON(1);
		return 0;
	}

	spin_lock_irq(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (state->end >= cur_start && (state->state & bits)) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = state->start;
				found = 1;
			}
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return total_bytes;
}
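
/*
 * Illustrative sketch: counting how many delalloc bytes sit in the first
 * megabyte of a file.  Note the fast path above: asking for EXTENT_DIRTY
 * over the whole tree (start == 0) just returns tree->dirty_bytes without
 * walking anything.  example_count_delalloc() is a hypothetical name.
 */
#if 0
static u64 example_count_delalloc(struct extent_io_tree *tree)
{
	u64 start = 0;

	/* count at most the first 1MB worth of EXTENT_DELALLOC bytes */
	return count_range_bits(tree, &start, 1024 * 1024 - 1,
				(u64)-1, EXTENT_DELALLOC);
}
#endif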

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}
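
/*
 * Illustrative sketch: the 'private' slot stores one u64 per state record
 * whose start exactly matches the lookup offset.  btrfs uses it to stash
 * the expected checksum of a block so the read end_io hook can verify it;
 * the names below are hypothetical.
 */
#if 0
static int example_stash_csum(struct extent_io_tree *tree,
			      u64 start, u32 csum)
{
	u64 private;
	int ret;

	ret = set_state_private(tree, start, (u64)csum);
	if (ret)
		return ret;	/* no state record starts at 'start' */
	ret = get_state_private(tree, start, &private);
	WARN_ON(!ret && (u32)private != csum);
	return ret;
}
#endif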

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;
	unsigned long flags;

	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);
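
/*
 * Worked example (illustrative) of the 'filled' flag: with records
 * [0, 4095] = EXTENT_UPTODATE and [4096, 8191] = 0 in the tree,
 *
 *   test_range_bit(tree, 0, 8191, EXTENT_UPTODATE, 1) returns 0
 *     (a gap, or any record without the bit, breaks the "every byte" test)
 *   test_range_bit(tree, 0, 8191, EXTENT_UPTODATE, 0) returns 1
 *     (at least one byte in the range has the bit)
 *
 * check_page_uptodate() below relies on the filled == 1 form.
 */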

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_io_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_io_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_io_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_writepage(struct bio *bio, int err)
{
	int uptodate = err == 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (tree->ops && tree->ops->writepage_end_io_hook) {
			ret = tree->ops->writepage_end_io_hook(page, start,
						       end, NULL, uptodate);
			if (ret)
				uptodate = 0;
		}

		if (!uptodate && tree->ops &&
		    tree->ops->writepage_io_failed_hook) {
			ret = tree->ops->writepage_io_failed_hook(bio, page,
							 start, end, NULL);
			if (ret == 0) {
				uptodate = (err == 0);
				continue;
			}
		}

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}

		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_readpage(struct bio *bio, int err)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end,
							      NULL);
			if (ret)
				uptodate = 0;
		}
		if (!uptodate && tree->ops &&
		    tree->ops->readpage_io_failed_hook) {
			ret = tree->ops->readpage_io_failed_hook(bio, page,
							 start, end, NULL);
			if (ret == 0) {
				uptodate =
					test_bit(BIO_UPTODATE, &bio->bi_flags);
				continue;
			}
		}

		if (uptodate)
			set_extent_uptodate(tree, start, end,
					    GFP_ATOMIC);
		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page) {
			if (uptodate) {
				SetPageUptodate(page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			unlock_page(page);
		} else {
			if (uptodate) {
				check_page_uptodate(tree, page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			check_page_locked(tree, page);
		}
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
static void end_bio_extent_preparewrite(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

static struct bio *
extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		 gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_size = 0;
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

static int submit_one_bio(int rw, struct bio *bio, int mirror_num)
{
	int ret = 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;
	struct extent_io_tree *tree = bio->bi_private;
	struct rb_node *node;
	struct extent_state *state;
	u64 start;
	u64 end;

	start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
	end = start + bvec->bv_len - 1;

	spin_lock_irq(&tree->lock);
	node = __etree_search(tree, start, NULL, NULL);
	BUG_ON(!node);
	state = rb_entry(node, struct extent_state, rb_node);
	while (state->end < end) {
		node = rb_next(node);
		state = rb_entry(node, struct extent_state, rb_node);
	}
	BUG_ON(state->end != end);
	spin_unlock_irq(&tree->lock);

	bio->bi_private = NULL;

	bio_get(bio);

	if (tree->ops && tree->ops->submit_bio_hook)
		tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
					   mirror_num);
	else
		submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}

static int submit_extent_page(int rw, struct extent_io_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func,
			      int mirror_num)
{
	int ret = 0;
	struct bio *bio;
	int nr;

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
		    (tree->ops && tree->ops->merge_bio_hook &&
		     tree->ops->merge_bio_hook(page, offset, size, bio)) ||
		    bio_add_page(bio, page, size, offset) < size) {
			ret = submit_one_bio(rw, bio, mirror_num);
			bio = NULL;
		} else {
			return 0;
		}
	}
	nr = bio_get_nr_vecs(bdev);
	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
	if (!bio) {
		printk("failed to allocate bio nr %d\n", nr);
	}

	bio_add_page(bio, page, size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	if (bio_ret) {
		*bio_ret = bio;
	} else {
		ret = submit_one_bio(rw, bio, mirror_num);
	}

	return ret;
}

void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		page_cache_get(page);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
	}
}

void set_page_extent_head(struct page *page, unsigned long len)
{
	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
}
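
/*
 * Worked example (illustrative): page->private doubles as a tag.  Plain
 * data pages get the constant EXTENT_PAGE_PRIVATE, while the first page
 * of an extent buffer gets EXTENT_PAGE_PRIVATE_FIRST_PAGE with the
 * buffer length packed into the upper bits.  With len == 16384, the
 * stored value is EXTENT_PAGE_PRIVATE_FIRST_PAGE | (16384 << 2); the
 * flag constants occupy the low two bits (see extent_io.h), so the
 * length can be recovered later as page_private(page) >> 2.
 */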

/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
static int __extent_read_full_page(struct extent_io_tree *tree,
				   struct page *page,
				   get_extent_t *get_extent,
				   struct bio **bio, int mirror_num)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			char *userpage;
			iosize = PAGE_CACHE_SIZE - page_offset;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur,
				end - cur + 1, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}
		extent_offset = cur - em->start;
		if (extent_map_end(em) <= cur) {
			printk("bad mapping em [%Lu %Lu] cur %Lu\n",
			       em->start, extent_map_end(em), cur);
		}
		BUG_ON(extent_map_end(em) <= cur);
		if (end < cur) {
			printk("2bad mapping end %Lu cur %Lu\n", end, cur);
		}
		BUG_ON(end < cur);

		iosize = min(extent_map_end(em) - cur, end - cur + 1);
		cur_end = min(extent_map_end(em) - 1, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			char *userpage;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);

			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			check_page_uptodate(tree, page);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* we have an inline extent but it didn't get marked up
		 * to date.  Error out
		 */
		if (block_start == EXTENT_MAP_INLINE) {
			SetPageError(page);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
			pnr -= page->index;
			ret = submit_extent_page(READ, tree, page,
					 sector, iosize, page_offset,
					 bdev, bio, pnr,
					 end_bio_extent_readpage, mirror_num);
			nr++;
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}

int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct bio *bio = NULL;
	int ret;

	ret = __extent_read_full_page(tree, page, get_extent, &bio, 0);
	if (bio)
		submit_one_bio(READ, bio, 0);
	return ret;
}
EXPORT_SYMBOL(extent_read_full_page);
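
/*
 * Illustrative note: __extent_read_full_page() takes a struct bio ** so
 * consecutive pages can keep appending to one bio, and only the caller
 * finally submits it.  A multi-page reader would follow the same shape
 * as this hypothetical sketch.
 */
#if 0
static void example_read_two_pages(struct extent_io_tree *tree,
				   struct page *p1, struct page *p2,
				   get_extent_t *get_extent)
{
	struct bio *bio = NULL;

	__extent_read_full_page(tree, p1, get_extent, &bio, 0);
	__extent_read_full_page(tree, p2, get_extent, &bio, 0);
	if (bio)		/* submit whatever was batched up */
		submit_one_bio(READ, bio, 0);
}
#endif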
1848
1849/*
1850 * the writepage semantics are similar to regular writepage. extent
1851 * records are inserted to lock ranges in the tree, and as dirty areas
1852 * are found, they are marked writeback. Then the lock bits are removed
1853 * and the end_io handler clears the writeback ranges
1854 */
1855static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1856 void *data)
1857{
1858 struct inode *inode = page->mapping->host;
1859 struct extent_page_data *epd = data;
1860 struct extent_io_tree *tree = epd->tree;
1861 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1862 u64 delalloc_start;
1863 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1864 u64 end;
1865 u64 cur = start;
1866 u64 extent_offset;
1867 u64 last_byte = i_size_read(inode);
1868 u64 block_start;
1869 u64 iosize;
e6dcd2dc 1870 u64 unlock_start;
d1310b2e
CM
1871 sector_t sector;
1872 struct extent_map *em;
1873 struct block_device *bdev;
1874 int ret;
1875 int nr = 0;
7f3c74fb 1876 size_t pg_offset = 0;
d1310b2e
CM
1877 size_t blocksize;
1878 loff_t i_size = i_size_read(inode);
1879 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1880 u64 nr_delalloc;
1881 u64 delalloc_end;
1882
1883 WARN_ON(!PageLocked(page));
7f3c74fb 1884 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
211c17f5 1885 if (page->index > end_index ||
7f3c74fb 1886 (page->index == end_index && !pg_offset)) {
211c17f5 1887 page->mapping->a_ops->invalidatepage(page, 0);
d1310b2e
CM
1888 unlock_page(page);
1889 return 0;
1890 }
1891
1892 if (page->index == end_index) {
1893 char *userpage;
1894
d1310b2e 1895 userpage = kmap_atomic(page, KM_USER0);
7f3c74fb
CM
1896 memset(userpage + pg_offset, 0,
1897 PAGE_CACHE_SIZE - pg_offset);
d1310b2e 1898 kunmap_atomic(userpage, KM_USER0);
211c17f5 1899 flush_dcache_page(page);
d1310b2e 1900 }
7f3c74fb 1901 pg_offset = 0;
d1310b2e
CM
1902
1903 set_page_extent_mapped(page);
1904
1905 delalloc_start = start;
1906 delalloc_end = 0;
1907 while(delalloc_end < page_end) {
1908 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1909 &delalloc_end,
1910 128 * 1024 * 1024);
1911 if (nr_delalloc == 0) {
1912 delalloc_start = delalloc_end + 1;
1913 continue;
1914 }
1915 tree->ops->fill_delalloc(inode, delalloc_start,
1916 delalloc_end);
1917 clear_extent_bit(tree, delalloc_start,
1918 delalloc_end,
1919 EXTENT_LOCKED | EXTENT_DELALLOC,
1920 1, 0, GFP_NOFS);
1921 delalloc_start = delalloc_end + 1;
1922 }
1923 lock_extent(tree, start, page_end, GFP_NOFS);
e6dcd2dc 1924 unlock_start = start;
d1310b2e 1925
247e743c
CM
1926 if (tree->ops && tree->ops->writepage_start_hook) {
1927 ret = tree->ops->writepage_start_hook(page, start, page_end);
1928 if (ret == -EAGAIN) {
1929 unlock_extent(tree, start, page_end, GFP_NOFS);
1930 redirty_page_for_writepage(wbc, page);
1931 unlock_page(page);
1932 return 0;
1933 }
1934 }
1935
d1310b2e
CM
1936 end = page_end;
1937 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1938 printk("found delalloc bits after lock_extent\n");
1939 }
1940
1941 if (last_byte <= start) {
1942 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
e6dcd2dc
CM
1943 unlock_extent(tree, start, page_end, GFP_NOFS);
1944 if (tree->ops && tree->ops->writepage_end_io_hook)
1945 tree->ops->writepage_end_io_hook(page, start,
1946 page_end, NULL, 1);
1947 unlock_start = page_end + 1;
d1310b2e
CM
1948 goto done;
1949 }
1950
1951 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1952 blocksize = inode->i_sb->s_blocksize;
1953
1954 while (cur <= end) {
1955 if (cur >= last_byte) {
1956 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
e6dcd2dc
CM
1957 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
1958 if (tree->ops && tree->ops->writepage_end_io_hook)
1959 tree->ops->writepage_end_io_hook(page, cur,
1960 page_end, NULL, 1);
1961 unlock_start = page_end + 1;
1962 break;
1963 }
7f3c74fb 1964 em = epd->get_extent(inode, page, pg_offset, cur,
1965 end - cur + 1, 1);
1966 if (IS_ERR(em) || !em) {
1967 SetPageError(page);
1968 break;
1969 }
1970
1971 extent_offset = cur - em->start;
1972 BUG_ON(extent_map_end(em) <= cur);
1973 BUG_ON(end < cur);
1974 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1975 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1976 sector = (em->block_start + extent_offset) >> 9;
1977 bdev = em->bdev;
1978 block_start = em->block_start;
1979 free_extent_map(em);
1980 em = NULL;
1981
1982 if (block_start == EXTENT_MAP_HOLE ||
1983 block_start == EXTENT_MAP_INLINE) {
1984 clear_extent_dirty(tree, cur,
1985 cur + iosize - 1, GFP_NOFS);
1986
 1987                 unlock_extent(tree, unlock_start, cur + iosize - 1,
1988 GFP_NOFS);
7f3c74fb 1989
1990 if (tree->ops && tree->ops->writepage_end_io_hook)
1991 tree->ops->writepage_end_io_hook(page, cur,
1992 cur + iosize - 1,
1993 NULL, 1);
d1310b2e 1994 cur = cur + iosize;
7f3c74fb 1995 pg_offset += iosize;
e6dcd2dc 1996 unlock_start = cur;
1997 continue;
1998 }
1999
2000 /* leave this out until we have a page_mkwrite call */
2001 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2002 EXTENT_DIRTY, 0)) {
2003 cur = cur + iosize;
7f3c74fb 2004 pg_offset += iosize;
2005 continue;
2006 }
2007 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2008 if (tree->ops && tree->ops->writepage_io_hook) {
2009 ret = tree->ops->writepage_io_hook(page, cur,
2010 cur + iosize - 1);
2011 } else {
2012 ret = 0;
2013 }
1259ab75 2014 if (ret) {
d1310b2e 2015 SetPageError(page);
1259ab75 2016 } else {
d1310b2e 2017 unsigned long max_nr = end_index + 1;
7f3c74fb 2018
2019 set_range_writeback(tree, cur, cur + iosize - 1);
2020 if (!PageWriteback(page)) {
2021 printk("warning page %lu not writeback, "
2022 "cur %llu end %llu\n", page->index,
2023 (unsigned long long)cur,
2024 (unsigned long long)end);
2025 }
2026
2027 ret = submit_extent_page(WRITE, tree, page, sector,
7f3c74fb 2028 iosize, pg_offset, bdev,
d1310b2e 2029 &epd->bio, max_nr,
f188591e 2030 end_bio_extent_writepage, 0);
2031 if (ret)
2032 SetPageError(page);
2033 }
2034 cur = cur + iosize;
7f3c74fb 2035 pg_offset += iosize;
2036 nr++;
2037 }
2038done:
2039 if (nr == 0) {
2040 /* make sure the mapping tag for page dirty gets cleared */
2041 set_page_writeback(page);
2042 end_page_writeback(page);
2043 }
2044 if (unlock_start <= page_end)
2045 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2046 unlock_page(page);
2047 return 0;
2048}
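/*
 * Illustrative sketch (not part of the original file): the byte-range
 * arithmetic __extent_writepage() above relies on.  A page at page->index
 * covers the inclusive byte range computed here, and all extent state,
 * locking and writeback in this file is expressed in those offsets.
 */
static inline void example_page_byte_range(struct page *page,
					   u64 *start, u64 *end)
{
	*start = (u64)page->index << PAGE_CACHE_SHIFT;
	*end = *start + PAGE_CACHE_SIZE - 1;	/* inclusive last byte */
}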
2049
d1310b2e 2050/**
4bef0848 2051 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2052 * @mapping: address space structure to write
2053 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2054 * @writepage: function called for each page
2055 * @data: data passed to writepage function
2056 *
2057 * If a page is already under I/O, write_cache_pages() skips it, even
2058 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2059 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
 2060 * and msync() need to guarantee that all the data which was dirty at the time
 2061 * the call was made gets new I/O started against it. If wbc->sync_mode is
2062 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2063 * existing IO to complete.
2064 */
2065int extent_write_cache_pages(struct extent_io_tree *tree,
2066 struct address_space *mapping,
2067 struct writeback_control *wbc,
2068 writepage_t writepage, void *data)
2069{
2070 struct backing_dev_info *bdi = mapping->backing_dev_info;
2071 int ret = 0;
2072 int done = 0;
2073 struct pagevec pvec;
2074 int nr_pages;
2075 pgoff_t index;
2076 pgoff_t end; /* Inclusive */
2077 int scanned = 0;
2078 int range_whole = 0;
2079
2080 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2081 wbc->encountered_congestion = 1;
2082 return 0;
2083 }
2084
2085 pagevec_init(&pvec, 0);
2086 if (wbc->range_cyclic) {
2087 index = mapping->writeback_index; /* Start from prev offset */
2088 end = -1;
2089 } else {
2090 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2091 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2092 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2093 range_whole = 1;
2094 scanned = 1;
2095 }
2096retry:
2097 while (!done && (index <= end) &&
2098 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2099 PAGECACHE_TAG_DIRTY,
2100 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2101 unsigned i;
2102
2103 scanned = 1;
2104 for (i = 0; i < nr_pages; i++) {
2105 struct page *page = pvec.pages[i];
2106
2107 /*
2108 * At this point we hold neither mapping->tree_lock nor
2109 * lock on the page itself: the page may be truncated or
2110 * invalidated (changing page->mapping to NULL), or even
2111 * swizzled back from swapper_space to tmpfs file
2112 * mapping
2113 */
2114 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2115 tree->ops->write_cache_pages_lock_hook(page);
2116 else
2117 lock_page(page);
2118
2119 if (unlikely(page->mapping != mapping)) {
2120 unlock_page(page);
2121 continue;
2122 }
2123
2124 if (!wbc->range_cyclic && page->index > end) {
2125 done = 1;
2126 unlock_page(page);
2127 continue;
2128 }
2129
2130 if (wbc->sync_mode != WB_SYNC_NONE)
2131 wait_on_page_writeback(page);
2132
2133 if (PageWriteback(page) ||
2134 !clear_page_dirty_for_io(page)) {
2135 unlock_page(page);
2136 continue;
2137 }
2138
2139 ret = (*writepage)(page, wbc, data);
2140
2141 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2142 unlock_page(page);
2143 ret = 0;
2144 }
2145 if (ret || (--(wbc->nr_to_write) <= 0))
2146 done = 1;
2147 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2148 wbc->encountered_congestion = 1;
2149 done = 1;
2150 }
2151 }
2152 pagevec_release(&pvec);
2153 cond_resched();
2154 }
2155 if (!scanned && !done) {
2156 /*
2157 * We hit the last page and there is more work to be done: wrap
2158 * back to the start of the file
2159 */
2160 scanned = 1;
2161 index = 0;
2162 goto retry;
2163 }
2164 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2165 mapping->writeback_index = index;
2b1f55b0 2166
2167 if (wbc->range_cont)
2168 wbc->range_start = index << PAGE_CACHE_SHIFT;
2169 return ret;
2170}
4bef0848 2171EXPORT_SYMBOL(extent_write_cache_pages);
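/*
 * Usage sketch (illustrative, mirrors extent_writepages() below): a
 * data-integrity flush drives the walker with WB_SYNC_ALL so that pages
 * already under writeback are waited on, per the comment above.  The
 * helper name is hypothetical.
 */
static int example_sync_mapping(struct extent_io_tree *tree,
				struct address_space *mapping,
				get_extent_t *get_extent)
{
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	int ret;

	ret = extent_write_cache_pages(tree, mapping, &wbc,
				       __extent_writepage, &epd);
	if (epd.bio)
		submit_one_bio(WRITE, epd.bio, 0);
	return ret;
}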
2172
2173int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2174 get_extent_t *get_extent,
2175 struct writeback_control *wbc)
2176{
2177 int ret;
2178 struct address_space *mapping = page->mapping;
2179 struct extent_page_data epd = {
2180 .bio = NULL,
2181 .tree = tree,
2182 .get_extent = get_extent,
2183 };
2184 struct writeback_control wbc_writepages = {
2185 .bdi = wbc->bdi,
2186 .sync_mode = WB_SYNC_NONE,
2187 .older_than_this = NULL,
2188 .nr_to_write = 64,
2189 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2190 .range_end = (loff_t)-1,
2191 };
2192
2193
2194 ret = __extent_writepage(page, wbc, &epd);
2195
2196 extent_write_cache_pages(tree, mapping, &wbc_writepages,
2197 __extent_writepage, &epd);
d1310b2e 2198 if (epd.bio) {
f188591e 2199 submit_one_bio(WRITE, epd.bio, 0);
2200 }
2201 return ret;
2202}
2203EXPORT_SYMBOL(extent_write_full_page);
2204
2205
2206int extent_writepages(struct extent_io_tree *tree,
2207 struct address_space *mapping,
2208 get_extent_t *get_extent,
2209 struct writeback_control *wbc)
2210{
2211 int ret = 0;
2212 struct extent_page_data epd = {
2213 .bio = NULL,
2214 .tree = tree,
2215 .get_extent = get_extent,
2216 };
2217
2218 ret = extent_write_cache_pages(tree, mapping, wbc,
2219 __extent_writepage, &epd);
d1310b2e 2220 if (epd.bio) {
f188591e 2221 submit_one_bio(WRITE, epd.bio, 0);
2222 }
2223 return ret;
2224}
2225EXPORT_SYMBOL(extent_writepages);
2226
2227int extent_readpages(struct extent_io_tree *tree,
2228 struct address_space *mapping,
2229 struct list_head *pages, unsigned nr_pages,
2230 get_extent_t get_extent)
2231{
2232 struct bio *bio = NULL;
2233 unsigned page_idx;
2234 struct pagevec pvec;
2235
2236 pagevec_init(&pvec, 0);
2237 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2238 struct page *page = list_entry(pages->prev, struct page, lru);
2239
2240 prefetchw(&page->flags);
2241 list_del(&page->lru);
2242 /*
2243 * what we want to do here is call add_to_page_cache_lru,
2244 * but that isn't exported, so we reproduce it here
2245 */
2246 if (!add_to_page_cache(page, mapping,
2247 page->index, GFP_KERNEL)) {
2248
2249 /* open coding of lru_cache_add, also not exported */
2250 page_cache_get(page);
2251 if (!pagevec_add(&pvec, page))
2252 __pagevec_lru_add(&pvec);
2253 __extent_read_full_page(tree, page, get_extent,
2254 &bio, 0);
2255 }
2256 page_cache_release(page);
2257 }
2258 if (pagevec_count(&pvec))
2259 __pagevec_lru_add(&pvec);
2260 BUG_ON(!list_empty(pages));
2261 if (bio)
f188591e 2262 submit_one_bio(READ, bio, 0);
2263 return 0;
2264}
2265EXPORT_SYMBOL(extent_readpages);
2266
2267/*
2268 * basic invalidatepage code, this waits on any locked or writeback
2269 * ranges corresponding to the page, and then deletes any extent state
2270 * records from the tree
2271 */
2272int extent_invalidatepage(struct extent_io_tree *tree,
2273 struct page *page, unsigned long offset)
2274{
2275 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2276 u64 end = start + PAGE_CACHE_SIZE - 1;
2277 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2278
 2279         start += (offset + blocksize - 1) & ~(blocksize - 1);
2280 if (start > end)
2281 return 0;
2282
2283 lock_extent(tree, start, end, GFP_NOFS);
2284 wait_on_extent_writeback(tree, start, end);
2285 clear_extent_bit(tree, start, end,
2286 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2287 1, 1, GFP_NOFS);
2288 return 0;
2289}
2290EXPORT_SYMBOL(extent_invalidatepage);
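/*
 * Worked example (illustrative) for the rounding above: with a 4K block
 * size, invalidating from offset 1 rounds start up past the whole first
 * block:
 *	start += (1 + 4095) & ~4095;	-> start advances 4096
 * so a partially invalidated block keeps its extent state, while an
 * offset of 0 leaves start unchanged and the whole page's state is
 * cleared.
 */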
2291
2292/*
2293 * simple commit_write call, set_range_dirty is used to mark both
2294 * the pages and the extent records as dirty
2295 */
2296int extent_commit_write(struct extent_io_tree *tree,
2297 struct inode *inode, struct page *page,
2298 unsigned from, unsigned to)
2299{
2300 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2301
2302 set_page_extent_mapped(page);
2303 set_page_dirty(page);
2304
2305 if (pos > inode->i_size) {
2306 i_size_write(inode, pos);
2307 mark_inode_dirty(inode);
2308 }
2309 return 0;
2310}
2311EXPORT_SYMBOL(extent_commit_write);
2312
2313int extent_prepare_write(struct extent_io_tree *tree,
2314 struct inode *inode, struct page *page,
2315 unsigned from, unsigned to, get_extent_t *get_extent)
2316{
2317 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2318 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2319 u64 block_start;
2320 u64 orig_block_start;
2321 u64 block_end;
2322 u64 cur_end;
2323 struct extent_map *em;
2324 unsigned blocksize = 1 << inode->i_blkbits;
2325 size_t page_offset = 0;
2326 size_t block_off_start;
2327 size_t block_off_end;
2328 int err = 0;
2329 int iocount = 0;
2330 int ret = 0;
2331 int isnew;
2332
2333 set_page_extent_mapped(page);
2334
2335 block_start = (page_start + from) & ~((u64)blocksize - 1);
2336 block_end = (page_start + to - 1) | (blocksize - 1);
2337 orig_block_start = block_start;
2338
2339 lock_extent(tree, page_start, page_end, GFP_NOFS);
2340 while(block_start <= block_end) {
2341 em = get_extent(inode, page, page_offset, block_start,
2342 block_end - block_start + 1, 1);
2343 if (IS_ERR(em) || !em) {
2344 goto err;
2345 }
2346 cur_end = min(block_end, extent_map_end(em) - 1);
2347 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2348 block_off_end = block_off_start + blocksize;
2349 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2350
2351 if (!PageUptodate(page) && isnew &&
2352 (block_off_end > to || block_off_start < from)) {
2353 void *kaddr;
2354
2355 kaddr = kmap_atomic(page, KM_USER0);
2356 if (block_off_end > to)
2357 memset(kaddr + to, 0, block_off_end - to);
2358 if (block_off_start < from)
2359 memset(kaddr + block_off_start, 0,
2360 from - block_off_start);
2361 flush_dcache_page(page);
2362 kunmap_atomic(kaddr, KM_USER0);
2363 }
2364 if ((em->block_start != EXTENT_MAP_HOLE &&
2365 em->block_start != EXTENT_MAP_INLINE) &&
2366 !isnew && !PageUptodate(page) &&
2367 (block_off_end > to || block_off_start < from) &&
2368 !test_range_bit(tree, block_start, cur_end,
2369 EXTENT_UPTODATE, 1)) {
2370 u64 sector;
2371 u64 extent_offset = block_start - em->start;
2372 size_t iosize;
2373 sector = (em->block_start + extent_offset) >> 9;
2374 iosize = (cur_end - block_start + blocksize) &
2375 ~((u64)blocksize - 1);
2376 /*
2377 * we've already got the extent locked, but we
2378 * need to split the state such that our end_bio
2379 * handler can clear the lock.
2380 */
2381 set_extent_bit(tree, block_start,
2382 block_start + iosize - 1,
2383 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2384 ret = submit_extent_page(READ, tree, page,
2385 sector, iosize, page_offset, em->bdev,
2386 NULL, 1,
f188591e 2387 end_bio_extent_preparewrite, 0);
2388 iocount++;
2389 block_start = block_start + iosize;
2390 } else {
2391 set_extent_uptodate(tree, block_start, cur_end,
2392 GFP_NOFS);
2393 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2394 block_start = cur_end + 1;
2395 }
2396 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2397 free_extent_map(em);
2398 }
2399 if (iocount) {
2400 wait_extent_bit(tree, orig_block_start,
2401 block_end, EXTENT_LOCKED);
2402 }
2403 check_page_uptodate(tree, page);
2404err:
2405 /* FIXME, zero out newly allocated blocks on error */
2406 return err;
2407}
2408EXPORT_SYMBOL(extent_prepare_write);
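/*
 * Worked example (illustrative) for the rounding in extent_prepare_write():
 * with a 4K block size and a write covering bytes [100, 300) of the page,
 *	block_start = (page_start + 100) & ~4095  == page_start
 *	block_end   = (page_start + 299) |  4095  == page_start + 4095
 * i.e. the whole containing block is locked and, if needed, read or
 * zeroed so the sub-block write lands in an up-to-date block.
 */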
2409
2410/*
2411 * a helper for releasepage, this tests for areas of the page that
2412 * are locked or under IO and drops the related state bits if it is safe
2413 * to drop the page.
2414 */
2415int try_release_extent_state(struct extent_map_tree *map,
2416 struct extent_io_tree *tree, struct page *page,
2417 gfp_t mask)
2418{
2419 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2420 u64 end = start + PAGE_CACHE_SIZE - 1;
2421 int ret = 1;
2422
2423 if (test_range_bit(tree, start, end,
2424 EXTENT_IOBITS | EXTENT_ORDERED, 0))
2425 ret = 0;
2426 else {
2427 if ((mask & GFP_NOFS) == GFP_NOFS)
2428 mask = GFP_NOFS;
2429 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2430 1, 1, mask);
2431 }
2432 return ret;
2433}
2434EXPORT_SYMBOL(try_release_extent_state);
2435
2436/*
2437 * a helper for releasepage. As long as there are no locked extents
2438 * in the range corresponding to the page, both state records and extent
2439 * map records are removed
2440 */
2441int try_release_extent_mapping(struct extent_map_tree *map,
2442 struct extent_io_tree *tree, struct page *page,
2443 gfp_t mask)
2444{
2445 struct extent_map *em;
2446 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2447 u64 end = start + PAGE_CACHE_SIZE - 1;
7b13b7b1 2448
2449 if ((mask & __GFP_WAIT) &&
2450 page->mapping->host->i_size > 16 * 1024 * 1024) {
39b5637f 2451 u64 len;
70dec807 2452 while (start <= end) {
39b5637f 2453 len = end - start + 1;
70dec807 2454 spin_lock(&map->lock);
39b5637f 2455 em = lookup_extent_mapping(map, start, len);
2456 if (!em || IS_ERR(em)) {
2457 spin_unlock(&map->lock);
2458 break;
2459 }
2460 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2461 em->start != start) {
2462 spin_unlock(&map->lock);
2463 free_extent_map(em);
2464 break;
2465 }
2466 if (!test_range_bit(tree, em->start,
2467 extent_map_end(em) - 1,
2468 EXTENT_LOCKED, 0)) {
2469 remove_extent_mapping(map, em);
2470 /* once for the rb tree */
2471 free_extent_map(em);
2472 }
2473 start = extent_map_end(em);
d1310b2e 2474 spin_unlock(&map->lock);
2475
2476 /* once for us */
2477 free_extent_map(em);
2478 }
d1310b2e 2479 }
7b13b7b1 2480 return try_release_extent_state(map, tree, page, mask);
2481}
2482EXPORT_SYMBOL(try_release_extent_mapping);
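/*
 * Note (illustrative): extent maps are only shot down here for callers
 * that are allowed to block (__GFP_WAIT) and for files over 16M; smaller
 * files keep their cached mappings and only the extent state bits are
 * examined, via try_release_extent_state() above.
 */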
2483
2484sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2485 get_extent_t *get_extent)
2486{
2487 struct inode *inode = mapping->host;
2488 u64 start = iblock << inode->i_blkbits;
2489 sector_t sector = 0;
2490 struct extent_map *em;
2491
2492 em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2493 if (!em || IS_ERR(em))
2494 return 0;
2495
2496 if (em->block_start == EXTENT_MAP_INLINE ||
2497 em->block_start == EXTENT_MAP_HOLE)
2498 goto out;
2499
2500 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2501out:
2502 free_extent_map(em);
2503 return sector;
2504}
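/*
 * Note (illustrative): extent_bmap() follows the classic ->bmap()
 * convention where a return of 0 means "no physical block" -- both holes
 * and inline extents report sector 0, so callers cannot FIBMAP data that
 * lives inside the tree.
 */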
2505
2506static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2507 unsigned long i)
2508{
2509 struct page *p;
2510 struct address_space *mapping;
2511
2512 if (i == 0)
2513 return eb->first_page;
2514 i += eb->start >> PAGE_CACHE_SHIFT;
2515 mapping = eb->first_page->mapping;
2516 if (!mapping)
2517 return NULL;
2518
2519 /*
2520 * extent_buffer_page is only called after pinning the page
2521 * by increasing the reference count. So we know the page must
2522 * be in the radix tree.
2523 */
0ee0fda0 2524 rcu_read_lock();
d1310b2e 2525 p = radix_tree_lookup(&mapping->page_tree, i);
0ee0fda0 2526 rcu_read_unlock();
2b1f55b0 2527
2528 return p;
2529}
2530
6af118ce 2531static inline unsigned long num_extent_pages(u64 start, u64 len)
728131d8 2532{
2533 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2534 (start >> PAGE_CACHE_SHIFT);
2535}
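/*
 * Worked example (illustrative): with 4K pages, a 16K buffer that starts
 * 10K into the mapping spans five pages, not four, because the end is
 * rounded up and the start is rounded down:
 *	num_extent_pages(10240, 16384)
 *		= ((10240 + 16384 + 4095) >> 12) - (10240 >> 12)
 *		= 7 - 2 = 5
 */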
2536
2537static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2538 u64 start,
2539 unsigned long len,
2540 gfp_t mask)
2541{
2542 struct extent_buffer *eb = NULL;
4bef0848 2543#ifdef LEAK_DEBUG
2d2ae547 2544 unsigned long flags;
4bef0848 2545#endif
d1310b2e 2546
d1310b2e 2547 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2548 eb->start = start;
2549 eb->len = len;
a61e6f29 2550 mutex_init(&eb->mutex);
4bef0848 2551#ifdef LEAK_DEBUG
2552 spin_lock_irqsave(&leak_lock, flags);
2553 list_add(&eb->leak_list, &buffers);
2554 spin_unlock_irqrestore(&leak_lock, flags);
4bef0848 2555#endif
2556 atomic_set(&eb->refs, 1);
2557
2558 return eb;
2559}
2560
2561static void __free_extent_buffer(struct extent_buffer *eb)
2562{
4bef0848 2563#ifdef LEAK_DEBUG
2564 unsigned long flags;
2565 spin_lock_irqsave(&leak_lock, flags);
2566 list_del(&eb->leak_list);
2567 spin_unlock_irqrestore(&leak_lock, flags);
4bef0848 2568#endif
2569 kmem_cache_free(extent_buffer_cache, eb);
2570}
2571
2572struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2573 u64 start, unsigned long len,
2574 struct page *page0,
2575 gfp_t mask)
2576{
2577 unsigned long num_pages = num_extent_pages(start, len);
2578 unsigned long i;
2579 unsigned long index = start >> PAGE_CACHE_SHIFT;
2580 struct extent_buffer *eb;
6af118ce 2581 struct extent_buffer *exists = NULL;
2582 struct page *p;
2583 struct address_space *mapping = tree->mapping;
2584 int uptodate = 1;
2585
2586 spin_lock(&tree->buffer_lock);
2587 eb = buffer_search(tree, start);
2588 if (eb) {
2589 atomic_inc(&eb->refs);
2590 spin_unlock(&tree->buffer_lock);
0f9dd46c 2591 mark_page_accessed(eb->first_page);
2592 return eb;
2593 }
2594 spin_unlock(&tree->buffer_lock);
2595
d1310b2e 2596 eb = __alloc_extent_buffer(tree, start, len, mask);
2b114d1d 2597 if (!eb)
2598 return NULL;
2599
2600 if (page0) {
2601 eb->first_page = page0;
2602 i = 1;
2603 index++;
2604 page_cache_get(page0);
2605 mark_page_accessed(page0);
2606 set_page_extent_mapped(page0);
d1310b2e 2607 set_page_extent_head(page0, len);
f188591e 2608 uptodate = PageUptodate(page0);
2609 } else {
2610 i = 0;
2611 }
2612 for (; i < num_pages; i++, index++) {
2613 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2614 if (!p) {
2615 WARN_ON(1);
6af118ce 2616 goto free_eb;
2617 }
2618 set_page_extent_mapped(p);
2619 mark_page_accessed(p);
2620 if (i == 0) {
2621 eb->first_page = p;
2622 set_page_extent_head(p, len);
2623 } else {
2624 set_page_private(p, EXTENT_PAGE_PRIVATE);
2625 }
2626 if (!PageUptodate(p))
2627 uptodate = 0;
2628 unlock_page(p);
2629 }
2630 if (uptodate)
2631 eb->flags |= EXTENT_UPTODATE;
2632 eb->flags |= EXTENT_BUFFER_FILLED;
2633
2634 spin_lock(&tree->buffer_lock);
2635 exists = buffer_tree_insert(tree, start, &eb->rb_node);
2636 if (exists) {
2637 /* add one reference for the caller */
2638 atomic_inc(&exists->refs);
2639 spin_unlock(&tree->buffer_lock);
2640 goto free_eb;
2641 }
2642 spin_unlock(&tree->buffer_lock);
2643
2644 /* add one reference for the tree */
2645 atomic_inc(&eb->refs);
2646 return eb;
2647
6af118ce 2648free_eb:
d1310b2e 2649 if (!atomic_dec_and_test(&eb->refs))
2650 return exists;
2651 for (index = 1; index < i; index++)
d1310b2e 2652 page_cache_release(extent_buffer_page(eb, index));
6af118ce 2653 page_cache_release(extent_buffer_page(eb, 0));
d1310b2e 2654 __free_extent_buffer(eb);
6af118ce 2655 return exists;
2656}
2657EXPORT_SYMBOL(alloc_extent_buffer);
2658
2659struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
2660 u64 start, unsigned long len,
2661 gfp_t mask)
2662{
d1310b2e 2663 struct extent_buffer *eb;
d1310b2e 2664
2665 spin_lock(&tree->buffer_lock);
2666 eb = buffer_search(tree, start);
2667 if (eb)
2668 atomic_inc(&eb->refs);
2669 spin_unlock(&tree->buffer_lock);
d1310b2e 2670
2671 if (eb)
2672 mark_page_accessed(eb->first_page);
2673
d1310b2e 2674 return eb;
2675}
2676EXPORT_SYMBOL(find_extent_buffer);
2677
2678void free_extent_buffer(struct extent_buffer *eb)
2679{
2680 if (!eb)
2681 return;
2682
2683 if (!atomic_dec_and_test(&eb->refs))
2684 return;
2685
6af118ce 2686 WARN_ON(1);
2687}
2688EXPORT_SYMBOL(free_extent_buffer);
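/*
 * Lifecycle sketch (illustrative): look a buffer up and fall back to
 * allocating it; the helper name is hypothetical.  The reference taken
 * here is dropped with free_extent_buffer(), while the tree's own
 * reference is only released via try_release_extent_buffer() below.
 */
static struct extent_buffer *example_get_buffer(struct extent_io_tree *tree,
						u64 start,
						unsigned long blocksize)
{
	struct extent_buffer *eb;

	eb = find_extent_buffer(tree, start, blocksize, GFP_NOFS);
	if (!eb)
		eb = alloc_extent_buffer(tree, start, blocksize,
					 NULL, GFP_NOFS);
	return eb;
}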
2689
2690int clear_extent_buffer_dirty(struct extent_io_tree *tree,
2691 struct extent_buffer *eb)
2692{
2693 int set;
2694 unsigned long i;
2695 unsigned long num_pages;
2696 struct page *page;
2697
2698 u64 start = eb->start;
2699 u64 end = start + eb->len - 1;
2700
2701 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2702 num_pages = num_extent_pages(eb->start, eb->len);
2703
2704 for (i = 0; i < num_pages; i++) {
2705 page = extent_buffer_page(eb, i);
a61e6f29 2706 lock_page(page);
2707 if (i == 0)
2708 set_page_extent_head(page, eb->len);
2709 else
2710 set_page_private(page, EXTENT_PAGE_PRIVATE);
2711
2712 /*
2713 * if we're on the last page or the first page and the
2714 * block isn't aligned on a page boundary, do extra checks
 2715          * to make sure we don't clean a page that is partially dirty
2716 */
2717 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2718 ((i == num_pages - 1) &&
2719 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2720 start = (u64)page->index << PAGE_CACHE_SHIFT;
2721 end = start + PAGE_CACHE_SIZE - 1;
2722 if (test_range_bit(tree, start, end,
2723 EXTENT_DIRTY, 0)) {
a61e6f29 2724 unlock_page(page);
2725 continue;
2726 }
2727 }
2728 clear_page_dirty_for_io(page);
0ee0fda0 2729 spin_lock_irq(&page->mapping->tree_lock);
2730 if (!PageDirty(page)) {
2731 radix_tree_tag_clear(&page->mapping->page_tree,
2732 page_index(page),
2733 PAGECACHE_TAG_DIRTY);
2734 }
0ee0fda0 2735 spin_unlock_irq(&page->mapping->tree_lock);
a61e6f29 2736 unlock_page(page);
2737 }
2738 return 0;
2739}
2740EXPORT_SYMBOL(clear_extent_buffer_dirty);
2741
2742int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
2743 struct extent_buffer *eb)
2744{
2745 return wait_on_extent_writeback(tree, eb->start,
2746 eb->start + eb->len - 1);
2747}
2748EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2749
2750int set_extent_buffer_dirty(struct extent_io_tree *tree,
2751 struct extent_buffer *eb)
2752{
2753 unsigned long i;
2754 unsigned long num_pages;
2755
2756 num_pages = num_extent_pages(eb->start, eb->len);
2757 for (i = 0; i < num_pages; i++) {
2758 struct page *page = extent_buffer_page(eb, i);
2759 /* writepage may need to do something special for the
2760 * first page, we have to make sure page->private is
2761 * properly set. releasepage may drop page->private
2762 * on us if the page isn't already dirty.
2763 */
a1b32a59 2764 lock_page(page);
d1310b2e 2765 if (i == 0) {
2766 set_page_extent_head(page, eb->len);
2767 } else if (PagePrivate(page) &&
2768 page->private != EXTENT_PAGE_PRIVATE) {
d1310b2e 2769 set_page_extent_mapped(page);
2770 }
2771 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2772 set_extent_dirty(tree, page_offset(page),
 2773                                 page_offset(page) + PAGE_CACHE_SIZE - 1,
2774 GFP_NOFS);
2775 unlock_page(page);
d1310b2e 2776 }
a1b32a59 2777 return 0;
2778}
2779EXPORT_SYMBOL(set_extent_buffer_dirty);
2780
2781int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
2782 struct extent_buffer *eb)
2783{
2784 unsigned long i;
2785 struct page *page;
2786 unsigned long num_pages;
2787
2788 num_pages = num_extent_pages(eb->start, eb->len);
2789 eb->flags &= ~EXTENT_UPTODATE;
2790
2791 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2792 GFP_NOFS);
2793 for (i = 0; i < num_pages; i++) {
2794 page = extent_buffer_page(eb, i);
2795 if (page)
2796 ClearPageUptodate(page);
2797 }
2798 return 0;
2799}
2800
2801int set_extent_buffer_uptodate(struct extent_io_tree *tree,
2802 struct extent_buffer *eb)
2803{
2804 unsigned long i;
2805 struct page *page;
2806 unsigned long num_pages;
2807
2808 num_pages = num_extent_pages(eb->start, eb->len);
2809
2810 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2811 GFP_NOFS);
2812 for (i = 0; i < num_pages; i++) {
2813 page = extent_buffer_page(eb, i);
2814 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2815 ((i == num_pages - 1) &&
2816 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2817 check_page_uptodate(tree, page);
2818 continue;
2819 }
2820 SetPageUptodate(page);
2821 }
2822 return 0;
2823}
2824EXPORT_SYMBOL(set_extent_buffer_uptodate);
2825
2826int extent_range_uptodate(struct extent_io_tree *tree,
2827 u64 start, u64 end)
2828{
2829 struct page *page;
2830 int ret;
2831 int pg_uptodate = 1;
2832 int uptodate;
2833 unsigned long index;
2834
2835 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
2836 if (ret)
2837 return 1;
2838 while(start <= end) {
2839 index = start >> PAGE_CACHE_SHIFT;
 2840                 page = find_get_page(tree->mapping, index);
 2841                 uptodate = page ? PageUptodate(page) : 0;
 2842                 if (page) page_cache_release(page);
2843 if (!uptodate) {
2844 pg_uptodate = 0;
2845 break;
2846 }
2847 start += PAGE_CACHE_SIZE;
2848 }
2849 return pg_uptodate;
2850}
2851
d1310b2e 2852int extent_buffer_uptodate(struct extent_io_tree *tree,
ce9adaa5 2853 struct extent_buffer *eb)
d1310b2e 2854{
728131d8 2855 int ret = 0;
2856 unsigned long num_pages;
2857 unsigned long i;
2858 struct page *page;
2859 int pg_uptodate = 1;
2860
d1310b2e 2861 if (eb->flags & EXTENT_UPTODATE)
4235298e 2862 return 1;
728131d8 2863
4235298e 2864 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
d1310b2e 2865 EXTENT_UPTODATE, 1);
2866 if (ret)
2867 return ret;
2868
2869 num_pages = num_extent_pages(eb->start, eb->len);
2870 for (i = 0; i < num_pages; i++) {
2871 page = extent_buffer_page(eb, i);
2872 if (!PageUptodate(page)) {
2873 pg_uptodate = 0;
2874 break;
2875 }
2876 }
4235298e 2877 return pg_uptodate;
2878}
2879EXPORT_SYMBOL(extent_buffer_uptodate);
2880
2881int read_extent_buffer_pages(struct extent_io_tree *tree,
2882 struct extent_buffer *eb,
a86c12c7 2883 u64 start, int wait,
f188591e 2884 get_extent_t *get_extent, int mirror_num)
2885{
2886 unsigned long i;
2887 unsigned long start_i;
2888 struct page *page;
2889 int err;
2890 int ret = 0;
2891 int locked_pages = 0;
2892 int all_uptodate = 1;
2893 int inc_all_pages = 0;
d1310b2e 2894 unsigned long num_pages;
2895 struct bio *bio = NULL;
2896
2897 if (eb->flags & EXTENT_UPTODATE)
2898 return 0;
2899
ce9adaa5 2900 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2901 EXTENT_UPTODATE, 1)) {
2902 return 0;
2903 }
2904
2905 if (start) {
2906 WARN_ON(start < eb->start);
2907 start_i = (start >> PAGE_CACHE_SHIFT) -
2908 (eb->start >> PAGE_CACHE_SHIFT);
2909 } else {
2910 start_i = 0;
2911 }
2912
2913 num_pages = num_extent_pages(eb->start, eb->len);
2914 for (i = start_i; i < num_pages; i++) {
2915 page = extent_buffer_page(eb, i);
d1310b2e 2916 if (!wait) {
2db04966 2917 if (!trylock_page(page))
ce9adaa5 2918 goto unlock_exit;
2919 } else {
2920 lock_page(page);
2921 }
ce9adaa5 2922 locked_pages++;
d1310b2e 2923 if (!PageUptodate(page)) {
2924 all_uptodate = 0;
2925 }
2926 }
2927 if (all_uptodate) {
2928 if (start_i == 0)
2929 eb->flags |= EXTENT_UPTODATE;
2930 if (ret) {
2931 printk("all up to date but ret is %d\n", ret);
2932 }
2933 goto unlock_exit;
2934 }
2935
2936 for (i = start_i; i < num_pages; i++) {
2937 page = extent_buffer_page(eb, i);
2938 if (inc_all_pages)
2939 page_cache_get(page);
2940 if (!PageUptodate(page)) {
2941 if (start_i == 0)
2942 inc_all_pages = 1;
f188591e 2943 ClearPageError(page);
a86c12c7 2944 err = __extent_read_full_page(tree, page,
2945 get_extent, &bio,
2946 mirror_num);
2947 if (err) {
2948 ret = err;
a1b32a59 2949 printk("err %d from __extent_read_full_page\n", ret);
2950 }
2951 } else {
2952 unlock_page(page);
2953 }
2954 }
2955
a86c12c7 2956 if (bio)
f188591e 2957 submit_one_bio(READ, bio, mirror_num);
a86c12c7 2958
d1310b2e 2959 if (ret || !wait) {
2960 if (ret)
2961 printk("ret %d wait %d returning\n", ret, wait);
2962 return ret;
2963 }
2964 for (i = start_i; i < num_pages; i++) {
2965 page = extent_buffer_page(eb, i);
2966 wait_on_page_locked(page);
2967 if (!PageUptodate(page)) {
a1b32a59 2968 printk("page not uptodate after wait_on_page_locked\n");
2969 ret = -EIO;
2970 }
2971 }
2972 if (!ret)
2973 eb->flags |= EXTENT_UPTODATE;
2974 return ret;
2975
2976unlock_exit:
2977 i = start_i;
2978 while(locked_pages > 0) {
2979 page = extent_buffer_page(eb, i);
2980 i++;
2981 unlock_page(page);
2982 locked_pages--;
2983 }
2984 return ret;
2985}
2986EXPORT_SYMBOL(read_extent_buffer_pages);
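/*
 * Usage sketch (illustrative): populate a buffer from disk and wait for
 * it.  get_extent is an assumed callback, and mirror_num 0 lets the
 * lower layers pick a copy; the helper name is hypothetical.
 */
static int example_read_eb(struct extent_io_tree *tree,
			   struct extent_buffer *eb,
			   get_extent_t *get_extent)
{
	int err;

	err = read_extent_buffer_pages(tree, eb, 0, 1, get_extent, 0);
	if (err)
		return err;
	return extent_buffer_uptodate(tree, eb) ? 0 : -EIO;
}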
2987
2988void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2989 unsigned long start,
2990 unsigned long len)
2991{
2992 size_t cur;
2993 size_t offset;
2994 struct page *page;
2995 char *kaddr;
2996 char *dst = (char *)dstv;
2997 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2998 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2999
3000 WARN_ON(start > eb->len);
3001 WARN_ON(start + len > eb->start + eb->len);
3002
3003 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3004
3005 while(len > 0) {
3006 page = extent_buffer_page(eb, i);
3007
3008 cur = min(len, (PAGE_CACHE_SIZE - offset));
3009 kaddr = kmap_atomic(page, KM_USER1);
3010 memcpy(dst, kaddr + offset, cur);
3011 kunmap_atomic(kaddr, KM_USER1);
3012
3013 dst += cur;
3014 len -= cur;
3015 offset = 0;
3016 i++;
3017 }
3018}
3019EXPORT_SYMBOL(read_extent_buffer);
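/*
 * Usage sketch (illustrative): copy a fixed-size, little-endian header
 * out of a buffer.  The struct is hypothetical; the point is that
 * read_extent_buffer() hides the underlying page boundaries entirely.
 */
struct example_header {
	__le64 generation;
	__le64 owner;
};

static u64 example_read_generation(struct extent_buffer *eb)
{
	struct example_header hdr;

	read_extent_buffer(eb, &hdr, 0, sizeof(hdr));
	return le64_to_cpu(hdr.generation);
}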
3020
3021int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3022 unsigned long min_len, char **token, char **map,
3023 unsigned long *map_start,
3024 unsigned long *map_len, int km)
3025{
3026 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3027 char *kaddr;
3028 struct page *p;
3029 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3030 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3031 unsigned long end_i = (start_offset + start + min_len - 1) >>
3032 PAGE_CACHE_SHIFT;
3033
3034 if (i != end_i)
3035 return -EINVAL;
3036
3037 if (i == 0) {
3038 offset = start_offset;
3039 *map_start = 0;
3040 } else {
3041 offset = 0;
3042 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3043 }
3044 if (start + min_len > eb->len) {
3045printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
3046 WARN_ON(1);
3047 }
3048
3049 p = extent_buffer_page(eb, i);
3050 kaddr = kmap_atomic(p, km);
3051 *token = kaddr;
3052 *map = kaddr + offset;
3053 *map_len = PAGE_CACHE_SIZE - offset;
3054 return 0;
3055}
3056EXPORT_SYMBOL(map_private_extent_buffer);
3057
3058int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3059 unsigned long min_len,
3060 char **token, char **map,
3061 unsigned long *map_start,
3062 unsigned long *map_len, int km)
3063{
3064 int err;
3065 int save = 0;
3066 if (eb->map_token) {
3067 unmap_extent_buffer(eb, eb->map_token, km);
3068 eb->map_token = NULL;
3069 save = 1;
3070 }
3071 err = map_private_extent_buffer(eb, start, min_len, token, map,
3072 map_start, map_len, km);
3073 if (!err && save) {
3074 eb->map_token = *token;
3075 eb->kaddr = *map;
3076 eb->map_start = *map_start;
3077 eb->map_len = *map_len;
3078 }
3079 return err;
3080}
3081EXPORT_SYMBOL(map_extent_buffer);
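/*
 * Usage sketch (illustrative): the map/use/unmap pattern.  The mapping
 * only succeeds while [start, start + min_len) stays inside one page, so
 * callers keep min_len small or fall back to read_extent_buffer().
 * Assumes start < eb->len; the helper name is hypothetical.
 */
static int example_peek_byte(struct extent_buffer *eb, unsigned long start,
			     u8 *out)
{
	char *token;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;

	err = map_extent_buffer(eb, start, 1, &token, &kaddr,
				&map_start, &map_len, KM_USER0);
	if (err)
		return err;
	*out = *(u8 *)(kaddr + (start - map_start));
	unmap_extent_buffer(eb, token, KM_USER0);
	return 0;
}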
3082
3083void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3084{
3085 kunmap_atomic(token, km);
3086}
3087EXPORT_SYMBOL(unmap_extent_buffer);
3088
3089int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3090 unsigned long start,
3091 unsigned long len)
3092{
3093 size_t cur;
3094 size_t offset;
3095 struct page *page;
3096 char *kaddr;
3097 char *ptr = (char *)ptrv;
3098 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3099 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3100 int ret = 0;
3101
3102 WARN_ON(start > eb->len);
3103 WARN_ON(start + len > eb->start + eb->len);
3104
3105 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3106
3107 while(len > 0) {
3108 page = extent_buffer_page(eb, i);
3109
3110 cur = min(len, (PAGE_CACHE_SIZE - offset));
3111
3112 kaddr = kmap_atomic(page, KM_USER0);
3113 ret = memcmp(ptr, kaddr + offset, cur);
3114 kunmap_atomic(kaddr, KM_USER0);
3115 if (ret)
3116 break;
3117
3118 ptr += cur;
3119 len -= cur;
3120 offset = 0;
3121 i++;
3122 }
3123 return ret;
3124}
3125EXPORT_SYMBOL(memcmp_extent_buffer);
3126
3127void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3128 unsigned long start, unsigned long len)
3129{
3130 size_t cur;
3131 size_t offset;
3132 struct page *page;
3133 char *kaddr;
3134 char *src = (char *)srcv;
3135 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3136 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3137
3138 WARN_ON(start > eb->len);
3139 WARN_ON(start + len > eb->start + eb->len);
3140
3141 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3142
3143 while(len > 0) {
3144 page = extent_buffer_page(eb, i);
3145 WARN_ON(!PageUptodate(page));
3146
3147 cur = min(len, PAGE_CACHE_SIZE - offset);
3148 kaddr = kmap_atomic(page, KM_USER1);
3149 memcpy(kaddr + offset, src, cur);
3150 kunmap_atomic(kaddr, KM_USER1);
3151
3152 src += cur;
3153 len -= cur;
3154 offset = 0;
3155 i++;
3156 }
3157}
3158EXPORT_SYMBOL(write_extent_buffer);
3159
3160void memset_extent_buffer(struct extent_buffer *eb, char c,
3161 unsigned long start, unsigned long len)
3162{
3163 size_t cur;
3164 size_t offset;
3165 struct page *page;
3166 char *kaddr;
3167 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3168 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3169
3170 WARN_ON(start > eb->len);
3171 WARN_ON(start + len > eb->start + eb->len);
3172
3173 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3174
3175 while(len > 0) {
3176 page = extent_buffer_page(eb, i);
3177 WARN_ON(!PageUptodate(page));
3178
3179 cur = min(len, PAGE_CACHE_SIZE - offset);
3180 kaddr = kmap_atomic(page, KM_USER0);
3181 memset(kaddr + offset, c, cur);
3182 kunmap_atomic(kaddr, KM_USER0);
3183
3184 len -= cur;
3185 offset = 0;
3186 i++;
3187 }
3188}
3189EXPORT_SYMBOL(memset_extent_buffer);
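/*
 * Usage sketch (illustrative): zero a buffer and stamp a little-endian
 * field with the page-aware helpers above.  Assumes the buffer's pages
 * are already marked uptodate (both helpers WARN otherwise); the magic
 * value is arbitrary.
 */
static void example_init_eb(struct extent_buffer *eb)
{
	__le64 magic = cpu_to_le64(0x6578616d706c65ULL);

	memset_extent_buffer(eb, 0, 0, eb->len);
	write_extent_buffer(eb, &magic, 0, sizeof(magic));
}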
3190
3191void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3192 unsigned long dst_offset, unsigned long src_offset,
3193 unsigned long len)
3194{
3195 u64 dst_len = dst->len;
3196 size_t cur;
3197 size_t offset;
3198 struct page *page;
3199 char *kaddr;
3200 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3201 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3202
3203 WARN_ON(src->len != dst_len);
3204
3205 offset = (start_offset + dst_offset) &
3206 ((unsigned long)PAGE_CACHE_SIZE - 1);
3207
3208 while(len > 0) {
3209 page = extent_buffer_page(dst, i);
3210 WARN_ON(!PageUptodate(page));
3211
3212 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3213
3214 kaddr = kmap_atomic(page, KM_USER0);
3215 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3216 kunmap_atomic(kaddr, KM_USER0);
3217
3218 src_offset += cur;
3219 len -= cur;
3220 offset = 0;
3221 i++;
3222 }
3223}
3224EXPORT_SYMBOL(copy_extent_buffer);
3225
3226static void move_pages(struct page *dst_page, struct page *src_page,
3227 unsigned long dst_off, unsigned long src_off,
3228 unsigned long len)
3229{
3230 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3231 if (dst_page == src_page) {
3232 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3233 } else {
3234 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3235 char *p = dst_kaddr + dst_off + len;
3236 char *s = src_kaddr + src_off + len;
3237
3238 while (len--)
3239 *--p = *--s;
3240
3241 kunmap_atomic(src_kaddr, KM_USER1);
3242 }
3243 kunmap_atomic(dst_kaddr, KM_USER0);
3244}
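/*
 * Note (illustrative) on move_pages() above: the same-page case defers to
 * memmove(), and the distinct-page case copies byte-by-byte from the
 * tail, a conservative mirror of the end-first walk that
 * memmove_extent_buffer() below uses for overlapping ranges.
 */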
3245
3246static void copy_pages(struct page *dst_page, struct page *src_page,
3247 unsigned long dst_off, unsigned long src_off,
3248 unsigned long len)
3249{
3250 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3251 char *src_kaddr;
3252
3253 if (dst_page != src_page)
3254 src_kaddr = kmap_atomic(src_page, KM_USER1);
3255 else
3256 src_kaddr = dst_kaddr;
3257
3258 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3259 kunmap_atomic(dst_kaddr, KM_USER0);
3260 if (dst_page != src_page)
3261 kunmap_atomic(src_kaddr, KM_USER1);
3262}
3263
3264void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3265 unsigned long src_offset, unsigned long len)
3266{
3267 size_t cur;
3268 size_t dst_off_in_page;
3269 size_t src_off_in_page;
3270 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3271 unsigned long dst_i;
3272 unsigned long src_i;
3273
3274 if (src_offset + len > dst->len) {
3275 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3276 src_offset, len, dst->len);
3277 BUG_ON(1);
3278 }
3279 if (dst_offset + len > dst->len) {
3280 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3281 dst_offset, len, dst->len);
3282 BUG_ON(1);
3283 }
3284
3285 while(len > 0) {
3286 dst_off_in_page = (start_offset + dst_offset) &
3287 ((unsigned long)PAGE_CACHE_SIZE - 1);
3288 src_off_in_page = (start_offset + src_offset) &
3289 ((unsigned long)PAGE_CACHE_SIZE - 1);
3290
3291 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3292 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3293
3294 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3295 src_off_in_page));
3296 cur = min_t(unsigned long, cur,
3297 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3298
3299 copy_pages(extent_buffer_page(dst, dst_i),
3300 extent_buffer_page(dst, src_i),
3301 dst_off_in_page, src_off_in_page, cur);
3302
3303 src_offset += cur;
3304 dst_offset += cur;
3305 len -= cur;
3306 }
3307}
3308EXPORT_SYMBOL(memcpy_extent_buffer);
3309
3310void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3311 unsigned long src_offset, unsigned long len)
3312{
3313 size_t cur;
3314 size_t dst_off_in_page;
3315 size_t src_off_in_page;
3316 unsigned long dst_end = dst_offset + len - 1;
3317 unsigned long src_end = src_offset + len - 1;
3318 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3319 unsigned long dst_i;
3320 unsigned long src_i;
3321
3322 if (src_offset + len > dst->len) {
3323 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3324 src_offset, len, dst->len);
3325 BUG_ON(1);
3326 }
3327 if (dst_offset + len > dst->len) {
3328 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3329 dst_offset, len, dst->len);
3330 BUG_ON(1);
3331 }
3332 if (dst_offset < src_offset) {
3333 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3334 return;
3335 }
3336 while(len > 0) {
3337 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3338 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3339
3340 dst_off_in_page = (start_offset + dst_end) &
3341 ((unsigned long)PAGE_CACHE_SIZE - 1);
3342 src_off_in_page = (start_offset + src_end) &
3343 ((unsigned long)PAGE_CACHE_SIZE - 1);
3344
3345 cur = min_t(unsigned long, len, src_off_in_page + 1);
3346 cur = min(cur, dst_off_in_page + 1);
3347 move_pages(extent_buffer_page(dst, dst_i),
3348 extent_buffer_page(dst, src_i),
3349 dst_off_in_page - cur + 1,
3350 src_off_in_page - cur + 1, cur);
3351
3352 dst_end -= cur;
3353 src_end -= cur;
3354 len -= cur;
3355 }
3356}
3357EXPORT_SYMBOL(memmove_extent_buffer);
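/*
 * Worked example (illustrative): memmove_extent_buffer(eb, 4, 0, 8) moves
 * bytes [0, 8) onto [4, 12).  Because dst > src, the loop above starts at
 * the tail (src byte 7 -> dst byte 11), so overlapping bytes are read
 * before they are overwritten -- the memmove() guarantee, but across the
 * buffer's backing pages.
 */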
3358
3359int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3360{
3361 u64 start = page_offset(page);
3362 struct extent_buffer *eb;
3363 int ret = 1;
3364 unsigned long i;
3365 unsigned long num_pages;
3366
3367 spin_lock(&tree->buffer_lock);
3368 eb = buffer_search(tree, start);
3369 if (!eb)
3370 goto out;
3371
3372 if (atomic_read(&eb->refs) > 1) {
3373 ret = 0;
3374 goto out;
3375 }
3376 /* at this point we can safely release the extent buffer */
3377 num_pages = num_extent_pages(eb->start, eb->len);
3378 for (i = 0; i < num_pages; i++)
3379 page_cache_release(extent_buffer_page(eb, i));
3380 rb_erase(&eb->rb_node, &tree->buffer);
3381 __free_extent_buffer(eb);
3382out:
3383 spin_unlock(&tree->buffer_lock);
3384 return ret;
3385}
3386EXPORT_SYMBOL(try_release_extent_buffer);