#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>

#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
                                      unsigned long extra_flags,
                                      void (*ctor)(void *, struct kmem_cache *,
                                                   unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
#define BUFFER_LRU_MAX 64

struct tree_entry {
        u64 start;
        u64 end;
        int in_tree;
        struct rb_node rb_node;
};

struct extent_page_data {
        struct bio *bio;
        struct extent_map_tree *tree;
        get_extent_t *get_extent;
};

void __init extent_map_init(void)
{
        extent_map_cache = btrfs_cache_create("extent_map",
                                              sizeof(struct extent_map), 0,
                                              NULL);
        extent_state_cache = btrfs_cache_create("extent_state",
                                                sizeof(struct extent_state), 0,
                                                NULL);
        extent_buffer_cache = btrfs_cache_create("extent_buffers",
                                                 sizeof(struct extent_buffer), 0,
                                                 NULL);
}

void __exit extent_map_exit(void)
{
        struct extent_state *state;

        while (!list_empty(&states)) {
                state = list_entry(states.next, struct extent_state, list);
                printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n",
                       state->start, state->end, state->state, state->in_tree,
                       atomic_read(&state->refs));
                list_del(&state->list);
                kmem_cache_free(extent_state_cache, state);
        }

        if (extent_map_cache)
                kmem_cache_destroy(extent_map_cache);
        if (extent_state_cache)
                kmem_cache_destroy(extent_state_cache);
        if (extent_buffer_cache)
                kmem_cache_destroy(extent_buffer_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
                          struct address_space *mapping, gfp_t mask)
{
        tree->map.rb_node = NULL;
        tree->state.rb_node = NULL;
        tree->ops = NULL;
        rwlock_init(&tree->lock);
        spin_lock_init(&tree->lru_lock);
        tree->mapping = mapping;
        INIT_LIST_HEAD(&tree->buffer_lru);
        tree->lru_size = 0;
}
EXPORT_SYMBOL(extent_map_tree_init);

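/*
 * Usage sketch (assumed caller, mirroring how btrfs would embed a tree in
 * its per-inode info; 'ei' and 'extent_tree' are illustrative names only):
 *
 *      extent_map_tree_init(&ei->extent_tree, inode->i_mapping, GFP_NOFS);
 *
 * Note that the mask argument is not used by the function body above.
 */
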
void extent_map_tree_empty_lru(struct extent_map_tree *tree)
{
        struct extent_buffer *eb;

        while (!list_empty(&tree->buffer_lru)) {
                eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
                                lru);
                list_del_init(&eb->lru);
                free_extent_buffer(eb);
        }
}
EXPORT_SYMBOL(extent_map_tree_empty_lru);

struct extent_map *alloc_extent_map(gfp_t mask)
{
        struct extent_map *em;

        em = kmem_cache_alloc(extent_map_cache, mask);
        if (!em || IS_ERR(em))
                return em;
        em->in_tree = 0;
        atomic_set(&em->refs, 1);
        return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
        if (!em)
                return;
        if (atomic_dec_and_test(&em->refs)) {
                WARN_ON(em->in_tree);
                kmem_cache_free(extent_map_cache, em);
        }
}
EXPORT_SYMBOL(free_extent_map);

struct extent_state *alloc_extent_state(gfp_t mask)
{
        struct extent_state *state;
        unsigned long flags;

        state = kmem_cache_alloc(extent_state_cache, mask);
        if (!state || IS_ERR(state))
                return state;
        state->state = 0;
        state->in_tree = 0;
        state->private = 0;

        spin_lock_irqsave(&state_lock, flags);
        list_add(&state->list, &states);
        spin_unlock_irqrestore(&state_lock, flags);

        atomic_set(&state->refs, 1);
        init_waitqueue_head(&state->wq);
        return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
        unsigned long flags;

        if (!state)
                return;
        if (atomic_dec_and_test(&state->refs)) {
                WARN_ON(state->in_tree);
                spin_lock_irqsave(&state_lock, flags);
                list_del(&state->list);
                spin_unlock_irqrestore(&state_lock, flags);
                kmem_cache_free(extent_state_cache, state);
        }
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct tree_entry *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct tree_entry, rb_node);

                if (offset < entry->start)
                        p = &(*p)->rb_left;
                else if (offset > entry->end)
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        entry = rb_entry(node, struct tree_entry, rb_node);
        entry->in_tree = 1;
        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct tree_entry *entry;
        struct tree_entry *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct tree_entry, rb_node);
                prev = n;
                prev_entry = entry;

                if (offset < entry->start)
                        n = n->rb_left;
                else if (offset > entry->end)
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;
        while (prev && offset > prev_entry->end) {
                prev = rb_next(prev);
                prev_entry = rb_entry(prev, struct tree_entry, rb_node);
        }
        *prev_ret = prev;
        return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
        struct rb_node *prev;
        struct rb_node *ret;

        ret = __tree_search(root, offset, &prev);
        if (!ret)
                return prev;
        return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
        struct rb_node *node;
        struct tree_entry *entry;

        node = __tree_search(root, offset, NULL);
        if (!node)
                return -ENOENT;
        entry = rb_entry(node, struct tree_entry, rb_node);
        entry->in_tree = 0;
        rb_erase(node, root);
        return 0;
}

/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
                       struct extent_map *em)
{
        int ret = 0;
        struct extent_map *prev = NULL;
        struct rb_node *rb;

        write_lock_irq(&tree->lock);
        rb = tree_insert(&tree->map, em->end, &em->rb_node);
        if (rb) {
                prev = rb_entry(rb, struct extent_map, rb_node);
                printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
                       prev->start, prev->end, em->start, em->end);
                ret = -EEXIST;
                goto out;
        }
        atomic_inc(&em->refs);
        if (em->start != 0) {
                rb = rb_prev(&em->rb_node);
                if (rb)
                        prev = rb_entry(rb, struct extent_map, rb_node);
                if (prev && prev->end + 1 == em->start &&
                    ((em->block_start == EXTENT_MAP_HOLE &&
                      prev->block_start == EXTENT_MAP_HOLE) ||
                     (em->block_start == EXTENT_MAP_INLINE &&
                      prev->block_start == EXTENT_MAP_INLINE) ||
                     (em->block_start == EXTENT_MAP_DELALLOC &&
                      prev->block_start == EXTENT_MAP_DELALLOC) ||
                     (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
                      em->block_start == prev->block_end + 1))) {
                        em->start = prev->start;
                        em->block_start = prev->block_start;
                        rb_erase(&prev->rb_node, &tree->map);
                        prev->in_tree = 0;
                        free_extent_map(prev);
                }
        }
out:
        write_unlock_irq(&tree->lock);
        return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

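/*
 * Usage sketch: a typical caller allocates a map, fills in the range, and
 * drops its own reference once the tree holds one.  'start', 'len',
 * 'block_start' and 'bdev' are assumed inputs; the fields match how this
 * file reads struct extent_map:
 *
 *      struct extent_map *em = alloc_extent_map(GFP_NOFS);
 *
 *      if (!em)
 *              return -ENOMEM;
 *      em->start = start;
 *      em->end = start + len - 1;
 *      em->block_start = block_start;
 *      em->block_end = block_start + len - 1;
 *      em->bdev = bdev;
 *      ret = add_extent_mapping(tree, em);
 *      free_extent_map(em);
 */
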
/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
                                         u64 start, u64 end)
{
        struct extent_map *em;
        struct rb_node *rb_node;

        read_lock_irq(&tree->lock);
        rb_node = tree_search(&tree->map, start);
        if (!rb_node) {
                em = NULL;
                goto out;
        }
        if (IS_ERR(rb_node)) {
                em = ERR_PTR(PTR_ERR(rb_node));
                goto out;
        }
        em = rb_entry(rb_node, struct extent_map, rb_node);
        if (em->end < start || em->start > end) {
                em = NULL;
                goto out;
        }
        atomic_inc(&em->refs);
out:
        read_unlock_irq(&tree->lock);
        return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);

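/*
 * Usage sketch: only the first intersecting map comes back, so a caller
 * that needs [start, end] fully covered must check the bounds itself and
 * possibly loop ('whole_range_mapped' is an illustrative variable):
 *
 *      em = lookup_extent_mapping(tree, start, end);
 *      if (em && em->start <= start && em->end >= end)
 *              whole_range_mapped = 1;
 *      free_extent_map(em);
 *
 * free_extent_map() is safe to call with NULL, so no separate check is
 * needed on the lookup failure path.
 */
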
/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
        int ret;

        write_lock_irq(&tree->lock);
        ret = tree_delete(&tree->map, em->end);
        write_unlock_irq(&tree->lock);
        return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
                       struct extent_state *state)
{
        struct extent_state *other;
        struct rb_node *other_node;

        if (state->state & EXTENT_IOBITS)
                return 0;

        other_node = rb_prev(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->end == state->start - 1 &&
                    other->state == state->state) {
                        state->start = other->start;
                        other->in_tree = 0;
                        rb_erase(&other->rb_node, &tree->state);
                        free_extent_state(other);
                }
        }
        other_node = rb_next(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->start == state->end + 1 &&
                    other->state == state->state) {
                        other->start = state->start;
                        state->in_tree = 0;
                        rb_erase(&state->rb_node, &tree->state);
                        free_extent_state(state);
                }
        }
        return 0;
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
                        struct extent_state *state, u64 start, u64 end,
                        int bits)
{
        struct rb_node *node;

        if (end < start) {
                printk("end < start %Lu %Lu\n", end, start);
                WARN_ON(1);
        }
        state->state |= bits;
        state->start = start;
        state->end = end;
        node = tree_insert(&tree->state, end, &state->rb_node);
        if (node) {
                struct extent_state *found;
                found = rb_entry(node, struct extent_state, rb_node);
                printk("found node %Lu %Lu on insert of %Lu %Lu\n",
                       found->start, found->end, start, end);
                free_extent_state(state);
                return -EEXIST;
        }
        merge_state(tree, state);
        return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].
 * After calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
                       struct extent_state *prealloc, u64 split)
{
        struct rb_node *node;

        prealloc->start = orig->start;
        prealloc->end = split - 1;
        prealloc->state = orig->state;
        orig->start = split;

        node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
        if (node) {
                struct extent_state *found;
                found = rb_entry(node, struct extent_state, rb_node);
                printk("found node %Lu %Lu on insert of %Lu %Lu\n",
                       found->start, found->end, prealloc->start,
                       prealloc->end);
                free_extent_state(prealloc);
                return -EEXIST;
        }
        return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree.
 */
static int clear_state_bit(struct extent_map_tree *tree,
                           struct extent_state *state, int bits, int wake,
                           int delete)
{
        int ret = state->state & bits;
        state->state &= ~bits;
        if (wake)
                wake_up(&state->wq);
        if (delete || state->state == 0) {
                if (state->in_tree) {
                        rb_erase(&state->rb_node, &tree->state);
                        state->in_tree = 0;
                        free_extent_state(state);
                } else {
                        WARN_ON(1);
                }
        } else {
                merge_state(tree, state);
        }
        return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
                     int bits, int wake, int delete, gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        unsigned long flags;
        int err;
        int set = 0;

again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        write_lock_irqsave(&tree->lock, flags);
        /*
         * this search will find the extents that end after
         * our range starts
         */
        node = tree_search(&tree->state, start);
        if (!node)
                goto out;
        state = rb_entry(node, struct extent_state, rb_node);
        if (state->start > end)
                goto out;
        WARN_ON(state->end < start);

        /*
         *     | ---- desired range ---- |
         *  | state | or
         *  | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip
         * bits on second half.
         *
         * If the extent we found extends past our range, we
         * just split and search again.  It'll get split again
         * the next time though.
         *
         * If the extent we found is inside our range, we clear
         * the desired bit on it.
         */

        if (state->start < start) {
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        start = state->end + 1;
                        set |= clear_state_bit(tree, state, bits,
                                               wake, delete);
                } else {
                        start = state->start;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and clear the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                if (wake)
                        wake_up(&state->wq);
                set |= clear_state_bit(tree, prealloc, bits,
                                       wake, delete);
                prealloc = NULL;
                goto out;
        }

        start = state->end + 1;
        set |= clear_state_bit(tree, state, bits, wake, delete);
        goto search_again;

out:
        write_unlock_irqrestore(&tree->lock, flags);
        if (prealloc)
                free_extent_state(prealloc);

        return set;

search_again:
        if (start > end)
                goto out;
        write_unlock_irqrestore(&tree->lock, flags);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

static int wait_on_state(struct extent_map_tree *tree,
                         struct extent_state *state)
{
        DEFINE_WAIT(wait);
        prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
        read_unlock_irq(&tree->lock);
        schedule();
        read_lock_irq(&tree->lock);
        finish_wait(&state->wq, &wait);
        return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function.
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
        struct extent_state *state;
        struct rb_node *node;

        read_lock_irq(&tree->lock);
again:
        while (1) {
                /*
                 * this search will find all the extents that end after
                 * our range starts
                 */
                node = tree_search(&tree->state, start);
                if (!node)
                        break;

                state = rb_entry(node, struct extent_state, rb_node);

                if (state->start > end)
                        goto out;

                if (state->state & bits) {
                        start = state->start;
                        atomic_inc(&state->refs);
                        wait_on_state(tree, state);
                        free_extent_state(state);
                        goto again;
                }
                start = state->end + 1;

                if (start > end)
                        break;

                if (need_resched()) {
                        read_unlock_irq(&tree->lock);
                        cond_resched();
                        read_lock_irq(&tree->lock);
                }
        }
out:
        read_unlock_irq(&tree->lock);
        return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
                   int exclusive, u64 *failed_start, gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        unsigned long flags;
        int err = 0;
        int set;
        u64 last_start;
        u64 last_end;

again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        write_lock_irqsave(&tree->lock, flags);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(&tree->state, start);
        if (!node) {
                err = insert_state(tree, prealloc, start, end, bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                goto out;
        }

        state = rb_entry(node, struct extent_state, rb_node);
        last_start = state->start;
        last_end = state->end;

        /*
         * | ---- desired range ---- |
         * | state |
         *
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
                set = state->state & bits;
                if (set && exclusive) {
                        *failed_start = state->start;
                        err = -EEXIST;
                        goto out;
                }
                state->state |= bits;
                start = state->end + 1;
                merge_state(tree, state);
                goto search_again;
        }

        /*
         *     | ---- desired range ---- |
         * | state |
         *   or
         * | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip bits on
         * second half.
         *
         * If the extent we found extends past our
         * range, we just split and search again.  It'll get split
         * again the next time though.
         *
         * If the extent we found is inside our range, we set the
         * desired bit on it.
         */
        if (state->start < start) {
                set = state->state & bits;
                if (exclusive && set) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        state->state |= bits;
                        start = state->end + 1;
                        merge_state(tree, state);
                } else {
                        start = state->start;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *     | state | or | state |
         *
         * There's a hole, we need to insert something in it and
         * ignore the extent we found.
         */
        if (state->start > start) {
                u64 this_end;
                if (end < last_start)
                        this_end = end;
                else
                        this_end = last_start - 1;
                err = insert_state(tree, prealloc, start, this_end,
                                   bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                if (err)
                        goto out;
                start = this_end + 1;
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and set the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                set = state->state & bits;
                if (exclusive && set) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                prealloc->state |= bits;
                merge_state(tree, prealloc);
                prealloc = NULL;
                goto out;
        }

        goto search_again;

out:
        write_unlock_irqrestore(&tree->lock, flags);
        if (prealloc)
                free_extent_state(prealloc);

        return err;

search_again:
        if (start > end)
                goto out;
        write_unlock_irqrestore(&tree->lock, flags);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}
EXPORT_SYMBOL(set_extent_bit);

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
                    int bits, gfp_t mask)
{
        return set_extent_bit(tree, start, end, bits, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
                      int bits, gfp_t mask)
{
        return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
                        gfp_t mask)
{
        return set_extent_bit(tree, start, end,
                              EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
                       gfp_t mask)
{
        return clear_extent_bit(tree, start, end,
                                EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
                   gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
                        gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
                          gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
                         gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
                              0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
                           gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
        return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start, end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
        int err;
        u64 failed_start;

        while (1) {
                err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
                                     &failed_start, mask);
                if (err == -EEXIST && (mask & __GFP_WAIT)) {
                        wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
                        start = failed_start;
                } else {
                        break;
                }
                WARN_ON(start > end);
        }
        return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
                  gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);

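/*
 * Usage sketch: lock_extent()/unlock_extent() bracket an inclusive byte
 * range.  A sleeping caller (GFP_NOFS) waits for any overlapping locked
 * region before taking its own:
 *
 *      lock_extent(tree, start, start + len - 1, GFP_NOFS);
 *      ... read or modify the range ...
 *      unlock_extent(tree, start, start + len - 1, GFP_NOFS);
 *
 * The end_io handlers in this file unlock with GFP_ATOMIC instead, since
 * they run where sleeping is not allowed.
 */
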
/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                BUG_ON(!page);
                __set_page_dirty_nobuffers(page);
                page_cache_release(page);
                index++;
        }
        set_extent_dirty(tree, start, end, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                BUG_ON(!page);
                set_page_writeback(page);
                page_cache_release(page);
                index++;
        }
        set_extent_writeback(tree, start, end, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(set_range_writeback);

int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
                          u64 *start_ret, u64 *end_ret, int bits)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 1;

        read_lock_irq(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(&tree->state, start);
        if (!node || IS_ERR(node)) {
                goto out;
        }

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->end >= start && (state->state & bits)) {
                        *start_ret = state->start;
                        *end_ret = state->end;
                        ret = 0;
                        break;
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        read_unlock_irq(&tree->lock);
        return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);

u64 find_lock_delalloc_range(struct extent_map_tree *tree,
                             u64 start, u64 lock_start, u64 *end, u64 max_bytes)
{
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = start;
        u64 found = 0;
        u64 total_bytes = 0;

        write_lock_irq(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
search_again:
        node = tree_search(&tree->state, cur_start);
        if (!node || IS_ERR(node)) {
                goto out;
        }

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->start != cur_start) {
                        goto out;
                }
                if (!(state->state & EXTENT_DELALLOC)) {
                        goto out;
                }
                if (state->start >= lock_start) {
                        if (state->state & EXTENT_LOCKED) {
                                DEFINE_WAIT(wait);
                                atomic_inc(&state->refs);
                                prepare_to_wait(&state->wq, &wait,
                                                TASK_UNINTERRUPTIBLE);
                                write_unlock_irq(&tree->lock);
                                schedule();
                                write_lock_irq(&tree->lock);
                                finish_wait(&state->wq, &wait);
                                free_extent_state(state);
                                goto search_again;
                        }
                        state->state |= EXTENT_LOCKED;
                }
                found++;
                *end = state->end;
                cur_start = state->end + 1;
                node = rb_next(node);
                if (!node)
                        break;
                total_bytes += state->end - state->start + 1;
                if (total_bytes >= max_bytes)
                        break;
        }
out:
        write_unlock_irq(&tree->lock);
        return found;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;
        int err;

        while (index <= end_index) {
                page = grab_cache_page(tree->mapping, index);
                if (!page) {
                        err = -ENOMEM;
                        goto failed;
                }
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        goto failed;
                }
                index++;
        }
        lock_extent(tree, start, end, GFP_NOFS);
        return 0;

failed:
        /*
         * we failed above in getting the page at 'index', so we undo here
         * up to but not including the page at 'index'
         */
        end_index = index;
        index = start >> PAGE_CACHE_SHIFT;
        while (index < end_index) {
                page = find_get_page(tree->mapping, index);
                unlock_page(page);
                page_cache_release(page);
                index++;
        }
        return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                unlock_page(page);
                page_cache_release(page);
                index++;
        }
        unlock_extent(tree, start, end, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 0;

        write_lock_irq(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(&tree->state, start);
        if (!node || IS_ERR(node)) {
                ret = -ENOENT;
                goto out;
        }
        state = rb_entry(node, struct extent_state, rb_node);
        if (state->start != start) {
                ret = -ENOENT;
                goto out;
        }
        state->private = private;
out:
        write_unlock_irq(&tree->lock);
        return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 0;

        read_lock_irq(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(&tree->state, start);
        if (!node || IS_ERR(node)) {
                ret = -ENOENT;
                goto out;
        }
        state = rb_entry(node, struct extent_state, rb_node);
        if (state->start != start) {
                ret = -ENOENT;
                goto out;
        }
        *private = state->private;
out:
        read_unlock_irq(&tree->lock);
        return ret;
}

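/*
 * Usage sketch: the private field stores one u64 per state record, looked
 * up by exact start offset, so a read-time hook could stash a value at
 * submit time and retrieve it at end_io ('csum' is an assumed example of
 * such a value):
 *
 *      set_state_private(tree, start, csum);
 *      ...
 *      if (get_state_private(tree, start, &csum) == 0)
 *              ... 'csum' now holds the stashed value ...
 *
 * Both calls return -ENOENT unless a state record begins exactly at
 * 'start'.
 */
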
/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
                   int bits, int filled)
{
        struct extent_state *state = NULL;
        struct rb_node *node;
        int bitset = 0;

        read_lock_irq(&tree->lock);
        node = tree_search(&tree->state, start);
        while (node && start <= end) {
                state = rb_entry(node, struct extent_state, rb_node);

                if (filled && state->start > start) {
                        bitset = 0;
                        break;
                }

                if (state->start > end)
                        break;

                if (state->state & bits) {
                        bitset = 1;
                        if (!filled)
                                break;
                } else if (filled) {
                        bitset = 0;
                        break;
                }
                start = state->end + 1;
                if (start > end)
                        break;
                node = rb_next(node);
        }
        read_unlock_irq(&tree->lock);
        return bitset;
}
EXPORT_SYMBOL(test_range_bit);

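/*
 * Usage sketch: 'filled' selects between all-or-any semantics over the
 * inclusive range, matching how the page helpers below use it:
 *
 *      all_uptodate = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
 *      any_locked = test_range_bit(tree, start, end, EXTENT_LOCKED, 0);
 */
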
/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
                               struct page *page)
{
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 end = start + PAGE_CACHE_SIZE - 1;

        if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
                SetPageUptodate(page);
        return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
                             struct page *page)
{
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 end = start + PAGE_CACHE_SIZE - 1;

        if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
                unlock_page(page);
        return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
                                struct page *page)
{
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 end = start + PAGE_CACHE_SIZE - 1;

        if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
                end_page_writeback(page);
        return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
                                    unsigned int bytes_done, int err)
#endif
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct extent_map_tree *tree = bio->bi_private;
        u64 start;
        u64 end;
        int whole_page;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        if (bio->bi_size)
                return 1;
#endif

        do {
                struct page *page = bvec->bv_page;
                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
                        bvec->bv_offset;
                end = start + bvec->bv_len - 1;

                if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
                        whole_page = 1;
                else
                        whole_page = 0;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (!uptodate) {
                        clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
                clear_extent_writeback(tree, start, end, GFP_ATOMIC);

                if (whole_page)
                        end_page_writeback(page);
                else
                        check_page_writeback(tree, page);
                if (tree->ops && tree->ops->writepage_end_io_hook)
                        tree->ops->writepage_end_io_hook(page, start, end);
        } while (bvec >= bio->bi_io_vec);

        bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        return 0;
#endif
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
                                   unsigned int bytes_done, int err)
#endif
{
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct extent_map_tree *tree = bio->bi_private;
        u64 start;
        u64 end;
        int whole_page;
        int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        if (bio->bi_size)
                return 1;
#endif

        do {
                struct page *page = bvec->bv_page;
                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
                        bvec->bv_offset;
                end = start + bvec->bv_len - 1;

                if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
                        whole_page = 1;
                else
                        whole_page = 0;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
                        ret = tree->ops->readpage_end_io_hook(page, start, end);
                        if (ret)
                                uptodate = 0;
                }
                if (uptodate) {
                        set_extent_uptodate(tree, start, end, GFP_ATOMIC);
                        if (whole_page)
                                SetPageUptodate(page);
                        else
                                check_page_uptodate(tree, page);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }

                unlock_extent(tree, start, end, GFP_ATOMIC);

                if (whole_page)
                        unlock_page(page);
                else
                        check_page_locked(tree, page);
        } while (bvec >= bio->bi_io_vec);

        bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        return 0;
#endif
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_preparewrite(struct bio *bio, int err)
#else
static int end_bio_extent_preparewrite(struct bio *bio,
                                       unsigned int bytes_done, int err)
#endif
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct extent_map_tree *tree = bio->bi_private;
        u64 start;
        u64 end;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        if (bio->bi_size)
                return 1;
#endif

        do {
                struct page *page = bvec->bv_page;
                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
                        bvec->bv_offset;
                end = start + bvec->bv_len - 1;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (uptodate) {
                        set_extent_uptodate(tree, start, end, GFP_ATOMIC);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }

                unlock_extent(tree, start, end, GFP_ATOMIC);

        } while (bvec >= bio->bi_io_vec);

        bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        return 0;
#endif
}

static struct bio *
extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
                 gfp_t gfp_flags)
{
        struct bio *bio;

        bio = bio_alloc(gfp_flags, nr_vecs);

        if (bio == NULL && (current->flags & PF_MEMALLOC)) {
                while (!bio && (nr_vecs /= 2))
                        bio = bio_alloc(gfp_flags, nr_vecs);
        }

        if (bio) {
                bio->bi_bdev = bdev;
                bio->bi_sector = first_sector;
        }
        return bio;
}

static int submit_one_bio(int rw, struct bio *bio)
{
        int ret = 0;

        bio_get(bio);
        submit_bio(rw, bio);
        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
        bio_put(bio);
        return ret;
}

static int submit_extent_page(int rw, struct extent_map_tree *tree,
                              struct page *page, sector_t sector,
                              size_t size, unsigned long offset,
                              struct block_device *bdev,
                              struct bio **bio_ret,
                              unsigned long max_pages,
                              bio_end_io_t end_io_func)
{
        int ret = 0;
        struct bio *bio;
        int nr;

        if (bio_ret && *bio_ret) {
                bio = *bio_ret;
                if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
                    bio_add_page(bio, page, size, offset) < size) {
                        ret = submit_one_bio(rw, bio);
                        bio = NULL;
                } else {
                        return 0;
                }
        }
        nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
        bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
        if (!bio) {
                printk("failed to allocate bio nr %d\n", nr);
        }
        bio_add_page(bio, page, size, offset);
        bio->bi_end_io = end_io_func;
        bio->bi_private = tree;
        if (bio_ret) {
                *bio_ret = bio;
        } else {
                ret = submit_one_bio(rw, bio);
        }

        return ret;
}

void set_page_extent_mapped(struct page *page)
{
        if (!PagePrivate(page)) {
                SetPagePrivate(page);
                WARN_ON(!page->mapping->a_ops->invalidatepage);
                set_page_private(page, EXTENT_PAGE_PRIVATE);
                page_cache_get(page);
        }
}

/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
static int __extent_read_full_page(struct extent_map_tree *tree,
                                   struct page *page,
                                   get_extent_t *get_extent,
                                   struct bio **bio)
{
        struct inode *inode = page->mapping->host;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 page_end = start + PAGE_CACHE_SIZE - 1;
        u64 end;
        u64 cur = start;
        u64 extent_offset;
        u64 last_byte = i_size_read(inode);
        u64 block_start;
        u64 cur_end;
        sector_t sector;
        struct extent_map *em;
        struct block_device *bdev;
        int ret;
        int nr = 0;
        size_t page_offset = 0;
        size_t iosize;
        size_t blocksize = inode->i_sb->s_blocksize;

        set_page_extent_mapped(page);

        end = page_end;
        lock_extent(tree, start, end, GFP_NOFS);

        while (cur <= end) {
                if (cur >= last_byte) {
                        iosize = PAGE_CACHE_SIZE - page_offset;
                        zero_user_page(page, page_offset, iosize, KM_USER0);
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            GFP_NOFS);
                        unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
                        break;
                }
                em = get_extent(inode, page, page_offset, cur, end, 0);
                if (IS_ERR(em) || !em) {
                        SetPageError(page);
                        unlock_extent(tree, cur, end, GFP_NOFS);
                        break;
                }

                extent_offset = cur - em->start;
                BUG_ON(em->end < cur);
                BUG_ON(end < cur);

                iosize = min(em->end - cur, end - cur) + 1;
                cur_end = min(em->end, end);
                iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
                sector = (em->block_start + extent_offset) >> 9;
                bdev = em->bdev;
                block_start = em->block_start;
                free_extent_map(em);
                em = NULL;

                /* we've found a hole, just zero and go on */
                if (block_start == EXTENT_MAP_HOLE) {
                        zero_user_page(page, page_offset, iosize, KM_USER0);
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            GFP_NOFS);
                        unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
                        cur = cur + iosize;
                        page_offset += iosize;
                        continue;
                }
                /* the get_extent function already copied into the page */
                if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
                        unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
                        cur = cur + iosize;
                        page_offset += iosize;
                        continue;
                }

                ret = 0;
                if (tree->ops && tree->ops->readpage_io_hook) {
                        ret = tree->ops->readpage_io_hook(page, cur,
                                                          cur + iosize - 1);
                }
                if (!ret) {
                        unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
                        nr -= page->index;
                        ret = submit_extent_page(READ, tree, page,
                                                 sector, iosize, page_offset,
                                                 bdev, bio, nr,
                                                 end_bio_extent_readpage);
                }
                if (ret)
                        SetPageError(page);
                cur = cur + iosize;
                page_offset += iosize;
                nr++;
        }
        if (!nr) {
                if (!PageError(page))
                        SetPageUptodate(page);
                unlock_page(page);
        }
        return 0;
}

int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
                          get_extent_t *get_extent)
{
        struct bio *bio = NULL;
        int ret;

        ret = __extent_read_full_page(tree, page, get_extent, &bio);
        if (bio)
                submit_one_bio(READ, bio);
        return ret;
}
EXPORT_SYMBOL(extent_read_full_page);

/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                              void *data)
{
        struct inode *inode = page->mapping->host;
        struct extent_page_data *epd = data;
        struct extent_map_tree *tree = epd->tree;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 page_end = start + PAGE_CACHE_SIZE - 1;
        u64 end;
        u64 cur = start;
        u64 extent_offset;
        u64 last_byte = i_size_read(inode);
        u64 block_start;
        u64 iosize;
        sector_t sector;
        struct extent_map *em;
        struct block_device *bdev;
        int ret;
        int nr = 0;
        size_t page_offset = 0;
        size_t blocksize;
        loff_t i_size = i_size_read(inode);
        unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
        u64 nr_delalloc;
        u64 delalloc_end;

        WARN_ON(!PageLocked(page));
        if (page->index > end_index) {
                clear_extent_dirty(tree, start, page_end, GFP_NOFS);
                unlock_page(page);
                return 0;
        }

        if (page->index == end_index) {
                size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
                zero_user_page(page, offset,
                               PAGE_CACHE_SIZE - offset, KM_USER0);
        }

        set_page_extent_mapped(page);

        lock_extent(tree, start, page_end, GFP_NOFS);
        nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
                                               &delalloc_end,
                                               128 * 1024 * 1024);
        if (nr_delalloc) {
                tree->ops->fill_delalloc(inode, start, delalloc_end);
                if (delalloc_end >= page_end + 1) {
                        clear_extent_bit(tree, page_end + 1, delalloc_end,
                                         EXTENT_LOCKED | EXTENT_DELALLOC,
                                         1, 0, GFP_NOFS);
                }
                clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
                                 0, 0, GFP_NOFS);
                if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
                        printk("found delalloc bits after clear extent_bit\n");
                }
        } else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
                printk("found delalloc bits after find_delalloc_range returns 0\n");
        }

        end = page_end;
        if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
                printk("found delalloc bits after lock_extent\n");
        }

        if (last_byte <= start) {
                clear_extent_dirty(tree, start, page_end, GFP_NOFS);
                goto done;
        }

        set_extent_uptodate(tree, start, page_end, GFP_NOFS);
        blocksize = inode->i_sb->s_blocksize;

        while (cur <= end) {
                if (cur >= last_byte) {
                        clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
                        break;
                }
                em = epd->get_extent(inode, page, page_offset, cur, end, 1);
                if (IS_ERR(em) || !em) {
                        SetPageError(page);
                        break;
                }

                extent_offset = cur - em->start;
                BUG_ON(em->end < cur);
                BUG_ON(end < cur);
                iosize = min(em->end - cur, end - cur) + 1;
                iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
                sector = (em->block_start + extent_offset) >> 9;
                bdev = em->bdev;
                block_start = em->block_start;
                free_extent_map(em);
                em = NULL;

                if (block_start == EXTENT_MAP_HOLE ||
                    block_start == EXTENT_MAP_INLINE) {
                        clear_extent_dirty(tree, cur,
                                           cur + iosize - 1, GFP_NOFS);
                        cur = cur + iosize;
                        page_offset += iosize;
                        continue;
                }

                /* leave this out until we have a page_mkwrite call */
                if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
                                         EXTENT_DIRTY, 0)) {
                        cur = cur + iosize;
                        page_offset += iosize;
                        continue;
                }
                clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
                if (tree->ops && tree->ops->writepage_io_hook) {
                        ret = tree->ops->writepage_io_hook(page, cur,
                                                           cur + iosize - 1);
                } else {
                        ret = 0;
                }
                if (ret)
                        SetPageError(page);
                else {
                        unsigned long nr = end_index + 1;
                        set_range_writeback(tree, cur, cur + iosize - 1);

                        ret = submit_extent_page(WRITE, tree, page, sector,
                                                 iosize, page_offset, bdev,
                                                 &epd->bio, nr,
                                                 end_bio_extent_writepage);
                        if (ret)
                                SetPageError(page);
                }
                cur = cur + iosize;
                page_offset += iosize;
                nr++;
        }
done:
        unlock_extent(tree, start, page_end, GFP_NOFS);
        unlock_page(page);
        return 0;
}

int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
                           get_extent_t *get_extent,
                           struct writeback_control *wbc)
{
        int ret;
        struct extent_page_data epd = {
                .bio = NULL,
                .tree = tree,
                .get_extent = get_extent,
        };

        ret = __extent_writepage(page, wbc, &epd);
        if (epd.bio)
                submit_one_bio(WRITE, epd.bio);
        return ret;
}
EXPORT_SYMBOL(extent_write_full_page);

int extent_writepages(struct extent_map_tree *tree,
                      struct address_space *mapping,
                      get_extent_t *get_extent,
                      struct writeback_control *wbc)
{
        int ret;
        struct extent_page_data epd = {
                .bio = NULL,
                .tree = tree,
                .get_extent = get_extent,
        };

        ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
        if (epd.bio)
                submit_one_bio(WRITE, epd.bio);
        return ret;
}
EXPORT_SYMBOL(extent_writepages);

int extent_readpages(struct extent_map_tree *tree,
                     struct address_space *mapping,
                     struct list_head *pages, unsigned nr_pages,
                     get_extent_t get_extent)
{
        struct bio *bio = NULL;
        unsigned page_idx;
        struct pagevec pvec;

        pagevec_init(&pvec, 0);
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_entry(pages->prev, struct page, lru);

                prefetchw(&page->flags);
                list_del(&page->lru);
                /*
                 * what we want to do here is call add_to_page_cache_lru,
                 * but that isn't exported, so we reproduce it here
                 */
                if (!add_to_page_cache(page, mapping,
                                       page->index, GFP_KERNEL)) {

                        /* open coding of lru_cache_add, also not exported */
                        page_cache_get(page);
                        if (!pagevec_add(&pvec, page))
                                __pagevec_lru_add(&pvec);
                        __extent_read_full_page(tree, page, get_extent, &bio);
                }
                page_cache_release(page);
        }
        if (pagevec_count(&pvec))
                __pagevec_lru_add(&pvec);
        BUG_ON(!list_empty(pages));
        if (bio)
                submit_one_bio(READ, bio);
        return 0;
}
EXPORT_SYMBOL(extent_readpages);

/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_map_tree *tree,
                          struct page *page, unsigned long offset)
{
        u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
        u64 end = start + PAGE_CACHE_SIZE - 1;
        size_t blocksize = page->mapping->host->i_sb->s_blocksize;

        start += (offset + blocksize - 1) & ~(blocksize - 1);
        if (start > end)
                return 0;

        lock_extent(tree, start, end, GFP_NOFS);
        wait_on_extent_writeback(tree, start, end);
        clear_extent_bit(tree, start, end,
                         EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
                         1, 1, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(extent_invalidatepage);

/*
 * simple commit_write call, set_range_dirty is used to mark both
 * the pages and the extent records as dirty
 */
int extent_commit_write(struct extent_map_tree *tree,
                        struct inode *inode, struct page *page,
                        unsigned from, unsigned to)
{
        loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

        set_page_extent_mapped(page);
        set_page_dirty(page);

        if (pos > inode->i_size) {
                i_size_write(inode, pos);
                mark_inode_dirty(inode);
        }
        return 0;
}
EXPORT_SYMBOL(extent_commit_write);

1943int extent_prepare_write(struct extent_map_tree *tree,
1944 struct inode *inode, struct page *page,
1945 unsigned from, unsigned to, get_extent_t *get_extent)
1946{
35ebb934 1947 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
a52d9a80
CM
1948 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1949 u64 block_start;
1950 u64 orig_block_start;
1951 u64 block_end;
1952 u64 cur_end;
1953 struct extent_map *em;
1954 unsigned blocksize = 1 << inode->i_blkbits;
1955 size_t page_offset = 0;
1956 size_t block_off_start;
1957 size_t block_off_end;
1958 int err = 0;
1959 int iocount = 0;
1960 int ret = 0;
1961 int isnew;
1962
b3cfa35a
CH
1963 set_page_extent_mapped(page);
1964
a52d9a80
CM
1965 block_start = (page_start + from) & ~((u64)blocksize - 1);
1966 block_end = (page_start + to - 1) | (blocksize - 1);
1967 orig_block_start = block_start;
1968
1969 lock_extent(tree, page_start, page_end, GFP_NOFS);
1970 while(block_start <= block_end) {
1971 em = get_extent(inode, page, page_offset, block_start,
1972 block_end, 1);
1973 if (IS_ERR(em) || !em) {
1974 goto err;
1975 }
1976 cur_end = min(block_end, em->end);
1977 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
1978 block_off_end = block_off_start + blocksize;
1979 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
1980
1981 if (!PageUptodate(page) && isnew &&
1982 (block_off_end > to || block_off_start < from)) {
1983 void *kaddr;
1984
1985 kaddr = kmap_atomic(page, KM_USER0);
1986 if (block_off_end > to)
1987 memset(kaddr + to, 0, block_off_end - to);
1988 if (block_off_start < from)
1989 memset(kaddr + block_off_start, 0,
1990 from - block_off_start);
1991 flush_dcache_page(page);
1992 kunmap_atomic(kaddr, KM_USER0);
1993 }
1994 if (!isnew && !PageUptodate(page) &&
1995 (block_off_end > to || block_off_start < from) &&
1996 !test_range_bit(tree, block_start, cur_end,
1997 EXTENT_UPTODATE, 1)) {
1998 u64 sector;
1999 u64 extent_offset = block_start - em->start;
2000 size_t iosize;
2001 sector = (em->block_start + extent_offset) >> 9;
2002 iosize = (cur_end - block_start + blocksize - 1) &
2003 ~((u64)blocksize - 1);
2004 /*
2005 * we've already got the extent locked, but we
2006 * need to split the state such that our end_bio
2007 * handler can clear the lock.
2008 */
2009 set_extent_bit(tree, block_start,
2010 block_start + iosize - 1,
2011 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2012 ret = submit_extent_page(READ, tree, page,
2013 sector, iosize, page_offset, em->bdev,
b293f02e 2014 NULL, 1,
a52d9a80
CM
2015 end_bio_extent_preparewrite);
2016 iocount++;
2017 block_start = block_start + iosize;
2018 } else {
2019 set_extent_uptodate(tree, block_start, cur_end,
2020 GFP_NOFS);
2021 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2022 block_start = cur_end + 1;
2023 }
2024 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2025 free_extent_map(em);
2026 }
2027 if (iocount) {
2028 wait_extent_bit(tree, orig_block_start,
2029 block_end, EXTENT_LOCKED);
2030 }
2031 check_page_uptodate(tree, page);
2032err:
2033 /* FIXME, zero out newly allocated blocks on error */
2034 return err;
2035}
2036EXPORT_SYMBOL(extent_prepare_write);
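
/*
 * Example (sketch): the prepare_write/commit_write pair in the fs glue,
 * again assuming fs-provided my_tree()/my_get_extent() helpers.  The
 * extent layer zeroes or reads in the partial blocks around [from, to)
 * itself, so the fs wrappers stay trivial.
 */
#if 0
static int my_prepare_write(struct file *file, struct page *page,
			    unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return extent_prepare_write(my_tree(inode), inode, page, from, to,
				    my_get_extent);
}

static int my_commit_write(struct file *file, struct page *page,
			   unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return extent_commit_write(my_tree(inode), inode, page, from, to);
}
#endif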

/*
 * a helper for releasepage. As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
{
	struct extent_map *em;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	u64 orig_start = start;
	int ret = 1;

	while (start <= end) {
		em = lookup_extent_mapping(tree, start, end);
		if (!em || IS_ERR(em))
			break;
		if (!test_range_bit(tree, em->start, em->end,
				    EXTENT_LOCKED, 0)) {
			remove_extent_mapping(tree, em);
			/* once for the rb tree */
			free_extent_map(em);
		}
		start = em->end + 1;
		/* once for us */
		free_extent_map(em);
	}
	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
		ret = 0;
	else
		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
				 1, 1, GFP_NOFS);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_mapping);
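
/*
 * Example (sketch): a releasepage hook.  The reference counting above
 * matters here: lookup takes one reference ("once for us") and the rb
 * tree holds another, so em->end is still safe to read after the tree's
 * reference has been dropped.
 */
#if 0
static int my_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_map_tree *tree = my_tree(page->mapping->host);

	return try_release_extent_mapping(tree, page);
}
#endif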
sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		     get_extent_t *get_extent)
{
	struct inode *inode = mapping->host;
	u64 start = iblock << inode->i_blkbits;
	u64 end = start + (1 << inode->i_blkbits) - 1;
	sector_t sector = 0;
	struct extent_map *em;

	em = get_extent(inode, NULL, 0, start, end, 0);
	if (!em || IS_ERR(em))
		return 0;

	if (em->block_start == EXTENT_MAP_INLINE ||
	    em->block_start == EXTENT_MAP_HOLE)
		goto out;

	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
out:
	free_extent_map(em);
	return sector;
}

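/*
 * Example (sketch): with the helper above, FIBMAP support is a
 * one-liner; inline and hole extents report sector 0, which is what
 * bmap callers treat as "no mapping".
 */
#if 0
static sector_t my_bmap(struct address_space *mapping, sector_t iblock)
{
	return extent_bmap(mapping, iblock, my_get_extent);
}
#endif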

static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
{
	if (list_empty(&eb->lru)) {
		extent_buffer_get(eb);
		list_add(&eb->lru, &tree->buffer_lru);
		tree->lru_size++;
		if (tree->lru_size >= BUFFER_LRU_MAX) {
			struct extent_buffer *rm;
			rm = list_entry(tree->buffer_lru.prev,
					struct extent_buffer, lru);
			tree->lru_size--;
			list_del_init(&rm->lru);
			free_extent_buffer(rm);
		}
	} else
		list_move(&eb->lru, &tree->buffer_lru);
	return 0;
}

static struct extent_buffer *find_lru(struct extent_map_tree *tree,
				      u64 start, unsigned long len)
{
	struct list_head *lru = &tree->buffer_lru;
	struct list_head *cur = lru->next;
	struct extent_buffer *eb;

	if (list_empty(lru))
		return NULL;

	do {
		eb = list_entry(cur, struct extent_buffer, lru);
		if (eb->start == start && eb->len == len) {
			extent_buffer_get(eb);
			return eb;
		}
		cur = cur->next;
	} while (cur != lru);
	return NULL;
}

static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT);
}

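/*
 * Example: with 4K pages, a 4K buffer starting at 0x1000 covers
 * ((0x1000 + 0x1000 + 0xfff) >> 12) - (0x1000 >> 12) = 2 - 1 = 1 page,
 * while the same length starting at 0x1800 straddles a boundary:
 * ((0x1800 + 0x1000 + 0xfff) >> 12) - (0x1800 >> 12) = 3 - 1 = 2 pages.
 */
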
static inline struct page *extent_buffer_page(struct extent_buffer *eb,
					      unsigned long i)
{
	struct page *p;
	struct address_space *mapping;

	if (i == 0)
		return eb->first_page;
	i += eb->start >> PAGE_CACHE_SHIFT;
	mapping = eb->first_page->mapping;
	read_lock_irq(&mapping->tree_lock);
	p = radix_tree_lookup(&mapping->page_tree, i);
	read_unlock_irq(&mapping->tree_lock);
	return p;
}

static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
						   u64 start,
						   unsigned long len,
						   gfp_t mask)
{
	struct extent_buffer *eb = NULL;

	spin_lock(&tree->lru_lock);
	eb = find_lru(tree, start, len);
	spin_unlock(&tree->lru_lock);
	if (eb)
		return eb;

	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;
	INIT_LIST_HEAD(&eb->lru);
	eb->start = start;
	eb->len = len;
	atomic_set(&eb->refs, 1);

	return eb;
}

static void __free_extent_buffer(struct extent_buffer *eb)
{
	kmem_cache_free(extent_buffer_cache, eb);
}

struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
					  u64 start, unsigned long len,
					  struct page *page0,
					  gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	if (page0) {
		eb->first_page = page0;
		i = 1;
		index++;
		page_cache_get(page0);
		mark_page_accessed(page0);
		set_page_extent_mapped(page0);
		WARN_ON(!PageUptodate(page0));
		set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
				 len << 2);
	} else {
		i = 0;
	}
	for (; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
		if (!p) {
			WARN_ON(1);
			goto fail;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);
		if (i == 0) {
			eb->first_page = p;
			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 len << 2);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;

fail:
	spin_lock(&tree->lru_lock);
	list_del_init(&eb->lru);
	spin_unlock(&tree->lru_lock);
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 1; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	if (i > 0)
		page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(alloc_extent_buffer);
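
/*
 * Example (sketch): the usual metadata path pairs alloc_extent_buffer()
 * with read_extent_buffer_pages() below, roughly the shape of btrfs'
 * read_tree_block().  The tree pointer and blocksize are assumed to
 * come from the caller.
 */
#if 0
static struct extent_buffer *my_read_block(struct extent_map_tree *tree,
					   u64 blocknr, u32 blocksize)
{
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(tree, blocknr, blocksize, NULL, GFP_NOFS);
	if (!eb)
		return NULL;
	/* wait == 1: block until every page is read and uptodate */
	if (read_extent_buffer_pages(tree, eb, 0, 1)) {
		free_extent_buffer(eb);
		return NULL;
	}
	return eb;
}
#endif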

struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
					 u64 start, unsigned long len,
					 gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	for (i = 0; i < num_pages; i++, index++) {
		p = find_lock_page(mapping, index);
		if (!p)
			goto fail;
		set_page_extent_mapped(p);
		mark_page_accessed(p);

		if (i == 0) {
			eb->first_page = p;
			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 len << 2);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}

		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;
fail:
	spin_lock(&tree->lru_lock);
	list_del_init(&eb->lru);
	spin_unlock(&tree->lru_lock);
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 1; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	if (i > 0)
		page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(find_extent_buffer);

void free_extent_buffer(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	if (!eb)
		return;

	if (!atomic_dec_and_test(&eb->refs))
		return;

	WARN_ON(!list_empty(&eb->lru));
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 1; i < num_pages; i++) {
		page_cache_release(extent_buffer_page(eb, i));
	}
	page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
}
EXPORT_SYMBOL(free_extent_buffer);

int clear_extent_buffer_dirty(struct extent_map_tree *tree,
			      struct extent_buffer *eb)
{
	int set;
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	u64 start = eb->start;
	u64 end = start + eb->len - 1;

	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		/*
		 * if we're on the last page or the first page and the
		 * block isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean a page that is partially dirty
		 */
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			start = (u64)page->index << PAGE_CACHE_SHIFT;
			end = start + PAGE_CACHE_SIZE - 1;
			if (test_range_bit(tree, start, end,
					   EXTENT_DIRTY, 0)) {
				unlock_page(page);
				continue;
			}
		}
		clear_page_dirty_for_io(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_dirty);

int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
				    struct extent_buffer *eb)
{
	return wait_on_extent_writeback(tree, eb->start,
					eb->start + eb->len - 1);
}
EXPORT_SYMBOL(wait_on_extent_buffer_writeback);

int set_extent_buffer_dirty(struct extent_map_tree *tree,
			    struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *page = extent_buffer_page(eb, i);
		/* writepage may need to do something special for the
		 * first page, we have to make sure page->private is
		 * properly set.  releasepage may drop page->private
		 * on us if the page isn't already dirty.
		 */
		if (i == 0) {
			lock_page(page);
			set_page_private(page,
					 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 eb->len << 2);
		}
		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
		if (i == 0)
			unlock_page(page);
	}
	return set_extent_dirty(tree, eb->start,
				eb->start + eb->len - 1, GFP_NOFS);
}
EXPORT_SYMBOL(set_extent_buffer_dirty);

int set_extent_buffer_uptodate(struct extent_map_tree *tree,
			       struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);

	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			    GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			check_page_uptodate(tree, page);
			continue;
		}
		SetPageUptodate(page);
	}
	return 0;
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);

int extent_buffer_uptodate(struct extent_map_tree *tree,
			   struct extent_buffer *eb)
{
	if (eb->flags & EXTENT_UPTODATE)
		return 1;
	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			      EXTENT_UPTODATE, 1);
}
EXPORT_SYMBOL(extent_buffer_uptodate);

int read_extent_buffer_pages(struct extent_map_tree *tree,
			     struct extent_buffer *eb,
			     u64 start,
			     int wait)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	unsigned long num_pages;

	if (eb->flags & EXTENT_UPTODATE)
		return 0;

	/* note: this range-wide shortcut is intentionally disabled */
	if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
				EXTENT_UPTODATE, 1)) {
		return 0;
	}

	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			(eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (PageUptodate(page))
			continue;
		if (!wait) {
			if (TestSetPageLocked(page))
				continue;
		} else {
			lock_page(page);
		}
		if (!PageUptodate(page)) {
			err = page->mapping->a_ops->readpage(NULL, page);
			if (err)
				ret = err;
		} else {
			unlock_page(page);
		}
	}

	if (ret || !wait)
		return ret;

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			ret = -EIO;
	}
	if (!ret)
		eb->flags |= EXTENT_UPTODATE;
	return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);

void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long num_pages = num_extent_pages(eb->start, eb->len);

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			printk("page %lu not up to date i %lu, total %lu, "
			       "len %lu\n", page->index, i, num_pages,
			       eb->len);
			WARN_ON(1);
		}

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(read_extent_buffer);
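
/*
 * Example (sketch): callers typically pull fixed-size on-disk
 * structures out of a buffer by offset; the read is just a bounded
 * copy that may span page boundaries.  struct my_disk_key is an
 * assumed on-disk structure, not one defined in this file.
 */
#if 0
static void my_read_key(struct extent_buffer *eb, unsigned long offset,
			struct my_disk_key *key)
{
	read_extent_buffer(eb, key, offset, sizeof(*key));
}
#endif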

int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **token, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
	}
	if (start + min_len > eb->len) {
		printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n",
		       eb->start, eb->len, start, min_len);
		WARN_ON(1);
	}

	p = extent_buffer_page(eb, i);
	WARN_ON(!PageUptodate(p));
	kaddr = kmap_atomic(p, km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
EXPORT_SYMBOL(map_private_extent_buffer);

int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	int err;
	int save = 0;

	if (eb->map_token) {
		unmap_extent_buffer(eb, eb->map_token, km);
		eb->map_token = NULL;
		save = 1;
	}
	err = map_private_extent_buffer(eb, start, min_len, token, map,
					map_start, map_len, km);
	if (!err && save) {
		eb->map_token = *token;
		eb->kaddr = *map;
		eb->map_start = *map_start;
		eb->map_len = *map_len;
	}
	return err;
}
EXPORT_SYMBOL(map_extent_buffer);

void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);
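
/*
 * Example (sketch): the map/unmap pair gives direct, page-bounded
 * access to buffer bytes without a copy.  map_extent_buffer() returns
 * -EINVAL when [start, start + min_len) would cross a page, so callers
 * size min_len to a single field.  *map points at buffer offset
 * *map_start, hence the "offset - map_start" adjustment below.
 */
#if 0
static u64 my_read_u64(struct extent_buffer *eb, unsigned long offset)
{
	char *kaddr;
	char *token;
	unsigned long map_start;
	unsigned long map_len;
	u64 val;

	if (map_extent_buffer(eb, offset, sizeof(u64), &token, &kaddr,
			      &map_start, &map_len, KM_USER0))
		return 0;
	val = le64_to_cpu(*(__le64 *)(kaddr + (offset - map_start)));
	unmap_extent_buffer(eb, token, KM_USER0);
	return val;
}
#endif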

int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);

void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(write_extent_buffer);

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(memset_extent_buffer);

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(copy_extent_buffer);

static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);

	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		/* copy backwards from the tails, memmove style */
		while (len--)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE -
					    dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);
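
/*
 * Example of the direction choice above: shifting bytes right inside
 * one buffer, memmove_extent_buffer(eb, 100, 50, 200), has
 * dst_offset > src_offset, so the ranges [50, 250) and [100, 300)
 * overlap and the copy runs backwards from the tails (byte 249 lands
 * at 299 first); a left shift takes the memcpy_extent_buffer() fast
 * path instead.
 */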