#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

static DEFINE_SPINLOCK(state_lock);
#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_map_tree *tree;
	get_extent_t *get_extent;
};

int __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	extent_state_cache = btrfs_cache_create("extent_state",
						sizeof(struct extent_state), 0,
						NULL);
	if (!extent_state_cache)
		goto free_map_cache;
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
						 sizeof(struct extent_buffer), 0,
						 NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
free_map_cache:
	kmem_cache_destroy(extent_map_cache);
	return -ENOMEM;
}

void __exit extent_map_exit(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %d "
		       "refs %d\n", state->start, state->end, state->state,
		       state->in_tree, atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}

	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	rwlock_init(&tree->lock);
	spin_lock_init(&tree->lru_lock);
	tree->mapping = mapping;
	INIT_LIST_HEAD(&tree->buffer_lru);
	tree->lru_size = 0;
}
EXPORT_SYMBOL(extent_map_tree_init);
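
/*
 * Usage sketch (hypothetical caller, not taken from this file): an inode
 * embedding an extent_map_tree would initialize it once against the
 * inode's page mapping, e.g.:
 *
 *	extent_map_tree_init(&ei->extent_tree, inode->i_mapping, GFP_NOFS);
 *
 * where 'ei' is an assumed per-inode structure with an extent_tree member.
 */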

void extent_map_tree_empty_lru(struct extent_map_tree *tree)
{
	struct extent_buffer *eb;

	while (!list_empty(&tree->buffer_lru)) {
		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
				lru);
		list_del_init(&eb->lru);
		free_extent_buffer(eb);
	}
}
EXPORT_SYMBOL(extent_map_tree_empty_lru);

struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;

	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);
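
/*
 * Reference-count sketch (illustrative, assumed caller): extent_map
 * objects start with a refcount of 1 from alloc_extent_map(), gain a
 * reference when added to a tree, and are only destroyed once every
 * holder has called free_extent_map():
 *
 *	em = alloc_extent_map(GFP_NOFS);	// refs == 1
 *	add_extent_mapping(tree, em);		// refs == 2, tree holds one
 *	free_extent_map(em);			// drop the allocation ref
 */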

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;

	spin_lock_irqsave(&state_lock, flags);
	list_add(&state->list, &states);
	spin_unlock_irqrestore(&state_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	unsigned long flags;

	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irqsave(&state_lock, flags);
		list_del(&state->list);
		spin_unlock_irqrestore(&state_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while (prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;

	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}

/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings. The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == EXTENT_MAP_HOLE &&
		      prev->block_start == EXTENT_MAP_HOLE) ||
		     (em->block_start == EXTENT_MAP_INLINE &&
		      prev->block_start == EXTENT_MAP_INLINE) ||
		     (em->block_start == EXTENT_MAP_DELALLOC &&
		      prev->block_start == EXTENT_MAP_DELALLOC) ||
		     (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
		      em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);
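
/*
 * Merge sketch (illustrative, assumed values): two hypothetical mappings
 * that are contiguous both in the file and on disk are collapsed into a
 * single tree entry by the backward merge above, e.g.
 *
 *	prev: [0, 4095]     block_start 8192 (so block_end 12287)
 *	em:   [4096, 8191]  block_start 12288
 *
 * inserting 'em' erases 'prev' and leaves 'em' covering [0, 8191] with
 * block_start 8192.
 */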

/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range. There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);
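
/*
 * Lookup sketch (hypothetical caller): because only the first
 * intersecting mapping is returned, a caller walking a whole range
 * would loop, dropping each reference as it goes:
 *
 *	while (start <= end) {
 *		em = lookup_extent_mapping(tree, start, end);
 *		if (!em || IS_ERR(em))
 *			break;
 *		// ... use em ...
 *		start = em->end + 1;
 *		free_extent_map(em);
 *	}
 */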

/*
 * removes an extent_map struct from the tree. No reference counts are
 * dropped, and no checks are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree. Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree. 'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally. This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created first half. 'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end]. After
 * calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_map_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;

	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree. This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again. It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);
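
/*
 * Split sketch (illustrative, assumed values): clearing bits from the
 * middle of a larger state record forces two splits. Given a single
 * state covering [0, 16383] with only EXTENT_DIRTY set, a hypothetical
 *
 *	clear_extent_bit(tree, 4096, 8191, EXTENT_DIRTY, 0, 0, GFP_NOFS);
 *
 * leaves [0, 4095] and [8192, 16383] dirty and frees the now-empty
 * record covering [4096, 8191].
 */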

static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

/*
 * set some bits on a range in the tree. This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set. The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		state->state |= bits;
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 *  | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again. It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		prealloc->state |= bits;
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);
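
/*
 * Exclusive-set sketch (hypothetical caller): with 'exclusive' == 1 the
 * call doubles as a try-lock, reporting where the conflict begins:
 *
 *	u64 failed_start;
 *	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *			     &failed_start, GFP_NOFS);
 *	if (err == -EEXIST)
 *		// [failed_start, end] already carries the bit
 *
 * lock_extent() below is built on exactly this pattern.
 */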

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way. [start, end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;

	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end,
					EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);
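
/*
 * Locking sketch (hypothetical caller): the extent lock brackets any
 * read or write of a file range, with the end_io handlers (or the
 * caller itself) dropping the lock when the IO completes:
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	// ... look up mappings, submit IO for [start, end] ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */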

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);
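
/*
 * Iteration sketch (hypothetical caller): walking every range with a
 * given bit set, one state record at a time (the function returns 0
 * when a match is found, nonzero otherwise):
 *
 *	u64 found_start, found_end;
 *	while (!find_first_extent_bit(tree, start, &found_start,
 *				      &found_end, EXTENT_DIRTY)) {
 *		// ... process [found_start, found_end] ...
 *		start = found_end + 1;
 *	}
 */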

u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 *start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && state->start != cur_start) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			goto out;
		}
		if (!found) {
			struct extent_state *prev_state;
			struct rb_node *prev_node = node;

			while (1) {
				prev_node = rb_prev(prev_node);
				if (!prev_node)
					break;
				prev_state = rb_entry(prev_node,
						      struct extent_state,
						      rb_node);
				if (!(prev_state->state & EXTENT_DELALLOC))
					break;
				state = prev_state;
				node = prev_node;
			}
		}
		if (state->state & EXTENT_LOCKED) {
			DEFINE_WAIT(wait);
			atomic_inc(&state->refs);
			prepare_to_wait(&state->wq, &wait,
					TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tree->lock);
			schedule();
			write_lock_irq(&tree->lock);
			finish_wait(&state->wq, &wait);
			free_extent_state(state);
			goto search_again;
		}
		state->state |= EXTENT_LOCKED;
		if (!found)
			*start = state->start;
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
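
/*
 * Private-data sketch (hypothetical caller): the u64 'private' slot
 * stashes per-record data keyed by the exact start offset of a state
 * record, e.g. a checksum remembered at submit time and looked up at
 * end_io time:
 *
 *	set_state_private(tree, start, csum);	// before submitting IO
 *	...
 *	u64 csum;
 *	if (!get_state_private(tree, start, &csum))
 *		// verify the completed block against 'csum'
 */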

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
 * has the bits set. Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);
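
/*
 * Semantics sketch (illustrative, assumed state records): with
 * [0, 4095] dirty and [4096, 8191] clean,
 *
 *	test_range_bit(tree, 0, 8191, EXTENT_DIRTY, 1) == 0  // not all dirty
 *	test_range_bit(tree, 0, 8191, EXTENT_DIRTY, 0) == 1  // some dirty
 */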

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start, end);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end);
			if (ret)
				uptodate = 0;
		}
		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
			if (whole_page)
				SetPageUptodate(page);
			else
				check_page_uptodate(tree, page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			unlock_page(page);
		else
			check_page_locked(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_preparewrite(struct bio *bio, int err)
#else
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

static struct bio *
extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		 gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

static int submit_one_bio(int rw, struct bio *bio)
{
	int ret = 0;

	bio_get(bio);
	submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}

static int submit_extent_page(int rw, struct extent_map_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func)
{
	int ret = 0;
	struct bio *bio;
	int nr;

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
		    bio_add_page(bio, page, size, offset) < size) {
			ret = submit_one_bio(rw, bio);
			bio = NULL;
		} else {
			return 0;
		}
	}
	nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
	if (!bio) {
		printk("failed to allocate bio nr %d\n", nr);
		return -ENOMEM;
	}
	bio_add_page(bio, page, size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;
	if (bio_ret) {
		*bio_ret = bio;
	} else {
		ret = submit_one_bio(rw, bio);
	}

	return ret;
}
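
/*
 * Batching sketch (illustrative): callers that read or write several
 * contiguous pages pass the same 'struct bio **' to successive
 * submit_extent_page() calls; pages are appended to the pending bio
 * until it fills up or the sectors stop being contiguous, and whoever
 * owns the pointer submits the final partially filled bio:
 *
 *	struct bio *bio = NULL;
 *	// for each page in the range:
 *	submit_extent_page(READ, tree, page, sector, size, offset,
 *			   bdev, &bio, max_pages,
 *			   end_bio_extent_readpage);
 *	if (bio)
 *		submit_one_bio(READ, bio);
 */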

void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		WARN_ON(!page->mapping->a_ops->invalidatepage);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
		page_cache_get(page);
	}
}

/*
 * basic readpage implementation. Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
static int __extent_read_full_page(struct extent_map_tree *tree,
				   struct page *page,
				   get_extent_t *get_extent,
				   struct bio **bio)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			iosize = PAGE_CACHE_SIZE - page_offset;
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);

		iosize = min(em->end - cur, end - cur) + 1;
		cur_end = min(em->end, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
			nr -= page->index;
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 bdev, bio, nr,
						 end_bio_extent_readpage);
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}

int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct bio *bio = NULL;
	int ret;

	ret = __extent_read_full_page(tree, page, get_extent, &bio);
	if (bio)
		submit_one_bio(READ, bio);
	return ret;
}
EXPORT_SYMBOL(extent_read_full_page);

/*
 * the writepage semantics are similar to regular writepage. extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback. Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
			      void *data)
{
	struct inode *inode = page->mapping->host;
	struct extent_page_data *epd = data;
	struct extent_map_tree *tree = epd->tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 delalloc_start;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 iosize;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;

	WARN_ON(!PageLocked(page));
	if (page->index > end_index) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
		zero_user_page(page, offset,
			       PAGE_CACHE_SIZE - offset, KM_USER0);
	}

	set_page_extent_mapped(page);

	delalloc_start = start;
	delalloc_end = 0;
	while (delalloc_end < page_end) {
		nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
						       &delalloc_end,
						       128 * 1024 * 1024);
		if (nr_delalloc <= 0)
			break;
		tree->ops->fill_delalloc(inode, delalloc_start,
					 delalloc_end);
		clear_extent_bit(tree, delalloc_start,
				 delalloc_end,
				 EXTENT_LOCKED | EXTENT_DELALLOC,
				 1, 0, GFP_NOFS);
		delalloc_start = delalloc_end + 1;
	}
	lock_extent(tree, start, page_end, GFP_NOFS);

	end = page_end;
	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after lock_extent\n");
	}

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			break;
		}
		em = epd->get_extent(inode, page, page_offset, cur, end, 1);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);
		iosize = min(em->end - cur, end - cur) + 1;
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
					 EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
							   cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret)
			SetPageError(page);
		else {
			unsigned long max_nr = end_index + 1;

			set_range_writeback(tree, cur, cur + iosize - 1);
			if (!PageWriteback(page)) {
				printk("warning page %lu not writeback, "
				       "cur %llu end %llu\n", page->index,
				       (unsigned long long)cur,
				       (unsigned long long)end);
			}

			ret = submit_extent_page(WRITE, tree, page, sector,
						 iosize, page_offset, bdev,
						 &epd->bio, max_nr,
						 end_bio_extent_writepage);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
done:
	if (nr == 0) {
		/* make sure the mapping tag for page dirty gets cleared */
		set_page_writeback(page);
		end_page_writeback(page);
	}
	unlock_extent(tree, start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}

int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc)
{
	int ret;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};

	ret = __extent_writepage(page, wbc, &epd);
	if (epd.bio)
		submit_one_bio(WRITE, epd.bio);
	return ret;
}
EXPORT_SYMBOL(extent_write_full_page);

int extent_writepages(struct extent_map_tree *tree,
		      struct address_space *mapping,
		      get_extent_t *get_extent,
		      struct writeback_control *wbc)
{
	int ret;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};

	ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
	if (epd.bio)
		submit_one_bio(WRITE, epd.bio);
	return ret;
}
EXPORT_SYMBOL(extent_writepages);

int extent_readpages(struct extent_map_tree *tree,
		     struct address_space *mapping,
		     struct list_head *pages, unsigned nr_pages,
		     get_extent_t get_extent)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	struct pagevec pvec;

	pagevec_init(&pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		/*
		 * what we want to do here is call add_to_page_cache_lru,
		 * but that isn't exported, so we reproduce it here
		 */
		if (!add_to_page_cache(page, mapping,
				       page->index, GFP_KERNEL)) {

			/* open coding of lru_cache_add, also not exported */
			page_cache_get(page);
			if (!pagevec_add(&pvec, page))
				__pagevec_lru_add(&pvec);
			__extent_read_full_page(tree, page, get_extent, &bio);
		}
		page_cache_release(page);
	}
	if (pagevec_count(&pvec))
		__pagevec_lru_add(&pvec);
	BUG_ON(!list_empty(pages));
	if (bio)
		submit_one_bio(READ, bio);
	return 0;
}
EXPORT_SYMBOL(extent_readpages);

/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_map_tree *tree,
			  struct page *page, unsigned long offset)
{
	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

	start += (offset + blocksize - 1) & ~(blocksize - 1);
	if (start > end)
		return 0;

	lock_extent(tree, start, end, GFP_NOFS);
	wait_on_extent_writeback(tree, start, end);
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
			 1, 1, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(extent_invalidatepage);
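
/*
 * Example of the round-up above: with a 4096 byte blocksize, a partial
 * invalidate at offset 1 rounds start forward by 4096, so the first
 * block, which still holds live data, keeps its extent state; an
 * invalidate at offset 0 clears state for the whole page.
 */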

/*
 * simple commit_write call, set_page_dirty is used to mark both
 * the pages and the extent records as dirty
 */
int extent_commit_write(struct extent_map_tree *tree,
			struct inode *inode, struct page *page,
			unsigned from, unsigned to)
{
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_extent_mapped(page);
	set_page_dirty(page);

	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(extent_commit_write);
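
/*
 * Note: i_size_write() expects the caller to serialize i_size updates;
 * the generic write path does this by holding i_mutex across the
 * prepare_write/commit_write pair.
 */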

int extent_prepare_write(struct extent_map_tree *tree,
			 struct inode *inode, struct page *page,
			 unsigned from, unsigned to, get_extent_t *get_extent)
{
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 block_start;
	u64 orig_block_start;
	u64 block_end;
	u64 cur_end;
	struct extent_map *em;
	unsigned blocksize = 1 << inode->i_blkbits;
	size_t page_offset = 0;
	size_t block_off_start;
	size_t block_off_end;
	int err = 0;
	int iocount = 0;
	int ret = 0;
	int isnew;

	set_page_extent_mapped(page);

	block_start = (page_start + from) & ~((u64)blocksize - 1);
	block_end = (page_start + to - 1) | (blocksize - 1);
	orig_block_start = block_start;

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	while (block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end, 1);
		if (IS_ERR(em) || !em) {
			goto err;
		}
		cur_end = min(block_end, em->end);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

		if (!PageUptodate(page) && isnew &&
		    (block_off_end > to || block_off_start < from)) {
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			if (block_off_end > to)
				memset(kaddr + to, 0, block_off_end - to);
			if (block_off_start < from)
				memset(kaddr + block_off_start, 0,
				       from - block_off_start);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (!isnew && !PageUptodate(page) &&
		    (block_off_end > to || block_off_start < from) &&
		    !test_range_bit(tree, block_start, cur_end,
				    EXTENT_UPTODATE, 1)) {
			u64 sector;
			u64 extent_offset = block_start - em->start;
			size_t iosize;
			sector = (em->block_start + extent_offset) >> 9;
			iosize = (cur_end - block_start + blocksize - 1) &
				~((u64)blocksize - 1);
			/*
			 * we've already got the extent locked, but we
			 * need to split the state such that our end_bio
			 * handler can clear the lock.
			 */
			set_extent_bit(tree, block_start,
				       block_start + iosize - 1,
				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 em->bdev, NULL, 1,
						 end_bio_extent_preparewrite);
			iocount++;
			block_start = block_start + iosize;
		} else {
			set_extent_uptodate(tree, block_start, cur_end,
					    GFP_NOFS);
			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
			block_start = cur_end + 1;
		}
		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
		free_extent_map(em);
	}
	if (iocount) {
		wait_extent_bit(tree, orig_block_start,
				block_end, EXTENT_LOCKED);
	}
	check_page_uptodate(tree, page);
err:
	/* FIXME, zero out newly allocated blocks on error */
	return err;
}
EXPORT_SYMBOL(extent_prepare_write);
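
/*
 * The loop above is the classic prepare_write read-modify-write dance,
 * one block at a time: freshly allocated ("new") blocks outside
 * [from, to) are simply zeroed in the page, old blocks that are not yet
 * uptodate are read in, and everything else is marked uptodate directly.
 * Waiting on EXTENT_LOCKED makes the reads effectively synchronous
 * before the caller copies in user data.
 */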

/*
 * a helper for releasepage. As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
{
	struct extent_map *em;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	u64 orig_start = start;
	int ret = 1;

	while (start <= end) {
		em = lookup_extent_mapping(tree, start, end);
		if (!em || IS_ERR(em))
			break;
		if (!test_range_bit(tree, em->start, em->end,
				    EXTENT_LOCKED, 0)) {
			remove_extent_mapping(tree, em);
			/* once for the rb tree */
			free_extent_map(em);
		}
		start = em->end + 1;
		/* once for us */
		free_extent_map(em);
	}
	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
		ret = 0;
	else
		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
				 1, 1, GFP_NOFS);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_mapping);
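
/*
 * Reference discipline above: lookup_extent_mapping() returns the map
 * with an extra reference held for the caller, so every iteration drops
 * that reference, plus the rb-tree's own reference whenever the map is
 * actually removed.
 */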

sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		     get_extent_t *get_extent)
{
	struct inode *inode = mapping->host;
	/* widen before shifting so a 32-bit sector_t cannot overflow */
	u64 start = (u64)iblock << inode->i_blkbits;
	u64 end = start + (1 << inode->i_blkbits) - 1;
	sector_t sector = 0;
	struct extent_map *em;

	em = get_extent(inode, NULL, 0, start, end, 0);
	if (!em || IS_ERR(em))
		return 0;

	if (em->block_start == EXTENT_MAP_INLINE ||
	    em->block_start == EXTENT_MAP_HOLE)
		goto out;

	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
out:
	free_extent_map(em);
	return sector;
}
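
/*
 * extent_bmap backs the FIBMAP ioctl via the ->bmap address space op;
 * returning 0 for holes and inline extents tells userspace there is no
 * stable on-disk block behind that file offset.
 */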

static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
{
	if (list_empty(&eb->lru)) {
		extent_buffer_get(eb);
		list_add(&eb->lru, &tree->buffer_lru);
		tree->lru_size++;
		if (tree->lru_size >= BUFFER_LRU_MAX) {
			struct extent_buffer *rm;
			rm = list_entry(tree->buffer_lru.prev,
					struct extent_buffer, lru);
			tree->lru_size--;
			list_del_init(&rm->lru);
			free_extent_buffer(rm);
		}
	} else
		list_move(&eb->lru, &tree->buffer_lru);
	return 0;
}

static struct extent_buffer *find_lru(struct extent_map_tree *tree,
				      u64 start, unsigned long len)
{
	struct list_head *lru = &tree->buffer_lru;
	struct list_head *cur = lru->next;
	struct extent_buffer *eb;

	if (list_empty(lru))
		return NULL;

	do {
		eb = list_entry(cur, struct extent_buffer, lru);
		if (eb->start == start && eb->len == len) {
			extent_buffer_get(eb);
			return eb;
		}
		cur = cur->next;
	} while (cur != lru);
	return NULL;
}

static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT);
}
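
/*
 * Example: with 4K pages, start 0 and len 16K cover exactly 4 pages,
 * while start 2048 and the same len straddle a page boundary and cover 5.
 */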

static inline struct page *extent_buffer_page(struct extent_buffer *eb,
					      unsigned long i)
{
	struct page *p;
	struct address_space *mapping;

	if (i == 0)
		return eb->first_page;
	i += eb->start >> PAGE_CACHE_SHIFT;
	mapping = eb->first_page->mapping;
	read_lock_irq(&mapping->tree_lock);
	p = radix_tree_lookup(&mapping->page_tree, i);
	read_unlock_irq(&mapping->tree_lock);
	return p;
}

static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
						   u64 start,
						   unsigned long len,
						   gfp_t mask)
{
	struct extent_buffer *eb = NULL;

	spin_lock(&tree->lru_lock);
	eb = find_lru(tree, start, len);
	spin_unlock(&tree->lru_lock);
	if (eb) {
		return eb;
	}

	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;
	INIT_LIST_HEAD(&eb->lru);
	eb->start = start;
	eb->len = len;
	atomic_set(&eb->refs, 1);

	return eb;
}

static void __free_extent_buffer(struct extent_buffer *eb)
{
	kmem_cache_free(extent_buffer_cache, eb);
}

struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
					  u64 start, unsigned long len,
					  struct page *page0,
					  gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	if (page0) {
		eb->first_page = page0;
		i = 1;
		index++;
		page_cache_get(page0);
		mark_page_accessed(page0);
		set_page_extent_mapped(page0);
		WARN_ON(!PageUptodate(page0));
		set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
				 len << 2);
	} else {
		i = 0;
	}
	for (; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
		if (!p) {
			WARN_ON(1);
			goto fail;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);
		if (i == 0) {
			eb->first_page = p;
			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 len << 2);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;

fail:
	spin_lock(&tree->lru_lock);
	list_del_init(&eb->lru);
	spin_unlock(&tree->lru_lock);
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 1; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	if (i > 0)
		page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(alloc_extent_buffer);
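
/*
 * page->private encoding used above: the EXTENT_PAGE_PRIVATE* flag values
 * occupy the low bits, which is why the buffer length is shifted left by
 * two before being stored alongside them on the first page.
 */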

struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
					 u64 start, unsigned long len,
					 gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	for (i = 0; i < num_pages; i++, index++) {
		p = find_lock_page(mapping, index);
		if (!p) {
			goto fail;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);

		if (i == 0) {
			eb->first_page = p;
			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 len << 2);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}

		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;
fail:
	spin_lock(&tree->lru_lock);
	list_del_init(&eb->lru);
	spin_unlock(&tree->lru_lock);
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 1; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	if (i > 0)
		page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(find_extent_buffer);
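
/*
 * find_extent_buffer differs from alloc_extent_buffer only in using
 * find_lock_page(), so it fails instead of allocating when a page of
 * the buffer is not already present in the page cache.
 */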

void free_extent_buffer(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	if (!eb)
		return;

	if (!atomic_dec_and_test(&eb->refs))
		return;

	WARN_ON(!list_empty(&eb->lru));
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 1; i < num_pages; i++) {
		page_cache_release(extent_buffer_page(eb, i));
	}
	page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
}
EXPORT_SYMBOL(free_extent_buffer);

int clear_extent_buffer_dirty(struct extent_map_tree *tree,
			      struct extent_buffer *eb)
{
	int set;
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	u64 start = eb->start;
	u64 end = start + eb->len - 1;

	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		/*
		 * if we're on the last page or the first page and the
		 * block isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean a page that is partially dirty
		 */
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			start = (u64)page->index << PAGE_CACHE_SHIFT;
			end = start + PAGE_CACHE_SIZE - 1;
			if (test_range_bit(tree, start, end,
					   EXTENT_DIRTY, 0)) {
				unlock_page(page);
				continue;
			}
		}
		clear_page_dirty_for_io(page);
		write_lock_irq(&page->mapping->tree_lock);
		if (!PageDirty(page)) {
			radix_tree_tag_clear(&page->mapping->page_tree,
					     page_index(page),
					     PAGECACHE_TAG_DIRTY);
		}
		write_unlock_irq(&page->mapping->tree_lock);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_dirty);

int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
				    struct extent_buffer *eb)
{
	return wait_on_extent_writeback(tree, eb->start,
					eb->start + eb->len - 1);
}
EXPORT_SYMBOL(wait_on_extent_buffer_writeback);

int set_extent_buffer_dirty(struct extent_map_tree *tree,
			    struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *page = extent_buffer_page(eb, i);
		/* writepage may need to do something special for the
		 * first page, we have to make sure page->private is
		 * properly set.  releasepage may drop page->private
		 * on us if the page isn't already dirty.
		 */
		if (i == 0) {
			lock_page(page);
			set_page_private(page,
					 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 eb->len << 2);
		}
		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
		if (i == 0)
			unlock_page(page);
	}
	return set_extent_dirty(tree, eb->start,
				eb->start + eb->len - 1, GFP_NOFS);
}
EXPORT_SYMBOL(set_extent_buffer_dirty);

int set_extent_buffer_uptodate(struct extent_map_tree *tree,
			       struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);

	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			    GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			check_page_uptodate(tree, page);
			continue;
		}
		SetPageUptodate(page);
	}
	return 0;
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);

int extent_buffer_uptodate(struct extent_map_tree *tree,
			   struct extent_buffer *eb)
{
	if (eb->flags & EXTENT_UPTODATE)
		return 1;
	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			      EXTENT_UPTODATE, 1);
}
EXPORT_SYMBOL(extent_buffer_uptodate);

int read_extent_buffer_pages(struct extent_map_tree *tree,
			     struct extent_buffer *eb,
			     u64 start,
			     int wait)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	unsigned long num_pages;

	if (eb->flags & EXTENT_UPTODATE)
		return 0;

	/* this range-based check is intentionally compiled out; the
	 * per-page uptodate checks below do the real work
	 */
	if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
				EXTENT_UPTODATE, 1)) {
		return 0;
	}

	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			(eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (PageUptodate(page)) {
			continue;
		}
		if (!wait) {
			if (TestSetPageLocked(page)) {
				continue;
			}
		} else {
			lock_page(page);
		}
		if (!PageUptodate(page)) {
			err = page->mapping->a_ops->readpage(NULL, page);
			if (err) {
				ret = err;
			}
		} else {
			unlock_page(page);
		}
	}

	if (ret || !wait) {
		return ret;
	}

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			ret = -EIO;
		}
	}
	if (!ret)
		eb->flags |= EXTENT_UPTODATE;
	return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);
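
/*
 * With wait == 0 this only kicks off the reads it can start without
 * blocking (TestSetPageLocked skips pages someone else holds); with
 * wait == 1 it blocks until every page is read and returns -EIO if any
 * page failed to come uptodate.
 */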

void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long num_pages = num_extent_pages(eb->start, eb->len);

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			printk("page %lu not up to date i %lu, total %lu, "
			       "len %lu\n", page->index, i, num_pages,
			       eb->len);
			WARN_ON(1);
		}
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(read_extent_buffer);

int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **token, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
	}
	if (start + min_len > eb->len) {
		printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n",
		       eb->start, eb->len, start, min_len);
		WARN_ON(1);
	}

	p = extent_buffer_page(eb, i);
	WARN_ON(!PageUptodate(p));
	kaddr = kmap_atomic(p, km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
EXPORT_SYMBOL(map_private_extent_buffer);

int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	int err;
	int save = 0;
	if (eb->map_token) {
		unmap_extent_buffer(eb, eb->map_token, km);
		eb->map_token = NULL;
		save = 1;
	}
	err = map_private_extent_buffer(eb, start, min_len, token, map,
					map_start, map_len, km);
	if (!err && save) {
		eb->map_token = *token;
		eb->kaddr = *map;
		eb->map_start = *map_start;
		eb->map_len = *map_len;
	}
	return err;
}
EXPORT_SYMBOL(map_extent_buffer);
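
/*
 * A mapping can only cover one page (map_private_extent_buffer returns
 * -EINVAL when the requested range crosses a page boundary), so callers
 * walking a whole buffer must unmap and remap as they cross pages.
 */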

void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);

int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);

void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(write_extent_buffer);

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(memset_extent_buffer);

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		 ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(copy_extent_buffer);

static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		/* distinct pages cannot overlap, so direction does not
		 * matter here; copying backwards simply mirrors the
		 * tail-first walk done by memmove_extent_buffer
		 */
		while (len--)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE -
					    dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);
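
/*
 * memcpy_extent_buffer copies forwards, which is only safe for
 * overlapping ranges when dst_offset < src_offset;
 * memmove_extent_buffer below detects the other direction and walks
 * the ranges backwards instead.
 */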

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);