#ifndef __EXTENTIO__
#define __EXTENTIO__

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include "ulist.h"

/* bits for the extent state */
#define EXTENT_DIRTY (1U << 0)
#define EXTENT_WRITEBACK (1U << 1)
#define EXTENT_UPTODATE (1U << 2)
#define EXTENT_LOCKED (1U << 3)
#define EXTENT_NEW (1U << 4)
#define EXTENT_DELALLOC (1U << 5)
#define EXTENT_DEFRAG (1U << 6)
#define EXTENT_BOUNDARY (1U << 9)
#define EXTENT_NODATASUM (1U << 10)
#define EXTENT_CLEAR_META_RESV (1U << 11)
#define EXTENT_FIRST_DELALLOC (1U << 12)
#define EXTENT_NEED_WAIT (1U << 13)
#define EXTENT_DAMAGED (1U << 14)
#define EXTENT_NORESERVE (1U << 15)
#define EXTENT_QGROUP_RESERVED (1U << 16)
#define EXTENT_CLEAR_DATA_RESV (1U << 17)
#define EXTENT_DELALLOC_NEW (1U << 18)
#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \
			      EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)

/*
 * flags for bio submission. The high bits indicate the compression
 * type for this bio
 */
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_TREE_LOG 2
#define EXTENT_BIO_FLAG_SHIFT 16

/* these are bit numbers for test/set bit */
#define EXTENT_BUFFER_UPTODATE 0
#define EXTENT_BUFFER_DIRTY 2
#define EXTENT_BUFFER_CORRUPT 3
#define EXTENT_BUFFER_READAHEAD 4 /* this got triggered by readahead */
#define EXTENT_BUFFER_TREE_REF 5
#define EXTENT_BUFFER_STALE 6
#define EXTENT_BUFFER_WRITEBACK 7
#define EXTENT_BUFFER_READ_ERR 8 /* read IO error */
#define EXTENT_BUFFER_DUMMY 9
#define EXTENT_BUFFER_IN_TREE 10
#define EXTENT_BUFFER_WRITE_ERR 11 /* write IO error */

/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK (1 << 0)
#define PAGE_CLEAR_DIRTY (1 << 1)
#define PAGE_SET_WRITEBACK (1 << 2)
#define PAGE_END_WRITEBACK (1 << 3)
#define PAGE_SET_PRIVATE2 (1 << 4)
#define PAGE_SET_ERROR (1 << 5)
#define PAGE_LOCK (1 << 6)

/*
 * page->private values. Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))

static inline int le_test_bit(int nr, const u8 *addr)
{
	return 1U & (addr[BIT_BYTE(nr)] >> (nr & (BITS_PER_BYTE - 1)));
}

extern void le_bitmap_set(u8 *map, unsigned int start, int len);
extern void le_bitmap_clear(u8 *map, unsigned int start, int len);

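/*
 * A minimal sketch of how a byte-granular setter can be built from the masks
 * above (the real le_bitmap_set()/le_bitmap_clear() live in extent_io.c).
 * example_le_bitmap_set() is a hypothetical name used only for illustration:
 * it sets the bit range [start, start + len) in a little-endian bitmap one
 * byte at a time, so no word-sized, endian-dependent accesses are needed.
 */
static inline void example_le_bitmap_set(u8 *map, unsigned int start, int len)
{
	u8 *p = map + BIT_BYTE(start);
	const unsigned int size = start + len;
	int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start);

	/* The (possibly partial) first byte, then any whole bytes after it. */
	while (len - bits_to_set >= 0) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0;
		p++;
	}
	/* Remaining bits that do not fill the final byte. */
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		*p |= mask_to_set;
	}
}
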
struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;

typedef int (extent_submit_bio_hook_t)(void *private_data, struct bio *bio,
				       int mirror_num, unsigned long bio_flags,
				       u64 bio_offset);
struct extent_io_ops {
	/*
	 * The following callbacks must always be defined; the function
	 * pointer will be called unconditionally.
	 */
	extent_submit_bio_hook_t *submit_bio_hook;
	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
				    struct page *page, u64 start, u64 end,
				    int mirror);
	int (*merge_bio_hook)(struct page *page, unsigned long offset,
			      size_t size, struct bio *bio,
			      unsigned long bio_flags);
	int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
	struct btrfs_fs_info *(*tree_fs_info)(void *private_data);
	void (*set_range_writeback)(void *private_data, u64 start, u64 end);

	/*
	 * Optional hooks, called only if the pointer is not NULL
	 */
	int (*fill_delalloc)(void *private_data, struct page *locked_page,
			     u64 start, u64 end, int *page_started,
			     unsigned long *nr_written);

	int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
	void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
				      struct extent_state *state, int uptodate);
	void (*set_bit_hook)(void *private_data, struct extent_state *state,
			     unsigned *bits);
	void (*clear_bit_hook)(void *private_data,
			       struct extent_state *state,
			       unsigned *bits);
	void (*merge_extent_hook)(void *private_data,
				  struct extent_state *new,
				  struct extent_state *other);
	void (*split_extent_hook)(void *private_data,
				  struct extent_state *orig, u64 split);
	void (*check_extent_io_range)(void *private_data, const char *caller,
				      u64 start, u64 end);
};

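/*
 * Illustration of the two hook groups above. example_writepage_start() is a
 * hypothetical helper, not part of the btrfs API: the mandatory hooks
 * (submit_bio_hook and friends) may be called unconditionally, while an
 * optional hook such as writepage_start_hook must be checked for NULL first.
 */
static inline int example_writepage_start(const struct extent_io_ops *ops,
					  struct page *page, u64 start, u64 end)
{
	if (ops && ops->writepage_start_hook)
		return ops->writepage_start_hook(page, start, end);
	return 0;
}
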
struct extent_io_tree {
	struct rb_root state;
	void *private_data;
	u64 dirty_bytes;
	int track_uptodate;
	spinlock_t lock;
	const struct extent_io_ops *ops;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;
	atomic_t refs;
	atomic_t io_pages;
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;

	/* count of read lock holders on the extent buffer */
	atomic_t write_locks;
	atomic_t read_locks;
	atomic_t blocking_writers;
	atomic_t blocking_readers;
	atomic_t spinning_readers;
	atomic_t spinning_writers;
	short lock_nested;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* protects write locks */
	rwlock_t lock;

	/* readers use lock_wq while they wait for the write
	 * lock holders to unlock
	 */
	wait_queue_head_t write_lock_wq;

	/* writers use read_lock_wq while they wait for readers
	 * to unlock
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	unsigned int bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};

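/*
 * Hypothetical helper, shown only for illustration: how a caller would
 * prepare an extent_changeset before handing it to
 * set_record_extent_bits()/clear_record_extent_bits() below, i.e. zero the
 * byte counter and initialize the embedded ulist from "ulist.h".
 */
static inline void example_extent_changeset_init(struct extent_changeset *changeset)
{
	changeset->bytes_changed = 0;
	ulist_init(&changeset->range_changed);
}
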
static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}

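/*
 * Example round trip for the helpers above, assuming a compression type
 * constant such as BTRFS_COMPRESS_ZLIB from compression.h:
 *
 *	unsigned long bio_flags = EXTENT_BIO_COMPRESSED;
 *
 *	extent_set_compress_type(&bio_flags, BTRFS_COMPRESS_ZLIB);
 *	...
 *	if (bio_flags & EXTENT_BIO_COMPRESSED)
 *		type = extent_compress_type(bio_flags);
 *
 * The low bits carry the EXTENT_BIO_* flags, while the bits at and above
 * EXTENT_BIO_FLAG_SHIFT carry the compression type.
 */
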
struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					   struct page *page,
					   size_t pg_offset,
					   u64 start, u64 len,
					   int create);

void extent_io_tree_init(struct extent_io_tree *tree, void *private_data);
int try_release_extent_mapping(struct extent_map_tree *map,
			       struct extent_io_tree *tree, struct page *page,
			       gfp_t mask);
int try_release_extent_buffer(struct page *page);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached, gfp_t mask);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
				GFP_NOFS);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				mask);
}

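/*
 * Typical locking pattern for the helpers above, written as a hypothetical
 * example_locked_range_op() (ranges are byte ranges, inclusive on both ends):
 * take the range lock with a cached state, operate on [start, end], then
 * drop the lock through the same cached state to avoid a second tree lookup.
 */
static inline void example_locked_range_op(struct extent_io_tree *tree,
					   u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	lock_extent_bits(tree, start, end, &cached);
	/* ... inspect or update the io tree state for [start, end] ... */
	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
}
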
static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL,
				GFP_NOFS);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			      GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc);
int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
			      u64 start, u64 end, get_extent_t *get_extent,
			      int mode);
int extent_writepages(struct extent_io_tree *tree,
		      struct address_space *mapping,
		      get_extent_t *get_extent,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
int extent_readpages(struct extent_io_tree *tree,
		     struct address_space *mapping,
		     struct list_head *pages, unsigned nr_pages,
		     get_extent_t get_extent);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		  __u64 start, __u64 len, get_extent_t *get_extent);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE 0
#define WAIT_COMPLETE 1
#define WAIT_PAGE_LOCK 2
int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb, int wait,
			     get_extent_t *get_extent, int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
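
/*
 * Hedged sketch of how the calls above combine to read one tree block; this
 * is illustrative only (error handling is simplified and the real callers
 * live in disk-io.c). 'tree' and 'get_extent' are whatever the caller already
 * uses, and IS_ERR() is assumed available from linux/err.h.
 */
static inline struct extent_buffer *
example_read_tree_block(struct btrfs_fs_info *fs_info,
			struct extent_io_tree *tree,
			get_extent_t *get_extent, u64 start)
{
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(fs_info, start);
	if (IS_ERR(eb))
		return eb;

	/* WAIT_COMPLETE: block until all pages have been read (or failed). */
	if (read_extent_buffer_pages(tree, eb, WAIT_COMPLETE, get_extent, 0)) {
		free_extent_buffer(eb);
		return NULL;
	}
	return eb;
}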

static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(start >> PAGE_SHIFT);
}

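/*
 * Worked example, assuming 4 KiB pages: a 16 KiB extent buffer that starts
 * page aligned covers num_extent_pages(start, 16384) == 4 pages, while the
 * same buffer starting 2 KiB into a page straddles 5.
 */
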
static inline void extent_buffer_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->refs);
}

int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len);
void read_extent_buffer(struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dst,
			       unsigned long start,
			       unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
					 const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
int set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
			      unsigned long min_len, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				  u64 delalloc_end, struct page *locked_page,
				  unsigned bits_to_clear,
				  unsigned long page_ops);
struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
		      u64 length, u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or because csum verification fails, we
 * try the other mirrors that might have a good copy of the data. This
 * io_failure_record is used to record state as we go through all the
 * mirrors. If another mirror has good data, the page is set up to date
 * and things continue. If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};

void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
				  u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret);
int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
			   struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
				    struct io_failure_record *failrec,
				    struct page *page, int pg_offset, int icsum,
				    bio_end_io_t *endio_func, void *data);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
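
/*
 * Hedged sketch of how the failure-record API above fits together; the
 * example_try_other_mirror() name is hypothetical, and bio submission,
 * checksum offsets and cleanup are omitted (see the real users in
 * extent_io.c and inode.c).
 */
static inline int example_try_other_mirror(struct inode *inode,
					   struct bio *failed_bio,
					   struct page *page, u64 start,
					   u64 end, int failed_mirror,
					   bio_end_io_t *endio)
{
	struct io_failure_record *failrec;
	struct bio *repair_bio;
	int ret;

	/* Look up (or create) the record tracking which mirrors were tried. */
	ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
	if (ret)
		return ret;

	/* No usable mirror left: let the original end_io report the error. */
	if (!btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror))
		return -EIO;

	/* Build a bio aimed at failrec->this_mirror; the caller submits it. */
	repair_bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
					     0 /* pg_offset */, 0 /* icsum */,
					     endio, NULL);
	return repair_bio ? 0 : -EIO;
}
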
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
noinline u64 find_lock_delalloc_range(struct inode *inode,
				      struct extent_io_tree *tree,
				      struct page *locked_page, u64 *start,
				      u64 *end, u64 max_bytes);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);
#endif