/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <asm/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
        BH_Uptodate,    /* Contains valid data */
        BH_Dirty,       /* Is dirty */
        BH_Lock,        /* Is locked */
        BH_Req,         /* Has been submitted for I/O */
        BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
                          * IO completion of other buffers in the page
                          */

        BH_Mapped,      /* Has a disk mapping */
        BH_New,         /* Disk mapping was newly created by get_block */
        BH_Async_Read,  /* Is under end_buffer_async_read I/O */
        BH_Async_Write, /* Is under end_buffer_async_write I/O */
        BH_Delay,       /* Buffer is not yet allocated on disk */
        BH_Boundary,    /* Block is followed by a discontiguity */
        BH_Write_EIO,   /* I/O error on write */
        BH_Ordered,     /* ordered write */
        BH_Eopnotsupp,  /* operation not supported (barrier) */
        BH_Unwritten,   /* Buffer is allocated on disk but not written */
        BH_Quiet,       /* Buffer error printks should be quiet */

        BH_PrivateStart,/* not a state bit, but the first bit available
                         * for private allocation by other entities
                         */
};

#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
        unsigned long b_state;          /* buffer state bitmap (see above) */
        struct buffer_head *b_this_page;/* circular list of page's buffers */
        struct page *b_page;            /* the page this bh is mapped to */

        sector_t b_blocknr;             /* start block number */
        size_t b_size;                  /* size of mapping */
        char *b_data;                   /* pointer to data within the page */

        struct block_device *b_bdev;
        bh_end_io_t *b_end_io;          /* I/O completion */
        void *b_private;                /* reserved for b_end_io */
        struct list_head b_assoc_buffers; /* associated with another mapping */
        struct address_space *b_assoc_map;      /* mapping this buffer is
                                                   associated with */
        atomic_t b_count;               /* users using this buffer_head */
};
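
/*
 * Illustrative sketch (an assumption about typical use, not part of this
 * header's API): a filesystem reads one metadata block through the buffer
 * cache with sb_bread() (declared below) and drops its reference when
 * done.  "blocknr" and "buf" are hypothetical.
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(buf, bh->b_data, bh->b_size);
 *	brelse(bh);
 */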

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 */
#define BUFFER_FNS(bit, name)                                           \
static inline void set_buffer_##name(struct buffer_head *bh)           \
{                                                                       \
        set_bit(BH_##bit, &(bh)->b_state);                              \
}                                                                       \
static inline void clear_buffer_##name(struct buffer_head *bh)         \
{                                                                       \
        clear_bit(BH_##bit, &(bh)->b_state);                            \
}                                                                       \
static inline int buffer_##name(const struct buffer_head *bh)          \
{                                                                       \
        return test_bit(BH_##bit, &(bh)->b_state);                      \
}
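
/*
 * For example, BUFFER_FNS(Dirty, dirty) below expands to:
 *
 *	static inline void set_buffer_dirty(struct buffer_head *bh)
 *	{
 *		set_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static inline void clear_buffer_dirty(struct buffer_head *bh)
 *	{
 *		clear_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static inline int buffer_dirty(const struct buffer_head *bh)
 *	{
 *		return test_bit(BH_Dirty, &(bh)->b_state);
 *	}
 */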

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)                                       \
static inline int test_set_buffer_##name(struct buffer_head *bh)       \
{                                                                       \
        return test_and_set_bit(BH_##bit, &(bh)->b_state);              \
}                                                                       \
static inline int test_clear_buffer_##name(struct buffer_head *bh)     \
{                                                                       \
        return test_and_clear_bit(BH_##bit, &(bh)->b_state);            \
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Ordered, ordered)
BUFFER_FNS(Eopnotsupp, eopnotsupp)
BUFFER_FNS(Unwritten, unwritten)

#define bh_offset(bh)           ((unsigned long)(bh)->b_data & ~PAGE_MASK)
#define touch_buffer(bh)        mark_page_accessed(bh->b_page)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)                                      \
        ({                                                      \
                BUG_ON(!PagePrivate(page));                     \
                ((struct buffer_head *)page_private(page));     \
        })
#define page_has_buffers(page)  PagePrivate(page)
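
/*
 * Illustrative sketch (an assumption about typical use; "handle_dirty_bh"
 * is a hypothetical helper): walking every buffer attached to a page via
 * the circular b_this_page list.  Callers must check page_has_buffers()
 * first, since page_buffers() BUGs on a page with no private buffer_heads.
 *
 *	struct buffer_head *head, *bh;
 *
 *	if (!page_has_buffers(page))
 *		return;
 *	head = bh = page_buffers(page);
 *	do {
 *		if (buffer_dirty(bh))
 *			handle_dirty_bh(bh);
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */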

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry);
void create_empty_buffers(struct page *, unsigned long,
                        unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
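
/*
 * Illustrative sketch (an assumption about typical use): a filesystem
 * whose per-file metadata buffers live in another mapping (the block
 * device's) dirties them against the owning inode when they change:
 *
 *	mark_buffer_dirty_inode(bh, inode);
 *
 * and later, in its fsync path, flushes that inode's associated list:
 *
 *	err = sync_mapping_buffers(inode->i_mapping);
 */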

void mark_buffer_async_write(struct buffer_head *bh);
void invalidate_bdev(struct block_device *);
int sync_blockdev(struct block_device *bdev);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
int fsync_bdev(struct block_device *);
struct super_block *freeze_bdev(struct block_device *);
int thaw_bdev(struct block_device *, struct super_block *);
int fsync_super(struct super_block *);
int fsync_no_super(struct block_device *);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
                        unsigned size);
struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
                        unsigned size);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, struct buffer_head *bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int submit_bh(int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);
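
/*
 * Illustrative sketch (an assumption about typical use): bh_uptodate_or_lock()
 * and bh_submit_read() pair up for the common "read the block unless it is
 * already cached" case.  bh_uptodate_or_lock() returns 1 if the buffer is
 * uptodate, and otherwise returns 0 with the buffer locked, ready for
 * bh_submit_read() to issue the read and wait for it:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		if (bh_submit_read(bh) < 0)
 *			return -EIO;
 *	}
 */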

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
                        struct writeback_control *wbc);
int block_read_full_page(struct page*, get_block_t*);
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
                        unsigned long from);
int block_write_begin(struct file *, struct address_space *,
                        loff_t, unsigned, unsigned,
                        struct page **, void **, get_block_t*);
int block_write_end(struct file *, struct address_space *,
                        loff_t, unsigned, unsigned,
                        struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
                        loff_t, unsigned, unsigned,
                        struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
int cont_write_begin(struct file *, struct address_space *, loff_t,
                        unsigned, unsigned, struct page **, void **,
                        get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
                        get_block_t get_block);
void block_sync_page(struct page *);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int file_fsync(struct file *, struct dentry *, int);
int nobh_write_begin(struct file *, struct address_space *,
                        loff_t, unsigned, unsigned,
                        struct page **, void **, get_block_t*);
int nobh_write_end(struct file *, struct address_space *,
                        loff_t, unsigned, unsigned,
                        struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
                        struct writeback_control *wbc);
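
/*
 * Illustrative sketch (an assumption about typical use; "foo_get_block"
 * is a hypothetical get_block_t): a simple block-backed filesystem wires
 * these helpers straight into its address_space_operations, supplying
 * only its own block-mapping callback:
 *
 *	static int foo_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, foo_get_block);
 *	}
 *
 *	static int foo_writepage(struct page *page,
 *				 struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, foo_get_block, wbc);
 *	}
 */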

void buffer_init(void);

/*
 * inline definitions
 */

static inline void attach_page_buffers(struct page *page,
                struct buffer_head *head)
{
        page_cache_get(page);
        SetPagePrivate(page);
        set_page_private(page, (unsigned long)head);
}
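
/*
 * Illustrative sketch (an assumption, modelled on create_empty_buffers()
 * in fs/buffer.c): attach_page_buffers() publishes a freshly built ring
 * of buffers on a page.  alloc_page_buffers() returns a NULL-terminated
 * chain, which the caller closes into a ring before attaching:
 *
 *	struct buffer_head *head, *bh, *tail;
 *
 *	head = alloc_page_buffers(page, blocksize, 1);
 *	bh = head;
 *	do {
 *		tail = bh;
 *		bh = bh->b_this_page;
 *	} while (bh);
 *	tail->b_this_page = head;
 *	attach_page_buffers(page, head);
 */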

static inline void get_bh(struct buffer_head *bh)
{
        atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
        smp_mb__before_atomic_dec();
        atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
        if (bh)
                __brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
        if (bh)
                __bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
        return __bread(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
        __breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
        return __getblk(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
        return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}
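
/*
 * Illustrative sketch (an assumption about typical use; "blocknr" is
 * hypothetical): sb_getblk() is the right call when the caller will
 * overwrite the whole block and does not care about its current on-disk
 * contents, whereas sb_bread() reads the block in first:
 *
 *	bh = sb_getblk(sb, blocknr);
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */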

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
        set_buffer_mapped(bh);
        bh->b_bdev = sb->s_bdev;
        bh->b_blocknr = block;
        bh->b_size = sb->s_blocksize;
}
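
/*
 * Illustrative sketch (an assumption; "foo_resolve" is a hypothetical
 * mapping function): a get_block_t implementation resolves a file block
 * to a disk block and reports the mapping through map_bh():
 *
 *	static int foo_get_block(struct inode *inode, sector_t iblock,
 *				 struct buffer_head *bh, int create)
 *	{
 *		sector_t phys = foo_resolve(inode, iblock, create);
 *
 *		if (!phys)
 *			return -EIO;
 *		map_bh(bh, inode->i_sb, phys);
 *		return 0;
 *	}
 */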

/*
 * Calling wait_on_buffer() for a zero-ref buffer is illegal, so we call into
 * __wait_on_buffer() just to trip a debug check.  The check lives out of
 * line because debug code in inline functions is bloaty.
 */
static inline void wait_on_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
                __wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
        return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (!trylock_buffer(bh))
                __lock_buffer(bh);
}
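
/*
 * Illustrative sketch (an assumption, modelled on sync_dirty_buffer() in
 * fs/buffer.c): the usual synchronous-write pattern combines the helpers
 * above.  Take the buffer lock, submit the I/O, wait for completion and
 * check the result; end_buffer_write_sync() unlocks the buffer and drops
 * the reference taken here:
 *
 *	lock_buffer(bh);
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_write_sync;
 *	submit_bh(WRITE, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 */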

extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int sync_blockdev(struct block_device *bdev) { return 0; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bdev(struct block_device *bdev) {}

static inline struct super_block *freeze_bdev(struct block_device *bdev)
{
        return NULL;
}

static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
        return 0;
}

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */