/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */
	BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
			  * IO completion of other buffers in the page
			  */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks should be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};
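
/*
 * A minimal sketch (illustrative, not part of this header) of how a user
 * claims private state bits starting at BH_PrivateStart, as journaling
 * code does for its own bookkeeping:
 *
 *	enum my_bh_state_bits {
 *		BH_MyFirst = BH_PrivateStart,
 *		BH_MySecond,
 *	};
 *	BUFFER_FNS(MyFirst, my_first)
 */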

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
};

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	set_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
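
/*
 * For illustration (not compiled; the real definitions come from the
 * invocations above), BUFFER_FNS(Dirty, dirty) expands to:
 *
 *	static __always_inline void set_buffer_dirty(struct buffer_head *bh)
 *	{
 *		set_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static __always_inline void clear_buffer_dirty(struct buffer_head *bh)
 *	{
 *		clear_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static __always_inline int buffer_dirty(const struct buffer_head *bh)
 *	{
 *		return test_bit(BH_Dirty, &(bh)->b_state);
 *	}
 */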

#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
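
/*
 * A minimal sketch (illustrative, not part of the kernel API) of the
 * canonical way to walk a page's buffers via the circular b_this_page
 * list.  Assumes the caller holds whatever locking keeps the buffer
 * list stable (typically the page lock).
 */
static inline void example_for_each_buffer(struct page *page)
{
	struct buffer_head *head, *bh;

	if (!page_has_buffers(page))
		return;
	head = page_buffers(page);
	bh = head;
	do {
		/* inspect bh->b_state, bh->b_blocknr, bh->b_data, ... */
		bh = bh->b_this_page;
	} while (bh != head);
}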

void buffer_check_dirty_writeback(struct page *page,
				     bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
				  unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
void write_dirty_buffer(struct buffer_head *bh, int op_flags);
int submit_bh(int, int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);
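
/*
 * A minimal sketch (illustrative, not part of the kernel API) showing how
 * the two helpers above combine into a synchronous "make this buffer
 * uptodate" read: bh_uptodate_or_lock() returns 1 if the buffer is
 * already uptodate, otherwise it returns 0 with the buffer locked, and
 * bh_submit_read() then reads the locked buffer, returning 0 on success.
 */
static inline int example_read_buffer(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;
	return bh_submit_read(bh);
}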

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
			  unsigned int length);
int block_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler);
int block_read_full_page(struct page*, get_block_t*);
int block_is_partially_uptodate(struct page *page, unsigned long from,
				unsigned long count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
			unsigned, unsigned, struct page **, void **,
			get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline int block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT || err == -EAGAIN)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
				struct page **, void **, get_block_t*);
int nobh_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc);

void buffer_init(void);

/*
 * inline definitions
 */

static inline void attach_page_buffers(struct page *page,
		struct buffer_head *head)
{
	get_page(page);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)head);
}

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
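
/*
 * A minimal sketch (illustrative, not part of the kernel API) of the
 * common read/use/release pattern around sb_bread().  sb_bread() returns
 * NULL if the block could not be read; on success the caller owns a
 * reference that must be dropped with brelse().
 */
static inline int example_use_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_bread(sb, block);

	if (!bh)
		return -EIO;
	/* ... use bh->b_data (bh->b_size bytes) ... */
	brelse(bh);
	return 0;
}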

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}
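
/*
 * A minimal sketch (illustrative, not part of the kernel API) of a
 * get_block_t implementation built on map_bh().  The identity
 * logical-to-physical mapping here is a made-up assumption; a real
 * filesystem would look iblock up in its extent or indirect-block
 * metadata, and would set_buffer_new() on blocks it just allocated.
 */
static inline int example_get_block(struct inode *inode, sector_t iblock,
				    struct buffer_head *bh_result, int create)
{
	/* hypothetical: file blocks stored contiguously from block 0 */
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}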

static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}
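
/*
 * A minimal sketch (illustrative, not part of the kernel API) of
 * serialising a buffer update against in-flight I/O: take the buffer
 * lock, modify the data, mark the buffer uptodate and dirty, then
 * unlock.  memset() is assumed to be available via the usual kernel
 * string helpers pulled in by the includes above.
 */
static inline void example_zero_buffer(struct buffer_head *bh)
{
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);
	unlock_buffer(bh);
}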

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 * __bread() - reads a specified block and returns the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 *
 * Reads a specified block, and returns the buffer head that contains it.
 * The page cache is allocated from the movable area so that it can be
 * migrated.  Returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */