/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */
#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK
enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */
	BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
			  * IO completion of other buffers in the page
			  */
	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks are to be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};
#define MAX_BUF_PER_PAGE	(PAGE_SIZE / 512)
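/*
 * For example, with 4KiB pages and the minimum 512-byte block size, this
 * comes to PAGE_SIZE / 512 = 8 buffer_heads per page.
 */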
struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
};
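/*
 * Illustrative sketch (not part of this header's API): because
 * b_this_page forms a circular list, a page's buffers are typically
 * walked with a do/while loop that stops on returning to the head:
 *
 *	struct buffer_head *head = page_buffers(page);
 *	struct buffer_head *bh = head;
 *	do {
 *		// inspect or update bh here
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */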
/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	set_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}
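/*
 * For example, BUFFER_FNS(Uptodate, uptodate) expands to
 * set_buffer_uptodate(), clear_buffer_uptodate() and buffer_uptodate(),
 * each operating atomically on the BH_Uptodate bit of bh->b_state.
 */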
/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}
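/*
 * These return the previous value of the bit: e.g.
 * test_set_buffer_dirty(bh) sets BH_Dirty and returns non-zero only if
 * the buffer was already dirty.
 */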
/*
 * Emit the buffer bitops functions.   Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
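/*
 * Illustrative use of the generated helpers, e.g. in a synchronous I/O
 * completion handler (a sketch, mirroring end_buffer_read_sync()):
 *
 *	if (uptodate)
 *		set_buffer_uptodate(bh);
 *	else
 *		clear_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 */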
#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
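/*
 * Callers are expected to test page_has_buffers() first, since
 * page_buffers() BUGs on a page with no private buffer_heads:
 *
 *	if (page_has_buffers(page))
 *		head = page_buffers(page);
 */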
void buffer_check_dirty_writeback(struct page *page,
				     bool *dirty, bool *writeback);

/*
 * Declarations
 */
void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
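/*
 * Illustrative sketch: a filesystem that dirties metadata blocks on behalf
 * of an inode attaches them to this list and later syncs them:
 *
 *	mark_buffer_dirty_inode(bh, inode);
 *	...
 *	err = sync_mapping_buffers(inode->i_mapping);
 */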
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}
void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
				  unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, int, struct buffer_head *bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
void write_dirty_buffer(struct buffer_head *bh, int op_flags);
int submit_bh(int, int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);
loff_t page_cache_seek_hole_data(struct inode *inode, loff_t offset,
				 loff_t length, int whence);

extern int buffer_heads_over_limit;
/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
			  unsigned int length);
int block_write_full_page(struct page *page, get_block_t *get_block,
			  struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
			    get_block_t *get_block, struct writeback_control *wbc,
			    bh_end_io_t *handler);
int block_read_full_page(struct page *, get_block_t *);
int block_is_partially_uptodate(struct page *page, unsigned long from,
				unsigned long count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		      unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
			get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
		    loff_t, unsigned, unsigned,
		    struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
		      loff_t, unsigned, unsigned,
		      struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
		     unsigned, unsigned, struct page **, void **,
		     get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		       get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline int block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT || err == -EAGAIN)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
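/*
 * Typical use in a filesystem's ->page_mkwrite() handler (a sketch;
 * myfs_get_block is a hypothetical get_block_t):
 *
 *	int err = block_page_mkwrite(vma, vmf, myfs_get_block);
 *	return block_page_mkwrite_return(err);
 */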
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
		     struct page **, void **, get_block_t *);
int nobh_write_end(struct file *, struct address_space *,
		   loff_t, unsigned, unsigned,
		   struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
		   struct writeback_control *wbc);

void buffer_init(void);

/*
 * inline definitions
 */
static inline void attach_page_buffers(struct page *page,
		struct buffer_head *head)
{
	get_page(page);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)head);
}
static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}
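/*
 * get_bh() and put_bh() must be paired; a sketch of pinning a bh across a
 * window where it might otherwise be freed:
 *
 *	get_bh(bh);
 *	...			// b_count is elevated, bh stays allocated
 *	put_bh(bh);
 */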
static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}
static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
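/*
 * Typical filesystem usage (a sketch): read a metadata block, check for
 * I/O failure, then drop the reference with brelse():
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *	if (!bh)
 *		return -EIO;
 *	...			// examine bh->b_data
 *	brelse(bh);
 */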
static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}
static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}
static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}
static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}
static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}
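/*
 * Illustrative sketch of map_bh() in a get_block_t implementation
 * (myfs_find_block is a hypothetical helper mapping a file block to an
 * on-disk block, returning 0 for a hole):
 *
 *	static int myfs_get_block(struct inode *inode, sector_t iblock,
 *				  struct buffer_head *bh, int create)
 *	{
 *		sector_t phys = myfs_find_block(inode, iblock);
 *		if (phys)
 *			map_bh(bh, inode->i_sb, phys);
 *		return 0;
 *	}
 */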
static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}
static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}
static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}
static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}
static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}
/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns the buffer head that contains it.
 *  The page cache is allocated from the movable area so that it can be
 *  migrated.  It returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}
extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */