// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 * Base types
 */
struct xfs_buf;

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 7) /* async writes have failed on this buffer */

/* buffer type flags for write callbacks */
#define _XBF_INODES	 (1 << 16)/* inode buffer */
#define _XBF_DQUOTS	 (1 << 17)/* dquot buffer */
#define _XBF_LOGRECOVERY (1 << 18)/* log recovery buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 30)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 31)/* do not map the buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ _XBF_INODES,		"INODES" }, \
	{ _XBF_DQUOTS,		"DQUOTS" }, \
	{ _XBF_LOGRECOVERY,	"LOG_RECOVERY" }, \
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	/* The following interface flags should never be set */ \
	{ XBF_TRYLOCK,		"TRYLOCK" }, \
	{ XBF_UNMAPPED,		"UNMAPPED" }

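/*
 * Editorial sketch (not part of the original header): the { flag, "name" }
 * pairs above follow the layout expected by ftrace's __print_flags(), so a
 * tracepoint printk can render b_flags symbolically, e.g.:
 *
 *	__print_flags(bp->b_flags, "|", XFS_BUF_FLAGS)
 *
 * producing output such as "READ|ASYNC|DONE".
 */
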
/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	(1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	(1 << 1)	/* I/O in flight */

/*
 * The xfs_buftarg contains two notions of "sector size":
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size.
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
	struct ratelimit_state	bt_ioerror_rl;
} xfs_buftarg_t;

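/*
 * A minimal usage sketch, assuming the usual mask = size - 1 convention for
 * the sectormask fields: the logical sector mask allows cheap direct I/O
 * alignment checks without a division, e.g.:
 *
 *	if ((pos | count) & bt->bt_logical_sectormask)
 *		return -EINVAL;		// not logical-sector aligned
 */
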
#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

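/*
 * Sketch of how the single-map helpers later in this header use the macro
 * above: it declares an on-stack, one-element map so the multi-map (_map)
 * interfaces can also serve the common single-extent case, e.g.:
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	error = xfs_buf_read_map(target, &map, 1, flags, &bp, ops, fa);
 */
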
struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};

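/*
 * A hedged sketch of a verifier definition (the "foo" names are hypothetical,
 * not from this header): each on-disk structure supplies an ops table so the
 * I/O path can validate buffers as they are read and written:
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name		= "xfs_foo",
 *		.magic		= { cpu_to_be32(XFS_FOO_MAGIC),
 *				    cpu_to_be32(XFS_FOO_CRC_MAGIC) },
 *		.verify_read	= xfs_foo_read_verify,
 *		.verify_write	= xfs_foo_write_verify,
 *		.verify_struct	= xfs_foo_verify_struct,
 *	};
 */
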
typedef struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed. The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only part touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * Concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema.
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	struct xfs_mount	*b_mount;
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	int			b_error;	/* error code on I/O */

	/*
	 * Async write failure retry count. Initialised to zero on the first
	 * failure; when it exceeds the configured maximum without an
	 * intervening success, the write is considered permanently failed and
	 * the iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffies value of the first
	 * failure. This means that we can change the retry timeout for
	 * buffers already under I/O and thus avoid getting stuck in a retry
	 * loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors,
	 * not different errors. E.g. a block device might change ENOSPC to
	 * EIO when a failure timeout occurs, so we want to re-initialise the
	 * error retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
} xfs_buf_t;

/* Finding and Reading Buffers */
struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
			   xfs_daddr_t blkno, size_t numblks,
			   xfs_buf_flags_t flags);

int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}

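/*
 * Typical call pattern, as a sketch (xfs_sb_buf_ops is assumed here, being
 * defined elsewhere in XFS): read one block through the cache with
 * verification, then drop the lock and the reference when done:
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read(target, blkno, 1, 0, &bp, &xfs_sb_buf_ops);
 *	if (error)
 *		return error;
 *	// ... use bp->b_addr ...
 *	xfs_buf_relse(bp);
 */
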
static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks, int flags,
		struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
			  size_t numblks, int flags, struct xfs_buf **bpp,
			  const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

/*
 * These macros use the IO block map rather than b_bn. b_bn is now really
 * just the buffer cache index for cached buffers. As IO does not use b_bn
 * anymore, uncached buffers do not use b_bn at all and hence must modify the
 * IO map directly. Uncached buffers are not allowed to be discontiguous, so
 * this is safe to do.
 *
 * In future, uncached buffers will pass the block number directly to the IO
 * request function and hence these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))

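/*
 * Sketch of the uncached-buffer pattern the comment above describes (the
 * helpers are from this header, but the flow is illustrative): the caller
 * stamps the I/O address directly into the map before submitting:
 *
 *	error = xfs_buf_get_uncached(target, numblks, 0, &bp);
 *	if (error)
 *		return error;
 *	XFS_BUF_SET_ADDR(bp, daddr);
 *	// ... submit the I/O and wait for completion ...
 */
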
void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

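/*
 * Usage note (illustrative, not from the original header): callers that walk
 * a large amount of metadata exactly once call xfs_buf_oneshot() after
 * acquiring the buffer, so it is reclaimed promptly on release instead of
 * displacing hotter buffers from the LRU.
 */
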
static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}

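/*
 * A minimal sketch of how a read verifier might combine these helpers with
 * the error routines above (XFS_FOO_CRC_OFF is hypothetical; real offsets
 * come from the on-disk structure definitions):
 *
 *	if (!xfs_buf_verify_cksum(bp, XFS_FOO_CRC_OFF))
 *		xfs_buf_ioerror(bp, -EFSBADCRC);
 */
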
/*
 * Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *, struct dax_device *);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

static inline int
xfs_buftarg_dma_alignment(struct xfs_buftarg *bt)
{
	return queue_dma_alignment(bt->bt_bdev->bd_disk->queue);
}

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

#endif /* __XFS_BUF_H__ */