// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__
#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 *	Base types
 */
struct xfs_buf;
#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))
#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 7) /* async writes have failed on this buffer */
/* buffer type flags for write callbacks */
#define _XBF_INODES	 (1 << 16) /* inode buffer */
#define _XBF_DQUOTS	 (1 << 17) /* dquot buffer */
#define _XBF_LOGRECOVERY (1 << 18) /* log recovery buffer */
/* flags used only internally */
#define _XBF_PAGES	 (1 << 20) /* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21) /* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22) /* buffer on a delwri queue */
/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 30) /* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 31) /* do not map the buffer */
typedef unsigned int xfs_buf_flags_t;
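/*
 * Example (sketch of a typical caller): the flags are OR'ed together, so a
 * non-blocking read request would be built as:
 *
 *	xfs_buf_flags_t	flags = XBF_READ | XBF_TRYLOCK;
 *
 * The _XBF_* values above are owned by the buffer cache itself and are not
 * meant to be passed in by lookup callers.
 */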
#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ _XBF_INODES,		"INODES" }, \
	{ _XBF_DQUOTS,		"DQUOTS" }, \
	{ _XBF_LOGRECOVERY,	"LOG_RECOVERY" }, \
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	/* The following interface flags should never be set */ \
	{ XBF_TRYLOCK,		"TRYLOCK" }, \
	{ XBF_UNMAPPED,		"UNMAPPED" }
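/*
 * This table exists so the tracing code can decode b_flags into names; the
 * usual pattern (as used from xfs_trace.h) looks like:
 *
 *	__print_flags(__entry->flags, "|", XFS_BUF_FLAGS)
 */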
/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */
/*
 * The xfs_buftarg contains 2 notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
	struct ratelimit_state	bt_ioerror_rl;
} xfs_buftarg_t;
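/*
 * Sketch: the *_sectormask fields are (sector size - 1), so alignment checks
 * reduce to a bitwise AND; e.g. a direct IO request that is not aligned to
 * the device logical sector size can be rejected with:
 *
 *	if ((pos | count) & target->bt_logical_sectormask)
 *		return -EINVAL;
 */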
#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
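/*
 * Usage sketch: single-range callers build an on-stack map and hand it to
 * the *_map functions declared below (the inline wrappers further down do
 * exactly this):
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	error = xfs_buf_get_map(target, &map, 1, flags, &bp);
 */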
struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};
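/*
 * Sketch of a verifier table (hypothetical "foo" names for illustration;
 * real instances such as xfs_sb_buf_ops live in libxfs):
 *
 *	static const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name		= "xfs_foo",
 *		.magic		= { cpu_to_be32(XFS_FOO_MAGIC),
 *				    cpu_to_be32(XFS_FOO_CRC_MAGIC) },
 *		.verify_read	= xfs_foo_read_verify,
 *		.verify_write	= xfs_foo_write_verify,
 *		.verify_struct	= xfs_foo_verify_struct,
 *	};
 */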
struct xfs_buf {
	/*
	 * first cacheline holds all the fields needed for an uncontended cache
	 * hit to be fully processed. The semaphore straddles the cacheline
	 * boundary, but the counter and lock sits on the first cacheline,
	 * which is the only bit that is touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */

	/*
	 * b_bn is the cache index. Do not use directly, use b_maps[0].bm_bn
	 * for the buffer disk address instead.
	 */
	xfs_daddr_t		b_bn;
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */
	/*
	 * concurrent access to b_lru and b_lru_flags are protected by
	 * bt_lru_lock and not by b_sema
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	struct xfs_mount	*b_mount;
	struct xfs_buftarg	*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset of b_addr,
						   only for _XBF_KMEM buffers */
	int			b_error;	/* error code on I/O */
	/*
	 * async write failure retry count. Initialised to zero on the first
	 * failure, then when it exceeds the maximum configured without a
	 * success the write is considered to be failed permanently and the
	 * iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffie of the first failure. This
	 * means that we can change the retry timeout for buffers already under
	 * I/O and thus avoid getting stuck in a retry loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors, not
	 * different errors. e.g. a block device might change ENOSPC to EIO when
	 * a failure timeout occurs, so we want to re-initialise the error
	 * retry behaviour appropriately when that happens.
	 */
	int			b_retries;	/* write retries since last success */
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;	/* previous async I/O error */

	const struct xfs_buf_ops	*b_ops;
};
/* Finding and Reading Buffers */
struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
			   xfs_daddr_t blkno, size_t numblks,
			   xfs_buf_flags_t flags);
int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);
static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}
static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}
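/*
 * Read sketch: a typical cached, verified metadata read and release
 * (assuming a mount with a valid data device buftarg and a real ops table
 * such as xfs_sb_buf_ops):
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0, &bp,
 *			     &xfs_sb_buf_ops);
 *	if (error)
 *		return error;
 *	// ... read bp->b_addr ...
 *	xfs_buf_relse(bp);
 */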
static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}
int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks, int flags,
		struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
			  size_t numblks, int flags, struct xfs_buf **bpp,
			  const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
void xfs_buf_hold(struct xfs_buf *bp);
/* Releasing Buffers */
extern void xfs_buf_rele(struct xfs_buf *);
/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(struct xfs_buf *);
extern void xfs_buf_lock(struct xfs_buf *);
extern void xfs_buf_unlock(struct xfs_buf *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)
static inline void xfs_buf_relse(struct xfs_buf *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}
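/*
 * Locking sketch: xfs_buf_trylock() returns non-zero on success, so a
 * common pattern honouring XBF_TRYLOCK looks roughly like:
 *
 *	if (flags & XBF_TRYLOCK) {
 *		if (!xfs_buf_trylock(bp))
 *			return -EAGAIN;
 *	} else {
 *		xfs_buf_lock(bp);
 *	}
 */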
/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)
/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);
/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
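/*
 * Delwri sketch: callers accumulate buffers on a caller-owned list and push
 * them out in one batch; xfs_buf_delwri_queue() returns false if the buffer
 * was already queued:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */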
/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);
/*
 * These macros use the IO block map rather than b_bn. b_bn is now really
 * just for the buffer cache index for cached buffers. As IO does not use b_bn
 * anymore, uncached buffers do not use b_bn at all and hence must modify the IO
 * map directly. Uncached buffers are not allowed to be discontiguous, so this
 * is safe to do.
 *
 * In future, uncached buffers will pass the block number directly to the io
 * request function and hence these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))
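/*
 * Sketch: uncached buffers have no cache index, so callers place them at a
 * disk address through the map before issuing IO:
 *
 *	XFS_BUF_SET_ADDR(bp, daddr);
 *	error = xfs_bwrite(bp);
 */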
void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);
/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}
static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}
static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}
static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}
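/*
 * CRC sketch: cksum_offset is the byte offset of the on-disk CRC field,
 * normally expressed via offsetof() on the disk structure (hypothetical
 * structure name below):
 *
 *	if (!xfs_buf_verify_cksum(bp, offsetof(struct xfs_foo_hdr, crc)))
 *		... checksum mismatch, fail the read verifier ...
 */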
/*
 * Handling of buftargs.
 */
extern struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *, struct dax_device *);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
extern int xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)
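/*
 * Sketch: xfs_setsize_buftarg() establishes both notions of sector size
 * described above for a buftarg (masks derived as size - 1); mount code
 * sizes the data device roughly like:
 *
 *	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
 */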
int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);
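/*
 * Verifier sketch: xfs_verify_magic() checks a given on-disk magic against
 * the b_ops->magic entry for the superblock version, so a read verifier
 * typically begins with (hypothetical disk header type):
 *
 *	struct xfs_foo_hdr *hdr = bp->b_addr;
 *
 *	if (!xfs_verify_magic(bp, hdr->magic))
 *		return __this_address;
 */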
#endif	/* __XFS_BUF_H__ */