// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/backing-dev.h>

#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log_recover.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_errortag.h"
#include "xfs_error.h"

static kmem_zone_t *xfs_buf_zone;
/*
 * Locking orders
 *
 * xfs_buf_ioacct_inc/dec, xfs_buf_stale:
 *	b_sema (caller holds), then b_lock
 *
 * xfs_buftarg_drain_rele, xfs_buftarg_isolate:
 *	lru_lock, then b_lock (trylock due to inversion)
 */
static int __xfs_buf_submit(struct xfs_buf *bp, bool wait);

static inline int
xfs_buf_submit(
	struct xfs_buf		*bp)
{
	return __xfs_buf_submit(bp, !(bp->b_flags & XBF_ASYNC));
}
static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * b_addr is null if the buffer is not mapped, but the code is clever
	 * enough to know it doesn't have to map a single page, so the check has
	 * to be both for b_addr and bp->b_page_count > 1.
	 */
	return bp->b_addr && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE);
}
/*
 * Bump the I/O in flight count on the buftarg if we haven't yet done so for
 * this buffer. The count is incremented once per buffer (per hold cycle)
 * because the corresponding decrement is deferred to buffer release. Buffers
 * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
 * tracking adds unnecessary overhead. This is used for synchronization purposes
 * with unmount (see xfs_buftarg_drain()), so all we really need is a count of
 * in-flight I/Os.
 *
 * Buffers that are never released (e.g., superblock, iclog buffers) must set
 * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
 * never reaches zero and unmount hangs indefinitely.
 */
static inline void
xfs_buf_ioacct_inc(
	struct xfs_buf	*bp)
{
	if (bp->b_flags & XBF_NO_IOACCT)
		return;

	ASSERT(bp->b_flags & XBF_ASYNC);
	spin_lock(&bp->b_lock);
	if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
		percpu_counter_inc(&bp->b_target->bt_io_count);
	}
	spin_unlock(&bp->b_lock);
}
/*
 * Clear the in-flight state on a buffer about to be released to the LRU or
 * freed and unaccount from the buftarg.
 */
static inline void
__xfs_buf_ioacct_dec(
	struct xfs_buf	*bp)
{
	lockdep_assert_held(&bp->b_lock);

	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
		percpu_counter_dec(&bp->b_target->bt_io_count);
	}
}

static inline void
xfs_buf_ioacct_dec(
	struct xfs_buf	*bp)
{
	spin_lock(&bp->b_lock);
	__xfs_buf_ioacct_dec(bp);
	spin_unlock(&bp->b_lock);
}
/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	/*
	 * Once the buffer is marked stale and unlocked, a subsequent lookup
	 * could reset b_flags. There is no guarantee that the buffer is
	 * unaccounted (released to LRU) before that occurs. Drop in-flight
	 * status now to preserve accounting consistency.
	 */
	spin_lock(&bp->b_lock);
	__xfs_buf_ioacct_dec(bp);

	atomic_set(&bp->b_lru_ref, 0);
	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
		atomic_dec(&bp->b_hold);

	ASSERT(atomic_read(&bp->b_hold) >= 1);
	spin_unlock(&bp->b_lock);
}
static int
xfs_buf_get_maps(
	struct xfs_buf		*bp,
	int			map_count)
{
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->__b_map;
		return 0;
	}

	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
				KM_NOFS);
	if (!bp->b_maps)
		return -ENOMEM;
	return 0;
}

/*
 * Frees b_maps if it was allocated.
 */
static void
xfs_buf_free_maps(
	struct xfs_buf	*bp)
{
	if (bp->b_maps != &bp->__b_map) {
		kmem_free(bp->b_maps);
		bp->b_maps = NULL;
	}
}
static int
_xfs_buf_alloc(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;

	bp = kmem_cache_zalloc(xfs_buf_zone, GFP_NOFS | __GFP_NOFAIL);

	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	INIT_LIST_HEAD(&bp->b_li_list);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	spin_lock_init(&bp->b_lock);
	bp->b_target = target;
	bp->b_mount = target->bt_mount;
	bp->b_flags = flags;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	error = xfs_buf_get_maps(bp, nmaps);
	if (error) {
		kmem_cache_free(xfs_buf_zone, bp);
		return error;
	}

	bp->b_bn = map[0].bm_bn;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	}

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(bp->b_mount, xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	*bpp = bp;
	return 0;
}
static void
xfs_buf_free_pages(
	struct xfs_buf	*bp)
{
	uint		i;

	ASSERT(bp->b_flags & _XBF_PAGES);

	if (xfs_buf_is_vmapped(bp))
		vm_unmap_ram(bp->b_addr, bp->b_page_count);

	for (i = 0; i < bp->b_page_count; i++) {
		if (bp->b_pages[i])
			__free_page(bp->b_pages[i]);
	}
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += bp->b_page_count;

	if (bp->b_pages != bp->b_page_array)
		kmem_free(bp->b_pages);
	bp->b_pages = NULL;
	bp->b_flags &= ~_XBF_PAGES;
}
static void
xfs_buf_free(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES)
		xfs_buf_free_pages(bp);
	else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);

	xfs_buf_free_maps(bp);
	kmem_cache_free(xfs_buf_zone, bp);
}
static int
xfs_buf_alloc_kmem(
	struct xfs_buf	*bp,
	xfs_buf_flags_t	flags)
{
	int		align_mask = xfs_buftarg_dma_alignment(bp->b_target);
	xfs_km_flags_t	kmflag_mask = KM_NOFS;
	size_t		size = BBTOB(bp->b_length);

	/* Assure zeroed buffer for non-read cases. */
	if (!(flags & XBF_READ))
		kmflag_mask |= KM_ZERO;

	bp->b_addr = kmem_alloc_io(size, align_mask, kmflag_mask);
	if (!bp->b_addr)
		return -ENOMEM;

	if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
	    ((unsigned long)bp->b_addr & PAGE_MASK)) {
		/* b_addr spans two pages - use alloc_page instead */
		kmem_free(bp->b_addr);
		bp->b_addr = NULL;
		return -ENOMEM;
	}
	bp->b_offset = offset_in_page(bp->b_addr);
	bp->b_pages = bp->b_page_array;
	bp->b_pages[0] = kmem_to_page(bp->b_addr);
	bp->b_page_count = 1;
	bp->b_flags |= _XBF_KMEM;
	return 0;
}
static int
xfs_buf_alloc_pages(
	struct xfs_buf	*bp,
	xfs_buf_flags_t	flags)
{
	gfp_t		gfp_mask = __GFP_NOWARN;
	long		filled = 0;

	if (flags & XBF_READ_AHEAD)
		gfp_mask |= __GFP_NORETRY;
	else
		gfp_mask |= GFP_NOFS;

	/* Make sure that we have a page list */
	bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
	if (bp->b_page_count <= XB_PAGES) {
		bp->b_pages = bp->b_page_array;
	} else {
		bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count,
				      gfp_mask);
		if (!bp->b_pages)
			return -ENOMEM;
	}
	bp->b_flags |= _XBF_PAGES;

	/* Assure zeroed buffer for non-read cases. */
	if (!(flags & XBF_READ))
		gfp_mask |= __GFP_ZERO;

	/*
	 * Bulk filling of pages can take multiple calls. Not filling the entire
	 * array is not an allocation failure, so don't back off if we get at
	 * least one extra page.
	 */
	for (;;) {
		long	last = filled;

		filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
						bp->b_pages);
		if (filled == bp->b_page_count) {
			XFS_STATS_INC(bp->b_mount, xb_page_found);
			break;
		}

		if (filled != last)
			continue;

		if (flags & XBF_READ_AHEAD) {
			xfs_buf_free_pages(bp);
			return -ENOMEM;
		}

		XFS_STATS_INC(bp->b_mount, xb_page_retries);
		congestion_wait(BLK_RW_ASYNC, HZ / 50);
	}
	return 0;
}
/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	struct xfs_buf		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]);
	} else if (flags & XBF_UNMAPPED) {
		bp->b_addr = NULL;
	} else {
		int retried = 0;
		unsigned nofs_flag;

		/*
		 * vm_map_ram() will allocate auxiliary structures (e.g.
		 * pagetables) with GFP_KERNEL, yet we are likely to be under
		 * GFP_NOFS context here. Hence we need to tell memory reclaim
		 * that we are in such a context via PF_MEMALLOC_NOFS to prevent
		 * memory reclaim re-entering the filesystem here and
		 * potentially deadlocking.
		 */
		nofs_flag = memalloc_nofs_save();
		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);
		memalloc_nofs_restore(nofs_flag);

		if (!bp->b_addr)
			return -ENOMEM;
	}

	return 0;
}
/*
 * Finding and Reading Buffers
 */
static int
_xfs_buf_obj_cmp(
	struct rhashtable_compare_arg	*arg,
	const void			*obj)
{
	const struct xfs_buf_map	*map = arg->key;
	const struct xfs_buf		*bp = obj;

	/*
	 * The key hashing in the lookup path depends on the key being the
	 * first element of the compare_arg, make sure to assert this.
	 */
	BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);

	if (bp->b_bn != map->bm_bn)
		return 1;

	if (unlikely(bp->b_length != map->bm_len)) {
		/*
		 * found a block number match. If the range doesn't
		 * match, the only way this is allowed is if the buffer
		 * in the cache is stale and the transaction that made
		 * it stale has not yet committed. i.e. we are
		 * reallocating a busy extent. Skip this buffer and
		 * continue searching for an exact match.
		 */
		ASSERT(bp->b_flags & XBF_STALE);
		return 1;
	}
	return 0;
}

static const struct rhashtable_params xfs_buf_hash_params = {
	.min_size		= 32,	/* empty AGs have minimal footprint */
	.key_len		= sizeof(xfs_daddr_t),
	.key_offset		= offsetof(struct xfs_buf, b_bn),
	.head_offset		= offsetof(struct xfs_buf, b_rhash_head),
	.automatic_shrinking	= true,
	.obj_cmpfn		= _xfs_buf_obj_cmp,
};

int
xfs_buf_hash_init(
	struct xfs_perag	*pag)
{
	spin_lock_init(&pag->pag_buf_lock);
	return rhashtable_init(&pag->pag_buf_hash, &xfs_buf_hash_params);
}

void
xfs_buf_hash_destroy(
	struct xfs_perag	*pag)
{
	rhashtable_destroy(&pag->pag_buf_hash);
}
/*
 * Look up a buffer in the buffer cache and return it referenced and locked
 * in @found_bp.
 *
 * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
 * cache.
 *
 * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
 * -EAGAIN if we fail to lock it.
 *
 * Return values are:
 *	-EFSCORRUPTED if have been supplied with an invalid address
 *	-EAGAIN on trylock failure
 *	-ENOENT if we fail to find a match and @new_bp was NULL
 *	0, with @found_bp:
 *		- @new_bp if we inserted it into the cache
 *		- the buffer we found and locked.
 */
static int
xfs_buf_find(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		*new_bp,
	struct xfs_buf		**found_bp)
{
	struct xfs_perag	*pag;
	struct xfs_buf		*bp;
	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
	xfs_daddr_t		eofs;
	int			i;

	*found_bp = NULL;

	for (i = 0; i < nmaps; i++)
		cmap.bm_len += map[i].bm_len;

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize));
	ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));

	/*
	 * Corrupted block numbers can get through to here, unfortunately, so we
	 * have to check that the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
		xfs_alert(btp->bt_mount,
			  "%s: daddr 0x%llx out of range, EOFS 0x%llx",
			  __func__, cmap.bm_bn, eofs);
		return -EFSCORRUPTED;
	}

	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));

	spin_lock(&pag->pag_buf_lock);
	bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
				    xfs_buf_hash_params);
	if (bp) {
		atomic_inc(&bp->b_hold);
		goto found;
	}

	/* No match found */
	if (!new_bp) {
		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
		return -ENOENT;
	}

	/* the buffer keeps the perag reference until it is freed */
	new_bp->b_pag = pag;
	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
			       xfs_buf_hash_params);
	spin_unlock(&pag->pag_buf_lock);
	*found_bp = new_bp;
	return 0;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
			return -EAGAIN;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
		bp->b_ops = NULL;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
	*found_bp = bp;
	return 0;
}
struct xfs_buf *
xfs_buf_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	int			error;
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	error = xfs_buf_find(target, &map, 1, flags, NULL, &bp);
	if (error)
		return NULL;
	return bp;
}
/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
int
xfs_buf_get_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
	int			error;

	*bpp = NULL;
	error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
	if (!error)
		goto found;
	if (error != -ENOENT)
		return error;

	error = _xfs_buf_alloc(target, map, nmaps, flags, &new_bp);
	if (error)
		return error;

	/*
	 * For buffers that fit entirely within a single page, first attempt to
	 * allocate the memory from the heap to minimise memory usage. If we
	 * can't get heap memory for these small buffers, we fall back to using
	 * the page allocator.
	 */
	if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
	    xfs_buf_alloc_kmem(new_bp, flags) < 0) {
		error = xfs_buf_alloc_pages(new_bp, flags);
		if (error)
			goto out_free_buf;
	}

	error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
	if (error)
		goto out_free_buf;

	if (bp != new_bp)
		xfs_buf_free(new_bp);

found:
	if (!bp->b_addr) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn_ratelimited(target->bt_mount,
				"%s: failed to map %u pages", __func__,
				bp->b_page_count);
			xfs_buf_relse(bp);
			return error;
		}
	}

	/*
	 * Clear b_error if this is a lookup from a caller that doesn't expect
	 * valid data to be found in the buffer.
	 */
	if (!(flags & XBF_READ))
		xfs_buf_ioerror(bp, 0);

	XFS_STATS_INC(target->bt_mount, xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	*bpp = bp;
	return 0;
out_free_buf:
	xfs_buf_free(new_bp);
	return error;
}
int
_xfs_buf_read(
	struct xfs_buf		*bp,
	xfs_buf_flags_t		flags)
{
	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	return xfs_buf_submit(bp);
}
/*
 * Reverify a buffer found in cache without an attached ->b_ops.
 *
 * If the caller passed an ops structure and the buffer doesn't have ops
 * assigned, set the ops and use it to verify the contents. If verification
 * fails, clear XBF_DONE. We assume the buffer has no recorded errors and is
 * already in XBF_DONE state on entry.
 *
 * Under normal operations, every in-core buffer is verified on read I/O
 * completion. There are two scenarios that can lead to in-core buffers without
 * an assigned ->b_ops. The first is during log recovery of buffers on a V4
 * filesystem, though these buffers are purged at the end of recovery. The
 * other is online repair, which intentionally reads with a NULL buffer ops to
 * run several verifiers across an in-core buffer in order to establish buffer
 * type. If repair can't establish that, the buffer will be left in memory
 * with NULL buffer ops.
 */
int
xfs_buf_reverify(
	struct xfs_buf		*bp,
	const struct xfs_buf_ops *ops)
{
	ASSERT(bp->b_flags & XBF_DONE);
	ASSERT(bp->b_error == 0);

	if (!ops || bp->b_ops)
		return 0;

	bp->b_ops = ops;
	bp->b_ops->verify_read(bp);
	if (bp->b_error)
		bp->b_flags &= ~XBF_DONE;
	return bp->b_error;
}
int
xfs_buf_read_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops,
	xfs_failaddr_t		fa)
{
	struct xfs_buf		*bp;
	int			error;

	flags |= XBF_READ;
	*bpp = NULL;

	error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
	if (error)
		return error;

	trace_xfs_buf_read(bp, flags, _RET_IP_);

	if (!(bp->b_flags & XBF_DONE)) {
		/* Initiate the buffer read and wait. */
		XFS_STATS_INC(target->bt_mount, xb_get_read);
		bp->b_ops = ops;
		error = _xfs_buf_read(bp, flags);

		/* Readahead iodone already dropped the buffer, so exit. */
		if (flags & XBF_ASYNC)
			return 0;
	} else {
		/* Buffer already read; all we need to do is check it. */
		error = xfs_buf_reverify(bp, ops);

		/* Readahead already finished; drop the buffer and exit. */
		if (flags & XBF_ASYNC) {
			xfs_buf_relse(bp);
			return 0;
		}

		/* We do not want read in the flags */
		bp->b_flags &= ~XBF_READ;
		ASSERT(bp->b_ops != NULL || ops == NULL);
	}

	/*
	 * If we've had a read error, then the contents of the buffer are
	 * invalid and should not be used. To ensure that a followup read tries
	 * to pull the buffer from disk again, we clear the XBF_DONE flag and
	 * mark the buffer stale. This ensures that anyone who has a current
	 * reference to the buffer will interpret its contents correctly and
	 * future cache lookups will also treat it as an empty, uninitialised
	 * buffer.
	 */
	if (error) {
		if (!XFS_FORCED_SHUTDOWN(target->bt_mount))
			xfs_buf_ioerror_alert(bp, fa);

		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);
		xfs_buf_relse(bp);

		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	*bpp = bp;
	return 0;
}
/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
void
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	if (bdi_read_congested(target->bt_bdev->bd_bdi))
		return;

	xfs_buf_read_map(target, map, nmaps,
		     XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
		     __this_address);
}
/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
int
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks,
	int			flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	*bpp = NULL;

	error = xfs_buf_get_uncached(target, numblks, flags, &bp);
	if (error)
		return error;

	/* set up the buffer for a read IO */
	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;	/* always null for uncached buffers */
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;

	xfs_buf_submit(bp);
	if (bp->b_error) {
		error = bp->b_error;
		xfs_buf_relse(bp);
		return error;
	}

	*bpp = bp;
	return 0;
}
int
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			numblks,
	int			flags,
	struct xfs_buf		**bpp)
{
	int			error;
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

	*bpp = NULL;

	/* flags might contain irrelevant bits, pass only what we care about */
	error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
	if (error)
		return error;

	error = xfs_buf_alloc_pages(bp, flags);
	if (error)
		goto fail_free_buf;

	error = _xfs_buf_map_pages(bp, 0);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages", __func__);
		goto fail_free_buf;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	*bpp = bp;
	return 0;

fail_free_buf:
	xfs_buf_free(bp);
	return error;
}
/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	struct xfs_buf	*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}
/*
 * Release a hold on the specified buffer. If the hold count is 1, the buffer is
 * placed on LRU or freed (depending on b_lru_ref).
 */
void
xfs_buf_rele(
	struct xfs_buf	*bp)
{
	struct xfs_perag	*pag = bp->b_pag;
	bool			release;
	bool			freebuf = false;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		if (atomic_dec_and_test(&bp->b_hold)) {
			xfs_buf_ioacct_dec(bp);
			xfs_buf_free(bp);
		}
		return;
	}

	ASSERT(atomic_read(&bp->b_hold) > 0);

	/*
	 * We grab the b_lock here first to serialise racing xfs_buf_rele()
	 * calls. The pag_buf_lock being taken on the last reference only
	 * serialises against racing lookups in xfs_buf_find(). IOWs, the second
	 * to last reference we drop here is not serialised against the last
	 * reference until we take bp->b_lock. Hence if we don't grab b_lock
	 * first, the last "release" reference can win the race to the lock and
	 * free the buffer before the second-to-last reference is processed,
	 * leading to a use-after-free scenario.
	 */
	spin_lock(&bp->b_lock);
	release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
	if (!release) {
		/*
		 * Drop the in-flight state if the buffer is already on the LRU
		 * and it holds the only reference. This is racy because we
		 * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT
		 * ensures the decrement occurs only once per-buf.
		 */
		if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
			__xfs_buf_ioacct_dec(bp);
		goto out_unlock;
	}

	/* the last reference has been dropped ... */
	__xfs_buf_ioacct_dec(bp);
	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
		/*
		 * If the buffer is added to the LRU take a new reference to the
		 * buffer for the LRU and clear the (now stale) dispose list
		 * state flag
		 */
		if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
			bp->b_state &= ~XFS_BSTATE_DISPOSE;
			atomic_inc(&bp->b_hold);
		}
		spin_unlock(&pag->pag_buf_lock);
	} else {
		/*
		 * most of the time buffers will already be removed from the
		 * LRU, so optimise that case by checking for the
		 * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
		 * was on was the disposal list
		 */
		if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
			list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
		} else {
			ASSERT(list_empty(&bp->b_lru));
		}

		ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
		rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head,
				       xfs_buf_hash_params);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
		freebuf = true;
	}

out_unlock:
	spin_unlock(&bp->b_lock);

	if (freebuf)
		xfs_buf_free(bp);
}
/*
 * Lock a buffer object, if it is not already locked.
 *
 * If we come across a stale, pinned, locked buffer, we know that we are
 * being asked to lock a buffer that has been reallocated. Because it is
 * pinned, we know that the log has not been pushed to disk and hence it
 * will still be locked. Rather than continuing to have trylock attempts
 * fail until someone else pushes the log, push it ourselves before
 * returning. This means that the xfsaild will not get stuck trying
 * to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		trace_xfs_buf_trylock(bp, _RET_IP_);
	else
		trace_xfs_buf_trylock_fail(bp, _RET_IP_);
	return locked;
}
/*
 * Lock a buffer object.
 *
 * If we come across a stale, pinned, locked buffer, we know that we
 * are being asked to lock a buffer that has been reallocated. Because
 * it is pinned, we know that the log has not been pushed to disk and
 * hence it will still be locked. Rather than sleeping until someone
 * else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_mount, 0);
	down(&bp->b_sema);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}
void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	up(&bp->b_sema);
	trace_xfs_buf_unlock(bp, _RET_IP_);
}
STATIC void
xfs_buf_wait_unpin(
	struct xfs_buf		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}
static void
xfs_buf_ioerror_alert_ratelimited(
	struct xfs_buf		*bp)
{
	static unsigned long	lasttime;
	static struct xfs_buftarg *lasttarg;

	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __this_address);
	}
	lasttarg = bp->b_target;
}
/*
 * Account for this latest trip around the retry handler, and decide if
 * we've failed enough times to constitute a permanent failure.
 */
static bool
xfs_buf_ioerror_permanent(
	struct xfs_buf		*bp,
	struct xfs_error_cfg	*cfg)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
	    ++bp->b_retries > cfg->max_retries)
		return true;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
		return true;

	/* At unmount we may treat errors differently */
	if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
		return true;

	return false;
}
/*
 * On a sync write or shutdown we just want to stale the buffer and let the
 * caller handle the error in bp->b_error appropriately.
 *
 * If the write was asynchronous then no one will be looking for the error. If
 * this is the first failure of this type, clear the error state and write the
 * buffer out again. This means we always retry an async write failure at least
 * once, but we also need to set the buffer up to behave correctly now for
 * repeated failures.
 *
 * If we get repeated async write failures, then we take action according to the
 * error configuration we have been set up to use.
 *
 * Returns true if this function took care of error handling and the caller must
 * not touch the buffer again. Return false if the caller should proceed with
 * normal I/O completion handling.
 */
static bool
xfs_buf_ioend_handle_error(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_error_cfg	*cfg;

	/*
	 * If we've already decided to shutdown the filesystem because of I/O
	 * errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_stale;

	xfs_buf_ioerror_alert_ratelimited(bp);

	/*
	 * We're not going to bother about retrying this during recovery.
	 * One strike!
	 */
	if (bp->b_flags & _XBF_LOGRECOVERY) {
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		return false;
	}

	/*
	 * Synchronous writes will have callers process the error.
	 */
	if (!(bp->b_flags & XBF_ASYNC))
		goto out_stale;

	trace_xfs_buf_iodone_async(bp, _RET_IP_);

	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
	if (bp->b_last_error != bp->b_error ||
	    !(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL))) {
		bp->b_last_error = bp->b_error;
		if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
		    !bp->b_first_retry_time)
			bp->b_first_retry_time = jiffies;
		goto resubmit;
	}

	/*
	 * Permanent error - we need to trigger a shutdown if we haven't already
	 * to indicate that inconsistency will result from this action.
	 */
	if (xfs_buf_ioerror_permanent(bp, cfg)) {
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		goto out_stale;
	}

	/* Still considered a transient error. Caller will schedule retries. */
	if (bp->b_flags & _XBF_INODES)
		xfs_buf_inode_io_fail(bp);
	else if (bp->b_flags & _XBF_DQUOTS)
		xfs_buf_dquot_io_fail(bp);
	else
		ASSERT(list_empty(&bp->b_li_list));
	xfs_buf_ioerror(bp, 0);
	xfs_buf_relse(bp);
	return true;

resubmit:
	xfs_buf_ioerror(bp, 0);
	bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL);
	xfs_buf_submit(bp);
	return true;
out_stale:
	xfs_buf_stale(bp);
	bp->b_flags |= XBF_DONE;
	bp->b_flags &= ~XBF_WRITE;
	trace_xfs_buf_error_relse(bp, _RET_IP_);
	return false;
}
static void
xfs_buf_ioend(
	struct xfs_buf	*bp)
{
	trace_xfs_buf_iodone(bp, _RET_IP_);

	/*
	 * Pull in IO completion errors now. We are guaranteed to be running
	 * single threaded, so we don't need the lock to read b_io_error.
	 */
	if (!bp->b_error && bp->b_io_error)
		xfs_buf_ioerror(bp, bp->b_io_error);

	if (bp->b_flags & XBF_READ) {
		if (!bp->b_error && bp->b_ops)
			bp->b_ops->verify_read(bp);
		if (!bp->b_error)
			bp->b_flags |= XBF_DONE;
	} else {
		if (!bp->b_error) {
			bp->b_flags &= ~XBF_WRITE_FAIL;
			bp->b_flags |= XBF_DONE;
		}

		if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
			return;

		/* clear the retry state */
		bp->b_last_error = 0;
		bp->b_retries = 0;
		bp->b_first_retry_time = 0;

		/*
		 * Note that for things like remote attribute buffers, there may
		 * not be a buffer log item here, so processing the buffer log
		 * item must remain optional.
		 */
		if (bp->b_log_item)
			xfs_buf_item_done(bp);

		if (bp->b_flags & _XBF_INODES)
			xfs_buf_inode_iodone(bp);
		else if (bp->b_flags & _XBF_DQUOTS)
			xfs_buf_dquot_iodone(bp);
	}

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
			 _XBF_LOGRECOVERY);

	if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
	else
		complete(&bp->b_iowait);
}
static void
xfs_buf_ioend_work(
	struct work_struct	*work)
{
	struct xfs_buf		*bp =
		container_of(work, struct xfs_buf, b_ioend_work);

	xfs_buf_ioend(bp);
}

static void
xfs_buf_ioend_async(
	struct xfs_buf	*bp)
{
	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
	queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
}
void
__xfs_buf_ioerror(
	struct xfs_buf		*bp,
	int			error,
	xfs_failaddr_t		failaddr)
{
	ASSERT(error <= 0 && error >= -1000);
	bp->b_error = error;
	trace_xfs_buf_ioerror(bp, error, failaddr);
}
void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	xfs_failaddr_t		func)
{
	xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error",
		"metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
				  func, (uint64_t)XFS_BUF_ADDR(bp),
				  bp->b_length, -bp->b_error);
}
/*
 * To simulate an I/O failure, the buffer must be locked and held with at least
 * three references. The LRU reference is dropped by the stale call. The buf
 * item reference is dropped via ioend processing. The third reference is owned
 * by the caller and is dropped on I/O completion if the buffer is XBF_ASYNC.
 */
void
xfs_buf_ioend_fail(
	struct xfs_buf	*bp)
{
	bp->b_flags &= ~XBF_DONE;
	xfs_buf_stale(bp);
	xfs_buf_ioerror(bp, -EIO);
	xfs_buf_ioend(bp);
}
int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
			 XBF_DONE);

	error = xfs_buf_submit(bp);
	if (error)
		xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
	return error;
}
static void
xfs_buf_bio_end_io(
	struct bio		*bio)
{
	struct xfs_buf		*bp = (struct xfs_buf *)bio->bi_private;

	if (!bio->bi_status &&
	    (bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) &&
	    XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
		bio->bi_status = BLK_STS_IOERR;

	/*
	 * don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
	if (bio->bi_status) {
		int error = blk_status_to_errno(bio->bi_status);

		cmpxchg(&bp->b_io_error, 0, error);
	}

	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend_async(bp);
	bio_put(bio);
}
static void
xfs_buf_ioapply_map(
	struct xfs_buf	*bp,
	int		map,
	int		*buf_offset,
	int		*count,
	int		op)
{
	int		page_index;
	unsigned int	total_nr_pages = bp->b_page_count;
	int		nr_pages;
	struct bio	*bio;
	sector_t	sector = bp->b_maps[map].bm_bn;
	int		size;
	int		offset;

	/* skip the pages in the buffer before the start offset */
	page_index = 0;
	offset = *buf_offset;
	while (offset >= PAGE_SIZE) {
		page_index++;
		offset -= PAGE_SIZE;
	}

	/*
	 * Limit the IO size to the length of the current vector, and update the
	 * remaining IO count for the next time around.
	 */
	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
	*count -= size;
	*buf_offset += size;

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = bio_max_segs(total_nr_pages);

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio_set_dev(bio, bp->b_target->bt_bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;
	bio->bi_opf = op;

	for (; size && nr_pages; nr_pages--, page_index++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
				      offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += BTOBB(nbytes);
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_iter.bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(bio);
		if (size)
			goto next_chunk;
	} else {
		/*
		 * This is guaranteed not to be the last io reference count
		 * because the caller (xfs_buf_submit) holds a count itself.
		 */
		atomic_dec(&bp->b_io_remaining);
		xfs_buf_ioerror(bp, -EIO);
		bio_put(bio);
	}
}
STATIC void
_xfs_buf_ioapply(
	struct xfs_buf	*bp)
{
	struct blk_plug	plug;
	int		op;
	int		offset;
	int		size;
	int		i;

	/*
	 * Make sure we capture only current IO errors rather than stale errors
	 * left over from previous use of the buffer (e.g. failed readahead).
	 */
	bp->b_error = 0;

	if (bp->b_flags & XBF_WRITE) {
		op = REQ_OP_WRITE;

		/*
		 * Run the write verifier callback function if it exists. If
		 * this function fails it will mark the buffer with an error and
		 * the IO should not be dispatched.
		 */
		if (bp->b_ops) {
			bp->b_ops->verify_write(bp);
			if (bp->b_error) {
				xfs_force_shutdown(bp->b_mount,
						   SHUTDOWN_CORRUPT_INCORE);
				return;
			}
		} else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
			struct xfs_mount *mp = bp->b_mount;

			/*
			 * non-crc filesystems don't attach verifiers during
			 * log recovery, so don't warn for such filesystems.
			 */
			if (xfs_sb_version_hascrc(&mp->m_sb)) {
				xfs_warn(mp,
					"%s: no buf ops on daddr 0x%llx len %d",
					__func__, bp->b_bn, bp->b_length);
				xfs_hex_dump(bp->b_addr,
						XFS_CORRUPTION_DUMP_LEN);
			}
		}
	} else {
		op = REQ_OP_READ;
		if (bp->b_flags & XBF_READ_AHEAD)
			op |= REQ_RAHEAD;
	}

	/* we only use the buffer cache for meta-data */
	op |= REQ_META;

	/*
	 * Walk all the vectors issuing IO on them. Set up the initial offset
	 * into the buffer and the desired IO size before we start -
	 * _xfs_buf_ioapply_vec() will modify them appropriately for each
	 * subsequent call.
	 */
	offset = bp->b_offset;
	size = BBTOB(bp->b_length);
	blk_start_plug(&plug);
	for (i = 0; i < bp->b_map_count; i++) {
		xfs_buf_ioapply_map(bp, i, &offset, &size, op);
		if (bp->b_error)
			break;
		if (size <= 0)
			break;	/* all done */
	}
	blk_finish_plug(&plug);
}
/*
 * Wait for I/O completion of a sync buffer and return the I/O error code.
 */
static int
xfs_buf_iowait(
	struct xfs_buf	*bp)
{
	ASSERT(!(bp->b_flags & XBF_ASYNC));

	trace_xfs_buf_iowait(bp, _RET_IP_);
	wait_for_completion(&bp->b_iowait);
	trace_xfs_buf_iowait_done(bp, _RET_IP_);

	return bp->b_error;
}
/*
 * Buffer I/O submission path, read or write. Asynchronous submission transfers
 * the buffer lock ownership and the current reference to the IO. It is not
 * safe to reference the buffer after a call to this function unless the caller
 * holds an additional reference itself.
 */
static int
__xfs_buf_submit(
	struct xfs_buf	*bp,
	bool		wait)
{
	int		error = 0;

	trace_xfs_buf_submit(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));

	/* on shutdown we stale and complete the buffer immediately */
	if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
		xfs_buf_ioend_fail(bp);
		return -EIO;
	}

	/*
	 * Grab a reference so the buffer does not go away underneath us. For
	 * async buffers, I/O completion drops the callers reference, which
	 * could occur before submission returns.
	 */
	xfs_buf_hold(bp);

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/* clear the internal error state to avoid spurious errors */
	bp->b_io_error = 0;

	/*
	 * Set the count to 1 initially, this will stop an I/O completion
	 * callout which happens before we have started all the I/O from calling
	 * xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	if (bp->b_flags & XBF_ASYNC)
		xfs_buf_ioacct_inc(bp);
	_xfs_buf_ioapply(bp);

	/*
	 * If _xfs_buf_ioapply failed, we can get back here with only the IO
	 * reference we took above. If we drop it to zero, run completion so
	 * that we don't return to the caller with completion still pending.
	 */
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
			xfs_buf_ioend(bp);
		else
			xfs_buf_ioend_async(bp);
	}

	if (wait)
		error = xfs_buf_iowait(bp);

	/*
	 * Release the hold that keeps the buffer referenced for the entire
	 * I/O. Note that if the buffer is async, it is not safe to reference
	 * after this release.
	 */
	xfs_buf_rele(bp);
	return error;
}
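
/*
 * Illustrative sketch (not part of the original file): because asynchronous
 * submission hands the caller's reference to I/O completion, a hypothetical
 * caller that still needs the buffer after submission takes its own hold
 * first. The helper name is an assumption; the buffer's read/write flags are
 * assumed to have been set up already.
 */
#if 0	/* example only, not built */
static void
xfs_example_submit_async(
	struct xfs_buf	*bp)
{
	/* keep a private reference; completion drops the submit reference */
	xfs_buf_hold(bp);

	bp->b_flags |= XBF_ASYNC;
	xfs_buf_submit(bp);

	/* bp is still safe to touch here only because of our extra hold */
	xfs_buf_rele(bp);
}
#endif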
void *
xfs_buf_offset(
	struct xfs_buf		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_addr)
		return bp->b_addr + offset;

	page = bp->b_pages[offset >> PAGE_SHIFT];
	return page_address(page) + (offset & (PAGE_SIZE-1));
}
void
xfs_buf_zero(
	struct xfs_buf		*bp,
	size_t			boff,
	size_t			bsize)
{
	size_t			bend;

	bend = boff + bsize;
	while (boff < bend) {
		struct page	*page;
		int		page_index, page_offset, csize;

		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
		page = bp->b_pages[page_index];
		csize = min_t(size_t, PAGE_SIZE - page_offset,
				      BBTOB(bp->b_length) - boff);

		ASSERT((csize + page_offset) <= PAGE_SIZE);

		memset(page_address(page) + page_offset, 0, csize);

		boff += csize;
	}
}
/*
 * Log a message about and stale a buffer that a caller has decided is corrupt.
 *
 * This function should be called for the kinds of metadata corruption that
 * cannot be detected by a verifier, such as incorrect inter-block relationship
 * data. Do /not/ call this function from a verifier function.
 *
 * The buffer must be XBF_DONE prior to the call. Afterwards, the buffer will
 * be marked stale, but b_error will not be set. The caller is responsible for
 * releasing the buffer or fixing it.
 */
void
__xfs_buf_mark_corrupt(
	struct xfs_buf		*bp,
	xfs_failaddr_t		fa)
{
	ASSERT(bp->b_flags & XBF_DONE);

	xfs_buf_corruption_error(bp, fa);
	xfs_buf_stale(bp);
}
/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
static enum lru_status
xfs_buftarg_drain_rele(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	if (atomic_read(&bp->b_hold) > 1) {
		/* need to wait, so skip it this pass */
		trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
		return LRU_SKIP;
	}
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;

	/*
	 * clear the LRU reference count so the buffer doesn't get
	 * ignored in xfs_buf_rele().
	 */
	atomic_set(&bp->b_lru_ref, 0);
	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_lru_isolate_move(lru, item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}
/*
 * Wait for outstanding I/O on the buftarg to complete.
 */
void
xfs_buftarg_wait(
	struct xfs_buftarg	*btp)
{
	/*
	 * First wait on the buftarg I/O count for all in-flight buffers to be
	 * released. This is critical as new buffers do not make the LRU until
	 * they are released.
	 *
	 * Next, flush the buffer workqueue to ensure all completion processing
	 * has finished. Just waiting on buffer locks is not sufficient for
	 * async IO as the reference count held over IO is not released until
	 * after the buffer lock is dropped. Hence we need to ensure here that
	 * all reference counts have been dropped before we start walking the
	 * LRU list.
	 */
	while (percpu_counter_sum(&btp->bt_io_count))
		delay(100);
	flush_workqueue(btp->bt_mount->m_buf_workqueue);
}
void
xfs_buftarg_drain(
	struct xfs_buftarg	*btp)
{
	LIST_HEAD(dispose);
	int			loop = 0;
	bool			write_fail = false;

	xfs_buftarg_wait(btp);

	/* loop until there is nothing left on the lru list. */
	while (list_lru_count(&btp->bt_lru)) {
		list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele,
			      &dispose, LONG_MAX);

		while (!list_empty(&dispose)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
			list_del_init(&bp->b_lru);
			if (bp->b_flags & XBF_WRITE_FAIL) {
				write_fail = true;
				xfs_buf_alert_ratelimited(bp,
					"XFS: Corruption Alert",
"Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
					(long long)bp->b_bn);
			}
			xfs_buf_rele(bp);
		}
		if (loop++ != 0)
			delay(100);
	}

	/*
	 * If one or more failed buffers were freed, that means dirty metadata
	 * was thrown away. This should only ever happen after I/O completion
	 * handling has elevated I/O error(s) to permanent failures and shuts
	 * down the journal.
	 */
	if (write_fail) {
		ASSERT(XFS_FORCED_SHUTDOWN(btp->bt_mount));
		xfs_alert(btp->bt_mount,
	      "Please run xfs_repair to determine the extent of the problem.");
	}
}
static enum lru_status
xfs_buftarg_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	/*
	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;
	/*
	 * Decrement the b_lru_ref count unless the value is already
	 * zero. If the value is already zero, we need to reclaim the
	 * buffer, otherwise it gets another trip through the LRU.
	 */
	if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
		spin_unlock(&bp->b_lock);
		return LRU_ROTATE;
	}

	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_lru_isolate_move(lru, item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}
static unsigned long
xfs_buftarg_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	LIST_HEAD(dispose);
	unsigned long		freed;

	freed = list_lru_shrink_walk(&btp->bt_lru, sc,
				     xfs_buftarg_isolate, &dispose);

	while (!list_empty(&dispose)) {
		struct xfs_buf *bp;
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return freed;
}
static unsigned long
xfs_buftarg_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	return list_lru_shrink_count(&btp->bt_lru, sc);
}
void
xfs_free_buftarg(
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);
	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
	percpu_counter_destroy(&btp->bt_io_count);
	list_lru_destroy(&btp->bt_lru);

	xfs_blkdev_issue_flush(btp);

	kmem_free(btp);
}
int
xfs_setsize_buftarg(
	struct xfs_buftarg	*btp,
	unsigned int		sectorsize)
{
	/* Set up metadata sector size info */
	btp->bt_meta_sectorsize = sectorsize;
	btp->bt_meta_sectormask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %pg",
			sectorsize, btp->bt_bdev);
		return -EINVAL;
	}

	/* Set up device logical sector size mask */
	btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
	btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;

	return 0;
}
/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what sized sectors
 * are being used at this early stage. Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	struct xfs_buftarg	*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
}
struct xfs_buftarg *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev,
	struct dax_device	*dax_dev)
{
	struct xfs_buftarg	*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_NOFS);

	btp->bt_mount = mp;
	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_daxdev = dax_dev;

	/*
	 * Buffer IO error rate limiting. Limit it to no more than 10 messages
	 * per 30 seconds so as to not spam logs too much on repeated errors.
	 */
	ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ,
			     DEFAULT_RATELIMIT_BURST);

	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error_free;

	if (list_lru_init(&btp->bt_lru))
		goto error_free;

	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
		goto error_lru;

	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
	if (register_shrinker(&btp->bt_shrinker))
		goto error_pcpu;
	return btp;

error_pcpu:
	percpu_counter_destroy(&btp->bt_io_count);
error_lru:
	list_lru_destroy(&btp->bt_lru);
error_free:
	kmem_free(btp);
	return NULL;
}
/*
 * Cancel a delayed write list.
 *
 * Remove each buffer from the list, clear the delwri queue flag and drop the
 * associated buffer reference.
 */
void
xfs_buf_delwri_cancel(
	struct list_head	*list)
{
	struct xfs_buf	*bp;

	while (!list_empty(list)) {
		bp = list_first_entry(list, struct xfs_buf, b_list);

		xfs_buf_lock(bp);
		bp->b_flags &= ~_XBF_DELWRI_Q;
		list_del_init(&bp->b_list);
		xfs_buf_relse(bp);
	}
}
/*
 * Add a buffer to the delayed write list.
 *
 * This queues a buffer for writeout if it hasn't already been. Note that
 * neither this routine nor the buffer list submission functions perform
 * any internal synchronization. It is expected that the lists are thread-local
 * to the callers.
 *
 * Returns true if we queued up the buffer, or false if it already had
 * been on the buffer list.
 */
bool
xfs_buf_delwri_queue(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	ASSERT(xfs_buf_islocked(bp));
	ASSERT(!(bp->b_flags & XBF_READ));

	/*
	 * If the buffer is already marked delwri it already is queued up
	 * by someone else for immediate writeout. Just ignore it in that
	 * case.
	 */
	if (bp->b_flags & _XBF_DELWRI_Q) {
		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
		return false;
	}

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	/*
	 * If a buffer gets written out synchronously or marked stale while it
	 * is on a delwri list we lazily remove it. To do this, the other party
	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
	 * It remains referenced and on the list. In a rare corner case it
	 * might get readded to a delwri list after the synchronous writeout, in
	 * which case we just need to re-add the flag here.
	 */
	bp->b_flags |= _XBF_DELWRI_Q;
	if (list_empty(&bp->b_list)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_list, list);
	}

	return true;
}
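
/*
 * Illustrative sketch (not part of the original file): the usual delwri
 * pattern for a hypothetical caller that owns a thread-local list. Buffers
 * are queued while locked, unlocked afterwards, and the whole list is later
 * written out with xfs_buf_delwri_submit(). The helper name is an assumption.
 */
#if 0	/* example only, not built */
static int
xfs_example_delwri_flush(
	struct xfs_buf	*bp)
{
	LIST_HEAD(buffer_list);

	xfs_buf_lock(bp);
	xfs_buf_delwri_queue(bp, &buffer_list);	/* takes its own hold */
	xfs_buf_unlock(bp);

	/* ... queue more buffers in the same way ... */

	/* write everything out and wait for I/O completion */
	return xfs_buf_delwri_submit(&buffer_list);
}
#endif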
/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t		diff;

	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}
/*
 * Submit buffers for write. If wait_list is specified, the buffers are
 * submitted using sync I/O and placed on the wait list such that the caller can
 * iowait each buffer. Otherwise async I/O is used and the buffers are released
 * at I/O completion time. In either case, buffers remain locked until I/O
 * completes and the buffer is released from the queue.
 */
static int
xfs_buf_delwri_submit_buffers(
	struct list_head	*buffer_list,
	struct list_head	*wait_list)
{
	struct xfs_buf		*bp, *n;
	int			pinned = 0;
	struct blk_plug		plug;

	list_sort(NULL, buffer_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
		if (!wait_list) {
			if (xfs_buf_ispinned(bp)) {
				pinned++;
				continue;
			}
			if (!xfs_buf_trylock(bp))
				continue;
		} else {
			xfs_buf_lock(bp);
		}

		/*
		 * Someone else might have written the buffer synchronously or
		 * marked it stale in the meantime. In that case only the
		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
		 * reference and remove it from the list here.
		 */
		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			list_del_init(&bp->b_list);
			xfs_buf_relse(bp);
			continue;
		}

		trace_xfs_buf_delwri_split(bp, _RET_IP_);

		/*
		 * If we have a wait list, each buffer (and associated delwri
		 * queue reference) transfers to it and is submitted
		 * synchronously. Otherwise, drop the buffer from the delwri
		 * queue and submit async.
		 */
		bp->b_flags &= ~_XBF_DELWRI_Q;
		bp->b_flags |= XBF_WRITE;
		if (wait_list) {
			bp->b_flags &= ~XBF_ASYNC;
			list_move_tail(&bp->b_list, wait_list);
		} else {
			bp->b_flags |= XBF_ASYNC;
			list_del_init(&bp->b_list);
		}
		__xfs_buf_submit(bp, false);
	}
	blk_finish_plug(&plug);

	return pinned;
}
/*
 * Write out a buffer list asynchronously.
 *
 * This will take the @buffer_list, write all non-locked and non-pinned buffers
 * out and not wait for I/O completion on any of the buffers. This interface
 * is only safely usable for callers that can track I/O completion by higher
 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
 * function.
 *
 * Note: this function will skip buffers it would block on, and in doing so
 * leaves them on @buffer_list so they can be retried on a later pass. As such,
 * it is up to the caller to ensure that the buffer list is fully submitted or
 * cancelled appropriately when they are finished with the list. Failure to
 * cancel or resubmit the list until it is empty will result in leaked buffers
 * at unmount time.
 */
int
xfs_buf_delwri_submit_nowait(
	struct list_head	*buffer_list)
{
	return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
}
/*
 * Write out a buffer list synchronously.
 *
 * This will take the @buffer_list, write all buffers out and wait for I/O
 * completion on all of the buffers. @buffer_list is consumed by the function,
 * so callers must have some other way of tracking buffers if they require such
 * functionality.
 */
int
xfs_buf_delwri_submit(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(wait_list);
	int			error = 0, error2;
	struct xfs_buf		*bp;

	xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);

	/* Wait for IO to complete. */
	while (!list_empty(&wait_list)) {
		bp = list_first_entry(&wait_list, struct xfs_buf, b_list);

		list_del_init(&bp->b_list);

		/*
		 * Wait on the locked buffer, check for errors and unlock and
		 * release the delwri queue reference.
		 */
		error2 = xfs_buf_iowait(bp);
		xfs_buf_relse(bp);
		if (!error)
			error = error2;
	}

	return error;
}
/*
 * Push a single buffer on a delwri queue.
 *
 * The purpose of this function is to submit a single buffer of a delwri queue
 * and return with the buffer still on the original queue. The waiting delwri
 * buffer submission infrastructure guarantees transfer of the delwri queue
 * buffer reference to a temporary wait list. We reuse this infrastructure to
 * transfer the buffer back to the original queue.
 *
 * Note the buffer transitions from the queued state, to the submitted and wait
 * listed state and back to the queued state during this call. The buffer
 * locking and queue management logic between _delwri_pushbuf() and
 * _delwri_queue() guarantee that the buffer cannot be queued to another list
 * before returning.
 */
int
xfs_buf_delwri_pushbuf(
	struct xfs_buf		*bp,
	struct list_head	*buffer_list)
{
	LIST_HEAD		(submit_list);
	int			error;

	ASSERT(bp->b_flags & _XBF_DELWRI_Q);

	trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);

	/*
	 * Isolate the buffer to a new local list so we can submit it for I/O
	 * independently from the rest of the original list.
	 */
	xfs_buf_lock(bp);
	list_move(&bp->b_list, &submit_list);
	xfs_buf_unlock(bp);

	/*
	 * Delwri submission clears the DELWRI_Q buffer flag and returns with
	 * the buffer on the wait list with the original reference. Rather than
	 * bounce the buffer from a local wait list back to the original list
	 * after I/O completion, reuse the original list as the wait list.
	 */
	xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);

	/*
	 * The buffer is now locked, under I/O and wait listed on the original
	 * delwri queue. Wait for I/O completion, restore the DELWRI_Q flag and
	 * return with the buffer unlocked and on the original queue.
	 */
	error = xfs_buf_iowait(bp);
	bp->b_flags |= _XBF_DELWRI_Q;
	xfs_buf_unlock(bp);

	return error;
}
int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
					 SLAB_HWCACHE_ALIGN |
					 SLAB_RECLAIM_ACCOUNT |
					 SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_buf_zone)
		return -ENOMEM;

	return 0;
}

void
xfs_buf_terminate(void)
{
	kmem_cache_destroy(xfs_buf_zone);
}
void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
	/*
	 * Set the lru reference count to 0 based on the error injection tag.
	 * This allows userspace to disrupt buffer caching for debug/testing
	 * purposes.
	 */
	if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF))
		lru_ref = 0;

	atomic_set(&bp->b_lru_ref, lru_ref);
}
/*
 * Verify an on-disk magic value against the magic value specified in the
 * verifier structure. The verifier magic is in disk byte order so the caller is
 * expected to pass the value directly from disk.
 */
bool
xfs_verify_magic(
	struct xfs_buf		*bp,
	__be32			dmagic)
{
	struct xfs_mount	*mp = bp->b_mount;
	int			idx;

	idx = xfs_sb_version_hascrc(&mp->m_sb);
	if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
		return false;
	return dmagic == bp->b_ops->magic[idx];
}
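
/*
 * Illustrative sketch (not part of the original file): how a read verifier
 * might use xfs_verify_magic(), assuming a hypothetical on-disk header whose
 * big-endian magic field sits at the start of the block. Both the structure
 * and the function below are assumptions made for the example only.
 */
#if 0	/* example only, not built */
struct xfs_example_hdr {
	__be32			magic;
	/* ... rest of the on-disk header ... */
};

static xfs_failaddr_t
xfs_example_verify_struct(
	struct xfs_buf		*bp)
{
	struct xfs_example_hdr	*hdr = bp->b_addr;

	/* compares against b_ops->magic[] for this filesystem format */
	if (!xfs_verify_magic(bp, hdr->magic))
		return __this_address;
	return NULL;
}
#endif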
/*
 * Verify an on-disk magic value against the magic value specified in the
 * verifier structure. The verifier magic is in disk byte order so the caller is
 * expected to pass the value directly from disk.
 */
bool
xfs_verify_magic16(
	struct xfs_buf		*bp,
	__be16			dmagic)
{
	struct xfs_mount	*mp = bp->b_mount;
	int			idx;

	idx = xfs_sb_version_hascrc(&mp->m_sb);
	if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
		return false;
	return dmagic == bp->b_ops->magic16[idx];
}