1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs.h"
7 #include <linux/backing-dev.h>
8
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_trace.h"
16 #include "xfs_log.h"
17 #include "xfs_errortag.h"
18 #include "xfs_error.h"
19
20 static kmem_zone_t *xfs_buf_zone;
21
22 #define xb_to_gfp(flags) \
23 ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
24
25 /*
26 * Locking orders
27 *
28 * xfs_buf_ioacct_inc:
29 * xfs_buf_ioacct_dec:
30 * b_sema (caller holds)
31 * b_lock
32 *
33 * xfs_buf_stale:
34 * b_sema (caller holds)
35 * b_lock
36 * lru_lock
37 *
38 * xfs_buf_rele:
39 * b_lock
40 * pag_buf_lock
41 * lru_lock
42 *
43 * xfs_buftarg_wait_rele
44 * lru_lock
45 * b_lock (trylock due to inversion)
46 *
47 * xfs_buftarg_isolate
48 * lru_lock
49 * b_lock (trylock due to inversion)
50 */
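/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a walker that already holds lru_lock sits below b_lock in the order
 * above, so it may only trylock the buffer and must skip on failure:
 *
 *	if (!spin_trylock(&bp->b_lock))
 *		return LRU_SKIP;	// retry on a later LRU pass
 *
 * which is exactly the pattern xfs_buftarg_wait_rele() and
 * xfs_buftarg_isolate() use further down in this file.
 */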
51
52 static inline int
53 xfs_buf_is_vmapped(
54 struct xfs_buf *bp)
55 {
56 /*
57 * Return true if the buffer is vmapped.
58 *
59 * b_addr is null if the buffer is not mapped, but the code is clever
60 * enough to know it doesn't have to map a single page, so the check has
61 * to be both for b_addr and bp->b_page_count > 1.
62 */
63 return bp->b_addr && bp->b_page_count > 1;
64 }
65
66 static inline int
67 xfs_buf_vmap_len(
68 struct xfs_buf *bp)
69 {
70 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
71 }
72
73 /*
74 * Bump the I/O in flight count on the buftarg if we haven't yet done so for
75 * this buffer. The count is incremented once per buffer (per hold cycle)
76 * because the corresponding decrement is deferred to buffer release. Buffers
77 * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
78 * tracking adds unnecessary overhead. This is used for synchronization purposes
79 * with unmount (see xfs_wait_buftarg()), so all we really need is a count of
80 * in-flight buffers.
81 *
82 * Buffers that are never released (e.g., superblock, iclog buffers) must set
83 * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
84 * never reaches zero and unmount hangs indefinitely.
85 */
86 static inline void
87 xfs_buf_ioacct_inc(
88 struct xfs_buf *bp)
89 {
90 if (bp->b_flags & XBF_NO_IOACCT)
91 return;
92
93 ASSERT(bp->b_flags & XBF_ASYNC);
94 spin_lock(&bp->b_lock);
95 if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
96 bp->b_state |= XFS_BSTATE_IN_FLIGHT;
97 percpu_counter_inc(&bp->b_target->bt_io_count);
98 }
99 spin_unlock(&bp->b_lock);
100 }
101
102 /*
103 * Clear the in-flight state on a buffer about to be released to the LRU or
104 * freed and unaccount from the buftarg.
105 */
106 static inline void
107 __xfs_buf_ioacct_dec(
108 struct xfs_buf *bp)
109 {
110 lockdep_assert_held(&bp->b_lock);
111
112 if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
113 bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
114 percpu_counter_dec(&bp->b_target->bt_io_count);
115 }
116 }
117
118 static inline void
119 xfs_buf_ioacct_dec(
120 struct xfs_buf *bp)
121 {
122 spin_lock(&bp->b_lock);
123 __xfs_buf_ioacct_dec(bp);
124 spin_unlock(&bp->b_lock);
125 }
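/*
 * Hedged sketch of the accounting lifecycle implemented above: the
 * in-flight count is bumped at most once per hold cycle when async I/O is
 * submitted, and dropped again when the buffer is released or marked stale:
 *
 *	xfs_buf_ioacct_inc(bp);	// XBF_ASYNC submission
 *	...			// any number of I/Os on this hold
 *	xfs_buf_rele(bp);	// drops IN_FLIGHT via __xfs_buf_ioacct_dec()
 *
 * Hence percpu_counter_sum(&btp->bt_io_count) == 0 means no buffers are in
 * flight, which is what xfs_wait_buftarg() polls for.
 */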
126
127 /*
128 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
129 * b_lru_ref count so that the buffer is freed immediately when the buffer
130 * reference count falls to zero. If the buffer is already on the LRU, we need
131 * to remove the reference that LRU holds on the buffer.
132 *
133 * This prevents build-up of stale buffers on the LRU.
134 */
135 void
136 xfs_buf_stale(
137 struct xfs_buf *bp)
138 {
139 ASSERT(xfs_buf_islocked(bp));
140
141 bp->b_flags |= XBF_STALE;
142
143 /*
144 * Clear the delwri status so that a delwri queue walker will not
145 * flush this buffer to disk now that it is stale. The delwri queue has
146 * a reference to the buffer, so this is safe to do.
147 */
148 bp->b_flags &= ~_XBF_DELWRI_Q;
149
150 /*
151 * Once the buffer is marked stale and unlocked, a subsequent lookup
152 * could reset b_flags. There is no guarantee that the buffer is
153 * unaccounted (released to LRU) before that occurs. Drop in-flight
154 * status now to preserve accounting consistency.
155 */
156 spin_lock(&bp->b_lock);
157 __xfs_buf_ioacct_dec(bp);
158
159 atomic_set(&bp->b_lru_ref, 0);
160 if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
161 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
162 atomic_dec(&bp->b_hold);
163
164 ASSERT(atomic_read(&bp->b_hold) >= 1);
165 spin_unlock(&bp->b_lock);
166 }
167
168 static int
169 xfs_buf_get_maps(
170 struct xfs_buf *bp,
171 int map_count)
172 {
173 ASSERT(bp->b_maps == NULL);
174 bp->b_map_count = map_count;
175
176 if (map_count == 1) {
177 bp->b_maps = &bp->__b_map;
178 return 0;
179 }
180
181 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
182 KM_NOFS);
183 if (!bp->b_maps)
184 return -ENOMEM;
185 return 0;
186 }
187
188 /*
189 * Frees b_maps if it was allocated.
190 */
191 static void
192 xfs_buf_free_maps(
193 struct xfs_buf *bp)
194 {
195 if (bp->b_maps != &bp->__b_map) {
196 kmem_free(bp->b_maps);
197 bp->b_maps = NULL;
198 }
199 }
200
201 static struct xfs_buf *
202 _xfs_buf_alloc(
203 struct xfs_buftarg *target,
204 struct xfs_buf_map *map,
205 int nmaps,
206 xfs_buf_flags_t flags)
207 {
208 struct xfs_buf *bp;
209 int error;
210 int i;
211
212 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
213 if (unlikely(!bp))
214 return NULL;
215
216 /*
217 * We don't want certain flags to appear in b_flags unless they are
218 * specifically set by later operations on the buffer.
219 */
220 flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
221
222 atomic_set(&bp->b_hold, 1);
223 atomic_set(&bp->b_lru_ref, 1);
224 init_completion(&bp->b_iowait);
225 INIT_LIST_HEAD(&bp->b_lru);
226 INIT_LIST_HEAD(&bp->b_list);
227 INIT_LIST_HEAD(&bp->b_li_list);
228 sema_init(&bp->b_sema, 0); /* held, no waiters */
229 spin_lock_init(&bp->b_lock);
230 bp->b_target = target;
231 bp->b_mount = target->bt_mount;
232 bp->b_flags = flags;
233
234 /*
235 * Set the buffer length to the sum of the map lengths. I/O routines
236 * operate on b_length, which in most cases stays fixed but may be
237 * reset later (e.g. during XFS log recovery).
238 */
239 error = xfs_buf_get_maps(bp, nmaps);
240 if (error) {
241 kmem_zone_free(xfs_buf_zone, bp);
242 return NULL;
243 }
244
245 bp->b_bn = map[0].bm_bn;
246 bp->b_length = 0;
247 for (i = 0; i < nmaps; i++) {
248 bp->b_maps[i].bm_bn = map[i].bm_bn;
249 bp->b_maps[i].bm_len = map[i].bm_len;
250 bp->b_length += map[i].bm_len;
251 }
252
253 atomic_set(&bp->b_pin_count, 0);
254 init_waitqueue_head(&bp->b_waiters);
255
256 XFS_STATS_INC(bp->b_mount, xb_create);
257 trace_xfs_buf_init(bp, _RET_IP_);
258
259 return bp;
260 }
261
262 /*
263 * Allocate a page array capable of holding a specified number
264 * of pages, and point the page buf at it.
265 */
266 STATIC int
267 _xfs_buf_get_pages(
268 xfs_buf_t *bp,
269 int page_count)
270 {
271 /* Make sure that we have a page list */
272 if (bp->b_pages == NULL) {
273 bp->b_page_count = page_count;
274 if (page_count <= XB_PAGES) {
275 bp->b_pages = bp->b_page_array;
276 } else {
277 bp->b_pages = kmem_alloc(sizeof(struct page *) *
278 page_count, KM_NOFS);
279 if (bp->b_pages == NULL)
280 return -ENOMEM;
281 }
282 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
283 }
284 return 0;
285 }
286
287 /*
288 * Frees b_pages if it was allocated.
289 */
290 STATIC void
291 _xfs_buf_free_pages(
292 xfs_buf_t *bp)
293 {
294 if (bp->b_pages != bp->b_page_array) {
295 kmem_free(bp->b_pages);
296 bp->b_pages = NULL;
297 }
298 }
299
300 /*
301 * Releases the specified buffer.
302 *
303 * The modification state of any associated pages is left unchanged.
304 * The buffer must not be on any hash - use xfs_buf_rele instead for
305 * hashed and refcounted buffers
306 */
307 void
308 xfs_buf_free(
309 xfs_buf_t *bp)
310 {
311 trace_xfs_buf_free(bp, _RET_IP_);
312
313 ASSERT(list_empty(&bp->b_lru));
314
315 if (bp->b_flags & _XBF_PAGES) {
316 uint i;
317
318 if (xfs_buf_is_vmapped(bp))
319 vm_unmap_ram(bp->b_addr - bp->b_offset,
320 bp->b_page_count);
321
322 for (i = 0; i < bp->b_page_count; i++) {
323 struct page *page = bp->b_pages[i];
324
325 __free_page(page);
326 }
327 } else if (bp->b_flags & _XBF_KMEM)
328 kmem_free(bp->b_addr);
329 _xfs_buf_free_pages(bp);
330 xfs_buf_free_maps(bp);
331 kmem_zone_free(xfs_buf_zone, bp);
332 }
333
334 /*
335 * Allocates all the pages for the buffer in question and builds its page list.
336 */
337 STATIC int
338 xfs_buf_allocate_memory(
339 xfs_buf_t *bp,
340 uint flags)
341 {
342 size_t size;
343 size_t nbytes, offset;
344 gfp_t gfp_mask = xb_to_gfp(flags);
345 unsigned short page_count, i;
346 xfs_off_t start, end;
347 int error;
348 xfs_km_flags_t kmflag_mask = 0;
349
350 /*
351 * Ensure a zeroed buffer for non-read cases.
352 */
353 if (!(flags & XBF_READ)) {
354 kmflag_mask |= KM_ZERO;
355 gfp_mask |= __GFP_ZERO;
356 }
357
358 /*
359 * for buffers that are contained within a single page, just allocate
360 * the memory from the heap - there's no need for the complexity of
361 * page arrays to keep allocation down to order 0.
362 */
363 size = BBTOB(bp->b_length);
364 if (size < PAGE_SIZE) {
365 int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
366 bp->b_addr = kmem_alloc_io(size, align_mask,
367 KM_NOFS | kmflag_mask);
368 if (!bp->b_addr) {
369 /* low memory - use alloc_page loop instead */
370 goto use_alloc_page;
371 }
372
373 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
374 ((unsigned long)bp->b_addr & PAGE_MASK)) {
375 /* b_addr spans two pages - use alloc_page instead */
376 kmem_free(bp->b_addr);
377 bp->b_addr = NULL;
378 goto use_alloc_page;
379 }
380 bp->b_offset = offset_in_page(bp->b_addr);
381 bp->b_pages = bp->b_page_array;
382 bp->b_pages[0] = kmem_to_page(bp->b_addr);
383 bp->b_page_count = 1;
384 bp->b_flags |= _XBF_KMEM;
385 return 0;
386 }
387
388 use_alloc_page:
389 start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
390 end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
391 >> PAGE_SHIFT;
392 page_count = end - start;
393 error = _xfs_buf_get_pages(bp, page_count);
394 if (unlikely(error))
395 return error;
396
397 offset = bp->b_offset;
398 bp->b_flags |= _XBF_PAGES;
399
400 for (i = 0; i < bp->b_page_count; i++) {
401 struct page *page;
402 uint retries = 0;
403 retry:
404 page = alloc_page(gfp_mask);
405 if (unlikely(page == NULL)) {
406 if (flags & XBF_READ_AHEAD) {
407 bp->b_page_count = i;
408 error = -ENOMEM;
409 goto out_free_pages;
410 }
411
412 /*
413 * This could deadlock.
414 *
415 * But until all the XFS lowlevel code is revamped to
416 * handle buffer allocation failures we can't do much.
417 */
418 if (!(++retries % 100))
419 xfs_err(NULL,
420 "%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
421 current->comm, current->pid,
422 __func__, gfp_mask);
423
424 XFS_STATS_INC(bp->b_mount, xb_page_retries);
425 congestion_wait(BLK_RW_ASYNC, HZ/50);
426 goto retry;
427 }
428
429 XFS_STATS_INC(bp->b_mount, xb_page_found);
430
431 nbytes = min_t(size_t, size, PAGE_SIZE - offset);
432 size -= nbytes;
433 bp->b_pages[i] = page;
434 offset = 0;
435 }
436 return 0;
437
438 out_free_pages:
439 for (i = 0; i < bp->b_page_count; i++)
440 __free_page(bp->b_pages[i]);
441 bp->b_flags &= ~_XBF_PAGES;
442 return error;
443 }
444
445 /*
446 * Map buffer into kernel address-space if necessary.
447 */
448 STATIC int
449 _xfs_buf_map_pages(
450 xfs_buf_t *bp,
451 uint flags)
452 {
453 ASSERT(bp->b_flags & _XBF_PAGES);
454 if (bp->b_page_count == 1) {
455 /* A single page buffer is always mappable */
456 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
457 } else if (flags & XBF_UNMAPPED) {
458 bp->b_addr = NULL;
459 } else {
460 int retried = 0;
461 unsigned nofs_flag;
462
463 /*
464 * vm_map_ram() will allocate auxiliary structures (e.g.
465 * pagetables) with GFP_KERNEL, yet we are likely to be under
466 * GFP_NOFS context here. Hence we need to tell memory reclaim
467 * that we are in such a context via PF_MEMALLOC_NOFS to prevent
468 * memory reclaim re-entering the filesystem here and
469 * potentially deadlocking.
470 */
471 nofs_flag = memalloc_nofs_save();
472 do {
473 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
474 -1, PAGE_KERNEL);
475 if (bp->b_addr)
476 break;
477 vm_unmap_aliases();
478 } while (retried++ <= 1);
479 memalloc_nofs_restore(nofs_flag);
480
481 if (!bp->b_addr)
482 return -ENOMEM;
483 bp->b_addr += bp->b_offset;
484 }
485
486 return 0;
487 }
488
489 /*
490 * Finding and Reading Buffers
491 */
492 static int
493 _xfs_buf_obj_cmp(
494 struct rhashtable_compare_arg *arg,
495 const void *obj)
496 {
497 const struct xfs_buf_map *map = arg->key;
498 const struct xfs_buf *bp = obj;
499
500 /*
501 * The key hashing in the lookup path depends on the key being the
502 * first element of the compare_arg, make sure to assert this.
503 */
504 BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);
505
506 if (bp->b_bn != map->bm_bn)
507 return 1;
508
509 if (unlikely(bp->b_length != map->bm_len)) {
510 /*
511 * found a block number match. If the range doesn't
512 * match, the only way this is allowed is if the buffer
513 * in the cache is stale and the transaction that made
514 * it stale has not yet committed. i.e. we are
515 * reallocating a busy extent. Skip this buffer and
516 * continue searching for an exact match.
517 */
518 ASSERT(bp->b_flags & XBF_STALE);
519 return 1;
520 }
521 return 0;
522 }
523
524 static const struct rhashtable_params xfs_buf_hash_params = {
525 .min_size = 32, /* empty AGs have minimal footprint */
526 .nelem_hint = 16,
527 .key_len = sizeof(xfs_daddr_t),
528 .key_offset = offsetof(struct xfs_buf, b_bn),
529 .head_offset = offsetof(struct xfs_buf, b_rhash_head),
530 .automatic_shrinking = true,
531 .obj_cmpfn = _xfs_buf_obj_cmp,
532 };
533
534 int
535 xfs_buf_hash_init(
536 struct xfs_perag *pag)
537 {
538 spin_lock_init(&pag->pag_buf_lock);
539 return rhashtable_init(&pag->pag_buf_hash, &xfs_buf_hash_params);
540 }
541
542 void
543 xfs_buf_hash_destroy(
544 struct xfs_perag *pag)
545 {
546 rhashtable_destroy(&pag->pag_buf_hash);
547 }
548
549 /*
550 * Look up a buffer in the buffer cache and return it referenced and locked
551 * in @found_bp.
552 *
553 * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
554 * cache.
555 *
556 * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
557 * -EAGAIN if we fail to lock it.
558 *
559 * Return values are:
560 * -EFSCORRUPTED if we have been supplied with an invalid address
561 * -EAGAIN on trylock failure
562 * -ENOENT if we fail to find a match and @new_bp was NULL
563 * 0, with @found_bp:
564 * - @new_bp if we inserted it into the cache
565 * - the buffer we found and locked.
566 */
567 static int
568 xfs_buf_find(
569 struct xfs_buftarg *btp,
570 struct xfs_buf_map *map,
571 int nmaps,
572 xfs_buf_flags_t flags,
573 struct xfs_buf *new_bp,
574 struct xfs_buf **found_bp)
575 {
576 struct xfs_perag *pag;
577 xfs_buf_t *bp;
578 struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn };
579 xfs_daddr_t eofs;
580 int i;
581
582 *found_bp = NULL;
583
584 for (i = 0; i < nmaps; i++)
585 cmap.bm_len += map[i].bm_len;
586
587 /* Check for IOs smaller than the sector size / not sector aligned */
588 ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize));
589 ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
590
591 /*
592 * Corrupted block numbers can get through to here, unfortunately, so we
593 * have to check that the buffer falls within the filesystem bounds.
594 */
595 eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
596 if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
597 xfs_alert(btp->bt_mount,
598 "%s: daddr 0x%llx out of range, EOFS 0x%llx",
599 __func__, cmap.bm_bn, eofs);
600 WARN_ON(1);
601 return -EFSCORRUPTED;
602 }
603
604 pag = xfs_perag_get(btp->bt_mount,
605 xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
606
607 spin_lock(&pag->pag_buf_lock);
608 bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
609 xfs_buf_hash_params);
610 if (bp) {
611 atomic_inc(&bp->b_hold);
612 goto found;
613 }
614
615 /* No match found */
616 if (!new_bp) {
617 XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
618 spin_unlock(&pag->pag_buf_lock);
619 xfs_perag_put(pag);
620 return -ENOENT;
621 }
622
623 /* the buffer keeps the perag reference until it is freed */
624 new_bp->b_pag = pag;
625 rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
626 xfs_buf_hash_params);
627 spin_unlock(&pag->pag_buf_lock);
628 *found_bp = new_bp;
629 return 0;
630
631 found:
632 spin_unlock(&pag->pag_buf_lock);
633 xfs_perag_put(pag);
634
635 if (!xfs_buf_trylock(bp)) {
636 if (flags & XBF_TRYLOCK) {
637 xfs_buf_rele(bp);
638 XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
639 return -EAGAIN;
640 }
641 xfs_buf_lock(bp);
642 XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
643 }
644
645 /*
646 * if the buffer is stale, clear all the external state associated with
647 * it. We need to keep flags such as how we allocated the buffer memory
648 * intact here.
649 */
650 if (bp->b_flags & XBF_STALE) {
651 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
652 ASSERT(bp->b_iodone == NULL);
653 bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
654 bp->b_ops = NULL;
655 }
656
657 trace_xfs_buf_find(bp, flags, _RET_IP_);
658 XFS_STATS_INC(btp->bt_mount, xb_get_locked);
659 *found_bp = bp;
660 return 0;
661 }
662
663 struct xfs_buf *
664 xfs_buf_incore(
665 struct xfs_buftarg *target,
666 xfs_daddr_t blkno,
667 size_t numblks,
668 xfs_buf_flags_t flags)
669 {
670 struct xfs_buf *bp;
671 int error;
672 DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
673
674 error = xfs_buf_find(target, &map, 1, flags, NULL, &bp);
675 if (error)
676 return NULL;
677 return bp;
678 }
679
680 /*
681 * Assembles a buffer covering the specified range. The code is optimised for
682 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
683 * more hits than misses.
684 */
685 struct xfs_buf *
686 xfs_buf_get_map(
687 struct xfs_buftarg *target,
688 struct xfs_buf_map *map,
689 int nmaps,
690 xfs_buf_flags_t flags)
691 {
692 struct xfs_buf *bp;
693 struct xfs_buf *new_bp;
694 int error = 0;
695
696 error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
697
698 switch (error) {
699 case 0:
700 /* cache hit */
701 goto found;
702 case -EAGAIN:
703 /* cache hit, trylock failure, caller handles failure */
704 ASSERT(flags & XBF_TRYLOCK);
705 return NULL;
706 case -ENOENT:
707 /* cache miss, go for insert */
708 break;
709 case -EFSCORRUPTED:
710 default:
711 /*
712 * None of the higher layers understand failure types
713 * yet, so return NULL to signal a fatal lookup error.
714 */
715 return NULL;
716 }
717
718 new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
719 if (unlikely(!new_bp))
720 return NULL;
721
722 error = xfs_buf_allocate_memory(new_bp, flags);
723 if (error) {
724 xfs_buf_free(new_bp);
725 return NULL;
726 }
727
728 error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
729 if (error) {
730 xfs_buf_free(new_bp);
731 return NULL;
732 }
733
734 if (bp != new_bp)
735 xfs_buf_free(new_bp);
736
737 found:
738 if (!bp->b_addr) {
739 error = _xfs_buf_map_pages(bp, flags);
740 if (unlikely(error)) {
741 xfs_warn(target->bt_mount,
742 "%s: failed to map pagesn", __func__);
743 xfs_buf_relse(bp);
744 return NULL;
745 }
746 }
747
748 /*
749 * Clear b_error if this is a lookup from a caller that doesn't expect
750 * valid data to be found in the buffer.
751 */
752 if (!(flags & XBF_READ))
753 xfs_buf_ioerror(bp, 0);
754
755 XFS_STATS_INC(target->bt_mount, xb_get);
756 trace_xfs_buf_get(bp, flags, _RET_IP_);
757 return bp;
758 }
759
760 STATIC int
761 _xfs_buf_read(
762 xfs_buf_t *bp,
763 xfs_buf_flags_t flags)
764 {
765 ASSERT(!(flags & XBF_WRITE));
766 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
767
768 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
769 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
770
771 return xfs_buf_submit(bp);
772 }
773
774 /*
775 * Reverify a buffer found in cache without an attached ->b_ops.
776 *
777 * If the caller passed an ops structure and the buffer doesn't have ops
778 * assigned, set the ops and use it to verify the contents. If verification
779 * fails, clear XBF_DONE. We assume the buffer has no recorded errors and is
780 * already in XBF_DONE state on entry.
781 *
782 * Under normal operations, every in-core buffer is verified on read I/O
783 * completion. There are two scenarios that can lead to in-core buffers without
784 * an assigned ->b_ops. The first is during log recovery of buffers on a V4
785 * filesystem, though these buffers are purged at the end of recovery. The
786 * other is online repair, which intentionally reads with a NULL buffer ops to
787 * run several verifiers across an in-core buffer in order to establish buffer
788 * type. If repair can't establish that, the buffer will be left in memory
789 * with NULL buffer ops.
790 */
791 int
792 xfs_buf_reverify(
793 struct xfs_buf *bp,
794 const struct xfs_buf_ops *ops)
795 {
796 ASSERT(bp->b_flags & XBF_DONE);
797 ASSERT(bp->b_error == 0);
798
799 if (!ops || bp->b_ops)
800 return 0;
801
802 bp->b_ops = ops;
803 bp->b_ops->verify_read(bp);
804 if (bp->b_error)
805 bp->b_flags &= ~XBF_DONE;
806 return bp->b_error;
807 }
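/*
 * Hedged usage sketch: a caller that previously read the buffer with NULL
 * ops can attach a candidate verifier later; "candidate_ops" is a
 * hypothetical name:
 *
 *	if (xfs_buf_reverify(bp, candidate_ops) == 0) {
 *		// contents passed candidate_ops->verify_read();
 *		// bp->b_ops is now set and XBF_DONE is still valid.
 *	}
 */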
808
809 xfs_buf_t *
810 xfs_buf_read_map(
811 struct xfs_buftarg *target,
812 struct xfs_buf_map *map,
813 int nmaps,
814 xfs_buf_flags_t flags,
815 const struct xfs_buf_ops *ops)
816 {
817 struct xfs_buf *bp;
818
819 flags |= XBF_READ;
820
821 bp = xfs_buf_get_map(target, map, nmaps, flags);
822 if (!bp)
823 return NULL;
824
825 trace_xfs_buf_read(bp, flags, _RET_IP_);
826
827 if (!(bp->b_flags & XBF_DONE)) {
828 XFS_STATS_INC(target->bt_mount, xb_get_read);
829 bp->b_ops = ops;
830 _xfs_buf_read(bp, flags);
831 return bp;
832 }
833
834 xfs_buf_reverify(bp, ops);
835
836 if (flags & XBF_ASYNC) {
837 /*
838 * Read ahead call which is already satisfied,
839 * drop the buffer
840 */
841 xfs_buf_relse(bp);
842 return NULL;
843 }
844
845 /* We do not want read in the flags */
846 bp->b_flags &= ~XBF_READ;
847 ASSERT(bp->b_ops != NULL || ops == NULL);
848 return bp;
849 }
850
851 /*
852 * If we are not low on memory then do the readahead in a deadlock
853 * safe manner.
854 */
855 void
856 xfs_buf_readahead_map(
857 struct xfs_buftarg *target,
858 struct xfs_buf_map *map,
859 int nmaps,
860 const struct xfs_buf_ops *ops)
861 {
862 if (bdi_read_congested(target->bt_bdev->bd_bdi))
863 return;
864
865 xfs_buf_read_map(target, map, nmaps,
866 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
867 }
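/*
 * Hedged usage sketch with hypothetical daddr/numblks values; the buffer
 * is released at I/O completion because the read is XBF_ASYNC:
 *
 *	DEFINE_SINGLE_BUF_MAP(map, daddr, numblks);
 *
 *	xfs_buf_readahead_map(target, &map, 1, &xfs_inode_buf_ops);
 *
 * (The xfs_buf_readahead() wrapper in xfs_buf.h builds the single-entry
 * map the same way.)
 */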
868
869 /*
870 * Read an uncached buffer from disk. Allocates and returns a locked
871 * buffer containing the disk contents or nothing.
872 */
873 int
874 xfs_buf_read_uncached(
875 struct xfs_buftarg *target,
876 xfs_daddr_t daddr,
877 size_t numblks,
878 int flags,
879 struct xfs_buf **bpp,
880 const struct xfs_buf_ops *ops)
881 {
882 struct xfs_buf *bp;
883
884 *bpp = NULL;
885
886 bp = xfs_buf_get_uncached(target, numblks, flags);
887 if (!bp)
888 return -ENOMEM;
889
890 /* set up the buffer for a read IO */
891 ASSERT(bp->b_map_count == 1);
892 bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */
893 bp->b_maps[0].bm_bn = daddr;
894 bp->b_flags |= XBF_READ;
895 bp->b_ops = ops;
896
897 xfs_buf_submit(bp);
898 if (bp->b_error) {
899 int error = bp->b_error;
900 xfs_buf_relse(bp);
901 return error;
902 }
903
904 *bpp = bp;
905 return 0;
906 }
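/*
 * Hedged usage sketch, with a hypothetical daddr and mp being the usual
 * struct xfs_mount; uncached buffers bypass the per-AG cache entirely, so
 * the caller owns the only reference:
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read_uncached(mp->m_ddev_targp, daddr,
 *			XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
 *	if (error)
 *		return error;
 *	// ... inspect bp->b_addr ...
 *	xfs_buf_relse(bp);
 */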
907
908 xfs_buf_t *
909 xfs_buf_get_uncached(
910 struct xfs_buftarg *target,
911 size_t numblks,
912 int flags)
913 {
914 unsigned long page_count;
915 int error, i;
916 struct xfs_buf *bp;
917 DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
918
919 /* flags might contain irrelevant bits, pass only what we care about */
920 bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT);
921 if (unlikely(bp == NULL))
922 goto fail;
923
924 page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
925 error = _xfs_buf_get_pages(bp, page_count);
926 if (error)
927 goto fail_free_buf;
928
929 for (i = 0; i < page_count; i++) {
930 bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
931 if (!bp->b_pages[i])
932 goto fail_free_mem;
933 }
934 bp->b_flags |= _XBF_PAGES;
935
936 error = _xfs_buf_map_pages(bp, 0);
937 if (unlikely(error)) {
938 xfs_warn(target->bt_mount,
939 "%s: failed to map pages", __func__);
940 goto fail_free_mem;
941 }
942
943 trace_xfs_buf_get_uncached(bp, _RET_IP_);
944 return bp;
945
946 fail_free_mem:
947 while (--i >= 0)
948 __free_page(bp->b_pages[i]);
949 _xfs_buf_free_pages(bp);
950 fail_free_buf:
951 xfs_buf_free_maps(bp);
952 kmem_zone_free(xfs_buf_zone, bp);
953 fail:
954 return NULL;
955 }
956
957 /*
958 * Increment reference count on buffer, to hold the buffer concurrently
959 * with another thread which may release (free) the buffer asynchronously.
960 * Must hold the buffer already to call this function.
961 */
962 void
963 xfs_buf_hold(
964 xfs_buf_t *bp)
965 {
966 trace_xfs_buf_hold(bp, _RET_IP_);
967 atomic_inc(&bp->b_hold);
968 }
969
970 /*
971 * Release a hold on the specified buffer. If the hold count is 1, the buffer is
972 * placed on LRU or freed (depending on b_lru_ref).
973 */
974 void
975 xfs_buf_rele(
976 xfs_buf_t *bp)
977 {
978 struct xfs_perag *pag = bp->b_pag;
979 bool release;
980 bool freebuf = false;
981
982 trace_xfs_buf_rele(bp, _RET_IP_);
983
984 if (!pag) {
985 ASSERT(list_empty(&bp->b_lru));
986 if (atomic_dec_and_test(&bp->b_hold)) {
987 xfs_buf_ioacct_dec(bp);
988 xfs_buf_free(bp);
989 }
990 return;
991 }
992
993 ASSERT(atomic_read(&bp->b_hold) > 0);
994
995 /*
996 * We grab the b_lock here first to serialise racing xfs_buf_rele()
997 * calls. The pag_buf_lock being taken on the last reference only
998 * serialises against racing lookups in xfs_buf_find(). IOWs, the second
999 * to last reference we drop here is not serialised against the last
1000 * reference until we take bp->b_lock. Hence if we don't grab b_lock
1001 * first, the last "release" reference can win the race to the lock and
1002 * free the buffer before the second-to-last reference is processed,
1003 * leading to a use-after-free scenario.
1004 */
1005 spin_lock(&bp->b_lock);
1006 release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
1007 if (!release) {
1008 /*
1009 * Drop the in-flight state if the buffer is already on the LRU
1010 * and it holds the only reference. This is racy because we
1011 * haven't acquired the pag lock, but the use of XFS_BSTATE_IN_FLIGHT
1012 * ensures the decrement occurs only once per-buf.
1013 */
1014 if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
1015 __xfs_buf_ioacct_dec(bp);
1016 goto out_unlock;
1017 }
1018
1019 /* the last reference has been dropped ... */
1020 __xfs_buf_ioacct_dec(bp);
1021 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
1022 /*
1023 * If the buffer is added to the LRU take a new reference to the
1024 * buffer for the LRU and clear the (now stale) dispose list
1025 * state flag
1026 */
1027 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
1028 bp->b_state &= ~XFS_BSTATE_DISPOSE;
1029 atomic_inc(&bp->b_hold);
1030 }
1031 spin_unlock(&pag->pag_buf_lock);
1032 } else {
1033 /*
1034 * most of the time buffers will already be removed from the
1035 * LRU, so optimise that case by checking for the
1036 * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
1037 * was on was the disposal list
1038 */
1039 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
1040 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
1041 } else {
1042 ASSERT(list_empty(&bp->b_lru));
1043 }
1044
1045 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1046 rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head,
1047 xfs_buf_hash_params);
1048 spin_unlock(&pag->pag_buf_lock);
1049 xfs_perag_put(pag);
1050 freebuf = true;
1051 }
1052
1053 out_unlock:
1054 spin_unlock(&bp->b_lock);
1055
1056 if (freebuf)
1057 xfs_buf_free(bp);
1058 }
1059
1060
1061 /*
1062 * Lock a buffer object, if it is not already locked.
1063 *
1064 * If we come across a stale, pinned, locked buffer, we know that we are
1065 * being asked to lock a buffer that has been reallocated. Because it is
1066 * pinned, we know that the log has not been pushed to disk and hence it
1067 * will still be locked. Rather than continuing to have trylock attempts
1068 * fail until someone else pushes the log, push it ourselves before
1069 * returning. This means that the xfsaild will not get stuck trying
1070 * to push on stale inode buffers.
1071 */
1072 int
1073 xfs_buf_trylock(
1074 struct xfs_buf *bp)
1075 {
1076 int locked;
1077
1078 locked = down_trylock(&bp->b_sema) == 0;
1079 if (locked)
1080 trace_xfs_buf_trylock(bp, _RET_IP_);
1081 else
1082 trace_xfs_buf_trylock_fail(bp, _RET_IP_);
1083 return locked;
1084 }
1085
1086 /*
1087 * Lock a buffer object.
1088 *
1089 * If we come across a stale, pinned, locked buffer, we know that we
1090 * are being asked to lock a buffer that has been reallocated. Because
1091 * it is pinned, we know that the log has not been pushed to disk and
1092 * hence it will still be locked. Rather than sleeping until someone
1093 * else pushes the log, push it ourselves before trying to get the lock.
1094 */
1095 void
1096 xfs_buf_lock(
1097 struct xfs_buf *bp)
1098 {
1099 trace_xfs_buf_lock(bp, _RET_IP_);
1100
1101 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
1102 xfs_log_force(bp->b_mount, 0);
1103 down(&bp->b_sema);
1104
1105 trace_xfs_buf_lock_done(bp, _RET_IP_);
1106 }
1107
1108 void
1109 xfs_buf_unlock(
1110 struct xfs_buf *bp)
1111 {
1112 ASSERT(xfs_buf_islocked(bp));
1113
1114 up(&bp->b_sema);
1115 trace_xfs_buf_unlock(bp, _RET_IP_);
1116 }
1117
1118 STATIC void
1119 xfs_buf_wait_unpin(
1120 xfs_buf_t *bp)
1121 {
1122 DECLARE_WAITQUEUE (wait, current);
1123
1124 if (atomic_read(&bp->b_pin_count) == 0)
1125 return;
1126
1127 add_wait_queue(&bp->b_waiters, &wait);
1128 for (;;) {
1129 set_current_state(TASK_UNINTERRUPTIBLE);
1130 if (atomic_read(&bp->b_pin_count) == 0)
1131 break;
1132 io_schedule();
1133 }
1134 remove_wait_queue(&bp->b_waiters, &wait);
1135 set_current_state(TASK_RUNNING);
1136 }
1137
1138 /*
1139 * Buffer Utility Routines
1140 */
1141
1142 void
1143 xfs_buf_ioend(
1144 struct xfs_buf *bp)
1145 {
1146 bool read = bp->b_flags & XBF_READ;
1147
1148 trace_xfs_buf_iodone(bp, _RET_IP_);
1149
1150 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1151
1152 /*
1153 * Pull in IO completion errors now. We are guaranteed to be running
1154 * single threaded, so we don't need the lock to read b_io_error.
1155 */
1156 if (!bp->b_error && bp->b_io_error)
1157 xfs_buf_ioerror(bp, bp->b_io_error);
1158
1159 /* Only validate buffers that were read without errors */
1160 if (read && !bp->b_error && bp->b_ops) {
1161 ASSERT(!bp->b_iodone);
1162 bp->b_ops->verify_read(bp);
1163 }
1164
1165 if (!bp->b_error) {
1166 bp->b_flags &= ~XBF_WRITE_FAIL;
1167 bp->b_flags |= XBF_DONE;
1168 }
1169
1170 if (bp->b_iodone)
1171 (*(bp->b_iodone))(bp);
1172 else if (bp->b_flags & XBF_ASYNC)
1173 xfs_buf_relse(bp);
1174 else
1175 complete(&bp->b_iowait);
1176 }
1177
1178 static void
1179 xfs_buf_ioend_work(
1180 struct work_struct *work)
1181 {
1182 struct xfs_buf *bp =
1183 container_of(work, xfs_buf_t, b_ioend_work);
1184
1185 xfs_buf_ioend(bp);
1186 }
1187
1188 static void
1189 xfs_buf_ioend_async(
1190 struct xfs_buf *bp)
1191 {
1192 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
1193 queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
1194 }
1195
1196 void
1197 __xfs_buf_ioerror(
1198 xfs_buf_t *bp,
1199 int error,
1200 xfs_failaddr_t failaddr)
1201 {
1202 ASSERT(error <= 0 && error >= -1000);
1203 bp->b_error = error;
1204 trace_xfs_buf_ioerror(bp, error, failaddr);
1205 }
1206
1207 void
1208 xfs_buf_ioerror_alert(
1209 struct xfs_buf *bp,
1210 const char *func)
1211 {
1212 xfs_alert(bp->b_mount,
1213 "metadata I/O error in \"%s\" at daddr 0x%llx len %d error %d",
1214 func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length,
1215 -bp->b_error);
1216 }
1217
1218 int
1219 xfs_bwrite(
1220 struct xfs_buf *bp)
1221 {
1222 int error;
1223
1224 ASSERT(xfs_buf_islocked(bp));
1225
1226 bp->b_flags |= XBF_WRITE;
1227 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
1228 XBF_DONE);
1229
1230 error = xfs_buf_submit(bp);
1231 if (error)
1232 xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
1233 return error;
1234 }
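/*
 * Hedged caller sketch: xfs_bwrite() is the synchronous write path, so the
 * buffer stays locked and owned by the caller across the call:
 *
 *	xfs_buf_lock(bp);
 *	// ... modify the contents at bp->b_addr ...
 *	error = xfs_bwrite(bp);
 *	xfs_buf_relse(bp);	// unlock and drop the caller's hold
 */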
1235
1236 static void
1237 xfs_buf_bio_end_io(
1238 struct bio *bio)
1239 {
1240 struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private;
1241
1242 /*
1243 * don't overwrite existing errors - otherwise we can lose errors on
1244 * buffers that require multiple bios to complete.
1245 */
1246 if (bio->bi_status) {
1247 int error = blk_status_to_errno(bio->bi_status);
1248
1249 cmpxchg(&bp->b_io_error, 0, error);
1250 }
1251
1252 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1253 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1254
1255 if (atomic_dec_and_test(&bp->b_io_remaining))
1256 xfs_buf_ioend_async(bp);
1257 bio_put(bio);
1258 }
1259
1260 static void
1261 xfs_buf_ioapply_map(
1262 struct xfs_buf *bp,
1263 int map,
1264 int *buf_offset,
1265 int *count,
1266 int op,
1267 int op_flags)
1268 {
1269 int page_index;
1270 int total_nr_pages = bp->b_page_count;
1271 int nr_pages;
1272 struct bio *bio;
1273 sector_t sector = bp->b_maps[map].bm_bn;
1274 int size;
1275 int offset;
1276
1277 /* skip the pages in the buffer before the start offset */
1278 page_index = 0;
1279 offset = *buf_offset;
1280 while (offset >= PAGE_SIZE) {
1281 page_index++;
1282 offset -= PAGE_SIZE;
1283 }
1284
1285 /*
1286 * Limit the IO size to the length of the current vector, and update the
1287 * remaining IO count for the next time around.
1288 */
1289 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1290 *count -= size;
1291 *buf_offset += size;
1292
1293 next_chunk:
1294 atomic_inc(&bp->b_io_remaining);
1295 nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
1296
1297 bio = bio_alloc(GFP_NOIO, nr_pages);
1298 bio_set_dev(bio, bp->b_target->bt_bdev);
1299 bio->bi_iter.bi_sector = sector;
1300 bio->bi_end_io = xfs_buf_bio_end_io;
1301 bio->bi_private = bp;
1302 bio_set_op_attrs(bio, op, op_flags);
1303
1304 for (; size && nr_pages; nr_pages--, page_index++) {
1305 int rbytes, nbytes = PAGE_SIZE - offset;
1306
1307 if (nbytes > size)
1308 nbytes = size;
1309
1310 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1311 offset);
1312 if (rbytes < nbytes)
1313 break;
1314
1315 offset = 0;
1316 sector += BTOBB(nbytes);
1317 size -= nbytes;
1318 total_nr_pages--;
1319 }
1320
1321 if (likely(bio->bi_iter.bi_size)) {
1322 if (xfs_buf_is_vmapped(bp)) {
1323 flush_kernel_vmap_range(bp->b_addr,
1324 xfs_buf_vmap_len(bp));
1325 }
1326 submit_bio(bio);
1327 if (size)
1328 goto next_chunk;
1329 } else {
1330 /*
1331 * This is guaranteed not to be the last io reference count
1332 * because the caller (xfs_buf_submit) holds a count itself.
1333 */
1334 atomic_dec(&bp->b_io_remaining);
1335 xfs_buf_ioerror(bp, -EIO);
1336 bio_put(bio);
1337 }
1338
1339 }
1340
1341 STATIC void
1342 _xfs_buf_ioapply(
1343 struct xfs_buf *bp)
1344 {
1345 struct blk_plug plug;
1346 int op;
1347 int op_flags = 0;
1348 int offset;
1349 int size;
1350 int i;
1351
1352 /*
1353 * Make sure we capture only current IO errors rather than stale errors
1354 * left over from previous use of the buffer (e.g. failed readahead).
1355 */
1356 bp->b_error = 0;
1357
1358 if (bp->b_flags & XBF_WRITE) {
1359 op = REQ_OP_WRITE;
1360
1361 /*
1362 * Run the write verifier callback function if it exists. If
1363 * this function fails it will mark the buffer with an error and
1364 * the IO should not be dispatched.
1365 */
1366 if (bp->b_ops) {
1367 bp->b_ops->verify_write(bp);
1368 if (bp->b_error) {
1369 xfs_force_shutdown(bp->b_mount,
1370 SHUTDOWN_CORRUPT_INCORE);
1371 return;
1372 }
1373 } else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
1374 struct xfs_mount *mp = bp->b_mount;
1375
1376 /*
1377 * non-crc filesystems don't attach verifiers during
1378 * log recovery, so don't warn for such filesystems.
1379 */
1380 if (xfs_sb_version_hascrc(&mp->m_sb)) {
1381 xfs_warn(mp,
1382 "%s: no buf ops on daddr 0x%llx len %d",
1383 __func__, bp->b_bn, bp->b_length);
1384 xfs_hex_dump(bp->b_addr,
1385 XFS_CORRUPTION_DUMP_LEN);
1386 dump_stack();
1387 }
1388 }
1389 } else if (bp->b_flags & XBF_READ_AHEAD) {
1390 op = REQ_OP_READ;
1391 op_flags = REQ_RAHEAD;
1392 } else {
1393 op = REQ_OP_READ;
1394 }
1395
1396 /* we only use the buffer cache for meta-data */
1397 op_flags |= REQ_META;
1398
1399 /*
1400 * Walk all the vectors issuing IO on them. Set up the initial offset
1401 * into the buffer and the desired IO size before we start -
1402 * xfs_buf_ioapply_map() will modify them appropriately for each
1403 * subsequent call.
1404 */
1405 offset = bp->b_offset;
1406 size = BBTOB(bp->b_length);
1407 blk_start_plug(&plug);
1408 for (i = 0; i < bp->b_map_count; i++) {
1409 xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags);
1410 if (bp->b_error)
1411 break;
1412 if (size <= 0)
1413 break; /* all done */
1414 }
1415 blk_finish_plug(&plug);
1416 }
1417
1418 /*
1419 * Wait for I/O completion of a sync buffer and return the I/O error code.
1420 */
1421 static int
1422 xfs_buf_iowait(
1423 struct xfs_buf *bp)
1424 {
1425 ASSERT(!(bp->b_flags & XBF_ASYNC));
1426
1427 trace_xfs_buf_iowait(bp, _RET_IP_);
1428 wait_for_completion(&bp->b_iowait);
1429 trace_xfs_buf_iowait_done(bp, _RET_IP_);
1430
1431 return bp->b_error;
1432 }
1433
1434 /*
1435 * Buffer I/O submission path, read or write. Asynchronous submission transfers
1436 * the buffer lock ownership and the current reference to the IO. It is not
1437 * safe to reference the buffer after a call to this function unless the caller
1438 * holds an additional reference itself.
1439 */
1440 int
1441 __xfs_buf_submit(
1442 struct xfs_buf *bp,
1443 bool wait)
1444 {
1445 int error = 0;
1446
1447 trace_xfs_buf_submit(bp, _RET_IP_);
1448
1449 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1450
1451 /* on shutdown we stale and complete the buffer immediately */
1452 if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
1453 xfs_buf_ioerror(bp, -EIO);
1454 bp->b_flags &= ~XBF_DONE;
1455 xfs_buf_stale(bp);
1456 xfs_buf_ioend(bp);
1457 return -EIO;
1458 }
1459
1460 /*
1461 * Grab a reference so the buffer does not go away underneath us. For
1462 * async buffers, I/O completion drops the caller's reference, which
1463 * could occur before submission returns.
1464 */
1465 xfs_buf_hold(bp);
1466
1467 if (bp->b_flags & XBF_WRITE)
1468 xfs_buf_wait_unpin(bp);
1469
1470 /* clear the internal error state to avoid spurious errors */
1471 bp->b_io_error = 0;
1472
1473 /*
1474 * Set the count to 1 initially, so that an I/O completion callout
1475 * which happens before we have started all the I/O cannot call
1476 * xfs_buf_ioend too early.
1477 */
1478 atomic_set(&bp->b_io_remaining, 1);
1479 if (bp->b_flags & XBF_ASYNC)
1480 xfs_buf_ioacct_inc(bp);
1481 _xfs_buf_ioapply(bp);
1482
1483 /*
1484 * If _xfs_buf_ioapply failed, we can get back here with only the IO
1485 * reference we took above. If we drop it to zero, run completion so
1486 * that we don't return to the caller with completion still pending.
1487 */
1488 if (atomic_dec_and_test(&bp->b_io_remaining)) {
1489 if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
1490 xfs_buf_ioend(bp);
1491 else
1492 xfs_buf_ioend_async(bp);
1493 }
1494
1495 if (wait)
1496 error = xfs_buf_iowait(bp);
1497
1498 /*
1499 * Release the hold that keeps the buffer referenced for the entire
1500 * I/O. Note that if the buffer is async, it is not safe to reference
1501 * after this release.
1502 */
1503 xfs_buf_rele(bp);
1504 return error;
1505 }
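/*
 * Hedged sketch of the reference rule documented above: async submission
 * consumes the caller's lock and reference, so a caller that wants to look
 * at the buffer afterwards must take its own hold first:
 *
 *	xfs_buf_hold(bp);		// keep bp valid past completion
 *	bp->b_flags |= XBF_ASYNC;
 *	__xfs_buf_submit(bp, false);	// may complete before it returns
 *	...				// bp may still be inspected
 *	xfs_buf_rele(bp);
 */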
1506
1507 void *
1508 xfs_buf_offset(
1509 struct xfs_buf *bp,
1510 size_t offset)
1511 {
1512 struct page *page;
1513
1514 if (bp->b_addr)
1515 return bp->b_addr + offset;
1516
1517 offset += bp->b_offset;
1518 page = bp->b_pages[offset >> PAGE_SHIFT];
1519 return page_address(page) + (offset & (PAGE_SIZE-1));
1520 }
1521
1522 void
1523 xfs_buf_zero(
1524 struct xfs_buf *bp,
1525 size_t boff,
1526 size_t bsize)
1527 {
1528 size_t bend;
1529
1530 bend = boff + bsize;
1531 while (boff < bend) {
1532 struct page *page;
1533 int page_index, page_offset, csize;
1534
1535 page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1536 page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1537 page = bp->b_pages[page_index];
1538 csize = min_t(size_t, PAGE_SIZE - page_offset,
1539 BBTOB(bp->b_length) - boff);
1540
1541 ASSERT((csize + page_offset) <= PAGE_SIZE);
1542
1543 memset(page_address(page) + page_offset, 0, csize);
1544
1545 boff += csize;
1546 }
1547 }
1548
1549 /*
1550 * Handling of buffer targets (buftargs).
1551 */
1552
1553 /*
1554 * Wait for any bufs with callbacks that have been submitted but have not yet
1555 * returned. These buffers will have an elevated hold count, so wait on those
1556 * while freeing all the buffers only held by the LRU.
1557 */
1558 static enum lru_status
1559 xfs_buftarg_wait_rele(
1560 struct list_head *item,
1561 struct list_lru_one *lru,
1562 spinlock_t *lru_lock,
1563 void *arg)
1564
1565 {
1566 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1567 struct list_head *dispose = arg;
1568
1569 if (atomic_read(&bp->b_hold) > 1) {
1570 /* need to wait, so skip it this pass */
1571 trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
1572 return LRU_SKIP;
1573 }
1574 if (!spin_trylock(&bp->b_lock))
1575 return LRU_SKIP;
1576
1577 /*
1578 * clear the LRU reference count so the buffer doesn't get
1579 * ignored in xfs_buf_rele().
1580 */
1581 atomic_set(&bp->b_lru_ref, 0);
1582 bp->b_state |= XFS_BSTATE_DISPOSE;
1583 list_lru_isolate_move(lru, item, dispose);
1584 spin_unlock(&bp->b_lock);
1585 return LRU_REMOVED;
1586 }
1587
1588 void
1589 xfs_wait_buftarg(
1590 struct xfs_buftarg *btp)
1591 {
1592 LIST_HEAD(dispose);
1593 int loop = 0;
1594
1595 /*
1596 * First wait on the buftarg I/O count for all in-flight buffers to be
1597 * released. This is critical as new buffers do not make the LRU until
1598 * they are released.
1599 *
1600 * Next, flush the buffer workqueue to ensure all completion processing
1601 * has finished. Just waiting on buffer locks is not sufficient for
1602 * async IO as the reference count held over IO is not released until
1603 * after the buffer lock is dropped. Hence we need to ensure here that
1604 * all reference counts have been dropped before we start walking the
1605 * LRU list.
1606 */
1607 while (percpu_counter_sum(&btp->bt_io_count))
1608 delay(100);
1609 flush_workqueue(btp->bt_mount->m_buf_workqueue);
1610
1611 /* loop until there is nothing left on the lru list. */
1612 while (list_lru_count(&btp->bt_lru)) {
1613 list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
1614 &dispose, LONG_MAX);
1615
1616 while (!list_empty(&dispose)) {
1617 struct xfs_buf *bp;
1618 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1619 list_del_init(&bp->b_lru);
1620 if (bp->b_flags & XBF_WRITE_FAIL) {
1621 xfs_alert(btp->bt_mount,
1622 "Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
1623 (long long)bp->b_bn);
1624 xfs_alert(btp->bt_mount,
1625 "Please run xfs_repair to determine the extent of the problem.");
1626 }
1627 xfs_buf_rele(bp);
1628 }
1629 if (loop++ != 0)
1630 delay(100);
1631 }
1632 }
1633
1634 static enum lru_status
1635 xfs_buftarg_isolate(
1636 struct list_head *item,
1637 struct list_lru_one *lru,
1638 spinlock_t *lru_lock,
1639 void *arg)
1640 {
1641 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1642 struct list_head *dispose = arg;
1643
1644 /*
1645 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1646 * If we fail to get the lock, just skip it.
1647 */
1648 if (!spin_trylock(&bp->b_lock))
1649 return LRU_SKIP;
1650 /*
1651 * Decrement the b_lru_ref count unless the value is already
1652 * zero. If the value is already zero, we need to reclaim the
1653 * buffer, otherwise it gets another trip through the LRU.
1654 */
1655 if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1656 spin_unlock(&bp->b_lock);
1657 return LRU_ROTATE;
1658 }
1659
1660 bp->b_state |= XFS_BSTATE_DISPOSE;
1661 list_lru_isolate_move(lru, item, dispose);
1662 spin_unlock(&bp->b_lock);
1663 return LRU_REMOVED;
1664 }
1665
1666 static unsigned long
1667 xfs_buftarg_shrink_scan(
1668 struct shrinker *shrink,
1669 struct shrink_control *sc)
1670 {
1671 struct xfs_buftarg *btp = container_of(shrink,
1672 struct xfs_buftarg, bt_shrinker);
1673 LIST_HEAD(dispose);
1674 unsigned long freed;
1675
1676 freed = list_lru_shrink_walk(&btp->bt_lru, sc,
1677 xfs_buftarg_isolate, &dispose);
1678
1679 while (!list_empty(&dispose)) {
1680 struct xfs_buf *bp;
1681 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1682 list_del_init(&bp->b_lru);
1683 xfs_buf_rele(bp);
1684 }
1685
1686 return freed;
1687 }
1688
1689 static unsigned long
1690 xfs_buftarg_shrink_count(
1691 struct shrinker *shrink,
1692 struct shrink_control *sc)
1693 {
1694 struct xfs_buftarg *btp = container_of(shrink,
1695 struct xfs_buftarg, bt_shrinker);
1696 return list_lru_shrink_count(&btp->bt_lru, sc);
1697 }
1698
1699 void
1700 xfs_free_buftarg(
1701 struct xfs_buftarg *btp)
1702 {
1703 unregister_shrinker(&btp->bt_shrinker);
1704 ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
1705 percpu_counter_destroy(&btp->bt_io_count);
1706 list_lru_destroy(&btp->bt_lru);
1707
1708 xfs_blkdev_issue_flush(btp);
1709
1710 kmem_free(btp);
1711 }
1712
1713 int
1714 xfs_setsize_buftarg(
1715 xfs_buftarg_t *btp,
1716 unsigned int sectorsize)
1717 {
1718 /* Set up metadata sector size info */
1719 btp->bt_meta_sectorsize = sectorsize;
1720 btp->bt_meta_sectormask = sectorsize - 1;
1721
1722 if (set_blocksize(btp->bt_bdev, sectorsize)) {
1723 xfs_warn(btp->bt_mount,
1724 "Cannot set_blocksize to %u on device %pg",
1725 sectorsize, btp->bt_bdev);
1726 return -EINVAL;
1727 }
1728
1729 /* Set up device logical sector size mask */
1730 btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
1731 btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;
1732
1733 return 0;
1734 }
1735
1736 /*
1737 * When allocating the initial buffer target we have not yet
1738 * read in the superblock, so don't know what sized sectors
1739 * are being used at this early stage. Play safe.
1740 */
1741 STATIC int
1742 xfs_setsize_buftarg_early(
1743 xfs_buftarg_t *btp,
1744 struct block_device *bdev)
1745 {
1746 return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
1747 }
1748
1749 xfs_buftarg_t *
1750 xfs_alloc_buftarg(
1751 struct xfs_mount *mp,
1752 struct block_device *bdev,
1753 struct dax_device *dax_dev)
1754 {
1755 xfs_buftarg_t *btp;
1756
1757 btp = kmem_zalloc(sizeof(*btp), KM_NOFS);
1758
1759 btp->bt_mount = mp;
1760 btp->bt_dev = bdev->bd_dev;
1761 btp->bt_bdev = bdev;
1762 btp->bt_daxdev = dax_dev;
1763
1764 if (xfs_setsize_buftarg_early(btp, bdev))
1765 goto error_free;
1766
1767 if (list_lru_init(&btp->bt_lru))
1768 goto error_free;
1769
1770 if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
1771 goto error_lru;
1772
1773 btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1774 btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
1775 btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1776 btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
1777 if (register_shrinker(&btp->bt_shrinker))
1778 goto error_pcpu;
1779 return btp;
1780
1781 error_pcpu:
1782 percpu_counter_destroy(&btp->bt_io_count);
1783 error_lru:
1784 list_lru_destroy(&btp->bt_lru);
1785 error_free:
1786 kmem_free(btp);
1787 return NULL;
1788 }
1789
1790 /*
1791 * Cancel a delayed write list.
1792 *
1793 * Remove each buffer from the list, clear the delwri queue flag and drop the
1794 * associated buffer reference.
1795 */
1796 void
1797 xfs_buf_delwri_cancel(
1798 struct list_head *list)
1799 {
1800 struct xfs_buf *bp;
1801
1802 while (!list_empty(list)) {
1803 bp = list_first_entry(list, struct xfs_buf, b_list);
1804
1805 xfs_buf_lock(bp);
1806 bp->b_flags &= ~_XBF_DELWRI_Q;
1807 list_del_init(&bp->b_list);
1808 xfs_buf_relse(bp);
1809 }
1810 }
1811
1812 /*
1813 * Add a buffer to the delayed write list.
1814 *
1815 * This queues a buffer for writeout if it hasn't already been. Note that
1816 * neither this routine nor the buffer list submission functions perform
1817 * any internal synchronization. It is expected that the lists are thread-local
1818 * to the callers.
1819 *
1820 * Returns true if we queued up the buffer, or false if it already had
1821 * been on the buffer list.
1822 */
1823 bool
1824 xfs_buf_delwri_queue(
1825 struct xfs_buf *bp,
1826 struct list_head *list)
1827 {
1828 ASSERT(xfs_buf_islocked(bp));
1829 ASSERT(!(bp->b_flags & XBF_READ));
1830
1831 /*
1832 * If the buffer is already marked delwri it already is queued up
1833 * by someone else for immediate writeout. Just ignore it in that
1834 * case.
1835 */
1836 if (bp->b_flags & _XBF_DELWRI_Q) {
1837 trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1838 return false;
1839 }
1840
1841 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1842
1843 /*
1844 * If a buffer gets written out synchronously or marked stale while it
1845 * is on a delwri list we lazily remove it. To do this, the other party
1846 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1847 * It remains referenced and on the list. In a rare corner case it
1848 * might get re-added to a delwri list after the synchronous writeout, in
1849 * which case we just need to re-add the flag here.
1850 */
1851 bp->b_flags |= _XBF_DELWRI_Q;
1852 if (list_empty(&bp->b_list)) {
1853 atomic_inc(&bp->b_hold);
1854 list_add_tail(&bp->b_list, list);
1855 }
1856
1857 return true;
1858 }
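/*
 * Hedged usage sketch: delwri lists are thread-local, so the usual pattern
 * is to build a private list, queue locked buffers onto it, and submit the
 * whole list in one go:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_lock(bp);
 *	xfs_buf_delwri_queue(bp, &buffer_list);	// takes its own hold
 *	xfs_buf_unlock(bp);
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */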
1859
1860 /*
1861 * The compare function is more complex than it needs to be because
1862 * the return value is only 32 bits and we are doing comparisons
1863 * on 64 bit values
1864 */
1865 static int
1866 xfs_buf_cmp(
1867 void *priv,
1868 struct list_head *a,
1869 struct list_head *b)
1870 {
1871 struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
1872 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
1873 xfs_daddr_t diff;
1874
1875 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
1876 if (diff < 0)
1877 return -1;
1878 if (diff > 0)
1879 return 1;
1880 return 0;
1881 }
1882
1883 /*
1884 * Submit buffers for write. If wait_list is specified, the buffers are
1885 * submitted using sync I/O and placed on the wait list such that the caller can
1886 * iowait each buffer. Otherwise async I/O is used and the buffers are released
1887 * at I/O completion time. In either case, buffers remain locked until I/O
1888 * completes and the buffer is released from the queue.
1889 */
1890 static int
1891 xfs_buf_delwri_submit_buffers(
1892 struct list_head *buffer_list,
1893 struct list_head *wait_list)
1894 {
1895 struct xfs_buf *bp, *n;
1896 int pinned = 0;
1897 struct blk_plug plug;
1898
1899 list_sort(NULL, buffer_list, xfs_buf_cmp);
1900
1901 blk_start_plug(&plug);
1902 list_for_each_entry_safe(bp, n, buffer_list, b_list) {
1903 if (!wait_list) {
1904 if (xfs_buf_ispinned(bp)) {
1905 pinned++;
1906 continue;
1907 }
1908 if (!xfs_buf_trylock(bp))
1909 continue;
1910 } else {
1911 xfs_buf_lock(bp);
1912 }
1913
1914 /*
1915 * Someone else might have written the buffer synchronously or
1916 * marked it stale in the meantime. In that case only the
1917 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
1918 * reference and remove it from the list here.
1919 */
1920 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1921 list_del_init(&bp->b_list);
1922 xfs_buf_relse(bp);
1923 continue;
1924 }
1925
1926 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1927
1928 /*
1929 * If we have a wait list, each buffer (and associated delwri
1930 * queue reference) transfers to it and is submitted
1931 * synchronously. Otherwise, drop the buffer from the delwri
1932 * queue and submit async.
1933 */
1934 bp->b_flags &= ~_XBF_DELWRI_Q;
1935 bp->b_flags |= XBF_WRITE;
1936 if (wait_list) {
1937 bp->b_flags &= ~XBF_ASYNC;
1938 list_move_tail(&bp->b_list, wait_list);
1939 } else {
1940 bp->b_flags |= XBF_ASYNC;
1941 list_del_init(&bp->b_list);
1942 }
1943 __xfs_buf_submit(bp, false);
1944 }
1945 blk_finish_plug(&plug);
1946
1947 return pinned;
1948 }
1949
1950 /*
1951 * Write out a buffer list asynchronously.
1952 *
1953 * This will take the @buffer_list, write all non-locked and non-pinned buffers
1954 * out and not wait for I/O completion on any of the buffers. This interface
1955 * is only safely usable for callers that can track I/O completion by higher
1956 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
1957 * function.
1958 *
1959 * Note: this function will skip buffers it would block on, and in doing so
1960 * leaves them on @buffer_list so they can be retried on a later pass. As such,
1961 * it is up to the caller to ensure that the buffer list is fully submitted or
1962 * cancelled appropriately when they are finished with the list. Failure to
1963 * cancel or resubmit the list until it is empty will result in leaked buffers
1964 * at unmount time.
1965 */
1966 int
1967 xfs_buf_delwri_submit_nowait(
1968 struct list_head *buffer_list)
1969 {
1970 return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
1971 }
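/*
 * Hedged caller sketch of the retry contract above (AIL-style): keep
 * pushing until the local list drains, and cancel whatever is left on
 * shutdown so that no delwri queue references leak:
 *
 *	while (!list_empty(&buffer_list)) {
 *		xfs_buf_delwri_submit_nowait(&buffer_list);
 *		if (XFS_FORCED_SHUTDOWN(mp)) {
 *			xfs_buf_delwri_cancel(&buffer_list);
 *			break;
 *		}
 *		delay(100);	// skipped buffers were pinned or locked
 *	}
 */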
1972
1973 /*
1974 * Write out a buffer list synchronously.
1975 *
1976 * This will take the @buffer_list, write all buffers out and wait for I/O
1977 * completion on all of the buffers. @buffer_list is consumed by the function,
1978 * so callers must have some other way of tracking buffers if they require such
1979 * functionality.
1980 */
1981 int
1982 xfs_buf_delwri_submit(
1983 struct list_head *buffer_list)
1984 {
1985 LIST_HEAD (wait_list);
1986 int error = 0, error2;
1987 struct xfs_buf *bp;
1988
1989 xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);
1990
1991 /* Wait for IO to complete. */
1992 while (!list_empty(&wait_list)) {
1993 bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
1994
1995 list_del_init(&bp->b_list);
1996
1997 /*
1998 * Wait on the locked buffer, check for errors and unlock and
1999 * release the delwri queue reference.
2000 */
2001 error2 = xfs_buf_iowait(bp);
2002 xfs_buf_relse(bp);
2003 if (!error)
2004 error = error2;
2005 }
2006
2007 return error;
2008 }
2009
2010 /*
2011 * Push a single buffer on a delwri queue.
2012 *
2013 * The purpose of this function is to submit a single buffer of a delwri queue
2014 * and return with the buffer still on the original queue. The waiting delwri
2015 * buffer submission infrastructure guarantees transfer of the delwri queue
2016 * buffer reference to a temporary wait list. We reuse this infrastructure to
2017 * transfer the buffer back to the original queue.
2018 *
2019 * Note the buffer transitions from the queued state, to the submitted and wait
2020 * listed state and back to the queued state during this call. The buffer
2021 * locking and queue management logic between _delwri_pushbuf() and
2022 * _delwri_queue() guarantee that the buffer cannot be queued to another list
2023 * before returning.
2024 */
2025 int
2026 xfs_buf_delwri_pushbuf(
2027 struct xfs_buf *bp,
2028 struct list_head *buffer_list)
2029 {
2030 LIST_HEAD (submit_list);
2031 int error;
2032
2033 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
2034
2035 trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);
2036
2037 /*
2038 * Isolate the buffer to a new local list so we can submit it for I/O
2039 * independently from the rest of the original list.
2040 */
2041 xfs_buf_lock(bp);
2042 list_move(&bp->b_list, &submit_list);
2043 xfs_buf_unlock(bp);
2044
2045 /*
2046 * Delwri submission clears the DELWRI_Q buffer flag and returns with
2047 * the buffer on the wait list with the original reference. Rather than
2048 * bounce the buffer from a local wait list back to the original list
2049 * after I/O completion, reuse the original list as the wait list.
2050 */
2051 xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);
2052
2053 /*
2054 * The buffer is now locked, under I/O and wait listed on the original
2055 * delwri queue. Wait for I/O completion, restore the DELWRI_Q flag and
2056 * return with the buffer unlocked and on the original queue.
2057 */
2058 error = xfs_buf_iowait(bp);
2059 bp->b_flags |= _XBF_DELWRI_Q;
2060 xfs_buf_unlock(bp);
2061
2062 return error;
2063 }
2064
2065 int __init
2066 xfs_buf_init(void)
2067 {
2068 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
2069 KM_ZONE_HWALIGN, NULL);
2070 if (!xfs_buf_zone)
2071 goto out;
2072
2073 return 0;
2074
2075 out:
2076 return -ENOMEM;
2077 }
2078
2079 void
2080 xfs_buf_terminate(void)
2081 {
2082 kmem_zone_destroy(xfs_buf_zone);
2083 }
2084
2085 void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
2086 {
2087 /*
2088 * Set the lru reference count to 0 based on the error injection tag.
2089 * This allows userspace to disrupt buffer caching for debug/testing
2090 * purposes.
2091 */
2092 if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF))
2093 lru_ref = 0;
2094
2095 atomic_set(&bp->b_lru_ref, lru_ref);
2096 }
2097
2098 /*
2099 * Verify an on-disk magic value against the magic value specified in the
2100 * verifier structure. The verifier magic is in disk byte order so the caller is
2101 * expected to pass the value directly from disk.
2102 */
2103 bool
2104 xfs_verify_magic(
2105 struct xfs_buf *bp,
2106 __be32 dmagic)
2107 {
2108 struct xfs_mount *mp = bp->b_mount;
2109 int idx;
2110
2111 idx = xfs_sb_version_hascrc(&mp->m_sb);
2112 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
2113 return false;
2114 return dmagic == bp->b_ops->magic[idx];
2115 }
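/*
 * Hedged verifier sketch; the structure and field names are hypothetical.
 * A read verifier compares the on-disk magic (already in disk byte order)
 * against b_ops->magic[] (index 0 for non-CRC, 1 for CRC filesystems):
 *
 *	static void
 *	example_verify_read(struct xfs_buf *bp)
 *	{
 *		struct example_hdr *hdr = bp->b_addr;
 *
 *		if (!xfs_verify_magic(bp, hdr->eh_magic))
 *			xfs_buf_ioerror(bp, -EFSCORRUPTED);
 *	}
 */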
2116 /*
2117 * Verify an on-disk magic value against the magic value specified in the
2118 * verifier structure. The verifier magic is in disk byte order so the caller is
2119 * expected to pass the value directly from disk.
2120 */
2121 bool
2122 xfs_verify_magic16(
2123 struct xfs_buf *bp,
2124 __be16 dmagic)
2125 {
2126 struct xfs_mount *mp = bp->b_mount;
2127 int idx;
2128
2129 idx = xfs_sb_version_hascrc(&mp->m_sb);
2130 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
2131 return false;
2132 return dmagic == bp->b_ops->magic16[idx];
2133 }