// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because
 * the bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	if (XFS_IS_REALTIME_INODE(ip))
		return XFS_FSB_TO_BB(ip->i_mount, fsb);
	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}

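/*
 * Illustrative sketch (not part of this file in the kernel tree): a
 * userspace model of the two conversions above, under an invented geometry.
 * Realtime files use a flat block number, so the disk address is just the
 * block shifted into 512-byte sectors; data-device blocks pack an AG number
 * into the high bits and must be linearised first. All names and constants
 * below are hypothetical, chosen only to make the arithmetic visible.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define BLOCKLOG	12			/* 4k filesystem blocks */
#define AGBLOCKS	((uint64_t)250000)	/* blocks per AG */
#define AGBLKLOG	18			/* bits for the AG-block part */

/* Models XFS_FSB_TO_BB(): realtime block to 512-byte basic blocks. */
static uint64_t fsb_to_bb(uint64_t fsb)
{
	return fsb << (BLOCKLOG - 9);
}

/* Models XFS_FSB_TO_DADDR(): split the packed fsb, linearise by AG. */
static uint64_t fsb_to_daddr(uint64_t fsb)
{
	uint64_t agno = fsb >> AGBLKLOG;
	uint64_t agbno = fsb & ((1ULL << AGBLKLOG) - 1);

	return (agno * AGBLOCKS + agbno) << (BLOCKLOG - 9);
}

int main(void)
{
	printf("rt fsb 100 -> daddr %llu\n",
	       (unsigned long long)fsb_to_bb(100));
	printf("data fsb (ag 1, blk 100) -> daddr %llu\n",
	       (unsigned long long)fsb_to_daddr((1ULL << AGBLKLOG) | 100));
	return 0;
}
#endif
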
/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(target->bt_bdev,
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, 0);
}

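/*
 * Illustrative sketch (not kernel code): the userspace analogue of
 * blkdev_issue_zeroout() is the BLKZEROOUT ioctl, which zeroes a byte range
 * on a block device and can offload to hardware discard/WRITE SAME. The
 * "- 9" shifts above perform the same fsblock-to-512-byte-sector scaling
 * that this sketch does with explicit byte offsets. The helper name and
 * parameters here are invented for the example.
 */
#if 0
#include <fcntl.h>
#include <linux/fs.h>		/* BLKZEROOUT */
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int zero_fsb_range(const char *bdev, uint64_t start_fsb,
			  uint64_t count_fsb, unsigned int blocklog)
{
	uint64_t range[2] = {
		start_fsb << blocklog,	/* start, in bytes */
		count_fsb << blocklog,	/* length, in bytes */
	};
	int fd = open(bdev, O_WRONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, BLKZEROOUT, &range);
	close(fd);
	return ret;
}
#endif
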
#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	xfs_fileoff_t		orig_offset = ap->offset;
	xfs_rtblock_t		rtb;
	xfs_extlen_t		prod = 0;  /* product factor for allocators */
	xfs_extlen_t		mod = 0;   /* product factor for allocators */
	xfs_extlen_t		ralen = 0; /* realtime allocation length */
	xfs_extlen_t		align;     /* minimum allocation alignment */
	xfs_extlen_t		orig_length = ap->length;
	xfs_extlen_t		minlen = mp->m_sb.sb_rextsize;
	xfs_extlen_t		raminlen;
	bool			rtlocked = false;
	bool			ignore_locality = false;
	int			error;

	align = xfs_get_extsz_hint(ap->ip);
retry:
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If we shifted the file offset downward to satisfy an extent size
	 * hint, increase minlen by that amount so that the allocator won't
	 * give us an allocation that's too short to cover at least one of the
	 * blocks that the caller asked for.
	 */
	if (ap->offset != orig_offset)
		minlen += orig_offset - ap->offset;

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	div_u64_rem(ap->offset, align, &mod);
	if (mod || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	if (!rtlocked) {
		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
		xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
		xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
		rtlocked = true;
	}

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t	rtx; /* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	if (ignore_locality)
		ap->blkno = 0;
	else
		do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	raminlen = max_t(xfs_extlen_t, 1, minlen / mp->m_sb.sb_rextsize);
	error = xfs_rtallocate_extent(ap->tp, ap->blkno, raminlen, ap->length,
			&ralen, ap->wasdel, prod, &rtb);
	if (error)
		return error;

	if (rtb != NULLRTBLOCK) {
		ap->blkno = rtb * mp->m_sb.sb_rextsize;
		ap->length = ralen * mp->m_sb.sb_rextsize;
		ap->ip->i_nblocks += ap->length;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ap->length;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, ap->length);
		return 0;
	}

	if (align > mp->m_sb.sb_rextsize) {
		/*
		 * We previously enlarged the request length to try to satisfy
		 * an extent size hint. The allocator didn't return anything,
		 * so reset the parameters to the original values and try again
		 * without alignment criteria.
		 */
		ap->offset = orig_offset;
		ap->length = orig_length;
		minlen = align = mp->m_sb.sb_rextsize;
		goto retry;
	}

	if (!ignore_locality && ap->blkno != 0) {
		/*
		 * If we can't allocate near a specific rt extent, try again
		 * without locality criteria.
		 */
		ignore_locality = true;
		goto retry;
	}

	ap->blkno = NULLFSBLOCK;
	ap->length = 0;
	return 0;
}
#endif /* CONFIG_XFS_RT */

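/*
 * Illustrative sketch (not kernel code): the retry strategy above in
 * miniature. The allocator is tried with the strictest criteria first and
 * each failure relaxes exactly one constraint before jumping back, so the
 * loop terminates once both alignment and locality have been dropped.
 * try_alloc() is a hypothetical stand-in for xfs_rtallocate_extent().
 */
#if 0
#include <stdbool.h>

extern long try_alloc(long target, long align);	/* hypothetical */

static long alloc_with_fallback(long target, long align, long min_align)
{
	bool ignore_locality = false;
	long got;

retry:
	got = try_alloc(ignore_locality ? 0 : target, align);
	if (got >= 0)
		return got;
	if (align > min_align) {		/* drop the extent size hint */
		align = min_align;
		goto retry;
	}
	if (!ignore_locality && target != 0) {	/* drop the locality target */
		ignore_locality = true;
		goto retry;
	}
	return -1;				/* genuinely out of space */
}
#endif
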
/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.  Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_filblks_t		*count)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		numrecs = 0;

	for_each_xfs_iext(ifp, &icur, &got) {
		if (!isnullstartblock(got.br_startblock)) {
			*count += got.br_blockcount;
			numrecs++;
		}
	}

	return numrecs;
}

/*
 * Count fsblocks of the given fork.  Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur;
	xfs_extlen_t		btblocks = 0;
	int			error;

	*nextents = 0;
	*count = 0;

	if (!ifp)
		return 0;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;

		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		error = xfs_btree_count_blocks(cur, &btblocks);
		xfs_btree_del_cursor(cur, error);
		if (error)
			return error;

		/*
		 * xfs_btree_count_blocks includes the root block contained in
		 * the inode fork in @btblocks, so subtract one because we're
		 * only interested in allocated disk blocks.
		 */
		*count += btblocks - 1;

		fallthrough;
	case XFS_DINODE_FMT_EXTENTS:
		*nextents = xfs_bmap_count_leaves(ifp, count);
		break;
	}

	return 0;
}

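/*
 * Illustrative sketch (not kernel code): counting the extents and mapped
 * bytes of a file from userspace with the generic FIEMAP ioctl, which
 * reports the same mappings these routines walk in-kernel. Delalloc
 * extents carry FIEMAP_EXTENT_DELALLOC and are skipped here to mirror the
 * comment above. For brevity this only fetches one batch of extents.
 */
#if 0
#include <fcntl.h>
#include <linux/fiemap.h>
#include <linux/fs.h>		/* FS_IOC_FIEMAP */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

#define NEXT	128

int main(int argc, char **argv)
{
	struct fiemap *fm = calloc(1, sizeof(*fm) +
				   NEXT * sizeof(struct fiemap_extent));
	uint64_t bytes = 0, nextents = 0;
	unsigned int i;
	int fd;

	if (argc < 2 || !fm || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;

	fm->fm_length = ~0ULL;		/* whole file */
	fm->fm_extent_count = NEXT;
	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
		return 1;

	for (i = 0; i < fm->fm_mapped_extents; i++) {
		if (fm->fm_extents[i].fe_flags & FIEMAP_EXTENT_DELALLOC)
			continue;
		nextents++;
		bytes += fm->fm_extents[i].fe_length;
	}
	printf("%llu extents, %llu bytes mapped\n",
	       (unsigned long long)nextents, (unsigned long long)bytes);
	return 0;
}
#endif
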
static int
xfs_getbmap_report_one(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	struct xfs_bmbt_irec	*got)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;
	bool			shared = false;
	int			error;

	error = xfs_reflink_trim_around_shared(ip, got, &shared);
	if (error)
		return error;

	if (isnullstartblock(got->br_startblock) ||
	    got->br_startblock == DELAYSTARTBLOCK) {
		/*
		 * Delalloc extents that start beyond EOF can occur due to
		 * speculative EOF allocation when the delalloc extent is larger
		 * than the largest freespace extent at conversion time. These
		 * extents cannot be converted by data writeback, so can exist
		 * here even if we are not supposed to be finding delalloc
		 * extents.
		 */
		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);

		p->bmv_oflags |= BMV_OF_DELALLOC;
		p->bmv_block = -2;
	} else {
		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
	}

	if (got->br_state == XFS_EXT_UNWRITTEN &&
	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
		p->bmv_oflags |= BMV_OF_PREALLOC;

	if (shared)
		p->bmv_oflags |= BMV_OF_SHARED;

	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
	return 0;
}

static void
xfs_getbmap_report_hole(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	xfs_fileoff_t		bno,
	xfs_fileoff_t		end)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;

	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
		return;

	p->bmv_block = -1;
	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
}

static inline bool
xfs_getbmap_full(
	struct getbmapx		*bmv)
{
	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}

static bool
xfs_getbmap_next_rec(
	struct xfs_bmbt_irec	*rec,
	xfs_fileoff_t		total_end)
{
	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;

	if (end == total_end)
		return false;

	rec->br_startoff += rec->br_blockcount;
	if (!isnullstartblock(rec->br_startblock) &&
	    rec->br_startblock != DELAYSTARTBLOCK)
		rec->br_startblock += rec->br_blockcount;
	rec->br_blockcount = total_end - end;
	return true;
}

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	struct kgetbmap		*out)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			iflags = bmv->bmv_iflags;
	int			whichfork, lock, error = 0;
	int64_t			bmv_end, max_len;
	xfs_fileoff_t		bno, first_bno;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got, rec;
	xfs_filblks_t		len;
	struct xfs_iext_cursor	icur;

	if (bmv->bmv_iflags & ~BMV_IF_VALID)
		return -EINVAL;
#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (bmv->bmv_length < -1)
		return -EINVAL;
	bmv->bmv_entries = 0;
	if (bmv->bmv_length == 0)
		return 0;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;
	ifp = XFS_IFORK_PTR(ip, whichfork);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	switch (whichfork) {
	case XFS_ATTR_FORK:
		if (!XFS_IFORK_Q(ip))
			goto out_unlock_iolock;

		max_len = 1LL << 32;
		lock = xfs_ilock_attr_map_shared(ip);
		break;
	case XFS_COW_FORK:
		/* No CoW fork? Just return */
		if (!ifp)
			goto out_unlock_iolock;

		if (xfs_get_cowextsz_hint(ip))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);
		break;
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation. These are not removed
			 * until the release function is called or the inode
			 * is inactivated. Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		if (xfs_get_extsz_hint(ip) ||
		    (ip->i_diflags &
		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = xfs_ilock_data_map_shared(ip);
		break;
	}

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		break;
	case XFS_DINODE_FMT_LOCAL:
		/* Local format inode forks report no extents. */
		goto out_unlock_ilock;
	default:
		error = -EINVAL;
		goto out_unlock_ilock;
	}

	if (bmv->bmv_length == -1) {
		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
	}

	bmv_end = bmv->bmv_offset + bmv->bmv_length;

	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

	error = xfs_iread_extents(NULL, ip, whichfork);
	if (error)
		goto out_unlock_ilock;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/*
		 * Report a whole-file hole if the delalloc flag is set to
		 * stay compatible with the old implementation.
		 */
		if (iflags & BMV_IF_DELALLOC)
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		goto out_unlock_ilock;
	}

	while (!xfs_getbmap_full(bmv)) {
		xfs_trim_extent(&got, first_bno, len);

		/*
		 * Report an entry for a hole if this extent doesn't directly
		 * follow the previous one.
		 */
		if (got.br_startoff > bno) {
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					got.br_startoff);
			if (xfs_getbmap_full(bmv))
				break;
		}

		/*
		 * In order to report shared extents accurately, we report each
		 * distinct shared / unshared part of a single bmbt record with
		 * an individual getbmapx record.
		 */
		bno = got.br_startoff + got.br_blockcount;
		rec = got;
		do {
			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
					&rec);
			if (error || xfs_getbmap_full(bmv))
				goto out_unlock_ilock;
		} while (xfs_getbmap_next_rec(&rec, bno));

		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;

			if (whichfork != XFS_ATTR_FORK && bno < end &&
			    !xfs_getbmap_full(bmv)) {
				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
						bno, end);
			}
			break;
		}

		if (bno >= first_bno + len)
			break;
	}

out_unlock_ilock:
	xfs_iunlock(ip, lock);
out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}

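/*
 * Illustrative sketch (not kernel code): driving this function from
 * userspace via the XFS_IOC_GETBMAPX ioctl, the path xfs_bmap(8) uses. The
 * first array element is the control structure; results land in the
 * elements after it, with offsets and lengths in 512-byte units. Assumes
 * the xfsprogs <xfs/xfs.h> header is installed.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>

#define NENT	64

int main(int argc, char **argv)
{
	struct getbmapx map[NENT + 1] = { { 0 } };
	int fd, i;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;

	map[0].bmv_length = -1;		/* whole file */
	map[0].bmv_count = NENT + 1;	/* slots, including the header */

	if (ioctl(fd, XFS_IOC_GETBMAPX, map) < 0)
		return 1;
	for (i = 1; i <= map[0].bmv_entries; i++)
		printf("[%d] offset %lld len %lld block %lld oflags %x\n", i,
		       (long long)map[i].bmv_offset,
		       (long long)map[i].bmv_length,
		       (long long)map[i].bmv_block,
		       map[i].bmv_oflags);
	return 0;
}
#endif
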
/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode.  This will always punch out both the start and end blocks, even
 * if the ranges only partially overlap them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	struct xfs_ifork	*ifp = &ip->i_df;
	xfs_fileoff_t		end_fsb = start_fsb + length;
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	ASSERT(!xfs_need_iread_extents(ifp));

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		goto out_unlock;

	while (got.br_startoff + got.br_blockcount > start_fsb) {
		del = got;
		xfs_trim_extent(&del, start_fsb, length);

		/*
		 * A delete can push the cursor forward. Step back to the
		 * previous extent on non-delalloc or extents outside the
		 * target range.
		 */
		if (!del.br_blockcount ||
		    !isnullstartblock(del.br_startblock)) {
			if (!xfs_iext_prev_extent(ifp, &icur, &got))
				break;
			continue;
		}

		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
						  &got, &del);
		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(
	struct xfs_inode	*ip,
	bool			force)
{
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	int			nimaps = 1;
	int			error;

	/*
	 * Caller must either hold the exclusive io lock; or be inactivating
	 * the inode, which guarantees there are no other users of the inode.
	 */
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL) ||
	       (VFS_I(ip)->i_state & I_FREEING));

	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and no delalloc blocks will
	 * not have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (xfs_need_iread_extents(&ip->i_df))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	/*
	 * Do not try to free post-EOF blocks if EOF is beyond the end of the
	 * range supported by the page cache, because the truncation will loop
	 * forever.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return false;

	/*
	 * Look up the mapping for the first block past EOF.  If we can't find
	 * it, there's nothing to free.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
			0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (error || nimaps == 0)
		return false;

	/*
	 * If there's a real mapping there or there are delayed allocation
	 * reservations, then we have post-EOF blocks to try to free.
	 */
	return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
}

/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/* Attach the dquots to the inode up front. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/* Wait on dio to ensure i_size has settled. */
	inode_dio_wait(VFS_I(ip));

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		ASSERT(xfs_is_shutdown(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Do not update the on-disk file size.  If we update the on-disk file
	 * size and then the system crashes before the contents of the file are
	 * flushed to disk then the files may be full of holes (ie NULL files
	 * bug).
	 */
	error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
				XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
	if (error)
		goto err_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	xfs_inode_clear_eofblocks_tag(ip);
	goto out_unlock;

err_cancel:
	/*
	 * If we get an error at this point we simply don't
	 * bother truncating the file.
	 */
	xfs_trans_cancel(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			nimaps;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
	allocatesize_fsb = endoffset_fsb - startoffset_fsb;

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;
		unsigned int	dblocks, rblocks, resblks;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			div_u64_rem(startoffset_fsb, extsz, &temp);
			if (temp)
				e += temp;
			div_u64_rem(e, extsz, &temp);
			if (temp)
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			rblocks = resblks;
		} else {
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			rblocks = 0;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
				dblocks, rblocks, false, &tp);
		if (error)
			break;

		error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
				XFS_IEXT_ADD_NOSPLIT_CNT);
		if (error)
			goto error;

		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
				allocatesize_fsb, alloc_type, 0, imapp,
				&nimaps);
		if (error)
			goto error;

		/*
		 * Complete the transaction
		 */
		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

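/*
 * Illustrative sketch (not kernel code): this function backs preallocation
 * requests such as fallocate(2) in mode 0, which allocates unwritten
 * extents without changing the visible file contents; reads in the range
 * still return zeroes until the blocks are written.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>

/* Preallocate 1 GiB at offset 0 and extend i_size to cover it. */
static int prealloc_gig(int fd)
{
	return fallocate(fd, 0, 0, 1024LL * 1024 * 1024);
}
#endif
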
static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
			false, &tp);
	if (error)
		return error;

	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
			XFS_IEXT_PUNCH_HOLE_CNT);
	if (error)
		goto out_trans_cancel;

	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

/* Caller must first wait for the completion of any pending DIOs if required. */
int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	rounding = max_t(xfs_off_t, mp->m_sb.sb_blocksize, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}

int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/* We can only free complete realtime extents. */
	if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
		startoffset_fsb = roundup_64(startoffset_fsb,
					     mp->m_sb.sb_rextsize);
		endoffset_fsb = rounddown_64(endoffset_fsb,
					     mp->m_sb.sb_rextsize);
	}

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 */
	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end.  iomap_zero_range is smart
	 * enough to skip any holes, including those we just created, but we
	 * must take care not to zero beyond EOF and enlarge i_size.
	 */
	if (offset >= XFS_ISIZE(ip))
		return 0;
	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;
	error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
			&xfs_buffered_write_iomap_ops);
	if (error)
		return error;

	/*
	 * If we zeroed right up to EOF and EOF straddles a page boundary we
	 * must make sure that the post-EOF area is also zeroed because the
	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
	 * Writeback of the eof page will do this, albeit clumsily.
	 */
	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
	}

	return error;
}

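/*
 * Illustrative sketch (not kernel code): this function implements the
 * FALLOC_FL_PUNCH_HOLE side of fallocate(2). Punching must keep the file
 * size, hence the mandatory FALLOC_FL_KEEP_SIZE; sub-block edges of the
 * range are zeroed rather than freed, exactly as described above.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

static int punch(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 offset, len);
}
#endif
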
static int
xfs_prepare_shift(
	struct xfs_inode	*ip,
	loff_t			offset)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(ip);
		if (error)
			return error;
	}

	/*
	 * Shift operations must stabilize the start block offset boundary along
	 * with the full range of the operation. If we don't, a COW writeback
	 * completion could race with an insert, front merge with the start
	 * extent (after split) during the shift and corrupt the file. Start
	 * with the block just prior to the start to stabilize the boundary.
	 */
	offset = round_down(offset, mp->m_sb.sb_blocksize);
	if (offset)
		offset -= mp->m_sb.sb_blocksize;

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
	if (error)
		return error;

	/*
	 * Clean out anything hanging around in the cow fork now that
	 * we've flushed all the dirty data out to disk to avoid having
	 * CoW extents at the wrong offsets.
	 */
	if (xfs_inode_has_cow_data(ip)) {
		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
				true);
		if (error)
			return error;
	}

	return 0;
}

/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shifts extents for the given file.
 *	The first thing we do is free the data blocks in the specified range
 *	by calling xfs_free_file_space(), which also syncs dirty data and
 *	invalidates the page cache over the region on which the collapse
 *	range is working. Then we shift the extent records to the left to
 *	cover the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	while (!done) {
		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
				&done);
		if (error)
			goto out_trans_cancel;
		if (done)
			break;

		/* finish any deferred frees and roll the transaction */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out_trans_cancel;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

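/*
 * Illustrative sketch (not kernel code): the fallocate(2) entry point for
 * the routine above. Both offset and len must be multiples of the
 * filesystem block size, and the file shrinks by len bytes.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

static int collapse(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len);
}
#endif
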
/*
 * xfs_insert_file_space()
 *	This routine creates hole space by shifting extents for the given
 *	file. The first thing we do is sync dirty data and invalidate the
 *	page cache over the region on which the insert range is working.
 *	Then we split an extent into two at the given offset by calling
 *	xfs_bmap_split_extent, and shift all extent records lying between
 *	[offset, last allocated extent] to the right to make room for the
 *	hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_insert_file_space(ip);

	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
			XFS_IEXT_PUNCH_HOLE_CNT);
	if (error)
		goto out_trans_cancel;

	/*
	 * The extent shifting code works on extent granularity. So, if stop_fsb
	 * is not the starting block of extent, we need to split the extent at
	 * stop_fsb.
	 */
	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
	if (error)
		goto out_trans_cancel;

	do {
		error = xfs_defer_finish(&tp);
		if (error)
			goto out_trans_cancel;

		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb);
		if (error)
			goto out_trans_cancel;
	} while (!done);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

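/*
 * Illustrative sketch (not kernel code): the matching fallocate(2) call.
 * FALLOC_FL_INSERT_RANGE grows the file by len bytes and leaves a hole at
 * offset; as with collapse, offset and len must be block aligned.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

static int insert_hole(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_INSERT_RANGE, offset, len);
}
#endif
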
/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip)	/* tmp inode */
{
	struct xfs_ifork	*ifp = &ip->i_df;
	struct xfs_ifork	*tifp = &tip->i_df;

	/* User/group/project quota ids must match if quotas are enforced. */
	if (XFS_IS_QUOTA_ON(ip->i_mount) &&
	    (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
	     !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
	     ip->i_projid != tip->i_projid))
		return -EINVAL;

	/* Should never get a local format */
	if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
	    tifp->if_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * If the target inode has fewer extents than the temporary inode,
	 * why did userspace call us?
	 */
	if (ifp->if_nextents < tifp->if_nextents)
		return -EINVAL;

	/*
	 * If we have to use the (expensive) rmap swap method, we can
	 * handle any number of extents and any format.
	 */
	if (xfs_has_rmapbt(ip->i_mount))
		return 0;

	/*
	 * If the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format,
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    tifp->if_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_Q(ip) &&
		    XFS_BMAP_BMDR_SPACE(tifp->if_broot) > XFS_IFORK_BOFF(ip))
			return -EINVAL;
		if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_Q(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return -EINVAL;
		if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}

static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int	error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;
	return 0;
}

/*
 * Move extents from one file to another, when rmap is enabled.
 */
STATIC int
xfs_swap_extent_rmap(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip)
{
	struct xfs_trans	*tp = *tpp;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	uirec;
	struct xfs_bmbt_irec	tirec;
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	xfs_filblks_t		count_fsb;
	int			error;
	xfs_filblks_t		ilen;
	xfs_filblks_t		rlen;
	int			nimaps;
	uint64_t		tip_flags2;

	/*
	 * If the source file has shared blocks, we must flag the donor
	 * file as having shared blocks so that we get the shared-block
	 * rmap functions when we go to fix up the rmaps.  The flags
	 * will be switched for real later.
	 */
	tip_flags2 = tip->i_diflags2;
	if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
		tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;

	offset_fsb = 0;
	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);

	while (count_fsb) {
		/* Read extent from the donor file */
		nimaps = 1;
		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
				&nimaps, 0);
		if (error)
			goto out;
		ASSERT(nimaps == 1);
		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);

		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
		ilen = tirec.br_blockcount;

		/* Unmap the old blocks in the source file. */
		while (tirec.br_blockcount) {
			ASSERT(tp->t_firstblock == NULLFSBLOCK);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);

			/* Read extent from the source file */
			nimaps = 1;
			error = xfs_bmapi_read(ip, tirec.br_startoff,
					tirec.br_blockcount, &irec,
					&nimaps, 0);
			if (error)
				goto out;
			ASSERT(nimaps == 1);
			ASSERT(tirec.br_startoff == irec.br_startoff);
			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);

			/* Trim the extent. */
			uirec = tirec;
			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
					tirec.br_blockcount,
					irec.br_blockcount);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);

			if (xfs_bmap_is_real_extent(&uirec)) {
				error = xfs_iext_count_may_overflow(ip,
						XFS_DATA_FORK,
						XFS_IEXT_SWAP_RMAP_CNT);
				if (error)
					goto out;
			}

			if (xfs_bmap_is_real_extent(&irec)) {
				error = xfs_iext_count_may_overflow(tip,
						XFS_DATA_FORK,
						XFS_IEXT_SWAP_RMAP_CNT);
				if (error)
					goto out;
			}

			/* Remove the mapping from the donor file. */
			xfs_bmap_unmap_extent(tp, tip, &uirec);

			/* Remove the mapping from the source file. */
			xfs_bmap_unmap_extent(tp, ip, &irec);

			/* Map the donor file's blocks into the source file. */
			xfs_bmap_map_extent(tp, ip, &uirec);

			/* Map the source file's blocks into the donor file. */
			xfs_bmap_map_extent(tp, tip, &irec);

			error = xfs_defer_finish(tpp);
			tp = *tpp;
			if (error)
				goto out;

			tirec.br_startoff += rlen;
			if (tirec.br_startblock != HOLESTARTBLOCK &&
			    tirec.br_startblock != DELAYSTARTBLOCK)
				tirec.br_startblock += rlen;
			tirec.br_blockcount -= rlen;
		}

		/* Roll on... */
		count_fsb -= ilen;
		offset_fsb += ilen;
	}

	tip->i_diflags2 = tip_flags2;
	return 0;

out:
	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
	tip->i_diflags2 = tip_flags2;
	return error;
}

/* Swap the extents of two files by swapping data forks. */
STATIC int
xfs_swap_extent_forks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip,
	int			*src_log_flags,
	int			*target_log_flags)
{
	xfs_filblks_t		aforkblks = 0;
	xfs_filblks_t		taforkblks = 0;
	xfs_extnum_t		junk;
	uint64_t		tmp;
	int			error;

	/*
	 * Count the number of extended attribute blocks
	 */
	if (XFS_IFORK_Q(ip) && ip->i_afp->if_nextents > 0 &&
	    ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
				&aforkblks);
		if (error)
			return error;
	}
	if (XFS_IFORK_Q(tip) && tip->i_afp->if_nextents > 0 &&
	    tip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
				&taforkblks);
		if (error)
			return error;
	}

	/*
	 * Btree format (v3) inodes have the inode number stamped in the bmbt
	 * block headers. We can't start changing the bmbt blocks until the
	 * inode owner change is logged so recovery does the right thing in the
	 * event of a crash. Set the owner change log flags now and leave the
	 * bmbt scan as the last step.
	 */
	if (xfs_has_v3inodes(ip->i_mount)) {
		if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
			(*target_log_flags) |= XFS_ILOG_DOWNER;
		if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
			(*src_log_flags) |= XFS_ILOG_DOWNER;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	swap(ip->i_df, tip->i_df);

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (uint64_t)ip->i_nblocks;
	ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
	tip->i_nblocks = tmp + taforkblks - aforkblks;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*src_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
		       (*src_log_flags & XFS_ILOG_DOWNER));
		(*src_log_flags) |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*target_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		(*target_log_flags) |= XFS_ILOG_DBROOT;
		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
		       (*target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	return 0;
}

/*
 * Fix up the owners of the bmbt blocks to refer to the current inode. The
 * change owner scan attempts to order all modified buffers in the current
 * transaction. In the event of ordered buffer failure, the offending buffer is
 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
 * the transaction in this case to replenish the fallback log reservation and
 * restart the scan. This process repeats until the scan completes.
 */
static int
xfs_swap_change_owner(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tmpip)
{
	int			error;
	struct xfs_trans	*tp = *tpp;

	do {
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
					      NULL);
		/* success or fatal error */
		if (error != -EAGAIN)
			break;

		error = xfs_trans_roll(tpp);
		if (error)
			break;
		tp = *tpp;

		/*
		 * Redirty both inodes so they can relog and keep the log tail
		 * moving forward.
		 */
		xfs_trans_ijoin(tp, ip, 0);
		xfs_trans_ijoin(tp, tmpip, 0);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
	} while (true);

	return error;
}

int
xfs_swap_extents(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip,	/* tmp inode */
	struct xfs_swapext	*sxp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_bstat	*sbp = &sxp->sx_stat;
	int			src_log_flags, target_log_flags;
	int			error = 0;
	uint64_t		f;
	int			resblks = 0;
	unsigned int		flags = 0;

	/*
	 * Lock the inodes against other IO, page faults and truncate to
	 * begin with.  Then we can ensure the inodes are flushed and have no
	 * page cache safely. Once we have done this we can take the ilocks and
	 * do the rest of the checks.
	 */
	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	filemap_invalidate_lock_two(VFS_I(ip)->i_mapping,
				    VFS_I(tip)->i_mapping);

	/* Verify that both files have the same format */
	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		goto out_unlock;

	error = xfs_qm_dqattach(tip);
	if (error)
		goto out_unlock;

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	if (xfs_inode_has_cow_data(tip)) {
		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
		if (error)
			goto out_unlock;
	}

	/*
	 * Extent "swapping" with rmap requires a permanent reservation and
	 * a block reservation because it's really just a remap operation
	 * performed with log redo items!
	 */
	if (xfs_has_rmapbt(mp)) {
		int		w = XFS_DATA_FORK;
		uint32_t	ipnext = ip->i_df.if_nextents;
		uint32_t	tipnext = tip->i_df.if_nextents;

		/*
		 * Conceptually this shouldn't affect the shape of either bmbt,
		 * but since we atomically move extents one by one, we reserve
		 * enough space to rebuild both trees.
		 */
		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
		resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);

		/*
		 * If either inode straddles a bmapbt block allocation boundary,
		 * the rmapbt algorithm triggers repeated allocs and frees as
		 * extents are remapped. This can exhaust the block reservation
		 * prematurely and cause shutdown. Return freed blocks to the
		 * transaction reservation to counter this behavior.
		 */
		flags |= XFS_TRANS_RES_FDBLKS;
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
				&tp);
	if (error)
		goto out_unlock;

	/*
	 * Lock and join the inodes to the transaction so that transaction commit
	 * or cancel will unlock the inodes from this point onwards.
	 */
	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_ijoin(tp, tip, 0);

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_disk_size ||
	    sxp->sx_length != tip->i_disk_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with those
	 * passed in.  If they differ, we abort this swap.
	 * This is the mechanism used to assure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}

	/*
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;

	if (xfs_has_rmapbt(mp))
		error = xfs_swap_extent_rmap(&tp, ip, tip);
	else
		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
				&target_log_flags);
	if (error)
		goto out_trans_cancel;

	/* Do we have to swap reflink flags? */
	if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
	    (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
		f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
		ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
		tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
		tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
	}

	/* Swap the cow forks. */
	if (xfs_has_reflink(mp)) {
		ASSERT(!ip->i_cowfp ||
		       ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
		ASSERT(!tip->i_cowfp ||
		       tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);

		swap(ip->i_cowfp, tip->i_cowfp);

		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(ip);
		else
			xfs_inode_clear_cowblocks_tag(ip);
		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(tip);
		else
			xfs_inode_clear_cowblocks_tag(tip);
	}

	xfs_trans_log_inode(tp, ip, src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
	 * have inode number owner values in the bmbt blocks that still refer to
	 * the old inode. Scan each bmbt to fix up the owner values with the
	 * inode number of the current inode.
	 */
	if (src_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, ip, tip);
		if (error)
			goto out_trans_cancel;
	}
	if (target_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, tip, ip);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);

out_unlock_ilock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_iunlock(tip, XFS_ILOCK_EXCL);
out_unlock:
	filemap_invalidate_unlock_two(VFS_I(ip)->i_mapping,
				      VFS_I(tip)->i_mapping);
	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock_ilock;
}
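
/*
 * Illustrative sketch (not kernel code): how a defragmenter in the style of
 * xfs_fsr(8) might invoke this function via XFS_IOC_SWAPEXT on the target
 * file. The sx_stat snapshot is taken with a single bulkstat so the kernel
 * can reject the swap if the file changed in between (the ctime/mtime check
 * above). Struct field usage follows my reading of the xfsprogs headers and
 * may need adjusting; error handling is minimal and the helper name is
 * invented.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <xfs/xfs.h>

static int swap_all_extents(int fdtarget, int fdtmp)
{
	struct xfs_swapext	sx;
	struct xfs_fsop_bulkreq	breq;
	struct stat		st;
	__u64			ino;

	if (fstat(fdtarget, &st) < 0)
		return -1;
	ino = st.st_ino;

	/* Snapshot the target's bulkstat data into sx_stat. */
	memset(&sx, 0, sizeof(sx));
	memset(&breq, 0, sizeof(breq));
	breq.lastip = &ino;
	breq.icount = 1;
	breq.ubuffer = &sx.sx_stat;
	if (ioctl(fdtarget, XFS_IOC_FSBULKSTAT_SINGLE, &breq) < 0)
		return -1;

	sx.sx_version = XFS_SX_VERSION;
	sx.sx_fdtarget = fdtarget;
	sx.sx_fdtmp = fdtmp;
	sx.sx_offset = 0;
	sx.sx_length = st.st_size;	/* must cover all data in both files */
	return ioctl(fdtarget, XFS_IOC_SWAPEXT, &sx);
}
#endif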