/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_dinode.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ? \
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}
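
/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * the on-disk address of a mapped extent feeds the mapping's start block
 * through the helper above, e.g.
 *
 *	xfs_daddr_t	daddr = xfs_fsb_to_db(ip, imap.br_startblock);
 *
 * For realtime inodes this is a plain fsb-to-basic-block conversion; for
 * regular inodes it also folds in the AG layout via XFS_FSB_TO_DADDR().
 */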

/*
 * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
 * caller.  Frees all the extents that need freeing, which must be done
 * last due to locking considerations.  We never free any extents in
 * the first transaction.
 *
 * Return 1 if the given transaction was committed and a new one
 * started, and 0 otherwise in the committed parameter.
 */
int						/* error */
xfs_bmap_finish(
	xfs_trans_t		**tp,		/* transaction pointer addr */
	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
	int			*committed)	/* xact committed or not */
{
	xfs_efd_log_item_t	*efd;		/* extent free data */
	xfs_efi_log_item_t	*efi;		/* extent free intention */
	int			error;		/* error return value */
	xfs_bmap_free_item_t	*free;		/* free extent item */
	struct xfs_trans_res	tres;		/* new log reservation */
	xfs_mount_t		*mp;		/* filesystem mount structure */
	xfs_bmap_free_item_t	*next;		/* next item on free list */
	xfs_trans_t		*ntp;		/* new transaction pointer */

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	if (flist->xbf_count == 0) {
		*committed = 0;
		return 0;
	}
	ntp = *tp;
	efi = xfs_trans_get_efi(ntp, flist->xbf_count);
	for (free = flist->xbf_first; free; free = free->xbfi_next)
		xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
			free->xbfi_blockcount);

	tres.tr_logres = ntp->t_log_res;
	tres.tr_logcount = ntp->t_log_count;
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	ntp = xfs_trans_dup(*tp);
	error = xfs_trans_commit(*tp, 0);
	*tp = ntp;
	*committed = 1;
	/*
	 * We have a new transaction, so we should return committed=1,
	 * even though we're returning an error.
	 */
	if (error)
		return error;

	/*
	 * transaction commit worked ok so we can drop the extra ticket
	 * reference that we gained in xfs_trans_dup()
	 */
	xfs_log_ticket_put(ntp->t_ticket);

	error = xfs_trans_reserve(ntp, &tres, 0, 0);
	if (error)
		return error;
	efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
	for (free = flist->xbf_first; free != NULL; free = next) {
		next = free->xbfi_next;
		if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
				free->xbfi_blockcount))) {
			/*
			 * The bmap free list will be cleaned up at a
			 * higher level.  The EFI will be canceled when
			 * this transaction is aborted.
			 * Need to force shutdown here to make sure it
			 * happens, since this transaction may not be
			 * dirty yet.
			 */
			mp = ntp->t_mountp;
			if (!XFS_FORCED_SHUTDOWN(mp))
				xfs_force_shutdown(mp,
						   (error == EFSCORRUPTED) ?
						   SHUTDOWN_CORRUPT_INCORE :
						   SHUTDOWN_META_IO_ERROR);
			return error;
		}
		xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
			free->xbfi_blockcount);
		xfs_bmap_del_free(flist, NULL, free);
	}
	return 0;
}
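
/*
 * Usage sketch (assumed, for illustration only): the usual caller pattern
 * around xfs_bmap_finish() mirrors what xfs_alloc_file_space() later in
 * this file does - build up a free list while mapping, then finish it
 * before committing:
 *
 *	xfs_bmap_init(&free_list, &firstfsb);
 *	error = xfs_bmapi_write(tp, ip, off_fsb, len_fsb, flags,
 *				&firstfsb, 0, imapp, &nimaps, &free_list);
 *	if (!error)
 *		error = xfs_bmap_finish(&tp, &free_list, &committed);
 *	if (!error)
 *		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 */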

int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	if (do_mod(ap->offset, align) || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out other modifications to the RT bitmap inode.
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx);	/* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	atype = ap->blkno == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, atype, ap->wasdel, prod, &rtb)))
		return error;
	if (rtb == NULLFSBLOCK && prod > 1 &&
	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
					   ap->length, &ralen, atype,
					   ap->wasdel, 1, &rtb)))
		return error;
	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
	} else {
		ap->length = 0;
	}
	return 0;
}
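
/*
 * Worked example (illustrative, not from the original source): with a
 * realtime extent size of 4 filesystem blocks, an aligned 8-block request
 * gives ralen = 8 / 4 = 2 rtextents; a successful allocation at rtextent 5
 * then yields ap->blkno = 5 * 4 = 20 and a final length of 2 * 4 = 8 blocks.
 */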

/*
 * Stack switching interfaces for allocation
 */
static void
xfs_bmapi_allocate_worker(
	struct work_struct	*work)
{
	struct xfs_bmalloca	*args = container_of(work,
						struct xfs_bmalloca, work);
	unsigned long		pflags;
	unsigned long		new_pflags = PF_FSTRANS;

	/*
	 * we are in a transaction context here, but may also be doing work
	 * in kswapd context, and hence we may need to inherit that state
	 * temporarily to ensure that we don't block waiting for memory reclaim
	 * in any way.
	 */
	if (args->kswapd)
		new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;

	current_set_flags_nested(&pflags, new_pflags);

	args->result = __xfs_bmapi_allocate(args);
	complete(args->done);

	current_restore_flags_nested(&pflags, new_pflags);
}

/*
 * Some allocation requests often come in with little stack to work on. Push
 * them off to a worker thread so there is lots of stack to use. Otherwise just
 * call directly to avoid the context switch overhead here.
 */
int
xfs_bmapi_allocate(
	struct xfs_bmalloca	*args)
{
	DECLARE_COMPLETION_ONSTACK(done);

	if (!args->stack_switch)
		return __xfs_bmapi_allocate(args);

	args->done = &done;
	args->kswapd = current_is_kswapd();
	INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
	queue_work(xfs_alloc_wq, &args->work);
	wait_for_completion(&done);
	destroy_work_on_stack(&args->work);
	return args->result;
}
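
/*
 * Illustrative note (not in the original source): a caller that knows it is
 * deep in the stack simply sets args->stack_switch before calling in, e.g.
 *
 *	args.stack_switch = 1;
 *	error = xfs_bmapi_allocate(&args);
 *
 * and the allocation then runs on xfs_alloc_wq with a fresh stack while the
 * caller sleeps on the on-stack completion.
 */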

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.
 */
STATIC void
xfs_bmap_count_leaves(
	xfs_ifork_t		*ifp,
	xfs_extnum_t		idx,
	int			numrecs,
	int			*count)
{
	int			b;

	for (b = 0; b < numrecs; b++) {
		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
		*count += xfs_bmbt_get_blockcount(frp);
	}
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	int			*count)
{
	int			b;
	xfs_bmbt_rec_t		*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int					/* error */
xfs_bmap_count_tree(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fsblock_t	blockno,	/* file system block number */
	int		levelin,	/* level in btree */
	int		*count)		/* Count of blocks */
{
	int			error;
	xfs_buf_t		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (unlikely((error =
		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}

/*
 * Count fsblocks of the given fork.
 */
int						/* error */
xfs_bmap_count_blocks(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode */
	int			whichfork,	/* data or attr fork */
	int			*count)		/* out: count of blocks */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
		xfs_bmap_count_leaves(ifp, 0,
			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
			count);
		return 0;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	block = ifp->if_broot;
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLDFSBNO);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	return 0;
}

/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
	xfs_inode_t		*ip,		/* xfs incore inode pointer */
	struct getbmapx		*out,		/* output structure */
	int			prealloced,	/* this is a file with
						 * preallocated data space */
	__int64_t		end,		/* last block requested */
	xfs_fsblock_t		startblock)
{
	__int64_t		fixlen;
	xfs_mount_t		*mp;		/* file system mount point */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_extnum_t		lastx;		/* last extent pointer */
	xfs_fileoff_t		fileblock;

	if (startblock == HOLESTARTBLOCK) {
		mp = ip->i_mount;
		out->bmv_block = -1;
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		fixlen -= out->bmv_offset;
		if (prealloced && out->bmv_offset + out->bmv_length == end) {
			/* Came to hole at EOF. Trim it. */
			if (fixlen <= 0)
				return 0;
			out->bmv_length = fixlen;
		}
	} else {
		if (startblock == DELAYSTARTBLOCK)
			out->bmv_block = -2;
		else
			out->bmv_block = xfs_fsb_to_db(ip, startblock);
		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
		if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
		   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
			out->bmv_oflags |= BMV_OF_LAST;
	}

	return 1;
}

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	xfs_inode_t		*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	xfs_bmap_format_t	formatter,	/* format to user */
	void			*arg)		/* formatter arg */
{
	__int64_t		bmvend;		/* last block requested */
	int			error = 0;	/* return value */
	__int64_t		fixlen;		/* length for -1 case */
	int			i;		/* extent number */
	int			lock;		/* lock state */
	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
	xfs_mount_t		*mp;		/* file system mount point */
	int			nex;		/* # of user extents can do */
	int			nexleft;	/* # of user extents left */
	int			subnex;		/* # of bmapi's can do */
	int			nmap;		/* number of map entries */
	struct getbmapx		*out;		/* output structure */
	int			whichfork;	/* data or attr fork */
	int			prealloced;	/* this is a file with
						 * preallocated data space */
	int			iflags;		/* interface flags */
	int			bmapi_flags;	/* flags for xfs_bmapi */
	int			cur_ext = 0;

	mp = ip->i_mount;
	iflags = bmv->bmv_iflags;
	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;

	if (whichfork == XFS_ATTR_FORK) {
		if (XFS_IFORK_Q(ip)) {
			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
				return XFS_ERROR(EINVAL);
		} else if (unlikely(
			   ip->i_d.di_aformat != 0 &&
			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}

		prealloced = 0;
		fixlen = 1LL << 32;
	} else {
		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
			return XFS_ERROR(EINVAL);

		if (xfs_get_extsz_hint(ip) ||
		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
	}

	if (bmv->bmv_length == -1) {
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
		bmv->bmv_length =
			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
	} else if (bmv->bmv_length == 0) {
		bmv->bmv_entries = 0;
		return 0;
	} else if (bmv->bmv_length < 0) {
		return XFS_ERROR(EINVAL);
	}

	nex = bmv->bmv_count - 1;
	if (nex <= 0)
		return XFS_ERROR(EINVAL);
	bmvend = bmv->bmv_offset + bmv->bmv_length;

	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
		return XFS_ERROR(ENOMEM);
	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
	if (!out)
		return XFS_ERROR(ENOMEM);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	if (whichfork == XFS_DATA_FORK) {
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		lock = xfs_ilock_data_map_shared(ip);
	} else {
		lock = xfs_ilock_attr_map_shared(ip);
	}

	/*
	 * Don't let nex be bigger than the number of extents
	 * we can have assuming alternating holes and real extents.
	 */
	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

	bmapi_flags = xfs_bmapi_aflag(whichfork);
	if (!(iflags & BMV_IF_PREALLOC))
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	/*
	 * Allocate enough space to handle "subnex" maps at a time.
	 */
	error = ENOMEM;
	subnex = 16;
	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
	if (!map)
		goto out_unlock_ilock;

	bmv->bmv_entries = 0;

	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
		error = 0;
		goto out_free_map;
	}

	nexleft = nex;

	do {
		nmap = (nexleft > subnex) ? subnex : nexleft;
		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
				       map, &nmap, bmapi_flags);
		if (error)
			goto out_free_map;
		ASSERT(nmap <= subnex);

		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
			out[cur_ext].bmv_oflags = 0;
			if (map[i].br_state == XFS_EXT_UNWRITTEN)
				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
			else if (map[i].br_startblock == DELAYSTARTBLOCK)
				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
			out[cur_ext].bmv_offset =
				XFS_FSB_TO_BB(mp, map[i].br_startoff);
			out[cur_ext].bmv_length =
				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			out[cur_ext].bmv_unused1 = 0;
			out[cur_ext].bmv_unused2 = 0;

			/*
			 * delayed allocation extents that start beyond EOF can
			 * occur due to speculative EOF allocation when the
			 * delalloc extent is larger than the largest freespace
			 * extent at conversion time.  These extents cannot be
			 * converted by data writeback, so can exist here even
			 * if we are not supposed to be finding delalloc
			 * extents.
			 */
			if (map[i].br_startblock == DELAYSTARTBLOCK &&
			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
				ASSERT((iflags & BMV_IF_DELALLOC) != 0);

			if (map[i].br_startblock == HOLESTARTBLOCK &&
			    whichfork == XFS_ATTR_FORK) {
				/* came to the end of attribute fork */
				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
				goto out_free_map;
			}

			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
					prealloced, bmvend,
					map[i].br_startblock))
				goto out_free_map;

			bmv->bmv_offset =
				out[cur_ext].bmv_offset +
				out[cur_ext].bmv_length;
			bmv->bmv_length =
				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);

			/*
			 * In case we don't want to return the hole,
			 * don't increase cur_ext so that we can reuse
			 * it in the next loop.
			 */
			if ((iflags & BMV_IF_NO_HOLES) &&
			    map[i].br_startblock == HOLESTARTBLOCK) {
				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
				continue;
			}

			nexleft--;
			bmv->bmv_entries++;
			cur_ext++;
		}
	} while (nmap && nexleft && bmv->bmv_length);

 out_free_map:
	kmem_free(map);
 out_unlock_ilock:
	xfs_iunlock(ip, lock);
 out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	for (i = 0; i < cur_ext; i++) {
		int full = 0;	/* user array is full */

		/* format results & advance arg */
		error = formatter(&arg, &out[i], &full);
		if (error || full)
			break;
	}

	kmem_free(out);
	return error;
}
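
/*
 * Sketch of the formatter contract (inferred from the call above, not a
 * definition taken from this file): the formatter is invoked once per
 * collected getbmapx record and reports a full user buffer through *full:
 *
 *	int my_formatter(void **ap, struct getbmapx *bmv, int *full)
 *	{
 *		copy *bmv into the user buffer tracked by *ap, advance *ap,
 *		and set *full = 1 when no more slots remain;
 *		return 0;
 *	}
 *
 * The name my_formatter is purely illustrative.
 */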

/*
 * dead simple method of punching delalyed allocation blocks from a range in
 * the inode. Walks a block at a time so will be slow, but is only executed in
 * rare error cases so the overhead is not critical. This will always punch out
 * both the start and end blocks, even if the ranges only partially overlap
 * them, so it is up to the caller to ensure that partial blocks are not
 * passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	xfs_fileoff_t		remaining = length;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	do {
		int		done;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		xfs_fsblock_t	firstblock;
		xfs_bmap_free_t flist;

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range. Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
				       XFS_BMAPI_ENTIRE);

		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"Failed delalloc mapping lookup ino %lld fsb %lld.",
						ip->i_ino, start_fsb);
			}
			break;
		}
		if (!nimaps)
			goto next_block;

		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_block;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/flist pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent and hence we need
		 * don't cancel or finish them after the xfs_bunmapi() call.
		 */
		xfs_bmap_init(&flist, &firstblock);
		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
					&flist, &done);
		if (error)
			break;

		ASSERT(!flist.xbf_count && !flist.xbf_first);
next_block:
		start_fsb++;
		remaining--;
	} while(remaining > 0);

	return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(ip->i_d.di_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VN_CACHED(VFS_I(ip)) == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}
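
/*
 * Usage sketch (assumed, for illustration): callers typically gate the
 * truncation helper below on this predicate, along the lines of
 *
 *	if (xfs_can_free_eofblocks(ip, false))
 *		error = xfs_free_eofblocks(mp, ip, true);
 *
 * with 'force' set only when preallocated/append-only files must have their
 * delalloc blocks removed anyway.
 */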

/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
 */
int
xfs_free_eofblocks(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	bool		need_iolock)
{
	xfs_trans_t	*tp;
	int		error;
	xfs_fileoff_t	end_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	map_len;
	int		nimaps;
	xfs_bmbt_irec_t	imap;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;

		/*
		 * There are blocks after the end of file.
		 * Free them up now by truncating the file to
		 * its current size.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);

		if (need_iolock) {
			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
				xfs_trans_cancel(tp, 0);
				return EAGAIN;
			}
		}

		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			if (need_iolock)
				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
					      XFS_ISIZE(ip));
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp,
					 (XFS_TRANS_RELEASE_LOG_RES |
					  XFS_TRANS_ABORT));
		} else {
			error = xfs_trans_commit(tp,
						XFS_TRANS_RELEASE_LOG_RES);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (need_iolock)
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	}
	return error;
}

int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	xfs_bmap_free_t		free_list;
	uint			qblocks, resblks, resrtextents;
	int			committed;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)
		return XFS_ERROR(EINVAL);

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			if ((temp = do_mod(startoffset_fsb, extsz)))
				e += temp;
			if ((temp = do_mod(e, extsz)))
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					  resblks, resrtextents);
		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, &firstfsb,
					0, imapp, &nimaps, &free_list);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = XFS_ERROR(ENOSPC);
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Zero file bytes between startoff and endoff inclusive.
 * The iolock is held exclusive and no blocks are buffered.
 *
 * This function is used by xfs_free_file_space() to zero
 * partial blocks when the range to free is not block aligned.
 * When unreserving space with boundaries that are not block
 * aligned we round up the start and round down the end
 * boundaries and then use this function to zero the parts of
 * the blocks that got dropped during the rounding.
 */
STATIC int
xfs_zero_remaining_bytes(
	xfs_inode_t		*ip,
	xfs_off_t		startoff,
	xfs_off_t		endoff)
{
	xfs_bmbt_irec_t		imap;
	xfs_fileoff_t		offset_fsb;
	xfs_off_t		lastoffset;
	xfs_off_t		offset;
	xfs_buf_t		*bp;
	xfs_mount_t		*mp = ip->i_mount;
	int			nimap;
	int			error = 0;

	/*
	 * Avoid doing I/O beyond eof - it's not necessary
	 * since nothing can read beyond eof.  The space will
	 * be zeroed when the file is extended anyway.
	 */
	if (startoff >= XFS_ISIZE(ip))
		return 0;

	if (endoff > XFS_ISIZE(ip))
		endoff = XFS_ISIZE(ip);

	bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp,
				  BTOBB(mp->m_sb.sb_blocksize), 0);
	if (!bp)
		return XFS_ERROR(ENOMEM);

	xfs_buf_unlock(bp);

	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
		uint lock_mode;

		offset_fsb = XFS_B_TO_FSBT(mp, offset);
		nimap = 1;

		lock_mode = xfs_ilock_data_map_shared(ip);
		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
		xfs_iunlock(ip, lock_mode);

		if (error || nimap < 1)
			break;
		ASSERT(imap.br_blockcount >= 1);
		ASSERT(imap.br_startoff == offset_fsb);
		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
		if (lastoffset > endoff)
			lastoffset = endoff;
		if (imap.br_startblock == HOLESTARTBLOCK)
			continue;
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		if (imap.br_state == XFS_EXT_UNWRITTEN)
			continue;
		XFS_BUF_UNDONE(bp);
		XFS_BUF_UNWRITE(bp);
		XFS_BUF_READ(bp);
		XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));

		if (XFS_FORCED_SHUTDOWN(mp)) {
			error = XFS_ERROR(EIO);
			break;
		}
		xfs_buf_iorequest(bp);
		error = xfs_buf_iowait(bp);
		if (error) {
			xfs_buf_ioerror_alert(bp,
					"xfs_zero_remaining_bytes(read)");
			break;
		}
		memset(bp->b_addr +
			(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
		      0, lastoffset - offset + 1);
		XFS_BUF_UNDONE(bp);
		XFS_BUF_UNREAD(bp);
		XFS_BUF_WRITE(bp);

		if (XFS_FORCED_SHUTDOWN(mp)) {
			error = XFS_ERROR(EIO);
			break;
		}
		xfs_buf_iorequest(bp);
		error = xfs_buf_iowait(bp);
		if (error) {
			xfs_buf_ioerror_alert(bp,
					"xfs_zero_remaining_bytes(write)");
			break;
		}
	}
	xfs_buf_relse(bp);
	return error;
}

int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int			committed;
	int			done;
	xfs_fileoff_t		endoffset_fsb;
	int			error;
	xfs_fsblock_t		firstfsb;
	xfs_bmap_free_t		free_list;
	xfs_bmbt_irec_t		imap;
	xfs_off_t		ioffset;
	xfs_extlen_t		mod = 0;
	xfs_mount_t		*mp;
	int			nimap;
	uint			resblks;
	xfs_off_t		rounding;
	int			rt;
	xfs_fileoff_t		startoffset_fsb;
	xfs_trans_t		*tp;

	mp = ip->i_mount;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	error = 0;
	if (len <= 0)	/* if nothing being freed */
		return error;
	rt = XFS_IS_REALTIME_INODE(ip);
	startoffset_fsb	= XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(VFS_I(ip));

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
	ioffset = offset & ~(rounding - 1);
	error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
					      ioffset, -1);
	if (error)
		goto out;
	truncate_pagecache_range(VFS_I(ip), ioffset, -1);

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 * If it's a realtime file & can't use unwritten extents then we
	 * actually need to zero the extent edges.  Otherwise xfs_bunmapi
	 * will take care of it for us.
	 */
	if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		nimap = 1;
		error = xfs_bmapi_read(ip, startoffset_fsb, 1,
					&imap, &nimap, 0);
		if (error)
			goto out;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			xfs_daddr_t	block;

			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			block = imap.br_startblock;
			mod = do_div(block, mp->m_sb.sb_rextsize);
			if (mod)
				startoffset_fsb += mp->m_sb.sb_rextsize - mod;
		}
		nimap = 1;
		error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
					&imap, &nimap, 0);
		if (error)
			goto out;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			mod++;
			if (mod && (mod != mp->m_sb.sb_rextsize))
				endoffset_fsb -= mod;
		}
	}
	if ((done = (endoffset_fsb <= startoffset_fsb)))
		/*
		 * One contiguous piece to clear
		 */
		error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
	else {
		/*
		 * Some full blocks, possibly two pieces to clear
		 */
		if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
			error = xfs_zero_remaining_bytes(ip, offset,
				XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
		if (!error &&
		    XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
			error = xfs_zero_remaining_bytes(ip,
				XFS_FSB_TO_B(mp, endoffset_fsb),
				offset + len - 1);
	}

	/*
	 * free file space until done or until there is an error
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	while (!error && !done) {

		/*
		 * allocate and setup the transaction. Allow this
		 * transaction to dip into the reserve blocks to ensure
		 * the freeing of the space succeeds at ENOSPC.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);

		/*
		 * check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp,
				ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
				resblks, 0, XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * issue the bunmapi() call to free the blocks
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bunmapi(tp, ip, startoffset_fsb,
				  endoffset_fsb - startoffset_fsb,
				  0, 2, &firstfsb, &free_list, &done);
		if (error)
			goto error0;

		/*
		 * complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

 out:
	return error;

 error0:
	xfs_bmap_cancel(&free_list);
 error1:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	goto out;
}

int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			granularity;
	xfs_off_t		start_boundary;
	xfs_off_t		end_boundary;
	int			error;

	trace_xfs_zero_file_space(ip);

	granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);

	/*
	 * Round the range of extents we are going to convert inwards.  If the
	 * offset is aligned, then it doesn't get changed so we zero from the
	 * start of the block offset points to.
	 */
	start_boundary = round_up(offset, granularity);
	end_boundary = round_down(offset + len, granularity);

	ASSERT(start_boundary >= offset);
	ASSERT(end_boundary <= offset + len);

	if (start_boundary < end_boundary - 1) {
		/*
		 * punch out delayed allocation blocks and the page cache over
		 * the conversion range
		 */
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip,
				XFS_B_TO_FSBT(mp, start_boundary),
				XFS_B_TO_FSB(mp, end_boundary - start_boundary));
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		truncate_pagecache_range(VFS_I(ip), start_boundary,
					 end_boundary - 1);

		/* convert the blocks */
		error = xfs_alloc_file_space(ip, start_boundary,
					end_boundary - start_boundary - 1,
					XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
		if (error)
			goto out;

		/* We've handled the interior of the range, now for the edges */
		if (start_boundary != offset) {
			error = xfs_iozero(ip, offset, start_boundary - offset);
			if (error)
				goto out;
		}

		if (end_boundary != offset + len)
			error = xfs_iozero(ip, end_boundary,
					   offset + len - end_boundary);

	} else {
		/*
		 * It's either a sub-granularity range or the range spanned lies
		 * partially across two adjacent blocks.
		 */
		error = xfs_iozero(ip, offset, len);
	}

out:
	return error;
}
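
/*
 * Worked example (illustrative only): with a 4096 byte granularity, a call
 * covering offset = 1000, len = 10000 rounds inwards to
 *
 *	start_boundary = round_up(1000, 4096)    = 4096
 *	end_boundary   = round_down(11000, 4096) = 8192
 *
 * so bytes [4096, 8192) are converted to unwritten extents and the two
 * sub-block edges [1000, 4096) and [8192, 11000) are zeroed with xfs_iozero().
 */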

/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shift extent for the given file.
 *	The first thing we do is to free data blocks in the specified range
 *	by calling xfs_free_file_space(). It would also sync dirty data
 *	and invalidate page cache over the region on which collapse range
 *	is working. And Shift extent records to the left to cover a hole.
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int			done = 0;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_extnum_t		current_ext = 0;
	struct xfs_bmap_free	free_list;
	xfs_fsblock_t		first_block;
	int			committed;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		shift_fsb;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	trace_xfs_collapse_file_space(ip);

	start_fsb = XFS_B_TO_FSB(mp, offset + len);
	shift_fsb = XFS_B_TO_FSB(mp, len);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	while (!error && !done) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		/*
		 * We would need to reserve permanent block for transaction.
		 * This will come into picture when after shifting extent into
		 * hole we found that adjacent extents can be merged which
		 * may lead to freeing of a block during record update.
		 */
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			break;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot,
				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
				XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_bmap_init(&free_list, &first_block);

		/*
		 * We are using the write transaction in which max 2 bmbt
		 * updates are allowed
		 */
		error = xfs_bmap_shift_extents(tp, ip, &done, start_fsb,
					       shift_fsb, &current_ext,
					       &first_block, &free_list,
					       XFS_BMAP_MAX_SHIFT_EXTENTS);
		if (error)
			goto out;

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto out;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	return error;

out:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip)	/* tmp inode */
{

	/* Should never get a local format */
	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		return EINVAL;

	/*
	 * if the target inode has less extents that then temporary inode then
	 * why did userspace call us?
	 */
	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
		return EINVAL;

	/*
	 * if the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format
	 * as we already know there are less extents in the temp inode.
	 */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		return EINVAL;

	/* Check temp in extent form to max in target */
	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return EINVAL;

	/* Check target in extent form to max in temp */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(ip) &&
		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
			return EINVAL;
		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return EINVAL;
		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return EINVAL;
	}

	return 0;
}

int
xfs_swap_extents(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip,	/* tmp inode */
	xfs_swapext_t	*sxp)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	xfs_bstat_t	*sbp = &sxp->sx_stat;
	xfs_ifork_t	*tempifp, *ifp, *tifp;
	int		src_log_flags, target_log_flags;
	int		error = 0;
	int		aforkblks = 0;
	int		taforkblks = 0;
	__uint64_t	tmp;

	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
	if (!tempifp) {
		error = XFS_ERROR(ENOMEM);
		goto out;
	}

	/*
	 * we have to do two separate lock calls here to keep lockdep
	 * happy. If we try to get all the locks in one call, lock will
	 * report false positives when we drop the ILOCK and regain them
	 * below.
	 */
	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);

	/* Verify that both files have the same format */
	if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
		error = XFS_ERROR(EINVAL);
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = XFS_ERROR(EINVAL);
		goto out_unlock;
	}

	error = -filemap_write_and_wait(VFS_I(tip)->i_mapping);
	if (error)
		goto out_unlock;
	truncate_pagecache_range(VFS_I(tip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VN_CACHED(VFS_I(tip)) != 0) {
		error = XFS_ERROR(EINVAL);
		goto out_unlock;
	}

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = XFS_ERROR(EFAULT);
		goto out_unlock;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_unlock;
	}

	/*
	 * Compare the current change & modify times with that
	 * passed in.  If they differ, we abort this swap.
	 * This is the mechanism used to ensure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = XFS_ERROR(EBUSY);
		goto out_unlock;
	}

	/* We need to fail if the file is memory mapped.  Once we have tossed
	 * all existing pages, the page fault will have no option
	 * but to go to the filesystem for pages. By making the page fault call
	 * vop_read (or write in the case of autogrow) they block on the iolock
	 * until we have switched the extents.
	 */
	if (VN_MAPPED(VFS_I(ip))) {
		error = XFS_ERROR(EBUSY);
		goto out_unlock;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_iunlock(tip, XFS_ILOCK_EXCL);

	/*
	 * There is a race condition here since we gave up the
	 * ilock.  However, the data fork will not change since
	 * we have the iolock (locked for truncation too) so we
	 * are safe.  We don't really care if non-io related
	 * fields change.
	 */
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
	if (error) {
		xfs_iunlock(ip,  XFS_IOLOCK_EXCL);
		xfs_iunlock(tip, XFS_IOLOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		goto out;
	}
	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);

	/*
	 * Count the number of extended attribute blocks
	 */
	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
		if (error)
			goto out_trans_cancel;
	}
	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
			&taforkblks);
		if (error)
			goto out_trans_cancel;
	}

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Before we've swapped the forks, lets set the owners of the forks
	 * appropriately. We have to do this as we are demand paging the btree
	 * buffers, and so the validation done on read will expect the owner
	 * field to be correctly set. Once we change the owners, we can swap the
	 * inode forks.
	 *
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;
	if (ip->i_d.di_version == 3 &&
	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		target_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
					      tip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}

	if (tip->i_d.di_version == 3 &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		src_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
					      ip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	ifp = &ip->i_df;
	tifp = &tip->i_df;
	*tempifp = *ifp;	/* struct copy */
	*ifp = *tifp;		/* struct copy */
	*tifp = *tempifp;	/* struct copy */

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (__uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	tmp = (__uint64_t) ip->i_d.di_nextents;
	ip->i_d.di_nextents = tip->i_d.di_nextents;
	tip->i_d.di_nextents = tmp;

	tmp = (__uint64_t) ip->i_d.di_format;
	ip->i_d.di_format = tip->i_d.di_format;
	tip->i_d.di_format = tmp;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer.  Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			ifp->if_u1.if_extents =
				ifp->if_u2.if_inline_ext;
		}
		src_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(ip->i_d.di_version < 3 ||
		       (src_log_flags & XFS_ILOG_DOWNER));
		src_log_flags |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer.  Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			tifp->if_u1.if_extents =
				tifp->if_u2.if_inline_ext;
		}
		target_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		target_log_flags |= XFS_ILOG_DBROOT;
		ASSERT(tip->i_d.di_version < 3 ||
		       (target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	xfs_trans_log_inode(tp, ip,  src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp, 0);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);
out:
	kmem_free(tempifp);
	return error;

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	goto out;

out_trans_cancel:
	xfs_trans_cancel(tp, 0);
	goto out_unlock;
}