/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;
/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define XFS_ITRUNC_MAX_EXTENTS  2
STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
/*
 * helper function to extract extent size hint from inode
 */
xfs_extlen_t
xfs_get_extsz_hint(
    struct xfs_inode *ip)
{
    if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
        return ip->i_d.di_extsize;
    if (XFS_IS_REALTIME_INODE(ip))
        return ip->i_mount->m_sb.sb_rextsize;
    return 0;
}
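
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * round an allocation request up to the extent size hint.  The helper name
 * round_alloc_len() is hypothetical.
 */
static inline xfs_extlen_t
round_alloc_len(
    struct xfs_inode *ip,
    xfs_extlen_t     len)
{
    xfs_extlen_t extsz = xfs_get_extsz_hint(ip);

    /* round len up to a whole multiple of the hint, if one is set */
    if (extsz)
        len = roundup(len, extsz);
    return len;
}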
/*
 * This is a wrapper routine around the xfs_ilock() routine used to centralize
 * some grungy code.  It is used in places that wish to lock the inode solely
 * for reading the extents.  The reason these places can't just call
 * xfs_ilock(SHARED) is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode is in b-tree
 * format, then we need to lock the inode exclusively until the extents are read
 * in.  Locking it exclusively all the time would limit our parallelism
 * unnecessarily, though.  What we do instead is check to see if the extents
 * have been read in yet, and only lock the inode exclusively if they have not.
 *
 * The function returns a value which should be given to the corresponding
 * xfs_iunlock_map_shared().  This value is the mode in which the lock was
 * acquired.
 */
uint
xfs_ilock_map_shared(
    xfs_inode_t *ip)
{
    uint lock_mode;

    if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
        ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
        lock_mode = XFS_ILOCK_EXCL;
    } else {
        lock_mode = XFS_ILOCK_SHARED;
    }

    xfs_ilock(ip, lock_mode);

    return lock_mode;
}
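
/*
 * Illustrative usage sketch, not part of the original file (the function
 * name example_read_extents_locked() is hypothetical): the returned lock
 * mode must be handed back to xfs_iunlock_map_shared() so the correct
 * lock is dropped.
 */
static void
example_read_extents_locked(
    xfs_inode_t *ip)
{
    uint lock_mode;

    lock_mode = xfs_ilock_map_shared(ip);
    /* ... read the extent list here ... */
    xfs_iunlock_map_shared(ip, lock_mode);
}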
/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
    xfs_inode_t  *ip,
    unsigned int lock_mode)
{
    xfs_iunlock(ip, lock_mode);
}
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *              XFS_IOLOCK_SHARED,
 *              XFS_IOLOCK_EXCL,
 *              XFS_ILOCK_SHARED,
 *              XFS_ILOCK_EXCL,
 *              XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *              XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *              XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *              XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
    xfs_inode_t *ip,
    uint        lock_flags)
{
    trace_xfs_ilock(ip, lock_flags, _RET_IP_);

    /*
     * You can't set both SHARED and EXCL for the same lock,
     * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
     * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
     */
    ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
           (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
    ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
           (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
    ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

    if (lock_flags & XFS_IOLOCK_EXCL)
        mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
    else if (lock_flags & XFS_IOLOCK_SHARED)
        mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

    if (lock_flags & XFS_ILOCK_EXCL)
        mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
    else if (lock_flags & XFS_ILOCK_SHARED)
        mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}
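
/*
 * Illustrative sketch, not part of the original file (the function name
 * example_lock_both() is hypothetical): taking both locks in one call
 * preserves the IO-lock-before-ilock ordering documented above.
 */
static void
example_lock_both(
    xfs_inode_t *ip)
{
    /* one call; xfs_ilock() internally takes the IO lock first */
    xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
    /* ... modify the inode ... */
    xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
}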
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 */
int
xfs_ilock_nowait(
    xfs_inode_t *ip,
    uint        lock_flags)
{
    trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

    /*
     * You can't set both SHARED and EXCL for the same lock,
     * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
     * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
     */
    ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
           (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
    ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
           (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
    ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

    if (lock_flags & XFS_IOLOCK_EXCL) {
        if (!mrtryupdate(&ip->i_iolock))
            goto out;
    } else if (lock_flags & XFS_IOLOCK_SHARED) {
        if (!mrtryaccess(&ip->i_iolock))
            goto out;
    }
    if (lock_flags & XFS_ILOCK_EXCL) {
        if (!mrtryupdate(&ip->i_lock))
            goto out_undo_iolock;
    } else if (lock_flags & XFS_ILOCK_SHARED) {
        if (!mrtryaccess(&ip->i_lock))
            goto out_undo_iolock;
    }
    return 1;

 out_undo_iolock:
    if (lock_flags & XFS_IOLOCK_EXCL)
        mrunlock_excl(&ip->i_iolock);
    else if (lock_flags & XFS_IOLOCK_SHARED)
        mrunlock_shared(&ip->i_iolock);
 out:
    return 0;
}
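
/*
 * Illustrative sketch, not part of the original file (the function name
 * example_trylock() is hypothetical): a common trylock pattern is to
 * fall back to the blocking xfs_ilock() when xfs_ilock_nowait() fails.
 */
static void
example_trylock(
    xfs_inode_t *ip)
{
    if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
        /* couldn't get it without sleeping; block instead */
        xfs_ilock(ip, XFS_ILOCK_EXCL);
    }
    /* ... critical section ... */
    xfs_iunlock(ip, XFS_ILOCK_EXCL);
}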
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 */
void
xfs_iunlock(
    xfs_inode_t *ip,
    uint        lock_flags)
{
    /*
     * You can't set both SHARED and EXCL for the same lock,
     * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
     * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
     */
    ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
           (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
    ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
           (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
    ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
    ASSERT(lock_flags != 0);

    if (lock_flags & XFS_IOLOCK_EXCL)
        mrunlock_excl(&ip->i_iolock);
    else if (lock_flags & XFS_IOLOCK_SHARED)
        mrunlock_shared(&ip->i_iolock);

    if (lock_flags & XFS_ILOCK_EXCL)
        mrunlock_excl(&ip->i_lock);
    else if (lock_flags & XFS_ILOCK_SHARED)
        mrunlock_shared(&ip->i_lock);

    trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
    xfs_inode_t *ip,
    uint        lock_flags)
{
    ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
    ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

    if (lock_flags & XFS_ILOCK_EXCL)
        mrdemote(&ip->i_lock);
    if (lock_flags & XFS_IOLOCK_EXCL)
        mrdemote(&ip->i_iolock);

    trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
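
/*
 * Illustrative sketch, not part of the original file (the function name
 * example_demote() is hypothetical): demoting lets a writer finish its
 * update and keep reading without a full unlock/relock cycle.
 */
static void
example_demote(
    xfs_inode_t *ip)
{
    xfs_ilock(ip, XFS_ILOCK_EXCL);
    /* ... exclusive-phase work ... */
    xfs_ilock_demote(ip, XFS_ILOCK_EXCL);  /* now held shared */
    /* ... read-only phase ... */
    xfs_iunlock(ip, XFS_ILOCK_SHARED);
}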
#ifdef DEBUG
int
xfs_isilocked(
    xfs_inode_t *ip,
    uint        lock_flags)
{
    if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
        if (!(lock_flags & XFS_ILOCK_SHARED))
            return !!ip->i_lock.mr_writer;
        return rwsem_is_locked(&ip->i_lock.mr_lock);
    }

    if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
        if (!(lock_flags & XFS_IOLOCK_SHARED))
            return !!ip->i_iolock.mr_writer;
        return rwsem_is_locked(&ip->i_iolock.mr_lock);
    }

    ASSERT(0);
    return 0;
}
#endif
void
__xfs_iflock(
    struct xfs_inode *ip)
{
    wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
    DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

    do {
        prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
        if (xfs_isiflocked(ip))
            io_schedule();
    } while (!xfs_iflock_nowait(ip));

    finish_wait(wq, &wait.wait);
}
#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
STATIC void
xfs_validate_extents(
    xfs_ifork_t   *ifp,
    int           nrecs,
    xfs_exntfmt_t fmt)
{
    xfs_bmbt_irec_t     irec;
    xfs_bmbt_rec_host_t rec;
    int                 i;

    for (i = 0; i < nrecs; i++) {
        xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
        rec.l0 = get_unaligned(&ep->l0);
        rec.l1 = get_unaligned(&ep->l1);
        xfs_bmbt_get_all(&rec, &irec);
        if (fmt == XFS_EXTFMT_NOSTATE)
            ASSERT(irec.br_state == XFS_EXT_NORM);
    }
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, fmt)
#endif /* DEBUG */
/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
    xfs_mount_t *mp,
    xfs_buf_t   *bp)
{
    int          i;
    int          j;
    xfs_dinode_t *dip;

    j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

    for (i = 0; i < j; i++) {
        dip = (xfs_dinode_t *)xfs_buf_offset(bp,
                    i * mp->m_sb.sb_inodesize);
        if (!dip->di_next_unlinked)  {
            xfs_alert(mp,
    "Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
                bp);
            ASSERT(dip->di_next_unlinked);
        }
    }
}
#endif
/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
    struct xfs_mount  *mp,
    struct xfs_trans  *tp,
    struct xfs_imap   *imap,
    struct xfs_dinode **dipp,
    struct xfs_buf    **bpp,
    uint              buf_flags,
    uint              iget_flags)
{
    struct xfs_buf *bp;
    int            error;
    int            i;
    int            ni;

    buf_flags |= XBF_UNMAPPED;
    error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
                   (int)imap->im_len, buf_flags, &bp, NULL);
    if (error) {
        if (error != EAGAIN) {
            xfs_warn(mp,
                "%s: xfs_trans_read_buf() returned error %d.",
                __func__, error);
        } else {
            ASSERT(buf_flags & XBF_TRYLOCK);
        }
        return error;
    }

    /*
     * Validate the magic number and version of every inode in the buffer
     * (if DEBUG kernel) or the first inode in the buffer, otherwise.
     */
#ifdef DEBUG
    ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
#else   /* usual case */
    ni = 1;
#endif

    for (i = 0; i < ni; i++) {
        int          di_ok;
        xfs_dinode_t *dip;

        dip = (xfs_dinode_t *)xfs_buf_offset(bp,
                    (i << mp->m_sb.sb_inodelog));
        di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
                XFS_DINODE_GOOD_VERSION(dip->di_version);
        if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
                        XFS_ERRTAG_ITOBP_INOTOBP,
                        XFS_RANDOM_ITOBP_INOTOBP))) {
            if (iget_flags & XFS_IGET_UNTRUSTED) {
                xfs_trans_brelse(tp, bp);
                return XFS_ERROR(EINVAL);
            }
            XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_HIGH,
                         mp, dip);
#ifdef DEBUG
            xfs_emerg(mp,
                "bad inode magic/vsn daddr %lld #%d (magic=%x)",
                (unsigned long long)imap->im_blkno, i,
                be16_to_cpu(dip->di_magic));
            ASSERT(0);
#endif
            xfs_trans_brelse(tp, bp);
            return XFS_ERROR(EFSCORRUPTED);
        }
    }

    xfs_inobp_check(mp, bp);

    *bpp = bp;
    *dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset);
    return 0;
}
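
/*
 * Illustrative usage sketch, not part of the original file (the function
 * name example_read_dinode() is hypothetical): a typical caller maps the
 * inode first with xfs_imap() and then reads the cluster buffer.
 */
static int
example_read_dinode(
    struct xfs_mount  *mp,
    struct xfs_trans  *tp,
    struct xfs_inode  *ip,
    struct xfs_dinode **dipp,
    struct xfs_buf    **bpp)
{
    int error;

    error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, 0);
    if (error)
        return error;
    return xfs_imap_to_bp(mp, tp, &ip->i_imap, dipp, bpp, 0, 0);
}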
/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means set if_rdev to the proper value.  For files, directories,
 * and symlinks this means to bring in the in-line data or extent
 * pointers.  For a file in B-tree format, only the root is immediately
 * brought in-core.  The rest will be in-lined in if_extents when it
 * is first referenced (see xfs_iread_extents()).
 */
STATIC int
xfs_iformat(
    xfs_inode_t  *ip,
    xfs_dinode_t *dip)
{
    xfs_attr_shortform_t *atp;
    int                  size;
    int                  error = 0;
    xfs_fsize_t          di_size;

    if (unlikely(be32_to_cpu(dip->di_nextents) +
                 be16_to_cpu(dip->di_anextents) >
                 be64_to_cpu(dip->di_nblocks))) {
        xfs_warn(ip->i_mount,
            "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
            (unsigned long long)ip->i_ino,
            (int)(be32_to_cpu(dip->di_nextents) +
                  be16_to_cpu(dip->di_anextents)),
            (unsigned long long)
                be64_to_cpu(dip->di_nblocks));
        XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
                     ip->i_mount, dip);
        return XFS_ERROR(EFSCORRUPTED);
    }

    if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
        xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
            (unsigned long long)ip->i_ino,
            dip->di_forkoff);
        XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
                     ip->i_mount, dip);
        return XFS_ERROR(EFSCORRUPTED);
    }

    if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
                 !ip->i_mount->m_rtdev_targp)) {
        xfs_warn(ip->i_mount,
            "corrupt dinode %Lu, has realtime flag set.",
            ip->i_ino);
        XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
                     XFS_ERRLEVEL_LOW, ip->i_mount, dip);
        return XFS_ERROR(EFSCORRUPTED);
    }

    switch (ip->i_d.di_mode & S_IFMT) {
    case S_IFIFO:
    case S_IFCHR:
    case S_IFBLK:
    case S_IFSOCK:
        if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
            XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
                          ip->i_mount, dip);
            return XFS_ERROR(EFSCORRUPTED);
        }
        ip->i_d.di_size = 0;
        ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
        break;

    case S_IFREG:
    case S_IFLNK:
    case S_IFDIR:
        switch (dip->di_format) {
        case XFS_DINODE_FMT_LOCAL:
            /*
             * no local regular files yet
             */
            if (unlikely(S_ISREG(be16_to_cpu(dip->di_mode)))) {
                xfs_warn(ip->i_mount,
            "corrupt inode %Lu (local format for regular file).",
                    (unsigned long long) ip->i_ino);
                XFS_CORRUPTION_ERROR("xfs_iformat(4)",
                             XFS_ERRLEVEL_LOW,
                             ip->i_mount, dip);
                return XFS_ERROR(EFSCORRUPTED);
            }

            di_size = be64_to_cpu(dip->di_size);
            if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
                xfs_warn(ip->i_mount,
            "corrupt inode %Lu (bad size %Ld for local inode).",
                    (unsigned long long) ip->i_ino,
                    (long long) di_size);
                XFS_CORRUPTION_ERROR("xfs_iformat(5)",
                             XFS_ERRLEVEL_LOW,
                             ip->i_mount, dip);
                return XFS_ERROR(EFSCORRUPTED);
            }

            size = (int)di_size;
            error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
            break;
        case XFS_DINODE_FMT_EXTENTS:
            error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
            break;
        case XFS_DINODE_FMT_BTREE:
            error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
            break;
        default:
            XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
                     ip->i_mount);
            return XFS_ERROR(EFSCORRUPTED);
        }
        break;

    default:
        XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
        return XFS_ERROR(EFSCORRUPTED);
    }
    if (error)
        return error;
    if (!XFS_DFORK_Q(dip))
        return 0;

    ASSERT(ip->i_afp == NULL);
    ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);

    switch (dip->di_aformat) {
    case XFS_DINODE_FMT_LOCAL:
        atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
        size = be16_to_cpu(atp->hdr.totsize);

        if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
            xfs_warn(ip->i_mount,
                "corrupt inode %Lu (bad attr fork size %Ld).",
                (unsigned long long) ip->i_ino,
                (long long) size);
            XFS_CORRUPTION_ERROR("xfs_iformat(8)",
                         XFS_ERRLEVEL_LOW,
                         ip->i_mount, dip);
            return XFS_ERROR(EFSCORRUPTED);
        }

        error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
        break;
    case XFS_DINODE_FMT_EXTENTS:
        error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
        break;
    case XFS_DINODE_FMT_BTREE:
        error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
        break;
    default:
        error = XFS_ERROR(EFSCORRUPTED);
        break;
    }
    if (error) {
        kmem_zone_free(xfs_ifork_zone, ip->i_afp);
        ip->i_afp = NULL;
        xfs_idestroy_fork(ip, XFS_DATA_FORK);
    }
    return error;
}
/*
 * The file is in-lined in the on-disk inode.
 * If it fits into if_inline_data, then copy
 * it there, otherwise allocate a buffer for it
 * and copy the data there.  Either way, set
 * if_data to point at the data.
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in i_real_bytes.
 */
STATIC int
xfs_iformat_local(
    xfs_inode_t  *ip,
    xfs_dinode_t *dip,
    int          whichfork,
    int          size)
{
    xfs_ifork_t *ifp;
    int         real_size;

    /*
     * If the size is unreasonable, then something
     * is wrong and we just bail out rather than crash in
     * kmem_alloc() or memcpy() below.
     */
    if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
        xfs_warn(ip->i_mount,
    "corrupt inode %Lu (bad size %d for local fork, size = %d).",
            (unsigned long long) ip->i_ino, size,
            XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
        XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
                     ip->i_mount, dip);
        return XFS_ERROR(EFSCORRUPTED);
    }
    ifp = XFS_IFORK_PTR(ip, whichfork);
    real_size = 0;
    if (size == 0)
        ifp->if_u1.if_data = NULL;
    else if (size <= sizeof(ifp->if_u2.if_inline_data))
        ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
    else {
        real_size = roundup(size, 4);
        ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
    }
    ifp->if_bytes = size;
    ifp->if_real_bytes = real_size;
    if (size)
        memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
    ifp->if_flags &= ~XFS_IFEXTENTS;
    ifp->if_flags |= XFS_IFINLINE;
    return 0;
}
/*
 * The file consists of a set of extents all
 * of which fit into the on-disk inode.
 * If there are few enough extents to fit into
 * the if_inline_ext, then copy them there.
 * Otherwise allocate a buffer for them and copy
 * them into it.  Either way, set if_extents
 * to point at the extents.
 */
STATIC int
xfs_iformat_extents(
    xfs_inode_t  *ip,
    xfs_dinode_t *dip,
    int          whichfork)
{
    xfs_bmbt_rec_t *dp;
    xfs_ifork_t    *ifp;
    int            nex;
    int            size;
    int            i;

    ifp = XFS_IFORK_PTR(ip, whichfork);
    nex = XFS_DFORK_NEXTENTS(dip, whichfork);
    size = nex * (uint)sizeof(xfs_bmbt_rec_t);

    /*
     * If the number of extents is unreasonable, then something
     * is wrong and we just bail out rather than crash in
     * kmem_alloc() or memcpy() below.
     */
    if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
        xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
            (unsigned long long) ip->i_ino, nex);
        XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
                     ip->i_mount, dip);
        return XFS_ERROR(EFSCORRUPTED);
    }

    ifp->if_real_bytes = 0;
    if (nex == 0)
        ifp->if_u1.if_extents = NULL;
    else if (nex <= XFS_INLINE_EXTS)
        ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
    else
        xfs_iext_add(ifp, 0, nex);

    ifp->if_bytes = size;
    if (size) {
        dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
        xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
        for (i = 0; i < nex; i++, dp++) {
            xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
            ep->l0 = get_unaligned_be64(&dp->l0);
            ep->l1 = get_unaligned_be64(&dp->l1);
        }
        XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
        if (whichfork != XFS_DATA_FORK ||
            XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
            if (unlikely(xfs_check_nostate_extents(
                    ifp, 0, nex))) {
                XFS_ERROR_REPORT("xfs_iformat_extents(2)",
                         XFS_ERRLEVEL_LOW,
                         ip->i_mount);
                return XFS_ERROR(EFSCORRUPTED);
            }
    }
    ifp->if_flags |= XFS_IFEXTENTS;
    return 0;
}
/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
    xfs_inode_t  *ip,
    xfs_dinode_t *dip,
    int          whichfork)
{
    xfs_bmdr_block_t *dfp;
    xfs_ifork_t      *ifp;
    /* REFERENCED */
    int              nrecs;
    int              size;

    ifp = XFS_IFORK_PTR(ip, whichfork);
    dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
    size = XFS_BMAP_BROOT_SPACE(dfp);
    nrecs = be16_to_cpu(dfp->bb_numrecs);

    /*
     * blow out if -- fork has fewer extents than can fit in
     * fork (fork shouldn't be a btree format), root btree
     * block has more records than can fit into the fork,
     * or the number of extents is greater than the number of
     * blocks.
     */
    if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <=
                 XFS_IFORK_MAXEXT(ip, whichfork) ||
                 XFS_BMDR_SPACE_CALC(nrecs) >
                 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork) ||
                 XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
        xfs_warn(ip->i_mount, "corrupt inode %Lu (btree).",
            (unsigned long long) ip->i_ino);
        XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
                     ip->i_mount, dip);
        return XFS_ERROR(EFSCORRUPTED);
    }

    ifp->if_broot_bytes = size;
    ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
    ASSERT(ifp->if_broot != NULL);
    /*
     * Copy and convert from the on-disk structure
     * to the in-memory structure.
     */
    xfs_bmdr_to_bmbt(ip->i_mount, dfp,
             XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
             ifp->if_broot, size);
    ifp->if_flags &= ~XFS_IFEXTENTS;
    ifp->if_flags |= XFS_IFBROOT;

    return 0;
}
void
xfs_dinode_from_disk(
    xfs_icdinode_t *to,
    xfs_dinode_t   *from)
{
    to->di_magic = be16_to_cpu(from->di_magic);
    to->di_mode = be16_to_cpu(from->di_mode);
    to->di_version = from->di_version;
    to->di_format = from->di_format;
    to->di_onlink = be16_to_cpu(from->di_onlink);
    to->di_uid = be32_to_cpu(from->di_uid);
    to->di_gid = be32_to_cpu(from->di_gid);
    to->di_nlink = be32_to_cpu(from->di_nlink);
    to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
    to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
    memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
    to->di_flushiter = be16_to_cpu(from->di_flushiter);
    to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
    to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
    to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
    to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
    to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
    to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
    to->di_size = be64_to_cpu(from->di_size);
    to->di_nblocks = be64_to_cpu(from->di_nblocks);
    to->di_extsize = be32_to_cpu(from->di_extsize);
    to->di_nextents = be32_to_cpu(from->di_nextents);
    to->di_anextents = be16_to_cpu(from->di_anextents);
    to->di_forkoff = from->di_forkoff;
    to->di_aformat = from->di_aformat;
    to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
    to->di_dmstate = be16_to_cpu(from->di_dmstate);
    to->di_flags = be16_to_cpu(from->di_flags);
    to->di_gen = be32_to_cpu(from->di_gen);
}
void
xfs_dinode_to_disk(
    xfs_dinode_t   *to,
    xfs_icdinode_t *from)
{
    to->di_magic = cpu_to_be16(from->di_magic);
    to->di_mode = cpu_to_be16(from->di_mode);
    to->di_version = from->di_version;
    to->di_format = from->di_format;
    to->di_onlink = cpu_to_be16(from->di_onlink);
    to->di_uid = cpu_to_be32(from->di_uid);
    to->di_gid = cpu_to_be32(from->di_gid);
    to->di_nlink = cpu_to_be32(from->di_nlink);
    to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
    to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
    memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
    to->di_flushiter = cpu_to_be16(from->di_flushiter);
    to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
    to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
    to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
    to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
    to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
    to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
    to->di_size = cpu_to_be64(from->di_size);
    to->di_nblocks = cpu_to_be64(from->di_nblocks);
    to->di_extsize = cpu_to_be32(from->di_extsize);
    to->di_nextents = cpu_to_be32(from->di_nextents);
    to->di_anextents = cpu_to_be16(from->di_anextents);
    to->di_forkoff = from->di_forkoff;
    to->di_aformat = from->di_aformat;
    to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
    to->di_dmstate = cpu_to_be16(from->di_dmstate);
    to->di_flags = cpu_to_be16(from->di_flags);
    to->di_gen = cpu_to_be32(from->di_gen);
}
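
/*
 * Illustrative note, not part of the original file (the function name
 * example_roundtrip_check() is hypothetical): from_disk and to_disk are
 * inverses, so converting a valid on-disk inode must preserve the magic.
 */
static void
example_roundtrip_check(
    xfs_dinode_t *dip)
{
    xfs_icdinode_t ic;

    /* assumes dip points at a valid on-disk inode */
    xfs_dinode_from_disk(&ic, dip);
    ASSERT(ic.di_magic == XFS_DINODE_MAGIC);
}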
STATIC uint
_xfs_dic2xflags(
    __uint16_t di_flags)
{
    uint flags = 0;

    if (di_flags & XFS_DIFLAG_ANY) {
        if (di_flags & XFS_DIFLAG_REALTIME)
            flags |= XFS_XFLAG_REALTIME;
        if (di_flags & XFS_DIFLAG_PREALLOC)
            flags |= XFS_XFLAG_PREALLOC;
        if (di_flags & XFS_DIFLAG_IMMUTABLE)
            flags |= XFS_XFLAG_IMMUTABLE;
        if (di_flags & XFS_DIFLAG_APPEND)
            flags |= XFS_XFLAG_APPEND;
        if (di_flags & XFS_DIFLAG_SYNC)
            flags |= XFS_XFLAG_SYNC;
        if (di_flags & XFS_DIFLAG_NOATIME)
            flags |= XFS_XFLAG_NOATIME;
        if (di_flags & XFS_DIFLAG_NODUMP)
            flags |= XFS_XFLAG_NODUMP;
        if (di_flags & XFS_DIFLAG_RTINHERIT)
            flags |= XFS_XFLAG_RTINHERIT;
        if (di_flags & XFS_DIFLAG_PROJINHERIT)
            flags |= XFS_XFLAG_PROJINHERIT;
        if (di_flags & XFS_DIFLAG_NOSYMLINKS)
            flags |= XFS_XFLAG_NOSYMLINKS;
        if (di_flags & XFS_DIFLAG_EXTSIZE)
            flags |= XFS_XFLAG_EXTSIZE;
        if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
            flags |= XFS_XFLAG_EXTSZINHERIT;
        if (di_flags & XFS_DIFLAG_NODEFRAG)
            flags |= XFS_XFLAG_NODEFRAG;
        if (di_flags & XFS_DIFLAG_FILESTREAM)
            flags |= XFS_XFLAG_FILESTREAM;
    }

    return flags;
}
uint
xfs_ip2xflags(
    xfs_inode_t *ip)
{
    xfs_icdinode_t *dic = &ip->i_d;

    return _xfs_dic2xflags(dic->di_flags) |
                (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}
uint
xfs_dic2xflags(
    xfs_dinode_t *dip)
{
    return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
                (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
}
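
/*
 * Illustrative sketch, not part of the original file (the function name
 * example_is_realtime_xflag() is hypothetical): the xflags value is the
 * user-visible form of the on-disk flags, e.g. for testing whether a
 * file is a realtime file.
 */
static int
example_is_realtime_xflag(
    xfs_inode_t *ip)
{
    return (xfs_ip2xflags(ip) & XFS_XFLAG_REALTIME) != 0;
}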
/*
 * Read the disk inode attributes into the in-core inode structure.
 */
int
xfs_iread(
    xfs_mount_t *mp,
    xfs_trans_t *tp,
    xfs_inode_t *ip,
    uint        iget_flags)
{
    xfs_buf_t    *bp;
    xfs_dinode_t *dip;
    int          error;

    /*
     * Fill in the location information in the in-core inode.
     */
    error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
    if (error)
        return error;

    /*
     * Get pointers to the on-disk inode and the buffer containing it.
     */
    error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
    if (error)
        return error;

    /*
     * If we got something that isn't an inode it means someone
     * (nfs or dmi) has a stale handle.
     */
    if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) {
#ifdef DEBUG
        xfs_alert(mp,
            "%s: dip->di_magic (0x%x) != XFS_DINODE_MAGIC (0x%x)",
            __func__, be16_to_cpu(dip->di_magic), XFS_DINODE_MAGIC);
#endif /* DEBUG */
        error = XFS_ERROR(EINVAL);
        goto out_brelse;
    }

    /*
     * If the on-disk inode is already linked to a directory
     * entry, copy all of the inode into the in-core inode.
     * xfs_iformat() handles copying in the inode format
     * specific information.
     * Otherwise, just get the truly permanent information.
     */
    if (dip->di_mode) {
        xfs_dinode_from_disk(&ip->i_d, dip);
        error = xfs_iformat(ip, dip);
        if (error)  {
#ifdef DEBUG
            xfs_alert(mp, "%s: xfs_iformat() returned error %d",
                __func__, error);
#endif /* DEBUG */
            goto out_brelse;
        }
    } else {
        ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
        ip->i_d.di_version = dip->di_version;
        ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
        ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
        /*
         * Make sure to pull in the mode here as well in
         * case the inode is released without being used.
         * This ensures that xfs_inactive() will see that
         * the inode is already free and not try to mess
         * with the uninitialized part of it.
         */
        ip->i_d.di_mode = 0;
    }

    /*
     * The inode format changed when we moved the link count and
     * made it 32 bits long.  If this is an old format inode,
     * convert it in memory to look like a new one.  If it gets
     * flushed to disk we will convert back before flushing or
     * logging it.  We zero out the new projid field and the old link
     * count field.  We'll handle clearing the pad field (the remains
     * of the old uuid field) when we actually convert the inode to
     * the new format.  We don't change the version number so that we
     * can distinguish this from a real new format inode.
     */
    if (ip->i_d.di_version == 1) {
        ip->i_d.di_nlink = ip->i_d.di_onlink;
        ip->i_d.di_onlink = 0;
        xfs_set_projid(ip, 0);
    }

    ip->i_delayed_blks = 0;

    /*
     * Mark the buffer containing the inode as something to keep
     * around for a while.  This helps to keep recently accessed
     * meta-data in-core longer.
     */
    xfs_buf_set_ref(bp, XFS_INO_REF);

    /*
     * Use xfs_trans_brelse() to release the buffer containing the
     * on-disk inode, because it was acquired with xfs_trans_read_buf()
     * in xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
     * brelse().  If we're within a transaction, then xfs_trans_brelse()
     * will only release the buffer if it is not dirty within the
     * transaction.  It will be OK to release the buffer in this case,
     * because inodes on disk are never destroyed and we will be
     * locking the new in-core inode before putting it in the hash
     * table where other processes can find it.  Thus we don't have
     * to worry about the inode being changed just because we released
     * the buffer.
     */
 out_brelse:
    xfs_trans_brelse(tp, bp);
    return error;
}
/*
 * Read in extents from a btree-format inode.
 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 */
int
xfs_iread_extents(
    xfs_trans_t *tp,
    xfs_inode_t *ip,
    int         whichfork)
{
    int          error;
    xfs_ifork_t  *ifp;
    xfs_extnum_t nextents;

    if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
        XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
                 ip->i_mount);
        return XFS_ERROR(EFSCORRUPTED);
    }
    nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
    ifp = XFS_IFORK_PTR(ip, whichfork);

    /*
     * We know that the size is valid (it's checked in iformat_btree)
     */
    ifp->if_bytes = ifp->if_real_bytes = 0;
    ifp->if_flags |= XFS_IFEXTENTS;
    xfs_iext_add(ifp, 0, nextents);
    error = xfs_bmap_read_extents(tp, ip, whichfork);
    if (error) {
        xfs_iext_destroy(ifp);
        ifp->if_flags &= ~XFS_IFEXTENTS;
        return error;
    }
    xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
    return 0;
}
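
/*
 * Illustrative sketch, not part of the original file (the function name
 * example_ensure_extents() is hypothetical): callers that hold the inode
 * locked exclusively (see xfs_ilock_map_shared() above) pull the extent
 * list in on first use like this.
 */
static int
example_ensure_extents(
    xfs_trans_t *tp,
    xfs_inode_t *ip,
    int         whichfork)
{
    xfs_ifork_t *ifp = XFS_IFORK_PTR(ip, whichfork);

    if (ifp->if_flags & XFS_IFEXTENTS)
        return 0;       /* already in core */
    return xfs_iread_extents(tp, ip, whichfork);
}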
/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode.  If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation.  Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
int
xfs_ialloc(
    xfs_trans_t *tp,
    xfs_inode_t *pip,
    umode_t     mode,
    xfs_nlink_t nlink,
    xfs_dev_t   rdev,
    prid_t      prid,
    int         okalloc,
    xfs_buf_t   **ialloc_context,
    xfs_inode_t **ipp)
{
    xfs_ino_t   ino;
    xfs_inode_t *ip;
    uint        flags;
    int         error;
    timespec_t  tv;
    int         filestreams = 0;

    /*
     * Call the space management code to pick
     * the on-disk inode to be allocated.
     */
    error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
                ialloc_context, &ino);
    if (error)
        return error;
    if (*ialloc_context || ino == NULLFSINO) {
        *ipp = NULL;
        return 0;
    }
    ASSERT(*ialloc_context == NULL);

    /*
     * Get the in-core inode with the lock held exclusively.
     * This is because we're setting fields here we need
     * to prevent others from looking at until we're done.
     */
    error = xfs_iget(tp->t_mountp, tp, ino, XFS_IGET_CREATE,
             XFS_ILOCK_EXCL, &ip);
    if (error)
        return error;
    ASSERT(ip != NULL);

    ip->i_d.di_mode = mode;
    ip->i_d.di_onlink = 0;
    ip->i_d.di_nlink = nlink;
    ASSERT(ip->i_d.di_nlink == nlink);
    ip->i_d.di_uid = current_fsuid();
    ip->i_d.di_gid = current_fsgid();
    xfs_set_projid(ip, prid);
    memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

    /*
     * If the superblock version is up to where we support new format
     * inodes and this is currently an old format inode, then change
     * the inode version number now.  This way we only do the conversion
     * here rather than here and in the flush/logging code.
     */
    if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
        ip->i_d.di_version == 1) {
        ip->i_d.di_version = 2;
        /*
         * We've already zeroed the old link count, the projid field,
         * and the pad field.
         */
    }

    /*
     * Project ids won't be stored on disk if we are using a version 1 inode.
     */
    if ((prid != 0) && (ip->i_d.di_version == 1))
        xfs_bump_ino_vers2(tp, ip);

    if (pip && XFS_INHERIT_GID(pip)) {
        ip->i_d.di_gid = pip->i_d.di_gid;
        if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
            ip->i_d.di_mode |= S_ISGID;
        }
    }

    /*
     * If the group ID of the new file does not match the effective group
     * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
     * (and only if the irix_sgid_inherit compatibility variable is set).
     */
    if ((irix_sgid_inherit) &&
        (ip->i_d.di_mode & S_ISGID) &&
        (!in_group_p((gid_t)ip->i_d.di_gid))) {
        ip->i_d.di_mode &= ~S_ISGID;
    }

    ip->i_d.di_size = 0;
    ip->i_d.di_nextents = 0;
    ASSERT(ip->i_d.di_nblocks == 0);

    nanotime(&tv);
    ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
    ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
    ip->i_d.di_atime = ip->i_d.di_mtime;
    ip->i_d.di_ctime = ip->i_d.di_mtime;

    /*
     * di_gen will have been taken care of in xfs_iread.
     */
    ip->i_d.di_extsize = 0;
    ip->i_d.di_dmevmask = 0;
    ip->i_d.di_dmstate = 0;
    ip->i_d.di_flags = 0;
    flags = XFS_ILOG_CORE;
    switch (mode & S_IFMT) {
    case S_IFIFO:
    case S_IFCHR:
    case S_IFBLK:
    case S_IFSOCK:
        ip->i_d.di_format = XFS_DINODE_FMT_DEV;
        ip->i_df.if_u2.if_rdev = rdev;
        ip->i_df.if_flags = 0;
        flags |= XFS_ILOG_DEV;
        break;
    case S_IFREG:
        /*
         * we can't set up filestreams until after the VFS inode
         * is set up properly.
         */
        if (pip && xfs_inode_is_filestream(pip))
            filestreams = 1;
        /* fall through */
    case S_IFDIR:
        if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
            uint di_flags = 0;

            if (S_ISDIR(mode)) {
                if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
                    di_flags |= XFS_DIFLAG_RTINHERIT;
                if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
                    di_flags |= XFS_DIFLAG_EXTSZINHERIT;
                    ip->i_d.di_extsize = pip->i_d.di_extsize;
                }
            } else if (S_ISREG(mode)) {
                if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
                    di_flags |= XFS_DIFLAG_REALTIME;
                if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
                    di_flags |= XFS_DIFLAG_EXTSIZE;
                    ip->i_d.di_extsize = pip->i_d.di_extsize;
                }
            }
            if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
                xfs_inherit_noatime)
                di_flags |= XFS_DIFLAG_NOATIME;
            if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
                xfs_inherit_nodump)
                di_flags |= XFS_DIFLAG_NODUMP;
            if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
                xfs_inherit_sync)
                di_flags |= XFS_DIFLAG_SYNC;
            if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
                xfs_inherit_nosymlinks)
                di_flags |= XFS_DIFLAG_NOSYMLINKS;
            if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
                di_flags |= XFS_DIFLAG_PROJINHERIT;
            if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
                xfs_inherit_nodefrag)
                di_flags |= XFS_DIFLAG_NODEFRAG;
            if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
                di_flags |= XFS_DIFLAG_FILESTREAM;
            ip->i_d.di_flags |= di_flags;
        }
        /* FALLTHROUGH */
    case S_IFLNK:
        ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
        ip->i_df.if_flags = XFS_IFEXTENTS;
        ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
        ip->i_df.if_u1.if_extents = NULL;
        break;
    default:
        ASSERT(0);
    }
    /*
     * Attribute fork settings for new inode.
     */
    ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
    ip->i_d.di_anextents = 0;

    /*
     * Log the new values stuffed into the inode.
     */
    xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
    xfs_trans_log_inode(tp, ip, flags);

    /* now that we have an i_mode we can setup inode ops and unlock */
    xfs_setup_inode(ip);

    /* now we have set up the vfs inode we can associate the filestream */
    if (filestreams) {
        error = xfs_filestream_associate(pip, ip);
        if (error < 0)
            return -error;
        if (!error)
            xfs_iflags_set(ip, XFS_IFILESTREAM);
    }

    *ipp = ip;
    return 0;
}
/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction.  This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents(
    struct xfs_trans **tpp,
    struct xfs_inode *ip,
    int              whichfork,
    xfs_fsize_t      new_size)
{
    struct xfs_mount *mp = ip->i_mount;
    struct xfs_trans *tp = *tpp;
    struct xfs_trans *ntp;
    xfs_bmap_free_t  free_list;
    xfs_fsblock_t    first_block;
    xfs_fileoff_t    first_unmap_block;
    xfs_fileoff_t    last_block;
    xfs_filblks_t    unmap_len;
    int              committed;
    int              error = 0;
    int              done = 0;

    ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
    ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
           xfs_isilocked(ip, XFS_IOLOCK_EXCL));
    ASSERT(new_size <= XFS_ISIZE(ip));
    ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
    ASSERT(ip->i_itemp != NULL);
    ASSERT(ip->i_itemp->ili_lock_flags == 0);
    ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

    trace_xfs_itruncate_extents_start(ip, new_size);

    /*
     * Since it is possible for space to become allocated beyond
     * the end of the file (in a crash where the space is allocated
     * but the inode size is not yet updated), simply remove any
     * blocks which show up between the new EOF and the maximum
     * possible file size.  If the first block to be removed is
     * beyond the maximum file size (ie it is the same as last_block),
     * then there is nothing to do.
     */
    first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
    last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
    if (first_unmap_block == last_block)
        return 0;

    ASSERT(first_unmap_block < last_block);
    unmap_len = last_block - first_unmap_block + 1;
    while (!done) {
        xfs_bmap_init(&free_list, &first_block);
        error = xfs_bunmapi(tp, ip,
                    first_unmap_block, unmap_len,
                    xfs_bmapi_aflag(whichfork),
                    XFS_ITRUNC_MAX_EXTENTS,
                    &first_block, &free_list,
                    &done);
        if (error)
            goto out_bmap_cancel;

        /*
         * Duplicate the transaction that has the permanent
         * reservation and commit the old transaction.
         */
        error = xfs_bmap_finish(&tp, &free_list, &committed);
        if (committed)
            xfs_trans_ijoin(tp, ip, 0);
        if (error)
            goto out_bmap_cancel;

        if (committed) {
            /*
             * Mark the inode dirty so it will be logged and
             * moved forward in the log as part of every commit.
             */
            xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        }

        ntp = xfs_trans_dup(tp);
        error = xfs_trans_commit(tp, 0);
        tp = ntp;

        xfs_trans_ijoin(tp, ip, 0);

        if (error)
            goto out;

        /*
         * Transaction commit worked ok so we can drop the extra ticket
         * reference that we gained in xfs_trans_dup()
         */
        xfs_log_ticket_put(tp->t_ticket);
        error = xfs_trans_reserve(tp, 0,
                    XFS_ITRUNCATE_LOG_RES(mp), 0,
                    XFS_TRANS_PERM_LOG_RES,
                    XFS_ITRUNCATE_LOG_COUNT);
        if (error)
            goto out;
    }

    /*
     * Always re-log the inode so that our permanent transaction can keep
     * on rolling it forward in the log.
     */
    xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

    trace_xfs_itruncate_extents_end(ip, new_size);

out:
    *tpp = tp;
    return error;
out_bmap_cancel:
    /*
     * If the bunmapi call encounters an error, return to the caller where
     * the transaction can be properly aborted.  We just need to make sure
     * we're not holding any resources that we were not when we came in.
     */
    xfs_bmap_cancel(&free_list);
    goto out;
}
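
/*
 * Illustrative caller sketch, not part of the original file (the function
 * name example_truncate_data_fork() is hypothetical): the routine may roll
 * the transaction, so the caller must reload its transaction pointer from
 * tpp afterwards, even on error.
 */
static int
example_truncate_data_fork(
    struct xfs_trans **tpp,
    struct xfs_inode *ip)
{
    /* drop all data fork blocks past offset zero */
    return xfs_itruncate_extents(tpp, ip, XFS_DATA_FORK, 0);
}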
/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI.  It
 * will be pulled from this list when the inode is freed.
 */
STATIC int
xfs_iunlink(
    xfs_trans_t *tp,
    xfs_inode_t *ip)
{
    xfs_mount_t  *mp;
    xfs_agi_t    *agi;
    xfs_dinode_t *dip;
    xfs_buf_t    *agibp;
    xfs_buf_t    *ibp;
    xfs_agino_t  agino;
    short        bucket_index;
    int          offset;
    int          error;

    ASSERT(ip->i_d.di_nlink == 0);
    ASSERT(ip->i_d.di_mode != 0);

    mp = tp->t_mountp;

    /*
     * Get the agi buffer first.  It ensures lock ordering
     * on the list.
     */
    error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
    if (error)
        return error;
    agi = XFS_BUF_TO_AGI(agibp);

    /*
     * Get the index into the agi hash table for the
     * list this inode will go on.
     */
    agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
    ASSERT(agino != 0);
    bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
    ASSERT(agi->agi_unlinked[bucket_index]);
    ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);

    if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
        /*
         * There is already another inode in the bucket we need
         * to add ourselves to.  Add us at the front of the list.
         * Here we put the head pointer into our next pointer,
         * and then we fall through to point the head at us.
         */
        error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
                       0, 0);
        if (error)
            return error;

        ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
        dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
        offset = ip->i_imap.im_boffset +
            offsetof(xfs_dinode_t, di_next_unlinked);
        xfs_trans_inode_buf(tp, ibp);
        xfs_trans_log_buf(tp, ibp, offset,
                  (offset + sizeof(xfs_agino_t) - 1));
        xfs_inobp_check(mp, ibp);
    }

    /*
     * Point the bucket head pointer at the inode being inserted.
     */
    ASSERT(agino != 0);
    agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
    offset = offsetof(xfs_agi_t, agi_unlinked) +
        (sizeof(xfs_agino_t) * bucket_index);
    xfs_trans_log_buf(tp, agibp, offset,
              (offset + sizeof(xfs_agino_t) - 1));
    return 0;
}
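
/*
 * Illustrative sketch, not part of the original file (the function name
 * example_unlinked_bucket() is hypothetical): the AGI keeps
 * XFS_AGI_UNLINKED_BUCKETS singly linked lists, and an inode hashes onto
 * one of them by a simple modulo of its AG-relative inode number.
 */
static short
example_unlinked_bucket(
    xfs_mount_t *mp,
    xfs_ino_t   ino)
{
    xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);

    return agino % XFS_AGI_UNLINKED_BUCKETS;
}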
/*
 * Pull the on-disk inode from the AGI unlinked list.
 */
STATIC int
xfs_iunlink_remove(
    xfs_trans_t *tp,
    xfs_inode_t *ip)
{
    xfs_ino_t      next_ino;
    xfs_mount_t    *mp;
    xfs_agi_t      *agi;
    xfs_dinode_t   *dip;
    xfs_buf_t      *agibp;
    xfs_buf_t      *ibp;
    xfs_agnumber_t agno;
    xfs_agino_t    agino;
    xfs_agino_t    next_agino;
    xfs_buf_t      *last_ibp;
    xfs_dinode_t   *last_dip = NULL;
    short          bucket_index;
    int            offset, last_offset = 0;
    int            error;

    mp = tp->t_mountp;
    agno = XFS_INO_TO_AGNO(mp, ip->i_ino);

    /*
     * Get the agi buffer first.  It ensures lock ordering
     * on the list.
     */
    error = xfs_read_agi(mp, tp, agno, &agibp);
    if (error)
        return error;

    agi = XFS_BUF_TO_AGI(agibp);

    /*
     * Get the index into the agi hash table for the
     * list this inode will go on.
     */
    agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
    ASSERT(agino != 0);
    bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
    ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
    ASSERT(agi->agi_unlinked[bucket_index]);

    if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
        /*
         * We're at the head of the list.  Get the inode's on-disk
         * buffer to see if there is anyone after us on the list.
         * Only modify our next pointer if it is not already NULLAGINO.
         * This saves us the overhead of dealing with the buffer when
         * there is no need to change it.
         */
        error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
                       0, 0);
        if (error) {
            xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
                __func__, error);
            return error;
        }
        next_agino = be32_to_cpu(dip->di_next_unlinked);
        ASSERT(next_agino != 0);
        if (next_agino != NULLAGINO) {
            dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
            offset = ip->i_imap.im_boffset +
                offsetof(xfs_dinode_t, di_next_unlinked);
            xfs_trans_inode_buf(tp, ibp);
            xfs_trans_log_buf(tp, ibp, offset,
                      (offset + sizeof(xfs_agino_t) - 1));
            xfs_inobp_check(mp, ibp);
        } else {
            xfs_trans_brelse(tp, ibp);
        }
        /*
         * Point the bucket head pointer at the next inode.
         */
        ASSERT(next_agino != 0);
        ASSERT(next_agino != agino);
        agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
        offset = offsetof(xfs_agi_t, agi_unlinked) +
            (sizeof(xfs_agino_t) * bucket_index);
        xfs_trans_log_buf(tp, agibp, offset,
                  (offset + sizeof(xfs_agino_t) - 1));
    } else {
        /*
         * We need to search the list for the inode being freed.
         */
        next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
        last_ibp = NULL;
        while (next_agino != agino) {
            struct xfs_imap imap;

            if (last_ibp)
                xfs_trans_brelse(tp, last_ibp);

            imap.im_blkno = 0;
            next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);

            error = xfs_imap(mp, tp, next_ino, &imap, 0);
            if (error) {
                xfs_warn(mp,
                    "%s: xfs_imap returned error %d.",
                    __func__, error);
                return error;
            }

            error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
                           &last_ibp, 0, 0);
            if (error) {
                xfs_warn(mp,
                    "%s: xfs_imap_to_bp returned error %d.",
                    __func__, error);
                return error;
            }

            last_offset = imap.im_boffset;
            next_agino = be32_to_cpu(last_dip->di_next_unlinked);
            ASSERT(next_agino != NULLAGINO);
            ASSERT(next_agino != 0);
        }

        /*
         * Now last_ibp points to the buffer previous to us on the
         * unlinked list.  Pull us from the list.
         */
        error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
                       0, 0);
        if (error) {
            xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
                __func__, error);
            return error;
        }
        next_agino = be32_to_cpu(dip->di_next_unlinked);
        ASSERT(next_agino != 0);
        ASSERT(next_agino != agino);
        if (next_agino != NULLAGINO) {
            dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
            offset = ip->i_imap.im_boffset +
                offsetof(xfs_dinode_t, di_next_unlinked);
            xfs_trans_inode_buf(tp, ibp);
            xfs_trans_log_buf(tp, ibp, offset,
                      (offset + sizeof(xfs_agino_t) - 1));
            xfs_inobp_check(mp, ibp);
        } else {
            xfs_trans_brelse(tp, ibp);
        }
        /*
         * Point the previous inode on the list to the next inode.
         */
        last_dip->di_next_unlinked = cpu_to_be32(next_agino);
        ASSERT(next_agino != 0);
        offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
        xfs_trans_inode_buf(tp, last_ibp);
        xfs_trans_log_buf(tp, last_ibp, offset,
                  (offset + sizeof(xfs_agino_t) - 1));
        xfs_inobp_check(mp, last_ibp);
    }
    return 0;
}
/*
 * A big issue when freeing the inode cluster is that we _cannot_ skip any
 * inodes that are in memory - they all must be marked stale and attached to
 * the cluster buffer.
 */
STATIC int
xfs_ifree_cluster(
    xfs_inode_t *free_ip,
    xfs_trans_t *tp,
    xfs_ino_t   inum)
{
    xfs_mount_t          *mp = free_ip->i_mount;
    int                  blks_per_cluster;
    int                  nbufs;
    int                  ninodes;
    int                  i, j;
    xfs_daddr_t          blkno;
    xfs_buf_t            *bp;
    xfs_inode_t          *ip;
    xfs_inode_log_item_t *iip;
    xfs_log_item_t       *lip;
    struct xfs_perag     *pag;

    pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
    if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
        blks_per_cluster = 1;
        ninodes = mp->m_sb.sb_inopblock;
        nbufs = XFS_IALLOC_BLOCKS(mp);
    } else {
        blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
                    mp->m_sb.sb_blocksize;
        ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
        nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
    }

    for (j = 0; j < nbufs; j++, inum += ninodes) {
        blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
                     XFS_INO_TO_AGBNO(mp, inum));

        /*
         * We obtain and lock the backing buffer first in the process
         * here, as we have to ensure that any dirty inode that we
         * can't get the flush lock on is attached to the buffer.
         * If we scan the in-memory inodes first, then buffer IO can
         * complete before we get a lock on it, and hence we may fail
         * to mark all the active inodes on the buffer stale.
         */
        bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
                    mp->m_bsize * blks_per_cluster,
                    XBF_UNMAPPED);

        if (!bp)
            return ENOMEM;

        /*
         * Walk the inodes already attached to the buffer and mark them
         * stale.  These will all have the flush locks held, so an
         * in-memory inode walk can't lock them.  By marking them all
         * stale first, we will not attempt to lock them in the loop
         * below as the XFS_ISTALE flag will be set.
         */
        lip = bp->b_fspriv;
        while (lip) {
            if (lip->li_type == XFS_LI_INODE) {
                iip = (xfs_inode_log_item_t *)lip;
                ASSERT(iip->ili_logged == 1);
                lip->li_cb = xfs_istale_done;
                xfs_trans_ail_copy_lsn(mp->m_ail,
                            &iip->ili_flush_lsn,
                            &iip->ili_item.li_lsn);
                xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
            }
            lip = lip->li_bio_list;
        }

        /*
         * For each inode in memory attempt to add it to the inode
         * buffer and set it up for being staled on buffer IO
         * completion.  This is safe as we've locked out tail pushing
         * and flushing by locking the buffer.
         *
         * We have already marked every inode that was part of a
         * transaction stale above, which means there is no point in
         * even trying to lock them.
         */
        for (i = 0; i < ninodes; i++) {
retry:
            rcu_read_lock();
            ip = radix_tree_lookup(&pag->pag_ici_root,
                    XFS_INO_TO_AGINO(mp, (inum + i)));

            /* Inode not in memory, nothing to do */
            if (!ip) {
                rcu_read_unlock();
                continue;
            }

            /*
             * because this is an RCU protected lookup, we could
             * find a recently freed or even reallocated inode
             * during the lookup.  We need to check under the
             * i_flags_lock for a valid inode here.  Skip it if it
             * is not valid, the wrong inode or stale.
             */
            spin_lock(&ip->i_flags_lock);
            if (ip->i_ino != inum + i ||
                __xfs_iflags_test(ip, XFS_ISTALE)) {
                spin_unlock(&ip->i_flags_lock);
                rcu_read_unlock();
                continue;
            }
            spin_unlock(&ip->i_flags_lock);

            /*
             * Don't try to lock/unlock the current inode, but we
             * _cannot_ skip the other inodes that we did not find
             * in the list attached to the buffer and are not
             * already marked stale.  If we can't lock it, back off
             * and retry.
             */
            if (ip != free_ip &&
                !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
                rcu_read_unlock();
                delay(1);
                goto retry;
            }
            rcu_read_unlock();

            xfs_iflock(ip);
            xfs_iflags_set(ip, XFS_ISTALE);

            /*
             * we don't need to attach clean inodes or those only
             * with unlogged changes (which we throw away, anyway).
             */
            iip = ip->i_itemp;
            if (!iip || xfs_inode_clean(ip)) {
                ASSERT(ip != free_ip);
                xfs_ifunlock(ip);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                continue;
            }

            iip->ili_last_fields = iip->ili_fields;
            iip->ili_fields = 0;
            iip->ili_logged = 1;
            xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
                        &iip->ili_item.li_lsn);

            xfs_buf_attach_iodone(bp, xfs_istale_done,
                          &iip->ili_item);

            if (ip != free_ip)
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }

        xfs_trans_stale_inode_buf(tp, bp);
        xfs_trans_binval(tp, bp);
    }

    xfs_perag_put(pag);
    return 0;
}
/*
 * This is called to return an inode to the inode free list.
 * The inode should already be truncated to 0 length and have
 * no pages associated with it.  This routine also assumes that
 * the inode is already a part of the transaction.
 *
 * The on-disk copy of the inode will have been added to the list
 * of unlinked inodes in the AGI.  We need to remove the inode from
 * that list atomically with respect to freeing it here.
 */
int
xfs_ifree(
    xfs_trans_t     *tp,
    xfs_inode_t     *ip,
    xfs_bmap_free_t *flist)
{
    int          error;
    int          delete;
    xfs_ino_t    first_ino;
    xfs_dinode_t *dip;
    xfs_buf_t    *ibp;

    ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
    ASSERT(ip->i_d.di_nlink == 0);
    ASSERT(ip->i_d.di_nextents == 0);
    ASSERT(ip->i_d.di_anextents == 0);
    ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
    ASSERT(ip->i_d.di_nblocks == 0);

    /*
     * Pull the on-disk inode from the AGI unlinked list.
     */
    error = xfs_iunlink_remove(tp, ip);
    if (error)
        return error;

    error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
    if (error)
        return error;

    ip->i_d.di_mode = 0;        /* mark incore inode as free */
    ip->i_d.di_flags = 0;
    ip->i_d.di_dmevmask = 0;
    ip->i_d.di_forkoff = 0;     /* mark the attr fork not in use */
    ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
    ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
    /*
     * Bump the generation count so no one will be confused
     * by reincarnations of this inode.
     */
    ip->i_d.di_gen++;
    xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

    error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &dip, &ibp,
                   0, 0);
    if (error)
        return error;

    /*
     * Clear the on-disk di_mode.  This is to prevent xfs_bulkstat
     * from picking up this inode when it is reclaimed (its incore state
     * initialized but not flushed to disk yet).  The in-core di_mode is
     * already cleared and a corresponding transaction logged.
     * The hack here just synchronizes the in-core to on-disk
     * di_mode value in advance before the actual inode sync to disk.
     * This is OK because the inode is already unlinked and would never
     * change its di_mode again for this inode generation.
     * This is a temporary hack that would require a proper fix
     * in the future.
     */
    dip->di_mode = 0;

    if (delete) {
        error = xfs_ifree_cluster(ip, tp, first_ino);
    }

    return error;
}
/*
 * Reallocate the space for if_broot based on the number of records
 * being added or deleted as indicated in rec_diff.  Move the records
 * and pointers in if_broot to fit the new size.  When shrinking this
 * will eliminate holes between the records and pointers created by
 * the caller.  When growing this will create holes to be filled in
 * by the caller.
 *
 * The caller must not request to add more records than would fit in
 * the on-disk inode root.  If the if_broot is currently NULL, then
 * if we are adding records, one will be allocated.  The caller must also
 * not request that the number of records go below zero, although
 * it can go to zero.
 *
 * ip -- the inode whose if_broot area is changing
 * ext_diff -- the change in the number of records, positive or negative,
 *       requested for the if_broot array.
 */
void
xfs_iroot_realloc(
    xfs_inode_t *ip,
    int         rec_diff,
    int         whichfork)
{
    struct xfs_mount       *mp = ip->i_mount;
    int                    cur_max;
    xfs_ifork_t            *ifp;
    struct xfs_btree_block *new_broot;
    int                    new_max;
    size_t                 new_size;
    char                   *np;
    char                   *op;

    /*
     * Handle the degenerate case quietly.
     */
    if (rec_diff == 0) {
        return;
    }

    ifp = XFS_IFORK_PTR(ip, whichfork);
    if (rec_diff > 0) {
        /*
         * If there wasn't any memory allocated before, just
         * allocate it now and get out.
         */
        if (ifp->if_broot_bytes == 0) {
            new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
            ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
            ifp->if_broot_bytes = (int)new_size;
            return;
        }

        /*
         * If there is already an existing if_broot, then we need
         * to realloc() it and shift the pointers to their new
         * location.  The records don't change location because
         * they are kept butted up against the btree block header.
         */
        cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
        new_max = cur_max + rec_diff;
        new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
        ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
                (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
                KM_SLEEP | KM_NOFS);
        op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
                             ifp->if_broot_bytes);
        np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
                             (int)new_size);
        ifp->if_broot_bytes = (int)new_size;
        ASSERT(ifp->if_broot_bytes <=
            XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
        memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
        return;
    }

    /*
     * rec_diff is less than 0.  In this case, we are shrinking the
     * if_broot buffer.  It must already exist.  If we go to zero
     * records, just get rid of the root and clear the status bit.
     */
    ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
    cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
    new_max = cur_max + rec_diff;
    ASSERT(new_max >= 0);
    if (new_max > 0)
        new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
    else
        new_size = 0;
    if (new_size > 0) {
        new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
        /*
         * First copy over the btree block header.
         */
        memcpy(new_broot, ifp->if_broot, XFS_BTREE_LBLOCK_LEN);
    } else {
        new_broot = NULL;
        ifp->if_flags &= ~XFS_IFBROOT;
    }

    /*
     * Only copy the records and pointers if there are any.
     */
    if (new_max > 0) {
        /*
         * First copy the records.
         */
        op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
        np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
        memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));

        /*
         * Then copy the pointers.
         */
        op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
                             ifp->if_broot_bytes);
        np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
                             (int)new_size);
        memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
    }
    kmem_free(ifp->if_broot);
    ifp->if_broot = new_broot;
    ifp->if_broot_bytes = (int)new_size;
    ASSERT(ifp->if_broot_bytes <=
        XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
    return;
}
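
/*
 * Illustrative sketch, not part of the original file (the function name
 * example_broot_size() is hypothetical): the incore root grows in units
 * of whole records, so the byte size for a given record count is just
 * XFS_BMAP_BROOT_SPACE_CALC(nrecs).
 */
static size_t
example_broot_size(
    int nrecs)
{
    return (size_t)XFS_BMAP_BROOT_SPACE_CALC(nrecs);
}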
/*
 * This is called when the amount of space needed for if_data
 * is increased or decreased.  The change in size is indicated by
 * the number of bytes that need to be added or deleted in the
 * byte_diff parameter.
 *
 * If the amount of space needed has decreased below the size of the
 * inline buffer, then switch to using the inline buffer.  Otherwise,
 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
 * to what is needed.
 *
 * ip -- the inode whose if_data area is changing
 * byte_diff -- the change in the number of bytes, positive or negative,
 *	 requested for the if_data array.
 */
void
xfs_idata_realloc(
	xfs_inode_t	*ip,
	int		byte_diff,
	int		whichfork)
{
	xfs_ifork_t	*ifp;
	int		new_size;
	int		real_size;

	if (byte_diff == 0) {
		return;
	}

	ifp = XFS_IFORK_PTR(ip, whichfork);
	new_size = (int)ifp->if_bytes + byte_diff;
	ASSERT(new_size >= 0);

	if (new_size == 0) {
		if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
			kmem_free(ifp->if_u1.if_data);
		}
		ifp->if_u1.if_data = NULL;
		real_size = 0;
	} else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
		/*
		 * If the valid extents/data can fit in if_inline_ext/data,
		 * copy them from the malloc'd vector and free it.
		 */
		if (ifp->if_u1.if_data == NULL) {
			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
			ASSERT(ifp->if_real_bytes != 0);
			memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
				new_size);
			kmem_free(ifp->if_u1.if_data);
			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
		}
		real_size = 0;
	} else {
		/*
		 * Stuck with malloc/realloc.
		 * For inline data, the underlying buffer must be
		 * a multiple of 4 bytes in size so that it can be
		 * logged and stay on word boundaries.  We enforce
		 * that here.
		 */
		real_size = roundup(new_size, 4);
		if (ifp->if_u1.if_data == NULL) {
			ASSERT(ifp->if_real_bytes == 0);
			ifp->if_u1.if_data = kmem_alloc(real_size,
							KM_SLEEP | KM_NOFS);
		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
			/*
			 * Only do the realloc if the underlying size
			 * is really changing.
			 */
			if (ifp->if_real_bytes != real_size) {
				ifp->if_u1.if_data =
					kmem_realloc(ifp->if_u1.if_data,
							real_size,
							ifp->if_real_bytes,
							KM_SLEEP | KM_NOFS);
			}
		} else {
			ASSERT(ifp->if_real_bytes == 0);
			ifp->if_u1.if_data = kmem_alloc(real_size,
							KM_SLEEP | KM_NOFS);
			memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
				ifp->if_bytes);
		}
	}
	ifp->if_real_bytes = real_size;
	ifp->if_bytes = new_size;
	ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
}
/*
 * This is called to free all the memory associated with a fork.
 * However, the caller still needs to free the inode itself.
 */
void
xfs_idestroy_fork(
	xfs_inode_t	*ip,
	int		whichfork)
{
	xfs_ifork_t	*ifp;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (ifp->if_broot != NULL) {
		kmem_free(ifp->if_broot);
		ifp->if_broot = NULL;
	}

	/*
	 * If the format is local, then we can't have an extents
	 * array so just look for an inline data array.  If we're
	 * not local then we may or may not have an extents list,
	 * so check and free it up if we do.
	 */
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
		    (ifp->if_u1.if_data != NULL)) {
			ASSERT(ifp->if_real_bytes != 0);
			kmem_free(ifp->if_u1.if_data);
			ifp->if_u1.if_data = NULL;
			ifp->if_real_bytes = 0;
		}
	} else if ((ifp->if_flags & XFS_IFEXTENTS) &&
		   ((ifp->if_flags & XFS_IFEXTIREC) ||
		    ((ifp->if_u1.if_extents != NULL) &&
		     (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
		ASSERT(ifp->if_real_bytes != 0);
		xfs_iext_destroy(ifp);
	}
	ASSERT(ifp->if_u1.if_extents == NULL ||
	       ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
	ASSERT(ifp->if_real_bytes == 0);
	if (whichfork == XFS_ATTR_FORK) {
		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
		ip->i_afp = NULL;
	}
}
/*
 * This is called to unpin an inode.  The caller must have the inode locked
 * in at least shared mode so that the buffer cannot be subsequently pinned
 * once someone is waiting for it to be unpinned.
 */
static void
xfs_iunpin(
	struct xfs_inode	*ip)
{
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));

	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);

	/* Give the log a push to start the unpinning I/O */
	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
}

static void
__xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);

	xfs_iunpin(ip);

	do {
		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_ipincount(ip))
			io_schedule();
	} while (xfs_ipincount(ip));
	finish_wait(wq, &wait.wait);
}

void
xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	if (xfs_ipincount(ip))
		__xfs_iunpin_wait(ip);
}
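
/*
 * Example (illustrative sketch, hypothetical caller): any path that is
 * about to write an inode back must first drain the pin count, which is
 * exactly what the xfs_iunpin_wait() call at the top of xfs_iflush()
 * does.
 */
#if 0
static void example_flush_prologue(struct xfs_inode *ip)
{
	/* pushes the log, then sleeps until no log I/O pins the inode */
	xfs_iunpin_wait(ip);
	ASSERT(xfs_ipincount(ip) == 0);
}
#endif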
/*
 * xfs_iextents_copy()
 *
 * This is called to copy the REAL extents (as opposed to the delayed
 * allocation extents) from the inode into the given buffer.  It
 * returns the number of bytes copied into the buffer.
 *
 * If there are no delayed allocation extents, then we can just
 * memcpy() the extents into the buffer.  Otherwise, we need to
 * examine each extent in turn and skip those which are delayed.
 */
int
xfs_iextents_copy(
	xfs_inode_t		*ip,
	xfs_bmbt_rec_t		*dp,
	int			whichfork)
{
	int			copied;
	int			i;
	xfs_ifork_t		*ifp;
	int			nrecs;
	xfs_fsblock_t		start_block;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(ifp->if_bytes > 0);

	nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
	ASSERT(nrecs > 0);

	/*
	 * There are some delayed allocation extents in the
	 * inode, so copy the extents one at a time and skip
	 * the delayed ones.  There must be at least one
	 * non-delayed extent.
	 */
	copied = 0;
	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		start_block = xfs_bmbt_get_startblock(ep);
		if (isnullstartblock(start_block)) {
			/*
			 * It's a delayed allocation extent, so skip it.
			 */
			continue;
		}

		/* Translate to on disk format */
		put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
		put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
		dp++;
		copied++;
	}
	ASSERT(copied != 0);
	xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));

	return (copied * (uint)sizeof(xfs_bmbt_rec_t));
}
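
/*
 * Example (illustrative sketch): delayed allocation extents carry a
 * magic "null" start block, so a walker that wants only real extents
 * filters exactly the way the copy loop above does.
 */
#if 0
static int example_count_real_extents(xfs_ifork_t *ifp, int nrecs)
{
	int	i;
	int	real = 0;

	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);

		/* a null start block marks a delayed allocation extent */
		if (!isnullstartblock(xfs_bmbt_get_startblock(ep)))
			real++;
	}
	return real;
}
#endif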
/*
 * Each of the following cases stores data into the same region
 * of the on-disk inode, so only one of them can be valid at
 * any given time. While it is possible to have conflicting formats
 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
 * in EXTENTS format, this can only happen when the fork has
 * changed formats after being modified but before being flushed.
 * In these cases, the format always takes precedence, because the
 * format indicates the current state of the fork.
 */
STATIC void
xfs_iflush_fork(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip,
	xfs_inode_log_item_t	*iip,
	int			whichfork,
	xfs_buf_t		*bp)
{
	char			*cp;
	xfs_ifork_t		*ifp;
	xfs_mount_t		*mp;
#ifdef XFS_TRANS_DEBUG
	int			first;
#endif
	static const short	brootflag[2] =
		{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
	static const short	dataflag[2] =
		{ XFS_ILOG_DDATA, XFS_ILOG_ADATA };
	static const short	extflag[2] =
		{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };

	if (!iip)
		return;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	/*
	 * This can happen if we gave up in iformat in an error path,
	 * for the attribute fork.
	 */
	if (!ifp) {
		ASSERT(whichfork == XFS_ATTR_FORK);
		return;
	}
	cp = XFS_DFORK_PTR(dip, whichfork);
	mp = ip->i_mount;
	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_fields & dataflag[whichfork]) &&
		    (ifp->if_bytes > 0)) {
			ASSERT(ifp->if_u1.if_data != NULL);
			ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
			memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
		}
		break;

	case XFS_DINODE_FMT_EXTENTS:
		ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
		       !(iip->ili_fields & extflag[whichfork]));
		if ((iip->ili_fields & extflag[whichfork]) &&
		    (ifp->if_bytes > 0)) {
			ASSERT(xfs_iext_get_ext(ifp, 0));
			ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
			(void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
				whichfork);
		}
		break;

	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_fields & brootflag[whichfork]) &&
		    (ifp->if_broot_bytes > 0)) {
			ASSERT(ifp->if_broot != NULL);
			ASSERT(ifp->if_broot_bytes <=
			       (XFS_IFORK_SIZE(ip, whichfork) +
				XFS_BROOT_SIZE_ADJ));
			xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
				(xfs_bmdr_block_t *)cp,
				XFS_DFORK_SIZE(dip, mp, whichfork));
		}
		break;

	case XFS_DINODE_FMT_DEV:
		if (iip->ili_fields & XFS_ILOG_DEV) {
			ASSERT(whichfork == XFS_DATA_FORK);
			xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev);
		}
		break;

	case XFS_DINODE_FMT_UUID:
		if (iip->ili_fields & XFS_ILOG_UUID) {
			ASSERT(whichfork == XFS_DATA_FORK);
			memcpy(XFS_DFORK_DPTR(dip),
				&ip->i_df.if_u2.if_uuid,
				sizeof(uuid_t));
		}
		break;

	default:
		ASSERT(0);
		break;
	}
}
STATIC int
xfs_iflush_cluster(
	xfs_inode_t	*ip,
	xfs_buf_t	*bp)
{
	xfs_mount_t		*mp = ip->i_mount;
	struct xfs_perag	*pag;
	unsigned long		first_index, mask;
	unsigned long		inodes_per_cluster;
	int			ilist_size;
	xfs_inode_t		**ilist;
	xfs_inode_t		*iq;
	int			nr_found;
	int			clcount = 0;
	int			bufwasdelwri;
	int			i;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));

	inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
	if (!ilist)
		goto out_put;

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
	rcu_read_lock();
	/* really need a gang lookup range call here */
	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
					first_index, inodes_per_cluster);
	if (nr_found == 0)
		goto out_free;

	for (i = 0; i < nr_found; i++) {
		iq = ilist[i];
		if (iq == ip)
			continue;

		/*
		 * because this is an RCU protected lookup, we could find a
		 * recently freed or even reallocated inode during the lookup.
		 * We need to check under the i_flags_lock for a valid inode
		 * here. Skip it if it is not valid or the wrong inode.
		 */
		spin_lock(&iq->i_flags_lock);
		if (!iq->i_ino ||
		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
			spin_unlock(&iq->i_flags_lock);
			continue;
		}
		spin_unlock(&iq->i_flags_lock);

		/*
		 * Do an un-protected check to see if the inode is dirty and
		 * is a candidate for flushing.  These checks will be repeated
		 * later after the appropriate locks are acquired.
		 */
		if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
			continue;

		/*
		 * Try to get locks.  If any are unavailable or it is pinned,
		 * then this inode cannot be flushed and is skipped.
		 */
		if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
			continue;
		if (!xfs_iflock_nowait(iq)) {
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}
		if (xfs_ipincount(iq)) {
			xfs_ifunlock(iq);
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}

		/*
		 * arriving here means that this inode can be flushed.  First
		 * re-check that it's dirty before flushing.
		 */
		if (!xfs_inode_clean(iq)) {
			int	error;

			error = xfs_iflush_int(iq, bp);
			if (error) {
				xfs_iunlock(iq, XFS_ILOCK_SHARED);
				goto cluster_corrupt_out;
			}
			clcount++;
		} else {
			xfs_ifunlock(iq);
		}
		xfs_iunlock(iq, XFS_ILOCK_SHARED);
	}

	if (clcount) {
		XFS_STATS_INC(xs_icluster_flushcnt);
		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
	}

out_free:
	rcu_read_unlock();
	kmem_free(ilist);
out_put:
	xfs_perag_put(pag);
	return 0;


cluster_corrupt_out:
	/*
	 * Corruption detected in the clustering loop.  Invalidate the
	 * inode buffer and shut down the filesystem.
	 */
	rcu_read_unlock();
	/*
	 * Clean up the buffer.  If it was delwri, just release it --
	 * brelse can handle it with no problems.  If not, shut down the
	 * filesystem before releasing the buffer.
	 */
	bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
	if (bufwasdelwri)
		xfs_buf_relse(bp);

	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

	if (!bufwasdelwri) {
		/*
		 * Just like incore_relse: if we have b_iodone functions,
		 * mark the buffer as an error and call them.  Otherwise
		 * mark it as stale and brelse.
		 */
		if (bp->b_iodone) {
			XFS_BUF_UNDONE(bp);
			xfs_buf_stale(bp);
			xfs_buf_ioerror(bp, EIO);
			xfs_buf_ioend(bp, 0);
		} else {
			xfs_buf_stale(bp);
			xfs_buf_relse(bp);
		}
	}

	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(iq, false);
	kmem_free(ilist);
	xfs_perag_put(pag);
	return XFS_ERROR(EFSCORRUPTED);
}
/*
 * Flush dirty inode metadata into the backing buffer.
 *
 * The caller must have the inode lock and the inode flush lock held.  The
 * inode lock will still be held upon return to the caller, and the inode
 * flush lock will be released after the inode has reached the disk.
 *
 * The caller must write out the buffer returned in *bpp and release it.
 */
int
xfs_iflush(
	struct xfs_inode	*ip,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buf		*bp;
	struct xfs_dinode	*dip;
	int			error;

	XFS_STATS_INC(xs_iflush_count);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));

	*bpp = NULL;

	xfs_iunpin_wait(ip);

	/*
	 * For stale inodes we cannot rely on the backing buffer remaining
	 * stale in cache for the remaining life of the stale inode and so
	 * xfs_imap_to_bp() below may give us a buffer that no longer contains
	 * inodes.  We have to check this after ensuring the inode is
	 * unpinned so that it is safe to reclaim the stale inode after the
	 * flush call.
	 */
	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_ifunlock(ip);
		return 0;
	}

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this inode
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = XFS_ERROR(EIO);
		goto abort_out;
	}

	/*
	 * Get the buffer containing the on-disk inode.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
			       0);
	if (error || !bp) {
		xfs_ifunlock(ip);
		return error;
	}

	/*
	 * First flush out the inode that xfs_iflush was called with.
	 */
	error = xfs_iflush_int(ip, bp);
	if (error)
		goto corrupt_out;

	/*
	 * If the buffer is pinned then push on the log now so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp))
		xfs_log_force(mp, 0);

	/*
	 * inode clustering:
	 * see if other inodes can be gathered into this write
	 */
	error = xfs_iflush_cluster(ip, bp);
	if (error)
		goto cluster_corrupt_out;

	*bpp = bp;
	return 0;

corrupt_out:
	xfs_buf_relse(bp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
cluster_corrupt_out:
	error = XFS_ERROR(EFSCORRUPTED);
abort_out:
	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(ip, false);
	return error;
}
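
/*
 * Example (illustrative sketch, hypothetical caller): xfs_iflush() hands
 * back a locked cluster buffer in *bpp and the caller owns the write and
 * the release.  Real callers queue the buffer for delayed write; a
 * synchronous variant would look roughly like this.
 */
#if 0
static int example_flush_one_inode(struct xfs_inode *ip)
{
	struct xfs_buf	*bp;
	int		error;

	/* caller must already hold the ilock and the flush lock */
	error = xfs_iflush(ip, &bp);
	if (error)
		return error;
	error = xfs_bwrite(bp);	/* write the cluster buffer... */
	xfs_buf_relse(bp);	/* ...then drop our buffer reference */
	return error;
}
#endif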
STATIC int
xfs_iflush_int(
	xfs_inode_t		*ip,
	xfs_buf_t		*bp)
{
	xfs_inode_log_item_t	*iip;
	xfs_dinode_t		*dip;
	xfs_mount_t		*mp;
#ifdef XFS_TRANS_DEBUG
	int			first;
#endif

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));

	iip = ip->i_itemp;
	mp = ip->i_mount;

	/* set *dip = inode's place in the buffer */
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);

	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
			       mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
				mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
			__func__, ip->i_ino, ip, ip->i_d.di_magic);
		goto corrupt_out;
	}
	if (S_ISREG(ip->i_d.di_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad regular inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	} else if (S_ISDIR(ip->i_d.di_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad directory inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	}
	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
				XFS_RANDOM_IFLUSH_5)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: detected corrupt incore inode %Lu, "
			"total extents = %d, nblocks = %Ld, ptr 0x%p",
			__func__, ip->i_ino,
			ip->i_d.di_nextents + ip->i_d.di_anextents,
			ip->i_d.di_nblocks, ip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
				mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
		goto corrupt_out;
	}
	/*
	 * bump the flush iteration count, used to detect flushes which
	 * postdate a log record during recovery.
	 */
	ip->i_d.di_flushiter++;

	/*
	 * Copy the dirty parts of the inode into the on-disk
	 * inode.  We always copy out the core of the inode,
	 * because if the inode is dirty at all the core must
	 * be.
	 */
	xfs_dinode_to_disk(dip, &ip->i_d);

	/* Wrap, we never let the log put out DI_MAX_FLUSH */
	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
		ip->i_d.di_flushiter = 0;

	/*
	 * If this is really an old format inode and the superblock version
	 * has not been updated to support only new format inodes, then
	 * convert back to the old inode format.  If the superblock version
	 * has been updated, then make the conversion permanent.
	 */
	ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
	if (ip->i_d.di_version == 1) {
		if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
			/*
			 * Convert it back.
			 */
			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
			dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
		} else {
			/*
			 * The superblock version has already been bumped,
			 * so just make the conversion to the new inode
			 * format permanent.
			 */
			ip->i_d.di_version = 2;
			dip->di_version = 2;
			ip->i_d.di_onlink = 0;
			dip->di_onlink = 0;
			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
			memset(&(dip->di_pad[0]), 0,
			      sizeof(dip->di_pad));
			ASSERT(xfs_get_projid(ip) == 0);
		}
	}

	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
	if (XFS_IFORK_Q(ip))
		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
	xfs_inobp_check(mp, bp);

	/*
	 * We've recorded everything logged in the inode, so we'd like to clear
	 * the ili_fields bits so we don't log and flush things unnecessarily.
	 * However, we can't stop logging all this information until the data
	 * we've copied into the disk buffer is written to disk.  If we did we
	 * might overwrite the copy of the inode in the log with all the data
	 * after re-logging only part of it, and in the face of a crash we
	 * wouldn't have all the data we need to recover.
	 *
	 * What we do is move the bits to the ili_last_fields field.  When
	 * logging the inode, these bits are moved back to the ili_fields field.
	 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
	 * know that the information those bits represent is permanently on
	 * disk.  As long as the flush completes before the inode is logged
	 * again, then both ili_fields and ili_last_fields will be cleared.
	 *
	 * We can play with the ili_fields bits here, because the inode lock
	 * must be held exclusively in order to set bits there and the flush
	 * lock protects the ili_last_fields bits.  Set ili_logged so the flush
	 * done routine can tell whether or not to look in the AIL.  Also, store
	 * the current LSN of the inode so that we can tell whether the item has
	 * moved in the AIL from xfs_iflush_done().  In order to read the lsn we
	 * need the AIL lock, because it is a 64 bit value that cannot be read
	 * atomically.
	 */
	if (iip != NULL && iip->ili_fields != 0) {
		iip->ili_last_fields = iip->ili_fields;
		iip->ili_fields = 0;
		iip->ili_logged = 1;

		xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
					&iip->ili_item.li_lsn);

		/*
		 * Attach the function xfs_iflush_done to the inode's
		 * buffer.  This will remove the inode from the AIL
		 * and unlock the inode's flush lock when the inode is
		 * completely written to disk.
		 */
		xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);

		ASSERT(bp->b_fspriv != NULL);
		ASSERT(bp->b_iodone != NULL);
	} else {
		/*
		 * We're flushing an inode which is not in the AIL and has
		 * not been logged.  For this case we can immediately drop
		 * the inode flush lock because we can avoid the whole
		 * AIL state thing.  It's OK to drop the flush lock now,
		 * because we've already locked the buffer and to do anything
		 * you really need both.
		 */
		if (iip != NULL) {
			ASSERT(iip->ili_logged == 0);
			ASSERT(iip->ili_last_fields == 0);
			ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
		}
		xfs_ifunlock(ip);
	}

	return 0;

corrupt_out:
	return XFS_ERROR(EFSCORRUPTED);
}
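
/*
 * Example (illustrative sketch): di_flushiter wraps before it can reach
 * DI_MAX_FLUSH on disk, leaving that value free as an "always replay"
 * sentinel for log recovery.  The wrap rule used above, in isolation:
 */
#if 0
static __uint16_t example_next_flushiter(__uint16_t flushiter)
{
	if (++flushiter == DI_MAX_FLUSH)
		flushiter = 0;
	return flushiter;
}
#endif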
/*
 * Return a pointer to the extent record at file index idx.
 */
xfs_bmbt_rec_host_t *
xfs_iext_get_ext(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx)		/* index of target extent */
{
	ASSERT(idx >= 0);
	ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));

	if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
		return ifp->if_u1.if_ext_irec->er_extbuf;
	} else if (ifp->if_flags & XFS_IFEXTIREC) {
		xfs_ext_irec_t	*erp;		/* irec pointer */
		int		erp_idx = 0;	/* irec index */
		xfs_extnum_t	page_idx = idx;	/* ext index in target list */

		erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
		return &erp->er_extbuf[page_idx];
	} else if (ifp->if_bytes) {
		return &ifp->if_u1.if_extents[idx];
	} else {
		return NULL;
	}
}
/*
 * Insert new item(s) into the extent records for incore inode
 * fork 'ifp'.  'count' new items are inserted at index 'idx'.
 */
void
xfs_iext_insert(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extnum_t	idx,		/* starting index of new items */
	xfs_extnum_t	count,		/* number of inserted items */
	xfs_bmbt_irec_t	*new,		/* items to insert */
	int		state)		/* type of extent conversion */
{
	xfs_ifork_t	*ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
	xfs_extnum_t	i;		/* extent record index */

	trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);

	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	xfs_iext_add(ifp, idx, count);
	for (i = idx; i < idx + count; i++, new++)
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
}
/*
 * This is called when the amount of space required for incore file
 * extents needs to be increased.  The ext_diff parameter stores the
 * number of new extents being added and the idx parameter contains
 * the extent index where the new extents will be added.  If the new
 * extents are being appended, then we just need to (re)allocate and
 * initialize the space.  Otherwise, if the new extents are being
 * inserted into the middle of the existing entries, a bit more work
 * is required to make room for the new extents to be inserted.  The
 * caller is responsible for filling in the new extent entries upon
 * return.
 */
void
xfs_iext_add(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin adding exts */
	int		ext_diff)	/* number of extents to add */
{
	int		byte_diff;	/* new bytes being added */
	int		new_size;	/* size of extents after adding */
	xfs_extnum_t	nextents;	/* number of extents in file */

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT((idx >= 0) && (idx <= nextents));
	byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
	new_size = ifp->if_bytes + byte_diff;
	/*
	 * If the new number of extents (nextents + ext_diff)
	 * fits inside the inode, then continue to use the inline
	 * extent buffer.
	 */
	if (nextents + ext_diff <= XFS_INLINE_EXTS) {
		if (idx < nextents) {
			memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
				&ifp->if_u2.if_inline_ext[idx],
				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
			memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
		}
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
		ifp->if_real_bytes = 0;
	}
	/*
	 * Otherwise use a linear (direct) extent list.
	 * If the extents are currently inside the inode,
	 * xfs_iext_realloc_direct will switch us from
	 * inline to direct extent allocation mode.
	 */
	else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
		xfs_iext_realloc_direct(ifp, new_size);
		if (idx < nextents) {
			memmove(&ifp->if_u1.if_extents[idx + ext_diff],
				&ifp->if_u1.if_extents[idx],
				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
			memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
		}
	}
	/* Indirection array */
	else {
		xfs_ext_irec_t	*erp;
		int		erp_idx = 0;
		int		page_idx = idx;

		ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
		if (ifp->if_flags & XFS_IFEXTIREC) {
			erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
		} else {
			xfs_iext_irec_init(ifp);
			ASSERT(ifp->if_flags & XFS_IFEXTIREC);
			erp = ifp->if_u1.if_ext_irec;
		}
		/* Extents fit in target extent page */
		if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
			if (page_idx < erp->er_extcount) {
				memmove(&erp->er_extbuf[page_idx + ext_diff],
					&erp->er_extbuf[page_idx],
					(erp->er_extcount - page_idx) *
					sizeof(xfs_bmbt_rec_t));
				memset(&erp->er_extbuf[page_idx], 0, byte_diff);
			}
			erp->er_extcount += ext_diff;
			xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
		}
		/* Insert a new extent page */
		else if (erp) {
			xfs_iext_add_indirect_multi(ifp,
				erp_idx, page_idx, ext_diff);
		}
		/*
		 * If extent(s) are being appended to the last page in
		 * the indirection array and the new extent(s) don't fit
		 * in the page, then erp is NULL and erp_idx is set to
		 * the next index needed in the indirection array.
		 */
		else {
			int	count = ext_diff;

			while (count) {
				erp = xfs_iext_irec_new(ifp, erp_idx);
				erp->er_extcount = count;
				count -= MIN(count, (int)XFS_LINEAR_EXTS);
				if (count)
					erp_idx++;
			}
		}
	}
	ifp->if_bytes = new_size;
}
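
/*
 * Example (illustrative sketch): the three extent storage regimes that
 * xfs_iext_add() chooses between, written as a pure decision over the
 * post-insert extent count.
 */
#if 0
static const char *example_iext_mode(xfs_extnum_t nextents, int ext_diff)
{
	if (nextents + ext_diff <= XFS_INLINE_EXTS)
		return "inline";	/* if_u2.if_inline_ext */
	if (nextents + ext_diff <= XFS_LINEAR_EXTS)
		return "direct";	/* one contiguous if_extents buffer */
	return "indirect";		/* if_ext_irec page array */
}
#endif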
/*
 * This is called when incore extents are being added to the indirection
 * array and the new extents do not fit in the target extent list.  The
 * erp_idx parameter contains the irec index for the target extent list
 * in the indirection array, and the idx parameter contains the extent
 * index within the list.  The number of extents being added is stored
 * in the count parameter.
 *
 *    |-------|   |-------|
 *    |       |   |       |    idx - number of extents before idx
 *    |  idx  |   | count |
 *    |       |   |       |    count - number of extents being inserted at idx
 *    |-------|   |-------|
 *    | count |   | nex2  |    nex2 - number of extents after idx + count
 *    |-------|   |-------|
 */
void
xfs_iext_add_indirect_multi(
	xfs_ifork_t	*ifp,			/* inode fork pointer */
	int		erp_idx,		/* target extent irec index */
	xfs_extnum_t	idx,			/* index within target list */
	int		count)			/* new extents being added */
{
	int		byte_diff;		/* new bytes being added */
	xfs_ext_irec_t	*erp;			/* pointer to irec entry */
	xfs_extnum_t	ext_diff;		/* number of extents to add */
	xfs_extnum_t	ext_cnt;		/* new extents still needed */
	xfs_extnum_t	nex2;			/* extents after idx + count */
	xfs_bmbt_rec_t	*nex2_ep = NULL;	/* temp list for nex2 extents */
	int		nlists;			/* number of irec's (lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	nex2 = erp->er_extcount - idx;
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/*
	 * Save second part of target extent list
	 * (all extents past idx).
	 */
	if (nex2) {
		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
		nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
		memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
		erp->er_extcount -= nex2;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
		memset(&erp->er_extbuf[idx], 0, byte_diff);
	}

	/*
	 * Add the new extents to the end of the target
	 * list, then allocate new irec record(s) and
	 * extent buffer(s) as needed to store the rest
	 * of the new extents.
	 */
	ext_cnt = count;
	ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
	if (ext_diff) {
		erp->er_extcount += ext_diff;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
		ext_cnt -= ext_diff;
	}
	while (ext_cnt) {
		erp_idx++;
		erp = xfs_iext_irec_new(ifp, erp_idx);
		ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
		erp->er_extcount = ext_diff;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
		ext_cnt -= ext_diff;
	}

	/* Add nex2 extents back to indirection array */
	if (nex2) {
		xfs_extnum_t	ext_avail;
		int		i;

		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
		i = 0;
		/*
		 * If nex2 extents fit in the current page, append
		 * nex2_ep after the new extents.
		 */
		if (nex2 <= ext_avail) {
			i = erp->er_extcount;
		}
		/*
		 * Otherwise, check if space is available in the
		 * next page.
		 */
		else if ((erp_idx < nlists - 1) &&
			 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
			  ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
			erp_idx++;
			erp++;
			/* Create a hole for nex2 extents */
			memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
				erp->er_extcount * sizeof(xfs_bmbt_rec_t));
		}
		/*
		 * Final choice, create a new extent page for
		 * nex2 extents.
		 */
		else {
			erp_idx++;
			erp = xfs_iext_irec_new(ifp, erp_idx);
		}
		memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
		kmem_free(nex2_ep);
		erp->er_extcount += nex2;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
	}
}
/*
 * This is called when the amount of space required for incore file
 * extents needs to be decreased.  The ext_diff parameter stores the
 * number of extents to be removed and the idx parameter contains
 * the extent index where the extents will be removed from.
 *
 * If the amount of space needed has decreased below the linear
 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
 * extent array.  Otherwise, use kmem_realloc() to adjust the
 * size to what is needed.
 */
void
xfs_iext_remove(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extnum_t	idx,		/* index to begin removing exts */
	int		ext_diff,	/* number of extents to remove */
	int		state)		/* type of extent conversion */
{
	xfs_ifork_t	*ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		new_size;	/* size of extents after removal */

	trace_xfs_iext_remove(ip, idx, state, _RET_IP_);

	ASSERT(ext_diff > 0);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);

	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	} else if (ifp->if_flags & XFS_IFEXTIREC) {
		xfs_iext_remove_indirect(ifp, idx, ext_diff);
	} else if (ifp->if_real_bytes) {
		xfs_iext_remove_direct(ifp, idx, ext_diff);
	} else {
		xfs_iext_remove_inline(ifp, idx, ext_diff);
	}
	ifp->if_bytes = new_size;
}
/*
 * This removes ext_diff extents from the inline buffer, beginning
 * at extent index idx.
 */
void
xfs_iext_remove_inline(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing exts */
	int		ext_diff)	/* number of extents to remove */
{
	int		nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	ASSERT(idx < XFS_INLINE_EXTS);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(((nextents - ext_diff) > 0) &&
		(nextents - ext_diff) < XFS_INLINE_EXTS);

	if (idx + ext_diff < nextents) {
		memmove(&ifp->if_u2.if_inline_ext[idx],
			&ifp->if_u2.if_inline_ext[idx + ext_diff],
			(nextents - (idx + ext_diff)) *
			 sizeof(xfs_bmbt_rec_t));
		memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
			0, ext_diff * sizeof(xfs_bmbt_rec_t));
	} else {
		memset(&ifp->if_u2.if_inline_ext[idx], 0,
			ext_diff * sizeof(xfs_bmbt_rec_t));
	}
}
/*
 * This removes ext_diff extents from a linear (direct) extent list,
 * beginning at extent index idx.  If the extents are being removed
 * from the end of the list (ie. truncate) then we just need to re-
 * allocate the list to remove the extra space.  Otherwise, if the
 * extents are being removed from the middle of the existing extent
 * entries, then we first need to move the extent records beginning
 * at idx + ext_diff up in the list to overwrite the records being
 * removed, then remove the extra space via kmem_realloc.
 */
void
xfs_iext_remove_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing exts */
	int		ext_diff)	/* number of extents to remove */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		new_size;	/* size of extents after removal */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	new_size = ifp->if_bytes -
		(ext_diff * sizeof(xfs_bmbt_rec_t));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (new_size == 0) {
		xfs_iext_destroy(ifp);
		return;
	}
	/* Move extents up in the list (if needed) */
	if (idx + ext_diff < nextents) {
		memmove(&ifp->if_u1.if_extents[idx],
			&ifp->if_u1.if_extents[idx + ext_diff],
			(nextents - (idx + ext_diff)) *
			 sizeof(xfs_bmbt_rec_t));
	}
	memset(&ifp->if_u1.if_extents[nextents - ext_diff],
		0, ext_diff * sizeof(xfs_bmbt_rec_t));
	/*
	 * Reallocate the direct extent list.  If the extents
	 * will fit inside the inode then xfs_iext_realloc_direct
	 * will switch from direct to inline extent allocation
	 * mode for us.
	 */
	xfs_iext_realloc_direct(ifp, new_size);
	ifp->if_bytes = new_size;
}
/*
 * This is called when incore extents are being removed from the
 * indirection array and the extents being removed span multiple extent
 * buffers.  The idx parameter contains the file extent index where we
 * want to begin removing extents, and the count parameter contains
 * how many extents need to be removed.
 *
 *    |-------|   |-------|
 *    | nex1  |   |       |    nex1 - number of extents before idx
 *    |-------|   | count |
 *    |       |   |       |    count - number of extents being removed at idx
 *    | count |   |-------|
 *    |       |   | nex2  |    nex2 - number of extents after idx + count
 *    |-------|   |-------|
 */
void
xfs_iext_remove_indirect(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing extents */
	int		count)		/* number of extents to remove */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		erp_idx = 0;	/* indirection array index */
	xfs_extnum_t	ext_cnt;	/* extents left to remove */
	xfs_extnum_t	ext_diff;	/* extents to remove in current list */
	xfs_extnum_t	nex1;		/* number of extents before idx */
	xfs_extnum_t	nex2;		/* extents after idx + count */
	int		page_idx = idx;	/* index in target extent list */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
	ASSERT(erp != NULL);
	nex1 = page_idx;
	ext_cnt = count;
	while (ext_cnt) {
		nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
		ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
		/*
		 * Check for deletion of entire list;
		 * xfs_iext_irec_remove() updates extent offsets.
		 */
		if (ext_diff == erp->er_extcount) {
			xfs_iext_irec_remove(ifp, erp_idx);
			ext_cnt -= ext_diff;
			nex1 = 0;
			if (ext_cnt) {
				ASSERT(erp_idx < ifp->if_real_bytes /
					XFS_IEXT_BUFSZ);
				erp = &ifp->if_u1.if_ext_irec[erp_idx];
				nex1 = 0;
			}
			continue;
		}
		/* Move extents up (if needed) */
		if (nex2) {
			memmove(&erp->er_extbuf[nex1],
				&erp->er_extbuf[nex1 + ext_diff],
				nex2 * sizeof(xfs_bmbt_rec_t));
		}
		/* Zero out rest of page */
		memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
			((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
		/* Update remaining counters */
		erp->er_extcount -= ext_diff;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
		ext_cnt -= ext_diff;
		nex1 = 0;
		erp_idx++;
		erp++;
	}
	ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
	xfs_iext_irec_compact(ifp);
}
/*
 * Create, destroy, or resize a linear (direct) block of extents.
 */
void
xfs_iext_realloc_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new size of extents */
{
	int		rnew_size;	/* real new size of extents */

	rnew_size = new_size;

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
		((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
		 (new_size != ifp->if_real_bytes)));

	/* Free extent records */
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	}
	/* Resize direct extent list and zero any new bytes */
	else if (ifp->if_real_bytes) {
		/* Check if extents will fit inside the inode */
		if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
			xfs_iext_direct_to_inline(ifp, new_size /
				(uint)sizeof(xfs_bmbt_rec_t));
			ifp->if_bytes = new_size;
			return;
		}
		if (!is_power_of_2(new_size)){
			rnew_size = roundup_pow_of_two(new_size);
		}
		if (rnew_size != ifp->if_real_bytes) {
			ifp->if_u1.if_extents =
				kmem_realloc(ifp->if_u1.if_extents,
						rnew_size,
						ifp->if_real_bytes, KM_NOFS);
		}
		if (rnew_size > ifp->if_real_bytes) {
			memset(&ifp->if_u1.if_extents[ifp->if_bytes /
				(uint)sizeof(xfs_bmbt_rec_t)], 0,
				rnew_size - ifp->if_real_bytes);
		}
	}
	/*
	 * Switch from the inline extent buffer to a direct
	 * extent list.  Be sure to include the inline extent
	 * bytes in new_size.
	 */
	else {
		new_size += ifp->if_bytes;
		if (!is_power_of_2(new_size)) {
			rnew_size = roundup_pow_of_two(new_size);
		}
		xfs_iext_inline_to_direct(ifp, rnew_size);
	}
	ifp->if_real_bytes = rnew_size;
	ifp->if_bytes = new_size;
}
/*
 * Switch from linear (direct) extent records to inline buffer.
 */
void
xfs_iext_direct_to_inline(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	nextents)	/* number of extents in file */
{
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(nextents <= XFS_INLINE_EXTS);
	/*
	 * The inline buffer was zeroed when we switched
	 * from inline to direct extent allocation mode,
	 * so we don't need to clear it here.
	 */
	memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
		nextents * sizeof(xfs_bmbt_rec_t));
	kmem_free(ifp->if_u1.if_extents);
	ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	ifp->if_real_bytes = 0;
}
/*
 * Switch from inline buffer to linear (direct) extent records.
 * new_size should already be rounded up to the next power of 2
 * by the caller (when appropriate), so use new_size as it is.
 * However, since new_size may be rounded up, we can't update
 * if_bytes here.  It is the caller's responsibility to update
 * if_bytes upon return.
 */
void
xfs_iext_inline_to_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* number of extents in file */
{
	ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
	memset(ifp->if_u1.if_extents, 0, new_size);
	if (ifp->if_bytes) {
		memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
			ifp->if_bytes);
		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
			sizeof(xfs_bmbt_rec_t));
	}
	ifp->if_real_bytes = new_size;
}
/*
 * Resize an extent indirection array to new_size bytes.
 */
void
xfs_iext_realloc_indirect(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new indirection array size */
{
	int		nlists;		/* number of irec's (ex lists) */
	int		size;		/* current indirection array size */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	size = nlists * sizeof(xfs_ext_irec_t);
	ASSERT(ifp->if_real_bytes);
	ASSERT((new_size >= 0) && (new_size != size));
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	} else {
		ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
			kmem_realloc(ifp->if_u1.if_ext_irec,
				new_size, size, KM_NOFS);
	}
}
/*
 * Switch from indirection array to linear (direct) extent allocations.
 */
void
xfs_iext_indirect_to_direct(
	 xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		size;		/* size of file extents */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);
	size = nextents * sizeof(xfs_bmbt_rec_t);

	xfs_iext_irec_compact_pages(ifp);
	ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);

	ep = ifp->if_u1.if_ext_irec->er_extbuf;
	kmem_free(ifp->if_u1.if_ext_irec);
	ifp->if_flags &= ~XFS_IFEXTIREC;
	ifp->if_u1.if_extents = ep;
	ifp->if_bytes = size;
	if (nextents < XFS_LINEAR_EXTS) {
		xfs_iext_realloc_direct(ifp, size);
	}
}
/*
 * Free incore file extents.
 */
void
xfs_iext_destroy(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	if (ifp->if_flags & XFS_IFEXTIREC) {
		int	erp_idx;
		int	nlists;

		nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
			xfs_iext_irec_remove(ifp, erp_idx);
		}
		ifp->if_flags &= ~XFS_IFEXTIREC;
	} else if (ifp->if_real_bytes) {
		kmem_free(ifp->if_u1.if_extents);
	} else if (ifp->if_bytes) {
		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
			sizeof(xfs_bmbt_rec_t));
	}
	ifp->if_u1.if_extents = NULL;
	ifp->if_real_bytes = 0;
	ifp->if_bytes = 0;
}
/*
 * Return a pointer to the extent record for file system block bno.
 */
xfs_bmbt_rec_host_t *			/* pointer to found extent record */
xfs_iext_bno_to_ext(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	xfs_extnum_t	*idxp)		/* index of target extent */
{
	xfs_bmbt_rec_host_t *base;	/* pointer to first extent */
	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
	xfs_bmbt_rec_host_t *ep = NULL;	/* pointer to target extent */
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	int		high;		/* upper boundary in search */
	xfs_extnum_t	idx = 0;	/* index of target extent */
	int		low;		/* lower boundary in search */
	xfs_extnum_t	nextents;	/* number of file extents */
	xfs_fileoff_t	startoff = 0;	/* start offset of extent */

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	if (nextents == 0) {
		*idxp = 0;
		return NULL;
	}
	low = 0;
	if (ifp->if_flags & XFS_IFEXTIREC) {
		/* Find target extent list */
		int	erp_idx = 0;

		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
		base = erp->er_extbuf;
		high = erp->er_extcount - 1;
	} else {
		base = ifp->if_u1.if_extents;
		high = nextents - 1;
	}
	/* Binary search extent records */
	while (low <= high) {
		idx = (low + high) >> 1;
		ep = base + idx;
		startoff = xfs_bmbt_get_startoff(ep);
		blockcount = xfs_bmbt_get_blockcount(ep);
		if (bno < startoff) {
			high = idx - 1;
		} else if (bno >= startoff + blockcount) {
			low = idx + 1;
		} else {
			/* Convert back to file-based extent index */
			if (ifp->if_flags & XFS_IFEXTIREC) {
				idx += erp->er_extoff;
			}
			*idxp = idx;
			return ep;
		}
	}
	/* Convert back to file-based extent index */
	if (ifp->if_flags & XFS_IFEXTIREC) {
		idx += erp->er_extoff;
	}
	if (bno >= startoff + blockcount) {
		if (++idx == nextents) {
			ep = NULL;
		} else {
			ep = xfs_iext_get_ext(ifp, idx);
		}
	}
	*idxp = idx;
	return ep;
}
/*
 * Return a pointer to the indirection array entry containing the
 * extent record for filesystem block bno.  Store the index of the
 * target irec in *erp_idxp.
 */
xfs_ext_irec_t *			/* pointer to found extent record */
xfs_iext_bno_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	int		*erp_idxp)	/* irec index of target ext list */
{
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of extent irec's (lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
			high = erp_idx - 1;
		} else if (erp_next && bno >=
			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
			low = erp_idx + 1;
		} else {
			break;
		}
	}
	*erp_idxp = erp_idx;
	return erp;
}
/*
 * Return a pointer to the indirection array entry containing the
 * extent record at file extent index *idxp.  Store the index of the
 * target irec in *erp_idxp and store the page index of the target
 * extent record in *idxp.
 */
xfs_ext_irec_t *
xfs_iext_idx_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	*idxp,		/* extent index (file -> page) */
	int		*erp_idxp,	/* pointer to target irec */
	int		realloc)	/* new bytes were just added */
{
	xfs_ext_irec_t	*prev;		/* pointer to previous irec */
	xfs_ext_irec_t	*erp = NULL;	/* pointer to current irec */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */
	xfs_extnum_t	page_idx = *idxp; /* extent index in target list */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	ASSERT(page_idx >= 0);
	ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
	ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc);

	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;

	/* Binary search extent irec's */
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		prev = erp_idx > 0 ? erp - 1 : NULL;
		if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
		     realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
			high = erp_idx - 1;
		} else if (page_idx > erp->er_extoff + erp->er_extcount ||
			   (page_idx == erp->er_extoff + erp->er_extcount &&
			    !realloc)) {
			low = erp_idx + 1;
		} else if (page_idx == erp->er_extoff + erp->er_extcount &&
			   erp->er_extcount == XFS_LINEAR_EXTS) {
			ASSERT(realloc);
			page_idx = 0;
			erp_idx++;
			erp = erp_idx < nlists ? erp + 1 : NULL;
			break;
		} else {
			page_idx -= erp->er_extoff;
			break;
		}
	}
	*idxp = page_idx;
	*erp_idxp = erp_idx;
	return erp;
}
/*
 * Allocate and initialize an indirection array once the space needed
 * for incore extents increases above XFS_IEXT_BUFSZ.
 */
void
xfs_iext_irec_init(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);

	erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);

	if (nextents == 0) {
		ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
	} else if (!ifp->if_real_bytes) {
		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
	}
	erp->er_extbuf = ifp->if_u1.if_extents;
	erp->er_extcount = nextents;
	erp->er_extoff = 0;

	ifp->if_flags |= XFS_IFEXTIREC;
	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
	ifp->if_u1.if_ext_irec = erp;
}
/*
 * Allocate and initialize a new entry in the indirection array.
 */
xfs_ext_irec_t *
xfs_iext_irec_new(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* index for new irec */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/* Resize indirection array */
	xfs_iext_realloc_indirect(ifp, ++nlists *
				  sizeof(xfs_ext_irec_t));
	/*
	 * Move records down in the array so the
	 * new page can use erp_idx.
	 */
	erp = ifp->if_u1.if_ext_irec;
	for (i = nlists - 1; i > erp_idx; i--) {
		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
	}
	ASSERT(i == erp_idx);

	/* Initialize new extent record */
	erp = ifp->if_u1.if_ext_irec;
	erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
	erp[erp_idx].er_extcount = 0;
	erp[erp_idx].er_extoff = erp_idx > 0 ?
		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
	return (&erp[erp_idx]);
}
/*
 * Remove a record from the indirection array.
 */
void
xfs_iext_irec_remove(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* irec index to remove */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	if (erp->er_extbuf) {
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
			-erp->er_extcount);
		kmem_free(erp->er_extbuf);
	}
	/* Compact extent records */
	erp = ifp->if_u1.if_ext_irec;
	for (i = erp_idx; i < nlists - 1; i++) {
		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
	}
	/*
	 * Manually free the last extent record from the indirection
	 * array.  A call to xfs_iext_realloc_indirect() with a size
	 * of zero would result in a call to xfs_iext_destroy() which
	 * would in turn call this function again, creating a nasty
	 * infinite loop.
	 */
	if (--nlists) {
		xfs_iext_realloc_indirect(ifp,
			nlists * sizeof(xfs_ext_irec_t));
	} else {
		kmem_free(ifp->if_u1.if_ext_irec);
	}
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
}
/*
 * This is called to clean up large amounts of unused memory allocated
 * by the indirection array.  Before compacting anything though, verify
 * that the indirection array is still needed and switch back to the
 * linear extent list (or even the inline buffer) if possible.  The
 * compaction policy is as follows:
 *
 *    Full Compaction: Extents fit into a single page (or inline buffer)
 * Partial Compaction: Extents occupy less than 50% of allocated space
 *      No Compaction: Extents occupy at least 50% of allocated space
 */
void
xfs_iext_irec_compact(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (nextents == 0) {
		xfs_iext_destroy(ifp);
	} else if (nextents <= XFS_INLINE_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
		xfs_iext_direct_to_inline(ifp, nextents);
	} else if (nextents <= XFS_LINEAR_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
		xfs_iext_irec_compact_pages(ifp);
	}
}
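
/*
 * Example (illustrative sketch): the compaction policy above, written as
 * a pure decision over the extent count and the number of extent pages.
 */
#if 0
static const char *
example_compaction_choice(xfs_extnum_t nextents, int nlists)
{
	if (nextents == 0)
		return "destroy";
	if (nextents <= XFS_INLINE_EXTS)
		return "full (back to inline)";
	if (nextents <= XFS_LINEAR_EXTS)
		return "full (back to direct)";
	if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1)
		return "partial (merge neighboring pages)";
	return "none";
}
#endif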
/*
 * Combine extents from neighboring extent pages.
 */
void
xfs_iext_irec_compact_pages(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
	int		erp_idx = 0;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	while (erp_idx < nlists - 1) {
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp + 1;
		if (erp_next->er_extcount <=
		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
			memcpy(&erp->er_extbuf[erp->er_extcount],
				erp_next->er_extbuf, erp_next->er_extcount *
				sizeof(xfs_bmbt_rec_t));
			erp->er_extcount += erp_next->er_extcount;
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove.
			 */
			kmem_free(erp_next->er_extbuf);
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		} else {
			erp_idx++;
		}
	}
}
/*
 * This is called to update the er_extoff field in the indirection
 * array when extents have been added or removed from one of the
 * extent lists.  erp_idx contains the irec index to begin updating
 * at and ext_diff contains the number of extents that were added
 * or removed.
 */
void
xfs_iext_irec_update_extoffs(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx,	/* irec index to update */
	int		ext_diff)	/* number of new extents */
{
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	for (i = erp_idx; i < nlists; i++) {
		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
	}
}
/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks.  The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(ip->i_d.di_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and no delalloc blocks will
	 * not have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VN_CACHED(VFS_I(ip)) == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)