fs/xfs/xfs_inode.c
1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include <linux/log2.h>
19
20 #include "xfs.h"
21 #include "xfs_fs.h"
22 #include "xfs_types.h"
23 #include "xfs_log.h"
24 #include "xfs_inum.h"
25 #include "xfs_trans.h"
26 #include "xfs_trans_priv.h"
27 #include "xfs_sb.h"
28 #include "xfs_ag.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_attr_sf.h"
34 #include "xfs_dinode.h"
35 #include "xfs_inode.h"
36 #include "xfs_buf_item.h"
37 #include "xfs_inode_item.h"
38 #include "xfs_btree.h"
39 #include "xfs_alloc.h"
40 #include "xfs_ialloc.h"
41 #include "xfs_bmap.h"
42 #include "xfs_error.h"
43 #include "xfs_utils.h"
44 #include "xfs_quota.h"
45 #include "xfs_filestream.h"
46 #include "xfs_vnodeops.h"
47 #include "xfs_cksum.h"
48 #include "xfs_trace.h"
49 #include "xfs_icache.h"
50
51 kmem_zone_t *xfs_ifork_zone;
52 kmem_zone_t *xfs_inode_zone;
53
54 /*
55 * Used in xfs_itruncate_extents(). This is the maximum number of extents
56 * freed from a file in a single transaction.
57 */
58 #define XFS_ITRUNC_MAX_EXTENTS 2
59
60 STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
61 STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
62 STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
63 STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
64
65 /*
66 * helper function to extract extent size hint from inode
67 */
68 xfs_extlen_t
69 xfs_get_extsz_hint(
70 struct xfs_inode *ip)
71 {
72 if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
73 return ip->i_d.di_extsize;
74 if (XFS_IS_REALTIME_INODE(ip))
75 return ip->i_mount->m_sb.sb_rextsize;
76 return 0;
77 }
78
79 /*
80 * This is a wrapper routine around the xfs_ilock() routine used to centralize
81 * some grungy code. It is used in places that wish to lock the inode solely
82 * for reading the extents. The reason these places can't just call
83 * xfs_ilock(SHARED) is that the inode lock also guards the bringing in of the
84 * extents from disk for a file in b-tree format. If the inode is in b-tree
85 * format, then we need to lock the inode exclusively until the extents are read
86 * in. Locking it exclusively all the time would limit our parallelism
87 * unnecessarily, though. What we do instead is check to see if the extents
88 * have been read in yet, and only lock the inode exclusively if they have not.
89 *
90 * The function returns a value which should be given to the corresponding
91 * xfs_iunlock_map_shared(). This value is the mode in which the lock was
92 * actually taken.
93 */
94 uint
95 xfs_ilock_map_shared(
96 xfs_inode_t *ip)
97 {
98 uint lock_mode;
99
100 if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
101 ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
102 lock_mode = XFS_ILOCK_EXCL;
103 } else {
104 lock_mode = XFS_ILOCK_SHARED;
105 }
106
107 xfs_ilock(ip, lock_mode);
108
109 return lock_mode;
110 }
111
112 /*
113 * This is simply the unlock routine to go with xfs_ilock_map_shared().
114 * All it does is call xfs_iunlock() with the given lock_mode.
115 */
116 void
117 xfs_iunlock_map_shared(
118 xfs_inode_t *ip,
119 unsigned int lock_mode)
120 {
121 xfs_iunlock(ip, lock_mode);
122 }
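/*
 * A minimal caller sketch (assumed usage, based on the two comments above):
 * the mode returned by xfs_ilock_map_shared() must be handed back to
 * xfs_iunlock_map_shared() so that the lock actually taken is the one dropped.
 *
 *	uint lock_mode = xfs_ilock_map_shared(ip);
 *	... read the extent list ...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */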
123
124 /*
125 * The xfs inode contains 2 locks: a multi-reader lock called the
126 * i_iolock and a multi-reader lock called the i_lock. This routine
127 * allows either or both of the locks to be obtained.
128 *
129 * The 2 locks should always be ordered so that the IO lock is
130 * obtained first in order to prevent deadlock.
131 *
132 * ip -- the inode being locked
133 * lock_flags -- this parameter indicates the inode's locks
134 * to be locked. It can be:
135 * XFS_IOLOCK_SHARED,
136 * XFS_IOLOCK_EXCL,
137 * XFS_ILOCK_SHARED,
138 * XFS_ILOCK_EXCL,
139 * XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
140 * XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
141 * XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
142 * XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
143 */
144 void
145 xfs_ilock(
146 xfs_inode_t *ip,
147 uint lock_flags)
148 {
149 trace_xfs_ilock(ip, lock_flags, _RET_IP_);
150
151 /*
152 * You can't set both SHARED and EXCL for the same lock,
153 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
154 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
155 */
156 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
157 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
158 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
159 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
160 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
161
162 if (lock_flags & XFS_IOLOCK_EXCL)
163 mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
164 else if (lock_flags & XFS_IOLOCK_SHARED)
165 mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
166
167 if (lock_flags & XFS_ILOCK_EXCL)
168 mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
169 else if (lock_flags & XFS_ILOCK_SHARED)
170 mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
171 }
172
173 /*
174 * This is just like xfs_ilock(), except that the caller
175 * is guaranteed not to sleep. It returns 1 if it gets
176 * the requested locks and 0 otherwise. If the IO lock is
177 * obtained but the inode lock cannot be, then the IO lock
178 * is dropped before returning.
179 *
180 * ip -- the inode being locked
181 * lock_flags -- this parameter indicates the inode's locks to be
182 * locked. See the comment for xfs_ilock() for a list
183 * of valid values.
184 */
185 int
186 xfs_ilock_nowait(
187 xfs_inode_t *ip,
188 uint lock_flags)
189 {
190 trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
191
192 /*
193 * You can't set both SHARED and EXCL for the same lock,
194 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
195 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
196 */
197 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
198 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
199 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
200 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
201 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
202
203 if (lock_flags & XFS_IOLOCK_EXCL) {
204 if (!mrtryupdate(&ip->i_iolock))
205 goto out;
206 } else if (lock_flags & XFS_IOLOCK_SHARED) {
207 if (!mrtryaccess(&ip->i_iolock))
208 goto out;
209 }
210 if (lock_flags & XFS_ILOCK_EXCL) {
211 if (!mrtryupdate(&ip->i_lock))
212 goto out_undo_iolock;
213 } else if (lock_flags & XFS_ILOCK_SHARED) {
214 if (!mrtryaccess(&ip->i_lock))
215 goto out_undo_iolock;
216 }
217 return 1;
218
219 out_undo_iolock:
220 if (lock_flags & XFS_IOLOCK_EXCL)
221 mrunlock_excl(&ip->i_iolock);
222 else if (lock_flags & XFS_IOLOCK_SHARED)
223 mrunlock_shared(&ip->i_iolock);
224 out:
225 return 0;
226 }
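/*
 * Trylock sketch (assumed usage): callers that must not sleep check the
 * return value and back off when either lock is contended.
 *
 *	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED))
 *		return EAGAIN;		(caller defers the work and retries)
 *	... do the non-blocking work ...
 *	xfs_iunlock(ip, XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED);
 */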
227
228 /*
229 * xfs_iunlock() is used to drop the inode locks acquired with
230 * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
231 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
232 * that we know which locks to drop.
233 *
234 * ip -- the inode being unlocked
235 * lock_flags -- this parameter indicates the inode's locks to be
236 * unlocked. See the comment for xfs_ilock() for a list
237 * of valid values for this parameter.
238 *
239 */
240 void
241 xfs_iunlock(
242 xfs_inode_t *ip,
243 uint lock_flags)
244 {
245 /*
246 * You can't set both SHARED and EXCL for the same lock,
247 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
248 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
249 */
250 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
251 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
252 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
253 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
254 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
255 ASSERT(lock_flags != 0);
256
257 if (lock_flags & XFS_IOLOCK_EXCL)
258 mrunlock_excl(&ip->i_iolock);
259 else if (lock_flags & XFS_IOLOCK_SHARED)
260 mrunlock_shared(&ip->i_iolock);
261
262 if (lock_flags & XFS_ILOCK_EXCL)
263 mrunlock_excl(&ip->i_lock);
264 else if (lock_flags & XFS_ILOCK_SHARED)
265 mrunlock_shared(&ip->i_lock);
266
267 trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
268 }
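/*
 * Lock/unlock pairing sketch (assumed usage): the flags passed to
 * xfs_iunlock() must match those given to xfs_ilock(), using one of the
 * combinations listed above xfs_ilock().
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	... modify the inode ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */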
269
270 /*
271 * Give up write locks. The I/O lock cannot be held nested
272 * if it is being demoted.
273 */
274 void
275 xfs_ilock_demote(
276 xfs_inode_t *ip,
277 uint lock_flags)
278 {
279 ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
280 ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
281
282 if (lock_flags & XFS_ILOCK_EXCL)
283 mrdemote(&ip->i_lock);
284 if (lock_flags & XFS_IOLOCK_EXCL)
285 mrdemote(&ip->i_iolock);
286
287 trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
288 }
289
290 #if defined(DEBUG) || defined(XFS_WARN)
291 int
292 xfs_isilocked(
293 xfs_inode_t *ip,
294 uint lock_flags)
295 {
296 if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
297 if (!(lock_flags & XFS_ILOCK_SHARED))
298 return !!ip->i_lock.mr_writer;
299 return rwsem_is_locked(&ip->i_lock.mr_lock);
300 }
301
302 if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
303 if (!(lock_flags & XFS_IOLOCK_SHARED))
304 return !!ip->i_iolock.mr_writer;
305 return rwsem_is_locked(&ip->i_iolock.mr_lock);
306 }
307
308 ASSERT(0);
309 return 0;
310 }
311 #endif
312
313 void
314 __xfs_iflock(
315 struct xfs_inode *ip)
316 {
317 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
318 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
319
320 do {
321 prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
322 if (xfs_isiflocked(ip))
323 io_schedule();
324 } while (!xfs_iflock_nowait(ip));
325
326 finish_wait(wq, &wait.wait);
327 }
328
329 #ifdef DEBUG
330 /*
331 * Make sure that the extents in the given memory buffer
332 * are valid.
333 */
334 STATIC void
335 xfs_validate_extents(
336 xfs_ifork_t *ifp,
337 int nrecs,
338 xfs_exntfmt_t fmt)
339 {
340 xfs_bmbt_irec_t irec;
341 xfs_bmbt_rec_host_t rec;
342 int i;
343
344 for (i = 0; i < nrecs; i++) {
345 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
346 rec.l0 = get_unaligned(&ep->l0);
347 rec.l1 = get_unaligned(&ep->l1);
348 xfs_bmbt_get_all(&rec, &irec);
349 if (fmt == XFS_EXTFMT_NOSTATE)
350 ASSERT(irec.br_state == XFS_EXT_NORM);
351 }
352 }
353 #else /* DEBUG */
354 #define xfs_validate_extents(ifp, nrecs, fmt)
355 #endif /* DEBUG */
356
357 /*
358 * Check that none of the inodes in the buffer have a next
359 * unlinked field of 0.
360 */
361 #if defined(DEBUG)
362 void
363 xfs_inobp_check(
364 xfs_mount_t *mp,
365 xfs_buf_t *bp)
366 {
367 int i;
368 int j;
369 xfs_dinode_t *dip;
370
371 j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
372
373 for (i = 0; i < j; i++) {
374 dip = (xfs_dinode_t *)xfs_buf_offset(bp,
375 i * mp->m_sb.sb_inodesize);
376 if (!dip->di_next_unlinked) {
377 xfs_alert(mp,
378 "Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
379 bp);
380 ASSERT(dip->di_next_unlinked);
381 }
382 }
383 }
384 #endif
385
386 static void
387 xfs_inode_buf_verify(
388 struct xfs_buf *bp)
389 {
390 struct xfs_mount *mp = bp->b_target->bt_mount;
391 int i;
392 int ni;
393
394 /*
395 * Validate the magic number and version of every inode in the buffer
396 */
397 ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
398 for (i = 0; i < ni; i++) {
399 int di_ok;
400 xfs_dinode_t *dip;
401
402 dip = (struct xfs_dinode *)xfs_buf_offset(bp,
403 (i << mp->m_sb.sb_inodelog));
404 di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
405 XFS_DINODE_GOOD_VERSION(dip->di_version);
406 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
407 XFS_ERRTAG_ITOBP_INOTOBP,
408 XFS_RANDOM_ITOBP_INOTOBP))) {
409 xfs_buf_ioerror(bp, EFSCORRUPTED);
410 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_HIGH,
411 mp, dip);
412 #ifdef DEBUG
413 xfs_emerg(mp,
414 "bad inode magic/vsn daddr %lld #%d (magic=%x)",
415 (unsigned long long)bp->b_bn, i,
416 be16_to_cpu(dip->di_magic));
417 ASSERT(0);
418 #endif
419 }
420 }
421 xfs_inobp_check(mp, bp);
422 }
423
424
425 static void
426 xfs_inode_buf_read_verify(
427 struct xfs_buf *bp)
428 {
429 xfs_inode_buf_verify(bp);
430 }
431
432 static void
433 xfs_inode_buf_write_verify(
434 struct xfs_buf *bp)
435 {
436 xfs_inode_buf_verify(bp);
437 }
438
439 const struct xfs_buf_ops xfs_inode_buf_ops = {
440 .verify_read = xfs_inode_buf_read_verify,
441 .verify_write = xfs_inode_buf_write_verify,
442 };
443
444
445 /*
446 * This routine is called to map an inode to the buffer containing the on-disk
447 * version of the inode. It returns a pointer to the buffer containing the
448 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
449 * pointer to the on-disk inode within that buffer.
450 *
451 * If a non-zero error is returned, then the contents of bpp and dipp are
452 * undefined.
453 */
454 int
455 xfs_imap_to_bp(
456 struct xfs_mount *mp,
457 struct xfs_trans *tp,
458 struct xfs_imap *imap,
459 struct xfs_dinode **dipp,
460 struct xfs_buf **bpp,
461 uint buf_flags,
462 uint iget_flags)
463 {
464 struct xfs_buf *bp;
465 int error;
466
467 buf_flags |= XBF_UNMAPPED;
468 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
469 (int)imap->im_len, buf_flags, &bp,
470 &xfs_inode_buf_ops);
471 if (error) {
472 if (error == EAGAIN) {
473 ASSERT(buf_flags & XBF_TRYLOCK);
474 return error;
475 }
476
477 if (error == EFSCORRUPTED &&
478 (iget_flags & XFS_IGET_UNTRUSTED))
479 return XFS_ERROR(EINVAL);
480
481 xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
482 __func__, error);
483 return error;
484 }
485
486 *bpp = bp;
487 *dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset);
488 return 0;
489 }
490
491 /*
492 * Move inode type and inode format specific information from the
493 * on-disk inode to the in-core inode. For fifos, devs, and sockets
494 * this means set if_rdev to the proper value. For files, directories,
495 * and symlinks this means to bring in the in-line data or extent
496 * pointers. For a file in B-tree format, only the root is immediately
497 * brought in-core. The rest will be in-lined in if_extents when it
498 * is first referenced (see xfs_iread_extents()).
499 */
500 STATIC int
501 xfs_iformat(
502 xfs_inode_t *ip,
503 xfs_dinode_t *dip)
504 {
505 xfs_attr_shortform_t *atp;
506 int size;
507 int error = 0;
508 xfs_fsize_t di_size;
509
510 if (unlikely(be32_to_cpu(dip->di_nextents) +
511 be16_to_cpu(dip->di_anextents) >
512 be64_to_cpu(dip->di_nblocks))) {
513 xfs_warn(ip->i_mount,
514 "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
515 (unsigned long long)ip->i_ino,
516 (int)(be32_to_cpu(dip->di_nextents) +
517 be16_to_cpu(dip->di_anextents)),
518 (unsigned long long)
519 be64_to_cpu(dip->di_nblocks));
520 XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
521 ip->i_mount, dip);
522 return XFS_ERROR(EFSCORRUPTED);
523 }
524
525 if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
526 xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
527 (unsigned long long)ip->i_ino,
528 dip->di_forkoff);
529 XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
530 ip->i_mount, dip);
531 return XFS_ERROR(EFSCORRUPTED);
532 }
533
534 if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
535 !ip->i_mount->m_rtdev_targp)) {
536 xfs_warn(ip->i_mount,
537 "corrupt dinode %Lu, has realtime flag set.",
538 ip->i_ino);
539 XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
540 XFS_ERRLEVEL_LOW, ip->i_mount, dip);
541 return XFS_ERROR(EFSCORRUPTED);
542 }
543
544 switch (ip->i_d.di_mode & S_IFMT) {
545 case S_IFIFO:
546 case S_IFCHR:
547 case S_IFBLK:
548 case S_IFSOCK:
549 if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
550 XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
551 ip->i_mount, dip);
552 return XFS_ERROR(EFSCORRUPTED);
553 }
554 ip->i_d.di_size = 0;
555 ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
556 break;
557
558 case S_IFREG:
559 case S_IFLNK:
560 case S_IFDIR:
561 switch (dip->di_format) {
562 case XFS_DINODE_FMT_LOCAL:
563 /*
564 * no local regular files yet
565 */
566 if (unlikely(S_ISREG(be16_to_cpu(dip->di_mode)))) {
567 xfs_warn(ip->i_mount,
568 "corrupt inode %Lu (local format for regular file).",
569 (unsigned long long) ip->i_ino);
570 XFS_CORRUPTION_ERROR("xfs_iformat(4)",
571 XFS_ERRLEVEL_LOW,
572 ip->i_mount, dip);
573 return XFS_ERROR(EFSCORRUPTED);
574 }
575
576 di_size = be64_to_cpu(dip->di_size);
577 if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
578 xfs_warn(ip->i_mount,
579 "corrupt inode %Lu (bad size %Ld for local inode).",
580 (unsigned long long) ip->i_ino,
581 (long long) di_size);
582 XFS_CORRUPTION_ERROR("xfs_iformat(5)",
583 XFS_ERRLEVEL_LOW,
584 ip->i_mount, dip);
585 return XFS_ERROR(EFSCORRUPTED);
586 }
587
588 size = (int)di_size;
589 error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
590 break;
591 case XFS_DINODE_FMT_EXTENTS:
592 error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
593 break;
594 case XFS_DINODE_FMT_BTREE:
595 error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
596 break;
597 default:
598 XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
599 ip->i_mount);
600 return XFS_ERROR(EFSCORRUPTED);
601 }
602 break;
603
604 default:
605 XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
606 return XFS_ERROR(EFSCORRUPTED);
607 }
608 if (error) {
609 return error;
610 }
611 if (!XFS_DFORK_Q(dip))
612 return 0;
613
614 ASSERT(ip->i_afp == NULL);
615 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
616
617 switch (dip->di_aformat) {
618 case XFS_DINODE_FMT_LOCAL:
619 atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
620 size = be16_to_cpu(atp->hdr.totsize);
621
622 if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
623 xfs_warn(ip->i_mount,
624 "corrupt inode %Lu (bad attr fork size %Ld).",
625 (unsigned long long) ip->i_ino,
626 (long long) size);
627 XFS_CORRUPTION_ERROR("xfs_iformat(8)",
628 XFS_ERRLEVEL_LOW,
629 ip->i_mount, dip);
630 return XFS_ERROR(EFSCORRUPTED);
631 }
632
633 error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
634 break;
635 case XFS_DINODE_FMT_EXTENTS:
636 error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
637 break;
638 case XFS_DINODE_FMT_BTREE:
639 error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
640 break;
641 default:
642 error = XFS_ERROR(EFSCORRUPTED);
643 break;
644 }
645 if (error) {
646 kmem_zone_free(xfs_ifork_zone, ip->i_afp);
647 ip->i_afp = NULL;
648 xfs_idestroy_fork(ip, XFS_DATA_FORK);
649 }
650 return error;
651 }
652
653 /*
654 * The file is in-lined in the on-disk inode.
655 * If it fits into if_inline_data, then copy
656 * it there, otherwise allocate a buffer for it
657 * and copy the data there. Either way, set
658 * if_data to point at the data.
659 * If we allocate a buffer for the data, make
660 * sure that its size is a multiple of 4 and
661 * record the real size in i_real_bytes.
662 */
663 STATIC int
664 xfs_iformat_local(
665 xfs_inode_t *ip,
666 xfs_dinode_t *dip,
667 int whichfork,
668 int size)
669 {
670 xfs_ifork_t *ifp;
671 int real_size;
672
673 /*
674 * If the size is unreasonable, then something
675 * is wrong and we just bail out rather than crash in
676 * kmem_alloc() or memcpy() below.
677 */
678 if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
679 xfs_warn(ip->i_mount,
680 "corrupt inode %Lu (bad size %d for local fork, size = %d).",
681 (unsigned long long) ip->i_ino, size,
682 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
683 XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
684 ip->i_mount, dip);
685 return XFS_ERROR(EFSCORRUPTED);
686 }
687 ifp = XFS_IFORK_PTR(ip, whichfork);
688 real_size = 0;
689 if (size == 0)
690 ifp->if_u1.if_data = NULL;
691 else if (size <= sizeof(ifp->if_u2.if_inline_data))
692 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
693 else {
694 real_size = roundup(size, 4);
695 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
696 }
697 ifp->if_bytes = size;
698 ifp->if_real_bytes = real_size;
699 if (size)
700 memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
701 ifp->if_flags &= ~XFS_IFEXTENTS;
702 ifp->if_flags |= XFS_IFINLINE;
703 return 0;
704 }
705
706 /*
707 * The file consists of a set of extents all
708 * of which fit into the on-disk inode.
709 * If there are few enough extents to fit into
710 * the if_inline_ext, then copy them there.
711 * Otherwise allocate a buffer for them and copy
712 * them into it. Either way, set if_extents
713 * to point at the extents.
714 */
715 STATIC int
716 xfs_iformat_extents(
717 xfs_inode_t *ip,
718 xfs_dinode_t *dip,
719 int whichfork)
720 {
721 xfs_bmbt_rec_t *dp;
722 xfs_ifork_t *ifp;
723 int nex;
724 int size;
725 int i;
726
727 ifp = XFS_IFORK_PTR(ip, whichfork);
728 nex = XFS_DFORK_NEXTENTS(dip, whichfork);
729 size = nex * (uint)sizeof(xfs_bmbt_rec_t);
730
731 /*
732 * If the number of extents is unreasonable, then something
733 * is wrong and we just bail out rather than crash in
734 * kmem_alloc() or memcpy() below.
735 */
736 if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
737 xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
738 (unsigned long long) ip->i_ino, nex);
739 XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
740 ip->i_mount, dip);
741 return XFS_ERROR(EFSCORRUPTED);
742 }
743
744 ifp->if_real_bytes = 0;
745 if (nex == 0)
746 ifp->if_u1.if_extents = NULL;
747 else if (nex <= XFS_INLINE_EXTS)
748 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
749 else
750 xfs_iext_add(ifp, 0, nex);
751
752 ifp->if_bytes = size;
753 if (size) {
754 dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
755 xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
756 for (i = 0; i < nex; i++, dp++) {
757 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
758 ep->l0 = get_unaligned_be64(&dp->l0);
759 ep->l1 = get_unaligned_be64(&dp->l1);
760 }
761 XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
762 if (whichfork != XFS_DATA_FORK ||
763 XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
764 if (unlikely(xfs_check_nostate_extents(
765 ifp, 0, nex))) {
766 XFS_ERROR_REPORT("xfs_iformat_extents(2)",
767 XFS_ERRLEVEL_LOW,
768 ip->i_mount);
769 return XFS_ERROR(EFSCORRUPTED);
770 }
771 }
772 ifp->if_flags |= XFS_IFEXTENTS;
773 return 0;
774 }
775
776 /*
777 * The file has too many extents to fit into
778 * the inode, so they are in B-tree format.
779 * Allocate a buffer for the root of the B-tree
780 * and copy the root into it. The i_extents
781 * field will remain NULL until all of the
782 * extents are read in (when they are needed).
783 */
784 STATIC int
785 xfs_iformat_btree(
786 xfs_inode_t *ip,
787 xfs_dinode_t *dip,
788 int whichfork)
789 {
790 struct xfs_mount *mp = ip->i_mount;
791 xfs_bmdr_block_t *dfp;
792 xfs_ifork_t *ifp;
793 /* REFERENCED */
794 int nrecs;
795 int size;
796
797 ifp = XFS_IFORK_PTR(ip, whichfork);
798 dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
799 size = XFS_BMAP_BROOT_SPACE(mp, dfp);
800 nrecs = be16_to_cpu(dfp->bb_numrecs);
801
802 /*
803 * blow out if -- fork has fewer extents than can fit in the
804 * fork (fork shouldn't be a btree format), root btree
805 * block has more records than can fit into the fork,
806 * or the number of extents is greater than the number of
807 * blocks.
808 */
809 if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <=
810 XFS_IFORK_MAXEXT(ip, whichfork) ||
811 XFS_BMDR_SPACE_CALC(nrecs) >
812 XFS_DFORK_SIZE(dip, mp, whichfork) ||
813 XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
814 xfs_warn(mp, "corrupt inode %Lu (btree).",
815 (unsigned long long) ip->i_ino);
816 XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
817 mp, dip);
818 return XFS_ERROR(EFSCORRUPTED);
819 }
820
821 ifp->if_broot_bytes = size;
822 ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
823 ASSERT(ifp->if_broot != NULL);
824 /*
825 * Copy and convert from the on-disk structure
826 * to the in-memory structure.
827 */
828 xfs_bmdr_to_bmbt(ip, dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
829 ifp->if_broot, size);
830 ifp->if_flags &= ~XFS_IFEXTENTS;
831 ifp->if_flags |= XFS_IFBROOT;
832
833 return 0;
834 }
835
836 STATIC void
837 xfs_dinode_from_disk(
838 xfs_icdinode_t *to,
839 xfs_dinode_t *from)
840 {
841 to->di_magic = be16_to_cpu(from->di_magic);
842 to->di_mode = be16_to_cpu(from->di_mode);
843 to->di_version = from->di_version;
844 to->di_format = from->di_format;
845 to->di_onlink = be16_to_cpu(from->di_onlink);
846 to->di_uid = be32_to_cpu(from->di_uid);
847 to->di_gid = be32_to_cpu(from->di_gid);
848 to->di_nlink = be32_to_cpu(from->di_nlink);
849 to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
850 to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
851 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
852 to->di_flushiter = be16_to_cpu(from->di_flushiter);
853 to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
854 to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
855 to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
856 to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
857 to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
858 to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
859 to->di_size = be64_to_cpu(from->di_size);
860 to->di_nblocks = be64_to_cpu(from->di_nblocks);
861 to->di_extsize = be32_to_cpu(from->di_extsize);
862 to->di_nextents = be32_to_cpu(from->di_nextents);
863 to->di_anextents = be16_to_cpu(from->di_anextents);
864 to->di_forkoff = from->di_forkoff;
865 to->di_aformat = from->di_aformat;
866 to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
867 to->di_dmstate = be16_to_cpu(from->di_dmstate);
868 to->di_flags = be16_to_cpu(from->di_flags);
869 to->di_gen = be32_to_cpu(from->di_gen);
870
871 if (to->di_version == 3) {
872 to->di_changecount = be64_to_cpu(from->di_changecount);
873 to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
874 to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
875 to->di_flags2 = be64_to_cpu(from->di_flags2);
876 to->di_ino = be64_to_cpu(from->di_ino);
877 to->di_lsn = be64_to_cpu(from->di_lsn);
878 memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
879 uuid_copy(&to->di_uuid, &from->di_uuid);
880 }
881 }
882
883 void
884 xfs_dinode_to_disk(
885 xfs_dinode_t *to,
886 xfs_icdinode_t *from)
887 {
888 to->di_magic = cpu_to_be16(from->di_magic);
889 to->di_mode = cpu_to_be16(from->di_mode);
890 to->di_version = from->di_version;
891 to->di_format = from->di_format;
892 to->di_onlink = cpu_to_be16(from->di_onlink);
893 to->di_uid = cpu_to_be32(from->di_uid);
894 to->di_gid = cpu_to_be32(from->di_gid);
895 to->di_nlink = cpu_to_be32(from->di_nlink);
896 to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
897 to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
898 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
899 to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
900 to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
901 to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
902 to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
903 to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
904 to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
905 to->di_size = cpu_to_be64(from->di_size);
906 to->di_nblocks = cpu_to_be64(from->di_nblocks);
907 to->di_extsize = cpu_to_be32(from->di_extsize);
908 to->di_nextents = cpu_to_be32(from->di_nextents);
909 to->di_anextents = cpu_to_be16(from->di_anextents);
910 to->di_forkoff = from->di_forkoff;
911 to->di_aformat = from->di_aformat;
912 to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
913 to->di_dmstate = cpu_to_be16(from->di_dmstate);
914 to->di_flags = cpu_to_be16(from->di_flags);
915 to->di_gen = cpu_to_be32(from->di_gen);
916
917 if (from->di_version == 3) {
918 to->di_changecount = cpu_to_be64(from->di_changecount);
919 to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
920 to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
921 to->di_flags2 = cpu_to_be64(from->di_flags2);
922 to->di_ino = cpu_to_be64(from->di_ino);
923 to->di_lsn = cpu_to_be64(from->di_lsn);
924 memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
925 uuid_copy(&to->di_uuid, &from->di_uuid);
926 to->di_flushiter = 0;
927 } else {
928 to->di_flushiter = cpu_to_be16(from->di_flushiter);
929 }
930 }
931
932 STATIC uint
933 _xfs_dic2xflags(
934 __uint16_t di_flags)
935 {
936 uint flags = 0;
937
938 if (di_flags & XFS_DIFLAG_ANY) {
939 if (di_flags & XFS_DIFLAG_REALTIME)
940 flags |= XFS_XFLAG_REALTIME;
941 if (di_flags & XFS_DIFLAG_PREALLOC)
942 flags |= XFS_XFLAG_PREALLOC;
943 if (di_flags & XFS_DIFLAG_IMMUTABLE)
944 flags |= XFS_XFLAG_IMMUTABLE;
945 if (di_flags & XFS_DIFLAG_APPEND)
946 flags |= XFS_XFLAG_APPEND;
947 if (di_flags & XFS_DIFLAG_SYNC)
948 flags |= XFS_XFLAG_SYNC;
949 if (di_flags & XFS_DIFLAG_NOATIME)
950 flags |= XFS_XFLAG_NOATIME;
951 if (di_flags & XFS_DIFLAG_NODUMP)
952 flags |= XFS_XFLAG_NODUMP;
953 if (di_flags & XFS_DIFLAG_RTINHERIT)
954 flags |= XFS_XFLAG_RTINHERIT;
955 if (di_flags & XFS_DIFLAG_PROJINHERIT)
956 flags |= XFS_XFLAG_PROJINHERIT;
957 if (di_flags & XFS_DIFLAG_NOSYMLINKS)
958 flags |= XFS_XFLAG_NOSYMLINKS;
959 if (di_flags & XFS_DIFLAG_EXTSIZE)
960 flags |= XFS_XFLAG_EXTSIZE;
961 if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
962 flags |= XFS_XFLAG_EXTSZINHERIT;
963 if (di_flags & XFS_DIFLAG_NODEFRAG)
964 flags |= XFS_XFLAG_NODEFRAG;
965 if (di_flags & XFS_DIFLAG_FILESTREAM)
966 flags |= XFS_XFLAG_FILESTREAM;
967 }
968
969 return flags;
970 }
971
972 uint
973 xfs_ip2xflags(
974 xfs_inode_t *ip)
975 {
976 xfs_icdinode_t *dic = &ip->i_d;
977
978 return _xfs_dic2xflags(dic->di_flags) |
979 (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
980 }
981
982 uint
983 xfs_dic2xflags(
984 xfs_dinode_t *dip)
985 {
986 return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
987 (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
988 }
989
990 static bool
991 xfs_dinode_verify(
992 struct xfs_mount *mp,
993 struct xfs_inode *ip,
994 struct xfs_dinode *dip)
995 {
996 if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
997 return false;
998
999 /* only version 3 or greater inodes are extensively verified here */
1000 if (dip->di_version < 3)
1001 return true;
1002
1003 if (!xfs_sb_version_hascrc(&mp->m_sb))
1004 return false;
1005 if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
1006 offsetof(struct xfs_dinode, di_crc)))
1007 return false;
1008 if (be64_to_cpu(dip->di_ino) != ip->i_ino)
1009 return false;
1010 if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_uuid))
1011 return false;
1012 return true;
1013 }
1014
1015 void
1016 xfs_dinode_calc_crc(
1017 struct xfs_mount *mp,
1018 struct xfs_dinode *dip)
1019 {
1020 __uint32_t crc;
1021
1022 if (dip->di_version < 3)
1023 return;
1024
1025 ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
1026 crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize,
1027 offsetof(struct xfs_dinode, di_crc));
1028 dip->di_crc = xfs_end_cksum(crc);
1029 }
1030
1031 /*
1032 * Read the disk inode attributes into the in-core inode structure.
1033 *
1034 * For version 5 superblocks, if we are initialising a new inode and we are not
1035 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
1036 * inode core with a random generation number. If we are keeping inodes around,
1037 * we need to read the inode cluster to get the existing generation number off
1038 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
1039 * format) then log recovery is dependent on the di_flushiter field being
1040 * initialised from the current on-disk value and hence we must also read the
1041 * inode off disk.
1042 */
1043 int
1044 xfs_iread(
1045 xfs_mount_t *mp,
1046 xfs_trans_t *tp,
1047 xfs_inode_t *ip,
1048 uint iget_flags)
1049 {
1050 xfs_buf_t *bp;
1051 xfs_dinode_t *dip;
1052 int error;
1053
1054 /*
1055 * Fill in the location information in the in-core inode.
1056 */
1057 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
1058 if (error)
1059 return error;
1060
1061 /* shortcut IO on inode allocation if possible */
1062 if ((iget_flags & XFS_IGET_CREATE) &&
1063 xfs_sb_version_hascrc(&mp->m_sb) &&
1064 !(mp->m_flags & XFS_MOUNT_IKEEP)) {
1065 /* initialise the on-disk inode core */
1066 memset(&ip->i_d, 0, sizeof(ip->i_d));
1067 ip->i_d.di_magic = XFS_DINODE_MAGIC;
1068 ip->i_d.di_gen = prandom_u32();
1069 if (xfs_sb_version_hascrc(&mp->m_sb)) {
1070 ip->i_d.di_version = 3;
1071 ip->i_d.di_ino = ip->i_ino;
1072 uuid_copy(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid);
1073 } else
1074 ip->i_d.di_version = 2;
1075 return 0;
1076 }
1077
1078 /*
1079 * Get pointers to the on-disk inode and the buffer containing it.
1080 */
1081 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
1082 if (error)
1083 return error;
1084
1085 /* even unallocated inodes are verified */
1086 if (!xfs_dinode_verify(mp, ip, dip)) {
1087 xfs_alert(mp, "%s: validation failed for inode %lld",
1088 __func__, ip->i_ino);
1089
1090 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
1091 error = XFS_ERROR(EFSCORRUPTED);
1092 goto out_brelse;
1093 }
1094
1095 /*
1096 * If the on-disk inode is already linked to a directory
1097 * entry, copy all of the inode into the in-core inode.
1098 * xfs_iformat() handles copying in the inode format
1099 * specific information.
1100 * Otherwise, just get the truly permanent information.
1101 */
1102 if (dip->di_mode) {
1103 xfs_dinode_from_disk(&ip->i_d, dip);
1104 error = xfs_iformat(ip, dip);
1105 if (error) {
1106 #ifdef DEBUG
1107 xfs_alert(mp, "%s: xfs_iformat() returned error %d",
1108 __func__, error);
1109 #endif /* DEBUG */
1110 goto out_brelse;
1111 }
1112 } else {
1113 /*
1114 * Partial initialisation of the in-core inode. Just the bits
1115 * that xfs_ialloc won't overwrite or relies on being correct.
1116 */
1117 ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
1118 ip->i_d.di_version = dip->di_version;
1119 ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
1120 ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
1121
1122 if (dip->di_version == 3) {
1123 ip->i_d.di_ino = be64_to_cpu(dip->di_ino);
1124 uuid_copy(&ip->i_d.di_uuid, &dip->di_uuid);
1125 }
1126
1127 /*
1128 * Make sure to pull in the mode here as well in
1129 * case the inode is released without being used.
1130 * This ensures that xfs_inactive() will see that
1131 * the inode is already free and not try to mess
1132 * with the uninitialized part of it.
1133 */
1134 ip->i_d.di_mode = 0;
1135 }
1136
1137 /*
1138 * The inode format changed when we moved the link count and
1139 * made it 32 bits long. If this is an old format inode,
1140 * convert it in memory to look like a new one. If it gets
1141 * flushed to disk we will convert back before flushing or
1142 * logging it. We zero out the new projid field and the old link
1143 * count field. We'll handle clearing the pad field (the remains
1144 * of the old uuid field) when we actually convert the inode to
1145 * the new format. We don't change the version number so that we
1146 * can distinguish this from a real new format inode.
1147 */
1148 if (ip->i_d.di_version == 1) {
1149 ip->i_d.di_nlink = ip->i_d.di_onlink;
1150 ip->i_d.di_onlink = 0;
1151 xfs_set_projid(ip, 0);
1152 }
1153
1154 ip->i_delayed_blks = 0;
1155
1156 /*
1157 * Mark the buffer containing the inode as something to keep
1158 * around for a while. This helps to keep recently accessed
1159 * meta-data in-core longer.
1160 */
1161 xfs_buf_set_ref(bp, XFS_INO_REF);
1162
1163 /*
1164 * Use xfs_trans_brelse() to release the buffer containing the on-disk
1165 * inode, because it was acquired with xfs_trans_read_buf() in
1166 * xfs_imap_to_bp() above. If tp is NULL, this is just a normal
1167 * brelse(). If we're within a transaction, then xfs_trans_brelse()
1168 * will only release the buffer if it is not dirty within the
1169 * transaction. It will be OK to release the buffer in this case,
1170 * because inodes on disk are never destroyed and we will be locking the
1171 * new in-core inode before putting it in the cache where other
1172 * processes can find it. Thus we don't have to worry about the inode
1173 * being changed just because we released the buffer.
1174 */
1175 out_brelse:
1176 xfs_trans_brelse(tp, bp);
1177 return error;
1178 }
1179
1180 /*
1181 * Read in extents from a btree-format inode.
1182 * Allocate and fill in if_extents. Real work is done in xfs_bmap.c.
1183 */
1184 int
1185 xfs_iread_extents(
1186 xfs_trans_t *tp,
1187 xfs_inode_t *ip,
1188 int whichfork)
1189 {
1190 int error;
1191 xfs_ifork_t *ifp;
1192 xfs_extnum_t nextents;
1193
1194 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
1195 XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
1196 ip->i_mount);
1197 return XFS_ERROR(EFSCORRUPTED);
1198 }
1199 nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
1200 ifp = XFS_IFORK_PTR(ip, whichfork);
1201
1202 /*
1203 * We know that the size is valid (it's checked in iformat_btree)
1204 */
1205 ifp->if_bytes = ifp->if_real_bytes = 0;
1206 ifp->if_flags |= XFS_IFEXTENTS;
1207 xfs_iext_add(ifp, 0, nextents);
1208 error = xfs_bmap_read_extents(tp, ip, whichfork);
1209 if (error) {
1210 xfs_iext_destroy(ifp);
1211 ifp->if_flags &= ~XFS_IFEXTENTS;
1212 return error;
1213 }
1214 xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
1215 return 0;
1216 }
1217
1218 /*
1219 * Allocate an inode on disk and return a copy of its in-core version.
1220 * The in-core inode is locked exclusively. Set mode, nlink, and rdev
1221 * appropriately within the inode. The uid and gid for the inode are
1222 * set according to the contents of the given cred structure.
1223 *
1224 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
1225 * has a free inode available, call xfs_iget() to obtain the in-core
1226 * version of the allocated inode. Finally, fill in the inode and
1227 * log its initial contents. In this case, ialloc_context would be
1228 * set to NULL.
1229 *
1230 * If xfs_dialloc() does not have an available inode, it will replenish
1231 * its supply by doing an allocation. Since we can only do one
1232 * allocation within a transaction without deadlocks, we must commit
1233 * the current transaction before returning the inode itself.
1234 * In this case, therefore, we will set ialloc_context and return.
1235 * The caller should then commit the current transaction, start a new
1236 * transaction, and call xfs_ialloc() again to actually get the inode.
1237 *
1238 * To ensure that some other process does not grab the inode that
1239 * was allocated during the first call to xfs_ialloc(), this routine
1240 * also returns the [locked] bp pointing to the head of the freelist
1241 * as ialloc_context. The caller should hold this buffer across
1242 * the commit and pass it back into this routine on the second call.
1243 *
1244 * If we are allocating quota inodes, we do not have a parent inode
1245 * to attach to or associate with (i.e. pip == NULL) because they
1246 * are not linked into the directory structure - they are attached
1247 * directly to the superblock - and so have no parent.
1248 */
1249 int
1250 xfs_ialloc(
1251 xfs_trans_t *tp,
1252 xfs_inode_t *pip,
1253 umode_t mode,
1254 xfs_nlink_t nlink,
1255 xfs_dev_t rdev,
1256 prid_t prid,
1257 int okalloc,
1258 xfs_buf_t **ialloc_context,
1259 xfs_inode_t **ipp)
1260 {
1261 struct xfs_mount *mp = tp->t_mountp;
1262 xfs_ino_t ino;
1263 xfs_inode_t *ip;
1264 uint flags;
1265 int error;
1266 timespec_t tv;
1267 int filestreams = 0;
1268
1269 /*
1270 * Call the space management code to pick
1271 * the on-disk inode to be allocated.
1272 */
1273 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
1274 ialloc_context, &ino);
1275 if (error)
1276 return error;
1277 if (*ialloc_context || ino == NULLFSINO) {
1278 *ipp = NULL;
1279 return 0;
1280 }
1281 ASSERT(*ialloc_context == NULL);
1282
1283 /*
1284 * Get the in-core inode with the lock held exclusively.
1285 * This is because we're setting fields here we need
1286 * to prevent others from looking at until we're done.
1287 */
1288 error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
1289 XFS_ILOCK_EXCL, &ip);
1290 if (error)
1291 return error;
1292 ASSERT(ip != NULL);
1293
1294 ip->i_d.di_mode = mode;
1295 ip->i_d.di_onlink = 0;
1296 ip->i_d.di_nlink = nlink;
1297 ASSERT(ip->i_d.di_nlink == nlink);
1298 ip->i_d.di_uid = current_fsuid();
1299 ip->i_d.di_gid = current_fsgid();
1300 xfs_set_projid(ip, prid);
1301 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
1302
1303 /*
1304 * If the superblock version is up to where we support new format
1305 * inodes and this is currently an old format inode, then change
1306 * the inode version number now. This way we only do the conversion
1307 * here rather than here and in the flush/logging code.
1308 */
1309 if (xfs_sb_version_hasnlink(&mp->m_sb) &&
1310 ip->i_d.di_version == 1) {
1311 ip->i_d.di_version = 2;
1312 /*
1313 * We've already zeroed the old link count, the projid field,
1314 * and the pad field.
1315 */
1316 }
1317
1318 /*
1319 * Project ids won't be stored on disk if we are using a version 1 inode.
1320 */
1321 if ((prid != 0) && (ip->i_d.di_version == 1))
1322 xfs_bump_ino_vers2(tp, ip);
1323
1324 if (pip && XFS_INHERIT_GID(pip)) {
1325 ip->i_d.di_gid = pip->i_d.di_gid;
1326 if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
1327 ip->i_d.di_mode |= S_ISGID;
1328 }
1329 }
1330
1331 /*
1332 * If the group ID of the new file does not match the effective group
1333 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
1334 * (and only if the irix_sgid_inherit compatibility variable is set).
1335 */
1336 if ((irix_sgid_inherit) &&
1337 (ip->i_d.di_mode & S_ISGID) &&
1338 (!in_group_p((gid_t)ip->i_d.di_gid))) {
1339 ip->i_d.di_mode &= ~S_ISGID;
1340 }
1341
1342 ip->i_d.di_size = 0;
1343 ip->i_d.di_nextents = 0;
1344 ASSERT(ip->i_d.di_nblocks == 0);
1345
1346 nanotime(&tv);
1347 ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
1348 ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
1349 ip->i_d.di_atime = ip->i_d.di_mtime;
1350 ip->i_d.di_ctime = ip->i_d.di_mtime;
1351
1352 /*
1353 * di_gen will have been taken care of in xfs_iread.
1354 */
1355 ip->i_d.di_extsize = 0;
1356 ip->i_d.di_dmevmask = 0;
1357 ip->i_d.di_dmstate = 0;
1358 ip->i_d.di_flags = 0;
1359
1360 if (ip->i_d.di_version == 3) {
1361 ASSERT(ip->i_d.di_ino == ino);
1362 ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid));
1363 ip->i_d.di_crc = 0;
1364 ip->i_d.di_changecount = 1;
1365 ip->i_d.di_lsn = 0;
1366 ip->i_d.di_flags2 = 0;
1367 memset(&(ip->i_d.di_pad2[0]), 0, sizeof(ip->i_d.di_pad2));
1368 ip->i_d.di_crtime = ip->i_d.di_mtime;
1369 }
1370
1371
1372 flags = XFS_ILOG_CORE;
1373 switch (mode & S_IFMT) {
1374 case S_IFIFO:
1375 case S_IFCHR:
1376 case S_IFBLK:
1377 case S_IFSOCK:
1378 ip->i_d.di_format = XFS_DINODE_FMT_DEV;
1379 ip->i_df.if_u2.if_rdev = rdev;
1380 ip->i_df.if_flags = 0;
1381 flags |= XFS_ILOG_DEV;
1382 break;
1383 case S_IFREG:
1384 /*
1385 * we can't set up filestreams until after the VFS inode
1386 * is set up properly.
1387 */
1388 if (pip && xfs_inode_is_filestream(pip))
1389 filestreams = 1;
1390 /* fall through */
1391 case S_IFDIR:
1392 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
1393 uint di_flags = 0;
1394
1395 if (S_ISDIR(mode)) {
1396 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1397 di_flags |= XFS_DIFLAG_RTINHERIT;
1398 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1399 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
1400 ip->i_d.di_extsize = pip->i_d.di_extsize;
1401 }
1402 } else if (S_ISREG(mode)) {
1403 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1404 di_flags |= XFS_DIFLAG_REALTIME;
1405 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1406 di_flags |= XFS_DIFLAG_EXTSIZE;
1407 ip->i_d.di_extsize = pip->i_d.di_extsize;
1408 }
1409 }
1410 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
1411 xfs_inherit_noatime)
1412 di_flags |= XFS_DIFLAG_NOATIME;
1413 if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
1414 xfs_inherit_nodump)
1415 di_flags |= XFS_DIFLAG_NODUMP;
1416 if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
1417 xfs_inherit_sync)
1418 di_flags |= XFS_DIFLAG_SYNC;
1419 if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
1420 xfs_inherit_nosymlinks)
1421 di_flags |= XFS_DIFLAG_NOSYMLINKS;
1422 if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
1423 di_flags |= XFS_DIFLAG_PROJINHERIT;
1424 if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
1425 xfs_inherit_nodefrag)
1426 di_flags |= XFS_DIFLAG_NODEFRAG;
1427 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
1428 di_flags |= XFS_DIFLAG_FILESTREAM;
1429 ip->i_d.di_flags |= di_flags;
1430 }
1431 /* FALLTHROUGH */
1432 case S_IFLNK:
1433 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
1434 ip->i_df.if_flags = XFS_IFEXTENTS;
1435 ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
1436 ip->i_df.if_u1.if_extents = NULL;
1437 break;
1438 default:
1439 ASSERT(0);
1440 }
1441 /*
1442 * Attribute fork settings for new inode.
1443 */
1444 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1445 ip->i_d.di_anextents = 0;
1446
1447 /*
1448 * Log the new values stuffed into the inode.
1449 */
1450 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1451 xfs_trans_log_inode(tp, ip, flags);
1452
1453 /* now that we have an i_mode we can setup inode ops and unlock */
1454 xfs_setup_inode(ip);
1455
1456 /* now we have set up the vfs inode we can associate the filestream */
1457 if (filestreams) {
1458 error = xfs_filestream_associate(pip, ip);
1459 if (error < 0)
1460 return -error;
1461 if (!error)
1462 xfs_iflags_set(ip, XFS_IFILESTREAM);
1463 }
1464
1465 *ipp = ip;
1466 return 0;
1467 }
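/*
 * Two-phase allocation sketch (assumed caller pattern, following the big
 * comment above xfs_ialloc()): a non-NULL ialloc_context means the free
 * inode supply had to be replenished, so the caller commits the current
 * transaction, starts a new one and calls xfs_ialloc() again.
 *
 *	error = xfs_ialloc(tp, dp, mode, 1, 0, prid, okalloc,
 *			   &ialloc_context, &ip);
 *	if (!error && ialloc_context != NULL) {
 *		... hold ialloc_context across the commit, commit tp,
 *		    reserve a new transaction ...
 *		error = xfs_ialloc(tp, dp, mode, 1, 0, prid, okalloc,
 *				   &ialloc_context, &ip);
 *	}
 */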
1468
1469 /*
1470 * Free up the underlying blocks past new_size. The new size must be smaller
1471 * than the current size. This routine can be used both for the attribute and
1472 * data fork, and does not modify the inode size, which is left to the caller.
1473 *
1474 * The transaction passed to this routine must have made a permanent log
1475 * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
1476 * given transaction and start new ones, so make sure everything involved in
1477 * the transaction is tidy before calling here. Some transaction will be
1478 * returned to the caller to be committed. The incoming transaction must
1479 * already include the inode, and both inode locks must be held exclusively.
1480 * The inode must also be "held" within the transaction. On return the inode
1481 * will be "held" within the returned transaction. This routine does NOT
1482 * require any disk space to be reserved for it within the transaction.
1483 *
1484 * If we get an error, we must return with the inode locked and linked into the
1485 * current transaction. This keeps things simple for the higher level code,
1486 * because it always knows that the inode is locked and held in the transaction
1487 * that returns to it whether errors occur or not. We don't mark the inode
1488 * dirty on error so that transactions can be easily aborted if possible.
1489 */
1490 int
1491 xfs_itruncate_extents(
1492 struct xfs_trans **tpp,
1493 struct xfs_inode *ip,
1494 int whichfork,
1495 xfs_fsize_t new_size)
1496 {
1497 struct xfs_mount *mp = ip->i_mount;
1498 struct xfs_trans *tp = *tpp;
1499 struct xfs_trans *ntp;
1500 xfs_bmap_free_t free_list;
1501 xfs_fsblock_t first_block;
1502 xfs_fileoff_t first_unmap_block;
1503 xfs_fileoff_t last_block;
1504 xfs_filblks_t unmap_len;
1505 int committed;
1506 int error = 0;
1507 int done = 0;
1508
1509 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1510 ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1511 xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1512 ASSERT(new_size <= XFS_ISIZE(ip));
1513 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1514 ASSERT(ip->i_itemp != NULL);
1515 ASSERT(ip->i_itemp->ili_lock_flags == 0);
1516 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1517
1518 trace_xfs_itruncate_extents_start(ip, new_size);
1519
1520 /*
1521 * Since it is possible for space to become allocated beyond
1522 * the end of the file (in a crash where the space is allocated
1523 * but the inode size is not yet updated), simply remove any
1524 * blocks which show up between the new EOF and the maximum
1525 * possible file size. If the first block to be removed is
1526 * beyond the maximum file size (ie it is the same as last_block),
1527 * then there is nothing to do.
1528 */
1529 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1530 last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1531 if (first_unmap_block == last_block)
1532 return 0;
1533
1534 ASSERT(first_unmap_block < last_block);
1535 unmap_len = last_block - first_unmap_block + 1;
1536 while (!done) {
1537 xfs_bmap_init(&free_list, &first_block);
1538 error = xfs_bunmapi(tp, ip,
1539 first_unmap_block, unmap_len,
1540 xfs_bmapi_aflag(whichfork),
1541 XFS_ITRUNC_MAX_EXTENTS,
1542 &first_block, &free_list,
1543 &done);
1544 if (error)
1545 goto out_bmap_cancel;
1546
1547 /*
1548 * Duplicate the transaction that has the permanent
1549 * reservation and commit the old transaction.
1550 */
1551 error = xfs_bmap_finish(&tp, &free_list, &committed);
1552 if (committed)
1553 xfs_trans_ijoin(tp, ip, 0);
1554 if (error)
1555 goto out_bmap_cancel;
1556
1557 if (committed) {
1558 /*
1559 * Mark the inode dirty so it will be logged and
1560 * moved forward in the log as part of every commit.
1561 */
1562 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1563 }
1564
1565 ntp = xfs_trans_dup(tp);
1566 error = xfs_trans_commit(tp, 0);
1567 tp = ntp;
1568
1569 xfs_trans_ijoin(tp, ip, 0);
1570
1571 if (error)
1572 goto out;
1573
1574 /*
1575 * Transaction commit worked ok so we can drop the extra ticket
1576 * reference that we gained in xfs_trans_dup()
1577 */
1578 xfs_log_ticket_put(tp->t_ticket);
1579 error = xfs_trans_reserve(tp, 0,
1580 XFS_ITRUNCATE_LOG_RES(mp), 0,
1581 XFS_TRANS_PERM_LOG_RES,
1582 XFS_ITRUNCATE_LOG_COUNT);
1583 if (error)
1584 goto out;
1585 }
1586
1587 /*
1588 * Always re-log the inode so that our permanent transaction can keep
1589 * on rolling it forward in the log.
1590 */
1591 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1592
1593 trace_xfs_itruncate_extents_end(ip, new_size);
1594
1595 out:
1596 *tpp = tp;
1597 return error;
1598 out_bmap_cancel:
1599 /*
1600 * If the bunmapi call encounters an error, return to the caller where
1601 * the transaction can be properly aborted. We just need to make sure
1602 * we're not holding any resources that we were not when we came in.
1603 */
1604 xfs_bmap_cancel(&free_list);
1605 goto out;
1606 }
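/*
 * Caller sketch (assumed usage, per the comment above xfs_itruncate_extents()):
 * the inode is held and joined to a permanent transaction, and the possibly
 * rolled transaction is handed back through tpp for the caller to commit.
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *	... commit or cancel tp, then xfs_iunlock(ip, XFS_ILOCK_EXCL) ...
 */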
1607
1608 /*
1609 * This is called when the inode's link count goes to 0.
1610 * We place the on-disk inode on a list in the AGI. It
1611 * will be pulled from this list when the inode is freed.
1612 */
1613 int
1614 xfs_iunlink(
1615 xfs_trans_t *tp,
1616 xfs_inode_t *ip)
1617 {
1618 xfs_mount_t *mp;
1619 xfs_agi_t *agi;
1620 xfs_dinode_t *dip;
1621 xfs_buf_t *agibp;
1622 xfs_buf_t *ibp;
1623 xfs_agino_t agino;
1624 short bucket_index;
1625 int offset;
1626 int error;
1627
1628 ASSERT(ip->i_d.di_nlink == 0);
1629 ASSERT(ip->i_d.di_mode != 0);
1630
1631 mp = tp->t_mountp;
1632
1633 /*
1634 * Get the agi buffer first. It ensures lock ordering
1635 * on the list.
1636 */
1637 error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
1638 if (error)
1639 return error;
1640 agi = XFS_BUF_TO_AGI(agibp);
1641
1642 /*
1643 * Get the index into the agi hash table for the
1644 * list this inode will go on.
1645 */
1646 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1647 ASSERT(agino != 0);
1648 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1649 ASSERT(agi->agi_unlinked[bucket_index]);
1650 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
1651
1652 if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
1653 /*
1654 * There is already another inode in the bucket we need
1655 * to add ourselves to. Add us at the front of the list.
1656 * Here we put the head pointer into our next pointer,
1657 * and then we fall through to point the head at us.
1658 */
1659 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
1660 0, 0);
1661 if (error)
1662 return error;
1663
1664 ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
1665 dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
1666 offset = ip->i_imap.im_boffset +
1667 offsetof(xfs_dinode_t, di_next_unlinked);
1668
1669 /* need to recalc the inode CRC if appropriate */
1670 xfs_dinode_calc_crc(mp, dip);
1671
1672 xfs_trans_inode_buf(tp, ibp);
1673 xfs_trans_log_buf(tp, ibp, offset,
1674 (offset + sizeof(xfs_agino_t) - 1));
1675 xfs_inobp_check(mp, ibp);
1676 }
1677
1678 /*
1679 * Point the bucket head pointer at the inode being inserted.
1680 */
1681 ASSERT(agino != 0);
1682 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
1683 offset = offsetof(xfs_agi_t, agi_unlinked) +
1684 (sizeof(xfs_agino_t) * bucket_index);
1685 xfs_trans_log_buf(tp, agibp, offset,
1686 (offset + sizeof(xfs_agino_t) - 1));
1687 return 0;
1688 }
1689
1690 /*
1691 * Pull the on-disk inode from the AGI unlinked list.
1692 */
1693 STATIC int
1694 xfs_iunlink_remove(
1695 xfs_trans_t *tp,
1696 xfs_inode_t *ip)
1697 {
1698 xfs_ino_t next_ino;
1699 xfs_mount_t *mp;
1700 xfs_agi_t *agi;
1701 xfs_dinode_t *dip;
1702 xfs_buf_t *agibp;
1703 xfs_buf_t *ibp;
1704 xfs_agnumber_t agno;
1705 xfs_agino_t agino;
1706 xfs_agino_t next_agino;
1707 xfs_buf_t *last_ibp;
1708 xfs_dinode_t *last_dip = NULL;
1709 short bucket_index;
1710 int offset, last_offset = 0;
1711 int error;
1712
1713 mp = tp->t_mountp;
1714 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1715
1716 /*
1717 * Get the agi buffer first. It ensures lock ordering
1718 * on the list.
1719 */
1720 error = xfs_read_agi(mp, tp, agno, &agibp);
1721 if (error)
1722 return error;
1723
1724 agi = XFS_BUF_TO_AGI(agibp);
1725
1726 /*
1727 * Get the index into the agi hash table for the
1728 * list this inode will go on.
1729 */
1730 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1731 ASSERT(agino != 0);
1732 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1733 ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
1734 ASSERT(agi->agi_unlinked[bucket_index]);
1735
1736 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
1737 /*
1738 * We're at the head of the list. Get the inode's on-disk
1739 * buffer to see if there is anyone after us on the list.
1740 * Only modify our next pointer if it is not already NULLAGINO.
1741 * This saves us the overhead of dealing with the buffer when
1742 * there is no need to change it.
1743 */
1744 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
1745 0, 0);
1746 if (error) {
1747 xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
1748 __func__, error);
1749 return error;
1750 }
1751 next_agino = be32_to_cpu(dip->di_next_unlinked);
1752 ASSERT(next_agino != 0);
1753 if (next_agino != NULLAGINO) {
1754 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
1755 offset = ip->i_imap.im_boffset +
1756 offsetof(xfs_dinode_t, di_next_unlinked);
1757
1758 /* need to recalc the inode CRC if appropriate */
1759 xfs_dinode_calc_crc(mp, dip);
1760
1761 xfs_trans_inode_buf(tp, ibp);
1762 xfs_trans_log_buf(tp, ibp, offset,
1763 (offset + sizeof(xfs_agino_t) - 1));
1764 xfs_inobp_check(mp, ibp);
1765 } else {
1766 xfs_trans_brelse(tp, ibp);
1767 }
1768 /*
1769 * Point the bucket head pointer at the next inode.
1770 */
1771 ASSERT(next_agino != 0);
1772 ASSERT(next_agino != agino);
1773 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
1774 offset = offsetof(xfs_agi_t, agi_unlinked) +
1775 (sizeof(xfs_agino_t) * bucket_index);
1776 xfs_trans_log_buf(tp, agibp, offset,
1777 (offset + sizeof(xfs_agino_t) - 1));
1778 } else {
1779 /*
1780 * We need to search the list for the inode being freed.
1781 */
1782 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1783 last_ibp = NULL;
1784 while (next_agino != agino) {
1785 struct xfs_imap imap;
1786
1787 if (last_ibp)
1788 xfs_trans_brelse(tp, last_ibp);
1789
1790 imap.im_blkno = 0;
1791 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
1792
1793 error = xfs_imap(mp, tp, next_ino, &imap, 0);
1794 if (error) {
1795 xfs_warn(mp,
1796 "%s: xfs_imap returned error %d.",
1797 __func__, error);
1798 return error;
1799 }
1800
1801 error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
1802 &last_ibp, 0, 0);
1803 if (error) {
1804 xfs_warn(mp,
1805 "%s: xfs_imap_to_bp returned error %d.",
1806 __func__, error);
1807 return error;
1808 }
1809
1810 last_offset = imap.im_boffset;
1811 next_agino = be32_to_cpu(last_dip->di_next_unlinked);
1812 ASSERT(next_agino != NULLAGINO);
1813 ASSERT(next_agino != 0);
1814 }
1815
1816 /*
1817 * Now last_ibp points to the buffer previous to us on the
1818 * unlinked list. Pull us from the list.
1819 */
1820 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
1821 0, 0);
1822 if (error) {
1823 xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
1824 __func__, error);
1825 return error;
1826 }
1827 next_agino = be32_to_cpu(dip->di_next_unlinked);
1828 ASSERT(next_agino != 0);
1829 ASSERT(next_agino != agino);
1830 if (next_agino != NULLAGINO) {
1831 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
1832 offset = ip->i_imap.im_boffset +
1833 offsetof(xfs_dinode_t, di_next_unlinked);
1834
1835 /* need to recalc the inode CRC if appropriate */
1836 xfs_dinode_calc_crc(mp, dip);
1837
1838 xfs_trans_inode_buf(tp, ibp);
1839 xfs_trans_log_buf(tp, ibp, offset,
1840 (offset + sizeof(xfs_agino_t) - 1));
1841 xfs_inobp_check(mp, ibp);
1842 } else {
1843 xfs_trans_brelse(tp, ibp);
1844 }
1845 /*
1846 * Point the previous inode on the list to the next inode.
1847 */
1848 last_dip->di_next_unlinked = cpu_to_be32(next_agino);
1849 ASSERT(next_agino != 0);
1850 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
1851
1852 /* need to recalc the inode CRC if appropriate */
1853 xfs_dinode_calc_crc(mp, last_dip);
1854
1855 xfs_trans_inode_buf(tp, last_ibp);
1856 xfs_trans_log_buf(tp, last_ibp, offset,
1857 (offset + sizeof(xfs_agino_t) - 1));
1858 xfs_inobp_check(mp, last_ibp);
1859 }
1860 return 0;
1861 }
1862
1863 /*
1864 * A big issue when freeing the inode cluster is that we _cannot_ skip any
1865 * inodes that are in memory - they all must be marked stale and attached to
1866 * the cluster buffer.
1867 */
1868 STATIC int
1869 xfs_ifree_cluster(
1870 xfs_inode_t *free_ip,
1871 xfs_trans_t *tp,
1872 xfs_ino_t inum)
1873 {
1874 xfs_mount_t *mp = free_ip->i_mount;
1875 int blks_per_cluster;
1876 int nbufs;
1877 int ninodes;
1878 int i, j;
1879 xfs_daddr_t blkno;
1880 xfs_buf_t *bp;
1881 xfs_inode_t *ip;
1882 xfs_inode_log_item_t *iip;
1883 xfs_log_item_t *lip;
1884 struct xfs_perag *pag;
1885
1886 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
1887 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
1888 blks_per_cluster = 1;
1889 ninodes = mp->m_sb.sb_inopblock;
1890 nbufs = XFS_IALLOC_BLOCKS(mp);
1891 } else {
1892 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
1893 mp->m_sb.sb_blocksize;
1894 ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
1895 nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
1896 }
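/*
 * A worked example of the geometry above (assuming 4k filesystem
 * blocks, an 8k inode cluster and 256 byte inodes): each cluster
 * buffer spans blks_per_cluster = 2 blocks and holds ninodes = 32
 * inodes, so the 64 inode chunk starting at inum is covered by
 * nbufs = 2 buffers.
 */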
1897
1898 for (j = 0; j < nbufs; j++, inum += ninodes) {
1899 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
1900 XFS_INO_TO_AGBNO(mp, inum));
1901
1902 /*
1903 * We obtain and lock the backing buffer first in the process
1904 * here, as we have to ensure that any dirty inode that we
1905 * can't get the flush lock on is attached to the buffer.
1906 * If we scan the in-memory inodes first, then buffer IO can
1907 * complete before we get a lock on it, and hence we may fail
1908 * to mark all the active inodes on the buffer stale.
1909 */
1910 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
1911 mp->m_bsize * blks_per_cluster,
1912 XBF_UNMAPPED);
1913
1914 if (!bp)
1915 return ENOMEM;
1916
1917 /*
1918 * This buffer may not have been correctly initialised as we
1919 * didn't read it from disk. That's not important because we are
1920 * only using it to mark the buffer as stale in the log, and to
1921 * attach stale cached inodes on it. That means it will never be
1922 * dispatched for IO. If it is, we want to know about it, and we
1923 * want it to fail. We can achieve this by adding a write
1924 * verifier to the buffer.
1925 */
1926 bp->b_ops = &xfs_inode_buf_ops;
1927
1928 /*
1929 * Walk the inodes already attached to the buffer and mark them
1930 * stale. These will all have the flush locks held, so an
1931 * in-memory inode walk can't lock them. By marking them all
1932 * stale first, we will not attempt to lock them in the loop
1933 * below as the XFS_ISTALE flag will be set.
1934 */
1935 lip = bp->b_fspriv;
1936 while (lip) {
1937 if (lip->li_type == XFS_LI_INODE) {
1938 iip = (xfs_inode_log_item_t *)lip;
1939 ASSERT(iip->ili_logged == 1);
1940 lip->li_cb = xfs_istale_done;
1941 xfs_trans_ail_copy_lsn(mp->m_ail,
1942 &iip->ili_flush_lsn,
1943 &iip->ili_item.li_lsn);
1944 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
1945 }
1946 lip = lip->li_bio_list;
1947 }
1948
1949
1950 /*
1951 * For each inode in memory attempt to add it to the inode
1952 * buffer and set it up for being staled on buffer IO
1953 * completion. This is safe as we've locked out tail pushing
1954 * and flushing by locking the buffer.
1955 *
1956 * We have already marked every inode that was part of a
1957 * transaction stale above, which means there is no point in
1958 * even trying to lock them.
1959 */
1960 for (i = 0; i < ninodes; i++) {
1961 retry:
1962 rcu_read_lock();
1963 ip = radix_tree_lookup(&pag->pag_ici_root,
1964 XFS_INO_TO_AGINO(mp, (inum + i)));
1965
1966 /* Inode not in memory, nothing to do */
1967 if (!ip) {
1968 rcu_read_unlock();
1969 continue;
1970 }
1971
1972 /*
1973 * because this is an RCU protected lookup, we could
1974 * find a recently freed or even reallocated inode
1975 * during the lookup. We need to check under the
1976 * i_flags_lock for a valid inode here. Skip it if it
1977 * is not valid, the wrong inode or stale.
1978 */
1979 spin_lock(&ip->i_flags_lock);
1980 if (ip->i_ino != inum + i ||
1981 __xfs_iflags_test(ip, XFS_ISTALE)) {
1982 spin_unlock(&ip->i_flags_lock);
1983 rcu_read_unlock();
1984 continue;
1985 }
1986 spin_unlock(&ip->i_flags_lock);
1987
1988 /*
1989 * Don't try to lock/unlock the current inode, but we
1990 * _cannot_ skip the other inodes that we did not find
1991 * in the list attached to the buffer and are not
1992 * already marked stale. If we can't lock it, back off
1993 * and retry.
1994 */
1995 if (ip != free_ip &&
1996 !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
1997 rcu_read_unlock();
1998 delay(1);
1999 goto retry;
2000 }
2001 rcu_read_unlock();
2002
2003 xfs_iflock(ip);
2004 xfs_iflags_set(ip, XFS_ISTALE);
2005
2006 /*
2007 * we don't need to attach clean inodes or those only
2008 * with unlogged changes (which we throw away, anyway).
2009 */
2010 iip = ip->i_itemp;
2011 if (!iip || xfs_inode_clean(ip)) {
2012 ASSERT(ip != free_ip);
2013 xfs_ifunlock(ip);
2014 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2015 continue;
2016 }
2017
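/*
 * Mirror what a normal inode flush does: move the logged state to
 * ili_last_fields, record the flush lsn and attach xfs_istale_done()
 * so the log item can be removed from the AIL once the stale cluster
 * buffer is finally processed.
 */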
2018 iip->ili_last_fields = iip->ili_fields;
2019 iip->ili_fields = 0;
2020 iip->ili_logged = 1;
2021 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2022 &iip->ili_item.li_lsn);
2023
2024 xfs_buf_attach_iodone(bp, xfs_istale_done,
2025 &iip->ili_item);
2026
2027 if (ip != free_ip)
2028 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2029 }
2030
2031 xfs_trans_stale_inode_buf(tp, bp);
2032 xfs_trans_binval(tp, bp);
2033 }
2034
2035 xfs_perag_put(pag);
2036 return 0;
2037 }
2038
2039 /*
2040 * This is called to return an inode to the inode free list.
2041 * The inode should already be truncated to 0 length and have
2042 * no pages associated with it. This routine also assumes that
2043 * the inode is already a part of the transaction.
2044 *
2045 * The on-disk copy of the inode will have been added to the list
2046 * of unlinked inodes in the AGI. We need to remove the inode from
2047 * that list atomically with respect to freeing it here.
2048 */
2049 int
2050 xfs_ifree(
2051 xfs_trans_t *tp,
2052 xfs_inode_t *ip,
2053 xfs_bmap_free_t *flist)
2054 {
2055 int error;
2056 int delete;
2057 xfs_ino_t first_ino;
2058
2059 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2060 ASSERT(ip->i_d.di_nlink == 0);
2061 ASSERT(ip->i_d.di_nextents == 0);
2062 ASSERT(ip->i_d.di_anextents == 0);
2063 ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
2064 ASSERT(ip->i_d.di_nblocks == 0);
2065
2066 /*
2067 * Pull the on-disk inode from the AGI unlinked list.
2068 */
2069 error = xfs_iunlink_remove(tp, ip);
2070 if (error)
2071 return error;
2072
2073 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
2074 if (error)
2075 return error;
2076
2077 ip->i_d.di_mode = 0; /* mark incore inode as free */
2078 ip->i_d.di_flags = 0;
2079 ip->i_d.di_dmevmask = 0;
2080 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
2081 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2082 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2083 /*
2084 * Bump the generation count so no one will be confused
2085 * by reincarnations of this inode.
2086 */
2087 ip->i_d.di_gen++;
2088 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2089
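/*
 * xfs_difree() sets "delete" when this was the last allocated inode in
 * its chunk; in that case the on-disk inode cluster buffers backing the
 * chunk are invalidated via xfs_ifree_cluster() below.
 */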
2090 if (delete)
2091 error = xfs_ifree_cluster(ip, tp, first_ino);
2092
2093 return error;
2094 }
2095
2096 /*
2097 * Reallocate the space for if_broot based on the number of records
2098 * being added or deleted as indicated in rec_diff. Move the records
2099 * and pointers in if_broot to fit the new size. When shrinking this
2100 * will eliminate holes between the records and pointers created by
2101 * the caller. When growing this will create holes to be filled in
2102 * by the caller.
2103 *
2104 * The caller must not request to add more records than would fit in
2105 * the on-disk inode root. If the if_broot is currently NULL, then
2106 * if we are adding records, one will be allocated. The caller must also
2107 * not request that the number of records go below zero, although
2108 * it can go to zero.
2109 *
2110 * ip -- the inode whose if_broot area is changing
2111 * rec_diff -- the change in the number of records, positive or negative,
2112 * requested for the if_broot array.
2113 */
2114 void
2115 xfs_iroot_realloc(
2116 xfs_inode_t *ip,
2117 int rec_diff,
2118 int whichfork)
2119 {
2120 struct xfs_mount *mp = ip->i_mount;
2121 int cur_max;
2122 xfs_ifork_t *ifp;
2123 struct xfs_btree_block *new_broot;
2124 int new_max;
2125 size_t new_size;
2126 char *np;
2127 char *op;
2128
2129 /*
2130 * Handle the degenerate case quietly.
2131 */
2132 if (rec_diff == 0) {
2133 return;
2134 }
2135
2136 ifp = XFS_IFORK_PTR(ip, whichfork);
2137 if (rec_diff > 0) {
2138 /*
2139 * If there wasn't any memory allocated before, just
2140 * allocate it now and get out.
2141 */
2142 if (ifp->if_broot_bytes == 0) {
2143 new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
2144 ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
2145 ifp->if_broot_bytes = (int)new_size;
2146 return;
2147 }
2148
2149 /*
2150 * If there is already an existing if_broot, then we need
2151 * to realloc() it and shift the pointers to their new
2152 * location. The records don't change location because
2153 * they are kept butted up against the btree block header.
2154 */
2155 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
2156 new_max = cur_max + rec_diff;
2157 new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
2158 ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
2159 XFS_BMAP_BROOT_SPACE_CALC(mp, cur_max),
2160 KM_SLEEP | KM_NOFS);
2161 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
2162 ifp->if_broot_bytes);
2163 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
2164 (int)new_size);
2165 ifp->if_broot_bytes = (int)new_size;
2166 ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
2167 XFS_IFORK_SIZE(ip, whichfork));
2168 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
2169 return;
2170 }
2171
2172 /*
2173 * rec_diff is less than 0. In this case, we are shrinking the
2174 * if_broot buffer. It must already exist. If we go to zero
2175 * records, just get rid of the root and clear the status bit.
2176 */
2177 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
2178 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
2179 new_max = cur_max + rec_diff;
2180 ASSERT(new_max >= 0);
2181 if (new_max > 0)
2182 new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
2183 else
2184 new_size = 0;
2185 if (new_size > 0) {
2186 new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
2187 /*
2188 * First copy over the btree block header.
2189 */
2190 memcpy(new_broot, ifp->if_broot,
2191 XFS_BMBT_BLOCK_LEN(ip->i_mount));
2192 } else {
2193 new_broot = NULL;
2194 ifp->if_flags &= ~XFS_IFBROOT;
2195 }
2196
2197 /*
2198 * Only copy the records and pointers if there are any.
2199 */
2200 if (new_max > 0) {
2201 /*
2202 * First copy the records.
2203 */
2204 op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
2205 np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
2206 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
2207
2208 /*
2209 * Then copy the pointers.
2210 */
2211 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
2212 ifp->if_broot_bytes);
2213 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
2214 (int)new_size);
2215 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
2216 }
2217 kmem_free(ifp->if_broot);
2218 ifp->if_broot = new_broot;
2219 ifp->if_broot_bytes = (int)new_size;
2220 if (ifp->if_broot)
2221 ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
2222 XFS_IFORK_SIZE(ip, whichfork));
2223 return;
2224 }
2225
2226
2227 /*
2228 * This is called when the amount of space needed for if_data
2229 * is increased or decreased. The change in size is indicated by
2230 * the number of bytes that need to be added or deleted in the
2231 * byte_diff parameter.
2232 *
2233 * If the amount of space needed has decreased below the size of the
2234 * inline buffer, then switch to using the inline buffer. Otherwise,
2235 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
2236 * to what is needed.
2237 *
2238 * ip -- the inode whose if_data area is changing
2239 * byte_diff -- the change in the number of bytes, positive or negative,
2240 * requested for the if_data array.
2241 */
2242 void
2243 xfs_idata_realloc(
2244 xfs_inode_t *ip,
2245 int byte_diff,
2246 int whichfork)
2247 {
2248 xfs_ifork_t *ifp;
2249 int new_size;
2250 int real_size;
2251
2252 if (byte_diff == 0) {
2253 return;
2254 }
2255
2256 ifp = XFS_IFORK_PTR(ip, whichfork);
2257 new_size = (int)ifp->if_bytes + byte_diff;
2258 ASSERT(new_size >= 0);
2259
2260 if (new_size == 0) {
2261 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2262 kmem_free(ifp->if_u1.if_data);
2263 }
2264 ifp->if_u1.if_data = NULL;
2265 real_size = 0;
2266 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
2267 /*
2268 * If the valid extents/data can fit in if_inline_ext/data,
2269 * copy them from the malloc'd vector and free it.
2270 */
2271 if (ifp->if_u1.if_data == NULL) {
2272 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2273 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2274 ASSERT(ifp->if_real_bytes != 0);
2275 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
2276 new_size);
2277 kmem_free(ifp->if_u1.if_data);
2278 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2279 }
2280 real_size = 0;
2281 } else {
2282 /*
2283 * Stuck with malloc/realloc.
2284 * For inline data, the underlying buffer must be
2285 * a multiple of 4 bytes in size so that it can be
2286 * logged and stay on word boundaries. We enforce
2287 * that here.
2288 */
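/* e.g. a 10 byte local fork would get a 12 byte, word aligned buffer */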
2289 real_size = roundup(new_size, 4);
2290 if (ifp->if_u1.if_data == NULL) {
2291 ASSERT(ifp->if_real_bytes == 0);
2292 ifp->if_u1.if_data = kmem_alloc(real_size,
2293 KM_SLEEP | KM_NOFS);
2294 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2295 /*
2296 * Only do the realloc if the underlying size
2297 * is really changing.
2298 */
2299 if (ifp->if_real_bytes != real_size) {
2300 ifp->if_u1.if_data =
2301 kmem_realloc(ifp->if_u1.if_data,
2302 real_size,
2303 ifp->if_real_bytes,
2304 KM_SLEEP | KM_NOFS);
2305 }
2306 } else {
2307 ASSERT(ifp->if_real_bytes == 0);
2308 ifp->if_u1.if_data = kmem_alloc(real_size,
2309 KM_SLEEP | KM_NOFS);
2310 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
2311 ifp->if_bytes);
2312 }
2313 }
2314 ifp->if_real_bytes = real_size;
2315 ifp->if_bytes = new_size;
2316 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2317 }
2318
2319 void
2320 xfs_idestroy_fork(
2321 xfs_inode_t *ip,
2322 int whichfork)
2323 {
2324 xfs_ifork_t *ifp;
2325
2326 ifp = XFS_IFORK_PTR(ip, whichfork);
2327 if (ifp->if_broot != NULL) {
2328 kmem_free(ifp->if_broot);
2329 ifp->if_broot = NULL;
2330 }
2331
2332 /*
2333 * If the format is local, then we can't have an extents
2334 * array so just look for an inline data array. If we're
2335 * not local then we may or may not have an extents list,
2336 * so check and free it up if we do.
2337 */
2338 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2339 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2340 (ifp->if_u1.if_data != NULL)) {
2341 ASSERT(ifp->if_real_bytes != 0);
2342 kmem_free(ifp->if_u1.if_data);
2343 ifp->if_u1.if_data = NULL;
2344 ifp->if_real_bytes = 0;
2345 }
2346 } else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2347 ((ifp->if_flags & XFS_IFEXTIREC) ||
2348 ((ifp->if_u1.if_extents != NULL) &&
2349 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2350 ASSERT(ifp->if_real_bytes != 0);
2351 xfs_iext_destroy(ifp);
2352 }
2353 ASSERT(ifp->if_u1.if_extents == NULL ||
2354 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2355 ASSERT(ifp->if_real_bytes == 0);
2356 if (whichfork == XFS_ATTR_FORK) {
2357 kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2358 ip->i_afp = NULL;
2359 }
2360 }
2361
2362 /*
2363 * This is called to unpin an inode. The caller must have the inode locked
2364 * in at least shared mode so that the buffer cannot be subsequently pinned
2365 * once someone is waiting for it to be unpinned.
2366 */
2367 static void
2368 xfs_iunpin(
2369 struct xfs_inode *ip)
2370 {
2371 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2372
2373 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2374
2375 /* Give the log a push to start the unpinning I/O */
2376 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
2377
2378 }
2379
2380 static void
2381 __xfs_iunpin_wait(
2382 struct xfs_inode *ip)
2383 {
2384 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2385 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2386
2387 xfs_iunpin(ip);
2388
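/*
 * Sleep on the pin bit waitqueue until the pin count drops to zero;
 * the unpin side is expected to wake this bit once the last log
 * completion releases its reference to the inode.
 */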
2389 do {
2390 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
2391 if (xfs_ipincount(ip))
2392 io_schedule();
2393 } while (xfs_ipincount(ip));
2394 finish_wait(wq, &wait.wait);
2395 }
2396
2397 void
2398 xfs_iunpin_wait(
2399 struct xfs_inode *ip)
2400 {
2401 if (xfs_ipincount(ip))
2402 __xfs_iunpin_wait(ip);
2403 }
2404
2405 /*
2406 * xfs_iextents_copy()
2407 *
2408 * This is called to copy the REAL extents (as opposed to the delayed
2409 * allocation extents) from the inode into the given buffer. It
2410 * returns the number of bytes copied into the buffer.
2411 *
2412 * If there are no delayed allocation extents, then we can just
2413 * memcpy() the extents into the buffer. Otherwise, we need to
2414 * examine each extent in turn and skip those which are delayed.
2415 */
2416 int
2417 xfs_iextents_copy(
2418 xfs_inode_t *ip,
2419 xfs_bmbt_rec_t *dp,
2420 int whichfork)
2421 {
2422 int copied;
2423 int i;
2424 xfs_ifork_t *ifp;
2425 int nrecs;
2426 xfs_fsblock_t start_block;
2427
2428 ifp = XFS_IFORK_PTR(ip, whichfork);
2429 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2430 ASSERT(ifp->if_bytes > 0);
2431
2432 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2433 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
2434 ASSERT(nrecs > 0);
2435
2436 /*
2437 * There are some delayed allocation extents in the
2438 * inode, so copy the extents one at a time and skip
2439 * the delayed ones. There must be at least one
2440 * non-delayed extent.
2441 */
2442 copied = 0;
2443 for (i = 0; i < nrecs; i++) {
2444 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
2445 start_block = xfs_bmbt_get_startblock(ep);
2446 if (isnullstartblock(start_block)) {
2447 /*
2448 * It's a delayed allocation extent, so skip it.
2449 */
2450 continue;
2451 }
2452
2453 /* Translate to on disk format */
2454 put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
2455 put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
2456 dp++;
2457 copied++;
2458 }
2459 ASSERT(copied != 0);
2460 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));
2461
2462 return (copied * (uint)sizeof(xfs_bmbt_rec_t));
2463 }
2464
2465 /*
2466 * Each of the following cases stores data into the same region
2467 * of the on-disk inode, so only one of them can be valid at
2468 * any given time. While it is possible to have conflicting formats
2469 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
2470 * in EXTENTS format, this can only happen when the fork has
2471 * changed formats after being modified but before being flushed.
2472 * In these cases, the format always takes precedence, because the
2473 * format indicates the current state of the fork.
2474 */
2475 /*ARGSUSED*/
2476 STATIC void
2477 xfs_iflush_fork(
2478 xfs_inode_t *ip,
2479 xfs_dinode_t *dip,
2480 xfs_inode_log_item_t *iip,
2481 int whichfork,
2482 xfs_buf_t *bp)
2483 {
2484 char *cp;
2485 xfs_ifork_t *ifp;
2486 xfs_mount_t *mp;
2487 static const short brootflag[2] =
2488 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
2489 static const short dataflag[2] =
2490 { XFS_ILOG_DDATA, XFS_ILOG_ADATA };
2491 static const short extflag[2] =
2492 { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
2493
2494 if (!iip)
2495 return;
2496 ifp = XFS_IFORK_PTR(ip, whichfork);
2497 /*
2498 * This can happen if we gave up in iformat in an error path,
2499 * for the attribute fork.
2500 */
2501 if (!ifp) {
2502 ASSERT(whichfork == XFS_ATTR_FORK);
2503 return;
2504 }
2505 cp = XFS_DFORK_PTR(dip, whichfork);
2506 mp = ip->i_mount;
2507 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
2508 case XFS_DINODE_FMT_LOCAL:
2509 if ((iip->ili_fields & dataflag[whichfork]) &&
2510 (ifp->if_bytes > 0)) {
2511 ASSERT(ifp->if_u1.if_data != NULL);
2512 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2513 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
2514 }
2515 break;
2516
2517 case XFS_DINODE_FMT_EXTENTS:
2518 ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2519 !(iip->ili_fields & extflag[whichfork]));
2520 if ((iip->ili_fields & extflag[whichfork]) &&
2521 (ifp->if_bytes > 0)) {
2522 ASSERT(xfs_iext_get_ext(ifp, 0));
2523 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
2524 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
2525 whichfork);
2526 }
2527 break;
2528
2529 case XFS_DINODE_FMT_BTREE:
2530 if ((iip->ili_fields & brootflag[whichfork]) &&
2531 (ifp->if_broot_bytes > 0)) {
2532 ASSERT(ifp->if_broot != NULL);
2533 ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
2534 XFS_IFORK_SIZE(ip, whichfork));
2535 xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
2536 (xfs_bmdr_block_t *)cp,
2537 XFS_DFORK_SIZE(dip, mp, whichfork));
2538 }
2539 break;
2540
2541 case XFS_DINODE_FMT_DEV:
2542 if (iip->ili_fields & XFS_ILOG_DEV) {
2543 ASSERT(whichfork == XFS_DATA_FORK);
2544 xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev);
2545 }
2546 break;
2547
2548 case XFS_DINODE_FMT_UUID:
2549 if (iip->ili_fields & XFS_ILOG_UUID) {
2550 ASSERT(whichfork == XFS_DATA_FORK);
2551 memcpy(XFS_DFORK_DPTR(dip),
2552 &ip->i_df.if_u2.if_uuid,
2553 sizeof(uuid_t));
2554 }
2555 break;
2556
2557 default:
2558 ASSERT(0);
2559 break;
2560 }
2561 }
2562
2563 STATIC int
2564 xfs_iflush_cluster(
2565 xfs_inode_t *ip,
2566 xfs_buf_t *bp)
2567 {
2568 xfs_mount_t *mp = ip->i_mount;
2569 struct xfs_perag *pag;
2570 unsigned long first_index, mask;
2571 unsigned long inodes_per_cluster;
2572 int ilist_size;
2573 xfs_inode_t **ilist;
2574 xfs_inode_t *iq;
2575 int nr_found;
2576 int clcount = 0;
2577 int bufwasdelwri;
2578 int i;
2579
2580 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2581
2582 inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
2583 ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
2584 ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
2585 if (!ilist)
2586 goto out_put;
2587
2588 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
2589 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
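/*
 * Example of the masking above (assuming an 8k inode cluster and 256
 * byte inodes, i.e. 32 inodes per cluster): the mask clears the low 5
 * bits of the agino, so first_index becomes the agino of the first
 * inode in the cluster that ip belongs to.
 */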
2590 rcu_read_lock();
2591 /* really need a gang lookup range call here */
2592 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
2593 first_index, inodes_per_cluster);
2594 if (nr_found == 0)
2595 goto out_free;
2596
2597 for (i = 0; i < nr_found; i++) {
2598 iq = ilist[i];
2599 if (iq == ip)
2600 continue;
2601
2602 /*
2603 * because this is an RCU protected lookup, we could find a
2604 * recently freed or even reallocated inode during the lookup.
2605 * We need to check under the i_flags_lock for a valid inode
2606 * here. Skip it if it is not valid or the wrong inode.
2607 */
2608 spin_lock(&iq->i_flags_lock);
2609 if (!iq->i_ino ||
2610 (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
2611 spin_unlock(&iq->i_flags_lock);
2612 continue;
2613 }
2614 spin_unlock(&iq->i_flags_lock);
2615
2616 /*
2617 * Do an un-protected check to see if the inode is dirty and
2618 * is a candidate for flushing. These checks will be repeated
2619 * later after the appropriate locks are acquired.
2620 */
2621 if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
2622 continue;
2623
2624 /*
2625 * Try to get locks. If any are unavailable or it is pinned,
2626 * then this inode cannot be flushed and is skipped.
2627 */
2628
2629 if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
2630 continue;
2631 if (!xfs_iflock_nowait(iq)) {
2632 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2633 continue;
2634 }
2635 if (xfs_ipincount(iq)) {
2636 xfs_ifunlock(iq);
2637 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2638 continue;
2639 }
2640
2641 /*
2642 * arriving here means that this inode can be flushed. First
2643 * re-check that it's dirty before flushing.
2644 */
2645 if (!xfs_inode_clean(iq)) {
2646 int error;
2647 error = xfs_iflush_int(iq, bp);
2648 if (error) {
2649 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2650 goto cluster_corrupt_out;
2651 }
2652 clcount++;
2653 } else {
2654 xfs_ifunlock(iq);
2655 }
2656 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2657 }
2658
2659 if (clcount) {
2660 XFS_STATS_INC(xs_icluster_flushcnt);
2661 XFS_STATS_ADD(xs_icluster_flushinode, clcount);
2662 }
2663
2664 out_free:
2665 rcu_read_unlock();
2666 kmem_free(ilist);
2667 out_put:
2668 xfs_perag_put(pag);
2669 return 0;
2670
2671
2672 cluster_corrupt_out:
2673 /*
2674 * Corruption detected in the clustering loop. Invalidate the
2675 * inode buffer and shut down the filesystem.
2676 */
2677 rcu_read_unlock();
2678 /*
2679 * Clean up the buffer. If it was delwri, just release it --
2680 * brelse can handle it with no problems. If not, shut down the
2681 * filesystem before releasing the buffer.
2682 */
2683 bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
2684 if (bufwasdelwri)
2685 xfs_buf_relse(bp);
2686
2687 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2688
2689 if (!bufwasdelwri) {
2690 /*
2691 * Just like incore_relse: if we have b_iodone functions,
2692 * mark the buffer as an error and call them. Otherwise
2693 * mark it as stale and brelse.
2694 */
2695 if (bp->b_iodone) {
2696 XFS_BUF_UNDONE(bp);
2697 xfs_buf_stale(bp);
2698 xfs_buf_ioerror(bp, EIO);
2699 xfs_buf_ioend(bp, 0);
2700 } else {
2701 xfs_buf_stale(bp);
2702 xfs_buf_relse(bp);
2703 }
2704 }
2705
2706 /*
2707 * Unlocks the flush lock
2708 */
2709 xfs_iflush_abort(iq, false);
2710 kmem_free(ilist);
2711 xfs_perag_put(pag);
2712 return XFS_ERROR(EFSCORRUPTED);
2713 }
2714
2715 /*
2716 * Flush dirty inode metadata into the backing buffer.
2717 *
2718 * The caller must have the inode lock and the inode flush lock held. The
2719 * inode lock will still be held upon return to the caller, and the inode
2720 * flush lock will be released after the inode has reached the disk.
2721 *
2722 * The caller must write out the buffer returned in *bpp and release it.
2723 */
2724 int
2725 xfs_iflush(
2726 struct xfs_inode *ip,
2727 struct xfs_buf **bpp)
2728 {
2729 struct xfs_mount *mp = ip->i_mount;
2730 struct xfs_buf *bp;
2731 struct xfs_dinode *dip;
2732 int error;
2733
2734 XFS_STATS_INC(xs_iflush_count);
2735
2736 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2737 ASSERT(xfs_isiflocked(ip));
2738 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
2739 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
2740
2741 *bpp = NULL;
2742
2743 xfs_iunpin_wait(ip);
2744
2745 /*
2746 * For stale inodes we cannot rely on the backing buffer remaining
2747 * stale in cache for the remaining life of the stale inode and so
2748 * xfs_imap_to_bp() below may give us a buffer that no longer contains
2749 * inodes below. We have to check this after ensuring the inode is
2750 * unpinned so that it is safe to reclaim the stale inode after the
2751 * flush call.
2752 */
2753 if (xfs_iflags_test(ip, XFS_ISTALE)) {
2754 xfs_ifunlock(ip);
2755 return 0;
2756 }
2757
2758 /*
2759 * This may have been unpinned because the filesystem is shutting
2760 * down forcibly. If that's the case we must not write this inode
2761 * to disk, because the log record didn't make it to disk.
2762 *
2763 * We also have to remove the log item from the AIL in this case,
2764 * as we wait for an empty AIL as part of the unmount process.
2765 */
2766 if (XFS_FORCED_SHUTDOWN(mp)) {
2767 error = XFS_ERROR(EIO);
2768 goto abort_out;
2769 }
2770
2771 /*
2772 * Get the buffer containing the on-disk inode.
2773 */
2774 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
2775 0);
2776 if (error || !bp) {
2777 xfs_ifunlock(ip);
2778 return error;
2779 }
2780
2781 /*
2782 * First flush out the inode that xfs_iflush was called with.
2783 */
2784 error = xfs_iflush_int(ip, bp);
2785 if (error)
2786 goto corrupt_out;
2787
2788 /*
2789 * If the buffer is pinned then push on the log now so we won't
2790 * get stuck waiting in the write for too long.
2791 */
2792 if (xfs_buf_ispinned(bp))
2793 xfs_log_force(mp, 0);
2794
2795 /*
2796 * inode clustering:
2797 * see if other inodes can be gathered into this write
2798 */
2799 error = xfs_iflush_cluster(ip, bp);
2800 if (error)
2801 goto cluster_corrupt_out;
2802
2803 *bpp = bp;
2804 return 0;
2805
2806 corrupt_out:
2807 xfs_buf_relse(bp);
2808 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2809 cluster_corrupt_out:
2810 error = XFS_ERROR(EFSCORRUPTED);
2811 abort_out:
2812 /*
2813 * Unlocks the flush lock
2814 */
2815 xfs_iflush_abort(ip, false);
2816 return error;
2817 }
2818
2819
2820 STATIC int
2821 xfs_iflush_int(
2822 struct xfs_inode *ip,
2823 struct xfs_buf *bp)
2824 {
2825 struct xfs_inode_log_item *iip = ip->i_itemp;
2826 struct xfs_dinode *dip;
2827 struct xfs_mount *mp = ip->i_mount;
2828
2829 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2830 ASSERT(xfs_isiflocked(ip));
2831 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
2832 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
2833 ASSERT(iip != NULL && iip->ili_fields != 0);
2834
2835 /* set *dip = inode's place in the buffer */
2836 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
2837
2838 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
2839 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
2840 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2841 "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
2842 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
2843 goto corrupt_out;
2844 }
2845 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
2846 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
2847 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2848 "%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
2849 __func__, ip->i_ino, ip, ip->i_d.di_magic);
2850 goto corrupt_out;
2851 }
2852 if (S_ISREG(ip->i_d.di_mode)) {
2853 if (XFS_TEST_ERROR(
2854 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
2855 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
2856 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
2857 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2858 "%s: Bad regular inode %Lu, ptr 0x%p",
2859 __func__, ip->i_ino, ip);
2860 goto corrupt_out;
2861 }
2862 } else if (S_ISDIR(ip->i_d.di_mode)) {
2863 if (XFS_TEST_ERROR(
2864 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
2865 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
2866 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
2867 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
2868 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2869 "%s: Bad directory inode %Lu, ptr 0x%p",
2870 __func__, ip->i_ino, ip);
2871 goto corrupt_out;
2872 }
2873 }
2874 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
2875 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
2876 XFS_RANDOM_IFLUSH_5)) {
2877 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2878 "%s: detected corrupt incore inode %Lu, "
2879 "total extents = %d, nblocks = %Ld, ptr 0x%p",
2880 __func__, ip->i_ino,
2881 ip->i_d.di_nextents + ip->i_d.di_anextents,
2882 ip->i_d.di_nblocks, ip);
2883 goto corrupt_out;
2884 }
2885 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
2886 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
2887 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2888 "%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
2889 __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
2890 goto corrupt_out;
2891 }
2892
2893 /*
2894 * Inode item log recovery for v1/v2 inodes is dependent on the
2895 * di_flushiter count for correct sequencing. We bump the flush
2896 * iteration count so we can detect flushes which postdate a log record
2897 * during recovery. This is redundant as we now log every change and
2898 * hence this can't happen, but we still need to do it to ensure
2899 * backwards compatibility with old kernels that predate logging all
2900 * inode changes.
2901 */
2902 if (ip->i_d.di_version < 3)
2903 ip->i_d.di_flushiter++;
2904
2905 /*
2906 * Copy the dirty parts of the inode into the on-disk
2907 * inode. We always copy out the core of the inode,
2908 * because if the inode is dirty at all the core must
2909 * be.
2910 */
2911 xfs_dinode_to_disk(dip, &ip->i_d);
2912
2913 /* Wrap, we never let the log put out DI_MAX_FLUSH */
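/* (DI_MAX_FLUSH is the largest value the 16 bit on-disk counter holds) */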
2914 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
2915 ip->i_d.di_flushiter = 0;
2916
2917 /*
2918 * If this is really an old format inode and the superblock version
2919 * has not been updated to support only new format inodes, then
2920 * convert back to the old inode format. If the superblock version
2921 * has been updated, then make the conversion permanent.
2922 */
2923 ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
2924 if (ip->i_d.di_version == 1) {
2925 if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
2926 /*
2927 * Convert it back.
2928 */
2929 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
2930 dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
2931 } else {
2932 /*
2933 * The superblock version has already been bumped,
2934 * so just make the conversion to the new inode
2935 * format permanent.
2936 */
2937 ip->i_d.di_version = 2;
2938 dip->di_version = 2;
2939 ip->i_d.di_onlink = 0;
2940 dip->di_onlink = 0;
2941 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
2942 memset(&(dip->di_pad[0]), 0,
2943 sizeof(dip->di_pad));
2944 ASSERT(xfs_get_projid(ip) == 0);
2945 }
2946 }
2947
2948 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
2949 if (XFS_IFORK_Q(ip))
2950 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
2951 xfs_inobp_check(mp, bp);
2952
2953 /*
2954 * We've recorded everything logged in the inode, so we'd like to clear
2955 * the ili_fields bits so we don't log and flush things unnecessarily.
2956 * However, we can't stop logging all this information until the data
2957 * we've copied into the disk buffer is written to disk. If we did we
2958 * might overwrite the copy of the inode in the log with all the data
2959 * after re-logging only part of it, and in the face of a crash we
2960 * wouldn't have all the data we need to recover.
2961 *
2962 * What we do is move the bits to the ili_last_fields field. When
2963 * logging the inode, these bits are moved back to the ili_fields field.
2964 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
2965 * know that the information those bits represent is permanently on
2966 * disk. As long as the flush completes before the inode is logged
2967 * again, then both ili_fields and ili_last_fields will be cleared.
2968 *
2969 * We can play with the ili_fields bits here, because the inode lock
2970 * must be held exclusively in order to set bits there and the flush
2971 * lock protects the ili_last_fields bits. Set ili_logged so the flush
2972 * done routine can tell whether or not to look in the AIL. Also, store
2973 * the current LSN of the inode so that we can tell whether the item has
2974 * moved in the AIL from xfs_iflush_done(). In order to read the lsn we
2975 * need the AIL lock, because it is a 64 bit value that cannot be read
2976 * atomically.
2977 */
2978 iip->ili_last_fields = iip->ili_fields;
2979 iip->ili_fields = 0;
2980 iip->ili_logged = 1;
2981
2982 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2983 &iip->ili_item.li_lsn);
2984
2985 /*
2986 * Attach the function xfs_iflush_done to the inode's
2987 * buffer. This will remove the inode from the AIL
2988 * and unlock the inode's flush lock when the inode is
2989 * completely written to disk.
2990 */
2991 xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
2992
2993 /* update the lsn in the on disk inode if required */
2994 if (ip->i_d.di_version == 3)
2995 dip->di_lsn = cpu_to_be64(iip->ili_item.li_lsn);
2996
2997 /* generate the checksum. */
2998 xfs_dinode_calc_crc(mp, dip);
2999
3000 ASSERT(bp->b_fspriv != NULL);
3001 ASSERT(bp->b_iodone != NULL);
3002 return 0;
3003
3004 corrupt_out:
3005 return XFS_ERROR(EFSCORRUPTED);
3006 }
3007
3008 /*
3009 * Return a pointer to the extent record at file index idx.
3010 */
3011 xfs_bmbt_rec_host_t *
3012 xfs_iext_get_ext(
3013 xfs_ifork_t *ifp, /* inode fork pointer */
3014 xfs_extnum_t idx) /* index of target extent */
3015 {
3016 ASSERT(idx >= 0);
3017 ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
3018
3019 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
3020 return ifp->if_u1.if_ext_irec->er_extbuf;
3021 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3022 xfs_ext_irec_t *erp; /* irec pointer */
3023 int erp_idx = 0; /* irec index */
3024 xfs_extnum_t page_idx = idx; /* ext index in target list */
3025
3026 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3027 return &erp->er_extbuf[page_idx];
3028 } else if (ifp->if_bytes) {
3029 return &ifp->if_u1.if_extents[idx];
3030 } else {
3031 return NULL;
3032 }
3033 }
3034
3035 /*
3036 * Insert new item(s) into the extent records for incore inode
3037 * fork 'ifp'. 'count' new items are inserted at index 'idx'.
3038 */
3039 void
3040 xfs_iext_insert(
3041 xfs_inode_t *ip, /* incore inode pointer */
3042 xfs_extnum_t idx, /* starting index of new items */
3043 xfs_extnum_t count, /* number of inserted items */
3044 xfs_bmbt_irec_t *new, /* items to insert */
3045 int state) /* type of extent conversion */
3046 {
3047 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
3048 xfs_extnum_t i; /* extent record index */
3049
3050 trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
3051
3052 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3053 xfs_iext_add(ifp, idx, count);
3054 for (i = idx; i < idx + count; i++, new++)
3055 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
3056 }
3057
3058 /*
3059 * This is called when the amount of space required for incore file
3060 * extents needs to be increased. The ext_diff parameter stores the
3061 * number of new extents being added and the idx parameter contains
3062 * the extent index where the new extents will be added. If the new
3063 * extents are being appended, then we just need to (re)allocate and
3064 * initialize the space. Otherwise, if the new extents are being
3065 * inserted into the middle of the existing entries, a bit more work
3066 * is required to make room for the new extents to be inserted. The
3067 * caller is responsible for filling in the new extent entries upon
3068 * return.
3069 */
3070 void
3071 xfs_iext_add(
3072 xfs_ifork_t *ifp, /* inode fork pointer */
3073 xfs_extnum_t idx, /* index to begin adding exts */
3074 int ext_diff) /* number of extents to add */
3075 {
3076 int byte_diff; /* new bytes being added */
3077 int new_size; /* size of extents after adding */
3078 xfs_extnum_t nextents; /* number of extents in file */
3079
3080 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3081 ASSERT((idx >= 0) && (idx <= nextents));
3082 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
3083 new_size = ifp->if_bytes + byte_diff;
3084 /*
3085 * If the new number of extents (nextents + ext_diff)
3086 * fits inside the inode, then continue to use the inline
3087 * extent buffer.
3088 */
3089 if (nextents + ext_diff <= XFS_INLINE_EXTS) {
3090 if (idx < nextents) {
3091 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
3092 &ifp->if_u2.if_inline_ext[idx],
3093 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3094 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
3095 }
3096 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3097 ifp->if_real_bytes = 0;
3098 }
3099 /*
3100 * Otherwise use a linear (direct) extent list.
3101 * If the extents are currently inside the inode,
3102 * xfs_iext_realloc_direct will switch us from
3103 * inline to direct extent allocation mode.
3104 */
3105 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
3106 xfs_iext_realloc_direct(ifp, new_size);
3107 if (idx < nextents) {
3108 memmove(&ifp->if_u1.if_extents[idx + ext_diff],
3109 &ifp->if_u1.if_extents[idx],
3110 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3111 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
3112 }
3113 }
3114 /* Indirection array */
3115 else {
3116 xfs_ext_irec_t *erp;
3117 int erp_idx = 0;
3118 int page_idx = idx;
3119
3120 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
3121 if (ifp->if_flags & XFS_IFEXTIREC) {
3122 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
3123 } else {
3124 xfs_iext_irec_init(ifp);
3125 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3126 erp = ifp->if_u1.if_ext_irec;
3127 }
3128 /* Extents fit in target extent page */
3129 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
3130 if (page_idx < erp->er_extcount) {
3131 memmove(&erp->er_extbuf[page_idx + ext_diff],
3132 &erp->er_extbuf[page_idx],
3133 (erp->er_extcount - page_idx) *
3134 sizeof(xfs_bmbt_rec_t));
3135 memset(&erp->er_extbuf[page_idx], 0, byte_diff);
3136 }
3137 erp->er_extcount += ext_diff;
3138 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3139 }
3140 /* Insert a new extent page */
3141 else if (erp) {
3142 xfs_iext_add_indirect_multi(ifp,
3143 erp_idx, page_idx, ext_diff);
3144 }
3145 /*
3146 * If extent(s) are being appended to the last page in
3147 * the indirection array and the new extent(s) don't fit
3148 * in the page, then erp is NULL and erp_idx is set to
3149 * the next index needed in the indirection array.
3150 */
3151 else {
3152 int count = ext_diff;
3153
3154 while (count) {
3155 erp = xfs_iext_irec_new(ifp, erp_idx);
3156 erp->er_extcount = count;
3157 count -= MIN(count, (int)XFS_LINEAR_EXTS);
3158 if (count) {
3159 erp_idx++;
3160 }
3161 }
3162 }
3163 }
3164 ifp->if_bytes = new_size;
3165 }
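/*
 * Recap of the three storage modes used above: up to XFS_INLINE_EXTS
 * records live in the fork's inline buffer, up to XFS_LINEAR_EXTS in a
 * single direct allocation, and anything larger in the indirection
 * array of XFS_IEXT_BUFSZ sized extent pages (with the usual 16 byte
 * extent record and 4k extent page that works out to 256 extents per
 * page).
 */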
3166
3167 /*
3168 * This is called when incore extents are being added to the indirection
3169 * array and the new extents do not fit in the target extent list. The
3170 * erp_idx parameter contains the irec index for the target extent list
3171 * in the indirection array, and the idx parameter contains the extent
3172 * index within the list. The number of extents being added is stored
3173 * in the count parameter.
3174 *
3175 * |-------| |-------|
3176 * | | | | idx - number of extents before idx
3177 * | idx | | count |
3178 * | | | | count - number of extents being inserted at idx
3179 * |-------| |-------|
3180 * | count | | nex2 | nex2 - number of extents after idx + count
3181 * |-------| |-------|
3182 */
3183 void
3184 xfs_iext_add_indirect_multi(
3185 xfs_ifork_t *ifp, /* inode fork pointer */
3186 int erp_idx, /* target extent irec index */
3187 xfs_extnum_t idx, /* index within target list */
3188 int count) /* new extents being added */
3189 {
3190 int byte_diff; /* new bytes being added */
3191 xfs_ext_irec_t *erp; /* pointer to irec entry */
3192 xfs_extnum_t ext_diff; /* number of extents to add */
3193 xfs_extnum_t ext_cnt; /* new extents still needed */
3194 xfs_extnum_t nex2; /* extents after idx + count */
3195 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */
3196 int nlists; /* number of irec's (lists) */
3197
3198 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3199 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3200 nex2 = erp->er_extcount - idx;
3201 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3202
3203 /*
3204 * Save second part of target extent list
3205 * (all extents at and after idx). */
3206 if (nex2) {
3207 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3208 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
3209 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3210 erp->er_extcount -= nex2;
3211 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
3212 memset(&erp->er_extbuf[idx], 0, byte_diff);
3213 }
3214
3215 /*
3216 * Add the new extents to the end of the target
3217 * list, then allocate new irec record(s) and
3218 * extent buffer(s) as needed to store the rest
3219 * of the new extents.
3220 */
3221 ext_cnt = count;
3222 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
3223 if (ext_diff) {
3224 erp->er_extcount += ext_diff;
3225 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3226 ext_cnt -= ext_diff;
3227 }
3228 while (ext_cnt) {
3229 erp_idx++;
3230 erp = xfs_iext_irec_new(ifp, erp_idx);
3231 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
3232 erp->er_extcount = ext_diff;
3233 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3234 ext_cnt -= ext_diff;
3235 }
3236
3237 /* Add nex2 extents back to indirection array */
3238 if (nex2) {
3239 xfs_extnum_t ext_avail;
3240 int i;
3241
3242 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3243 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
3244 i = 0;
3245 /*
3246 * If nex2 extents fit in the current page, append
3247 * nex2_ep after the new extents.
3248 */
3249 if (nex2 <= ext_avail) {
3250 i = erp->er_extcount;
3251 }
3252 /*
3253 * Otherwise, check if space is available in the
3254 * next page.
3255 */
3256 else if ((erp_idx < nlists - 1) &&
3257 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
3258 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
3259 erp_idx++;
3260 erp++;
3261 /* Create a hole for nex2 extents */
3262 memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
3263 erp->er_extcount * sizeof(xfs_bmbt_rec_t));
3264 }
3265 /*
3266 * Final choice, create a new extent page for
3267 * nex2 extents.
3268 */
3269 else {
3270 erp_idx++;
3271 erp = xfs_iext_irec_new(ifp, erp_idx);
3272 }
3273 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
3274 kmem_free(nex2_ep);
3275 erp->er_extcount += nex2;
3276 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
3277 }
3278 }
3279
3280 /*
3281 * This is called when the amount of space required for incore file
3282 * extents needs to be decreased. The ext_diff parameter stores the
3283 * number of extents to be removed and the idx parameter contains
3284 * the extent index where the extents will be removed from.
3285 *
3286 * If the amount of space needed has decreased below the linear
3287 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
3288 * extent array. Otherwise, use kmem_realloc() to adjust the
3289 * size to what is needed.
3290 */
3291 void
3292 xfs_iext_remove(
3293 xfs_inode_t *ip, /* incore inode pointer */
3294 xfs_extnum_t idx, /* index to begin removing exts */
3295 int ext_diff, /* number of extents to remove */
3296 int state) /* type of extent conversion */
3297 {
3298 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
3299 xfs_extnum_t nextents; /* number of extents in file */
3300 int new_size; /* size of extents after removal */
3301
3302 trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
3303
3304 ASSERT(ext_diff > 0);
3305 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3306 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
3307
3308 if (new_size == 0) {
3309 xfs_iext_destroy(ifp);
3310 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3311 xfs_iext_remove_indirect(ifp, idx, ext_diff);
3312 } else if (ifp->if_real_bytes) {
3313 xfs_iext_remove_direct(ifp, idx, ext_diff);
3314 } else {
3315 xfs_iext_remove_inline(ifp, idx, ext_diff);
3316 }
3317 ifp->if_bytes = new_size;
3318 }
3319
3320 /*
3321 * This removes ext_diff extents from the inline buffer, beginning
3322 * at extent index idx.
3323 */
3324 void
3325 xfs_iext_remove_inline(
3326 xfs_ifork_t *ifp, /* inode fork pointer */
3327 xfs_extnum_t idx, /* index to begin removing exts */
3328 int ext_diff) /* number of extents to remove */
3329 {
3330 int nextents; /* number of extents in file */
3331
3332 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3333 ASSERT(idx < XFS_INLINE_EXTS);
3334 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3335 ASSERT(((nextents - ext_diff) > 0) &&
3336 (nextents - ext_diff) < XFS_INLINE_EXTS);
3337
3338 if (idx + ext_diff < nextents) {
3339 memmove(&ifp->if_u2.if_inline_ext[idx],
3340 &ifp->if_u2.if_inline_ext[idx + ext_diff],
3341 (nextents - (idx + ext_diff)) *
3342 sizeof(xfs_bmbt_rec_t));
3343 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
3344 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3345 } else {
3346 memset(&ifp->if_u2.if_inline_ext[idx], 0,
3347 ext_diff * sizeof(xfs_bmbt_rec_t));
3348 }
3349 }
3350
3351 /*
3352 * This removes ext_diff extents from a linear (direct) extent list,
3353 * beginning at extent index idx. If the extents are being removed
3354 * from the end of the list (ie. truncate) then we just need to re-
3355 * allocate the list to remove the extra space. Otherwise, if the
3356 * extents are being removed from the middle of the existing extent
3357 * entries, then we first need to move the extent records beginning
3358 * at idx + ext_diff up in the list to overwrite the records being
3359 * removed, then remove the extra space via kmem_realloc.
3360 */
3361 void
3362 xfs_iext_remove_direct(
3363 xfs_ifork_t *ifp, /* inode fork pointer */
3364 xfs_extnum_t idx, /* index to begin removing exts */
3365 int ext_diff) /* number of extents to remove */
3366 {
3367 xfs_extnum_t nextents; /* number of extents in file */
3368 int new_size; /* size of extents after removal */
3369
3370 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3371 new_size = ifp->if_bytes -
3372 (ext_diff * sizeof(xfs_bmbt_rec_t));
3373 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3374
3375 if (new_size == 0) {
3376 xfs_iext_destroy(ifp);
3377 return;
3378 }
3379 /* Move extents up in the list (if needed) */
3380 if (idx + ext_diff < nextents) {
3381 memmove(&ifp->if_u1.if_extents[idx],
3382 &ifp->if_u1.if_extents[idx + ext_diff],
3383 (nextents - (idx + ext_diff)) *
3384 sizeof(xfs_bmbt_rec_t));
3385 }
3386 memset(&ifp->if_u1.if_extents[nextents - ext_diff],
3387 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3388 /*
3389 * Reallocate the direct extent list. If the extents
3390 * will fit inside the inode then xfs_iext_realloc_direct
3391 * will switch from direct to inline extent allocation
3392 * mode for us.
3393 */
3394 xfs_iext_realloc_direct(ifp, new_size);
3395 ifp->if_bytes = new_size;
3396 }
3397
3398 /*
3399 * This is called when incore extents are being removed from the
3400 * indirection array and the extents being removed span multiple extent
3401 * buffers. The idx parameter contains the file extent index where we
3402 * want to begin removing extents, and the count parameter contains
3403 * how many extents need to be removed.
3404 *
3405 * |-------| |-------|
3406 * | nex1 | | | nex1 - number of extents before idx
3407 * |-------| | count |
3408 * | | | | count - number of extents being removed at idx
3409 * | count | |-------|
3410 * | | | nex2 | nex2 - number of extents after idx + count
3411 * |-------| |-------|
3412 */
3413 void
3414 xfs_iext_remove_indirect(
3415 xfs_ifork_t *ifp, /* inode fork pointer */
3416 xfs_extnum_t idx, /* index to begin removing extents */
3417 int count) /* number of extents to remove */
3418 {
3419 xfs_ext_irec_t *erp; /* indirection array pointer */
3420 int erp_idx = 0; /* indirection array index */
3421 xfs_extnum_t ext_cnt; /* extents left to remove */
3422 xfs_extnum_t ext_diff; /* extents to remove in current list */
3423 xfs_extnum_t nex1; /* number of extents before idx */
3424 xfs_extnum_t nex2; /* extents after idx + count */
3425 int page_idx = idx; /* index in target extent list */
3426
3427 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3428 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3429 ASSERT(erp != NULL);
3430 nex1 = page_idx;
3431 ext_cnt = count;
3432 while (ext_cnt) {
3433 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
3434 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
3435 /*
3436 * Check for deletion of entire list;
3437 * xfs_iext_irec_remove() updates extent offsets.
3438 */
3439 if (ext_diff == erp->er_extcount) {
3440 xfs_iext_irec_remove(ifp, erp_idx);
3441 ext_cnt -= ext_diff;
3442 nex1 = 0;
3443 if (ext_cnt) {
3444 ASSERT(erp_idx < ifp->if_real_bytes /
3445 XFS_IEXT_BUFSZ);
3446 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3447 nex1 = 0;
3448 continue;
3449 } else {
3450 break;
3451 }
3452 }
3453 /* Move extents up (if needed) */
3454 if (nex2) {
3455 memmove(&erp->er_extbuf[nex1],
3456 &erp->er_extbuf[nex1 + ext_diff],
3457 nex2 * sizeof(xfs_bmbt_rec_t));
3458 }
3459 /* Zero out rest of page */
3460 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
3461 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
3462 /* Update remaining counters */
3463 erp->er_extcount -= ext_diff;
3464 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
3465 ext_cnt -= ext_diff;
3466 nex1 = 0;
3467 erp_idx++;
3468 erp++;
3469 }
3470 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
3471 xfs_iext_irec_compact(ifp);
3472 }
3473
3474 /*
3475 * Create, destroy, or resize a linear (direct) block of extents.
3476 */
3477 void
3478 xfs_iext_realloc_direct(
3479 xfs_ifork_t *ifp, /* inode fork pointer */
3480 int new_size) /* new size of extents */
3481 {
3482 int rnew_size; /* real new size of extents */
3483
3484 rnew_size = new_size;
3485
3486 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
3487 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
3488 (new_size != ifp->if_real_bytes)));
3489
3490 /* Free extent records */
3491 if (new_size == 0) {
3492 xfs_iext_destroy(ifp);
3493 }
3494 /* Resize direct extent list and zero any new bytes */
3495 else if (ifp->if_real_bytes) {
3496 /* Check if extents will fit inside the inode */
3497 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
3498 xfs_iext_direct_to_inline(ifp, new_size /
3499 (uint)sizeof(xfs_bmbt_rec_t));
3500 ifp->if_bytes = new_size;
3501 return;
3502 }
3503 if (!is_power_of_2(new_size)) {
3504 rnew_size = roundup_pow_of_two(new_size);
3505 }
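/* e.g. a request to grow to 3000 bytes is rounded up to 4096 here */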
3506 if (rnew_size != ifp->if_real_bytes) {
3507 ifp->if_u1.if_extents =
3508 kmem_realloc(ifp->if_u1.if_extents,
3509 rnew_size,
3510 ifp->if_real_bytes, KM_NOFS);
3511 }
3512 if (rnew_size > ifp->if_real_bytes) {
3513 memset(&ifp->if_u1.if_extents[ifp->if_bytes /
3514 (uint)sizeof(xfs_bmbt_rec_t)], 0,
3515 rnew_size - ifp->if_real_bytes);
3516 }
3517 }
3518 /*
3519 * Switch from the inline extent buffer to a direct
3520 * extent list. Be sure to include the inline extent
3521 * bytes in new_size.
3522 */
3523 else {
3524 new_size += ifp->if_bytes;
3525 if (!is_power_of_2(new_size)) {
3526 rnew_size = roundup_pow_of_two(new_size);
3527 }
3528 xfs_iext_inline_to_direct(ifp, rnew_size);
3529 }
3530 ifp->if_real_bytes = rnew_size;
3531 ifp->if_bytes = new_size;
3532 }
3533
3534 /*
3535 * Switch from linear (direct) extent records to inline buffer.
3536 */
3537 void
3538 xfs_iext_direct_to_inline(
3539 xfs_ifork_t *ifp, /* inode fork pointer */
3540 xfs_extnum_t nextents) /* number of extents in file */
3541 {
3542 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3543 ASSERT(nextents <= XFS_INLINE_EXTS);
3544 /*
3545 * The inline buffer was zeroed when we switched
3546 * from inline to direct extent allocation mode,
3547 * so we don't need to clear it here.
3548 */
3549 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
3550 nextents * sizeof(xfs_bmbt_rec_t));
3551 kmem_free(ifp->if_u1.if_extents);
3552 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3553 ifp->if_real_bytes = 0;
3554 }
3555
3556 /*
3557 * Switch from inline buffer to linear (direct) extent records.
3558 * new_size should already be rounded up to the next power of 2
3559 * by the caller (when appropriate), so use new_size as it is.
3560 * However, since new_size may be rounded up, we can't update
3561 * if_bytes here. It is the caller's responsibility to update
3562 * if_bytes upon return.
3563 */
3564 void
3565 xfs_iext_inline_to_direct(
3566 xfs_ifork_t *ifp, /* inode fork pointer */
3567 int new_size) /* new size of extent records, in bytes */
3568 {
3569 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
3570 memset(ifp->if_u1.if_extents, 0, new_size);
3571 if (ifp->if_bytes) {
3572 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
3573 ifp->if_bytes);
3574 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
3575 sizeof(xfs_bmbt_rec_t));
3576 }
3577 ifp->if_real_bytes = new_size;
3578 }
3579
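/*
 * Illustrative call pattern (an assumption about usage, mirroring what
 * xfs_iext_irec_init() does further down): a caller converting an inline
 * fork into a page-sized direct list is expected to fix up if_bytes
 * itself, e.g.
 *
 *	xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
 *	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
 *
 * because new_size may have been rounded up; this routine updates
 * if_real_bytes but deliberately leaves if_bytes alone.
 */
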
3580 /*
3581 * Resize an extent indirection array to new_size bytes.
3582 */
3583 STATIC void
3584 xfs_iext_realloc_indirect(
3585 xfs_ifork_t *ifp, /* inode fork pointer */
3586 int new_size) /* new indirection array size */
3587 {
3588 int nlists; /* number of irec's (ex lists) */
3589 int size; /* current indirection array size */
3590
3591 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3592 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3593 size = nlists * sizeof(xfs_ext_irec_t);
3594 ASSERT(ifp->if_real_bytes);
3595 ASSERT((new_size >= 0) && (new_size != size));
3596 if (new_size == 0) {
3597 xfs_iext_destroy(ifp);
3598 } else {
3599 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
3600 kmem_realloc(ifp->if_u1.if_ext_irec,
3601 new_size, size, KM_NOFS);
3602 }
3603 }
3604
3605 /*
3606 * Switch from indirection array to linear (direct) extent allocations.
3607 */
3608 STATIC void
3609 xfs_iext_indirect_to_direct(
3610 xfs_ifork_t *ifp) /* inode fork pointer */
3611 {
3612 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
3613 xfs_extnum_t nextents; /* number of extents in file */
3614 int size; /* size of file extents */
3615
3616 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3617 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3618 ASSERT(nextents <= XFS_LINEAR_EXTS);
3619 size = nextents * sizeof(xfs_bmbt_rec_t);
3620
3621 xfs_iext_irec_compact_pages(ifp);
3622 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
3623
3624 ep = ifp->if_u1.if_ext_irec->er_extbuf;
3625 kmem_free(ifp->if_u1.if_ext_irec);
3626 ifp->if_flags &= ~XFS_IFEXTIREC;
3627 ifp->if_u1.if_extents = ep;
3628 ifp->if_bytes = size;
3629 if (nextents < XFS_LINEAR_EXTS) {
3630 xfs_iext_realloc_direct(ifp, size);
3631 }
3632 }
3633
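/*
 * Worked example (illustrative, assuming 16-byte records and a 4k irec
 * page): with 100 extents remaining, size = 1600 bytes, so after the
 * single compacted page is handed over to if_u1.if_extents the trailing
 * xfs_iext_realloc_direct(ifp, 1600) call shrinks the 4096-byte buffer
 * to the next power of two, 2048 bytes, leaving if_bytes = 1600 and
 * if_real_bytes = 2048.
 */
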
3634 /*
3635 * Free incore file extents.
3636 */
3637 void
3638 xfs_iext_destroy(
3639 xfs_ifork_t *ifp) /* inode fork pointer */
3640 {
3641 if (ifp->if_flags & XFS_IFEXTIREC) {
3642 int erp_idx;
3643 int nlists;
3644
3645 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3646 for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
3647 xfs_iext_irec_remove(ifp, erp_idx);
3648 }
3649 ifp->if_flags &= ~XFS_IFEXTIREC;
3650 } else if (ifp->if_real_bytes) {
3651 kmem_free(ifp->if_u1.if_extents);
3652 } else if (ifp->if_bytes) {
3653 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
3654 sizeof(xfs_bmbt_rec_t));
3655 }
3656 ifp->if_u1.if_extents = NULL;
3657 ifp->if_real_bytes = 0;
3658 ifp->if_bytes = 0;
3659 }
3660
3661 /*
3662 * Return a pointer to the extent record for file system block bno.
3663 */
3664 xfs_bmbt_rec_host_t * /* pointer to found extent record */
3665 xfs_iext_bno_to_ext(
3666 xfs_ifork_t *ifp, /* inode fork pointer */
3667 xfs_fileoff_t bno, /* block number to search for */
3668 xfs_extnum_t *idxp) /* index of target extent */
3669 {
3670 xfs_bmbt_rec_host_t *base; /* pointer to first extent */
3671 xfs_filblks_t blockcount = 0; /* number of blocks in extent */
3672 xfs_bmbt_rec_host_t *ep = NULL; /* pointer to target extent */
3673 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
3674 int high; /* upper boundary in search */
3675 xfs_extnum_t idx = 0; /* index of target extent */
3676 int low; /* lower boundary in search */
3677 xfs_extnum_t nextents; /* number of file extents */
3678 xfs_fileoff_t startoff = 0; /* start offset of extent */
3679
3680 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3681 if (nextents == 0) {
3682 *idxp = 0;
3683 return NULL;
3684 }
3685 low = 0;
3686 if (ifp->if_flags & XFS_IFEXTIREC) {
3687 /* Find target extent list */
3688 int erp_idx = 0;
3689 erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
3690 base = erp->er_extbuf;
3691 high = erp->er_extcount - 1;
3692 } else {
3693 base = ifp->if_u1.if_extents;
3694 high = nextents - 1;
3695 }
3696 /* Binary search extent records */
3697 while (low <= high) {
3698 idx = (low + high) >> 1;
3699 ep = base + idx;
3700 startoff = xfs_bmbt_get_startoff(ep);
3701 blockcount = xfs_bmbt_get_blockcount(ep);
3702 if (bno < startoff) {
3703 high = idx - 1;
3704 } else if (bno >= startoff + blockcount) {
3705 low = idx + 1;
3706 } else {
3707 /* Convert back to file-based extent index */
3708 if (ifp->if_flags & XFS_IFEXTIREC) {
3709 idx += erp->er_extoff;
3710 }
3711 *idxp = idx;
3712 return ep;
3713 }
3714 }
3715 /* Convert back to file-based extent index */
3716 if (ifp->if_flags & XFS_IFEXTIREC) {
3717 idx += erp->er_extoff;
3718 }
3719 if (bno >= startoff + blockcount) {
3720 if (++idx == nextents) {
3721 ep = NULL;
3722 } else {
3723 ep = xfs_iext_get_ext(ifp, idx);
3724 }
3725 }
3726 *idxp = idx;
3727 return ep;
3728 }
3729
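/*
 * Lookup example (illustrative only): with three extents covering file
 * blocks [0, 10), [20, 25) and [40, 48), a search for bno = 22 hits the
 * final "else" arm of the binary search and returns the second record
 * with *idxp = 1. A search for bno = 15, which falls in the hole between
 * the first two extents, drops out of the loop and the post-loop fixup
 * advances idx so that the extent *after* the hole is returned
 * (*idxp = 1 again), while a search past the last extent, say bno = 50,
 * returns NULL with *idxp = 3.
 */
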
3730 /*
3731 * Return a pointer to the indirection array entry containing the
3732 * extent record for filesystem block bno. Store the index of the
3733 * target irec in *erp_idxp.
3734 */
3735 xfs_ext_irec_t * /* pointer to found extent record */
3736 xfs_iext_bno_to_irec(
3737 xfs_ifork_t *ifp, /* inode fork pointer */
3738 xfs_fileoff_t bno, /* block number to search for */
3739 int *erp_idxp) /* irec index of target ext list */
3740 {
3741 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
3742 xfs_ext_irec_t *erp_next; /* next indirection array entry */
3743 int erp_idx; /* indirection array index */
3744 int nlists; /* number of extent irec's (lists) */
3745 int high; /* binary search upper limit */
3746 int low; /* binary search lower limit */
3747
3748 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3749 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3750 erp_idx = 0;
3751 low = 0;
3752 high = nlists - 1;
3753 while (low <= high) {
3754 erp_idx = (low + high) >> 1;
3755 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3756 erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
3757 if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
3758 high = erp_idx - 1;
3759 } else if (erp_next && bno >=
3760 xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
3761 low = erp_idx + 1;
3762 } else {
3763 break;
3764 }
3765 }
3766 *erp_idxp = erp_idx;
3767 return erp;
3768 }
3769
3770 /*
3771 * Return a pointer to the indirection array entry containing the
3772 * extent record at file extent index *idxp. Store the index of the
3773 * target irec in *erp_idxp and store the page index of the target
3774 * extent record in *idxp.
3775 */
3776 xfs_ext_irec_t *
3777 xfs_iext_idx_to_irec(
3778 xfs_ifork_t *ifp, /* inode fork pointer */
3779 xfs_extnum_t *idxp, /* extent index (file -> page) */
3780 int *erp_idxp, /* irec index of target ext list */
3781 int realloc) /* new bytes were just added */
3782 {
3783 xfs_ext_irec_t *prev; /* pointer to previous irec */
3784 xfs_ext_irec_t *erp = NULL; /* pointer to current irec */
3785 int erp_idx; /* indirection array index */
3786 int nlists; /* number of irec's (ex lists) */
3787 int high; /* binary search upper limit */
3788 int low; /* binary search lower limit */
3789 xfs_extnum_t page_idx = *idxp; /* extent index in target list */
3790
3791 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3792 ASSERT(page_idx >= 0);
3793 ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
3794 ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc);
3795
3796 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3797 erp_idx = 0;
3798 low = 0;
3799 high = nlists - 1;
3800
3801 /* Binary search extent irec's */
3802 while (low <= high) {
3803 erp_idx = (low + high) >> 1;
3804 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3805 prev = erp_idx > 0 ? erp - 1 : NULL;
3806 if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
3807 realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
3808 high = erp_idx - 1;
3809 } else if (page_idx > erp->er_extoff + erp->er_extcount ||
3810 (page_idx == erp->er_extoff + erp->er_extcount &&
3811 !realloc)) {
3812 low = erp_idx + 1;
3813 } else if (page_idx == erp->er_extoff + erp->er_extcount &&
3814 erp->er_extcount == XFS_LINEAR_EXTS) {
3815 ASSERT(realloc);
3816 page_idx = 0;
3817 erp_idx++;
3818 erp = erp_idx < nlists ? erp + 1 : NULL;
3819 break;
3820 } else {
3821 page_idx -= erp->er_extoff;
3822 break;
3823 }
3824 }
3825 *idxp = page_idx;
3826 *erp_idxp = erp_idx;
3827 return erp;
3828 }
3829
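/*
 * Mapping example (illustrative, assuming full 256-record pages): with
 * three irec entries whose er_extoff values are 0, 256 and 512, a file
 * extent index of *idxp = 300 binary-searches to erp_idx = 1 and is
 * rewritten as page index 300 - 256 = 44, i.e. the function returns
 * &if_ext_irec[1] with *idxp = 44 and *erp_idxp = 1. The realloc special
 * cases only matter when the index sits exactly on a page boundary and
 * new records are about to be inserted there.
 */
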
3830 /*
3831 * Allocate and initialize an indirection array once the space needed
3832 * for incore extents increases above XFS_IEXT_BUFSZ.
3833 */
3834 void
3835 xfs_iext_irec_init(
3836 xfs_ifork_t *ifp) /* inode fork pointer */
3837 {
3838 xfs_ext_irec_t *erp; /* indirection array pointer */
3839 xfs_extnum_t nextents; /* number of extents in file */
3840
3841 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3842 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3843 ASSERT(nextents <= XFS_LINEAR_EXTS);
3844
3845 erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
3846
3847 if (nextents == 0) {
3848 ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
3849 } else if (!ifp->if_real_bytes) {
3850 xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
3851 } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
3852 xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
3853 }
3854 erp->er_extbuf = ifp->if_u1.if_extents;
3855 erp->er_extcount = nextents;
3856 erp->er_extoff = 0;
3857
3858 ifp->if_flags |= XFS_IFEXTIREC;
3859 ifp->if_real_bytes = XFS_IEXT_BUFSZ;
3860 ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
3861 ifp->if_u1.if_ext_irec = erp;
3862
3863 return;
3864 }
3865
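/*
 * Resulting state, as a concrete example (assuming the two-record inline
 * buffer and a 4k irec page): initializing a fork that currently holds 2
 * inline extents leaves a single indirection entry with er_extbuf
 * pointing at a freshly allocated page, er_extcount = 2 and
 * er_extoff = 0, while the fork itself has XFS_IFEXTIREC set,
 * if_real_bytes = XFS_IEXT_BUFSZ and if_bytes = 32.
 */
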
3866 /*
3867 * Allocate and initialize a new entry in the indirection array.
3868 */
3869 xfs_ext_irec_t *
3870 xfs_iext_irec_new(
3871 xfs_ifork_t *ifp, /* inode fork pointer */
3872 int erp_idx) /* index for new irec */
3873 {
3874 xfs_ext_irec_t *erp; /* indirection array pointer */
3875 int i; /* loop counter */
3876 int nlists; /* number of irec's (ex lists) */
3877
3878 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3879 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3880
3881 /* Resize indirection array */
3882 xfs_iext_realloc_indirect(ifp, ++nlists *
3883 sizeof(xfs_ext_irec_t));
3884 /*
3885 * Move records down in the array so the
3886 * new page can use erp_idx.
3887 */
3888 erp = ifp->if_u1.if_ext_irec;
3889 for (i = nlists - 1; i > erp_idx; i--) {
3890 memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
3891 }
3892 ASSERT(i == erp_idx);
3893
3894 /* Initialize new extent record */
3895 erp = ifp->if_u1.if_ext_irec;
3896 erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
3897 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
3898 memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
3899 erp[erp_idx].er_extcount = 0;
3900 erp[erp_idx].er_extoff = erp_idx > 0 ?
3901 erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
3902 return &erp[erp_idx];
3903 }
3904
3905 /*
3906 * Remove a record from the indirection array.
3907 */
3908 void
3909 xfs_iext_irec_remove(
3910 xfs_ifork_t *ifp, /* inode fork pointer */
3911 int erp_idx) /* irec index to remove */
3912 {
3913 xfs_ext_irec_t *erp; /* indirection array pointer */
3914 int i; /* loop counter */
3915 int nlists; /* number of irec's (ex lists) */
3916
3917 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3918 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3919 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3920 if (erp->er_extbuf) {
3921 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
3922 -erp->er_extcount);
3923 kmem_free(erp->er_extbuf);
3924 }
3925 /* Compact extent records */
3926 erp = ifp->if_u1.if_ext_irec;
3927 for (i = erp_idx; i < nlists - 1; i++) {
3928 memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
3929 }
3930 /*
3931 * Manually free the last extent record from the indirection
3932 * array. A call to xfs_iext_realloc_indirect() with a size
3933 * of zero would result in a call to xfs_iext_destroy() which
3934 * would in turn call this function again, creating a nasty
3935 * infinite loop.
3936 */
3937 if (--nlists) {
3938 xfs_iext_realloc_indirect(ifp,
3939 nlists * sizeof(xfs_ext_irec_t));
3940 } else {
3941 kmem_free(ifp->if_u1.if_ext_irec);
3942 }
3943 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
3944 }
3945
3946 /*
3947 * This is called to clean up large amounts of unused memory allocated
3948 * by the indirection array. Before compacting anything though, verify
3949 * that the indirection array is still needed and switch back to the
3950 * linear extent list (or even the inline buffer) if possible. The
3951 * compaction policy is as follows:
3952 *
3953 * Full Compaction: Extents fit into a single page (or inline buffer)
3954 * Partial Compaction: Extents occupy less than 50% of allocated space
3955 * No Compaction: Extents occupy at least 50% of allocated space
3956 */
3957 void
3958 xfs_iext_irec_compact(
3959 xfs_ifork_t *ifp) /* inode fork pointer */
3960 {
3961 xfs_extnum_t nextents; /* number of extents in file */
3962 int nlists; /* number of irec's (ex lists) */
3963
3964 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3965 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3966 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3967
3968 if (nextents == 0) {
3969 xfs_iext_destroy(ifp);
3970 } else if (nextents <= XFS_INLINE_EXTS) {
3971 xfs_iext_indirect_to_direct(ifp);
3972 xfs_iext_direct_to_inline(ifp, nextents);
3973 } else if (nextents <= XFS_LINEAR_EXTS) {
3974 xfs_iext_indirect_to_direct(ifp);
3975 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
3976 xfs_iext_irec_compact_pages(ifp);
3977 }
3978 }
3979
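/*
 * Policy example (illustrative, assuming 256 records per page): a fork
 * with nlists = 4 pages can hold 1024 records. If only 300 remain, they
 * no longer fit in a single page but occupy less than half of the
 * allocated space (300 < 512), so only xfs_iext_irec_compact_pages() is
 * run to merge neighbouring pages. Dropping to 200 records would trigger
 * the switch back to a direct list, and 2 or fewer records would
 * collapse all the way down to the inline buffer.
 */
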
3980 /*
3981 * Combine extents from neighboring extent pages.
3982 */
3983 void
3984 xfs_iext_irec_compact_pages(
3985 xfs_ifork_t *ifp) /* inode fork pointer */
3986 {
3987 xfs_ext_irec_t *erp, *erp_next; /* pointers to irec entries */
3988 int erp_idx = 0; /* indirection array index */
3989 int nlists; /* number of irec's (ex lists) */
3990
3991 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3992 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3993 while (erp_idx < nlists - 1) {
3994 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3995 erp_next = erp + 1;
3996 if (erp_next->er_extcount <=
3997 (XFS_LINEAR_EXTS - erp->er_extcount)) {
3998 memcpy(&erp->er_extbuf[erp->er_extcount],
3999 erp_next->er_extbuf, erp_next->er_extcount *
4000 sizeof(xfs_bmbt_rec_t));
4001 erp->er_extcount += erp_next->er_extcount;
4002 /*
4003 * Free page before removing extent record
4004 * so er_extoffs don't get modified in
4005 * xfs_iext_irec_remove.
4006 */
4007 kmem_free(erp_next->er_extbuf);
4008 erp_next->er_extbuf = NULL;
4009 xfs_iext_irec_remove(ifp, erp_idx + 1);
4010 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4011 } else {
4012 erp_idx++;
4013 }
4014 }
4015 }
4016
4017 /*
4018 * This is called to update the er_extoff field in the indirection
4019 * array when extents have been added or removed from one of the
4020 * extent lists. erp_idx contains the irec index to begin updating
4021 * at and ext_diff contains the number of extents that were added
4022 * or removed.
4023 */
4024 void
4025 xfs_iext_irec_update_extoffs(
4026 xfs_ifork_t *ifp, /* inode fork pointer */
4027 int erp_idx, /* irec index to update */
4028 int ext_diff) /* number of new extents */
4029 {
4030 int i; /* loop counter */
4031 int nlists; /* number of irec's (ex lists) */
4032
4033 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4034 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4035 for (i = erp_idx; i < nlists; i++) {
4036 ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
4037 }
4038 }
4039
4040 /*
4041 * Test whether it is appropriate to check an inode for, and free, post-EOF
4042 * blocks. The 'force' parameter determines whether we should also consider
4043 * regular files that are marked preallocated or append-only.
4044 */
4045 bool
4046 xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
4047 {
4048 /* prealloc/delalloc exists only on regular files */
4049 if (!S_ISREG(ip->i_d.di_mode))
4050 return false;
4051
4052 /*
4053 * Zero-sized files with no cached pages and no delalloc blocks will not
4054 * have speculative prealloc/delalloc blocks to remove.
4055 */
4056 if (VFS_I(ip)->i_size == 0 &&
4057 VN_CACHED(VFS_I(ip)) == 0 &&
4058 ip->i_delayed_blks == 0)
4059 return false;
4060
4061 /* If we haven't read in the extent list, then don't do it now. */
4062 if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
4063 return false;
4064
4065 /*
4066 * Do not free real preallocated or append-only files unless the file
4067 * has delalloc blocks and we are forced to remove them.
4068 */
4069 if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
4070 if (!force || ip->i_delayed_blks == 0)
4071 return false;
4072
4073 return true;
4074 }
4075
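/*
 * Illustrative caller sketch (an assumption about typical usage, not part
 * of the original file): the inactivation and background EOF-block scan
 * paths are expected to gate the actual truncation work on this check,
 * along the lines of
 *
 *	if (xfs_can_free_eofblocks(ip, false))
 *		error = xfs_free_eofblocks(ip->i_mount, ip, false);
 *
 * where xfs_free_eofblocks() (defined elsewhere in XFS; its signature is
 * assumed here) is the routine that actually trims the speculative
 * post-EOF preallocation.
 */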