/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"

kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2
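
/*
 * Keeping this limit small bounds the work done per transaction in the
 * truncate loop in xfs_itruncate_finish() below (an inference from that
 * code, not a statement from the original authors): each pass frees at
 * most this many extents and then the transaction is rolled, so the log
 * reservation needed per step stays bounded no matter how fragmented
 * the file is.
 */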

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);

#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
STATIC void
xfs_validate_extents(
	xfs_ifork_t		*ifp,
	int			nrecs,
	xfs_exntfmt_t		fmt)
{
	xfs_bmbt_irec_t		irec;
	xfs_bmbt_rec_host_t	rec;
	int			i;

	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		rec.l0 = get_unaligned(&ep->l0);
		rec.l1 = get_unaligned(&ep->l1);
		xfs_bmbt_get_all(&rec, &irec);
		if (fmt == XFS_EXTFMT_NOSTATE)
			ASSERT(irec.br_state == XFS_EXT_NORM);
	}
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, fmt)
#endif /* DEBUG */

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked) {
			xfs_fs_cmn_err(CE_ALERT, mp,
				"Detected a bogus zero next_unlinked field in incore inode buffer 0x%p.  About to pop an ASSERT.",
				bp);
			ASSERT(dip->di_next_unlinked);
		}
	}
}
#endif

/*
 * Find the buffer associated with the given inode map.
 * We do basic validation checks on the buffer once it has been
 * retrieved from disk.
 */
STATIC int
xfs_imap_to_bp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	struct xfs_imap	*imap,
	xfs_buf_t	**bpp,
	uint		buf_flags,
	uint		iget_flags)
{
	int		error;
	int		i;
	int		ni;
	xfs_buf_t	*bp;

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp);
	if (error) {
		if (error != EAGAIN) {
			cmn_err(CE_WARN,
				"xfs_imap_to_bp: xfs_trans_read_buf() returned "
				"an error %d on %s.  Returning error.",
				error, mp->m_fsname);
		} else {
			ASSERT(buf_flags & XFS_BUF_TRYLOCK);
		}
		return error;
	}

	/*
	 * Validate the magic number and version of every inode in the buffer
	 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
	 */
#ifdef DEBUG
	ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
#else	/* usual case */
	ni = 1;
#endif
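
	/*
	 * Worked example (illustrative numbers only): with 256-byte
	 * inodes, sb_inodelog is 8, so an 8 KiB cluster buffer gives
	 * BBTOB(imap->im_len) == 8192 and ni == 8192 >> 8 == 32, i.e.
	 * a DEBUG kernel validates all 32 inodes in the buffer while a
	 * non-DEBUG kernel checks only the first.
	 */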

	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = be16_to_cpu(dip->di_magic) == XFS_DINODE_MAGIC &&
			    XFS_DINODE_GOOD_VERSION(dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (iget_flags & XFS_IGET_BULKSTAT) {
				xfs_trans_brelse(tp, bp);
				return XFS_ERROR(EINVAL);
			}
			XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
						XFS_ERRLEVEL_HIGH, mp, dip);
#ifdef DEBUG
			cmn_err(CE_PANIC,
					"Device %s - bad inode magic/vsn "
					"daddr %lld #%d (magic=%x)",
				XFS_BUFTARG_NAME(mp->m_ddev_targp),
				(unsigned long long)imap->im_blkno, i,
				be16_to_cpu(dip->di_magic));
#endif
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EFSCORRUPTED);
		}
	}

	xfs_inobp_check(mp, bp);

	/*
	 * Mark the buffer as an inode buffer now that it looks good
	 */
	XFS_BUF_SET_VTYPE(bp, B_FS_INO);

	*bpp = bp;
	return 0;
}

/*
 * This routine is called to map an inode number within a file
 * system to the buffer containing the on-disk version of the
 * inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dip parameter
 * it returns a pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * Use xfs_imap() to determine the size and location of the
 * buffer to read from disk.
 */
int
xfs_inotobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	int		*offset,
	uint		imap_flags)
{
	struct xfs_imap	imap;
	xfs_buf_t	*bp;
	int		error;

	imap.im_blkno = 0;
	error = xfs_imap(mp, tp, ino, &imap, imap_flags);
	if (error)
		return error;

	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags);
	if (error)
		return error;

	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	*offset = imap.im_boffset;
	return 0;
}


/*
 * This routine is called to map an inode to the buffer containing
 * the on-disk version of the inode.  It returns a pointer to the
 * buffer containing the on-disk inode in the bpp parameter, and in
 * the dip parameter it returns a pointer to the on-disk inode within
 * that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * The inode is expected to have already been mapped to its buffer
 * and read in once, thus we can use the mapping information stored
 * in the inode rather than calling xfs_imap().  This allows us to
 * avoid the overhead of looking at the inode btree for small block
 * file systems (see xfs_imap()).
 */
int
xfs_itobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	uint		buf_flags)
{
	xfs_buf_t	*bp;
	int		error;

	ASSERT(ip->i_imap.im_blkno != 0);

	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, buf_flags, 0);
	if (error)
		return error;

	if (!bp) {
		ASSERT(buf_flags & XFS_BUF_TRYLOCK);
		ASSERT(tp == NULL);
		*bpp = NULL;
		return EAGAIN;
	}

	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
	*bpp = bp;
	return 0;
}

/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means setting if_rdev to the proper value.  For files,
 * directories, and symlinks this means to bring in the in-line data
 * or extent pointers.  For a file in B-tree format, only the root is
 * immediately brought in-core.  The rest will be in-lined in
 * if_extents when it is first referenced (see xfs_iread_extents()).
 */
STATIC int
xfs_iformat(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip)
{
	xfs_attr_shortform_t	*atp;
	int			size;
	int			error;
	xfs_fsize_t		di_size;
	ip->i_df.if_ext_max =
		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	error = 0;

	if (unlikely(be32_to_cpu(dip->di_nextents) +
		     be16_to_cpu(dip->di_anextents) >
		     be64_to_cpu(dip->di_nblocks))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
			(unsigned long long)ip->i_ino,
			(int)(be32_to_cpu(dip->di_nextents) +
			      be16_to_cpu(dip->di_anextents)),
			(unsigned long long)
				be64_to_cpu(dip->di_nblocks));
		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, forkoff = 0x%x.",
			(unsigned long long)ip->i_ino,
			dip->di_forkoff);
		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
		     !ip->i_mount->m_rtdev_targp)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, has realtime flag set.",
			ip->i_ino);
		XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
				     XFS_ERRLEVEL_LOW, ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
					     ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}
		ip->i_d.di_size = 0;
		ip->i_size = 0;
		ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
		break;

	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (dip->di_format) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (unlikely((be16_to_cpu(dip->di_mode) & S_IFMT) == S_IFREG)) {
				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
					"corrupt inode %Lu "
					"(local format for regular file).",
					(unsigned long long) ip->i_ino);
				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			di_size = be64_to_cpu(dip->di_size);
			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
					"corrupt inode %Lu "
					"(bad size %Ld for local inode).",
					(unsigned long long) ip->i_ino,
					(long long) di_size);
				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			size = (int)di_size;
			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
			break;
		case XFS_DINODE_FMT_EXTENTS:
			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
			break;
		case XFS_DINODE_FMT_BTREE:
			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
			break;
		default:
			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}
		break;

	default:
		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (error) {
		return error;
	}
	if (!XFS_DFORK_Q(dip))
		return 0;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_ext_max =
		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	switch (dip->di_aformat) {
	case XFS_DINODE_FMT_LOCAL:
		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
		size = be16_to_cpu(atp->hdr.totsize);

		if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
			xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
				"corrupt inode %Lu "
				"(bad attr fork size %Ld).",
				(unsigned long long) ip->i_ino,
				(long long) size);
			XFS_CORRUPTION_ERROR("xfs_iformat(8)",
					     XFS_ERRLEVEL_LOW,
					     ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}

		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
		break;
	default:
		error = XFS_ERROR(EFSCORRUPTED);
		break;
	}
	if (error) {
		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
		ip->i_afp = NULL;
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
	}
	return error;
}

/*
 * The file is in-lined in the on-disk inode.
 * If it fits into if_inline_data, then copy
 * it there, otherwise allocate a buffer for it
 * and copy the data there.  Either way, set
 * if_data to point at the data.
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in i_real_bytes.
 */
STATIC int
xfs_iformat_local(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork,
	int		size)
{
	xfs_ifork_t	*ifp;
	int		real_size;

	/*
	 * If the size is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu "
			"(bad size %d for local fork, size = %d).",
			(unsigned long long) ip->i_ino, size,
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	real_size = 0;
	if (size == 0)
		ifp->if_u1.if_data = NULL;
	else if (size <= sizeof(ifp->if_u2.if_inline_data))
		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
	else {
		real_size = roundup(size, 4);
		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
	}
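
	/*
	 * Example of the rounding above (hypothetical size): a 50-byte
	 * local fork too large for if_inline_data would get
	 * real_size = roundup(50, 4) = 52, after which if_bytes below
	 * records the true 50 and if_real_bytes the allocated 52.
	 */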
	ifp->if_bytes = size;
	ifp->if_real_bytes = real_size;
	if (size)
		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFINLINE;
	return 0;
}

/*
 * The file consists of a set of extents all
 * of which fit into the on-disk inode.
 * If there are few enough extents to fit into
 * the if_inline_ext, then copy them there.
 * Otherwise allocate a buffer for them and copy
 * them into it.  Either way, set if_extents
 * to point at the extents.
 */
STATIC int
xfs_iformat_extents(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork)
{
	xfs_bmbt_rec_t	*dp;
	xfs_ifork_t	*ifp;
	int		nex;
	int		size;
	int		i;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
	size = nex * (uint)sizeof(xfs_bmbt_rec_t);

	/*
	 * If the number of extents is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu ((a)extents = %d).",
			(unsigned long long) ip->i_ino, nex);
		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_real_bytes = 0;
	if (nex == 0)
		ifp->if_u1.if_extents = NULL;
	else if (nex <= XFS_INLINE_EXTS)
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	else
		xfs_iext_add(ifp, 0, nex);

	ifp->if_bytes = size;
	if (size) {
		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
		xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
		for (i = 0; i < nex; i++, dp++) {
			xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
			ep->l0 = get_unaligned_be64(&dp->l0);
			ep->l1 = get_unaligned_be64(&dp->l1);
		}
		XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
		if (whichfork != XFS_DATA_FORK ||
			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
				if (unlikely(xfs_check_nostate_extents(
				    ifp, 0, nex))) {
					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
							 XFS_ERRLEVEL_LOW,
							 ip->i_mount);
					return XFS_ERROR(EFSCORRUPTED);
				}
	}
	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
}

/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip,
	int			whichfork)
{
	xfs_bmdr_block_t	*dfp;
	xfs_ifork_t		*ifp;
	/* REFERENCED */
	int			nrecs;
	int			size;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
	size = XFS_BMAP_BROOT_SPACE(dfp);
	nrecs = be16_to_cpu(dfp->bb_numrecs);

	/*
	 * blow out if -- fork has fewer extents than can fit in
	 * fork (fork shouldn't be a btree format), root btree
	 * block has more records than can fit into the fork,
	 * or the number of extents is greater than the number of
	 * blocks.
	 */
	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
	    || XFS_BMDR_SPACE_CALC(nrecs) >
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
	    || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu (btree).",
			(unsigned long long) ip->i_ino);
		XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_broot_bytes = size;
	ifp->if_broot = kmem_alloc(size, KM_SLEEP);
	ASSERT(ifp->if_broot != NULL);
	/*
	 * Copy and convert from the on-disk structure
	 * to the in-memory structure.
	 */
	xfs_bmdr_to_bmbt(ip->i_mount, dfp,
			 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
			 ifp->if_broot, size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFBROOT;

	return 0;
}

STATIC void
xfs_dinode_from_disk(
	xfs_icdinode_t	*to,
	xfs_dinode_t	*from)
{
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid = be16_to_cpu(from->di_projid);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);
	to->di_gen = be32_to_cpu(from->di_gen);
}
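
/*
 * xfs_dinode_to_disk() below is the field-for-field inverse of
 * xfs_dinode_from_disk() above, so a round trip through the pair is
 * expected to be lossless, e.g. (sketch only, not a real call site):
 *
 *	xfs_icdinode_t	icd;
 *
 *	xfs_dinode_from_disk(&icd, dip);
 *	xfs_dinode_to_disk(dip, &icd);	// dip's core fields unchanged
 */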

void
xfs_dinode_to_disk(
	xfs_dinode_t	*to,
	xfs_icdinode_t	*from)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid = cpu_to_be16(from->di_projid);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = cpu_to_be16(from->di_flushiter);
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);
}

STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;
	}

	return flags;
}

uint
xfs_ip2xflags(
	xfs_inode_t		*ip)
{
	xfs_icdinode_t		*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}

uint
xfs_dic2xflags(
	xfs_dinode_t		*dip)
{
	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
}

/*
 * Read the disk inode attributes into the in-core inode structure.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_daddr_t	bno,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	int		error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	ip->i_imap.im_blkno = bno;
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;
	ASSERT(bno == 0 || bno == ip->i_imap.im_blkno);

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp,
			       XFS_BUF_LOCK, iget_flags);
	if (error)
		return error;
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);

	/*
	 * If we got something that isn't an inode it means someone
	 * (nfs or dmi) has a stale handle.
	 */
	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC) {
#ifdef DEBUG
		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
				"dip->di_magic (0x%x) != "
				"XFS_DINODE_MAGIC (0x%x)",
				be16_to_cpu(dip->di_magic),
				XFS_DINODE_MAGIC);
#endif /* DEBUG */
		error = XFS_ERROR(EINVAL);
		goto out_brelse;
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_dinode_from_disk(&ip->i_d, dip);
		error = xfs_iformat(ip, dip);
		if (error) {
#ifdef DEBUG
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
					"xfs_iformat() returned error %d",
					error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
		ip->i_d.di_version = dip->di_version;
		ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		ip->i_d.di_mode = 0;
		/*
		 * Initialize the per-fork minima and maxima for a new
		 * inode here.  xfs_iformat will do it for old inodes.
		 */
		ip->i_df.if_ext_max =
			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	}

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format.  We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (ip->i_d.di_version == 1) {
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		ip->i_d.di_projid = 0;
	}

	ip->i_delayed_blks = 0;
	ip->i_size = ip->i_d.di_size;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	XFS_BUF_SET_REF(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the
	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
	 * in xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be
	 * locking the new in-core inode before putting it in the hash
	 * table where other processes can find it.  Thus we don't have
	 * to worry about the inode being changed just because we released
	 * the buffer.
	 */
 out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}

/*
 * Read in extents from a btree-format inode.
 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 */
int
xfs_iread_extents(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	int		whichfork)
{
	int		error;
	xfs_ifork_t	*ifp;
	xfs_extnum_t	nextents;
	size_t		size;

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	size = nextents * sizeof(xfs_bmbt_rec_t);
	ifp = XFS_IFORK_PTR(ip, whichfork);

	/*
	 * We know that the size is valid (it's checked in iformat_btree)
	 */
	ifp->if_lastex = NULLEXTNUM;
	ifp->if_bytes = ifp->if_real_bytes = 0;
	ifp->if_flags |= XFS_IFEXTENTS;
	xfs_iext_add(ifp, 0, nextents);
	error = xfs_bmap_read_extents(tp, ip, whichfork);
	if (error) {
		xfs_iext_destroy(ifp);
		ifp->if_flags &= ~XFS_IFEXTENTS;
		return error;
	}
	xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
	return 0;
}

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode.  If xfs_dialloc()
 * has a free inode available, call xfs_iget()
 * to obtain the in-core version of the allocated inode.  Finally,
 * fill in the inode and log its initial contents.  In this case,
 * ialloc_context would be set to NULL and call_again set to false.
 *
 * If xfs_dialloc() does not have an available inode,
 * it will replenish its supply by doing an allocation.  Since we can
 * only do one allocation within a transaction without deadlocks, we
 * must commit the current transaction before returning the inode itself.
 * In this case, therefore, we will set call_again to true and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
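/*
 * A caller therefore follows roughly this two-phase pattern (sketch
 * only, not lifted from any one call site; error handling omitted):
 *
 *	error = xfs_ialloc(tp, dp, mode, 1, rdev, credp, prid, okalloc,
 *			   &ialloc_context, &call_again, &ip);
 *	if (call_again) {
 *		commit tp while holding ialloc_context, allocate and
 *		reserve a new transaction, then call xfs_ialloc()
 *		again, which this time returns the inode;
 *	}
 */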
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	mode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	cred_t		*cr,
	xfs_prid_t	prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	boolean_t	*call_again,
	xfs_inode_t	**ipp)
{
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	timespec_t	tv;
	int		filestreams = 0;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, call_again, &ino);
	if (error)
		return error;
	if (*call_again || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_trans_iget(tp->t_mountp, tp, ino,
				XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);

	ip->i_d.di_mode = (__uint16_t)mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid();
	ip->i_d.di_gid = current_fsgid();
	ip->i_d.di_projid = prid;
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
	    ip->i_d.di_version == 1) {
		ip->i_d.di_version = 2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == 1))
		xfs_bump_ino_vers2(tp, ip);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	nanotime(&tv);
	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
	ip->i_d.di_atime = ip->i_d.di_mtime;
	ip->i_d.di_ctime = ip->i_d.di_mtime;

	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;
	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
		/*
		 * we can't set up filestreams until after the VFS inode
		 * is set up properly.
		 */
		if (pip && xfs_inode_is_filestream(pip))
			filestreams = 1;
		/* fall through */
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if ((mode & S_IFMT) == S_IFDIR) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if ((mode & S_IFMT) == S_IFREG) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_setup_inode(ip);

	/* now we have set up the vfs inode we can associate the filestream */
	if (filestreams) {
		error = xfs_filestream_associate(pip, ip);
		if (error < 0)
			return -error;
		if (!error)
			xfs_iflags_set(ip, XFS_IFILESTREAM);
	}

	*ipp = ip;
	return 0;
}

/*
 * Check to make sure that there are no blocks allocated to the
 * file beyond the size of the file.  We don't check this for
 * files with fixed size extents or real time extents, but we
 * at least do it for regular files.
 */
#ifdef DEBUG
void
xfs_isize_check(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	map_first;
	int		nimaps;
	xfs_bmbt_irec_t	imaps[2];

	if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
		return;

	if (XFS_IS_REALTIME_INODE(ip))
		return;

	if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
		return;

	nimaps = 2;
	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	/*
	 * The filesystem could be shutting down, so bmapi may return
	 * an error.
	 */
	if (xfs_bmapi(NULL, ip, map_first,
			 (XFS_B_TO_FSB(mp,
				       (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
			  map_first),
			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
			 NULL, NULL))
		return;
	ASSERT(nimaps == 1);
	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
}
#endif	/* DEBUG */

/*
 * Calculate the last possible buffered byte in a file.  This must
 * include data that was buffered beyond the EOF by the write code.
 * This also needs to deal with overflowing the xfs_fsize_t type
 * which can happen for sizes near the limit.
 *
 * We also need to take into account any blocks beyond the EOF.  It
 * may be the case that they were buffered by a write which failed.
 * In that case the pages will still be in memory, but the inode size
 * will never have been updated.
 */
STATIC xfs_fsize_t
xfs_file_last_byte(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_fsize_t	last_byte;
	xfs_fileoff_t	last_block;
	xfs_fileoff_t	size_last_block;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED));

	mp = ip->i_mount;
	/*
	 * Only check for blocks beyond the EOF if the extents have
	 * been read in.  This eliminates the need for the inode lock,
	 * and it also saves us from looking when it really isn't
	 * necessary.
	 */
	if (ip->i_df.if_flags & XFS_IFEXTENTS) {
		xfs_ilock(ip, XFS_ILOCK_SHARED);
		error = xfs_bmap_last_offset(NULL, ip, &last_block,
			XFS_DATA_FORK);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		if (error) {
			last_block = 0;
		}
	} else {
		last_block = 0;
	}
	size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
	last_block = XFS_FILEOFF_MAX(last_block, size_last_block);

	last_byte = XFS_FSB_TO_B(mp, last_block);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	last_byte += (1 << mp->m_writeio_log);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	return last_byte;
}

/*
 * Start the truncation of the file to new_size.  The new size
 * must be smaller than the current size.  This routine will
 * clear the buffer and page caches of file data in the removed
 * range, and xfs_itruncate_finish() will remove the underlying
 * disk blocks.
 *
 * The inode must have its I/O lock locked EXCLUSIVELY, and it
 * must NOT have the inode lock held at all.  This is because we're
 * calling into the buffer/page cache code and we can't hold the
 * inode lock when we do so.
 *
 * We need to wait for any direct I/Os in flight to complete before we
 * proceed with the truncate.  This is needed to prevent the extents
 * being read or written by the direct I/Os from being removed while the
 * I/O is in flight as there is no other method of synchronising
 * direct I/O with the truncate operation.  Also, because we hold
 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
 * started until the truncate completes and drops the lock.  Essentially,
 * the xfs_ioend_wait() call forms an I/O barrier that provides strict
 * ordering between direct I/Os and the truncate operation.
 *
 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
 * or XFS_ITRUNC_MAYBE.  The XFS_ITRUNC_MAYBE value should be used
 * in the case that the caller is locking things out of order and
 * may not be able to call xfs_itruncate_finish() with the inode lock
 * held without dropping the I/O lock.  If the caller must drop the
 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
 * must be called again with all the same restrictions as the initial
 * call.
 */
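/*
 * In other words (illustrative summary): a caller that can keep the
 * I/O lock all the way through xfs_itruncate_finish() passes
 * XFS_ITRUNC_DEFINITE and gets the cheaper xfs_tosspages() path
 * below, while a caller that may drop and retake locks passes
 * XFS_ITRUNC_MAYBE, which uses xfs_flushinval_pages() and may have
 * to repeat this routine.
 */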
int
xfs_itruncate_start(
	xfs_inode_t	*ip,
	uint		flags,
	xfs_fsize_t	new_size)
{
	xfs_fsize_t	last_byte;
	xfs_off_t	toss_start;
	xfs_mount_t	*mp;
	int		error = 0;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
	       (flags == XFS_ITRUNC_MAYBE));

	mp = ip->i_mount;

	/* wait for the completion of any pending DIOs */
	if (new_size == 0 || new_size < ip->i_size)
		xfs_ioend_wait(ip);

	/*
	 * Call toss_pages or flushinval_pages to get rid of pages
	 * overlapping the region being removed.  We have to use
	 * the less efficient flushinval_pages in the case that the
	 * caller may not be able to finish the truncate without
	 * dropping the inode's I/O lock.  Make sure
	 * to catch any pages brought in by buffers overlapping
	 * the EOF by searching out beyond the isize by our
	 * block size.  We round new_size up to a block boundary
	 * so that we don't toss things on the same block as
	 * new_size but before it.
	 *
	 * Before calling toss_pages or flushinval_pages, make sure to
	 * call remapf() over the same region if the file is mapped.
	 * This frees up mapped file references to the pages in the
	 * given range and for the flushinval_pages case it ensures
	 * that we get the latest mapped changes flushed out.
	 */
	toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	toss_start = XFS_FSB_TO_B(mp, toss_start);
	if (toss_start < 0) {
		/*
		 * The place to start tossing is beyond our maximum
		 * file size, so there is no way that the data extended
		 * out there.
		 */
		return 0;
	}
	last_byte = xfs_file_last_byte(ip);
	trace_xfs_itruncate_start(ip, flags, new_size, toss_start, last_byte);
	if (last_byte > toss_start) {
		if (flags & XFS_ITRUNC_DEFINITE) {
			xfs_tosspages(ip, toss_start,
					-1, FI_REMAPF_LOCKED);
		} else {
			error = xfs_flushinval_pages(ip, toss_start,
					-1, FI_REMAPF_LOCKED);
		}
	}

#ifdef DEBUG
	if (new_size == 0) {
		ASSERT(VN_CACHED(VFS_I(ip)) == 0);
	}
#endif
	return error;
}

/*
 * Shrink the file to the given new_size.  The new size must be smaller than
 * the current size.  This will free up the underlying blocks in the removed
 * range after a call to xfs_itruncate_start() or xfs_atruncate_start().
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * The fork parameter must be either xfs_attr_fork or xfs_data_fork, and it
 * indicates the fork which is to be truncated.  For the attribute fork we only
 * support truncation to size 0.
 *
 * We use the sync parameter to indicate whether or not the first transaction
 * we perform might have to be synchronous.  For the attr fork, it needs to be
 * so if the unlink of the inode is not yet known to be permanent in the log.
 * This keeps us from freeing and reusing the blocks of the attribute fork
 * before the unlink of the inode becomes permanent.
 *
 * For the data fork, we normally have to run synchronously if we're being
 * called out of the inactive path or we're being called out of the create path
 * where we're truncating an existing file.  Either way, the truncate needs to
 * be sync so blocks don't reappear in the file with altered data in case of a
 * crash.  wsync filesystems can run the first case async because anything that
 * shrinks the inode has to run sync so by the time we're called here from
 * inactive, the inode size is permanently set to 0.
 *
 * Calls from the truncate path always need to be sync unless we're in a wsync
 * filesystem and the file has already been unlinked.
 *
 * The caller is responsible for correctly setting the sync parameter.  It gets
 * too hard for us to guess here which path we're being called out of just
 * based on inode state.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction.  This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
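/*
 * A typical data fork truncate therefore looks roughly like the
 * following (sketch only, not from a specific call site; error
 * handling omitted; the reservation values match the ones this
 * routine uses internally when it rolls transactions):
 *
 *	xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, new_size);
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_ITRUNCATE);
 *	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
 *				  XFS_TRANS_PERM_LOG_RES,
 *				  XFS_ITRUNCATE_LOG_COUNT);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
 *	xfs_trans_ihold(tp, ip);
 *	error = xfs_itruncate_finish(&tp, ip, new_size, XFS_DATA_FORK, sync);
 *	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 */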
int
xfs_itruncate_finish(
	xfs_trans_t	**tp,
	xfs_inode_t	*ip,
	xfs_fsize_t	new_size,
	int		fork,
	int		sync)
{
	xfs_fsblock_t	first_block;
	xfs_fileoff_t	first_unmap_block;
	xfs_fileoff_t	last_block;
	xfs_filblks_t	unmap_len = 0;
	xfs_mount_t	*mp;
	xfs_trans_t	*ntp;
	int		done;
	int		committed;
	xfs_bmap_free_t	free_list;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT(*tp != NULL);
	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_transp == *tp);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);

	ntp = *tp;
	mp = (ntp)->t_mountp;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	/*
	 * We only support truncating the entire attribute fork.
	 */
	if (fork == XFS_ATTR_FORK) {
		new_size = 0LL;
	}
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	trace_xfs_itruncate_finish_start(ip, new_size);

	/*
	 * The first thing we do is set the size to new_size permanently
	 * on disk.  This way we don't have to worry about anyone ever
	 * being able to look at the data being freed even in the face
	 * of a crash.  What we're getting around here is the case where
	 * we free a block, it is allocated to another file, it is written
	 * to, and then we crash.  If the new data gets written to the
	 * file but the log buffers containing the free and reallocation
	 * don't, then we'd end up with garbage in the blocks being freed.
	 * As long as we make the new_size permanent before actually
	 * freeing any blocks it doesn't matter if they get written to.
	 *
	 * The callers must signal into us whether or not the size
	 * setting here must be synchronous.  There are a few cases
	 * where it doesn't have to be synchronous.  Those cases
	 * occur if the file is unlinked and we know the unlink is
	 * permanent or if the blocks being truncated are guaranteed
	 * to be beyond the inode eof (regardless of the link count)
	 * and the eof value is permanent.  Both of these cases occur
	 * only on wsync-mounted filesystems.  In those cases, we're
	 * guaranteed that no user will ever see the data in the blocks
	 * that are being truncated so the truncate can run async.
	 * In the free beyond eof case, the file may wind up with
	 * more blocks allocated to it than it needs if we crash
	 * and that won't get fixed until the next time the file
	 * is re-opened and closed but that's ok as that shouldn't
	 * be too many blocks.
	 *
	 * However, we can't just make all wsync xactions run async
	 * because there's one call out of the create path that needs
	 * to run sync where it's truncating an existing file to size
	 * 0 whose size is > 0.
	 *
	 * It's probably possible to come up with a test in this
	 * routine that would correctly distinguish all the above
	 * cases from the values of the function parameters and the
	 * inode state but for sanity's sake, I've decided to let the
	 * layers above just tell us.  It's simpler to correctly figure
	 * out in the layer above exactly under what conditions we
	 * can run async and I think it's easier for others to read and
	 * follow the logic in case something has to be changed.
	 * cscope is your friend -- rcc.
	 *
	 * The attribute fork is much simpler.
	 *
	 * For the attribute fork we allow the caller to tell us whether
	 * the unlink of the inode that led to this call is yet permanent
	 * in the on disk log.  If it is not and we will be freeing extents
	 * in this inode then we make the first transaction synchronous
	 * to make sure that the unlink is permanent by the time we free
	 * the blocks.
	 */
	if (fork == XFS_DATA_FORK) {
		if (ip->i_d.di_nextents > 0) {
			/*
			 * If we are not changing the file size then do
			 * not update the on-disk file size - we may be
			 * called from xfs_inactive_free_eofblocks().  If we
			 * update the on-disk file size and then the system
			 * crashes before the contents of the file are
			 * flushed to disk then the files may be full of
			 * holes (ie NULL files bug).
			 */
			if (ip->i_size != new_size) {
				ip->i_d.di_size = new_size;
				ip->i_size = new_size;
				xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
			}
		}
	} else if (sync) {
		ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
		if (ip->i_d.di_anextents > 0)
			xfs_trans_set_sync(ntp);
	}
	ASSERT(fork == XFS_DATA_FORK ||
		(fork == XFS_ATTR_FORK &&
			((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
			 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	ASSERT(first_unmap_block <= last_block);
	done = 0;
	if (last_block == first_unmap_block) {
		done = 1;
	} else {
		unmap_len = last_block - first_unmap_block + 1;
	}
	while (!done) {
		/*
		 * Free up to XFS_ITRUNC_MAX_EXTENTS extents at a time.
		 * xfs_bunmapi() will tell us whether it freed the entire
		 * range or not.  If this is a synchronous mount (wsync),
		 * then we can tell bunmapi to keep all the
		 * transactions asynchronous since the unlink
		 * transaction that made this inode inactive has
		 * already hit the disk.  There's no danger of
		 * the freed blocks being reused, there being a
		 * crash, and the reused blocks suddenly reappearing
		 * in this file with garbage in them once recovery
		 * runs.
		 */
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bunmapi(ntp, ip,
				    first_unmap_block, unmap_len,
				    xfs_bmapi_aflag(fork) |
				      (sync ? 0 : XFS_BMAPI_ASYNC),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    NULL, &done);
		if (error) {
			/*
			 * If the bunmapi call encounters an error,
			 * return to the caller where the transaction
			 * can be properly aborted.  We just need to
			 * make sure we're not holding any resources
			 * that we were not when we came in.
			 */
			xfs_bmap_cancel(&free_list);
			return error;
		}

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(tp, &free_list, &committed);
		ntp = *tp;
		if (committed) {
			/* link the inode into the next xact in the chain */
			xfs_trans_ijoin(ntp, ip,
					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
			xfs_trans_ihold(ntp, ip);
		}

		if (error) {
			/*
			 * If the bmap finish call encounters an error, return
			 * to the caller where the transaction can be properly
			 * aborted.  We just need to make sure we're not
			 * holding any resources that we were not when we came
			 * in.
			 *
			 * Aborting from this point might lose some blocks in
			 * the file system, but oh well.
			 */
			xfs_bmap_cancel(&free_list);
			return error;
		}

		if (committed) {
			/*
			 * Mark the inode dirty so it will be logged and
			 * moved forward in the log as part of every commit.
			 */
			xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
		}

		ntp = xfs_trans_dup(ntp);
		error = xfs_trans_commit(*tp, 0);
		*tp = ntp;

		/* link the inode into the next transaction in the chain */
		xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
		xfs_trans_ihold(ntp, ip);

		if (error)
			return error;
		/*
		 * transaction commit worked ok so we can drop the extra ticket
		 * reference that we gained in xfs_trans_dup()
		 */
		xfs_log_ticket_put(ntp->t_ticket);
		error = xfs_trans_reserve(ntp, 0,
					XFS_ITRUNCATE_LOG_RES(mp), 0,
					XFS_TRANS_PERM_LOG_RES,
					XFS_ITRUNCATE_LOG_COUNT);
		if (error)
			return error;
	}
	/*
	 * Only update the size in the case of the data fork, but
	 * always re-log the inode so that our permanent transaction
	 * can keep on rolling it forward in the log.
	 */
	if (fork == XFS_DATA_FORK) {
		xfs_isize_check(mp, ip, new_size);
		/*
		 * If we are not changing the file size then do
		 * not update the on-disk file size - we may be
		 * called from xfs_inactive_free_eofblocks().  If we
		 * update the on-disk file size and then the system
		 * crashes before the contents of the file are
		 * flushed to disk then the files may be full of
		 * holes (ie NULL files bug).
		 */
		if (ip->i_size != new_size) {
			ip->i_d.di_size = new_size;
			ip->i_size = new_size;
		}
	}
	xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
	ASSERT((new_size != 0) ||
	       (fork == XFS_ATTR_FORK) ||
	       (ip->i_delayed_blks == 0));
	ASSERT((new_size != 0) ||
	       (fork == XFS_ATTR_FORK) ||
	       (ip->i_d.di_nextents == 0));
	trace_xfs_itruncate_finish_end(ip, new_size);
	return 0;
}

/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI.  It
 * will be pulled from this list when the inode is freed.
 */
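/*
 * Each AGI bucket is a singly linked list threaded through the
 * on-disk inodes' di_next_unlinked fields; inserting at the head
 * turns, e.g.,
 *
 *	agi_unlinked[b] -> I1 -> NULLAGINO
 * into
 *	agi_unlinked[b] -> I2 -> I1 -> NULLAGINO
 *
 * (illustrative inode numbers; see the buffer logging below).
 */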
1707 int
1708 xfs_iunlink(
1709 xfs_trans_t *tp,
1710 xfs_inode_t *ip)
1711 {
1712 xfs_mount_t *mp;
1713 xfs_agi_t *agi;
1714 xfs_dinode_t *dip;
1715 xfs_buf_t *agibp;
1716 xfs_buf_t *ibp;
1717 xfs_agino_t agino;
1718 short bucket_index;
1719 int offset;
1720 int error;
1721
1722 ASSERT(ip->i_d.di_nlink == 0);
1723 ASSERT(ip->i_d.di_mode != 0);
1724 ASSERT(ip->i_transp == tp);
1725
1726 mp = tp->t_mountp;
1727
1728 /*
1729 * Get the agi buffer first. It ensures lock ordering
1730 * on the list.
1731 */
1732 error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
1733 if (error)
1734 return error;
1735 agi = XFS_BUF_TO_AGI(agibp);
1736
1737 /*
1738 * Get the index into the agi hash table for the
1739 * list this inode will go on.
1740 */
1741 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1742 ASSERT(agino != 0);
1743 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1744 ASSERT(agi->agi_unlinked[bucket_index]);
1745 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
1746
1747 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) {
1748 /*
1749 * There is already another inode in the bucket we need
1750 * to add ourselves to. Add us at the front of the list.
1751 * Here we put the head pointer into our next pointer,
1752 * and then we fall through to point the head at us.
1753 */
1754 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
1755 if (error)
1756 return error;
1757
1758 ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO);
1759 /* both on-disk, don't endian flip twice */
1760 dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
1761 offset = ip->i_imap.im_boffset +
1762 offsetof(xfs_dinode_t, di_next_unlinked);
1763 xfs_trans_inode_buf(tp, ibp);
1764 xfs_trans_log_buf(tp, ibp, offset,
1765 (offset + sizeof(xfs_agino_t) - 1));
1766 xfs_inobp_check(mp, ibp);
1767 }
1768
1769 /*
1770 * Point the bucket head pointer at the inode being inserted.
1771 */
1772 ASSERT(agino != 0);
1773 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
1774 offset = offsetof(xfs_agi_t, agi_unlinked) +
1775 (sizeof(xfs_agino_t) * bucket_index);
1776 xfs_trans_log_buf(tp, agibp, offset,
1777 (offset + sizeof(xfs_agino_t) - 1));
1778 return 0;
1779 }
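
/*
 * Editor's sketch, not part of the original source: the unlinked list
 * bucket above is chosen purely by hashing the AG-relative inode
 * number, so insertion and removal only ever touch one of the
 * XFS_AGI_UNLINKED_BUCKETS list heads in the AGI.
 */
static inline short
xfs_iunlink_bucket_sketch(
	xfs_agino_t	agino)
{
	/* same computation used by xfs_iunlink()/xfs_iunlink_remove() */
	return agino % XFS_AGI_UNLINKED_BUCKETS;
}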
1780
1781 /*
1782 * Pull the on-disk inode from the AGI unlinked list.
1783 */
1784 STATIC int
1785 xfs_iunlink_remove(
1786 xfs_trans_t *tp,
1787 xfs_inode_t *ip)
1788 {
1789 xfs_ino_t next_ino;
1790 xfs_mount_t *mp;
1791 xfs_agi_t *agi;
1792 xfs_dinode_t *dip;
1793 xfs_buf_t *agibp;
1794 xfs_buf_t *ibp;
1795 xfs_agnumber_t agno;
1796 xfs_agino_t agino;
1797 xfs_agino_t next_agino;
1798 xfs_buf_t *last_ibp;
1799 xfs_dinode_t *last_dip = NULL;
1800 short bucket_index;
1801 int offset, last_offset = 0;
1802 int error;
1803
1804 mp = tp->t_mountp;
1805 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1806
1807 /*
1808 * Get the agi buffer first. It ensures lock ordering
1809 * on the list.
1810 */
1811 error = xfs_read_agi(mp, tp, agno, &agibp);
1812 if (error)
1813 return error;
1814
1815 agi = XFS_BUF_TO_AGI(agibp);
1816
1817 /*
1818 * Get the index into the agi hash table for the
1819 * list this inode will go on.
1820 */
1821 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1822 ASSERT(agino != 0);
1823 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1824 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO);
1825 ASSERT(agi->agi_unlinked[bucket_index]);
1826
1827 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
1828 /*
1829 * We're at the head of the list. Get the inode's
1830 * on-disk buffer to see if there is anyone after us
1831 * on the list. Only modify our next pointer if it
1832 * is not already NULLAGINO. This saves us the overhead
1833 * of dealing with the buffer when there is no need to
1834 * change it.
1835 */
1836 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
1837 if (error) {
1838 cmn_err(CE_WARN,
1839 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
1840 error, mp->m_fsname);
1841 return error;
1842 }
1843 next_agino = be32_to_cpu(dip->di_next_unlinked);
1844 ASSERT(next_agino != 0);
1845 if (next_agino != NULLAGINO) {
1846 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
1847 offset = ip->i_imap.im_boffset +
1848 offsetof(xfs_dinode_t, di_next_unlinked);
1849 xfs_trans_inode_buf(tp, ibp);
1850 xfs_trans_log_buf(tp, ibp, offset,
1851 (offset + sizeof(xfs_agino_t) - 1));
1852 xfs_inobp_check(mp, ibp);
1853 } else {
1854 xfs_trans_brelse(tp, ibp);
1855 }
1856 /*
1857 * Point the bucket head pointer at the next inode.
1858 */
1859 ASSERT(next_agino != 0);
1860 ASSERT(next_agino != agino);
1861 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
1862 offset = offsetof(xfs_agi_t, agi_unlinked) +
1863 (sizeof(xfs_agino_t) * bucket_index);
1864 xfs_trans_log_buf(tp, agibp, offset,
1865 (offset + sizeof(xfs_agino_t) - 1));
1866 } else {
1867 /*
1868 * We need to search the list for the inode being freed.
1869 */
1870 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1871 last_ibp = NULL;
1872 while (next_agino != agino) {
1873 /*
1874 * If the last inode wasn't the one pointing to
1875 * us, then release its buffer since we're not
1876 * going to do anything with it.
1877 */
1878 if (last_ibp != NULL) {
1879 xfs_trans_brelse(tp, last_ibp);
1880 }
1881 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
1882 error = xfs_inotobp(mp, tp, next_ino, &last_dip,
1883 &last_ibp, &last_offset, 0);
1884 if (error) {
1885 cmn_err(CE_WARN,
1886 "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.",
1887 error, mp->m_fsname);
1888 return error;
1889 }
1890 next_agino = be32_to_cpu(last_dip->di_next_unlinked);
1891 ASSERT(next_agino != NULLAGINO);
1892 ASSERT(next_agino != 0);
1893 }
1894 /*
1895 * Now last_ibp points to the buffer previous to us on
1896 * the unlinked list. Pull us from the list.
1897 */
1898 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
1899 if (error) {
1900 cmn_err(CE_WARN,
1901 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
1902 error, mp->m_fsname);
1903 return error;
1904 }
1905 next_agino = be32_to_cpu(dip->di_next_unlinked);
1906 ASSERT(next_agino != 0);
1907 ASSERT(next_agino != agino);
1908 if (next_agino != NULLAGINO) {
1909 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
1910 offset = ip->i_imap.im_boffset +
1911 offsetof(xfs_dinode_t, di_next_unlinked);
1912 xfs_trans_inode_buf(tp, ibp);
1913 xfs_trans_log_buf(tp, ibp, offset,
1914 (offset + sizeof(xfs_agino_t) - 1));
1915 xfs_inobp_check(mp, ibp);
1916 } else {
1917 xfs_trans_brelse(tp, ibp);
1918 }
1919 /*
1920 * Point the previous inode on the list to the next inode.
1921 */
1922 last_dip->di_next_unlinked = cpu_to_be32(next_agino);
1923 ASSERT(next_agino != 0);
1924 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
1925 xfs_trans_inode_buf(tp, last_ibp);
1926 xfs_trans_log_buf(tp, last_ibp, offset,
1927 (offset + sizeof(xfs_agino_t) - 1));
1928 xfs_inobp_check(mp, last_ibp);
1929 }
1930 return 0;
1931 }
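
/*
 * Editor's sketch, not part of the original source: stripped of the
 * buffer reads and logging, xfs_iunlink_remove() is the classic singly
 * linked list removal. The hypothetical node type below stands in for
 * the on-disk di_next_unlinked chain; in XFS every pointer update
 * shown here must additionally be logged via xfs_trans_log_buf().
 */
struct sketch_unlinked_node {
	struct sketch_unlinked_node	*next;
};

static void
sketch_unlinked_remove(
	struct sketch_unlinked_node	**head,
	struct sketch_unlinked_node	*target)
{
	struct sketch_unlinked_node	**pp = head;

	/* walk until *pp is the pointer that references target */
	while (*pp && *pp != target)
		pp = &(*pp)->next;
	if (*pp)
		*pp = target->next;	/* unlink; logged in the real code */
}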
1932
1933 STATIC void
1934 xfs_ifree_cluster(
1935 xfs_inode_t *free_ip,
1936 xfs_trans_t *tp,
1937 xfs_ino_t inum)
1938 {
1939 xfs_mount_t *mp = free_ip->i_mount;
1940 int blks_per_cluster;
1941 int nbufs;
1942 int ninodes;
1943 int i, j, found, pre_flushed;
1944 xfs_daddr_t blkno;
1945 xfs_buf_t *bp;
1946 xfs_inode_t *ip, **ip_found;
1947 xfs_inode_log_item_t *iip;
1948 xfs_log_item_t *lip;
1949 xfs_perag_t *pag = xfs_get_perag(mp, inum);
1950
1951 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
1952 blks_per_cluster = 1;
1953 ninodes = mp->m_sb.sb_inopblock;
1954 nbufs = XFS_IALLOC_BLOCKS(mp);
1955 } else {
1956 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
1957 mp->m_sb.sb_blocksize;
1958 ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
1959 nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
1960 }
1961
1962 ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS);
1963
1964 for (j = 0; j < nbufs; j++, inum += ninodes) {
1965 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
1966 XFS_INO_TO_AGBNO(mp, inum));
1967
1968
1969 /*
1970 * Look for each inode in memory and attempt to lock it;
1971 * we can be racing with flush and tail pushing here.
1972 * Any inode we get the locks on is added to an array of
1973 * inode items to process later.
1974 *
1975 * When we get the buffer lock below, we could beat a flush
1976 * or tail pushing thread to it, in which
1977 * case that thread will go looking for the inode buffer
1978 * and fail, so we need some other form of interlock
1979 * here.
1980 */
1981 found = 0;
1982 for (i = 0; i < ninodes; i++) {
1983 read_lock(&pag->pag_ici_lock);
1984 ip = radix_tree_lookup(&pag->pag_ici_root,
1985 XFS_INO_TO_AGINO(mp, (inum + i)));
1986
1987 /* Inode not in memory or already marked stale,
1988 * nothing to do
1989 */
1990 if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
1991 read_unlock(&pag->pag_ici_lock);
1992 continue;
1993 }
1994
1995 if (xfs_inode_clean(ip)) {
1996 read_unlock(&pag->pag_ici_lock);
1997 continue;
1998 }
1999
2000 /* If we can get the locks then add it to the
2001 * list; otherwise, by the time we get the bp lock
2002 * below it will already be attached to the
2003 * inode buffer.
2004 */
2005
2006 /* This inode will already be locked - by us; let's
2007 * keep it that way.
2008 */
2009
2010 if (ip == free_ip) {
2011 if (xfs_iflock_nowait(ip)) {
2012 xfs_iflags_set(ip, XFS_ISTALE);
2013 if (xfs_inode_clean(ip)) {
2014 xfs_ifunlock(ip);
2015 } else {
2016 ip_found[found++] = ip;
2017 }
2018 }
2019 read_unlock(&pag->pag_ici_lock);
2020 continue;
2021 }
2022
2023 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2024 if (xfs_iflock_nowait(ip)) {
2025 xfs_iflags_set(ip, XFS_ISTALE);
2026
2027 if (xfs_inode_clean(ip)) {
2028 xfs_ifunlock(ip);
2029 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2030 } else {
2031 ip_found[found++] = ip;
2032 }
2033 } else {
2034 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2035 }
2036 }
2037 read_unlock(&pag->pag_ici_lock);
2038 }
2039
2040 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2041 mp->m_bsize * blks_per_cluster,
2042 XFS_BUF_LOCK);
2043
2044 pre_flushed = 0;
2045 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
2046 while (lip) {
2047 if (lip->li_type == XFS_LI_INODE) {
2048 iip = (xfs_inode_log_item_t *)lip;
2049 ASSERT(iip->ili_logged == 1);
2050 lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
2051 xfs_trans_ail_copy_lsn(mp->m_ail,
2052 &iip->ili_flush_lsn,
2053 &iip->ili_item.li_lsn);
2054 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2055 pre_flushed++;
2056 }
2057 lip = lip->li_bio_list;
2058 }
2059
2060 for (i = 0; i < found; i++) {
2061 ip = ip_found[i];
2062 iip = ip->i_itemp;
2063
2064 if (!iip) {
2065 ip->i_update_core = 0;
2066 xfs_ifunlock(ip);
2067 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2068 continue;
2069 }
2070
2071 iip->ili_last_fields = iip->ili_format.ilf_fields;
2072 iip->ili_format.ilf_fields = 0;
2073 iip->ili_logged = 1;
2074 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2075 &iip->ili_item.li_lsn);
2076
2077 xfs_buf_attach_iodone(bp,
2078 (void(*)(xfs_buf_t*,xfs_log_item_t*))
2079 xfs_istale_done, (xfs_log_item_t *)iip);
2080 if (ip != free_ip) {
2081 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2082 }
2083 }
2084
2085 if (found || pre_flushed)
2086 xfs_trans_stale_inode_buf(tp, bp);
2087 xfs_trans_binval(tp, bp);
2088 }
2089
2090 kmem_free(ip_found);
2091 xfs_put_perag(mp, pag);
2092 }
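
/*
 * Editor's sketch, not part of the original source: the geometry
 * computed at the top of xfs_ifree_cluster() reduces to the helper
 * below (hypothetical name), giving the number of inodes covered by
 * each buffer that the function invalidates.
 */
STATIC int
xfs_ifree_cluster_ninodes_sketch(
	xfs_mount_t	*mp)
{
	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp))
		return mp->m_sb.sb_inopblock;
	return (XFS_INODE_CLUSTER_SIZE(mp) / mp->m_sb.sb_blocksize) *
		mp->m_sb.sb_inopblock;
}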
2093
2094 /*
2095 * This is called to return an inode to the inode free list.
2096 * The inode should already be truncated to 0 length and have
2097 * no pages associated with it. This routine also assumes that
2098 * the inode is already a part of the transaction.
2099 *
2100 * The on-disk copy of the inode will have been added to the list
2101 * of unlinked inodes in the AGI. We need to remove the inode from
2102 * that list atomically with respect to freeing it here.
2103 */
2104 int
2105 xfs_ifree(
2106 xfs_trans_t *tp,
2107 xfs_inode_t *ip,
2108 xfs_bmap_free_t *flist)
2109 {
2110 int error;
2111 int delete;
2112 xfs_ino_t first_ino;
2113 xfs_dinode_t *dip;
2114 xfs_buf_t *ibp;
2115
2116 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2117 ASSERT(ip->i_transp == tp);
2118 ASSERT(ip->i_d.di_nlink == 0);
2119 ASSERT(ip->i_d.di_nextents == 0);
2120 ASSERT(ip->i_d.di_anextents == 0);
2121 ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
2122 ((ip->i_d.di_mode & S_IFMT) != S_IFREG));
2123 ASSERT(ip->i_d.di_nblocks == 0);
2124
2125 /*
2126 * Pull the on-disk inode from the AGI unlinked list.
2127 */
2128 error = xfs_iunlink_remove(tp, ip);
2129 if (error != 0) {
2130 return error;
2131 }
2132
2133 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
2134 if (error != 0) {
2135 return error;
2136 }
2137 ip->i_d.di_mode = 0; /* mark incore inode as free */
2138 ip->i_d.di_flags = 0;
2139 ip->i_d.di_dmevmask = 0;
2140 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
2141 ip->i_df.if_ext_max =
2142 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
2143 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2144 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2145 /*
2146 * Bump the generation count so no one will be confused
2147 * by reincarnations of this inode.
2148 */
2149 ip->i_d.di_gen++;
2150
2151 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2152
2153 error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
2154 if (error)
2155 return error;
2156
2157 /*
2158 * Clear the on-disk di_mode. This is to prevent xfs_bulkstat
2159 * from picking up this inode when it is reclaimed (its incore state
2160 * initialized but not flushed to disk yet). The in-core di_mode is
2161 * already cleared and a corresponding transaction logged.
2162 * The hack here just synchronizes the in-core to on-disk
2163 * di_mode value in advance before the actual inode sync to disk.
2164 * This is OK because the inode is already unlinked and would never
2165 * change its di_mode again for this inode generation.
2166 * This is a temporary hack that would require a proper fix
2167 * in the future.
2168 */
2169 dip->di_mode = 0;
2170
2171 if (delete) {
2172 xfs_ifree_cluster(ip, tp, first_ino);
2173 }
2174
2175 return 0;
2176 }
2177
2178 /*
2179 * Reallocate the space for if_broot based on the number of records
2180 * being added or deleted as indicated in rec_diff. Move the records
2181 * and pointers in if_broot to fit the new size. When shrinking, this
2182 * will eliminate holes between the records and pointers created by
2183 * the caller. When growing, this will create holes to be filled in
2184 * by the caller.
2185 *
2186 * The caller must not request to add more records than would fit in
2187 * the on-disk inode root. If the if_broot is currently NULL, then
2188 * one will be allocated when adding records. The caller must also
2189 * not request that the number of records go below zero, although
2190 * it can go to zero.
2191 *
2192 * ip -- the inode whose if_broot area is changing
2193 * rec_diff -- the change in the number of records, positive or negative,
2194 * requested for the if_broot array.
2195 */
2196 void
2197 xfs_iroot_realloc(
2198 xfs_inode_t *ip,
2199 int rec_diff,
2200 int whichfork)
2201 {
2202 struct xfs_mount *mp = ip->i_mount;
2203 int cur_max;
2204 xfs_ifork_t *ifp;
2205 struct xfs_btree_block *new_broot;
2206 int new_max;
2207 size_t new_size;
2208 char *np;
2209 char *op;
2210
2211 /*
2212 * Handle the degenerate case quietly.
2213 */
2214 if (rec_diff == 0) {
2215 return;
2216 }
2217
2218 ifp = XFS_IFORK_PTR(ip, whichfork);
2219 if (rec_diff > 0) {
2220 /*
2221 * If there wasn't any memory allocated before, just
2222 * allocate it now and get out.
2223 */
2224 if (ifp->if_broot_bytes == 0) {
2225 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
2226 ifp->if_broot = kmem_alloc(new_size, KM_SLEEP);
2227 ifp->if_broot_bytes = (int)new_size;
2228 return;
2229 }
2230
2231 /*
2232 * If there is already an existing if_broot, then we need
2233 * to realloc() it and shift the pointers to their new
2234 * location. The records don't change location because
2235 * they are kept butted up against the btree block header.
2236 */
2237 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
2238 new_max = cur_max + rec_diff;
2239 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2240 ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
2241 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
2242 KM_SLEEP);
2243 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
2244 ifp->if_broot_bytes);
2245 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
2246 (int)new_size);
2247 ifp->if_broot_bytes = (int)new_size;
2248 ASSERT(ifp->if_broot_bytes <=
2249 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2250 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
2251 return;
2252 }
2253
2254 /*
2255 * rec_diff is less than 0. In this case, we are shrinking the
2256 * if_broot buffer. It must already exist. If we go to zero
2257 * records, just get rid of the root and clear the status bit.
2258 */
2259 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
2260 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
2261 new_max = cur_max + rec_diff;
2262 ASSERT(new_max >= 0);
2263 if (new_max > 0)
2264 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2265 else
2266 new_size = 0;
2267 if (new_size > 0) {
2268 new_broot = kmem_alloc(new_size, KM_SLEEP);
2269 /*
2270 * First copy over the btree block header.
2271 */
2272 memcpy(new_broot, ifp->if_broot, XFS_BTREE_LBLOCK_LEN);
2273 } else {
2274 new_broot = NULL;
2275 ifp->if_flags &= ~XFS_IFBROOT;
2276 }
2277
2278 /*
2279 * Only copy the records and pointers if there are any.
2280 */
2281 if (new_max > 0) {
2282 /*
2283 * First copy the records.
2284 */
2285 op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
2286 np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
2287 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
2288
2289 /*
2290 * Then copy the pointers.
2291 */
2292 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
2293 ifp->if_broot_bytes);
2294 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
2295 (int)new_size);
2296 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
2297 }
2298 kmem_free(ifp->if_broot);
2299 ifp->if_broot = new_broot;
2300 ifp->if_broot_bytes = (int)new_size;
2301 ASSERT(ifp->if_broot_bytes <=
2302 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2303 return;
2304 }
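
/*
 * Editor's sketch, not part of the original source: an incore broot
 * buffer is laid out as [ header | records | pointers ], and because
 * the records stay butted up against the header only the pointer array
 * moves on a resize. The hypothetical helper below shows that
 * grow-and-shift pattern in isolation, under the simplifying
 * assumption of one pointer per record.
 */
static void *
sketch_broot_grow(
	void		*buf,
	size_t		hdr_len,
	size_t		rec_len,
	size_t		ptr_len,
	int		old_nrecs,
	int		new_nrecs)
{
	size_t	old_size = hdr_len + old_nrecs * (rec_len + ptr_len);
	size_t	new_size = hdr_len + new_nrecs * (rec_len + ptr_len);
	char	*nb = kmem_realloc(buf, new_size, old_size, KM_SLEEP);

	/* slide the pointer array out to the end of the new record area */
	memmove(nb + hdr_len + new_nrecs * rec_len,	/* new ptr offset */
		nb + hdr_len + old_nrecs * rec_len,	/* old ptr offset */
		old_nrecs * ptr_len);
	return nb;
}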
2305
2306
2307 /*
2308 * This is called when the amount of space needed for if_data
2309 * is increased or decreased. The change in size is indicated by
2310 * the number of bytes that need to be added or deleted in the
2311 * byte_diff parameter.
2312 *
2313 * If the amount of space needed has decreased below the size of the
2314 * inline buffer, then switch to using the inline buffer. Otherwise,
2315 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
2316 * to what is needed.
2317 *
2318 * ip -- the inode whose if_data area is changing
2319 * byte_diff -- the change in the number of bytes, positive or negative,
2320 * requested for the if_data array.
2321 */
2322 void
2323 xfs_idata_realloc(
2324 xfs_inode_t *ip,
2325 int byte_diff,
2326 int whichfork)
2327 {
2328 xfs_ifork_t *ifp;
2329 int new_size;
2330 int real_size;
2331
2332 if (byte_diff == 0) {
2333 return;
2334 }
2335
2336 ifp = XFS_IFORK_PTR(ip, whichfork);
2337 new_size = (int)ifp->if_bytes + byte_diff;
2338 ASSERT(new_size >= 0);
2339
2340 if (new_size == 0) {
2341 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2342 kmem_free(ifp->if_u1.if_data);
2343 }
2344 ifp->if_u1.if_data = NULL;
2345 real_size = 0;
2346 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
2347 /*
2348 * If the valid extents/data can fit in if_inline_ext/data,
2349 * copy them from the malloc'd vector and free it.
2350 */
2351 if (ifp->if_u1.if_data == NULL) {
2352 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2353 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2354 ASSERT(ifp->if_real_bytes != 0);
2355 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
2356 new_size);
2357 kmem_free(ifp->if_u1.if_data);
2358 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2359 }
2360 real_size = 0;
2361 } else {
2362 /*
2363 * Stuck with malloc/realloc.
2364 * For inline data, the underlying buffer must be
2365 * a multiple of 4 bytes in size so that it can be
2366 * logged and stay on word boundaries. We enforce
2367 * that here.
2368 */
2369 real_size = roundup(new_size, 4);
2370 if (ifp->if_u1.if_data == NULL) {
2371 ASSERT(ifp->if_real_bytes == 0);
2372 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2373 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2374 /*
2375 * Only do the realloc if the underlying size
2376 * is really changing.
2377 */
2378 if (ifp->if_real_bytes != real_size) {
2379 ifp->if_u1.if_data =
2380 kmem_realloc(ifp->if_u1.if_data,
2381 real_size,
2382 ifp->if_real_bytes,
2383 KM_SLEEP);
2384 }
2385 } else {
2386 ASSERT(ifp->if_real_bytes == 0);
2387 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2388 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
2389 ifp->if_bytes);
2390 }
2391 }
2392 ifp->if_real_bytes = real_size;
2393 ifp->if_bytes = new_size;
2394 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2395 }
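
/*
 * Editor's sketch, not part of the original source: the only sizing
 * rule xfs_idata_realloc() enforces for malloc'd inline-data buffers
 * is word alignment of the allocation, so e.g. a request for 10 bytes
 * really allocates 12 (hypothetical helper name).
 */
static inline int
sketch_ifork_data_alloc_size(
	int	new_size)
{
	/* mirrors the roundup(new_size, 4) above */
	return roundup(new_size, 4);
}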
2396
2397 void
2398 xfs_idestroy_fork(
2399 xfs_inode_t *ip,
2400 int whichfork)
2401 {
2402 xfs_ifork_t *ifp;
2403
2404 ifp = XFS_IFORK_PTR(ip, whichfork);
2405 if (ifp->if_broot != NULL) {
2406 kmem_free(ifp->if_broot);
2407 ifp->if_broot = NULL;
2408 }
2409
2410 /*
2411 * If the format is local, then we can't have an extents
2412 * array so just look for an inline data array. If we're
2413 * not local then we may or may not have an extents list,
2414 * so check and free it up if we do.
2415 */
2416 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2417 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2418 (ifp->if_u1.if_data != NULL)) {
2419 ASSERT(ifp->if_real_bytes != 0);
2420 kmem_free(ifp->if_u1.if_data);
2421 ifp->if_u1.if_data = NULL;
2422 ifp->if_real_bytes = 0;
2423 }
2424 } else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2425 ((ifp->if_flags & XFS_IFEXTIREC) ||
2426 ((ifp->if_u1.if_extents != NULL) &&
2427 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2428 ASSERT(ifp->if_real_bytes != 0);
2429 xfs_iext_destroy(ifp);
2430 }
2431 ASSERT(ifp->if_u1.if_extents == NULL ||
2432 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2433 ASSERT(ifp->if_real_bytes == 0);
2434 if (whichfork == XFS_ATTR_FORK) {
2435 kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2436 ip->i_afp = NULL;
2437 }
2438 }
2439
2440 /*
2441 * Increment the pin count of the given inode.
2442 * The pin count is manipulated atomically, so no lock is needed.
2443 */
2444 void
2445 xfs_ipin(
2446 xfs_inode_t *ip)
2447 {
2448 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2449
2450 atomic_inc(&ip->i_pincount);
2451 }
2452
2453 /*
2454 * Decrement the pin count of the given inode, and wake up
2455 * anyone in xfs_iunpin_wait() if the count goes to 0. The
2456 * inode must have been previously pinned with a call to xfs_ipin().
2457 */
2458 void
2459 xfs_iunpin(
2460 xfs_inode_t *ip)
2461 {
2462 ASSERT(atomic_read(&ip->i_pincount) > 0);
2463
2464 if (atomic_dec_and_test(&ip->i_pincount))
2465 wake_up(&ip->i_ipin_wait);
2466 }
2467
2468 /*
2469 * This is called to unpin an inode. It can be directed to wait or to return
2470 * immediately without waiting for the inode to be unpinned. The caller must
2471 * have the inode locked in at least shared mode so that the buffer cannot be
2472 * subsequently pinned once someone is waiting for it to be unpinned.
2473 */
2474 STATIC void
2475 __xfs_iunpin_wait(
2476 xfs_inode_t *ip,
2477 int wait)
2478 {
2479 xfs_inode_log_item_t *iip = ip->i_itemp;
2480
2481 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2482 if (atomic_read(&ip->i_pincount) == 0)
2483 return;
2484
2485 /* Give the log a push to start the unpinning I/O */
2486 xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ?
2487 iip->ili_last_lsn : 0, XFS_LOG_FORCE);
2488 if (wait)
2489 wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
2490 }
2491
2492 static inline void
2493 xfs_iunpin_wait(
2494 xfs_inode_t *ip)
2495 {
2496 __xfs_iunpin_wait(ip, 1);
2497 }
2498
2499 static inline void
2500 xfs_iunpin_nowait(
2501 xfs_inode_t *ip)
2502 {
2503 __xfs_iunpin_wait(ip, 0);
2504 }
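
/*
 * Editor's sketch, not part of the original source: a pin/unpin pair
 * as a caller would issue it, assuming the inode lock is already held
 * exclusively (xfs_ipin() asserts that). The final xfs_iunpin() of a
 * pinned inode wakes any thread parked in xfs_iunpin_wait().
 */
static void
sketch_pin_cycle(
	xfs_inode_t	*ip)
{
	xfs_ipin(ip);
	/* ... inode changes are committed to the in-core log here ... */
	xfs_iunpin(ip);
}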
2505
2506
2507 /*
2508 * xfs_iextents_copy()
2509 *
2510 * This is called to copy the REAL extents (as opposed to the delayed
2511 * allocation extents) from the inode into the given buffer. It
2512 * returns the number of bytes copied into the buffer.
2513 *
2514 * Delayed allocation extents must never be written to disk, so
2515 * we examine each extent in turn and copy out only those which
2516 * are not delayed.
2517 */
2518 int
2519 xfs_iextents_copy(
2520 xfs_inode_t *ip,
2521 xfs_bmbt_rec_t *dp,
2522 int whichfork)
2523 {
2524 int copied;
2525 int i;
2526 xfs_ifork_t *ifp;
2527 int nrecs;
2528 xfs_fsblock_t start_block;
2529
2530 ifp = XFS_IFORK_PTR(ip, whichfork);
2531 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2532 ASSERT(ifp->if_bytes > 0);
2533
2534 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2535 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
2536 ASSERT(nrecs > 0);
2537
2538 /*
2539 * There may be delayed allocation extents in the
2540 * inode, so copy the extents one at a time and skip
2541 * the delayed ones. There must be at least one
2542 * non-delayed extent.
2543 */
2544 copied = 0;
2545 for (i = 0; i < nrecs; i++) {
2546 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
2547 start_block = xfs_bmbt_get_startblock(ep);
2548 if (isnullstartblock(start_block)) {
2549 /*
2550 * It's a delayed allocation extent, so skip it.
2551 */
2552 continue;
2553 }
2554
2555 /* Translate to on disk format */
2556 put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
2557 put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
2558 dp++;
2559 copied++;
2560 }
2561 ASSERT(copied != 0);
2562 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));
2563
2564 return (copied * (uint)sizeof(xfs_bmbt_rec_t));
2565 }
2566
2567 /*
2568 * Each of the following cases stores data into the same region
2569 * of the on-disk inode, so only one of them can be valid at
2570 * any given time. While it is possible to have conflicting formats
2571 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
2572 * in EXTENTS format, this can only happen when the fork has
2573 * changed formats after being modified but before being flushed.
2574 * In these cases, the format always takes precedence, because the
2575 * format indicates the current state of the fork.
2576 */
2577 /*ARGSUSED*/
2578 STATIC void
2579 xfs_iflush_fork(
2580 xfs_inode_t *ip,
2581 xfs_dinode_t *dip,
2582 xfs_inode_log_item_t *iip,
2583 int whichfork,
2584 xfs_buf_t *bp)
2585 {
2586 char *cp;
2587 xfs_ifork_t *ifp;
2588 xfs_mount_t *mp;
2589 #ifdef XFS_TRANS_DEBUG
2590 int first;
2591 #endif
2592 static const short brootflag[2] =
2593 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
2594 static const short dataflag[2] =
2595 { XFS_ILOG_DDATA, XFS_ILOG_ADATA };
2596 static const short extflag[2] =
2597 { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
2598
2599 if (!iip)
2600 return;
2601 ifp = XFS_IFORK_PTR(ip, whichfork);
2602 /*
2603 * This can happen if we gave up in iformat in an error path,
2604 * for the attribute fork.
2605 */
2606 if (!ifp) {
2607 ASSERT(whichfork == XFS_ATTR_FORK);
2608 return;
2609 }
2610 cp = XFS_DFORK_PTR(dip, whichfork);
2611 mp = ip->i_mount;
2612 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
2613 case XFS_DINODE_FMT_LOCAL:
2614 if ((iip->ili_format.ilf_fields & dataflag[whichfork]) &&
2615 (ifp->if_bytes > 0)) {
2616 ASSERT(ifp->if_u1.if_data != NULL);
2617 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2618 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
2619 }
2620 break;
2621
2622 case XFS_DINODE_FMT_EXTENTS:
2623 ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2624 !(iip->ili_format.ilf_fields & extflag[whichfork]));
2625 ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) ||
2626 (ifp->if_bytes == 0));
2627 ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) ||
2628 (ifp->if_bytes > 0));
2629 if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
2630 (ifp->if_bytes > 0)) {
2631 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
2632 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
2633 whichfork);
2634 }
2635 break;
2636
2637 case XFS_DINODE_FMT_BTREE:
2638 if ((iip->ili_format.ilf_fields & brootflag[whichfork]) &&
2639 (ifp->if_broot_bytes > 0)) {
2640 ASSERT(ifp->if_broot != NULL);
2641 ASSERT(ifp->if_broot_bytes <=
2642 (XFS_IFORK_SIZE(ip, whichfork) +
2643 XFS_BROOT_SIZE_ADJ));
2644 xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
2645 (xfs_bmdr_block_t *)cp,
2646 XFS_DFORK_SIZE(dip, mp, whichfork));
2647 }
2648 break;
2649
2650 case XFS_DINODE_FMT_DEV:
2651 if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
2652 ASSERT(whichfork == XFS_DATA_FORK);
2653 xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev);
2654 }
2655 break;
2656
2657 case XFS_DINODE_FMT_UUID:
2658 if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
2659 ASSERT(whichfork == XFS_DATA_FORK);
2660 memcpy(XFS_DFORK_DPTR(dip),
2661 &ip->i_df.if_u2.if_uuid,
2662 sizeof(uuid_t));
2663 }
2664 break;
2665
2666 default:
2667 ASSERT(0);
2668 break;
2669 }
2670 }
2671
2672 STATIC int
2673 xfs_iflush_cluster(
2674 xfs_inode_t *ip,
2675 xfs_buf_t *bp)
2676 {
2677 xfs_mount_t *mp = ip->i_mount;
2678 xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
2679 unsigned long first_index, mask;
2680 unsigned long inodes_per_cluster;
2681 int ilist_size;
2682 xfs_inode_t **ilist;
2683 xfs_inode_t *iq;
2684 int nr_found;
2685 int clcount = 0;
2686 int bufwasdelwri;
2687 int i;
2688
2689 ASSERT(pag->pagi_inodeok);
2690 ASSERT(pag->pag_ici_init);
2691
2692 inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
2693 ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
2694 ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
2695 if (!ilist)
2696 return 0;
2697
2698 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
2699 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
2700 read_lock(&pag->pag_ici_lock);
2701 /* really need a gang lookup range call here */
2702 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
2703 first_index, inodes_per_cluster);
2704 if (nr_found == 0)
2705 goto out_free;
2706
2707 for (i = 0; i < nr_found; i++) {
2708 iq = ilist[i];
2709 if (iq == ip)
2710 continue;
2711 /* if the inode lies outside this cluster, we're done. */
2712 if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index)
2713 break;
2714 /*
2715 * Do an un-protected check to see if the inode is dirty and
2716 * is a candidate for flushing. These checks will be repeated
2717 * later after the appropriate locks are acquired.
2718 */
2719 if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
2720 continue;
2721
2722 /*
2723 * Try to get locks. If any are unavailable or it is pinned,
2724 * then this inode cannot be flushed and is skipped.
2725 */
2726
2727 if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
2728 continue;
2729 if (!xfs_iflock_nowait(iq)) {
2730 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2731 continue;
2732 }
2733 if (xfs_ipincount(iq)) {
2734 xfs_ifunlock(iq);
2735 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2736 continue;
2737 }
2738
2739 /*
2740 * arriving here means that this inode can be flushed. First
2741 * re-check that it's dirty before flushing.
2742 */
2743 if (!xfs_inode_clean(iq)) {
2744 int error;
2745 error = xfs_iflush_int(iq, bp);
2746 if (error) {
2747 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2748 goto cluster_corrupt_out;
2749 }
2750 clcount++;
2751 } else {
2752 xfs_ifunlock(iq);
2753 }
2754 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2755 }
2756
2757 if (clcount) {
2758 XFS_STATS_INC(xs_icluster_flushcnt);
2759 XFS_STATS_ADD(xs_icluster_flushinode, clcount);
2760 }
2761
2762 out_free:
2763 read_unlock(&pag->pag_ici_lock);
2764 kmem_free(ilist);
2765 return 0;
2766
2767
2768 cluster_corrupt_out:
2769 /*
2770 * Corruption detected in the clustering loop. Invalidate the
2771 * inode buffer and shut down the filesystem.
2772 */
2773 read_unlock(&pag->pag_ici_lock);
2774 /*
2775 * Clean up the buffer. If it was B_DELWRI, just release it --
2776 * brelse can handle it with no problems. If not, shut down the
2777 * filesystem before releasing the buffer.
2778 */
2779 bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp);
2780 if (bufwasdelwri)
2781 xfs_buf_relse(bp);
2782
2783 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2784
2785 if (!bufwasdelwri) {
2786 /*
2787 * Just like incore_relse: if we have b_iodone functions,
2788 * mark the buffer as an error and call them. Otherwise
2789 * mark it as stale and brelse.
2790 */
2791 if (XFS_BUF_IODONE_FUNC(bp)) {
2792 XFS_BUF_CLR_BDSTRAT_FUNC(bp);
2793 XFS_BUF_UNDONE(bp);
2794 XFS_BUF_STALE(bp);
2795 XFS_BUF_ERROR(bp,EIO);
2796 xfs_biodone(bp);
2797 } else {
2798 XFS_BUF_STALE(bp);
2799 xfs_buf_relse(bp);
2800 }
2801 }
2802
2803 /*
2804 * Unlocks the flush lock
2805 */
2806 xfs_iflush_abort(iq);
2807 kmem_free(ilist);
2808 return XFS_ERROR(EFSCORRUPTED);
2809 }
2810
2811 /*
2812 * xfs_iflush() will write a modified inode's changes out to the
2813 * inode's on disk home. The caller must have the inode lock held
2814 * in at least shared mode and the inode flush completion must be
2815 * active as well. The inode lock will still be held upon return from
2816 * the call and the caller is free to unlock it.
2817 * The inode flush will be completed when the inode reaches the disk.
2818 * The flags indicate how the inode's buffer should be written out.
2819 */
2820 int
2821 xfs_iflush(
2822 xfs_inode_t *ip,
2823 uint flags)
2824 {
2825 xfs_inode_log_item_t *iip;
2826 xfs_buf_t *bp;
2827 xfs_dinode_t *dip;
2828 xfs_mount_t *mp;
2829 int error;
2830 int noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK);
2831 enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
2832
2833 XFS_STATS_INC(xs_iflush_count);
2834
2835 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2836 ASSERT(!completion_done(&ip->i_flush));
2837 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
2838 ip->i_d.di_nextents > ip->i_df.if_ext_max);
2839
2840 iip = ip->i_itemp;
2841 mp = ip->i_mount;
2842
2843 /*
2844 * If the inode isn't dirty, then just release the inode
2845 * flush lock and do nothing.
2846 */
2847 if (xfs_inode_clean(ip)) {
2848 xfs_ifunlock(ip);
2849 return 0;
2850 }
2851
2852 /*
2853 * We can't flush the inode until it is unpinned, so wait for it if we
2854 * are allowed to block. We know no one new can pin it, because we are
2855 * holding the inode lock shared and you need to hold it exclusively to
2856 * pin the inode.
2857 *
2858 * If we are not allowed to block, force the log out asynchronously so
2859 * that when we come back the inode will be unpinned. If other inodes
2860 * in the same cluster are dirty, they will probably write the inode
2861 * out for us if they occur after the log force completes.
2862 */
2863 if (noblock && xfs_ipincount(ip)) {
2864 xfs_iunpin_nowait(ip);
2865 xfs_ifunlock(ip);
2866 return EAGAIN;
2867 }
2868 xfs_iunpin_wait(ip);
2869
2870 /*
2871 * This may have been unpinned because the filesystem is shutting
2872 * down forcibly. If that's the case we must not write this inode
2873 * to disk, because the log record didn't make it to disk!
2874 */
2875 if (XFS_FORCED_SHUTDOWN(mp)) {
2876 ip->i_update_core = 0;
2877 if (iip)
2878 iip->ili_format.ilf_fields = 0;
2879 xfs_ifunlock(ip);
2880 return XFS_ERROR(EIO);
2881 }
2882
2883 /*
2884 * Decide how buffer will be flushed out. This is done before
2885 * the call to xfs_iflush_int because this field is zeroed by it.
2886 */
2887 if (iip != NULL && iip->ili_format.ilf_fields != 0) {
2888 /*
2889 * Flush out the inode buffer according to the directions
2890 * of the caller. In the cases where the caller has given
2891 * us a choice, we choose the non-delwri case. This is because
2892 * the inode is in the AIL and we need to get it out soon.
2893 */
2894 switch (flags) {
2895 case XFS_IFLUSH_SYNC:
2896 case XFS_IFLUSH_DELWRI_ELSE_SYNC:
2897 flags = 0;
2898 break;
2899 case XFS_IFLUSH_ASYNC_NOBLOCK:
2900 case XFS_IFLUSH_ASYNC:
2901 case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
2902 flags = INT_ASYNC;
2903 break;
2904 case XFS_IFLUSH_DELWRI:
2905 flags = INT_DELWRI;
2906 break;
2907 default:
2908 ASSERT(0);
2909 flags = 0;
2910 break;
2911 }
2912 } else {
2913 switch (flags) {
2914 case XFS_IFLUSH_DELWRI_ELSE_SYNC:
2915 case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
2916 case XFS_IFLUSH_DELWRI:
2917 flags = INT_DELWRI;
2918 break;
2919 case XFS_IFLUSH_ASYNC_NOBLOCK:
2920 case XFS_IFLUSH_ASYNC:
2921 flags = INT_ASYNC;
2922 break;
2923 case XFS_IFLUSH_SYNC:
2924 flags = 0;
2925 break;
2926 default:
2927 ASSERT(0);
2928 flags = 0;
2929 break;
2930 }
2931 }
2932
2933 /*
2934 * Get the buffer containing the on-disk inode.
2935 */
2936 error = xfs_itobp(mp, NULL, ip, &dip, &bp,
2937 noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK);
2938 if (error || !bp) {
2939 xfs_ifunlock(ip);
2940 return error;
2941 }
2942
2943 /*
2944 * First flush out the inode that xfs_iflush was called with.
2945 */
2946 error = xfs_iflush_int(ip, bp);
2947 if (error)
2948 goto corrupt_out;
2949
2950 /*
2951 * If the buffer is pinned then push on the log now so we won't
2952 * get stuck waiting in the write for too long.
2953 */
2954 if (XFS_BUF_ISPINNED(bp))
2955 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
2956
2957 /*
2958 * inode clustering:
2959 * see if other inodes can be gathered into this write
2960 */
2961 error = xfs_iflush_cluster(ip, bp);
2962 if (error)
2963 goto cluster_corrupt_out;
2964
2965 if (flags & INT_DELWRI) {
2966 xfs_bdwrite(mp, bp);
2967 } else if (flags & INT_ASYNC) {
2968 error = xfs_bawrite(mp, bp);
2969 } else {
2970 error = xfs_bwrite(mp, bp);
2971 }
2972 return error;
2973
2974 corrupt_out:
2975 xfs_buf_relse(bp);
2976 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2977 cluster_corrupt_out:
2978 /*
2979 * Unlocks the flush lock
2980 */
2981 xfs_iflush_abort(ip);
2982 return XFS_ERROR(EFSCORRUPTED);
2983 }
2984
2985
2986 STATIC int
2987 xfs_iflush_int(
2988 xfs_inode_t *ip,
2989 xfs_buf_t *bp)
2990 {
2991 xfs_inode_log_item_t *iip;
2992 xfs_dinode_t *dip;
2993 xfs_mount_t *mp;
2994 #ifdef XFS_TRANS_DEBUG
2995 int first;
2996 #endif
2997
2998 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2999 ASSERT(!completion_done(&ip->i_flush));
3000 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3001 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3002
3003 iip = ip->i_itemp;
3004 mp = ip->i_mount;
3005
3006
3007 /*
3008 * If the inode isn't dirty, then just release the inode
3009 * flush lock and do nothing.
3010 */
3011 if (xfs_inode_clean(ip)) {
3012 xfs_ifunlock(ip);
3013 return 0;
3014 }
3015
3016 /* set *dip = inode's place in the buffer */
3017 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
3018
3019 /*
3020 * Clear i_update_core before copying out the data.
3021 * This is for coordination with our timestamp updates
3022 * that don't hold the inode lock. They will always
3023 * update the timestamps BEFORE setting i_update_core,
3024 * so if we clear i_update_core after they set it we
3025 * are guaranteed to see their updates to the timestamps.
3026 * I believe that this depends on strongly ordered memory
3027 * semantics, but we have that. We use the SYNCHRONIZE
3028 * macro to make sure that the compiler does not reorder
3029 * the i_update_core store below the data copy that follows.
3030 */
3031 ip->i_update_core = 0;
3032 SYNCHRONIZE();
3033
3034 /*
3035 * Make sure to get the latest timestamps from the Linux inode.
3036 */
3037 xfs_synchronize_times(ip);
3038
3039 if (XFS_TEST_ERROR(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC,
3040 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
3041 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3042 "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p",
3043 ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3044 goto corrupt_out;
3045 }
3046 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
3047 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
3048 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3049 "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
3050 ip->i_ino, ip, ip->i_d.di_magic);
3051 goto corrupt_out;
3052 }
3053 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
3054 if (XFS_TEST_ERROR(
3055 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3056 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3057 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
3058 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3059 "xfs_iflush: Bad regular inode %Lu, ptr 0x%p",
3060 ip->i_ino, ip);
3061 goto corrupt_out;
3062 }
3063 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
3064 if (XFS_TEST_ERROR(
3065 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3066 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3067 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3068 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
3069 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3070 "xfs_iflush: Bad directory inode %Lu, ptr 0x%p",
3071 ip->i_ino, ip);
3072 goto corrupt_out;
3073 }
3074 }
3075 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3076 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
3077 XFS_RANDOM_IFLUSH_5)) {
3078 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3079 "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p",
3080 ip->i_ino,
3081 ip->i_d.di_nextents + ip->i_d.di_anextents,
3082 ip->i_d.di_nblocks,
3083 ip);
3084 goto corrupt_out;
3085 }
3086 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3087 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
3088 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3089 "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
3090 ip->i_ino, ip->i_d.di_forkoff, ip);
3091 goto corrupt_out;
3092 }
3093 /*
3094 * bump the flush iteration count, used to detect flushes which
3095 * postdate a log record during recovery.
3096 */
3097
3098 ip->i_d.di_flushiter++;
3099
3100 /*
3101 * Copy the dirty parts of the inode into the on-disk
3102 * inode. We always copy out the core of the inode,
3103 * because if the inode is dirty at all the core must
3104 * be.
3105 */
3106 xfs_dinode_to_disk(dip, &ip->i_d);
3107
3108 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3109 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3110 ip->i_d.di_flushiter = 0;
3111
3112 /*
3113 * If this is really an old format inode and the superblock version
3114 * has not been updated to support only new format inodes, then
3115 * convert back to the old inode format. If the superblock version
3116 * has been updated, then make the conversion permanent.
3117 */
3118 ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
3119 if (ip->i_d.di_version == 1) {
3120 if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
3121 /*
3122 * Convert it back.
3123 */
3124 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
3125 dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
3126 } else {
3127 /*
3128 * The superblock version has already been bumped,
3129 * so just make the conversion to the new inode
3130 * format permanent.
3131 */
3132 ip->i_d.di_version = 2;
3133 dip->di_version = 2;
3134 ip->i_d.di_onlink = 0;
3135 dip->di_onlink = 0;
3136 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
3137 memset(&(dip->di_pad[0]), 0,
3138 sizeof(dip->di_pad));
3139 ASSERT(ip->i_d.di_projid == 0);
3140 }
3141 }
3142
3143 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
3144 if (XFS_IFORK_Q(ip))
3145 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
3146 xfs_inobp_check(mp, bp);
3147
3148 /*
3149 * We've recorded everything logged in the inode, so we'd
3150 * like to clear the ilf_fields bits so we don't log and
3151 * flush things unnecessarily. However, we can't stop
3152 * logging all this information until the data we've copied
3153 * into the disk buffer is written to disk. If we did we might
3154 * overwrite the copy of the inode in the log with all the
3155 * data after re-logging only part of it, and in the face of
3156 * a crash we wouldn't have all the data we need to recover.
3157 *
3158 * What we do is move the bits to the ili_last_fields field.
3159 * When logging the inode, these bits are moved back to the
3160 * ilf_fields field. In the xfs_iflush_done() routine we
3161 * clear ili_last_fields, since we know that the information
3162 * those bits represent is permanently on disk. As long as
3163 * the flush completes before the inode is logged again, then
3164 * both ilf_fields and ili_last_fields will be cleared.
3165 *
3166 * We can play with the ilf_fields bits here, because the inode
3167 * lock must be held exclusively in order to set bits there
3168 * and the flush lock protects the ili_last_fields bits.
3169 * Set ili_logged so the flush done
3170 * routine can tell whether or not to look in the AIL.
3171 * Also, store the current LSN of the inode so that we can tell
3172 * whether the item has moved in the AIL from xfs_iflush_done().
3173 * In order to read the lsn we need the AIL lock, because
3174 * it is a 64 bit value that cannot be read atomically.
3175 */
3176 if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3177 iip->ili_last_fields = iip->ili_format.ilf_fields;
3178 iip->ili_format.ilf_fields = 0;
3179 iip->ili_logged = 1;
3180
3181 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3182 &iip->ili_item.li_lsn);
3183
3184 /*
3185 * Attach the function xfs_iflush_done to the inode's
3186 * buffer. This will remove the inode from the AIL
3187 * and unlock the inode's flush lock when the inode is
3188 * completely written to disk.
3189 */
3190 xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*))
3191 xfs_iflush_done, (xfs_log_item_t *)iip);
3192
3193 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
3194 ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
3195 } else {
3196 /*
3197 * We're flushing an inode which is not in the AIL and has
3198 * not been logged but has i_update_core set. For this
3199 * case we can use a B_DELWRI flush and immediately drop
3200 * the inode flush lock because we can avoid the whole
3201 * AIL state thing. It's OK to drop the flush lock now,
3202 * because we've already locked the buffer, and to do anything
3203 * you really need both of them.
3204 */
3205 if (iip != NULL) {
3206 ASSERT(iip->ili_logged == 0);
3207 ASSERT(iip->ili_last_fields == 0);
3208 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
3209 }
3210 xfs_ifunlock(ip);
3211 }
3212
3213 return 0;
3214
3215 corrupt_out:
3216 return XFS_ERROR(EFSCORRUPTED);
3217 }
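
/*
 * Editor's sketch, not part of the original source: the dirty-bits
 * handoff described in the long comment in xfs_iflush_int() is just
 * these three stores, performed under the flush lock.
 */
static void
sketch_flush_fields_handoff(
	xfs_inode_log_item_t	*iip)
{
	iip->ili_last_fields = iip->ili_format.ilf_fields;
	iip->ili_format.ilf_fields = 0;
	iip->ili_logged = 1;
	/* xfs_iflush_done() later clears ili_last_fields */
}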
3218
3219 /*
3220 * Return a pointer to the extent record at file index idx.
3221 */
3222 xfs_bmbt_rec_host_t *
3223 xfs_iext_get_ext(
3224 xfs_ifork_t *ifp, /* inode fork pointer */
3225 xfs_extnum_t idx) /* index of target extent */
3226 {
3227 ASSERT(idx >= 0);
3228 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
3229 return ifp->if_u1.if_ext_irec->er_extbuf;
3230 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3231 xfs_ext_irec_t *erp; /* irec pointer */
3232 int erp_idx = 0; /* irec index */
3233 xfs_extnum_t page_idx = idx; /* ext index in target list */
3234
3235 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3236 return &erp->er_extbuf[page_idx];
3237 } else if (ifp->if_bytes) {
3238 return &ifp->if_u1.if_extents[idx];
3239 } else {
3240 return NULL;
3241 }
3242 }
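
/*
 * Editor's sketch, not part of the original source: with XFS_IFEXTIREC
 * set, a logical extent index resolves to (irec, offset-within-irec).
 * The division below is only valid in the idealized case where every
 * extent page is completely full; real lists are ragged, which is why
 * the code above defers to xfs_iext_idx_to_irec().
 */
static xfs_bmbt_rec_host_t *
sketch_irec_lookup(
	xfs_ext_irec_t	*irecs,
	xfs_extnum_t	idx,
	xfs_extnum_t	exts_per_page)
{
	return &irecs[idx / exts_per_page].er_extbuf[idx % exts_per_page];
}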
3243
3244 /*
3245 * Insert new item(s) into the extent records for incore inode
3246 * fork 'ifp'. 'count' new items are inserted at index 'idx'.
3247 */
3248 void
3249 xfs_iext_insert(
3250 xfs_inode_t *ip, /* incore inode pointer */
3251 xfs_extnum_t idx, /* starting index of new items */
3252 xfs_extnum_t count, /* number of inserted items */
3253 xfs_bmbt_irec_t *new, /* items to insert */
3254 int state) /* type of extent conversion */
3255 {
3256 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
3257 xfs_extnum_t i; /* extent record index */
3258
3259 trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
3260
3261 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3262 xfs_iext_add(ifp, idx, count);
3263 for (i = idx; i < idx + count; i++, new++)
3264 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
3265 }
3266
3267 /*
3268 * This is called when the amount of space required for incore file
3269 * extents needs to be increased. The ext_diff parameter stores the
3270 * number of new extents being added and the idx parameter contains
3271 * the extent index where the new extents will be added. If the new
3272 * extents are being appended, then we just need to (re)allocate and
3273 * initialize the space. Otherwise, if the new extents are being
3274 * inserted into the middle of the existing entries, a bit more work
3275 * is required to make room for the new extents to be inserted. The
3276 * caller is responsible for filling in the new extent entries upon
3277 * return.
3278 */
3279 void
3280 xfs_iext_add(
3281 xfs_ifork_t *ifp, /* inode fork pointer */
3282 xfs_extnum_t idx, /* index to begin adding exts */
3283 int ext_diff) /* number of extents to add */
3284 {
3285 int byte_diff; /* new bytes being added */
3286 int new_size; /* size of extents after adding */
3287 xfs_extnum_t nextents; /* number of extents in file */
3288
3289 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3290 ASSERT((idx >= 0) && (idx <= nextents));
3291 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
3292 new_size = ifp->if_bytes + byte_diff;
3293 /*
3294 * If the new number of extents (nextents + ext_diff)
3295 * fits inside the inode, then continue to use the inline
3296 * extent buffer.
3297 */
3298 if (nextents + ext_diff <= XFS_INLINE_EXTS) {
3299 if (idx < nextents) {
3300 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
3301 &ifp->if_u2.if_inline_ext[idx],
3302 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3303 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
3304 }
3305 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3306 ifp->if_real_bytes = 0;
3307 ifp->if_lastex = nextents + ext_diff;
3308 }
3309 /*
3310 * Otherwise use a linear (direct) extent list.
3311 * If the extents are currently inside the inode,
3312 * xfs_iext_realloc_direct will switch us from
3313 * inline to direct extent allocation mode.
3314 */
3315 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
3316 xfs_iext_realloc_direct(ifp, new_size);
3317 if (idx < nextents) {
3318 memmove(&ifp->if_u1.if_extents[idx + ext_diff],
3319 &ifp->if_u1.if_extents[idx],
3320 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3321 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
3322 }
3323 }
3324 /* Indirection array */
3325 else {
3326 xfs_ext_irec_t *erp;
3327 int erp_idx = 0;
3328 int page_idx = idx;
3329
3330 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
3331 if (ifp->if_flags & XFS_IFEXTIREC) {
3332 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
3333 } else {
3334 xfs_iext_irec_init(ifp);
3335 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3336 erp = ifp->if_u1.if_ext_irec;
3337 }
3338 /* Extents fit in target extent page */
3339 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
3340 if (page_idx < erp->er_extcount) {
3341 memmove(&erp->er_extbuf[page_idx + ext_diff],
3342 &erp->er_extbuf[page_idx],
3343 (erp->er_extcount - page_idx) *
3344 sizeof(xfs_bmbt_rec_t));
3345 memset(&erp->er_extbuf[page_idx], 0, byte_diff);
3346 }
3347 erp->er_extcount += ext_diff;
3348 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3349 }
3350 /* Insert a new extent page */
3351 else if (erp) {
3352 xfs_iext_add_indirect_multi(ifp,
3353 erp_idx, page_idx, ext_diff);
3354 }
3355 /*
3356 * If extent(s) are being appended to the last page in
3357 * the indirection array and the new extent(s) don't fit
3358 * in the page, then erp is NULL and erp_idx is set to
3359 * the next index needed in the indirection array.
3360 */
3361 else {
3362 int count = ext_diff;
3363
3364 while (count) {
3365 erp = xfs_iext_irec_new(ifp, erp_idx);
3366 erp->er_extcount = count;
3367 count -= MIN(count, (int)XFS_LINEAR_EXTS);
3368 if (count) {
3369 erp_idx++;
3370 }
3371 }
3372 }
3373 }
3374 ifp->if_bytes = new_size;
3375 }
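
/*
 * Editor's sketch, not part of the original source: the three storage
 * tiers that xfs_iext_add() switches between, written as a bare
 * decision function (hypothetical helper).
 */
static int
sketch_iext_storage_tier(
	xfs_extnum_t	nextents)
{
	if (nextents <= XFS_INLINE_EXTS)
		return 0;	/* inline buffer inside the inode fork */
	if (nextents <= XFS_LINEAR_EXTS)
		return 1;	/* single direct (linear) extent buffer */
	return 2;		/* indirection array of extent pages */
}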
3376
3377 /*
3378 * This is called when incore extents are being added to the indirection
3379 * array and the new extents do not fit in the target extent list. The
3380 * erp_idx parameter contains the irec index for the target extent list
3381 * in the indirection array, and the idx parameter contains the extent
3382 * index within the list. The number of extents being added is stored
3383 * in the count parameter.
3384 *
3385 * |-------| |-------|
3386 * | | | | idx - number of extents before idx
3387 * | idx | | count |
3388 * | | | | count - number of extents being inserted at idx
3389 * |-------| |-------|
3390 * | count | | nex2 | nex2 - number of extents after idx + count
3391 * |-------| |-------|
3392 */
3393 void
3394 xfs_iext_add_indirect_multi(
3395 xfs_ifork_t *ifp, /* inode fork pointer */
3396 int erp_idx, /* target extent irec index */
3397 xfs_extnum_t idx, /* index within target list */
3398 int count) /* new extents being added */
3399 {
3400 int byte_diff; /* new bytes being added */
3401 xfs_ext_irec_t *erp; /* pointer to irec entry */
3402 xfs_extnum_t ext_diff; /* number of extents to add */
3403 xfs_extnum_t ext_cnt; /* new extents still needed */
3404 xfs_extnum_t nex2; /* extents after idx + count */
3405 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */
3406 int nlists; /* number of irec's (lists) */
3407
3408 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3409 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3410 nex2 = erp->er_extcount - idx;
3411 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3412
3413 /*
3414 * Save second part of target extent list
3415 * (all extents past idx). */
3416 if (nex2) {
3417 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3418 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
3419 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3420 erp->er_extcount -= nex2;
3421 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
3422 memset(&erp->er_extbuf[idx], 0, byte_diff);
3423 }
3424
3425 /*
3426 * Add the new extents to the end of the target
3427 * list, then allocate new irec record(s) and
3428 * extent buffer(s) as needed to store the rest
3429 * of the new extents.
3430 */
3431 ext_cnt = count;
3432 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
3433 if (ext_diff) {
3434 erp->er_extcount += ext_diff;
3435 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3436 ext_cnt -= ext_diff;
3437 }
3438 while (ext_cnt) {
3439 erp_idx++;
3440 erp = xfs_iext_irec_new(ifp, erp_idx);
3441 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
3442 erp->er_extcount = ext_diff;
3443 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3444 ext_cnt -= ext_diff;
3445 }
3446
3447 /* Add nex2 extents back to indirection array */
3448 if (nex2) {
3449 xfs_extnum_t ext_avail;
3450 int i;
3451
3452 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3453 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
3454 i = 0;
3455 /*
3456 * If nex2 extents fit in the current page, append
3457 * nex2_ep after the new extents.
3458 */
3459 if (nex2 <= ext_avail) {
3460 i = erp->er_extcount;
3461 }
3462 /*
3463 * Otherwise, check if space is available in the
3464 * next page.
3465 */
3466 else if ((erp_idx < nlists - 1) &&
3467 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
3468 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
3469 erp_idx++;
3470 erp++;
3471 /* Create a hole for nex2 extents */
3472 memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
3473 erp->er_extcount * sizeof(xfs_bmbt_rec_t));
3474 }
3475 /*
3476 * Final choice, create a new extent page for
3477 * nex2 extents.
3478 */
3479 else {
3480 erp_idx++;
3481 erp = xfs_iext_irec_new(ifp, erp_idx);
3482 }
3483 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
3484 kmem_free(nex2_ep);
3485 erp->er_extcount += nex2;
3486 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
3487 }
3488 }
3489
3490 /*
3491 * This is called when the amount of space required for incore file
3492 * extents needs to be decreased. The ext_diff parameter stores the
3493 * number of extents to be removed and the idx parameter contains
3494 * the extent index where the extents will be removed from.
3495 *
3496 * If the amount of space needed has decreased below the linear
3497 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
3498 * extent array. Otherwise, use kmem_realloc() to adjust the
3499 * size to what is needed.
3500 */
3501 void
3502 xfs_iext_remove(
3503 xfs_inode_t *ip, /* incore inode pointer */
3504 xfs_extnum_t idx, /* index to begin removing exts */
3505 int ext_diff, /* number of extents to remove */
3506 int state) /* type of extent conversion */
3507 {
3508 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
3509 xfs_extnum_t nextents; /* number of extents in file */
3510 int new_size; /* size of extents after removal */
3511
3512 trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
3513
3514 ASSERT(ext_diff > 0);
3515 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3516 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
3517
3518 if (new_size == 0) {
3519 xfs_iext_destroy(ifp);
3520 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3521 xfs_iext_remove_indirect(ifp, idx, ext_diff);
3522 } else if (ifp->if_real_bytes) {
3523 xfs_iext_remove_direct(ifp, idx, ext_diff);
3524 } else {
3525 xfs_iext_remove_inline(ifp, idx, ext_diff);
3526 }
3527 ifp->if_bytes = new_size;
3528 }
3529
3530 /*
3531 * This removes ext_diff extents from the inline buffer, beginning
3532 * at extent index idx.
3533 */
3534 void
3535 xfs_iext_remove_inline(
3536 xfs_ifork_t *ifp, /* inode fork pointer */
3537 xfs_extnum_t idx, /* index to begin removing exts */
3538 int ext_diff) /* number of extents to remove */
3539 {
3540 int nextents; /* number of extents in file */
3541
3542 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3543 ASSERT(idx < XFS_INLINE_EXTS);
3544 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3545 ASSERT(((nextents - ext_diff) > 0) &&
3546 (nextents - ext_diff) < XFS_INLINE_EXTS);
3547
3548 if (idx + ext_diff < nextents) {
3549 memmove(&ifp->if_u2.if_inline_ext[idx],
3550 &ifp->if_u2.if_inline_ext[idx + ext_diff],
3551 (nextents - (idx + ext_diff)) *
3552 sizeof(xfs_bmbt_rec_t));
3553 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
3554 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3555 } else {
3556 memset(&ifp->if_u2.if_inline_ext[idx], 0,
3557 ext_diff * sizeof(xfs_bmbt_rec_t));
3558 }
3559 }
3560
3561 /*
3562 * This removes ext_diff extents from a linear (direct) extent list,
3563 * beginning at extent index idx. If the extents are being removed
3564 	 * from the end of the list (i.e. truncate) then we just need to re-
3565 * allocate the list to remove the extra space. Otherwise, if the
3566 * extents are being removed from the middle of the existing extent
3567 * entries, then we first need to move the extent records beginning
3568 * at idx + ext_diff up in the list to overwrite the records being
3569 * removed, then remove the extra space via kmem_realloc.
3570 */
3571 void
3572 xfs_iext_remove_direct(
3573 xfs_ifork_t *ifp, /* inode fork pointer */
3574 xfs_extnum_t idx, /* index to begin removing exts */
3575 int ext_diff) /* number of extents to remove */
3576 {
3577 xfs_extnum_t nextents; /* number of extents in file */
3578 int new_size; /* size of extents after removal */
3579
3580 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3581 new_size = ifp->if_bytes -
3582 (ext_diff * sizeof(xfs_bmbt_rec_t));
3583 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3584
3585 if (new_size == 0) {
3586 xfs_iext_destroy(ifp);
3587 return;
3588 }
3589 /* Move extents up in the list (if needed) */
3590 if (idx + ext_diff < nextents) {
3591 memmove(&ifp->if_u1.if_extents[idx],
3592 &ifp->if_u1.if_extents[idx + ext_diff],
3593 (nextents - (idx + ext_diff)) *
3594 sizeof(xfs_bmbt_rec_t));
3595 }
3596 memset(&ifp->if_u1.if_extents[nextents - ext_diff],
3597 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3598 /*
3599 * Reallocate the direct extent list. If the extents
3600 * will fit inside the inode then xfs_iext_realloc_direct
3601 * will switch from direct to inline extent allocation
3602 * mode for us.
3603 */
3604 xfs_iext_realloc_direct(ifp, new_size);
3605 ifp->if_bytes = new_size;
3606 }
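
/*
 * Stand-alone sketch of the removal pattern used by the two functions
 * above, with a plain int array standing in for xfs_bmbt_rec_t
 * records: slide the tail down over the removed range, then zero the
 * vacated slots at the end.  Illustrative only.
 */
static void
sketch_remove_range(
	int		*list,		/* record array */
	int		nrecs,		/* records currently in use */
	int		idx,		/* first record to remove */
	int		nremove)	/* number of records to remove */
{
	/* move the tail up only when removing from the middle */
	if (idx + nremove < nrecs)
		memmove(&list[idx], &list[idx + nremove],
			(nrecs - (idx + nremove)) * sizeof(int));
	/* the last nremove slots are now unused either way */
	memset(&list[nrecs - nremove], 0, nremove * sizeof(int));
}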
3607
3608 /*
3609 * This is called when incore extents are being removed from the
3610 * indirection array and the extents being removed span multiple extent
3611 * buffers. The idx parameter contains the file extent index where we
3612 * want to begin removing extents, and the count parameter contains
3613 * how many extents need to be removed.
3614 *
3615 * |-------| |-------|
3616 * | nex1 | | | nex1 - number of extents before idx
3617 * |-------| | count |
3618 * | | | | count - number of extents being removed at idx
3619 * | count | |-------|
3620 * | | | nex2 | nex2 - number of extents after idx + count
3621 * |-------| |-------|
3622 */
3623 void
3624 xfs_iext_remove_indirect(
3625 xfs_ifork_t *ifp, /* inode fork pointer */
3626 xfs_extnum_t idx, /* index to begin removing extents */
3627 int count) /* number of extents to remove */
3628 {
3629 xfs_ext_irec_t *erp; /* indirection array pointer */
3630 int erp_idx = 0; /* indirection array index */
3631 xfs_extnum_t ext_cnt; /* extents left to remove */
3632 xfs_extnum_t ext_diff; /* extents to remove in current list */
3633 xfs_extnum_t nex1; /* number of extents before idx */
3634 xfs_extnum_t nex2; /* extents after idx + count */
3635 int nlists; /* entries in indirection array */
3636 int page_idx = idx; /* index in target extent list */
3637
3638 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3639 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3640 ASSERT(erp != NULL);
3641 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3642 nex1 = page_idx;
3643 ext_cnt = count;
3644 while (ext_cnt) {
3645 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
3646 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
3647 /*
3648 * Check for deletion of entire list;
3649 * xfs_iext_irec_remove() updates extent offsets.
3650 */
3651 if (ext_diff == erp->er_extcount) {
3652 xfs_iext_irec_remove(ifp, erp_idx);
3653 ext_cnt -= ext_diff;
3654 nex1 = 0;
3655 if (ext_cnt) {
3656 ASSERT(erp_idx < ifp->if_real_bytes /
3657 XFS_IEXT_BUFSZ);
3658 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3659 nex1 = 0;
3660 continue;
3661 } else {
3662 break;
3663 }
3664 }
3665 /* Move extents up (if needed) */
3666 if (nex2) {
3667 memmove(&erp->er_extbuf[nex1],
3668 &erp->er_extbuf[nex1 + ext_diff],
3669 nex2 * sizeof(xfs_bmbt_rec_t));
3670 }
3671 /* Zero out rest of page */
3672 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
3673 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
3674 /* Update remaining counters */
3675 erp->er_extcount -= ext_diff;
3676 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
3677 ext_cnt -= ext_diff;
3678 nex1 = 0;
3679 erp_idx++;
3680 erp++;
3681 }
3682 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
3683 xfs_iext_irec_compact(ifp);
3684 }
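
/*
 * Worked sketch of the per-page arithmetic above.  For a page holding
 * "used" records, with the removal starting "nex1" records in and
 * "ext_cnt" records still to remove overall, this returns how many
 * records survive past the hole on that page (nex2 above); whatever
 * the page cannot absorb spills into the next page with nex1 = 0.
 */
static int
sketch_tail_after_hole(
	int		used,		/* er_extcount of this page */
	int		nex1,		/* records before the hole */
	int		ext_cnt)	/* records left to remove */
{
	/* how much of the request this page absorbs (ext_diff above) */
	int		ext_diff = ext_cnt < used - nex1 ?
				   ext_cnt : used - nex1;

	return used - (nex1 + ext_diff);
	/*
	 * e.g. two pages of 5 records each, removing 4 at index 3:
	 * page 0: used=5, nex1=3, ext_cnt=4 -> absorbs 2, tail 0
	 * page 1: used=5, nex1=0, ext_cnt=2 -> absorbs 2, tail 3
	 */
}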
3685
3686 /*
3687 * Create, destroy, or resize a linear (direct) block of extents.
3688 */
3689 void
3690 xfs_iext_realloc_direct(
3691 xfs_ifork_t *ifp, /* inode fork pointer */
3692 int new_size) /* new size of extents */
3693 {
3694 int rnew_size; /* real new size of extents */
3695
3696 rnew_size = new_size;
3697
3698 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
3699 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
3700 (new_size != ifp->if_real_bytes)));
3701
3702 /* Free extent records */
3703 if (new_size == 0) {
3704 xfs_iext_destroy(ifp);
3705 }
3706 /* Resize direct extent list and zero any new bytes */
3707 else if (ifp->if_real_bytes) {
3708 /* Check if extents will fit inside the inode */
3709 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
3710 xfs_iext_direct_to_inline(ifp, new_size /
3711 (uint)sizeof(xfs_bmbt_rec_t));
3712 ifp->if_bytes = new_size;
3713 return;
3714 }
3715 		if (!is_power_of_2(new_size)) {
3716 rnew_size = roundup_pow_of_two(new_size);
3717 }
3718 if (rnew_size != ifp->if_real_bytes) {
3719 ifp->if_u1.if_extents =
3720 kmem_realloc(ifp->if_u1.if_extents,
3721 rnew_size,
3722 ifp->if_real_bytes, KM_NOFS);
3723 }
3724 if (rnew_size > ifp->if_real_bytes) {
3725 memset(&ifp->if_u1.if_extents[ifp->if_bytes /
3726 (uint)sizeof(xfs_bmbt_rec_t)], 0,
3727 rnew_size - ifp->if_real_bytes);
3728 }
3729 }
3730 /*
3731 * Switch from the inline extent buffer to a direct
3732 * extent list. Be sure to include the inline extent
3733 * bytes in new_size.
3734 */
3735 else {
3736 new_size += ifp->if_bytes;
3737 if (!is_power_of_2(new_size)) {
3738 rnew_size = roundup_pow_of_two(new_size);
3739 }
3740 xfs_iext_inline_to_direct(ifp, rnew_size);
3741 }
3742 ifp->if_real_bytes = rnew_size;
3743 ifp->if_bytes = new_size;
3744 }
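
/*
 * Sketch of the sizing rule above: direct lists are kept at
 * power-of-two byte sizes so repeated one-extent additions do not
 * trigger a kmem_realloc() on every insert.  is_power_of_2() and
 * roundup_pow_of_two() come from linux/log2.h, which this file
 * already includes; the wrapper itself is illustrative.
 */
static int
sketch_direct_size(
	int		new_size)	/* bytes needed, > 0 */
{
	if (!is_power_of_2(new_size))
		return roundup_pow_of_two(new_size);
	return new_size;		/* e.g. 48 -> 64, 64 -> 64 */
}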
3745
3746 /*
3747 * Switch from linear (direct) extent records to inline buffer.
3748 */
3749 void
3750 xfs_iext_direct_to_inline(
3751 xfs_ifork_t *ifp, /* inode fork pointer */
3752 xfs_extnum_t nextents) /* number of extents in file */
3753 {
3754 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3755 ASSERT(nextents <= XFS_INLINE_EXTS);
3756 /*
3757 * The inline buffer was zeroed when we switched
3758 * from inline to direct extent allocation mode,
3759 * so we don't need to clear it here.
3760 */
3761 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
3762 nextents * sizeof(xfs_bmbt_rec_t));
3763 kmem_free(ifp->if_u1.if_extents);
3764 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3765 ifp->if_real_bytes = 0;
3766 }
3767
3768 /*
3769 * Switch from inline buffer to linear (direct) extent records.
3770 * new_size should already be rounded up to the next power of 2
3771 * by the caller (when appropriate), so use new_size as it is.
3772 * However, since new_size may be rounded up, we can't update
3773 * if_bytes here. It is the caller's responsibility to update
3774 * if_bytes upon return.
3775 */
3776 void
3777 xfs_iext_inline_to_direct(
3778 xfs_ifork_t *ifp, /* inode fork pointer */
3779 int new_size) /* number of extents in file */
3780 {
3781 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
3782 memset(ifp->if_u1.if_extents, 0, new_size);
3783 if (ifp->if_bytes) {
3784 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
3785 ifp->if_bytes);
3786 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
3787 sizeof(xfs_bmbt_rec_t));
3788 }
3789 ifp->if_real_bytes = new_size;
3790 }
3791
3792 /*
3793 * Resize an extent indirection array to new_size bytes.
3794 */
3795 STATIC void
3796 xfs_iext_realloc_indirect(
3797 xfs_ifork_t *ifp, /* inode fork pointer */
3798 int new_size) /* new indirection array size */
3799 {
3800 int nlists; /* number of irec's (ex lists) */
3801 int size; /* current indirection array size */
3802
3803 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3804 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3805 size = nlists * sizeof(xfs_ext_irec_t);
3806 ASSERT(ifp->if_real_bytes);
3807 ASSERT((new_size >= 0) && (new_size != size));
3808 if (new_size == 0) {
3809 xfs_iext_destroy(ifp);
3810 } else {
3811 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
3812 kmem_realloc(ifp->if_u1.if_ext_irec,
3813 new_size, size, KM_NOFS);
3814 }
3815 }
3816
3817 /*
3818 * Switch from indirection array to linear (direct) extent allocations.
3819 */
3820 STATIC void
3821 xfs_iext_indirect_to_direct(
3822 xfs_ifork_t *ifp) /* inode fork pointer */
3823 {
3824 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
3825 xfs_extnum_t nextents; /* number of extents in file */
3826 int size; /* size of file extents */
3827
3828 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3829 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3830 ASSERT(nextents <= XFS_LINEAR_EXTS);
3831 size = nextents * sizeof(xfs_bmbt_rec_t);
3832
3833 xfs_iext_irec_compact_pages(ifp);
3834 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
3835
3836 ep = ifp->if_u1.if_ext_irec->er_extbuf;
3837 kmem_free(ifp->if_u1.if_ext_irec);
3838 ifp->if_flags &= ~XFS_IFEXTIREC;
3839 ifp->if_u1.if_extents = ep;
3840 ifp->if_bytes = size;
3841 if (nextents < XFS_LINEAR_EXTS) {
3842 xfs_iext_realloc_direct(ifp, size);
3843 }
3844 }
3845
3846 /*
3847 * Free incore file extents.
3848 */
3849 void
3850 xfs_iext_destroy(
3851 xfs_ifork_t *ifp) /* inode fork pointer */
3852 {
3853 if (ifp->if_flags & XFS_IFEXTIREC) {
3854 int erp_idx;
3855 int nlists;
3856
3857 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3858 		for (erp_idx = nlists - 1; erp_idx >= 0; erp_idx--) {
3859 xfs_iext_irec_remove(ifp, erp_idx);
3860 }
3861 ifp->if_flags &= ~XFS_IFEXTIREC;
3862 } else if (ifp->if_real_bytes) {
3863 kmem_free(ifp->if_u1.if_extents);
3864 } else if (ifp->if_bytes) {
3865 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
3866 sizeof(xfs_bmbt_rec_t));
3867 }
3868 ifp->if_u1.if_extents = NULL;
3869 ifp->if_real_bytes = 0;
3870 ifp->if_bytes = 0;
3871 }
3872
3873 /*
3874 * Return a pointer to the extent record for file system block bno.
3875 */
3876 xfs_bmbt_rec_host_t * /* pointer to found extent record */
3877 xfs_iext_bno_to_ext(
3878 xfs_ifork_t *ifp, /* inode fork pointer */
3879 xfs_fileoff_t bno, /* block number to search for */
3880 xfs_extnum_t *idxp) /* index of target extent */
3881 {
3882 xfs_bmbt_rec_host_t *base; /* pointer to first extent */
3883 xfs_filblks_t blockcount = 0; /* number of blocks in extent */
3884 xfs_bmbt_rec_host_t *ep = NULL; /* pointer to target extent */
3885 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
3886 int high; /* upper boundary in search */
3887 xfs_extnum_t idx = 0; /* index of target extent */
3888 int low; /* lower boundary in search */
3889 xfs_extnum_t nextents; /* number of file extents */
3890 xfs_fileoff_t startoff = 0; /* start offset of extent */
3891
3892 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3893 if (nextents == 0) {
3894 *idxp = 0;
3895 return NULL;
3896 }
3897 low = 0;
3898 if (ifp->if_flags & XFS_IFEXTIREC) {
3899 /* Find target extent list */
3900 int erp_idx = 0;
3901 erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
3902 base = erp->er_extbuf;
3903 high = erp->er_extcount - 1;
3904 } else {
3905 base = ifp->if_u1.if_extents;
3906 high = nextents - 1;
3907 }
3908 /* Binary search extent records */
3909 while (low <= high) {
3910 idx = (low + high) >> 1;
3911 ep = base + idx;
3912 startoff = xfs_bmbt_get_startoff(ep);
3913 blockcount = xfs_bmbt_get_blockcount(ep);
3914 if (bno < startoff) {
3915 high = idx - 1;
3916 } else if (bno >= startoff + blockcount) {
3917 low = idx + 1;
3918 } else {
3919 /* Convert back to file-based extent index */
3920 if (ifp->if_flags & XFS_IFEXTIREC) {
3921 idx += erp->er_extoff;
3922 }
3923 *idxp = idx;
3924 return ep;
3925 }
3926 }
3927 /* Convert back to file-based extent index */
3928 if (ifp->if_flags & XFS_IFEXTIREC) {
3929 idx += erp->er_extoff;
3930 }
3931 if (bno >= startoff + blockcount) {
3932 if (++idx == nextents) {
3933 ep = NULL;
3934 } else {
3935 ep = xfs_iext_get_ext(ifp, idx);
3936 }
3937 }
3938 *idxp = idx;
3939 return ep;
3940 }
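
/*
 * Stand-alone sketch of the binary search above, over a sorted array
 * of half-open block ranges [start, start + len).  It returns the
 * index of the range containing bno, or of the first range past bno
 * (possibly nranges), mirroring how the search above falls through to
 * the following extent.  Simplified stand-in types throughout.
 */
struct sketch_range {
	unsigned long long	start;
	unsigned long long	len;
};

static int
sketch_bno_search(
	struct sketch_range	*r,	/* sorted, non-overlapping */
	int			nranges,
	unsigned long long	bno)
{
	int	low = 0;
	int	high = nranges - 1;
	int	idx = 0;

	if (!nranges)
		return 0;
	while (low <= high) {
		idx = (low + high) >> 1;
		if (bno < r[idx].start)
			high = idx - 1;
		else if (bno >= r[idx].start + r[idx].len)
			low = idx + 1;
		else
			return idx;	/* bno falls inside r[idx] */
	}
	/* not inside any range: step past the last one examined */
	if (bno >= r[idx].start + r[idx].len)
		idx++;
	return idx;
}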
3941
3942 /*
3943 * Return a pointer to the indirection array entry containing the
3944 * extent record for filesystem block bno. Store the index of the
3945 * target irec in *erp_idxp.
3946 */
3947 xfs_ext_irec_t * /* pointer to found extent record */
3948 xfs_iext_bno_to_irec(
3949 xfs_ifork_t *ifp, /* inode fork pointer */
3950 xfs_fileoff_t bno, /* block number to search for */
3951 int *erp_idxp) /* irec index of target ext list */
3952 {
3953 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
3954 xfs_ext_irec_t *erp_next; /* next indirection array entry */
3955 int erp_idx; /* indirection array index */
3956 int nlists; /* number of extent irec's (lists) */
3957 int high; /* binary search upper limit */
3958 int low; /* binary search lower limit */
3959
3960 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3961 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3962 erp_idx = 0;
3963 low = 0;
3964 high = nlists - 1;
3965 while (low <= high) {
3966 erp_idx = (low + high) >> 1;
3967 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3968 erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
3969 if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
3970 high = erp_idx - 1;
3971 } else if (erp_next && bno >=
3972 xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
3973 low = erp_idx + 1;
3974 } else {
3975 break;
3976 }
3977 }
3978 *erp_idxp = erp_idx;
3979 return erp;
3980 }
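
/*
 * Sketch of the page-level search above: each page is keyed by the
 * start offset of its first record, so bno belongs to the last page
 * whose key is <= bno.  first_start[] stands in for reading
 * xfs_bmbt_get_startoff() of each page's er_extbuf; names are
 * illustrative, and at least one page is assumed, as in the code
 * above.
 */
static int
sketch_bno_to_page(
	unsigned long long	*first_start,	/* key per page, sorted */
	int			npages,
	unsigned long long	bno)
{
	int	low = 0;
	int	high = npages - 1;
	int	page = 0;

	while (low <= high) {
		page = (low + high) >> 1;
		if (bno < first_start[page])
			high = page - 1;
		else if (page < npages - 1 && bno >= first_start[page + 1])
			low = page + 1;
		else
			break;		/* bno maps into this page */
	}
	return page;
}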
3981
3982 /*
3983 * Return a pointer to the indirection array entry containing the
3984 * extent record at file extent index *idxp. Store the index of the
3985 * target irec in *erp_idxp and store the page index of the target
3986 * extent record in *idxp.
3987 */
3988 xfs_ext_irec_t *
3989 xfs_iext_idx_to_irec(
3990 xfs_ifork_t *ifp, /* inode fork pointer */
3991 xfs_extnum_t *idxp, /* extent index (file -> page) */
3992 int *erp_idxp, /* pointer to target irec */
3993 int realloc) /* new bytes were just added */
3994 {
3995 xfs_ext_irec_t *prev; /* pointer to previous irec */
3996 xfs_ext_irec_t *erp = NULL; /* pointer to current irec */
3997 int erp_idx; /* indirection array index */
3998 int nlists; /* number of irec's (ex lists) */
3999 int high; /* binary search upper limit */
4000 int low; /* binary search lower limit */
4001 xfs_extnum_t page_idx = *idxp; /* extent index in target list */
4002
4003 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4004 ASSERT(page_idx >= 0 && page_idx <=
4005 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
4006 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4007 erp_idx = 0;
4008 low = 0;
4009 high = nlists - 1;
4010
4011 /* Binary search extent irec's */
4012 while (low <= high) {
4013 erp_idx = (low + high) >> 1;
4014 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4015 prev = erp_idx > 0 ? erp - 1 : NULL;
4016 if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
4017 realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
4018 high = erp_idx - 1;
4019 } else if (page_idx > erp->er_extoff + erp->er_extcount ||
4020 (page_idx == erp->er_extoff + erp->er_extcount &&
4021 !realloc)) {
4022 low = erp_idx + 1;
4023 } else if (page_idx == erp->er_extoff + erp->er_extcount &&
4024 erp->er_extcount == XFS_LINEAR_EXTS) {
4025 ASSERT(realloc);
4026 page_idx = 0;
4027 erp_idx++;
4028 erp = erp_idx < nlists ? erp + 1 : NULL;
4029 break;
4030 } else {
4031 page_idx -= erp->er_extoff;
4032 break;
4033 }
4034 }
4035 *idxp = page_idx;
4036 *erp_idxp = erp_idx;
4037 	return erp;
4038 }
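
/*
 * Sketch of the index mapping above, ignoring the realloc special
 * cases: er_extoff values form a prefix sum of the page counts, so a
 * file-wide extent index translates into a page number plus an offset
 * within that page.  A linear scan is used here for clarity where the
 * code above bisects; names are illustrative.
 */
static int
sketch_idx_to_page(
	int		*extoff,	/* er_extoff per page */
	int		*extcount,	/* er_extcount per page */
	int		npages,
	int		*file_idx)	/* in: file index, out: page offset */
{
	int		page;

	for (page = 0; page < npages; page++) {
		if (*file_idx < extoff[page] + extcount[page]) {
			*file_idx -= extoff[page];	/* now page-local */
			return page;
		}
	}
	return -1;	/* e.g. extoff={0,4,9}, index 6 -> page 1, offset 2 */
}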
4039
4040 /*
4041 * Allocate and initialize an indirection array once the space needed
4042 * for incore extents increases above XFS_IEXT_BUFSZ.
4043 */
4044 void
4045 xfs_iext_irec_init(
4046 xfs_ifork_t *ifp) /* inode fork pointer */
4047 {
4048 xfs_ext_irec_t *erp; /* indirection array pointer */
4049 xfs_extnum_t nextents; /* number of extents in file */
4050
4051 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
4052 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4053 ASSERT(nextents <= XFS_LINEAR_EXTS);
4054
4055 erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
4056
4057 if (nextents == 0) {
4058 ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
4059 } else if (!ifp->if_real_bytes) {
4060 xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
4061 } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
4062 xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
4063 }
4064 erp->er_extbuf = ifp->if_u1.if_extents;
4065 erp->er_extcount = nextents;
4066 erp->er_extoff = 0;
4067
4068 ifp->if_flags |= XFS_IFEXTIREC;
4069 ifp->if_real_bytes = XFS_IEXT_BUFSZ;
4070 ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
4071 ifp->if_u1.if_ext_irec = erp;
4072
4073 return;
4074 }
4075
4076 /*
4077 * Allocate and initialize a new entry in the indirection array.
4078 */
4079 xfs_ext_irec_t *
4080 xfs_iext_irec_new(
4081 xfs_ifork_t *ifp, /* inode fork pointer */
4082 int erp_idx) /* index for new irec */
4083 {
4084 xfs_ext_irec_t *erp; /* indirection array pointer */
4085 int i; /* loop counter */
4086 int nlists; /* number of irec's (ex lists) */
4087
4088 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4089 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4090
4091 /* Resize indirection array */
4092 xfs_iext_realloc_indirect(ifp, ++nlists *
4093 sizeof(xfs_ext_irec_t));
4094 /*
4095 * Move records down in the array so the
4096 * new page can use erp_idx.
4097 */
4098 erp = ifp->if_u1.if_ext_irec;
4099 for (i = nlists - 1; i > erp_idx; i--) {
4100 memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
4101 }
4102 ASSERT(i == erp_idx);
4103
4104 /* Initialize new extent record */
4105 erp = ifp->if_u1.if_ext_irec;
4106 erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
4107 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
4108 memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
4109 erp[erp_idx].er_extcount = 0;
4110 erp[erp_idx].er_extoff = erp_idx > 0 ?
4111 erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
4112 	return &erp[erp_idx];
4113 }
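
/*
 * Sketch of the slot-opening loop above: after the array has grown by
 * one entry, slide entries [at, nentries) up one position so the new
 * entry can live at index "at".  A single overlapping memmove is
 * equivalent to the per-entry loop; plain ints stand in for
 * xfs_ext_irec_t entries.
 */
static void
sketch_open_slot(
	int		*arr,		/* has room for nentries + 1 */
	int		nentries,	/* entries in use before insert */
	int		at)		/* index the new entry takes */
{
	memmove(&arr[at + 1], &arr[at], (nentries - at) * sizeof(int));
	arr[at] = 0;			/* fresh, zeroed entry */
}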
4114
4115 /*
4116 * Remove a record from the indirection array.
4117 */
4118 void
4119 xfs_iext_irec_remove(
4120 xfs_ifork_t *ifp, /* inode fork pointer */
4121 int erp_idx) /* irec index to remove */
4122 {
4123 xfs_ext_irec_t *erp; /* indirection array pointer */
4124 int i; /* loop counter */
4125 int nlists; /* number of irec's (ex lists) */
4126
4127 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4128 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4129 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4130 if (erp->er_extbuf) {
4131 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
4132 -erp->er_extcount);
4133 kmem_free(erp->er_extbuf);
4134 }
4135 /* Compact extent records */
4136 erp = ifp->if_u1.if_ext_irec;
4137 for (i = erp_idx; i < nlists - 1; i++) {
4138 memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
4139 }
4140 /*
4141 * Manually free the last extent record from the indirection
4142 * array. A call to xfs_iext_realloc_indirect() with a size
4143 * of zero would result in a call to xfs_iext_destroy() which
4144 * would in turn call this function again, creating a nasty
4145 * infinite loop.
4146 */
4147 if (--nlists) {
4148 xfs_iext_realloc_indirect(ifp,
4149 nlists * sizeof(xfs_ext_irec_t));
4150 } else {
4151 kmem_free(ifp->if_u1.if_ext_irec);
4152 }
4153 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
4154 }
4155
4156 /*
4157 * This is called to clean up large amounts of unused memory allocated
4158 * by the indirection array. Before compacting anything though, verify
4159 * that the indirection array is still needed and switch back to the
4160 * linear extent list (or even the inline buffer) if possible. The
4161 * compaction policy is as follows:
4162 *
4163 * Full Compaction: Extents fit into a single page (or inline buffer)
4164 * Partial Compaction: Extents occupy less than 50% of allocated space
4165 * No Compaction: Extents occupy at least 50% of allocated space
4166 */
4167 void
4168 xfs_iext_irec_compact(
4169 xfs_ifork_t *ifp) /* inode fork pointer */
4170 {
4171 xfs_extnum_t nextents; /* number of extents in file */
4172 int nlists; /* number of irec's (ex lists) */
4173
4174 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4175 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4176 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4177
4178 if (nextents == 0) {
4179 xfs_iext_destroy(ifp);
4180 } else if (nextents <= XFS_INLINE_EXTS) {
4181 xfs_iext_indirect_to_direct(ifp);
4182 xfs_iext_direct_to_inline(ifp, nextents);
4183 } else if (nextents <= XFS_LINEAR_EXTS) {
4184 xfs_iext_indirect_to_direct(ifp);
4185 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
4186 xfs_iext_irec_compact_pages(ifp);
4187 }
4188 }
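
/*
 * Worked sketch of the policy above.  With inline_cap and page_cap
 * standing in for XFS_INLINE_EXTS and XFS_LINEAR_EXTS, classify what
 * the function would do for a given extent count and page count; the
 * return codes are illustrative only.
 */
enum {
	SK_DESTROY,		/* no extents left */
	SK_TO_INLINE,		/* full compaction into the inode */
	SK_TO_DIRECT,		/* full compaction into one page */
	SK_MERGE_PAGES,		/* partial compaction */
	SK_NONE
};

static int
sketch_compact_policy(
	int		nextents,
	int		nlists,
	int		inline_cap,
	int		page_cap)
{
	if (nextents == 0)
		return SK_DESTROY;
	if (nextents <= inline_cap)
		return SK_TO_INLINE;
	if (nextents <= page_cap)
		return SK_TO_DIRECT;
	if (nextents < (nlists * page_cap) >> 1)
		return SK_MERGE_PAGES;	/* under 50% utilization */
	return SK_NONE;
	/* e.g. 100 extents on 4 pages of 64: 100 < 128 -> merge pages */
}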
4189
4190 /*
4191 * Combine extents from neighboring extent pages.
4192 */
4193 void
4194 xfs_iext_irec_compact_pages(
4195 xfs_ifork_t *ifp) /* inode fork pointer */
4196 {
4197 xfs_ext_irec_t *erp, *erp_next;/* pointers to irec entries */
4198 int erp_idx = 0; /* indirection array index */
4199 int nlists; /* number of irec's (ex lists) */
4200
4201 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4202 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4203 while (erp_idx < nlists - 1) {
4204 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4205 erp_next = erp + 1;
4206 if (erp_next->er_extcount <=
4207 (XFS_LINEAR_EXTS - erp->er_extcount)) {
4208 memcpy(&erp->er_extbuf[erp->er_extcount],
4209 erp_next->er_extbuf, erp_next->er_extcount *
4210 sizeof(xfs_bmbt_rec_t));
4211 erp->er_extcount += erp_next->er_extcount;
4212 /*
4213 * Free page before removing extent record
4214 * so er_extoffs don't get modified in
4215 * xfs_iext_irec_remove.
4216 */
4217 kmem_free(erp_next->er_extbuf);
4218 erp_next->er_extbuf = NULL;
4219 xfs_iext_irec_remove(ifp, erp_idx + 1);
4220 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4221 } else {
4222 erp_idx++;
4223 }
4224 }
4225 }
4226
4227 /*
4228 * This is called to update the er_extoff field in the indirection
4229 * array when extents have been added or removed from one of the
4230 * extent lists. erp_idx contains the irec index to begin updating
4231 * at and ext_diff contains the number of extents that were added
4232 * or removed.
4233 */
4234 void
4235 xfs_iext_irec_update_extoffs(
4236 xfs_ifork_t *ifp, /* inode fork pointer */
4237 int erp_idx, /* irec index to update */
4238 int ext_diff) /* number of new extents */
4239 {
4240 int i; /* loop counter */
4241 	int		nlists;		/* number of irec's (ex lists) */
4242
4243 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4244 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4245 for (i = erp_idx; i < nlists; i++) {
4246 ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
4247 }
4248 }
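
/*
 * Sketch of the offset maintenance above: because er_extoff values
 * are a running prefix sum of the per-page counts, adding or removing
 * records on one page only requires applying a delta to every later
 * page's offset.  A plain array stands in for the irec entries.
 */
static void
sketch_update_offsets(
	int		*extoff,	/* er_extoff per page */
	int		npages,
	int		from,		/* first page to adjust */
	int		delta)		/* extents added (+) or removed (-) */
{
	int		i;

	for (i = from; i < npages; i++)
		extoff[i] += delta;
	/* e.g. extoff={0,4,9}, two records added to page 0:
	 * from=1, delta=2 -> {0,6,11} */
}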