/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"


kmem_zone_t     *xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
        xfs_mount_t     *mp,            /* file system mount structure */
        int             whichfork)      /* data or attr fork */
{
        int             level;          /* btree level */
        uint            maxblocks;      /* max blocks at this level */
        uint            maxleafents;    /* max leaf entries possible */
        int             maxrootrecs;    /* max records in root block */
        int             minleafrecs;    /* min records in leaf block */
        int             minnoderecs;    /* min records in node block */
        int             sz;             /* root block size */

        /*
         * The maximum number of extents in a file, hence the maximum
         * number of leaf entries, is controlled by the type of di_nextents
         * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
         * (a signed 16-bit number, xfs_aextnum_t).
         *
         * Note that we can no longer assume that if we are in ATTR1 that
         * the fork offset of all the inodes will be
         * (xfs_default_attroffset(ip) >> 3) because we could have mounted
         * with ATTR2 and then mounted back with ATTR1, keeping the
         * di_forkoff's fixed but probably at various positions.  Therefore,
         * for both ATTR1 and ATTR2 we have to assume the worst case scenario
         * of a minimum size available.
         */
        if (whichfork == XFS_DATA_FORK) {
                maxleafents = MAXEXTNUM;
                sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
        } else {
                maxleafents = MAXAEXTNUM;
                sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
        }
        maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
        minleafrecs = mp->m_bmap_dmnr[0];
        minnoderecs = mp->m_bmap_dmnr[1];
        maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
        for (level = 1; maxblocks > 1; level++) {
                if (maxblocks <= maxrootrecs)
                        maxblocks = 1;
                else
                        maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
        }
        mp->m_bm_maxlevels[whichfork] = level;
}
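
/*
 * A worked sketch of the sizing loop above (the numbers are illustrative,
 * not from any particular filesystem geometry): if maxleafents were 2^31-1
 * and each leaf held at least minleafrecs records, the level above the
 * leaves would need ceil(maxleafents / minleafrecs) blocks.  Every further
 * level divides the block count by minnoderecs, rounding up, and the loop
 * stops once everything fits in one block -- either the in-inode root
 * (at most maxrootrecs records) or a single node.  The resulting level
 * count is the worst-case tree height used elsewhere for reservations.
 */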

STATIC int                              /* error */
xfs_bmbt_lookup_eq(
        struct xfs_btree_cur    *cur,
        xfs_fileoff_t           off,
        xfs_fsblock_t           bno,
        xfs_filblks_t           len,
        int                     *stat)  /* success/failure */
{
        cur->bc_rec.b.br_startoff = off;
        cur->bc_rec.b.br_startblock = bno;
        cur->bc_rec.b.br_blockcount = len;
        return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int                              /* error */
xfs_bmbt_lookup_ge(
        struct xfs_btree_cur    *cur,
        xfs_fileoff_t           off,
        xfs_fsblock_t           bno,
        xfs_filblks_t           len,
        int                     *stat)  /* success/failure */
{
        cur->bc_rec.b.br_startoff = off;
        cur->bc_rec.b.br_startblock = bno;
        cur->bc_rec.b.br_blockcount = len;
        return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
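
/*
 * Both lookup helpers stage the search key in the cursor's scratch record
 * (cur->bc_rec.b) and defer to the generic xfs_btree_lookup(): EQ succeeds
 * only on an exact match of the staged key, while GE positions the cursor
 * at the first record at or after it.  *stat reports whether a matching
 * record was found (1) or not (0); the return value is only for errors.
 */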

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
        return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
                XFS_IFORK_NEXTENTS(ip, whichfork) >
                        XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
        return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
                XFS_IFORK_NEXTENTS(ip, whichfork) <=
                        XFS_IFORK_MAXEXT(ip, whichfork);
}
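
/*
 * The two predicates above are exact complements on the extent count: once
 * the count exceeds XFS_IFORK_MAXEXT() -- the number of extent records the
 * inode fork itself can hold -- the fork must be converted to btree format,
 * and as soon as it drops back to that limit the btree can be collapsed to
 * extent format again (see xfs_bmap_btree_to_extents() below).
 */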

/*
 * Update the record referred to by cur to the value given
 * by [off, bno, len, state].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
        struct xfs_btree_cur    *cur,
        xfs_fileoff_t           off,
        xfs_fsblock_t           bno,
        xfs_filblks_t           len,
        xfs_exntst_t            state)
{
        union xfs_btree_rec     rec;

        xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
        return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
        xfs_inode_t     *ip,            /* incore inode pointer */
        xfs_filblks_t   len)            /* delayed extent length */
{
        int             level;          /* btree level number */
        int             maxrecs;        /* maximum record count at this level */
        xfs_mount_t     *mp;            /* mount structure */
        xfs_filblks_t   rval;           /* return value */

        mp = ip->i_mount;
        maxrecs = mp->m_bmap_dmxr[0];
        for (level = 0, rval = 0;
             level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
             level++) {
                len += maxrecs - 1;
                do_div(len, maxrecs);
                rval += len;
                if (len == 1)
                        return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
                                level - 1;
                if (level == 0)
                        maxrecs = mp->m_bmap_dmxr[1];
        }
        return rval;
}
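
/*
 * Example of the computation above, with made-up numbers (the real limits
 * come from mp->m_bmap_dmxr[]): for len = 1000 and maxrecs = 100, the first
 * pass rounds up and divides to get 10 leaf blocks, the second pass yields
 * 1 node block and returns early, and every remaining level up to the data
 * fork's maximum height contributes one more block to the total.
 */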

/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
        struct xfs_inode        *ip)
{
        struct xfs_mount        *mp = ip->i_mount;
        uint                    offset;

        if (mp->m_sb.sb_inodesize == 256) {
                offset = XFS_LITINO(mp, ip->i_d.di_version) -
                                XFS_BMDR_SPACE_CALC(MINABTPTRS);
        } else {
                offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
        }

        ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
        return offset;
}
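
/*
 * For the smallest (256 byte) inodes the split is as tight as possible: the
 * attribute fork gets just enough room for a minimal btree root
 * (XFS_BMDR_SPACE_CALC(MINABTPTRS)) and the data fork keeps the rest of the
 * literal area.  For larger inodes the data fork instead gets room for six
 * such minimal roots and the attribute fork takes the remainder.  The
 * ASSERT confirms the offset stays inside the inode literal area.
 */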

/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
        xfs_inode_t     *ip,
        int             whichfork)
{
        if (whichfork == XFS_ATTR_FORK &&
            ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
            ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
            ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
                uint    dfl_forkoff = xfs_default_attroffset(ip) >> 3;

                if (dfl_forkoff > ip->i_d.di_forkoff)
                        ip->i_d.di_forkoff = dfl_forkoff;
        }
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
        struct xfs_btree_cur    *cur,
        xfs_fsblock_t           bno)
{
        struct xfs_log_item_desc *lidp;
        int                     i;

        if (!cur)
                return NULL;

        for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
                if (!cur->bc_bufs[i])
                        break;
                if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
                        return cur->bc_bufs[i];
        }

        /* Chase down all the log items to see if the bp is there */
        list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
                struct xfs_buf_log_item *bip;
                bip = (struct xfs_buf_log_item *)lidp->lid_item;
                if (bip->bli_item.li_type == XFS_LI_BUF &&
                    XFS_BUF_ADDR(bip->bli_buf) == bno)
                        return bip->bli_buf;
        }

        return NULL;
}

STATIC void
xfs_check_block(
        struct xfs_btree_block  *block,
        xfs_mount_t             *mp,
        int                     root,
        short                   sz)
{
        int                     i, j, dmxr;
        __be64                  *pp, *thispa;   /* pointer to block address */
        xfs_bmbt_key_t          *prevp, *keyp;

        ASSERT(be16_to_cpu(block->bb_level) > 0);

        prevp = NULL;
        for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
                dmxr = mp->m_bmap_dmxr[0];
                keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

                if (prevp) {
                        ASSERT(be64_to_cpu(prevp->br_startoff) <
                               be64_to_cpu(keyp->br_startoff));
                }
                prevp = keyp;

                /*
                 * Compare the block numbers to see if there are dups.
                 */
                if (root)
                        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
                else
                        pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

                for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
                        if (root)
                                thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
                        else
                                thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
                        if (*thispa == *pp) {
                                xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
                                        __func__, j, i,
                                        (unsigned long long)be64_to_cpu(*thispa));
                                panic("%s: ptrs are equal in node\n",
                                        __func__);
                        }
                }
        }
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
        xfs_btree_cur_t         *cur,   /* btree cursor or null */
        xfs_inode_t             *ip,    /* incore inode pointer */
        int                     whichfork)      /* data or attr fork */
{
        struct xfs_btree_block  *block; /* current btree block */
        xfs_fsblock_t           bno;    /* block # of "block" */
        xfs_buf_t               *bp;    /* buffer for "block" */
        int                     error;  /* error return value */
        xfs_extnum_t            i = 0, j;       /* index into the extents list */
        xfs_ifork_t             *ifp;   /* fork structure */
        int                     level;  /* btree level, for checking */
        xfs_mount_t             *mp;    /* file system mount structure */
        __be64                  *pp;    /* pointer to block address */
        xfs_bmbt_rec_t          *ep;    /* pointer to current extent */
        xfs_bmbt_rec_t          last = {0, 0}; /* last extent in prev block */
        xfs_bmbt_rec_t          *nextp; /* pointer to next extent */
        int                     bp_release = 0;

        if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
                return;
        }

        /* skip large extent count inodes */
        if (ip->i_d.di_nextents > 10000)
                return;

        bno = NULLFSBLOCK;
        mp = ip->i_mount;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        block = ifp->if_broot;
        /*
         * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
         */
        level = be16_to_cpu(block->bb_level);
        ASSERT(level > 0);
        xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
        bno = be64_to_cpu(*pp);

        ASSERT(bno != NULLFSBLOCK);
        ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
        ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

        /*
         * Go down the tree until leaf level is reached, following the first
         * pointer (leftmost) at each level.
         */
        while (level-- > 0) {
                /* See if buf is in cur first */
                bp_release = 0;
                bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
                if (!bp) {
                        bp_release = 1;
                        error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                goto error_norelse;
                }
                block = XFS_BUF_TO_BLOCK(bp);
                if (level == 0)
                        break;

                /*
                 * Check this block for basic sanity (increasing keys and
                 * no duplicate blocks).
                 */

                xfs_check_block(block, mp, 0, 0);
                pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
                bno = be64_to_cpu(*pp);
                XFS_WANT_CORRUPTED_GOTO(mp,
                                        XFS_FSB_SANITY_CHECK(mp, bno), error0);
                if (bp_release) {
                        bp_release = 0;
                        xfs_trans_brelse(NULL, bp);
                }
        }

        /*
         * Here with bp and block set to the leftmost leaf node in the tree.
         */
        i = 0;

        /*
         * Loop over all leaf nodes checking that all extents are in the right order.
         */
        for (;;) {
                xfs_fsblock_t   nextbno;
                xfs_extnum_t    num_recs;


                num_recs = xfs_btree_get_numrecs(block);

                /*
                 * Read-ahead the next leaf block, if any.
                 */

                nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

                /*
                 * Check all the extents to make sure they are OK.
                 * If we had a previous block, the last entry should
                 * conform with the first entry in this one.
                 */

                ep = XFS_BMBT_REC_ADDR(mp, block, 1);
                if (i) {
                        ASSERT(xfs_bmbt_disk_get_startoff(&last) +
                               xfs_bmbt_disk_get_blockcount(&last) <=
                               xfs_bmbt_disk_get_startoff(ep));
                }
                for (j = 1; j < num_recs; j++) {
                        nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
                        ASSERT(xfs_bmbt_disk_get_startoff(ep) +
                               xfs_bmbt_disk_get_blockcount(ep) <=
                               xfs_bmbt_disk_get_startoff(nextp));
                        ep = nextp;
                }

                last = *ep;
                i += num_recs;
                if (bp_release) {
                        bp_release = 0;
                        xfs_trans_brelse(NULL, bp);
                }
                bno = nextbno;
                /*
                 * If we've reached the end, stop.
                 */
                if (bno == NULLFSBLOCK)
                        break;

                bp_release = 0;
                bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
                if (!bp) {
                        bp_release = 1;
                        error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                goto error_norelse;
                }
                block = XFS_BUF_TO_BLOCK(bp);
        }

        return;

error0:
        xfs_warn(mp, "%s: at error0", __func__);
        if (bp_release)
                xfs_trans_brelse(NULL, bp);
error_norelse:
        xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
                __func__, i);
        panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
        return;
}

/*
 * Add bmap trace insert entries for all the contents of the extent records.
 */
void
xfs_bmap_trace_exlist(
        xfs_inode_t     *ip,            /* incore inode pointer */
        xfs_extnum_t    cnt,            /* count of entries in the list */
        int             whichfork,      /* data or attr fork */
        unsigned long   caller_ip)
{
        xfs_extnum_t    idx;            /* extent record index */
        xfs_ifork_t     *ifp;           /* inode fork pointer */
        int             state = 0;

        if (whichfork == XFS_ATTR_FORK)
                state |= BMAP_ATTRFORK;

        ifp = XFS_IFORK_PTR(ip, whichfork);
        ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
        for (idx = 0; idx < cnt; idx++)
                trace_xfs_extlist(ip, idx, whichfork, caller_ip);
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
        xfs_fileoff_t           bno,
        xfs_filblks_t           len,
        int                     flags,
        xfs_bmbt_irec_t         *mval,
        int                     nmap,
        int                     ret_nmap)
{
        int                     i;      /* index to map values */

        ASSERT(ret_nmap <= nmap);

        for (i = 0; i < ret_nmap; i++) {
                ASSERT(mval[i].br_blockcount > 0);
                if (!(flags & XFS_BMAPI_ENTIRE)) {
                        ASSERT(mval[i].br_startoff >= bno);
                        ASSERT(mval[i].br_blockcount <= len);
                        ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
                               bno + len);
                } else {
                        ASSERT(mval[i].br_startoff < bno + len);
                        ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
                               bno);
                }
                ASSERT(i == 0 ||
                       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
                       mval[i].br_startoff);
                ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
                       mval[i].br_startblock != HOLESTARTBLOCK);
                ASSERT(mval[i].br_state == XFS_EXT_NORM ||
                       mval[i].br_state == XFS_EXT_UNWRITTEN);
        }
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)         do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
#endif /* DEBUG */

/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be freed at transaction end.
 * The new entry is added at the head of the list.
 */
void
xfs_bmap_add_free(
        struct xfs_mount        *mp,            /* mount point structure */
        struct xfs_bmap_free    *flist,         /* list of extents */
        xfs_fsblock_t           bno,            /* fs block number of extent */
        xfs_filblks_t           len)            /* length of extent */
{
        struct xfs_bmap_free_item       *new;   /* new element */
#ifdef DEBUG
        xfs_agnumber_t          agno;
        xfs_agblock_t           agbno;

        ASSERT(bno != NULLFSBLOCK);
        ASSERT(len > 0);
        ASSERT(len <= MAXEXTLEN);
        ASSERT(!isnullstartblock(bno));
        agno = XFS_FSB_TO_AGNO(mp, bno);
        agbno = XFS_FSB_TO_AGBNO(mp, bno);
        ASSERT(agno < mp->m_sb.sb_agcount);
        ASSERT(agbno < mp->m_sb.sb_agblocks);
        ASSERT(len < mp->m_sb.sb_agblocks);
        ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
        ASSERT(xfs_bmap_free_item_zone != NULL);
        new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
        new->xbfi_startblock = bno;
        new->xbfi_blockcount = (xfs_extlen_t)len;
        list_add(&new->xbfi_list, &flist->xbf_flist);
        flist->xbf_count++;
}
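
/*
 * The DEBUG-only assertions above pin down what makes an extent freeable:
 * it must be a real (non-delayed) allocation of at least one and at most
 * MAXEXTLEN blocks, and it must lie entirely within a single allocation
 * group -- extents queued for freeing never cross AG boundaries.
 */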

/*
 * Remove the entry "free" from the free item list and release its memory.
 */
void
xfs_bmap_del_free(
        struct xfs_bmap_free            *flist, /* free item list header */
        struct xfs_bmap_free_item       *free)  /* list item to be freed */
{
        list_del(&free->xbfi_list);
        flist->xbf_count--;
        kmem_zone_free(xfs_bmap_free_item_zone, free);
}

/*
 * Free up any items left in the list.
 */
void
xfs_bmap_cancel(
        struct xfs_bmap_free    *flist) /* list of bmap_free_items */
{
        struct xfs_bmap_free_item       *free;  /* free list item */

        if (flist->xbf_count == 0)
                return;
        while (!list_empty(&flist->xbf_flist)) {
                free = list_first_entry(&flist->xbf_flist,
                                struct xfs_bmap_free_item, xbfi_list);
                xfs_bmap_del_free(flist, free);
        }
        ASSERT(flist->xbf_count == 0);
}

/*
 * Inode fork format manipulation functions
 */

/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int                              /* error */
xfs_bmap_btree_to_extents(
        xfs_trans_t             *tp,    /* transaction pointer */
        xfs_inode_t             *ip,    /* incore inode pointer */
        xfs_btree_cur_t         *cur,   /* btree cursor */
        int                     *logflagsp, /* inode logging flags */
        int                     whichfork)  /* data or attr fork */
{
        /* REFERENCED */
        struct xfs_btree_block  *cblock;/* child btree block */
        xfs_fsblock_t           cbno;   /* child block number */
        xfs_buf_t               *cbp;   /* child block's buffer */
        int                     error;  /* error return value */
        xfs_ifork_t             *ifp;   /* inode fork data */
        xfs_mount_t             *mp;    /* mount point structure */
        __be64                  *pp;    /* ptr to block address */
        struct xfs_btree_block  *rblock;/* root btree block */

        mp = ip->i_mount;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        ASSERT(ifp->if_flags & XFS_IFEXTENTS);
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
        rblock = ifp->if_broot;
        ASSERT(be16_to_cpu(rblock->bb_level) == 1);
        ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
        ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
        cbno = be64_to_cpu(*pp);
        *logflagsp = 0;
#ifdef DEBUG
        if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
                return error;
#endif
        error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
                                &xfs_bmbt_buf_ops);
        if (error)
                return error;
        cblock = XFS_BUF_TO_BLOCK(cbp);
        if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
                return error;
        xfs_bmap_add_free(mp, cur->bc_private.b.flist, cbno, 1);
        ip->i_d.di_nblocks--;
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
        xfs_trans_binval(tp, cbp);
        if (cur->bc_bufs[0] == cbp)
                cur->bc_bufs[0] = NULL;
        xfs_iroot_realloc(ip, -1, whichfork);
        ASSERT(ifp->if_broot == NULL);
        ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
        XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
        *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
        return 0;
}
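
/*
 * Note that no records are copied in this conversion: with a single-leaf
 * btree the extents are already in the in-core list (XFS_IFEXTENTS is
 * asserted on entry), so the work is just freeing the one child block,
 * shrinking the root out of the inode and flipping the fork format.  The
 * returned logflags make the caller log both the inode core (for the
 * format change) and the in-inode extent list.
 */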

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int                                      /* error */
xfs_bmap_extents_to_btree(
        xfs_trans_t             *tp,            /* transaction pointer */
        xfs_inode_t             *ip,            /* incore inode pointer */
        xfs_fsblock_t           *firstblock,    /* first-block-allocated */
        xfs_bmap_free_t         *flist,         /* blocks freed in xaction */
        xfs_btree_cur_t         **curp,         /* cursor returned to caller */
        int                     wasdel,         /* converting a delayed alloc */
        int                     *logflagsp,     /* inode logging flags */
        int                     whichfork)      /* data or attr fork */
{
        struct xfs_btree_block  *ablock;        /* allocated (child) bt block */
        xfs_buf_t               *abp;           /* buffer for ablock */
        xfs_alloc_arg_t         args;           /* allocation arguments */
        xfs_bmbt_rec_t          *arp;           /* child record pointer */
        struct xfs_btree_block  *block;         /* btree root block */
        xfs_btree_cur_t         *cur;           /* bmap btree cursor */
        xfs_bmbt_rec_host_t     *ep;            /* extent record pointer */
        int                     error;          /* error return value */
        xfs_extnum_t            i, cnt;         /* extent record index */
        xfs_ifork_t             *ifp;           /* inode fork pointer */
        xfs_bmbt_key_t          *kp;            /* root block key pointer */
        xfs_mount_t             *mp;            /* mount structure */
        xfs_extnum_t            nextents;       /* number of file extents */
        xfs_bmbt_ptr_t          *pp;            /* root block address pointer */

        mp = ip->i_mount;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

        /*
         * Make space in the inode incore.
         */
        xfs_iroot_realloc(ip, 1, whichfork);
        ifp->if_flags |= XFS_IFBROOT;

        /*
         * Fill in the root.
         */
        block = ifp->if_broot;
        if (xfs_sb_version_hascrc(&mp->m_sb))
                xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
                                 XFS_BMAP_CRC_MAGIC, 1, 1, ip->i_ino,
                                 XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
        else
                xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
                                 XFS_BMAP_MAGIC, 1, 1, ip->i_ino,
                                 XFS_BTREE_LONG_PTRS);

        /*
         * Need a cursor.  Can't allocate until bb_level is filled in.
         */
        cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
        cur->bc_private.b.firstblock = *firstblock;
        cur->bc_private.b.flist = flist;
        cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
        /*
         * Convert to a btree with two levels, one record in root.
         */
        XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
        memset(&args, 0, sizeof(args));
        args.tp = tp;
        args.mp = mp;
        args.firstblock = *firstblock;
        if (*firstblock == NULLFSBLOCK) {
                args.type = XFS_ALLOCTYPE_START_BNO;
                args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
        } else if (flist->xbf_low) {
                args.type = XFS_ALLOCTYPE_START_BNO;
                args.fsbno = *firstblock;
        } else {
                args.type = XFS_ALLOCTYPE_NEAR_BNO;
                args.fsbno = *firstblock;
        }
        args.minlen = args.maxlen = args.prod = 1;
        args.wasdel = wasdel;
        *logflagsp = 0;
        if ((error = xfs_alloc_vextent(&args))) {
                xfs_iroot_realloc(ip, -1, whichfork);
                xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
                return error;
        }
        /*
         * Allocation can't fail, the space was reserved.
         */
        ASSERT(args.fsbno != NULLFSBLOCK);
        ASSERT(*firstblock == NULLFSBLOCK ||
               args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
               (flist->xbf_low &&
                args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
        *firstblock = cur->bc_private.b.firstblock = args.fsbno;
        cur->bc_private.b.allocated++;
        ip->i_d.di_nblocks++;
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
        abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
        /*
         * Fill in the child block.
         */
        abp->b_ops = &xfs_bmbt_buf_ops;
        ablock = XFS_BUF_TO_BLOCK(abp);
        if (xfs_sb_version_hascrc(&mp->m_sb))
                xfs_btree_init_block_int(mp, ablock, abp->b_bn,
                                XFS_BMAP_CRC_MAGIC, 0, 0, ip->i_ino,
                                XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
        else
                xfs_btree_init_block_int(mp, ablock, abp->b_bn,
                                XFS_BMAP_MAGIC, 0, 0, ip->i_ino,
                                XFS_BTREE_LONG_PTRS);

        arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
        nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
        for (cnt = i = 0; i < nextents; i++) {
                ep = xfs_iext_get_ext(ifp, i);
                if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
                        arp->l0 = cpu_to_be64(ep->l0);
                        arp->l1 = cpu_to_be64(ep->l1);
                        arp++; cnt++;
                }
        }
        ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
        xfs_btree_set_numrecs(ablock, cnt);

        /*
         * Fill in the root key and pointer.
         */
        kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
        arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
        kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
        pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
                                                be16_to_cpu(block->bb_level)));
        *pp = cpu_to_be64(args.fsbno);

        /*
         * Do all this logging at the end so that
         * the root is at the right level.
         */
        xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
        xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
        ASSERT(*curp == NULL);
        *curp = cur;
        *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
        return 0;
}
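
/*
 * A note on the three allocation cases above: with no prior allocation in
 * the transaction (*firstblock == NULLFSBLOCK) the new block is placed near
 * the inode; in low space mode (flist->xbf_low) the search starts at
 * *firstblock and may move into higher AGs; otherwise the block is
 * allocated near *firstblock.  The ASSERT after xfs_alloc_vextent() states
 * the resulting invariant: the block comes from the same AG as *firstblock,
 * or from a higher AG only when in low space mode.
 */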

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
        struct xfs_inode        *ip,
        int                     whichfork)
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);

        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
        ASSERT(ifp->if_bytes == 0);
        ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

        xfs_bmap_forkoff_reset(ip, whichfork);
        ifp->if_flags &= ~XFS_IFINLINE;
        ifp->if_flags |= XFS_IFEXTENTS;
        XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}


STATIC int                              /* error */
xfs_bmap_local_to_extents(
        xfs_trans_t     *tp,            /* transaction pointer */
        xfs_inode_t     *ip,            /* incore inode pointer */
        xfs_fsblock_t   *firstblock,    /* first block allocated in xaction */
        xfs_extlen_t    total,          /* total blocks needed by transaction */
        int             *logflagsp,     /* inode logging flags */
        int             whichfork,
        void            (*init_fn)(struct xfs_trans *tp,
                                   struct xfs_buf *bp,
                                   struct xfs_inode *ip,
                                   struct xfs_ifork *ifp))
{
        int             error = 0;
        int             flags;          /* logging flags returned */
        xfs_ifork_t     *ifp;           /* inode fork pointer */
        xfs_alloc_arg_t args;           /* allocation arguments */
        xfs_buf_t       *bp;            /* buffer for extent block */
        xfs_bmbt_rec_host_t *ep;        /* extent record pointer */

        /*
         * We don't want to deal with the case of keeping inode data inline yet.
         * So sending the data fork of a regular inode is invalid.
         */
        ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
        ifp = XFS_IFORK_PTR(ip, whichfork);
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

        if (!ifp->if_bytes) {
                xfs_bmap_local_to_extents_empty(ip, whichfork);
                flags = XFS_ILOG_CORE;
                goto done;
        }

        flags = 0;
        error = 0;
        ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
                                                                XFS_IFINLINE);
        memset(&args, 0, sizeof(args));
        args.tp = tp;
        args.mp = ip->i_mount;
        args.firstblock = *firstblock;
        /*
         * Allocate a block.  We know we need only one, since the
         * file currently fits in an inode.
         */
        if (*firstblock == NULLFSBLOCK) {
                args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
                args.type = XFS_ALLOCTYPE_START_BNO;
        } else {
                args.fsbno = *firstblock;
                args.type = XFS_ALLOCTYPE_NEAR_BNO;
        }
        args.total = total;
        args.minlen = args.maxlen = args.prod = 1;
        error = xfs_alloc_vextent(&args);
        if (error)
                goto done;

        /* Can't fail, the space was reserved. */
        ASSERT(args.fsbno != NULLFSBLOCK);
        ASSERT(args.len == 1);
        *firstblock = args.fsbno;
        bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

        /*
         * Initialize the block, copy the data and log the remote buffer.
         *
         * The callout is responsible for logging because the remote format
         * might differ from the local format and thus we don't know how much to
         * log here. Note that init_fn must also set the buffer log item type
         * correctly.
         */
        init_fn(tp, bp, ip, ifp);

        /* account for the change in fork size */
        xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
        xfs_bmap_local_to_extents_empty(ip, whichfork);
        flags |= XFS_ILOG_CORE;

        xfs_iext_add(ifp, 0, 1);
        ep = xfs_iext_get_ext(ifp, 0);
        xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
        trace_xfs_bmap_post_update(ip, 0,
                        whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
                        _THIS_IP_);
        XFS_IFORK_NEXT_SET(ip, whichfork, 1);
        ip->i_d.di_nblocks = 1;
        xfs_trans_mod_dquot_byino(tp, ip,
                XFS_TRANS_DQ_BCOUNT, 1L);
        flags |= xfs_ilog_fext(whichfork);

done:
        *logflagsp = flags;
        return error;
}
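
/*
 * Callers supply a format-specific init_fn to fill and log the new block;
 * for instance, xfs_bmap_add_attrfork_local() below passes
 * xfs_symlink_local_to_remote() when converting an inline symlink.  Once
 * init_fn has run, the inline data is released from the inode and replaced
 * by a single-block extent record at file offset zero.
 */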

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int                                      /* error */
xfs_bmap_add_attrfork_btree(
        xfs_trans_t             *tp,            /* transaction pointer */
        xfs_inode_t             *ip,            /* incore inode pointer */
        xfs_fsblock_t           *firstblock,    /* first block allocated */
        xfs_bmap_free_t         *flist,         /* blocks to free at commit */
        int                     *flags)         /* inode logging flags */
{
        xfs_btree_cur_t         *cur;           /* btree cursor */
        int                     error;          /* error return value */
        xfs_mount_t             *mp;            /* file system mount struct */
        int                     stat;           /* newroot status */

        mp = ip->i_mount;
        if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
                *flags |= XFS_ILOG_DBROOT;
        else {
                cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
                cur->bc_private.b.flist = flist;
                cur->bc_private.b.firstblock = *firstblock;
                if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
                        goto error0;
                /* must be at least one entry */
                XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
                if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
                        goto error0;
                if (stat == 0) {
                        xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
                        return -ENOSPC;
                }
                *firstblock = cur->bc_private.b.firstblock;
                cur->bc_private.b.allocated = 0;
                xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
        }
        return 0;
error0:
        xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
        return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int                                      /* error */
xfs_bmap_add_attrfork_extents(
        xfs_trans_t             *tp,            /* transaction pointer */
        xfs_inode_t             *ip,            /* incore inode pointer */
        xfs_fsblock_t           *firstblock,    /* first block allocated */
        xfs_bmap_free_t         *flist,         /* blocks to free at commit */
        int                     *flags)         /* inode logging flags */
{
        xfs_btree_cur_t         *cur;           /* bmap btree cursor */
        int                     error;          /* error return value */

        if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
                return 0;
        cur = NULL;
        error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
                flags, XFS_DATA_FORK);
        if (cur) {
                cur->bc_private.b.allocated = 0;
                xfs_btree_del_cursor(cur,
                        error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
        }
        return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting; others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int                                      /* error */
xfs_bmap_add_attrfork_local(
        xfs_trans_t             *tp,            /* transaction pointer */
        xfs_inode_t             *ip,            /* incore inode pointer */
        xfs_fsblock_t           *firstblock,    /* first block allocated */
        xfs_bmap_free_t         *flist,         /* blocks to free at commit */
        int                     *flags)         /* inode logging flags */
{
        xfs_da_args_t           dargs;          /* args for dir/attr code */

        if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
                return 0;

        if (S_ISDIR(VFS_I(ip)->i_mode)) {
                memset(&dargs, 0, sizeof(dargs));
                dargs.geo = ip->i_mount->m_dir_geo;
                dargs.dp = ip;
                dargs.firstblock = firstblock;
                dargs.flist = flist;
                dargs.total = dargs.geo->fsbcount;
                dargs.whichfork = XFS_DATA_FORK;
                dargs.trans = tp;
                return xfs_dir2_sf_to_block(&dargs);
        }

        if (S_ISLNK(VFS_I(ip)->i_mode))
                return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
                                                 flags, XFS_DATA_FORK,
                                                 xfs_symlink_local_to_remote);

        /* should only be called for types that support local format data */
        ASSERT(0);
        return -EFSCORRUPTED;
}

/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int                                             /* error code */
xfs_bmap_add_attrfork(
        xfs_inode_t             *ip,            /* incore inode pointer */
        int                     size,           /* space new attribute needs */
        int                     rsvd)           /* xact may use reserved blks */
{
        xfs_fsblock_t           firstblock;     /* 1st block/ag allocated */
        xfs_bmap_free_t         flist;          /* freed extent records */
        xfs_mount_t             *mp;            /* mount structure */
        xfs_trans_t             *tp;            /* transaction pointer */
        int                     blks;           /* space reservation */
        int                     version = 1;    /* superblock attr version */
        int                     logflags;       /* logging flags */
        int                     error;          /* error return value */

        ASSERT(XFS_IFORK_Q(ip) == 0);

        mp = ip->i_mount;
        ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

        blks = XFS_ADDAFORK_SPACE_RES(mp);

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
                        rsvd ? XFS_TRANS_RESERVE : 0, &tp);
        if (error)
                return error;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
                        XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
                        XFS_QMOPT_RES_REGBLKS);
        if (error)
                goto trans_cancel;
        if (XFS_IFORK_Q(ip))
                goto trans_cancel;
        if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
                /*
                 * For inodes coming from pre-6.2 filesystems.
                 */
                ASSERT(ip->i_d.di_aformat == 0);
                ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
        }
        ASSERT(ip->i_d.di_anextents == 0);

        xfs_trans_ijoin(tp, ip, 0);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        switch (ip->i_d.di_format) {
        case XFS_DINODE_FMT_DEV:
                ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
                break;
        case XFS_DINODE_FMT_UUID:
                ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
                break;
        case XFS_DINODE_FMT_LOCAL:
        case XFS_DINODE_FMT_EXTENTS:
        case XFS_DINODE_FMT_BTREE:
                ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
                if (!ip->i_d.di_forkoff)
                        ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
                else if (mp->m_flags & XFS_MOUNT_ATTR2)
                        version = 2;
                break;
        default:
                ASSERT(0);
                error = -EINVAL;
                goto trans_cancel;
        }

        ASSERT(ip->i_afp == NULL);
        ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
        ip->i_afp->if_flags = XFS_IFEXTENTS;
        logflags = 0;
        xfs_bmap_init(&flist, &firstblock);
        switch (ip->i_d.di_format) {
        case XFS_DINODE_FMT_LOCAL:
                error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
                        &logflags);
                break;
        case XFS_DINODE_FMT_EXTENTS:
                error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
                        &flist, &logflags);
                break;
        case XFS_DINODE_FMT_BTREE:
                error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
                        &logflags);
                break;
        default:
                error = 0;
                break;
        }
        if (logflags)
                xfs_trans_log_inode(tp, ip, logflags);
        if (error)
                goto bmap_cancel;
        if (!xfs_sb_version_hasattr(&mp->m_sb) ||
           (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
                bool log_sb = false;

                spin_lock(&mp->m_sb_lock);
                if (!xfs_sb_version_hasattr(&mp->m_sb)) {
                        xfs_sb_version_addattr(&mp->m_sb);
                        log_sb = true;
                }
                if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
                        xfs_sb_version_addattr2(&mp->m_sb);
                        log_sb = true;
                }
                spin_unlock(&mp->m_sb_lock);
                if (log_sb)
                        xfs_log_sb(tp);
        }

        error = xfs_bmap_finish(&tp, &flist, NULL);
        if (error)
                goto bmap_cancel;
        error = xfs_trans_commit(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;

bmap_cancel:
        xfs_bmap_cancel(&flist);
trans_cancel:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}
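
/*
 * Summary of the sequence above: reserve log space and quota, re-check
 * under the inode lock that no attr fork appeared in the meantime, derive
 * the new di_forkoff from the data fork format, allocate the in-core attr
 * fork, convert the data fork if the smaller literal area no longer holds
 * it (local, extents and btree formats each have their own helper), bump
 * the superblock feature bits if this is the first attr (or first ATTR2)
 * fork on the filesystem, then finish the free list and commit.
 */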

/*
 * Internal and external extent tree search functions.
 */

/*
 * Read in the extents to if_extents.
 * All inode fields are set up by caller, we just traverse the btree
 * and copy the records in. If the file system cannot contain unwritten
 * extents, the records are checked for no "state" flags.
 */
int                                     /* error */
xfs_bmap_read_extents(
        xfs_trans_t             *tp,    /* transaction pointer */
        xfs_inode_t             *ip,    /* incore inode */
        int                     whichfork) /* data or attr fork */
{
        struct xfs_btree_block  *block; /* current btree block */
        xfs_fsblock_t           bno;    /* block # of "block" */
        xfs_buf_t               *bp;    /* buffer for "block" */
        int                     error;  /* error return value */
        xfs_exntfmt_t           exntf;  /* XFS_EXTFMT_NOSTATE, if checking */
        xfs_extnum_t            i, j;   /* index into the extents list */
        xfs_ifork_t             *ifp;   /* fork structure */
        int                     level;  /* btree level, for checking */
        xfs_mount_t             *mp;    /* file system mount structure */
        __be64                  *pp;    /* pointer to block address */
        /* REFERENCED */
        xfs_extnum_t            room;   /* number of entries there's room for */

        bno = NULLFSBLOCK;
        mp = ip->i_mount;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
                                        XFS_EXTFMT_INODE(ip);
        block = ifp->if_broot;
        /*
         * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
         */
        level = be16_to_cpu(block->bb_level);
        ASSERT(level > 0);
        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
        bno = be64_to_cpu(*pp);
        ASSERT(bno != NULLFSBLOCK);
        ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
        ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
        /*
         * Go down the tree until leaf level is reached, following the first
         * pointer (leftmost) at each level.
         */
        while (level-- > 0) {
                error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
                                XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
                if (error)
                        return error;
                block = XFS_BUF_TO_BLOCK(bp);
                if (level == 0)
                        break;
                pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
                bno = be64_to_cpu(*pp);
                XFS_WANT_CORRUPTED_GOTO(mp,
                        XFS_FSB_SANITY_CHECK(mp, bno), error0);
                xfs_trans_brelse(tp, bp);
        }
        /*
         * Here with bp and block set to the leftmost leaf node in the tree.
         */
        room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
        i = 0;
        /*
         * Loop over all leaf nodes.  Copy information to the extent records.
         */
        for (;;) {
                xfs_bmbt_rec_t  *frp;
                xfs_fsblock_t   nextbno;
                xfs_extnum_t    num_recs;
                xfs_extnum_t    start;

                num_recs = xfs_btree_get_numrecs(block);
                if (unlikely(i + num_recs > room)) {
                        ASSERT(i + num_recs <= room);
                        xfs_warn(ip->i_mount,
                                "corrupt dinode %Lu, (btree extents).",
                                (unsigned long long) ip->i_ino);
                        XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
                                XFS_ERRLEVEL_LOW, ip->i_mount, block);
                        goto error0;
                }
                /*
                 * Read-ahead the next leaf block, if any.
                 */
                nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
                if (nextbno != NULLFSBLOCK)
                        xfs_btree_reada_bufl(mp, nextbno, 1,
                                             &xfs_bmbt_buf_ops);
                /*
                 * Copy records into the extent records.
                 */
                frp = XFS_BMBT_REC_ADDR(mp, block, 1);
                start = i;
                for (j = 0; j < num_recs; j++, i++, frp++) {
                        xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
                        trp->l0 = be64_to_cpu(frp->l0);
                        trp->l1 = be64_to_cpu(frp->l1);
                }
                if (exntf == XFS_EXTFMT_NOSTATE) {
                        /*
                         * Check all attribute bmap btree records and
                         * any "older" data bmap btree records for a
                         * set bit in the "extent flag" position.
                         */
                        if (unlikely(xfs_check_nostate_extents(ifp,
                                        start, num_recs))) {
                                XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
                                                 XFS_ERRLEVEL_LOW,
                                                 ip->i_mount);
                                goto error0;
                        }
                }
                xfs_trans_brelse(tp, bp);
                bno = nextbno;
                /*
                 * If we've reached the end, stop.
                 */
                if (bno == NULLFSBLOCK)
                        break;
                error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
                                XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
                if (error)
                        return error;
                block = XFS_BUF_TO_BLOCK(bp);
        }
        ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
        ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
        XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
        return 0;
error0:
        xfs_trans_brelse(tp, bp);
        return -EFSCORRUPTED;
}


/*
 * Search the extent records for the entry containing block bno.
 * If bno lies in a hole, point to the next entry.  If bno lies
 * past eof, *eofp will be set, and *prevp will contain the last
 * entry (null if none).  Else, *lastxp will be set to the index
 * of the found entry; *gotp will contain the entry.
 */
STATIC xfs_bmbt_rec_host_t *            /* pointer to found extent entry */
xfs_bmap_search_multi_extents(
        xfs_ifork_t     *ifp,           /* inode fork pointer */
        xfs_fileoff_t   bno,            /* block number searched for */
        int             *eofp,          /* out: end of file found */
        xfs_extnum_t    *lastxp,        /* out: last extent index */
        xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
        xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
{
        xfs_bmbt_rec_host_t *ep;        /* extent record pointer */
        xfs_extnum_t    lastx;          /* last extent index */

        /*
         * Initialize the extent entry structure to catch access to
         * uninitialized br_startblock field.
         */
        gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
        gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
        gotp->br_state = XFS_EXT_INVALID;
        gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
        prevp->br_startoff = NULLFILEOFF;

        ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
        if (lastx > 0) {
                xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
        }
        if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
                xfs_bmbt_get_all(ep, gotp);
                *eofp = 0;
        } else {
                if (lastx > 0) {
                        *gotp = *prevp;
                }
                *eofp = 1;
                ep = NULL;
        }
        *lastxp = lastx;
        return ep;
}

/*
 * Search the extents list for the inode, for the extent containing bno.
 * If bno lies in a hole, point to the next entry.  If bno lies past eof,
 * *eofp will be set, and *prevp will contain the last entry (null if none).
 * Else, *lastxp will be set to the index of the found
 * entry; *gotp will contain the entry.
 */
STATIC xfs_bmbt_rec_host_t *            /* pointer to found extent entry */
xfs_bmap_search_extents(
        xfs_inode_t     *ip,            /* incore inode pointer */
        xfs_fileoff_t   bno,            /* block number searched for */
        int             fork,           /* data or attr fork */
        int             *eofp,          /* out: end of file found */
        xfs_extnum_t    *lastxp,        /* out: last extent index */
        xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
        xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
{
        xfs_ifork_t     *ifp;           /* inode fork pointer */
        xfs_bmbt_rec_host_t *ep;        /* extent record pointer */

        XFS_STATS_INC(ip->i_mount, xs_look_exlist);
        ifp = XFS_IFORK_PTR(ip, fork);

        ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);

        if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
                     !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
                xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
                                "Access to block zero in inode %llu "
                                "start_block: %llx start_off: %llx "
                                "blkcnt: %llx extent-state: %x lastx: %x",
                        (unsigned long long)ip->i_ino,
                        (unsigned long long)gotp->br_startblock,
                        (unsigned long long)gotp->br_startoff,
                        (unsigned long long)gotp->br_blockcount,
                        gotp->br_state, *lastxp);
                *lastxp = NULLEXTNUM;
                *eofp = 1;
                return NULL;
        }
        return ep;
}

/*
 * Returns the file-relative block number of the first unused block(s)
 * in the file with at least "len" logically contiguous blocks free.
 * This is the lowest-address hole if the file has holes, else the first block
 * past the end of file.
 * Return 0 if the file is currently local (in-inode).
 */
int                                             /* error */
xfs_bmap_first_unused(
        xfs_trans_t     *tp,                    /* transaction pointer */
        xfs_inode_t     *ip,                    /* incore inode */
        xfs_extlen_t    len,                    /* size of hole to find */
        xfs_fileoff_t   *first_unused,          /* unused block */
        int             whichfork)              /* data or attr fork */
{
        int             error;                  /* error return value */
        int             idx;                    /* extent record index */
        xfs_ifork_t     *ifp;                   /* inode fork pointer */
        xfs_fileoff_t   lastaddr;               /* last block number seen */
        xfs_fileoff_t   lowest;                 /* lowest useful block */
        xfs_fileoff_t   max;                    /* starting useful block */
        xfs_fileoff_t   off;                    /* offset for this block */
        xfs_extnum_t    nextents;               /* number of extent entries */

        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
               XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
               XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
        if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
                *first_unused = 0;
                return 0;
        }
        ifp = XFS_IFORK_PTR(ip, whichfork);
        if (!(ifp->if_flags & XFS_IFEXTENTS) &&
            (error = xfs_iread_extents(tp, ip, whichfork)))
                return error;
        lowest = *first_unused;
        nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
        for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
                xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
                off = xfs_bmbt_get_startoff(ep);
                /*
                 * See if the hole before this extent will work.
                 */
                if (off >= lowest + len && off - max >= len) {
                        *first_unused = max;
                        return 0;
                }
                lastaddr = off + xfs_bmbt_get_blockcount(ep);
                max = XFS_FILEOFF_MAX(lastaddr, lowest);
        }
        *first_unused = max;
        return 0;
}
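
/*
 * Example of the hole check above (illustrative offsets): with lowest = 0,
 * len = 2 and extents covering blocks [0,5) and [8,12), the first extent
 * (off = 0) has no hole before it and sets max = 5; the second (off = 8)
 * satisfies off - max = 3 >= len, so block 5 -- the start of the
 * three-block hole -- is returned in *first_unused.
 */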

/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int                                             /* error */
xfs_bmap_last_before(
        xfs_trans_t     *tp,                    /* transaction pointer */
        xfs_inode_t     *ip,                    /* incore inode */
        xfs_fileoff_t   *last_block,            /* last block */
        int             whichfork)              /* data or attr fork */
{
        xfs_fileoff_t   bno;                    /* input file offset */
        int             eof;                    /* hit end of file */
        xfs_bmbt_rec_host_t *ep;                /* pointer to last extent */
        int             error;                  /* error return value */
        xfs_bmbt_irec_t got;                    /* current extent value */
        xfs_ifork_t     *ifp;                   /* inode fork pointer */
        xfs_extnum_t    lastx;                  /* last extent used */
        xfs_bmbt_irec_t prev;                   /* previous extent value */

        if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
            XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
            XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
                return -EIO;
        if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
                *last_block = 0;
                return 0;
        }
        ifp = XFS_IFORK_PTR(ip, whichfork);
        if (!(ifp->if_flags & XFS_IFEXTENTS) &&
            (error = xfs_iread_extents(tp, ip, whichfork)))
                return error;
        bno = *last_block - 1;
        ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
                &prev);
        if (eof || xfs_bmbt_get_startoff(ep) > bno) {
                if (prev.br_startoff == NULLFILEOFF)
                        *last_block = 0;
                else
                        *last_block = prev.br_startoff + prev.br_blockcount;
        }
        /*
         * Otherwise *last_block is already the right answer.
         */
        return 0;
}

int
xfs_bmap_last_extent(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        int                     whichfork,
        struct xfs_bmbt_irec    *rec,
        int                     *is_empty)
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        int                     error;
        int                     nextents;

        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(tp, ip, whichfork);
                if (error)
                        return error;
        }

        nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
        if (nextents == 0) {
                *is_empty = 1;
                return 0;
        }

        xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
        *is_empty = 0;
        return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
        struct xfs_bmalloca     *bma,
        int                     whichfork)
{
        struct xfs_bmbt_irec    rec;
        int                     is_empty;
        int                     error;

        bma->aeof = 0;
        error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
                                     &is_empty);
        if (error)
                return error;

        if (is_empty) {
                bma->aeof = 1;
                return 0;
        }

        /*
         * Check if we are allocating at or past the last extent, or at
         * least into the last delayed allocated extent.
         */
        bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
                (bma->offset >= rec.br_startoff &&
                 isnullstartblock(rec.br_startblock));
        return 0;
}
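
/*
 * In other words, bma->aeof ends up true in two cases: the new allocation
 * starts at or beyond the end of the last real extent, or it lands inside
 * a trailing delayed allocation (isnullstartblock() identifies delalloc
 * records), which will likewise become new blocks at the end of the file.
 * Callers use this to decide whether to attempt stripe-aligned allocation.
 */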

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
        struct xfs_inode        *ip,
        xfs_fileoff_t           *last_block,
        int                     whichfork)
{
        struct xfs_bmbt_irec    rec;
        int                     is_empty;
        int                     error;

        *last_block = 0;

        if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
                return 0;

        if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
            XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
                return -EIO;

        error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
        if (error || is_empty)
                return error;

        *last_block = rec.br_startoff + rec.br_blockcount;
        return 0;
}

/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int                                     /* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
        xfs_inode_t     *ip,            /* incore inode */
        int             whichfork)      /* data or attr fork */
{
        xfs_bmbt_rec_host_t *ep;        /* ptr to fork's extent */
        xfs_ifork_t     *ifp;           /* inode fork pointer */
        int             rval;           /* return value */
        xfs_bmbt_irec_t s;              /* internal version of extent */

#ifndef DEBUG
        if (whichfork == XFS_DATA_FORK)
                return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif  /* !DEBUG */
        if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
                return 0;
        if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
                return 0;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        ASSERT(ifp->if_flags & XFS_IFEXTENTS);
        ep = xfs_iext_get_ext(ifp, 0);
        xfs_bmbt_get_all(ep, &s);
        rval = s.br_startoff == 0 && s.br_blockcount == 1;
        if (rval && whichfork == XFS_DATA_FORK)
                ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
        return rval;
}
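
/*
 * The #ifndef DEBUG fast path above answers the data fork question purely
 * from the file size, while DEBUG builds also walk the extent record and
 * assert that the two views agree.  There is no size shortcut for the
 * attribute fork, so its single extent record is always examined.
 */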

/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int                              /* error */
xfs_bmap_add_extent_delay_real(
        struct xfs_bmalloca     *bma)
{
        struct xfs_bmbt_irec    *new = &bma->got;
        int                     diff;   /* temp value */
        xfs_bmbt_rec_host_t     *ep;    /* extent entry for idx */
        int                     error;  /* error return value */
        int                     i;      /* temp state */
        xfs_ifork_t             *ifp;   /* inode fork pointer */
        xfs_fileoff_t           new_endoff;     /* end offset of new entry */
        xfs_bmbt_irec_t         r[3];   /* neighbor extent entries */
                                        /* left is 0, right is 1, prev is 2 */
        int                     rval = 0;       /* return value (logging flags) */
        int                     state = 0;      /* state bits, accessed thru macros */
        xfs_filblks_t           da_new; /* new count del alloc blocks used */
        xfs_filblks_t           da_old; /* old count del alloc blocks used */
        xfs_filblks_t           temp = 0;       /* value for da_new calculations */
        xfs_filblks_t           temp2 = 0;      /* value for da_new calculations */
        int                     tmp_rval;       /* partial logging flags */
        int                     whichfork = XFS_DATA_FORK;
        struct xfs_mount        *mp;

        mp = bma->ip->i_mount;
        ifp = XFS_IFORK_PTR(bma->ip, whichfork);

        ASSERT(bma->idx >= 0);
        ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
        ASSERT(!isnullstartblock(new->br_startblock));
        ASSERT(!bma->cur ||
               (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

        XFS_STATS_INC(mp, xs_add_exlist);

#define LEFT            r[0]
#define RIGHT           r[1]
#define PREV            r[2]

        /*
         * Set up a bunch of variables to make the tests simpler.
         */
        ep = xfs_iext_get_ext(ifp, bma->idx);
        xfs_bmbt_get_all(ep, &PREV);
        new_endoff = new->br_startoff + new->br_blockcount;
        ASSERT(PREV.br_startoff <= new->br_startoff);
        ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

        da_old = startblockval(PREV.br_startblock);
        da_new = 0;

        /*
         * Set flags determining what part of the previous delayed allocation
         * extent is being replaced by a real allocation.
         */
        if (PREV.br_startoff == new->br_startoff)
                state |= BMAP_LEFT_FILLING;
        if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
                state |= BMAP_RIGHT_FILLING;

        /*
         * Check and set flags if this segment has a left neighbor.
         * Don't set contiguous if the combined extent would be too large.
         */
        if (bma->idx > 0) {
                state |= BMAP_LEFT_VALID;
                xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);

                if (isnullstartblock(LEFT.br_startblock))
                        state |= BMAP_LEFT_DELAY;
        }

        if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
            LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
            LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
            LEFT.br_state == new->br_state &&
            LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
                state |= BMAP_LEFT_CONTIG;

        /*
         * Check and set flags if this segment has a right neighbor.
         * Don't set contiguous if the combined extent would be too large.
         * Also check for all-three-contiguous being too large.
         */
        if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
                state |= BMAP_RIGHT_VALID;
                xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);

                if (isnullstartblock(RIGHT.br_startblock))
                        state |= BMAP_RIGHT_DELAY;
        }

        if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
            new_endoff == RIGHT.br_startoff &&
            new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
            new->br_state == RIGHT.br_state &&
            new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
            ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
                       BMAP_RIGHT_FILLING)) !=
                      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
                       BMAP_RIGHT_FILLING) ||
             LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
                        <= MAXEXTLEN))
                state |= BMAP_RIGHT_CONTIG;

        error = 0;
        /*
         * Switch out based on the FILLING and CONTIG state bits.
         */
        switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
                         BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
        case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
             BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
                /*
                 * Filling in all of a previously delayed allocation extent.
                 * The left and right neighbors are both contiguous with new.
                 */
                bma->idx--;
                trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
                xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
                        LEFT.br_blockcount + PREV.br_blockcount +
                        RIGHT.br_blockcount);
                trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

                xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
                bma->ip->i_d.di_nextents--;
                if (bma->cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
                else {
                        rval = XFS_ILOG_CORE;
                        error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
                                        RIGHT.br_startblock,
                                        RIGHT.br_blockcount, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
                        error = xfs_btree_delete(bma->cur, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
                        error = xfs_btree_decrement(bma->cur, 0, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
                        error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
                                        LEFT.br_startblock,
                                        LEFT.br_blockcount +
                                        PREV.br_blockcount +
                                        RIGHT.br_blockcount, LEFT.br_state);
                        if (error)
                                goto done;
                }
                break;

        case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
                /*
                 * Filling in all of a previously delayed allocation extent.
                 * The left neighbor is contiguous, the right is not.
                 */
                bma->idx--;

                trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
                xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
                        LEFT.br_blockcount + PREV.br_blockcount);
                trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

                xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
                if (bma->cur == NULL)
                        rval = XFS_ILOG_DEXT;
                else {
                        rval = 0;
                        error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
                                        LEFT.br_startblock, LEFT.br_blockcount,
                                        &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1866 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1867 LEFT.br_startblock,
1868 LEFT.br_blockcount +
1869 PREV.br_blockcount, LEFT.br_state);
1870 if (error)
1871 goto done;
1872 }
1873 break;
1874
1875 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1876 /*
1877 * Filling in all of a previously delayed allocation extent.
1878 * The right neighbor is contiguous, the left is not.
1879 */
1880 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1881 xfs_bmbt_set_startblock(ep, new->br_startblock);
1882 xfs_bmbt_set_blockcount(ep,
1883 PREV.br_blockcount + RIGHT.br_blockcount);
1884 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1885
1886 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
1887 if (bma->cur == NULL)
1888 rval = XFS_ILOG_DEXT;
1889 else {
1890 rval = 0;
1891 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
1892 RIGHT.br_startblock,
1893 RIGHT.br_blockcount, &i);
1894 if (error)
1895 goto done;
1896 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1897 error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
1898 new->br_startblock,
1899 PREV.br_blockcount +
1900 RIGHT.br_blockcount, PREV.br_state);
1901 if (error)
1902 goto done;
1903 }
1904 break;
1905
1906 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1907 /*
1908 * Filling in all of a previously delayed allocation extent.
1909 * Neither the left nor right neighbors are contiguous with
1910 * the new one.
1911 */
1912 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1913 xfs_bmbt_set_startblock(ep, new->br_startblock);
1914 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1915
1916 bma->ip->i_d.di_nextents++;
1917 if (bma->cur == NULL)
1918 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1919 else {
1920 rval = XFS_ILOG_CORE;
1921 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
1922 new->br_startblock, new->br_blockcount,
1923 &i);
1924 if (error)
1925 goto done;
1926 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1927 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
1928 error = xfs_btree_insert(bma->cur, &i);
1929 if (error)
1930 goto done;
1931 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1932 }
1933 break;
1934
1935 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1936 /*
1937 * Filling in the first part of a previous delayed allocation.
1938 * The left neighbor is contiguous.
1939 */
1940 trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
1941 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
1942 LEFT.br_blockcount + new->br_blockcount);
1943 xfs_bmbt_set_startoff(ep,
1944 PREV.br_startoff + new->br_blockcount);
1945 trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
1946
1947 temp = PREV.br_blockcount - new->br_blockcount;
1948 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1949 xfs_bmbt_set_blockcount(ep, temp);
1950 if (bma->cur == NULL)
1951 rval = XFS_ILOG_DEXT;
1952 else {
1953 rval = 0;
1954 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
1955 LEFT.br_startblock, LEFT.br_blockcount,
1956 &i);
1957 if (error)
1958 goto done;
1959 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1960 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1961 LEFT.br_startblock,
1962 LEFT.br_blockcount +
1963 new->br_blockcount,
1964 LEFT.br_state);
1965 if (error)
1966 goto done;
1967 }
1968 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1969 startblockval(PREV.br_startblock));
1970 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
1971 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1972
1973 bma->idx--;
1974 break;
1975
1976 case BMAP_LEFT_FILLING:
1977 /*
1978 * Filling in the first part of a previous delayed allocation.
1979 * The left neighbor is not contiguous.
1980 */
1981 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1982 xfs_bmbt_set_startoff(ep, new_endoff);
1983 temp = PREV.br_blockcount - new->br_blockcount;
1984 xfs_bmbt_set_blockcount(ep, temp);
1985 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
1986 bma->ip->i_d.di_nextents++;
1987 if (bma->cur == NULL)
1988 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1989 else {
1990 rval = XFS_ILOG_CORE;
1991 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
1992 new->br_startblock, new->br_blockcount,
1993 &i);
1994 if (error)
1995 goto done;
1996 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1997 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
1998 error = xfs_btree_insert(bma->cur, &i);
1999 if (error)
2000 goto done;
2001 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2002 }
2003
2004 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2005 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2006 bma->firstblock, bma->flist,
2007 &bma->cur, 1, &tmp_rval, whichfork);
2008 rval |= tmp_rval;
2009 if (error)
2010 goto done;
2011 }
2012 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2013 startblockval(PREV.br_startblock) -
2014 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
2015 ep = xfs_iext_get_ext(ifp, bma->idx + 1);
2016 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2017 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
2018 break;
2019
2020 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2021 /*
2022 * Filling in the last part of a previous delayed allocation.
2023 * The right neighbor is contiguous with the new allocation.
2024 */
2025 temp = PREV.br_blockcount - new->br_blockcount;
2026 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
2027 xfs_bmbt_set_blockcount(ep, temp);
2028 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
2029 new->br_startoff, new->br_startblock,
2030 new->br_blockcount + RIGHT.br_blockcount,
2031 RIGHT.br_state);
2032 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
2033 if (bma->cur == NULL)
2034 rval = XFS_ILOG_DEXT;
2035 else {
2036 rval = 0;
2037 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
2038 RIGHT.br_startblock,
2039 RIGHT.br_blockcount, &i);
2040 if (error)
2041 goto done;
2042 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2043 error = xfs_bmbt_update(bma->cur, new->br_startoff,
2044 new->br_startblock,
2045 new->br_blockcount +
2046 RIGHT.br_blockcount,
2047 RIGHT.br_state);
2048 if (error)
2049 goto done;
2050 }
2051
2052 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2053 startblockval(PREV.br_startblock));
2054 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2055 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2056 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2057
2058 bma->idx++;
2059 break;
2060
2061 case BMAP_RIGHT_FILLING:
2062 /*
2063 * Filling in the last part of a previous delayed allocation.
2064 * The right neighbor is not contiguous.
2065 */
2066 temp = PREV.br_blockcount - new->br_blockcount;
2067 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2068 xfs_bmbt_set_blockcount(ep, temp);
2069 xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
2070 bma->ip->i_d.di_nextents++;
2071 if (bma->cur == NULL)
2072 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2073 else {
2074 rval = XFS_ILOG_CORE;
2075 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2076 new->br_startblock, new->br_blockcount,
2077 &i);
2078 if (error)
2079 goto done;
2080 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2081 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2082 error = xfs_btree_insert(bma->cur, &i);
2083 if (error)
2084 goto done;
2085 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2086 }
2087
2088 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2089 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2090 bma->firstblock, bma->flist, &bma->cur, 1,
2091 &tmp_rval, whichfork);
2092 rval |= tmp_rval;
2093 if (error)
2094 goto done;
2095 }
2096 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2097 startblockval(PREV.br_startblock) -
2098 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
2099 ep = xfs_iext_get_ext(ifp, bma->idx);
2100 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2101 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2102
2103 bma->idx++;
2104 break;
2105
2106 case 0:
2107 /*
2108 * Filling in the middle part of a previous delayed allocation.
2109 * Contiguity is impossible here.
2110 * This case is avoided almost all the time.
2111 *
2112 * We start with a delayed allocation:
2113 *
2114 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
2115 * PREV @ idx
2116 *
2117 * and we are allocating:
2118 * +rrrrrrrrrrrrrrrrr+
2119 * new
2120 *
2121 * and we set it up for insertion as:
2122 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
2123 * new
2124 * PREV @ idx LEFT RIGHT
2125 * inserted at idx + 1
2126 */
2127 temp = new->br_startoff - PREV.br_startoff;
2128 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
2129 trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
2130 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */
2131 LEFT = *new;
2132 RIGHT.br_state = PREV.br_state;
2133 RIGHT.br_startblock = nullstartblock(
2134 (int)xfs_bmap_worst_indlen(bma->ip, temp2));
2135 RIGHT.br_startoff = new_endoff;
2136 RIGHT.br_blockcount = temp2;
2137 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
2138 xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
2139 bma->ip->i_d.di_nextents++;
2140 if (bma->cur == NULL)
2141 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2142 else {
2143 rval = XFS_ILOG_CORE;
2144 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2145 new->br_startblock, new->br_blockcount,
2146 &i);
2147 if (error)
2148 goto done;
2149 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2150 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2151 error = xfs_btree_insert(bma->cur, &i);
2152 if (error)
2153 goto done;
2154 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2155 }
2156
2157 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2158 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2159 bma->firstblock, bma->flist, &bma->cur,
2160 1, &tmp_rval, whichfork);
2161 rval |= tmp_rval;
2162 if (error)
2163 goto done;
2164 }
2165 temp = xfs_bmap_worst_indlen(bma->ip, temp);
2166 temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
2167 diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
2168 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
2169 if (diff > 0) {
2170 error = xfs_mod_fdblocks(bma->ip->i_mount,
2171 -((int64_t)diff), false);
2172 ASSERT(!error);
2173 if (error)
2174 goto done;
2175 }
2176
2177 ep = xfs_iext_get_ext(ifp, bma->idx);
2178 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2179 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2180 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2181 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
2182 nullstartblock((int)temp2));
2183 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2184
2185 bma->idx++;
2186 da_new = temp + temp2;
2187 break;
2188
2189 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2190 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2191 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2192 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2193 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2194 case BMAP_LEFT_CONTIG:
2195 case BMAP_RIGHT_CONTIG:
2196 /*
2197 * These cases are all impossible.
2198 */
2199 ASSERT(0);
2200 }
2201
2202 /* convert to a btree if necessary */
2203 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2204 int tmp_logflags; /* partial log flag return val */
2205
2206 ASSERT(bma->cur == NULL);
2207 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2208 bma->firstblock, bma->flist, &bma->cur,
2209 da_old > 0, &tmp_logflags, whichfork);
2210 bma->logflags |= tmp_logflags;
2211 if (error)
2212 goto done;
2213 }
2214
2215 /* adjust for changes in reserved delayed indirect blocks */
2216 if (da_old || da_new) {
2217 temp = da_new;
2218 if (bma->cur)
2219 temp += bma->cur->bc_private.b.allocated;
2220 ASSERT(temp <= da_old);
2221 if (temp < da_old)
2222 xfs_mod_fdblocks(bma->ip->i_mount,
2223 (int64_t)(da_old - temp), false);
2224 }
2225
2226 /* clear out the allocated field, done with it now in any case. */
2227 if (bma->cur)
2228 bma->cur->bc_private.b.allocated = 0;
2229
2230 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
2231 done:
2232 bma->logflags |= rval;
2233 return error;
2234 #undef LEFT
2235 #undef RIGHT
2236 #undef PREV
2237 }
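
/*
 * Editor's note: the switch in the function above dispatches on four
 * state bits. The sketch below derives them in isolation; it is
 * illustrative userspace C, not part of xfs_bmap.c. It omits the extent
 * state comparison and the three-way MAXEXTLEN cap that the kernel also
 * applies, and all names are hypothetical.
 */
#if 0
#include <stdint.h>

#define LEFT_FILLING	(1 << 0)	/* new starts where PREV starts */
#define RIGHT_FILLING	(1 << 1)	/* new ends where PREV ends */
#define LEFT_CONTIG	(1 << 2)	/* new merges with left neighbor */
#define RIGHT_CONTIG	(1 << 3)	/* new merges with right neighbor */

struct ext {
	uint64_t off;	/* br_startoff */
	uint64_t bno;	/* br_startblock */
	uint64_t len;	/* br_blockcount */
};

static int classify(const struct ext *prev, const struct ext *new,
		    const struct ext *left, const struct ext *right,
		    uint64_t maxlen)
{
	int state = 0;

	if (new->off == prev->off)
		state |= LEFT_FILLING;
	if (new->off + new->len == prev->off + prev->len)
		state |= RIGHT_FILLING;
	if (left && left->off + left->len == new->off &&
	    left->bno + left->len == new->bno &&
	    left->len + new->len <= maxlen)
		state |= LEFT_CONTIG;
	if (right && new->off + new->len == right->off &&
	    new->bno + new->len == right->bno &&
	    new->len + right->len <= maxlen)
		state |= RIGHT_CONTIG;
	return state;	/* selects one of the cases in the switch above */
}
#endif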
2238
2239 /*
2240 * Convert an unwritten allocation to a real allocation or vice versa.
2241 */
2242 STATIC int /* error */
2243 xfs_bmap_add_extent_unwritten_real(
2244 struct xfs_trans *tp,
2245 xfs_inode_t *ip, /* incore inode pointer */
2246 xfs_extnum_t *idx, /* extent number to update/insert */
2247 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
2248 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2249 xfs_fsblock_t *first, /* pointer to firstblock variable */
2250 xfs_bmap_free_t *flist, /* list of extents to be freed */
2251 int *logflagsp) /* inode logging flags */
2252 {
2253 xfs_btree_cur_t *cur; /* btree cursor */
2254 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
2255 int error; /* error return value */
2256 int i; /* temp state */
2257 xfs_ifork_t *ifp; /* inode fork pointer */
2258 xfs_fileoff_t new_endoff; /* end offset of new entry */
2259 xfs_exntst_t newext; /* new extent state */
2260 xfs_exntst_t oldext; /* old extent state */
2261 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2262 /* left is 0, right is 1, prev is 2 */
2263 int rval = 0; /* return value (logging flags) */
2264 int state = 0; /* state bits, accessed thru macros */
2265 struct xfs_mount *mp = tp->t_mountp;
2266
2267 *logflagsp = 0;
2268
2269 cur = *curp;
2270 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
2271
2272 ASSERT(*idx >= 0);
2273 ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
2274 ASSERT(!isnullstartblock(new->br_startblock));
2275
2276 XFS_STATS_INC(mp, xs_add_exlist);
2277
2278 #define LEFT r[0]
2279 #define RIGHT r[1]
2280 #define PREV r[2]
2281
2282 /*
2283 * Set up a bunch of variables to make the tests simpler.
2284 */
2285 error = 0;
2286 ep = xfs_iext_get_ext(ifp, *idx);
2287 xfs_bmbt_get_all(ep, &PREV);
2288 newext = new->br_state;
2289 oldext = (newext == XFS_EXT_UNWRITTEN) ?
2290 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
2291 ASSERT(PREV.br_state == oldext);
2292 new_endoff = new->br_startoff + new->br_blockcount;
2293 ASSERT(PREV.br_startoff <= new->br_startoff);
2294 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2295
2296 /*
2297 * Set flags determining what part of the previous oldext allocation
2298 * extent is being replaced by a newext allocation.
2299 */
2300 if (PREV.br_startoff == new->br_startoff)
2301 state |= BMAP_LEFT_FILLING;
2302 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2303 state |= BMAP_RIGHT_FILLING;
2304
2305 /*
2306 * Check and set flags if this segment has a left neighbor.
2307 * Don't set contiguous if the combined extent would be too large.
2308 */
2309 if (*idx > 0) {
2310 state |= BMAP_LEFT_VALID;
2311 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
2312
2313 if (isnullstartblock(LEFT.br_startblock))
2314 state |= BMAP_LEFT_DELAY;
2315 }
2316
2317 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2318 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2319 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2320 LEFT.br_state == newext &&
2321 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2322 state |= BMAP_LEFT_CONTIG;
2323
2324 /*
2325 * Check and set flags if this segment has a right neighbor.
2326 * Don't set contiguous if the combined extent would be too large.
2327 * Also check for all-three-contiguous being too large.
2328 */
2329 if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
2330 state |= BMAP_RIGHT_VALID;
2331 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
2332 if (isnullstartblock(RIGHT.br_startblock))
2333 state |= BMAP_RIGHT_DELAY;
2334 }
2335
2336 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2337 new_endoff == RIGHT.br_startoff &&
2338 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2339 newext == RIGHT.br_state &&
2340 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
2341 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2342 BMAP_RIGHT_FILLING)) !=
2343 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2344 BMAP_RIGHT_FILLING) ||
2345 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2346 <= MAXEXTLEN))
2347 state |= BMAP_RIGHT_CONTIG;
2348
2349 /*
2350 * Switch out based on the FILLING and CONTIG state bits.
2351 */
2352 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2353 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2354 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2355 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2356 /*
2357 * Setting all of a previous oldext extent to newext.
2358 * The left and right neighbors are both contiguous with new.
2359 */
2360 --*idx;
2361
2362 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2363 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2364 LEFT.br_blockcount + PREV.br_blockcount +
2365 RIGHT.br_blockcount);
2366 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2367
2368 xfs_iext_remove(ip, *idx + 1, 2, state);
2369 ip->i_d.di_nextents -= 2;
2370 if (cur == NULL)
2371 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2372 else {
2373 rval = XFS_ILOG_CORE;
2374 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2375 RIGHT.br_startblock,
2376 RIGHT.br_blockcount, &i)))
2377 goto done;
2378 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2379 if ((error = xfs_btree_delete(cur, &i)))
2380 goto done;
2381 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2382 if ((error = xfs_btree_decrement(cur, 0, &i)))
2383 goto done;
2384 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2385 if ((error = xfs_btree_delete(cur, &i)))
2386 goto done;
2387 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2388 if ((error = xfs_btree_decrement(cur, 0, &i)))
2389 goto done;
2390 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2391 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2392 LEFT.br_startblock,
2393 LEFT.br_blockcount + PREV.br_blockcount +
2394 RIGHT.br_blockcount, LEFT.br_state)))
2395 goto done;
2396 }
2397 break;
2398
2399 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2400 /*
2401 * Setting all of a previous oldext extent to newext.
2402 * The left neighbor is contiguous, the right is not.
2403 */
2404 --*idx;
2405
2406 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2407 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2408 LEFT.br_blockcount + PREV.br_blockcount);
2409 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2410
2411 xfs_iext_remove(ip, *idx + 1, 1, state);
2412 ip->i_d.di_nextents--;
2413 if (cur == NULL)
2414 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2415 else {
2416 rval = XFS_ILOG_CORE;
2417 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2418 PREV.br_startblock, PREV.br_blockcount,
2419 &i)))
2420 goto done;
2421 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2422 if ((error = xfs_btree_delete(cur, &i)))
2423 goto done;
2424 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2425 if ((error = xfs_btree_decrement(cur, 0, &i)))
2426 goto done;
2427 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2428 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2429 LEFT.br_startblock,
2430 LEFT.br_blockcount + PREV.br_blockcount,
2431 LEFT.br_state)))
2432 goto done;
2433 }
2434 break;
2435
2436 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2437 /*
2438 * Setting all of a previous oldext extent to newext.
2439 * The right neighbor is contiguous, the left is not.
2440 */
2441 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2442 xfs_bmbt_set_blockcount(ep,
2443 PREV.br_blockcount + RIGHT.br_blockcount);
2444 xfs_bmbt_set_state(ep, newext);
2445 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2446 xfs_iext_remove(ip, *idx + 1, 1, state);
2447 ip->i_d.di_nextents--;
2448 if (cur == NULL)
2449 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2450 else {
2451 rval = XFS_ILOG_CORE;
2452 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2453 RIGHT.br_startblock,
2454 RIGHT.br_blockcount, &i)))
2455 goto done;
2456 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2457 if ((error = xfs_btree_delete(cur, &i)))
2458 goto done;
2459 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2460 if ((error = xfs_btree_decrement(cur, 0, &i)))
2461 goto done;
2462 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2463 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2464 new->br_startblock,
2465 new->br_blockcount + RIGHT.br_blockcount,
2466 newext)))
2467 goto done;
2468 }
2469 break;
2470
2471 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2472 /*
2473 * Setting all of a previous oldext extent to newext.
2474 * Neither the left nor right neighbors are contiguous with
2475 * the new one.
2476 */
2477 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2478 xfs_bmbt_set_state(ep, newext);
2479 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2480
2481 if (cur == NULL)
2482 rval = XFS_ILOG_DEXT;
2483 else {
2484 rval = 0;
2485 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2486 new->br_startblock, new->br_blockcount,
2487 &i)))
2488 goto done;
2489 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2490 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2491 new->br_startblock, new->br_blockcount,
2492 newext)))
2493 goto done;
2494 }
2495 break;
2496
2497 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2498 /*
2499 * Setting the first part of a previous oldext extent to newext.
2500 * The left neighbor is contiguous.
2501 */
2502 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
2503 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
2504 LEFT.br_blockcount + new->br_blockcount);
2505 xfs_bmbt_set_startoff(ep,
2506 PREV.br_startoff + new->br_blockcount);
2507 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
2508
2509 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2510 xfs_bmbt_set_startblock(ep,
2511 new->br_startblock + new->br_blockcount);
2512 xfs_bmbt_set_blockcount(ep,
2513 PREV.br_blockcount - new->br_blockcount);
2514 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2515
2516 --*idx;
2517
2518 if (cur == NULL)
2519 rval = XFS_ILOG_DEXT;
2520 else {
2521 rval = 0;
2522 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2523 PREV.br_startblock, PREV.br_blockcount,
2524 &i)))
2525 goto done;
2526 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2527 if ((error = xfs_bmbt_update(cur,
2528 PREV.br_startoff + new->br_blockcount,
2529 PREV.br_startblock + new->br_blockcount,
2530 PREV.br_blockcount - new->br_blockcount,
2531 oldext)))
2532 goto done;
2533 if ((error = xfs_btree_decrement(cur, 0, &i)))
2534 goto done;
2535 error = xfs_bmbt_update(cur, LEFT.br_startoff,
2536 LEFT.br_startblock,
2537 LEFT.br_blockcount + new->br_blockcount,
2538 LEFT.br_state);
2539 if (error)
2540 goto done;
2541 }
2542 break;
2543
2544 case BMAP_LEFT_FILLING:
2545 /*
2546 * Setting the first part of a previous oldext extent to newext.
2547 * The left neighbor is not contiguous.
2548 */
2549 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2550 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
2551 xfs_bmbt_set_startoff(ep, new_endoff);
2552 xfs_bmbt_set_blockcount(ep,
2553 PREV.br_blockcount - new->br_blockcount);
2554 xfs_bmbt_set_startblock(ep,
2555 new->br_startblock + new->br_blockcount);
2556 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2557
2558 xfs_iext_insert(ip, *idx, 1, new, state);
2559 ip->i_d.di_nextents++;
2560 if (cur == NULL)
2561 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2562 else {
2563 rval = XFS_ILOG_CORE;
2564 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2565 PREV.br_startblock, PREV.br_blockcount,
2566 &i)))
2567 goto done;
2568 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2569 if ((error = xfs_bmbt_update(cur,
2570 PREV.br_startoff + new->br_blockcount,
2571 PREV.br_startblock + new->br_blockcount,
2572 PREV.br_blockcount - new->br_blockcount,
2573 oldext)))
2574 goto done;
2575 cur->bc_rec.b = *new;
2576 if ((error = xfs_btree_insert(cur, &i)))
2577 goto done;
2578 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2579 }
2580 break;
2581
2582 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2583 /*
2584 * Setting the last part of a previous oldext extent to newext.
2585 * The right neighbor is contiguous with the new allocation.
2586 */
2587 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2588 xfs_bmbt_set_blockcount(ep,
2589 PREV.br_blockcount - new->br_blockcount);
2590 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2591
2592 ++*idx;
2593
2594 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2595 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2596 new->br_startoff, new->br_startblock,
2597 new->br_blockcount + RIGHT.br_blockcount, newext);
2598 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2599
2600 if (cur == NULL)
2601 rval = XFS_ILOG_DEXT;
2602 else {
2603 rval = 0;
2604 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2605 PREV.br_startblock,
2606 PREV.br_blockcount, &i)))
2607 goto done;
2608 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2609 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2610 PREV.br_startblock,
2611 PREV.br_blockcount - new->br_blockcount,
2612 oldext)))
2613 goto done;
2614 if ((error = xfs_btree_increment(cur, 0, &i)))
2615 goto done;
2616 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2617 new->br_startblock,
2618 new->br_blockcount + RIGHT.br_blockcount,
2619 newext)))
2620 goto done;
2621 }
2622 break;
2623
2624 case BMAP_RIGHT_FILLING:
2625 /*
2626 * Setting the last part of a previous oldext extent to newext.
2627 * The right neighbor is not contiguous.
2628 */
2629 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2630 xfs_bmbt_set_blockcount(ep,
2631 PREV.br_blockcount - new->br_blockcount);
2632 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2633
2634 ++*idx;
2635 xfs_iext_insert(ip, *idx, 1, new, state);
2636
2637 ip->i_d.di_nextents++;
2638 if (cur == NULL)
2639 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2640 else {
2641 rval = XFS_ILOG_CORE;
2642 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2643 PREV.br_startblock, PREV.br_blockcount,
2644 &i)))
2645 goto done;
2646 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2647 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2648 PREV.br_startblock,
2649 PREV.br_blockcount - new->br_blockcount,
2650 oldext)))
2651 goto done;
2652 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2653 new->br_startblock, new->br_blockcount,
2654 &i)))
2655 goto done;
2656 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2657 cur->bc_rec.b.br_state = new->br_state; /* newext */
2658 if ((error = xfs_btree_insert(cur, &i)))
2659 goto done;
2660 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2661 }
2662 break;
2663
2664 case 0:
2665 /*
2666 * Setting the middle part of a previous oldext extent to
2667 * newext. Contiguity is impossible here.
2668 * One extent becomes three extents.
2669 */
2670 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2671 xfs_bmbt_set_blockcount(ep,
2672 new->br_startoff - PREV.br_startoff);
2673 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2674
2675 r[0] = *new;
2676 r[1].br_startoff = new_endoff;
2677 r[1].br_blockcount =
2678 PREV.br_startoff + PREV.br_blockcount - new_endoff;
2679 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2680 r[1].br_state = oldext;
2681
2682 ++*idx;
2683 xfs_iext_insert(ip, *idx, 2, &r[0], state);
2684
2685 ip->i_d.di_nextents += 2;
2686 if (cur == NULL)
2687 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2688 else {
2689 rval = XFS_ILOG_CORE;
2690 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2691 PREV.br_startblock, PREV.br_blockcount,
2692 &i)))
2693 goto done;
2694 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2695 /* new right extent - oldext */
2696 if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
2697 r[1].br_startblock, r[1].br_blockcount,
2698 r[1].br_state)))
2699 goto done;
2700 /* new left extent - oldext */
2701 cur->bc_rec.b = PREV;
2702 cur->bc_rec.b.br_blockcount =
2703 new->br_startoff - PREV.br_startoff;
2704 if ((error = xfs_btree_insert(cur, &i)))
2705 goto done;
2706 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2707 /*
2708 * Reset the cursor to the position of the new extent
2709 * we are about to insert as we can't trust it after
2710 * the previous insert.
2711 */
2712 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2713 new->br_startblock, new->br_blockcount,
2714 &i)))
2715 goto done;
2716 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2717 /* new middle extent - newext */
2718 cur->bc_rec.b.br_state = new->br_state;
2719 if ((error = xfs_btree_insert(cur, &i)))
2720 goto done;
2721 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2722 }
2723 break;
2724
2725 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2726 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2727 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2728 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2729 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2730 case BMAP_LEFT_CONTIG:
2731 case BMAP_RIGHT_CONTIG:
2732 /*
2733 * These cases are all impossible.
2734 */
2735 ASSERT(0);
2736 }
2737
2738 /* convert to a btree if necessary */
2739 if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
2740 int tmp_logflags; /* partial log flag return val */
2741
2742 ASSERT(cur == NULL);
2743 error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur,
2744 0, &tmp_logflags, XFS_DATA_FORK);
2745 *logflagsp |= tmp_logflags;
2746 if (error)
2747 goto done;
2748 }
2749
2750 /* clear out the allocated field, done with it now in any case. */
2751 if (cur) {
2752 cur->bc_private.b.allocated = 0;
2753 *curp = cur;
2754 }
2755
2756 xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK);
2757 done:
2758 *logflagsp |= rval;
2759 return error;
2760 #undef LEFT
2761 #undef RIGHT
2762 #undef PREV
2763 }
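
/*
 * Editor's note: the "case 0" path above turns one extent record into
 * three when the middle of an extent changes state. The sketch below
 * computes the three resulting records; illustrative userspace C, not
 * part of xfs_bmap.c, with hypothetical type names.
 */
#if 0
#include <assert.h>
#include <stdint.h>

struct ext {
	uint64_t off, bno, len;
	int	 unwritten;
};

/* out[0]/out[2] keep the old state; out[1] is the converted middle. */
static void split_middle(const struct ext *prev, const struct ext *new,
			 struct ext out[3])
{
	assert(new->off > prev->off);
	assert(new->off + new->len < prev->off + prev->len);

	out[0] = *prev;
	out[0].len = new->off - prev->off;

	out[1] = *new;

	out[2].off = new->off + new->len;
	out[2].bno = new->bno + new->len;
	out[2].len = prev->off + prev->len - out[2].off;
	out[2].unwritten = prev->unwritten;
}
#endif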
2764
2765 /*
2766 * Convert a hole to a delayed allocation.
2767 */
2768 STATIC void
2769 xfs_bmap_add_extent_hole_delay(
2770 xfs_inode_t *ip, /* incore inode pointer */
2771 xfs_extnum_t *idx, /* extent number to update/insert */
2772 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2773 {
2774 xfs_ifork_t *ifp; /* inode fork pointer */
2775 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2776 xfs_filblks_t newlen = 0; /* new indirect size */
2777 xfs_filblks_t oldlen = 0; /* old indirect size */
2778 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2779 int state; /* state bits, accessed thru macros */
2780 xfs_filblks_t temp = 0; /* temp for indirect calculations */
2781
2782 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
2783 state = 0;
2784 ASSERT(isnullstartblock(new->br_startblock));
2785
2786 /*
2787 * Check and set flags if this segment has a left neighbor
2788 */
2789 if (*idx > 0) {
2790 state |= BMAP_LEFT_VALID;
2791 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
2792
2793 if (isnullstartblock(left.br_startblock))
2794 state |= BMAP_LEFT_DELAY;
2795 }
2796
2797 /*
2798 * Check and set flags if the current (right) segment exists.
2799 * If it doesn't exist, we're converting the hole at end-of-file.
2800 */
2801 if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
2802 state |= BMAP_RIGHT_VALID;
2803 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
2804
2805 if (isnullstartblock(right.br_startblock))
2806 state |= BMAP_RIGHT_DELAY;
2807 }
2808
2809 /*
2810 * Set contiguity flags on the left and right neighbors.
2811 * Don't let extents get too large, even if the pieces are contiguous.
2812 */
2813 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2814 left.br_startoff + left.br_blockcount == new->br_startoff &&
2815 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2816 state |= BMAP_LEFT_CONTIG;
2817
2818 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2819 new->br_startoff + new->br_blockcount == right.br_startoff &&
2820 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2821 (!(state & BMAP_LEFT_CONTIG) ||
2822 (left.br_blockcount + new->br_blockcount +
2823 right.br_blockcount <= MAXEXTLEN)))
2824 state |= BMAP_RIGHT_CONTIG;
2825
2826 /*
2827 * Switch out based on the contiguity flags.
2828 */
2829 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2830 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2831 /*
2832 * New allocation is contiguous with delayed allocations
2833 * on the left and on the right.
2834 * Merge all three into a single extent record.
2835 */
2836 --*idx;
2837 temp = left.br_blockcount + new->br_blockcount +
2838 right.br_blockcount;
2839
2840 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2841 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2842 oldlen = startblockval(left.br_startblock) +
2843 startblockval(new->br_startblock) +
2844 startblockval(right.br_startblock);
2845 newlen = xfs_bmap_worst_indlen(ip, temp);
2846 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2847 nullstartblock((int)newlen));
2848 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2849
2850 xfs_iext_remove(ip, *idx + 1, 1, state);
2851 break;
2852
2853 case BMAP_LEFT_CONTIG:
2854 /*
2855 * New allocation is contiguous with a delayed allocation
2856 * on the left.
2857 * Merge the new allocation with the left neighbor.
2858 */
2859 --*idx;
2860 temp = left.br_blockcount + new->br_blockcount;
2861
2862 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2863 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2864 oldlen = startblockval(left.br_startblock) +
2865 startblockval(new->br_startblock);
2866 newlen = xfs_bmap_worst_indlen(ip, temp);
2867 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2868 nullstartblock((int)newlen));
2869 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2870 break;
2871
2872 case BMAP_RIGHT_CONTIG:
2873 /*
2874 * New allocation is contiguous with a delayed allocation
2875 * on the right.
2876 * Merge the new allocation with the right neighbor.
2877 */
2878 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2879 temp = new->br_blockcount + right.br_blockcount;
2880 oldlen = startblockval(new->br_startblock) +
2881 startblockval(right.br_startblock);
2882 newlen = xfs_bmap_worst_indlen(ip, temp);
2883 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2884 new->br_startoff,
2885 nullstartblock((int)newlen), temp, right.br_state);
2886 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2887 break;
2888
2889 case 0:
2890 /*
2891 * New allocation is not contiguous with another
2892 * delayed allocation.
2893 * Insert a new entry.
2894 */
2895 oldlen = newlen = 0;
2896 xfs_iext_insert(ip, *idx, 1, new, state);
2897 break;
2898 }
2899 if (oldlen != newlen) {
2900 ASSERT(oldlen > newlen);
2901 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2902 false);
2903 /*
2904 * Nothing to do for disk quota accounting here.
2905 */
2906 }
2907 }
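
/*
 * Editor's note: merging delalloc extents above can shrink the
 * worst-case indirect-block reservation, and the surplus is returned to
 * the free-block counter. A one-line sketch of that give-back;
 * illustrative, not part of xfs_bmap.c.
 */
#if 0
#include <assert.h>
#include <stdint.h>

/*
 * oldlen: summed reservations of the pieces being merged.
 * newlen: worst-case reservation for the combined extent.
 * Returns the block count handed back via xfs_mod_fdblocks().
 */
static uint64_t indlen_surplus(uint64_t oldlen, uint64_t newlen)
{
	assert(oldlen >= newlen);
	return oldlen - newlen;
}
#endif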
2908
2909 /*
2910 * Convert a hole to a real allocation.
2911 */
2912 STATIC int /* error */
2913 xfs_bmap_add_extent_hole_real(
2914 struct xfs_bmalloca *bma,
2915 int whichfork)
2916 {
2917 struct xfs_bmbt_irec *new = &bma->got;
2918 int error; /* error return value */
2919 int i; /* temp state */
2920 xfs_ifork_t *ifp; /* inode fork pointer */
2921 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2922 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2923 int rval = 0; /* return value (logging flags) */
2924 int state; /* state bits, accessed thru macros */
2925 struct xfs_mount *mp;
2926
2927 mp = bma->ip->i_mount;
2928 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
2929
2930 ASSERT(bma->idx >= 0);
2931 ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
2932 ASSERT(!isnullstartblock(new->br_startblock));
2933 ASSERT(!bma->cur ||
2934 !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
2935
2936 XFS_STATS_INC(mp, xs_add_exlist);
2937
2938 state = 0;
2939 if (whichfork == XFS_ATTR_FORK)
2940 state |= BMAP_ATTRFORK;
2941
2942 /*
2943 * Check and set flags if this segment has a left neighbor.
2944 */
2945 if (bma->idx > 0) {
2946 state |= BMAP_LEFT_VALID;
2947 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left);
2948 if (isnullstartblock(left.br_startblock))
2949 state |= BMAP_LEFT_DELAY;
2950 }
2951
2952 /*
2953 * Check and set flags if this segment has a current value.
2954 * Not true if we're inserting into the "hole" at eof.
2955 */
2956 if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
2957 state |= BMAP_RIGHT_VALID;
2958 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
2959 if (isnullstartblock(right.br_startblock))
2960 state |= BMAP_RIGHT_DELAY;
2961 }
2962
2963 /*
2964 * We're inserting a real allocation between "left" and "right".
2965 * Set the contiguity flags. Don't let extents get too large.
2966 */
2967 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2968 left.br_startoff + left.br_blockcount == new->br_startoff &&
2969 left.br_startblock + left.br_blockcount == new->br_startblock &&
2970 left.br_state == new->br_state &&
2971 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2972 state |= BMAP_LEFT_CONTIG;
2973
2974 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2975 new->br_startoff + new->br_blockcount == right.br_startoff &&
2976 new->br_startblock + new->br_blockcount == right.br_startblock &&
2977 new->br_state == right.br_state &&
2978 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2979 (!(state & BMAP_LEFT_CONTIG) ||
2980 left.br_blockcount + new->br_blockcount +
2981 right.br_blockcount <= MAXEXTLEN))
2982 state |= BMAP_RIGHT_CONTIG;
2983
2984 error = 0;
2985 /*
2986 * Select which case we're in here, and implement it.
2987 */
2988 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2989 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2990 /*
2991 * New allocation is contiguous with real allocations on the
2992 * left and on the right.
2993 * Merge all three into a single extent record.
2994 */
2995 --bma->idx;
2996 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2997 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
2998 left.br_blockcount + new->br_blockcount +
2999 right.br_blockcount);
3000 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3001
3002 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
3003
3004 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
3005 XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1);
3006 if (bma->cur == NULL) {
3007 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
3008 } else {
3009 rval = XFS_ILOG_CORE;
3010 error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff,
3011 right.br_startblock, right.br_blockcount,
3012 &i);
3013 if (error)
3014 goto done;
3015 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3016 error = xfs_btree_delete(bma->cur, &i);
3017 if (error)
3018 goto done;
3019 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3020 error = xfs_btree_decrement(bma->cur, 0, &i);
3021 if (error)
3022 goto done;
3023 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3024 error = xfs_bmbt_update(bma->cur, left.br_startoff,
3025 left.br_startblock,
3026 left.br_blockcount +
3027 new->br_blockcount +
3028 right.br_blockcount,
3029 left.br_state);
3030 if (error)
3031 goto done;
3032 }
3033 break;
3034
3035 case BMAP_LEFT_CONTIG:
3036 /*
3037 * New allocation is contiguous with a real allocation
3038 * on the left.
3039 * Merge the new allocation with the left neighbor.
3040 */
3041 --bma->idx;
3042 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3043 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
3044 left.br_blockcount + new->br_blockcount);
3045 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3046
3047 if (bma->cur == NULL) {
3048 rval = xfs_ilog_fext(whichfork);
3049 } else {
3050 rval = 0;
3051 error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff,
3052 left.br_startblock, left.br_blockcount,
3053 &i);
3054 if (error)
3055 goto done;
3056 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3057 error = xfs_bmbt_update(bma->cur, left.br_startoff,
3058 left.br_startblock,
3059 left.br_blockcount +
3060 new->br_blockcount,
3061 left.br_state);
3062 if (error)
3063 goto done;
3064 }
3065 break;
3066
3067 case BMAP_RIGHT_CONTIG:
3068 /*
3069 * New allocation is contiguous with a real allocation
3070 * on the right.
3071 * Merge the new allocation with the right neighbor.
3072 */
3073 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3074 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx),
3075 new->br_startoff, new->br_startblock,
3076 new->br_blockcount + right.br_blockcount,
3077 right.br_state);
3078 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3079
3080 if (bma->cur == NULL) {
3081 rval = xfs_ilog_fext(whichfork);
3082 } else {
3083 rval = 0;
3084 error = xfs_bmbt_lookup_eq(bma->cur,
3085 right.br_startoff,
3086 right.br_startblock,
3087 right.br_blockcount, &i);
3088 if (error)
3089 goto done;
3090 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3091 error = xfs_bmbt_update(bma->cur, new->br_startoff,
3092 new->br_startblock,
3093 new->br_blockcount +
3094 right.br_blockcount,
3095 right.br_state);
3096 if (error)
3097 goto done;
3098 }
3099 break;
3100
3101 case 0:
3102 /*
3103 * New allocation is not contiguous with another
3104 * real allocation.
3105 * Insert a new entry.
3106 */
3107 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
3108 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
3109 XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1);
3110 if (bma->cur == NULL) {
3111 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
3112 } else {
3113 rval = XFS_ILOG_CORE;
3114 error = xfs_bmbt_lookup_eq(bma->cur,
3115 new->br_startoff,
3116 new->br_startblock,
3117 new->br_blockcount, &i);
3118 if (error)
3119 goto done;
3120 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
3121 bma->cur->bc_rec.b.br_state = new->br_state;
3122 error = xfs_btree_insert(bma->cur, &i);
3123 if (error)
3124 goto done;
3125 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3126 }
3127 break;
3128 }
3129
3130 /* convert to a btree if necessary */
3131 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
3132 int tmp_logflags; /* partial log flag return val */
3133
3134 ASSERT(bma->cur == NULL);
3135 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
3136 bma->firstblock, bma->flist, &bma->cur,
3137 0, &tmp_logflags, whichfork);
3138 bma->logflags |= tmp_logflags;
3139 if (error)
3140 goto done;
3141 }
3142
3143 /* clear out the allocated field, done with it now in any case. */
3144 if (bma->cur)
3145 bma->cur->bc_private.b.allocated = 0;
3146
3147 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
3148 done:
3149 bma->logflags |= rval;
3150 return error;
3151 }
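
/*
 * Editor's note: the four contiguity cases above have a simple net
 * effect on the fork's record count. The sketch below is illustrative,
 * not part of xfs_bmap.c.
 */
#if 0
static int nextents_delta(int left_contig, int right_contig)
{
	if (left_contig && right_contig)
		return -1;	/* left + new + right collapse into one */
	if (left_contig || right_contig)
		return 0;	/* new is absorbed by one neighbor */
	return 1;		/* standalone record inserted */
}
#endif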
3152
3153 /*
3154 * Functions used in the extent read, allocate and remove paths
3155 */
3156
3157 /*
3158 * Adjust the size of the new extent based on di_extsize and rt extsize.
3159 */
3160 int
3161 xfs_bmap_extsize_align(
3162 xfs_mount_t *mp,
3163 xfs_bmbt_irec_t *gotp, /* next extent pointer */
3164 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
3165 xfs_extlen_t extsz, /* align to this extent size */
3166 int rt, /* is this a realtime inode? */
3167 int eof, /* is extent at end-of-file? */
3168 int delay, /* creating delalloc extent? */
3169 int convert, /* overwriting unwritten extent? */
3170 xfs_fileoff_t *offp, /* in/out: aligned offset */
3171 xfs_extlen_t *lenp) /* in/out: aligned length */
3172 {
3173 xfs_fileoff_t orig_off; /* original offset */
3174 xfs_extlen_t orig_alen; /* original length */
3175 xfs_fileoff_t orig_end; /* original off+len */
3176 xfs_fileoff_t nexto; /* next file offset */
3177 xfs_fileoff_t prevo; /* previous file offset */
3178 xfs_fileoff_t align_off; /* temp for offset */
3179 xfs_extlen_t align_alen; /* temp for length */
3180 xfs_extlen_t temp; /* temp for calculations */
3181
3182 if (convert)
3183 return 0;
3184
3185 orig_off = align_off = *offp;
3186 orig_alen = align_alen = *lenp;
3187 orig_end = orig_off + orig_alen;
3188
3189 /*
3190 * If this request overlaps an existing extent, then don't
3191 * attempt to perform any additional alignment.
3192 */
3193 if (!delay && !eof &&
3194 (orig_off >= gotp->br_startoff) &&
3195 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
3196 return 0;
3197 }
3198
3199 /*
3200 * If the file offset is unaligned vs. the extent size
3201 * we need to align it. This will be possible unless
3202 * the file was previously written with a kernel that didn't
3203 * perform this alignment, or if a truncate shot us in the
3204 * foot.
3205 */
3206 temp = do_mod(orig_off, extsz);
3207 if (temp) {
3208 align_alen += temp;
3209 align_off -= temp;
3210 }
3211
3212 /* Same adjustment for the end of the requested area. */
3213 temp = (align_alen % extsz);
3214 if (temp)
3215 align_alen += extsz - temp;
3216
3217 /*
3218 * For large extent hint sizes, the aligned extent might be larger than
3219 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
3220 * the length back under MAXEXTLEN. The outer allocation loops handle
3221 * short allocation just fine, so it is safe to do this. We only want to
3222 * do it when we are forced to, though, because it means more allocation
3223 * operations are required.
3224 */
3225 while (align_alen > MAXEXTLEN)
3226 align_alen -= extsz;
3227 ASSERT(align_alen <= MAXEXTLEN);
3228
3229 /*
3230 * If the previous block overlaps with this proposed allocation
3231 * then move the start forward without adjusting the length.
3232 */
3233 if (prevp->br_startoff != NULLFILEOFF) {
3234 if (prevp->br_startblock == HOLESTARTBLOCK)
3235 prevo = prevp->br_startoff;
3236 else
3237 prevo = prevp->br_startoff + prevp->br_blockcount;
3238 } else
3239 prevo = 0;
3240 if (align_off != orig_off && align_off < prevo)
3241 align_off = prevo;
3242 /*
3243 * If the next block overlaps with this proposed allocation
3244 * then move the start back without adjusting the length,
3245 * but not before offset 0.
3246 * This may of course make the start overlap previous block,
3247 * and if we hit the offset 0 limit then the next block
3248 * can still overlap too.
3249 */
3250 if (!eof && gotp->br_startoff != NULLFILEOFF) {
3251 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
3252 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
3253 nexto = gotp->br_startoff + gotp->br_blockcount;
3254 else
3255 nexto = gotp->br_startoff;
3256 } else
3257 nexto = NULLFILEOFF;
3258 if (!eof &&
3259 align_off + align_alen != orig_end &&
3260 align_off + align_alen > nexto)
3261 align_off = nexto > align_alen ? nexto - align_alen : 0;
3262 /*
3263 * If we're now overlapping the next or previous extent that
3264 * means we can't fit an extsz piece in this hole. Just move
3265 * the start forward to the first valid spot and set
3266 * the length so we hit the end.
3267 */
3268 if (align_off != orig_off && align_off < prevo)
3269 align_off = prevo;
3270 if (align_off + align_alen != orig_end &&
3271 align_off + align_alen > nexto &&
3272 nexto != NULLFILEOFF) {
3273 ASSERT(nexto > prevo);
3274 align_alen = nexto - align_off;
3275 }
3276
3277 /*
3278 * If realtime, and the result isn't a multiple of the realtime
3279 * extent size we need to remove blocks until it is.
3280 */
3281 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
3282 /*
3283 * We're not covering the original request, or
3284 * we won't be able to once we fix the length.
3285 */
3286 if (orig_off < align_off ||
3287 orig_end > align_off + align_alen ||
3288 align_alen - temp < orig_alen)
3289 return -EINVAL;
3290 /*
3291 * Try to fix it by moving the start up.
3292 */
3293 if (align_off + temp <= orig_off) {
3294 align_alen -= temp;
3295 align_off += temp;
3296 }
3297 /*
3298 * Try to fix it by moving the end in.
3299 */
3300 else if (align_off + align_alen - temp >= orig_end)
3301 align_alen -= temp;
3302 /*
3303 * Set the start to the minimum then trim the length.
3304 */
3305 else {
3306 align_alen -= orig_off - align_off;
3307 align_off = orig_off;
3308 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3309 }
3310 /*
3311 * Result doesn't cover the request, fail it.
3312 */
3313 if (orig_off < align_off || orig_end > align_off + align_alen)
3314 return -EINVAL;
3315 } else {
3316 ASSERT(orig_off >= align_off);
3317 /* see MAXEXTLEN handling above */
3318 ASSERT(orig_end <= align_off + align_alen ||
3319 align_alen + extsz > MAXEXTLEN);
3320 }
3321
3322 #ifdef DEBUG
3323 if (!eof && gotp->br_startoff != NULLFILEOFF)
3324 ASSERT(align_off + align_alen <= gotp->br_startoff);
3325 if (prevp->br_startoff != NULLFILEOFF)
3326 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3327 #endif
3328
3329 *lenp = align_alen;
3330 *offp = align_off;
3331 return 0;
3332 }
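
/*
 * Editor's note: the core arithmetic of the function above, before the
 * neighbor and realtime trimming: round the start down and the length
 * up to an extent-size multiple, then shrink by whole units to stay
 * under the maximum extent length. Illustrative userspace sketch, not
 * part of xfs_bmap.c; assumes extsz <= maxlen.
 */
#if 0
#include <stdint.h>

static void align_request(uint64_t *off, uint64_t *len,
			  uint64_t extsz, uint64_t maxlen)
{
	uint64_t head = *off % extsz;	/* misalignment at the start */
	uint64_t tail;

	*off -= head;
	*len += head;
	tail = *len % extsz;
	if (tail)
		*len += extsz - tail;	/* round the end up */
	while (*len > maxlen)
		*len -= extsz;		/* mirror the MAXEXTLEN loop above */
}
#endif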
3333
3334 #define XFS_ALLOC_GAP_UNITS 4
3335
3336 void
3337 xfs_bmap_adjacent(
3338 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3339 {
3340 xfs_fsblock_t adjust; /* adjustment to block numbers */
3341 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3342 xfs_mount_t *mp; /* mount point structure */
3343 int nullfb; /* true if ap->firstblock isn't set */
3344 int rt; /* true if inode is realtime */
3345
3346 #define ISVALID(x,y) \
3347 (rt ? \
3348 (x) < mp->m_sb.sb_rblocks : \
3349 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3350 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3351 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3352
3353 mp = ap->ip->i_mount;
3354 nullfb = *ap->firstblock == NULLFSBLOCK;
3355 rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
3356 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3357 /*
3358 * If allocating at eof, and there's a previous real block,
3359 * try to use its last block as our starting point.
3360 */
3361 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3362 !isnullstartblock(ap->prev.br_startblock) &&
3363 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3364 ap->prev.br_startblock)) {
3365 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3366 /*
3367 * Adjust for the gap between prevp and us.
3368 */
3369 adjust = ap->offset -
3370 (ap->prev.br_startoff + ap->prev.br_blockcount);
3371 if (adjust &&
3372 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3373 ap->blkno += adjust;
3374 }
3375 /*
3376 * If not at eof, then compare the two neighbor blocks.
3377 * Figure out whether either one gives us a good starting point,
3378 * and pick the better one.
3379 */
3380 else if (!ap->eof) {
3381 xfs_fsblock_t gotbno; /* right side block number */
3382 xfs_fsblock_t gotdiff = 0; /* right side difference */
3383 xfs_fsblock_t prevbno; /* left side block number */
3384 xfs_fsblock_t prevdiff = 0; /* left side difference */
3385
3386 /*
3387 * If there's a previous (left) block, select a requested
3388 * start block based on it.
3389 */
3390 if (ap->prev.br_startoff != NULLFILEOFF &&
3391 !isnullstartblock(ap->prev.br_startblock) &&
3392 (prevbno = ap->prev.br_startblock +
3393 ap->prev.br_blockcount) &&
3394 ISVALID(prevbno, ap->prev.br_startblock)) {
3395 /*
3396 * Calculate gap to end of previous block.
3397 */
3398 adjust = prevdiff = ap->offset -
3399 (ap->prev.br_startoff +
3400 ap->prev.br_blockcount);
3401 /*
3402 * Figure the startblock based on the previous block's
3403 * end and the gap size.
3404 * Heuristic!
3405 * If the gap is large relative to the piece we're
3406 * allocating, or using it gives us an invalid block
3407 * number, then just use the end of the previous block.
3408 */
3409 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3410 ISVALID(prevbno + prevdiff,
3411 ap->prev.br_startblock))
3412 prevbno += adjust;
3413 else
3414 prevdiff += adjust;
3415 /*
3416 * If the firstblock forbids it, can't use it,
3417 * must use default.
3418 */
3419 if (!rt && !nullfb &&
3420 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3421 prevbno = NULLFSBLOCK;
3422 }
3423 /*
3424 * No previous block or can't follow it, just default.
3425 */
3426 else
3427 prevbno = NULLFSBLOCK;
3428 /*
3429 * If there's a following (right) block, select a requested
3430 * start block based on it.
3431 */
3432 if (!isnullstartblock(ap->got.br_startblock)) {
3433 /*
3434 * Calculate gap to start of next block.
3435 */
3436 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3437 /*
3438 * Figure the startblock based on the next block's
3439 * start and the gap size.
3440 */
3441 gotbno = ap->got.br_startblock;
3442 /*
3443 * Heuristic!
3444 * If the gap is large relative to the piece we're
3445 * allocating, or using it gives us an invalid block
3446 * number, then just use the start of the next block
3447 * offset by our length.
3448 */
3449 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3450 ISVALID(gotbno - gotdiff, gotbno))
3451 gotbno -= adjust;
3452 else if (ISVALID(gotbno - ap->length, gotbno)) {
3453 gotbno -= ap->length;
3454 gotdiff += adjust - ap->length;
3455 } else
3456 gotdiff += adjust;
3457 /*
3458 * If the firstblock forbids it, can't use it,
3459 * must use default.
3460 */
3461 if (!rt && !nullfb &&
3462 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3463 gotbno = NULLFSBLOCK;
3464 }
3465 /*
3466 * No next block, just default.
3467 */
3468 else
3469 gotbno = NULLFSBLOCK;
3470 /*
3471 * If both valid, pick the better one, else the only good
3472 * one, else ap->blkno is already set (to 0 or the inode block).
3473 */
3474 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3475 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3476 else if (prevbno != NULLFSBLOCK)
3477 ap->blkno = prevbno;
3478 else if (gotbno != NULLFSBLOCK)
3479 ap->blkno = gotbno;
3480 }
3481 #undef ISVALID
3482 }
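/*
 * A worked example of the gap heuristic above, with made-up numbers:
 * suppose the previous extent maps file offset 100 for 10 blocks at
 * fsbno 5000, and we are allocating at file offset 113.  The gap is
 * 113 - (100 + 10) = 3 blocks, so the requested start becomes
 * 5000 + 10 + 3 = 5013 (assuming the gap is small enough relative to
 * XFS_ALLOC_GAP_UNITS times the allocation length), keeping the new
 * blocks the same distance from the previous extent on disk as they
 * are in the file.  For a large gap we fall back to 5010, the block
 * just past the previous extent.
 */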
3483
3484 static int
3485 xfs_bmap_longest_free_extent(
3486 struct xfs_trans *tp,
3487 xfs_agnumber_t ag,
3488 xfs_extlen_t *blen,
3489 int *notinit)
3490 {
3491 struct xfs_mount *mp = tp->t_mountp;
3492 struct xfs_perag *pag;
3493 xfs_extlen_t longest;
3494 int error = 0;
3495
3496 pag = xfs_perag_get(mp, ag);
3497 if (!pag->pagf_init) {
3498 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3499 if (error)
3500 goto out;
3501
3502 if (!pag->pagf_init) {
3503 *notinit = 1;
3504 goto out;
3505 }
3506 }
3507
3508 longest = xfs_alloc_longest_free_extent(mp, pag,
3509 xfs_alloc_min_freelist(mp, pag));
3510 if (*blen < longest)
3511 *blen = longest;
3512
3513 out:
3514 xfs_perag_put(pag);
3515 return error;
3516 }
3517
3518 static void
3519 xfs_bmap_select_minlen(
3520 struct xfs_bmalloca *ap,
3521 struct xfs_alloc_arg *args,
3522 xfs_extlen_t *blen,
3523 int notinit)
3524 {
3525 if (notinit || *blen < ap->minlen) {
3526 /*
3527 * Since the AGF was only locked with XFS_ALLOC_FLAG_TRYLOCK
3528 * above, it is possible that there is space for this request.
3529 */
3530 args->minlen = ap->minlen;
3531 } else if (*blen < args->maxlen) {
3532 /*
3533 * If the best seen length is less than the request length,
3534 * use the best as the minimum.
3535 */
3536 args->minlen = *blen;
3537 } else {
3538 /*
3539 * Otherwise we've seen an extent as big as maxlen, use that
3540 * as the minimum.
3541 */
3542 args->minlen = args->maxlen;
3543 }
3544 }
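/*
 * A compact summary of the selection above, assuming *blen was primed
 * by the preceding AG scan:
 *
 *	notinit or *blen < ap->minlen	-> args->minlen = ap->minlen
 *	ap->minlen <= *blen < maxlen	-> args->minlen = *blen
 *	*blen >= args->maxlen		-> args->minlen = args->maxlen
 */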
3545
3546 STATIC int
3547 xfs_bmap_btalloc_nullfb(
3548 struct xfs_bmalloca *ap,
3549 struct xfs_alloc_arg *args,
3550 xfs_extlen_t *blen)
3551 {
3552 struct xfs_mount *mp = ap->ip->i_mount;
3553 xfs_agnumber_t ag, startag;
3554 int notinit = 0;
3555 int error;
3556
3557 args->type = XFS_ALLOCTYPE_START_BNO;
3558 args->total = ap->total;
3559
3560 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3561 if (startag == NULLAGNUMBER)
3562 startag = ag = 0;
3563
3564 while (*blen < args->maxlen) {
3565 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3566 &notinit);
3567 if (error)
3568 return error;
3569
3570 if (++ag == mp->m_sb.sb_agcount)
3571 ag = 0;
3572 if (ag == startag)
3573 break;
3574 }
3575
3576 xfs_bmap_select_minlen(ap, args, blen, notinit);
3577 return 0;
3578 }
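/*
 * The loop above is a simple round-robin scan over every AG starting
 * at the hinted one; on a hypothetical four-AG filesystem with
 * startag = 2 the visiting order is 2, 3, 0, 1, and the scan stops
 * early once *blen reaches args->maxlen.
 */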
3579
3580 STATIC int
3581 xfs_bmap_btalloc_filestreams(
3582 struct xfs_bmalloca *ap,
3583 struct xfs_alloc_arg *args,
3584 xfs_extlen_t *blen)
3585 {
3586 struct xfs_mount *mp = ap->ip->i_mount;
3587 xfs_agnumber_t ag;
3588 int notinit = 0;
3589 int error;
3590
3591 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3592 args->total = ap->total;
3593
3594 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3595 if (ag == NULLAGNUMBER)
3596 ag = 0;
3597
3598 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
3599 if (error)
3600 return error;
3601
3602 if (*blen < args->maxlen) {
3603 error = xfs_filestream_new_ag(ap, &ag);
3604 if (error)
3605 return error;
3606
3607 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3608 &notinit);
3609 if (error)
3610 return error;
3611
3612 }
3613
3614 xfs_bmap_select_minlen(ap, args, blen, notinit);
3615
3616 /*
3617 * Set the failure fallback case to look in the selected AG, as the
3618 * stream may have moved.
3619 */
3620 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3621 return 0;
3622 }
3623
3624 STATIC int
3625 xfs_bmap_btalloc(
3626 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3627 {
3628 xfs_mount_t *mp; /* mount point structure */
3629 xfs_alloctype_t atype = 0; /* type for allocation routines */
3630 xfs_extlen_t align; /* minimum allocation alignment */
3631 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3632 xfs_agnumber_t ag;
3633 xfs_alloc_arg_t args;
3634 xfs_extlen_t blen;
3635 xfs_extlen_t nextminlen = 0;
3636 int nullfb; /* true if ap->firstblock isn't set */
3637 int isaligned;
3638 int tryagain;
3639 int error;
3640 int stripe_align;
3641
3642 ASSERT(ap->length);
3643
3644 mp = ap->ip->i_mount;
3645
3646 /* stripe alignment for allocation is determined by mount parameters */
3647 stripe_align = 0;
3648 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3649 stripe_align = mp->m_swidth;
3650 else if (mp->m_dalign)
3651 stripe_align = mp->m_dalign;
3652
3653 align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
3654 if (unlikely(align)) {
3655 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3656 align, 0, ap->eof, 0, ap->conv,
3657 &ap->offset, &ap->length);
3658 ASSERT(!error);
3659 ASSERT(ap->length);
3660 }
3661
3662
3663 nullfb = *ap->firstblock == NULLFSBLOCK;
3664 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3665 if (nullfb) {
3666 if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
3667 ag = xfs_filestream_lookup_ag(ap->ip);
3668 ag = (ag != NULLAGNUMBER) ? ag : 0;
3669 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3670 } else {
3671 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3672 }
3673 } else
3674 ap->blkno = *ap->firstblock;
3675
3676 xfs_bmap_adjacent(ap);
3677
3678 /*
3679 * If allowed, use ap->blkno; otherwise must use firstblock since
3680 * it's in the right allocation group.
3681 */
3682 if (!nullfb && XFS_FSB_TO_AGNO(mp, ap->blkno) != fb_agno)
3683 ap->blkno = *ap->firstblock;
3686 /*
3687 * Normal allocation, done through xfs_alloc_vextent.
3688 */
3689 tryagain = isaligned = 0;
3690 memset(&args, 0, sizeof(args));
3691 args.tp = ap->tp;
3692 args.mp = mp;
3693 args.fsbno = ap->blkno;
3694
3695 /* Trim the allocation back to the maximum an AG can fit. */
3696 args.maxlen = MIN(ap->length, XFS_ALLOC_AG_MAX_USABLE(mp));
3697 args.firstblock = *ap->firstblock;
3698 blen = 0;
3699 if (nullfb) {
3700 /*
3701 * Search for an allocation group with a single extent large
3702 * enough for the request. If one isn't found, then adjust
3703 * the minimum allocation size to the largest space found.
3704 */
3705 if (ap->userdata && xfs_inode_is_filestream(ap->ip))
3706 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3707 else
3708 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3709 if (error)
3710 return error;
3711 } else if (ap->flist->xbf_low) {
3712 if (xfs_inode_is_filestream(ap->ip))
3713 args.type = XFS_ALLOCTYPE_FIRST_AG;
3714 else
3715 args.type = XFS_ALLOCTYPE_START_BNO;
3716 args.total = args.minlen = ap->minlen;
3717 } else {
3718 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3719 args.total = ap->total;
3720 args.minlen = ap->minlen;
3721 }
3722 /* apply extent size hints if obtained earlier */
3723 if (unlikely(align)) {
3724 args.prod = align;
3725 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
3726 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3727 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3728 args.prod = 1;
3729 args.mod = 0;
3730 } else {
3731 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3732 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
3733 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3734 }
3735 /*
3736 * If we are not low on available data blocks, and the
3737 * underlying logical volume is striped, and the file
3738 * offset is zero, then try to allocate data blocks on
3739 * a stripe unit boundary.
3740 * NOTE: ap->aeof is only set if the allocation length
3741 * is >= the stripe unit and the allocation offset is
3742 * at the end of file.
3743 */
3744 if (!ap->flist->xbf_low && ap->aeof) {
3745 if (!ap->offset) {
3746 args.alignment = stripe_align;
3747 atype = args.type;
3748 isaligned = 1;
3749 /*
3750 * Adjust for alignment
3751 */
3752 if (blen > args.alignment && blen <= args.maxlen)
3753 args.minlen = blen - args.alignment;
3754 args.minalignslop = 0;
3755 } else {
3756 /*
3757 * First try an exact bno allocation.
3758 * If it fails then do a near or start bno
3759 * allocation with alignment turned on.
3760 */
3761 atype = args.type;
3762 tryagain = 1;
3763 args.type = XFS_ALLOCTYPE_THIS_BNO;
3764 args.alignment = 1;
3765 /*
3766 * Compute the minlen+alignment for the
3767 * next case. Set slop so that the value
3768 * of minlen+alignment+slop doesn't go up
3769 * between the calls.
3770 */
3771 if (blen > stripe_align && blen <= args.maxlen)
3772 nextminlen = blen - stripe_align;
3773 else
3774 nextminlen = args.minlen;
3775 if (nextminlen + stripe_align > args.minlen + 1)
3776 args.minalignslop =
3777 nextminlen + stripe_align -
3778 args.minlen - 1;
3779 else
3780 args.minalignslop = 0;
3781 }
3782 } else {
3783 args.alignment = 1;
3784 args.minalignslop = 0;
3785 }
3786 args.minleft = ap->minleft;
3787 args.wasdel = ap->wasdel;
3788 args.isfl = 0;
3789 args.userdata = ap->userdata;
3790 if (ap->userdata & XFS_ALLOC_USERDATA_ZERO)
3791 args.ip = ap->ip;
3792
3793 error = xfs_alloc_vextent(&args);
3794 if (error)
3795 return error;
3796
3797 if (tryagain && args.fsbno == NULLFSBLOCK) {
3798 /*
3799 * Exact allocation failed. Now try with alignment
3800 * turned on.
3801 */
3802 args.type = atype;
3803 args.fsbno = ap->blkno;
3804 args.alignment = stripe_align;
3805 args.minlen = nextminlen;
3806 args.minalignslop = 0;
3807 isaligned = 1;
3808 if ((error = xfs_alloc_vextent(&args)))
3809 return error;
3810 }
3811 if (isaligned && args.fsbno == NULLFSBLOCK) {
3812 /*
3813 * Allocation failed, so turn off alignment and
3814 * try again.
3815 */
3816 args.type = atype;
3817 args.fsbno = ap->blkno;
3818 args.alignment = 0;
3819 if ((error = xfs_alloc_vextent(&args)))
3820 return error;
3821 }
3822 if (args.fsbno == NULLFSBLOCK && nullfb &&
3823 args.minlen > ap->minlen) {
3824 args.minlen = ap->minlen;
3825 args.type = XFS_ALLOCTYPE_START_BNO;
3826 args.fsbno = ap->blkno;
3827 if ((error = xfs_alloc_vextent(&args)))
3828 return error;
3829 }
3830 if (args.fsbno == NULLFSBLOCK && nullfb) {
3831 args.fsbno = 0;
3832 args.type = XFS_ALLOCTYPE_FIRST_AG;
3833 args.total = ap->minlen;
3834 args.minleft = 0;
3835 if ((error = xfs_alloc_vextent(&args)))
3836 return error;
3837 ap->flist->xbf_low = 1;
3838 }
3839 if (args.fsbno != NULLFSBLOCK) {
3840 /*
3841 * check the allocation happened at the same or higher AG than
3842 * the first block that was allocated.
3843 */
3844 ASSERT(*ap->firstblock == NULLFSBLOCK ||
3845 XFS_FSB_TO_AGNO(mp, *ap->firstblock) ==
3846 XFS_FSB_TO_AGNO(mp, args.fsbno) ||
3847 (ap->flist->xbf_low &&
3848 XFS_FSB_TO_AGNO(mp, *ap->firstblock) <
3849 XFS_FSB_TO_AGNO(mp, args.fsbno)));
3850
3851 ap->blkno = args.fsbno;
3852 if (*ap->firstblock == NULLFSBLOCK)
3853 *ap->firstblock = args.fsbno;
3854 ASSERT(nullfb || fb_agno == args.agno ||
3855 (ap->flist->xbf_low && fb_agno < args.agno));
3856 ap->length = args.len;
3857 ap->ip->i_d.di_nblocks += args.len;
3858 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3859 if (ap->wasdel)
3860 ap->ip->i_delayed_blks -= args.len;
3861 /*
3862 * Adjust the disk quota also. This was reserved
3863 * earlier.
3864 */
3865 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3866 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
3867 XFS_TRANS_DQ_BCOUNT,
3868 (long) args.len);
3869 } else {
3870 ap->blkno = NULLFSBLOCK;
3871 ap->length = 0;
3872 }
3873 return 0;
3874 }
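/*
 * To summarise the retry ladder above: a failed THIS_BNO (exact)
 * attempt falls back to the saved atype with stripe alignment, then
 * to the same type with alignment turned off, then (for the nullfb
 * case) to a START_BNO attempt with the caller's smaller ap->minlen,
 * and finally to FIRST_AG with minleft zeroed; at that point xbf_low
 * is set to record that later allocations in this transaction may
 * land in AGs higher than the first block's AG.
 */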
3875
3876 /*
3877 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
3878 * It figures out where to ask the underlying allocator to put the new extent.
3879 */
3880 STATIC int
3881 xfs_bmap_alloc(
3882 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3883 {
3884 if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata)
3885 return xfs_bmap_rtalloc(ap);
3886 return xfs_bmap_btalloc(ap);
3887 }
3888
3889 /*
3890 * Trim the returned map to the required bounds
3891 */
3892 STATIC void
3893 xfs_bmapi_trim_map(
3894 struct xfs_bmbt_irec *mval,
3895 struct xfs_bmbt_irec *got,
3896 xfs_fileoff_t *bno,
3897 xfs_filblks_t len,
3898 xfs_fileoff_t obno,
3899 xfs_fileoff_t end,
3900 int n,
3901 int flags)
3902 {
3903 if ((flags & XFS_BMAPI_ENTIRE) ||
3904 got->br_startoff + got->br_blockcount <= obno) {
3905 *mval = *got;
3906 if (isnullstartblock(got->br_startblock))
3907 mval->br_startblock = DELAYSTARTBLOCK;
3908 return;
3909 }
3910
3911 if (obno > *bno)
3912 *bno = obno;
3913 ASSERT((*bno >= obno) || (n == 0));
3914 ASSERT(*bno < end);
3915 mval->br_startoff = *bno;
3916 if (isnullstartblock(got->br_startblock))
3917 mval->br_startblock = DELAYSTARTBLOCK;
3918 else
3919 mval->br_startblock = got->br_startblock +
3920 (*bno - got->br_startoff);
3921 /*
3922 * Return the minimum of what we got and what we asked for as the
3923 * length. We can use the len variable here because it is only
3924 * modified further on, and we may have been through that path
3925 * already if the first part of the allocation didn't overlap
3926 * what was asked for.
3927 */
3928 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3929 got->br_blockcount - (*bno - got->br_startoff));
3930 mval->br_state = got->br_state;
3931 ASSERT(mval->br_blockcount <= len);
3932 return;
3933 }
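/*
 * A worked trim example with made-up values: with got covering file
 * offsets [10, 50) at startblock 1000 and a request for [20, 28)
 * (*bno = 20, end = 28), the code above returns br_startoff = 20,
 * br_startblock = 1000 + (20 - 10) = 1010 and br_blockcount =
 * min(28 - 20, 40 - 10) = 8.  With XFS_BMAPI_ENTIRE set, the whole
 * of got would be returned untrimmed instead.
 */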
3934
3935 /*
3936 * Update and validate the extent map to return
3937 */
3938 STATIC void
3939 xfs_bmapi_update_map(
3940 struct xfs_bmbt_irec **map,
3941 xfs_fileoff_t *bno,
3942 xfs_filblks_t *len,
3943 xfs_fileoff_t obno,
3944 xfs_fileoff_t end,
3945 int *n,
3946 int flags)
3947 {
3948 xfs_bmbt_irec_t *mval = *map;
3949
3950 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3951 ((mval->br_startoff + mval->br_blockcount) <= end));
3952 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3953 (mval->br_startoff < obno));
3954
3955 *bno = mval->br_startoff + mval->br_blockcount;
3956 *len = end - *bno;
3957 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3958 /* update previous map with new information */
3959 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3960 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3961 ASSERT(mval->br_state == mval[-1].br_state);
3962 mval[-1].br_blockcount = mval->br_blockcount;
3963 mval[-1].br_state = mval->br_state;
3964 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3965 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3966 mval[-1].br_startblock != HOLESTARTBLOCK &&
3967 mval->br_startblock == mval[-1].br_startblock +
3968 mval[-1].br_blockcount &&
3969 ((flags & XFS_BMAPI_IGSTATE) ||
3970 mval[-1].br_state == mval->br_state)) {
3971 ASSERT(mval->br_startoff ==
3972 mval[-1].br_startoff + mval[-1].br_blockcount);
3973 mval[-1].br_blockcount += mval->br_blockcount;
3974 } else if (*n > 0 &&
3975 mval->br_startblock == DELAYSTARTBLOCK &&
3976 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3977 mval->br_startoff ==
3978 mval[-1].br_startoff + mval[-1].br_blockcount) {
3979 mval[-1].br_blockcount += mval->br_blockcount;
3980 mval[-1].br_state = mval->br_state;
3981 } else if (!((*n == 0) &&
3982 ((mval->br_startoff + mval->br_blockcount) <=
3983 obno))) {
3984 mval++;
3985 (*n)++;
3986 }
3987 *map = mval;
3988 }
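/*
 * A sketch of the merge cases above, with made-up mappings: if the
 * previous mval ends at file offset 20 mapping to fsbno 1020, and the
 * new one starts at exactly that offset and block with the same
 * state, the two are combined into one record instead of consuming
 * another *nmap slot; two adjacent delalloc (DELAYSTARTBLOCK)
 * mappings are merged the same way.
 */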
3989
3990 /*
3991 * Map file blocks to filesystem blocks without allocation.
3992 */
3993 int
3994 xfs_bmapi_read(
3995 struct xfs_inode *ip,
3996 xfs_fileoff_t bno,
3997 xfs_filblks_t len,
3998 struct xfs_bmbt_irec *mval,
3999 int *nmap,
4000 int flags)
4001 {
4002 struct xfs_mount *mp = ip->i_mount;
4003 struct xfs_ifork *ifp;
4004 struct xfs_bmbt_irec got;
4005 struct xfs_bmbt_irec prev;
4006 xfs_fileoff_t obno;
4007 xfs_fileoff_t end;
4008 xfs_extnum_t lastx;
4009 int error;
4010 int eof;
4011 int n = 0;
4012 int whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4013 XFS_ATTR_FORK : XFS_DATA_FORK;
4014
4015 ASSERT(*nmap >= 1);
4016 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
4017 XFS_BMAPI_IGSTATE)));
4018 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
4019
4020 if (unlikely(XFS_TEST_ERROR(
4021 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4022 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4023 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4024 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
4025 return -EFSCORRUPTED;
4026 }
4027
4028 if (XFS_FORCED_SHUTDOWN(mp))
4029 return -EIO;
4030
4031 XFS_STATS_INC(mp, xs_blk_mapr);
4032
4033 ifp = XFS_IFORK_PTR(ip, whichfork);
4034
4035 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4036 error = xfs_iread_extents(NULL, ip, whichfork);
4037 if (error)
4038 return error;
4039 }
4040
4041 xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, &prev);
4042 end = bno + len;
4043 obno = bno;
4044
4045 while (bno < end && n < *nmap) {
4046 /* Reading past eof, act as though there's a hole up to end. */
4047 if (eof)
4048 got.br_startoff = end;
4049 if (got.br_startoff > bno) {
4050 /* Reading in a hole. */
4051 mval->br_startoff = bno;
4052 mval->br_startblock = HOLESTARTBLOCK;
4053 mval->br_blockcount =
4054 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4055 mval->br_state = XFS_EXT_NORM;
4056 bno += mval->br_blockcount;
4057 len -= mval->br_blockcount;
4058 mval++;
4059 n++;
4060 continue;
4061 }
4062
4063 /* set up the extent map to return. */
4064 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4065 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4066
4067 /* If we're done, stop now. */
4068 if (bno >= end || n >= *nmap)
4069 break;
4070
4071 /* Else go on to the next record. */
4072 if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
4073 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
4074 else
4075 eof = 1;
4076 }
4077 *nmap = n;
4078 return 0;
4079 }
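/*
 * A minimal caller sketch; offset_fsb and count_fsb are placeholders
 * for a range the caller has already converted to filesystem blocks:
 *
 *	struct xfs_bmbt_irec	map[2];
 *	int			nmap = 2;
 *	int			error;
 *
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);
 *	error = xfs_bmapi_read(ip, offset_fsb, count_fsb, map, &nmap, 0);
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *
 * On success nmap holds the number of mappings filled in; holes come
 * back with br_startblock == HOLESTARTBLOCK and delalloc ranges with
 * br_startblock == DELAYSTARTBLOCK.
 */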
4080
4081 STATIC int
4082 xfs_bmapi_reserve_delalloc(
4083 struct xfs_inode *ip,
4084 xfs_fileoff_t aoff,
4085 xfs_filblks_t len,
4086 struct xfs_bmbt_irec *got,
4087 struct xfs_bmbt_irec *prev,
4088 xfs_extnum_t *lastx,
4089 int eof)
4090 {
4091 struct xfs_mount *mp = ip->i_mount;
4092 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
4093 xfs_extlen_t alen;
4094 xfs_extlen_t indlen;
4095 char rt = XFS_IS_REALTIME_INODE(ip);
4096 xfs_extlen_t extsz;
4097 int error;
4098
4099 alen = XFS_FILBLKS_MIN(len, MAXEXTLEN);
4100 if (!eof)
4101 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4102
4103 /* Figure out the extent size, adjust alen */
4104 extsz = xfs_get_extsz_hint(ip);
4105 if (extsz) {
4106 error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof,
4107 1, 0, &aoff, &alen);
4108 ASSERT(!error);
4109 }
4110
4111 if (rt)
4112 extsz = alen / mp->m_sb.sb_rextsize;
4113
4114 /*
4115 * Make a transaction-less quota reservation for delayed allocation
4116 * blocks. This number gets adjusted later. If the reservation fails
4117 * we can return straight away, as no blocks have been allocated yet.
4118 */
4119 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
4120 rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4121 if (error)
4122 return error;
4123
4124 /*
4125 * Adjust the superblock counters for alen and indlen separately,
4126 * since they can come from different pools (rt extents vs. data blocks).
4127 */
4128 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4129 ASSERT(indlen > 0);
4130
4131 if (rt) {
4132 error = xfs_mod_frextents(mp, -((int64_t)extsz));
4133 } else {
4134 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4135 }
4136
4137 if (error)
4138 goto out_unreserve_quota;
4139
4140 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4141 if (error)
4142 goto out_unreserve_blocks;
4143
4144
4145 ip->i_delayed_blks += alen;
4146
4147 got->br_startoff = aoff;
4148 got->br_startblock = nullstartblock(indlen);
4149 got->br_blockcount = alen;
4150 got->br_state = XFS_EXT_NORM;
4151 xfs_bmap_add_extent_hole_delay(ip, lastx, got);
4152
4153 /*
4154 * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
4155 * might have merged it into one of the neighbouring ones.
4156 */
4157 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
4158
4159 ASSERT(got->br_startoff <= aoff);
4160 ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
4161 ASSERT(isnullstartblock(got->br_startblock));
4162 ASSERT(got->br_state == XFS_EXT_NORM);
4163 return 0;
4164
4165 out_unreserve_blocks:
4166 if (rt)
4167 xfs_mod_frextents(mp, extsz);
4168 else
4169 xfs_mod_fdblocks(mp, alen, false);
4170 out_unreserve_quota:
4171 if (XFS_IS_QUOTA_ON(mp))
4172 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
4173 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4174 return error;
4175 }
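/*
 * The reservation above is taken in three steps -- quota first, then
 * the data blocks (frextents for realtime files, fdblocks otherwise),
 * then fdblocks for the worst-case indirect blocks -- and is unwound
 * in the reverse order on failure, so a failed indlen reservation
 * leaves neither data blocks nor quota reserved.
 */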
4176
4177 /*
4178 * Map file blocks to filesystem blocks, adding delayed allocations as needed.
4179 */
4180 int
4181 xfs_bmapi_delay(
4182 struct xfs_inode *ip, /* incore inode */
4183 xfs_fileoff_t bno, /* starting file offs. mapped */
4184 xfs_filblks_t len, /* length to map in file */
4185 struct xfs_bmbt_irec *mval, /* output: map values */
4186 int *nmap, /* i/o: mval size/count */
4187 int flags) /* XFS_BMAPI_... */
4188 {
4189 struct xfs_mount *mp = ip->i_mount;
4190 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
4191 struct xfs_bmbt_irec got; /* current file extent record */
4192 struct xfs_bmbt_irec prev; /* previous file extent record */
4193 xfs_fileoff_t obno; /* old block number (offset) */
4194 xfs_fileoff_t end; /* end of mapped file region */
4195 xfs_extnum_t lastx; /* last useful extent number */
4196 int eof; /* we've hit the end of extents */
4197 int n = 0; /* current extent index */
4198 int error = 0;
4199
4200 ASSERT(*nmap >= 1);
4201 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4202 ASSERT(!(flags & ~XFS_BMAPI_ENTIRE));
4203 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4204
4205 if (unlikely(XFS_TEST_ERROR(
4206 (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
4207 XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
4208 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4209 XFS_ERROR_REPORT("xfs_bmapi_delay", XFS_ERRLEVEL_LOW, mp);
4210 return -EFSCORRUPTED;
4211 }
4212
4213 if (XFS_FORCED_SHUTDOWN(mp))
4214 return -EIO;
4215
4216 XFS_STATS_INC(mp, xs_blk_mapw);
4217
4218 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4219 error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
4220 if (error)
4221 return error;
4222 }
4223
4224 xfs_bmap_search_extents(ip, bno, XFS_DATA_FORK, &eof, &lastx, &got, &prev);
4225 end = bno + len;
4226 obno = bno;
4227
4228 while (bno < end && n < *nmap) {
4229 if (eof || got.br_startoff > bno) {
4230 error = xfs_bmapi_reserve_delalloc(ip, bno, len, &got,
4231 &prev, &lastx, eof);
4232 if (error) {
4233 if (n == 0) {
4234 *nmap = 0;
4235 return error;
4236 }
4237 break;
4238 }
4239 }
4240
4241 /* set up the extent map to return. */
4242 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4243 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4244
4245 /* If we're done, stop now. */
4246 if (bno >= end || n >= *nmap)
4247 break;
4248
4249 /* Else go on to the next record. */
4250 prev = got;
4251 if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
4252 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
4253 else
4254 eof = 1;
4255 }
4256
4257 *nmap = n;
4258 return 0;
4259 }
4260
4261
4262 static int
4263 xfs_bmapi_allocate(
4264 struct xfs_bmalloca *bma)
4265 {
4266 struct xfs_mount *mp = bma->ip->i_mount;
4267 int whichfork = (bma->flags & XFS_BMAPI_ATTRFORK) ?
4268 XFS_ATTR_FORK : XFS_DATA_FORK;
4269 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4270 int tmp_logflags = 0;
4271 int error;
4272
4273 ASSERT(bma->length > 0);
4274
4275 /*
4276 * For the wasdelay case, we could also just allocate the blocks
4277 * asked for in this bmap call, but that wouldn't be as good.
4278 */
4279 if (bma->wasdel) {
4280 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4281 bma->offset = bma->got.br_startoff;
4282 if (bma->idx != NULLEXTNUM && bma->idx) {
4283 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
4284 &bma->prev);
4285 }
4286 } else {
4287 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4288 if (!bma->eof)
4289 bma->length = XFS_FILBLKS_MIN(bma->length,
4290 bma->got.br_startoff - bma->offset);
4291 }
4292
4293 /*
4294 * Indicate whether this is the first user data in the file, or
4295 * just any user data; and, if it is userdata, whether it needs
4296 * to be initialised to zero during allocation.
4297 */
4298 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4299 bma->userdata = (bma->offset == 0) ?
4300 XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA;
4301 if (bma->flags & XFS_BMAPI_ZERO)
4302 bma->userdata |= XFS_ALLOC_USERDATA_ZERO;
4303 }
4304
4305 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4306
4307 /*
4308 * Only want to do the alignment at the eof if it is userdata and
4309 * allocation length is larger than a stripe unit.
4310 */
4311 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4312 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4313 error = xfs_bmap_isaeof(bma, whichfork);
4314 if (error)
4315 return error;
4316 }
4317
4318 error = xfs_bmap_alloc(bma);
4319 if (error)
4320 return error;
4321
4322 if (bma->flist->xbf_low)
4323 bma->minleft = 0;
4324 if (bma->cur)
4325 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4326 if (bma->blkno == NULLFSBLOCK)
4327 return 0;
4328 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4329 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4330 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4331 bma->cur->bc_private.b.flist = bma->flist;
4332 }
4333 /*
4334 * Bump the number of extents we've allocated
4335 * in this call.
4336 */
4337 bma->nallocs++;
4338
4339 if (bma->cur)
4340 bma->cur->bc_private.b.flags =
4341 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4342
4343 bma->got.br_startoff = bma->offset;
4344 bma->got.br_startblock = bma->blkno;
4345 bma->got.br_blockcount = bma->length;
4346 bma->got.br_state = XFS_EXT_NORM;
4347
4348 /*
4349 * A wasdelay extent has been initialized, so shouldn't be flagged
4350 * as unwritten.
4351 */
4352 if (!bma->wasdel && (bma->flags & XFS_BMAPI_PREALLOC) &&
4353 xfs_sb_version_hasextflgbit(&mp->m_sb))
4354 bma->got.br_state = XFS_EXT_UNWRITTEN;
4355
4356 if (bma->wasdel)
4357 error = xfs_bmap_add_extent_delay_real(bma);
4358 else
4359 error = xfs_bmap_add_extent_hole_real(bma, whichfork);
4360
4361 bma->logflags |= tmp_logflags;
4362 if (error)
4363 return error;
4364
4365 /*
4366 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4367 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4368 * the neighbouring ones.
4369 */
4370 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4371
4372 ASSERT(bma->got.br_startoff <= bma->offset);
4373 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4374 bma->offset + bma->length);
4375 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4376 bma->got.br_state == XFS_EXT_UNWRITTEN);
4377 return 0;
4378 }
4379
4380 STATIC int
4381 xfs_bmapi_convert_unwritten(
4382 struct xfs_bmalloca *bma,
4383 struct xfs_bmbt_irec *mval,
4384 xfs_filblks_t len,
4385 int flags)
4386 {
4387 int whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4388 XFS_ATTR_FORK : XFS_DATA_FORK;
4389 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4390 int tmp_logflags = 0;
4391 int error;
4392
4393 /* check if we need to do unwritten->real conversion */
4394 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4395 (flags & XFS_BMAPI_PREALLOC))
4396 return 0;
4397
4398 /* check if we need to do real->unwritten conversion */
4399 if (mval->br_state == XFS_EXT_NORM &&
4400 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4401 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4402 return 0;
4403
4404 /*
4405 * Modify (by adding) the state flag, if writing.
4406 */
4407 ASSERT(mval->br_blockcount <= len);
4408 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4409 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4410 bma->ip, whichfork);
4411 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4412 bma->cur->bc_private.b.flist = bma->flist;
4413 }
4414 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4415 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4416
4417 /*
4418 * Before insertion into the bmbt, zero the range being converted
4419 * if required.
4420 */
4421 if (flags & XFS_BMAPI_ZERO) {
4422 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4423 mval->br_blockcount);
4424 if (error)
4425 return error;
4426 }
4427
4428 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx,
4429 &bma->cur, mval, bma->firstblock, bma->flist,
4430 &tmp_logflags);
4431 /*
4432 * Log the inode core unconditionally in the unwritten extent conversion
4433 * path because the conversion might not have done so (e.g., if the
4434 * extent count hasn't changed). We need to make sure the inode is dirty
4435 * in the transaction for the sake of fsync(), even if nothing has
4436 * changed, because fsync() will not force the log for this transaction
4437 * unless it sees the inode pinned.
4438 */
4439 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4440 if (error)
4441 return error;
4442
4443 /*
4444 * Update our extent pointer, given that
4445 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4446 * of the neighbouring ones.
4447 */
4448 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4449
4450 /*
4451 * We may have combined previously unwritten space with written space,
4452 * so generate another request.
4453 */
4454 if (mval->br_blockcount < len)
4455 return -EAGAIN;
4456 return 0;
4457 }
4458
4459 /*
4460 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4461 * extent state if necessary. Detailed behaviour is controlled by the flags
4462 * parameter. Only allocates blocks from a single allocation group, to avoid
4463 * locking problems.
4464 *
4465 * The returned value in "firstblock" from the first call in a transaction
4466 * must be remembered and presented to subsequent calls in "firstblock".
4467 * An upper bound for the number of blocks to be allocated is supplied to
4468 * the first call in "total"; if no allocation group has that many free
4469 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
4470 */
4471 int
4472 xfs_bmapi_write(
4473 struct xfs_trans *tp, /* transaction pointer */
4474 struct xfs_inode *ip, /* incore inode */
4475 xfs_fileoff_t bno, /* starting file offs. mapped */
4476 xfs_filblks_t len, /* length to map in file */
4477 int flags, /* XFS_BMAPI_... */
4478 xfs_fsblock_t *firstblock, /* first allocated block
4479 controls a.g. for allocs */
4480 xfs_extlen_t total, /* total blocks needed */
4481 struct xfs_bmbt_irec *mval, /* output: map values */
4482 int *nmap, /* i/o: mval size/count */
4483 struct xfs_bmap_free *flist) /* i/o: list extents to free */
4484 {
4485 struct xfs_mount *mp = ip->i_mount;
4486 struct xfs_ifork *ifp;
4487 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
4488 xfs_fileoff_t end; /* end of mapped file region */
4489 int eof; /* after the end of extents */
4490 int error; /* error return */
4491 int n; /* current extent index */
4492 xfs_fileoff_t obno; /* old block number (offset) */
4493 int whichfork; /* data or attr fork */
4494 char inhole; /* current location is hole in file */
4495 char wasdelay; /* old extent was delayed */
4496
4497 #ifdef DEBUG
4498 xfs_fileoff_t orig_bno; /* original block number value */
4499 int orig_flags; /* original flags arg value */
4500 xfs_filblks_t orig_len; /* original value of len arg */
4501 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4502 int orig_nmap; /* original value of *nmap */
4503
4504 orig_bno = bno;
4505 orig_len = len;
4506 orig_flags = flags;
4507 orig_mval = mval;
4508 orig_nmap = *nmap;
4509 #endif
4510 whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4511 XFS_ATTR_FORK : XFS_DATA_FORK;
4512
4513 ASSERT(*nmap >= 1);
4514 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4515 ASSERT(!(flags & XFS_BMAPI_IGSTATE));
4516 ASSERT(tp != NULL);
4517 ASSERT(len > 0);
4518 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4519 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4520
4521 /* zeroing is currently only supported for data extents, not metadata */
4522 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4523 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4524 /*
4525 * We can allocate unwritten extents or pre-zero allocated blocks,
4526 * but it makes no sense to do both at once. That would result in
4527 * zeroing the unwritten extent twice while it remained an
4528 * unwritten extent.
4529 */
4530 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4531 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4532
4533 if (unlikely(XFS_TEST_ERROR(
4534 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4535 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4536 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4537 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4538 return -EFSCORRUPTED;
4539 }
4540
4541 if (XFS_FORCED_SHUTDOWN(mp))
4542 return -EIO;
4543
4544 ifp = XFS_IFORK_PTR(ip, whichfork);
4545
4546 XFS_STATS_INC(mp, xs_blk_mapw);
4547
4548 if (*firstblock == NULLFSBLOCK) {
4549 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4550 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4551 else
4552 bma.minleft = 1;
4553 } else {
4554 bma.minleft = 0;
4555 }
4556
4557 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4558 error = xfs_iread_extents(tp, ip, whichfork);
4559 if (error)
4560 goto error0;
4561 }
4562
4563 xfs_bmap_search_extents(ip, bno, whichfork, &eof, &bma.idx, &bma.got,
4564 &bma.prev);
4565 n = 0;
4566 end = bno + len;
4567 obno = bno;
4568
4569 bma.tp = tp;
4570 bma.ip = ip;
4571 bma.total = total;
4572 bma.userdata = 0;
4573 bma.flist = flist;
4574 bma.firstblock = firstblock;
4575
4576 while (bno < end && n < *nmap) {
4577 inhole = eof || bma.got.br_startoff > bno;
4578 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
4579
4580 /*
4581 * First, deal with the hole before the allocated space
4582 * that we found, if any.
4583 */
4584 if (inhole || wasdelay) {
4585 bma.eof = eof;
4586 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4587 bma.wasdel = wasdelay;
4588 bma.offset = bno;
4589 bma.flags = flags;
4590
4591 /*
4592 * There's a 32/64 bit type mismatch between the
4593 * allocation length request (which can be 64 bits in
4594 * length) and the bma length request, which is
4595 * xfs_extlen_t and therefore 32 bits. Hence we have to
4596 * check for 32-bit overflows and handle them here.
4597 */
4598 if (len > (xfs_filblks_t)MAXEXTLEN)
4599 bma.length = MAXEXTLEN;
4600 else
4601 bma.length = len;
4602
4603 ASSERT(len > 0);
4604 ASSERT(bma.length > 0);
4605 error = xfs_bmapi_allocate(&bma);
4606 if (error)
4607 goto error0;
4608 if (bma.blkno == NULLFSBLOCK)
4609 break;
4610 }
4611
4612 /* Deal with the allocated space we found. */
4613 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4614 end, n, flags);
4615
4616 /* Execute unwritten extent conversion if necessary */
4617 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4618 if (error == -EAGAIN)
4619 continue;
4620 if (error)
4621 goto error0;
4622
4623 /* update the extent map to return */
4624 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4625
4626 /*
4627 * If we're done, stop now. Stop when we've allocated
4628 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4629 * the transaction may get too big.
4630 */
4631 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4632 break;
4633
4634 /* Else go on to the next record. */
4635 bma.prev = bma.got;
4636 if (++bma.idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) {
4637 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma.idx),
4638 &bma.got);
4639 } else
4640 eof = 1;
4641 }
4642 *nmap = n;
4643
4644 /*
4645 * Transform from btree to extents, give it cur.
4646 */
4647 if (xfs_bmap_wants_extents(ip, whichfork)) {
4648 int tmp_logflags = 0;
4649
4650 ASSERT(bma.cur);
4651 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
4652 &tmp_logflags, whichfork);
4653 bma.logflags |= tmp_logflags;
4654 if (error)
4655 goto error0;
4656 }
4657
4658 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4659 XFS_IFORK_NEXTENTS(ip, whichfork) >
4660 XFS_IFORK_MAXEXT(ip, whichfork));
4661 error = 0;
4662 error0:
4663 /*
4664 * Log everything. Do this after conversion, there's no point in
4665 * logging the extent records if we've converted to btree format.
4666 */
4667 if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
4668 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4669 bma.logflags &= ~xfs_ilog_fext(whichfork);
4670 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
4671 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
4672 bma.logflags &= ~xfs_ilog_fbroot(whichfork);
4673 /*
4674 * Log whatever the flags say, even if error. Otherwise we might miss
4675 * detecting a case where the data is changed, there's an error,
4676 * and it's not logged so we don't shutdown when we should.
4677 */
4678 if (bma.logflags)
4679 xfs_trans_log_inode(tp, ip, bma.logflags);
4680
4681 if (bma.cur) {
4682 if (!error) {
4683 ASSERT(*firstblock == NULLFSBLOCK ||
4684 XFS_FSB_TO_AGNO(mp, *firstblock) ==
4685 XFS_FSB_TO_AGNO(mp,
4686 bma.cur->bc_private.b.firstblock) ||
4687 (flist->xbf_low &&
4688 XFS_FSB_TO_AGNO(mp, *firstblock) <
4689 XFS_FSB_TO_AGNO(mp,
4690 bma.cur->bc_private.b.firstblock)));
4691 *firstblock = bma.cur->bc_private.b.firstblock;
4692 }
4693 xfs_btree_del_cursor(bma.cur,
4694 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4695 }
4696 if (!error)
4697 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4698 orig_nmap, *nmap);
4699 return error;
4700 }
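/*
 * A minimal caller sketch for the firstblock/flist protocol described
 * above; tp, ip, offset_fsb, count_fsb and resblks are placeholders
 * from the caller's own context:
 *
 *	xfs_fsblock_t		firstfsb;
 *	struct xfs_bmap_free	flist;
 *	struct xfs_bmbt_irec	map;
 *	int			nmap = 1;
 *	int			error;
 *
 *	xfs_bmap_init(&flist, &firstfsb);
 *	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, 0,
 *				&firstfsb, resblks, &map, &nmap, &flist);
 *
 * The same firstfsb and flist must be passed back to any further
 * xfs_bmapi_write() calls in the same transaction so that all
 * allocations stay in the same or a higher allocation group.
 */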
4701
4702 /*
4703 * When a delalloc extent is split (e.g., due to a hole punch), the original
4704 * indlen reservation must be shared across the two new extents that are left
4705 * behind.
4706 *
4707 * Given the original reservation and the worst case indlen for the two new
4708 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4709 * reservation fairly across the two new extents. If necessary, steal available
4710 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4711 * ores == 1). The number of stolen blocks is returned. The availability and
4712 * subsequent accounting of stolen blocks is the responsibility of the caller.
4713 */
4714 static xfs_filblks_t
4715 xfs_bmap_split_indlen(
4716 xfs_filblks_t ores, /* original res. */
4717 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4718 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4719 xfs_filblks_t avail) /* stealable blocks */
4720 {
4721 xfs_filblks_t len1 = *indlen1;
4722 xfs_filblks_t len2 = *indlen2;
4723 xfs_filblks_t nres = len1 + len2; /* new total res. */
4724 xfs_filblks_t stolen = 0;
4725
4726 /*
4727 * Steal as many blocks as we can to try and satisfy the worst case
4728 * indlen for both new extents.
4729 */
4730 while (nres > ores && avail) {
4731 nres--;
4732 avail--;
4733 stolen++;
4734 }
4735
4736 /*
4737 * The only blocks available are those reserved for the original
4738 * extent and what we can steal from the extent being removed.
4739 * If this still isn't enough to satisfy the combined
4740 * requirements for the two new extents, skim blocks off of each
4741 * of the new reservations until they match what is available.
4742 */
4743 while (nres > ores) {
4744 if (len1) {
4745 len1--;
4746 nres--;
4747 }
4748 if (nres == ores)
4749 break;
4750 if (len2) {
4751 len2--;
4752 nres--;
4753 }
4754 }
4755
4756 *indlen1 = len1;
4757 *indlen2 = len2;
4758
4759 return stolen;
4760 }
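/*
 * A worked example with small numbers: ores = 5, *indlen1 = 4,
 * *indlen2 = 3, avail = 1.  One block is stolen (nres goes 7 -> 6),
 * then the skim loop trims len1 from 4 to 3, at which point
 * nres == ores.  The result is *indlen1 = 3, *indlen2 = 3 backed by
 * the original 5 blocks plus 1 stolen, and a return value of 1.
 */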
4761
4762 /*
4763 * Called by xfs_bmapi to update file extent records and the btree
4764 * after removing space (or undoing a delayed allocation).
4765 */
4766 STATIC int /* error */
4767 xfs_bmap_del_extent(
4768 xfs_inode_t *ip, /* incore inode pointer */
4769 xfs_trans_t *tp, /* current transaction pointer */
4770 xfs_extnum_t *idx, /* extent number to update/delete */
4771 xfs_bmap_free_t *flist, /* list of extents to be freed */
4772 xfs_btree_cur_t *cur, /* if null, not a btree */
4773 xfs_bmbt_irec_t *del, /* data to remove from extents */
4774 int *logflagsp, /* inode logging flags */
4775 int whichfork) /* data or attr fork */
4776 {
4777 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
4778 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
4779 xfs_fsblock_t del_endblock = 0; /* first block past del */
4780 xfs_fileoff_t del_endoff; /* first offset past del */
4781 int delay; /* current block is delayed allocated */
4782 int do_fx; /* free extent at end of routine */
4783 xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */
4784 int error; /* error return value */
4785 int flags; /* inode logging flags */
4786 xfs_bmbt_irec_t got; /* current extent entry */
4787 xfs_fileoff_t got_endoff; /* first offset past got */
4788 int i; /* temp state */
4789 xfs_ifork_t *ifp; /* inode fork pointer */
4790 xfs_mount_t *mp; /* mount structure */
4791 xfs_filblks_t nblks; /* quota/sb block count */
4792 xfs_bmbt_irec_t new; /* new record to be inserted */
4793 /* REFERENCED */
4794 uint qfield; /* quota field to update */
4795 xfs_filblks_t temp; /* for indirect length calculations */
4796 xfs_filblks_t temp2; /* for indirect length calculations */
4797 int state = 0;
4798
4799 mp = ip->i_mount;
4800 XFS_STATS_INC(mp, xs_del_exlist);
4801
4802 if (whichfork == XFS_ATTR_FORK)
4803 state |= BMAP_ATTRFORK;
4804
4805 ifp = XFS_IFORK_PTR(ip, whichfork);
4806 ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
4807 (uint)sizeof(xfs_bmbt_rec_t)));
4808 ASSERT(del->br_blockcount > 0);
4809 ep = xfs_iext_get_ext(ifp, *idx);
4810 xfs_bmbt_get_all(ep, &got);
4811 ASSERT(got.br_startoff <= del->br_startoff);
4812 del_endoff = del->br_startoff + del->br_blockcount;
4813 got_endoff = got.br_startoff + got.br_blockcount;
4814 ASSERT(got_endoff >= del_endoff);
4815 delay = isnullstartblock(got.br_startblock);
4816 ASSERT(isnullstartblock(del->br_startblock) == delay);
4817 flags = 0;
4818 qfield = 0;
4819 error = 0;
4820 /*
4821 * If deleting a real allocation, must free up the disk space.
4822 */
4823 if (!delay) {
4824 flags = XFS_ILOG_CORE;
4825 /*
4826 * Realtime allocation. Free it and record di_nblocks update.
4827 */
4828 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
4829 xfs_fsblock_t bno;
4830 xfs_filblks_t len;
4831
4832 ASSERT(do_mod(del->br_blockcount,
4833 mp->m_sb.sb_rextsize) == 0);
4834 ASSERT(do_mod(del->br_startblock,
4835 mp->m_sb.sb_rextsize) == 0);
4836 bno = del->br_startblock;
4837 len = del->br_blockcount;
4838 do_div(bno, mp->m_sb.sb_rextsize);
4839 do_div(len, mp->m_sb.sb_rextsize);
4840 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
4841 if (error)
4842 goto done;
4843 do_fx = 0;
4844 nblks = len * mp->m_sb.sb_rextsize;
4845 qfield = XFS_TRANS_DQ_RTBCOUNT;
4846 }
4847 /*
4848 * Ordinary allocation.
4849 */
4850 else {
4851 do_fx = 1;
4852 nblks = del->br_blockcount;
4853 qfield = XFS_TRANS_DQ_BCOUNT;
4854 }
4855 /*
4856 * Set up del_endblock and cur for later.
4857 */
4858 del_endblock = del->br_startblock + del->br_blockcount;
4859 if (cur) {
4860 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
4861 got.br_startblock, got.br_blockcount,
4862 &i)))
4863 goto done;
4864 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
4865 }
4866 da_old = da_new = 0;
4867 } else {
4868 da_old = startblockval(got.br_startblock);
4869 da_new = 0;
4870 nblks = 0;
4871 do_fx = 0;
4872 }
4873 /*
4874 * Set flag value to use in switch statement.
4875 * Left-contig is 2, right-contig is 1.
4876 */
4877 switch (((got.br_startoff == del->br_startoff) << 1) |
4878 (got_endoff == del_endoff)) {
4879 case 3:
4880 /*
4881 * Matches the whole extent. Delete the entry.
4882 */
4883 xfs_iext_remove(ip, *idx, 1,
4884 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
4885 --*idx;
4886 if (delay)
4887 break;
4888
4889 XFS_IFORK_NEXT_SET(ip, whichfork,
4890 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
4891 flags |= XFS_ILOG_CORE;
4892 if (!cur) {
4893 flags |= xfs_ilog_fext(whichfork);
4894 break;
4895 }
4896 if ((error = xfs_btree_delete(cur, &i)))
4897 goto done;
4898 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
4899 break;
4900
4901 case 2:
4902 /*
4903 * Deleting the first part of the extent.
4904 */
4905 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4906 xfs_bmbt_set_startoff(ep, del_endoff);
4907 temp = got.br_blockcount - del->br_blockcount;
4908 xfs_bmbt_set_blockcount(ep, temp);
4909 if (delay) {
4910 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
4911 da_old);
4912 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
4913 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4914 da_new = temp;
4915 break;
4916 }
4917 xfs_bmbt_set_startblock(ep, del_endblock);
4918 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4919 if (!cur) {
4920 flags |= xfs_ilog_fext(whichfork);
4921 break;
4922 }
4923 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
4924 got.br_blockcount - del->br_blockcount,
4925 got.br_state)))
4926 goto done;
4927 break;
4928
4929 case 1:
4930 /*
4931 * Deleting the last part of the extent.
4932 */
4933 temp = got.br_blockcount - del->br_blockcount;
4934 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4935 xfs_bmbt_set_blockcount(ep, temp);
4936 if (delay) {
4937 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
4938 da_old);
4939 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
4940 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4941 da_new = temp;
4942 break;
4943 }
4944 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4945 if (!cur) {
4946 flags |= xfs_ilog_fext(whichfork);
4947 break;
4948 }
4949 if ((error = xfs_bmbt_update(cur, got.br_startoff,
4950 got.br_startblock,
4951 got.br_blockcount - del->br_blockcount,
4952 got.br_state)))
4953 goto done;
4954 break;
4955
4956 case 0:
4957 /*
4958 * Deleting the middle of the extent.
4959 */
4960 temp = del->br_startoff - got.br_startoff;
4961 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4962 xfs_bmbt_set_blockcount(ep, temp);
4963 new.br_startoff = del_endoff;
4964 temp2 = got_endoff - del_endoff;
4965 new.br_blockcount = temp2;
4966 new.br_state = got.br_state;
4967 if (!delay) {
4968 new.br_startblock = del_endblock;
4969 flags |= XFS_ILOG_CORE;
4970 if (cur) {
4971 if ((error = xfs_bmbt_update(cur,
4972 got.br_startoff,
4973 got.br_startblock, temp,
4974 got.br_state)))
4975 goto done;
4976 if ((error = xfs_btree_increment(cur, 0, &i)))
4977 goto done;
4978 cur->bc_rec.b = new;
4979 error = xfs_btree_insert(cur, &i);
4980 if (error && error != -ENOSPC)
4981 goto done;
4982 /*
4983 * If we get no-space back from the btree
4984 * insert, it tried a split and we have a
4985 * zero block reservation.
4986 * Fix up our state and return the error.
4987 */
4988 if (error == -ENOSPC) {
4989 /*
4990 * Reset the cursor, don't trust
4991 * it after any insert operation.
4992 */
4993 if ((error = xfs_bmbt_lookup_eq(cur,
4994 got.br_startoff,
4995 got.br_startblock,
4996 temp, &i)))
4997 goto done;
4998 XFS_WANT_CORRUPTED_GOTO(mp,
4999 i == 1, done);
5000 /*
5001 * Update the btree record back
5002 * to the original value.
5003 */
5004 if ((error = xfs_bmbt_update(cur,
5005 got.br_startoff,
5006 got.br_startblock,
5007 got.br_blockcount,
5008 got.br_state)))
5009 goto done;
5010 /*
5011 * Reset the extent record back
5012 * to the original value.
5013 */
5014 xfs_bmbt_set_blockcount(ep,
5015 got.br_blockcount);
5016 flags = 0;
5017 error = -ENOSPC;
5018 goto done;
5019 }
5020 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5021 } else
5022 flags |= xfs_ilog_fext(whichfork);
5023 XFS_IFORK_NEXT_SET(ip, whichfork,
5024 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5025 } else {
5026 xfs_filblks_t stolen;
5027 ASSERT(whichfork == XFS_DATA_FORK);
5028
5029 /*
5030 * Distribute the original indlen reservation across the
5031 * two new extents. Steal blocks from the deleted extent
5032 * if necessary. Stealing blocks simply fudges the
5033 * fdblocks accounting in xfs_bunmapi().
5034 */
5035 temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
5036 temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
5037 stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
5038 del->br_blockcount);
5039 da_new = temp + temp2 - stolen;
5040 del->br_blockcount -= stolen;
5041
5042 /*
5043 * Set the reservation for each extent. Warn if either
5044 * is zero as this can lead to delalloc problems.
5045 */
5046 WARN_ON_ONCE(!temp || !temp2);
5047 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5048 new.br_startblock = nullstartblock((int)temp2);
5049 }
5050 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5051 xfs_iext_insert(ip, *idx + 1, 1, &new, state);
5052 ++*idx;
5053 break;
5054 }
5055 /*
5056 * If we need to, add to list of extents to delete.
5057 */
5058 if (do_fx)
5059 xfs_bmap_add_free(mp, flist, del->br_startblock,
5060 del->br_blockcount);
5061 /*
5062 * Adjust inode # blocks in the file.
5063 */
5064 if (nblks)
5065 ip->i_d.di_nblocks -= nblks;
5066 /*
5067 * Adjust quota data.
5068 */
5069 if (qfield)
5070 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5071
5072 /*
5073 * Account for change in delayed indirect blocks.
5074 * Nothing to do for disk quota accounting here.
5075 */
5076 ASSERT(da_old >= da_new);
5077 if (da_old > da_new)
5078 xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
5079 done:
5080 *logflagsp = flags;
5081 return error;
5082 }
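/*
 * A quick map of the contiguity switch above, for a hypothetical
 * extent covering file offsets [10, 30):
 *
 *	case 3: del is [10, 30) -> remove the record entirely
 *	case 2: del is [10, 18) -> trim the front, keep [18, 30)
 *	case 1: del is [22, 30) -> trim the back, keep [10, 22)
 *	case 0: del is [15, 25) -> split into [10, 15) and [25, 30)
 */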
5083
5084 /*
5085 * Unmap (remove) blocks from a file.
5086 * If nexts is nonzero then the number of extents to remove is limited to
5087 * that value. *done is set once the whole block range has been
5088 * unmapped; it is left clear if extents remain in the range.
5089 */
5090 int /* error */
5091 xfs_bunmapi(
5092 xfs_trans_t *tp, /* transaction pointer */
5093 struct xfs_inode *ip, /* incore inode */
5094 xfs_fileoff_t bno, /* starting offset to unmap */
5095 xfs_filblks_t len, /* length to unmap in file */
5096 int flags, /* misc flags */
5097 xfs_extnum_t nexts, /* number of extents max */
5098 xfs_fsblock_t *firstblock, /* first allocated block
5099 controls a.g. for allocs */
5100 xfs_bmap_free_t *flist, /* i/o: list extents to free */
5101 int *done) /* set when whole range unmapped */
5102 {
5103 xfs_btree_cur_t *cur; /* bmap btree cursor */
5104 xfs_bmbt_irec_t del; /* extent being deleted */
5105 int eof; /* is deleting at eof */
5106 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
5107 int error; /* error return value */
5108 xfs_extnum_t extno; /* extent number in list */
5109 xfs_bmbt_irec_t got; /* current extent record */
5110 xfs_ifork_t *ifp; /* inode fork pointer */
5111 int isrt; /* freeing in rt area */
5112 xfs_extnum_t lastx; /* last extent index used */
5113 int logflags; /* transaction logging flags */
5114 xfs_extlen_t mod; /* rt extent offset */
5115 xfs_mount_t *mp; /* mount structure */
5116 xfs_extnum_t nextents; /* number of file extents */
5117 xfs_bmbt_irec_t prev; /* previous extent record */
5118 xfs_fileoff_t start; /* first file offset deleted */
5119 int tmp_logflags; /* partial logging flags */
5120 int wasdel; /* was a delayed alloc extent */
5121 int whichfork; /* data or attribute fork */
5122 xfs_fsblock_t sum;
5123
5124 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5125
5126 whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
5127 XFS_ATTR_FORK : XFS_DATA_FORK;
5128 ifp = XFS_IFORK_PTR(ip, whichfork);
5129 if (unlikely(
5130 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5131 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5132 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5133 ip->i_mount);
5134 return -EFSCORRUPTED;
5135 }
5136 mp = ip->i_mount;
5137 if (XFS_FORCED_SHUTDOWN(mp))
5138 return -EIO;
5139
5140 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5141 ASSERT(len > 0);
5142 ASSERT(nexts >= 0);
5143
5144 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5145 (error = xfs_iread_extents(tp, ip, whichfork)))
5146 return error;
5147 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5148 if (nextents == 0) {
5149 *done = 1;
5150 return 0;
5151 }
5152 XFS_STATS_INC(mp, xs_blk_unmap);
5153 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5154 start = bno;
5155 bno = start + len - 1;
5156 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5157 &prev);
5158
5159 /*
5160 * Check to see if the given block number is past the end of the
5161 * file, back up to the last block if so...
5162 */
5163 if (eof) {
5164 ep = xfs_iext_get_ext(ifp, --lastx);
5165 xfs_bmbt_get_all(ep, &got);
5166 bno = got.br_startoff + got.br_blockcount - 1;
5167 }
5168 logflags = 0;
5169 if (ifp->if_flags & XFS_IFBROOT) {
5170 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5171 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5172 cur->bc_private.b.firstblock = *firstblock;
5173 cur->bc_private.b.flist = flist;
5174 cur->bc_private.b.flags = 0;
5175 } else
5176 cur = NULL;
5177
5178 if (isrt) {
5179 /*
5180 * Synchronize by locking the bitmap inode.
5181 */
5182 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
5183 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5184 }
5185
5186 extno = 0;
5187 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5188 (nexts == 0 || extno < nexts)) {
5189 /*
5190 * Is the found extent after a hole in which bno lives?
5191 * Just back up to the previous extent, if so.
5192 */
5193 if (got.br_startoff > bno) {
5194 if (--lastx < 0)
5195 break;
5196 ep = xfs_iext_get_ext(ifp, lastx);
5197 xfs_bmbt_get_all(ep, &got);
5198 }
5199 /*
5200 * Is the last block of this extent before the range
5201 * we're supposed to delete? If so, we're done.
5202 */
5203 bno = XFS_FILEOFF_MIN(bno,
5204 got.br_startoff + got.br_blockcount - 1);
5205 if (bno < start)
5206 break;
5207 /*
5208 * Then deal with the (possibly delayed) allocated space
5209 * we found.
5210 */
5211 ASSERT(ep != NULL);
5212 del = got;
5213 wasdel = isnullstartblock(del.br_startblock);
5214 if (got.br_startoff < start) {
5215 del.br_startoff = start;
5216 del.br_blockcount -= start - got.br_startoff;
5217 if (!wasdel)
5218 del.br_startblock += start - got.br_startoff;
5219 }
5220 if (del.br_startoff + del.br_blockcount > bno + 1)
5221 del.br_blockcount = bno + 1 - del.br_startoff;
5222 sum = del.br_startblock + del.br_blockcount;
5223 if (isrt &&
5224 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5225 /*
5226 * Realtime extent not lined up at the end.
5227 * The extent could have been split into written
5228 * and unwritten pieces, or we could just be
5229 * unmapping part of it. But we can't really
5230 * get rid of part of a realtime extent.
5231 */
5232 if (del.br_state == XFS_EXT_UNWRITTEN ||
5233 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5234 /*
5235 * This piece is unwritten, or we're not
5236 * using unwritten extents. Skip over it.
5237 */
5238 ASSERT(bno >= mod);
5239 bno -= mod > del.br_blockcount ?
5240 del.br_blockcount : mod;
5241 if (bno < got.br_startoff) {
5242 if (--lastx >= 0)
5243 xfs_bmbt_get_all(xfs_iext_get_ext(
5244 ifp, lastx), &got);
5245 }
5246 continue;
5247 }
5248 /*
5249 * It's written, turn it unwritten.
5250 * This is better than zeroing it.
5251 */
5252 ASSERT(del.br_state == XFS_EXT_NORM);
5253 ASSERT(tp->t_blk_res > 0);
5254 /*
5255 * If this spans a realtime extent boundary,
5256 * chop it back to the start of the one we end at.
5257 */
5258 if (del.br_blockcount > mod) {
5259 del.br_startoff += del.br_blockcount - mod;
5260 del.br_startblock += del.br_blockcount - mod;
5261 del.br_blockcount = mod;
5262 }
5263 del.br_state = XFS_EXT_UNWRITTEN;
5264 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5265 &lastx, &cur, &del, firstblock, flist,
5266 &logflags);
5267 if (error)
5268 goto error0;
5269 goto nodelete;
5270 }
5271 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5272 /*
5273 * Realtime extent is lined up at the end but not
5274 * at the front. We'll get rid of full extents if
5275 * we can.
5276 */
5277 mod = mp->m_sb.sb_rextsize - mod;
5278 if (del.br_blockcount > mod) {
5279 del.br_blockcount -= mod;
5280 del.br_startoff += mod;
5281 del.br_startblock += mod;
5282 } else if ((del.br_startoff == start &&
5283 (del.br_state == XFS_EXT_UNWRITTEN ||
5284 tp->t_blk_res == 0)) ||
5285 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5286 /*
5287 * Can't make it unwritten. There isn't
5288 * a full extent here so just skip it.
5289 */
5290 ASSERT(bno >= del.br_blockcount);
5291 bno -= del.br_blockcount;
5292 if (got.br_startoff > bno) {
5293 if (--lastx >= 0) {
5294 ep = xfs_iext_get_ext(ifp,
5295 lastx);
5296 xfs_bmbt_get_all(ep, &got);
5297 }
5298 }
5299 continue;
5300 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5301 /*
5302 * This one is already unwritten.
5303 * It must have a written left neighbor.
5304 * Unwrite the killed part of that one and
5305 * try again.
5306 */
5307 ASSERT(lastx > 0);
5308 xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
5309 lastx - 1), &prev);
5310 ASSERT(prev.br_state == XFS_EXT_NORM);
5311 ASSERT(!isnullstartblock(prev.br_startblock));
5312 ASSERT(del.br_startblock ==
5313 prev.br_startblock + prev.br_blockcount);
5314 if (prev.br_startoff < start) {
5315 mod = start - prev.br_startoff;
5316 prev.br_blockcount -= mod;
5317 prev.br_startblock += mod;
5318 prev.br_startoff = start;
5319 }
5320 prev.br_state = XFS_EXT_UNWRITTEN;
5321 lastx--;
5322 error = xfs_bmap_add_extent_unwritten_real(tp,
5323 ip, &lastx, &cur, &prev,
5324 firstblock, flist, &logflags);
5325 if (error)
5326 goto error0;
5327 goto nodelete;
5328 } else {
5329 ASSERT(del.br_state == XFS_EXT_NORM);
5330 del.br_state = XFS_EXT_UNWRITTEN;
5331 error = xfs_bmap_add_extent_unwritten_real(tp,
5332 ip, &lastx, &cur, &del,
5333 firstblock, flist, &logflags);
5334 if (error)
5335 goto error0;
5336 goto nodelete;
5337 }
5338 }
5339
5340 /*
5341 * If the directory code is running with no block
5342 * reservation, the deleted block is in the middle of
5343 * its extent, and the resulting insert of an extent
5344 * would cause transformation to btree format, then
5345 * reject it: the calling code will swap blocks
5346 * around instead.
5347 * We have to do this now, rather than waiting for the
5348 * conversion to btree format, since the transaction
5349 * will already be dirty.
5350 */
5351 if (!wasdel && tp->t_blk_res == 0 &&
5352 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5353 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
5354 XFS_IFORK_MAXEXT(ip, whichfork) &&
5355 del.br_startoff > got.br_startoff &&
5356 del.br_startoff + del.br_blockcount <
5357 got.br_startoff + got.br_blockcount) {
5358 error = -ENOSPC;
5359 goto error0;
5360 }
5361
5362 /*
5363 * Unreserve quota and update realtime free space, if
5364 * appropriate. If this is a delayed allocation, update the inode
5365 * delalloc counter now and wait to update the sb counters, as
5366 * xfs_bmap_del_extent() might need to borrow some blocks.
5367 */
5368 if (wasdel) {
5369 ASSERT(startblockval(del.br_startblock) > 0);
5370 if (isrt) {
5371 xfs_filblks_t rtexts;
5372
5373 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5374 do_div(rtexts, mp->m_sb.sb_rextsize);
5375 xfs_mod_frextents(mp, (int64_t)rtexts);
5376 (void)xfs_trans_reserve_quota_nblks(NULL,
5377 ip, -((long)del.br_blockcount), 0,
5378 XFS_QMOPT_RES_RTBLKS);
5379 } else {
5380 (void)xfs_trans_reserve_quota_nblks(NULL,
5381 ip, -((long)del.br_blockcount), 0,
5382 XFS_QMOPT_RES_REGBLKS);
5383 }
5384 ip->i_delayed_blks -= del.br_blockcount;
5385 if (cur)
5386 cur->bc_private.b.flags |=
5387 XFS_BTCUR_BPRV_WASDEL;
5388 } else if (cur)
5389 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5390
5391 error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del,
5392 &tmp_logflags, whichfork);
5393 logflags |= tmp_logflags;
5394 if (error)
5395 goto error0;
5396
5397 if (!isrt && wasdel)
5398 xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);
5399
5400 bno = del.br_startoff - 1;
5401 nodelete:
5402 /*
5403 * If not done, go on to the next (i.e. previous) record.
5404 */
5405 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5406 if (lastx >= 0) {
5407 ep = xfs_iext_get_ext(ifp, lastx);
5408 if (xfs_bmbt_get_startoff(ep) > bno) {
5409 if (--lastx >= 0)
5410 ep = xfs_iext_get_ext(ifp,
5411 lastx);
5412 }
5413 xfs_bmbt_get_all(ep, &got);
5414 }
5415 extno++;
5416 }
5417 }
5418 *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
5419
5420 /*
5421 * Convert to a btree if necessary.
5422 */
5423 if (xfs_bmap_needs_btree(ip, whichfork)) {
5424 ASSERT(cur == NULL);
5425 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
5426 &cur, 0, &tmp_logflags, whichfork);
5427 logflags |= tmp_logflags;
5428 if (error)
5429 goto error0;
5430 }
5431 /*
5432 * Transform from btree back to extents, giving it the cursor.
5433 */
5434 else if (xfs_bmap_wants_extents(ip, whichfork)) {
5435 ASSERT(cur != NULL);
5436 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5437 whichfork);
5438 logflags |= tmp_logflags;
5439 if (error)
5440 goto error0;
5441 }
5442 /*
5443 * No transformation from extents to local format is done here.
5444 */
5445 error = 0;
5446 error0:
5447 /*
5448 * Log everything. Do this after the conversion; there's no point
5449 * in logging the extent records if we've converted to btree format.
5450 */
5451 if ((logflags & xfs_ilog_fext(whichfork)) &&
5452 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5453 logflags &= ~xfs_ilog_fext(whichfork);
5454 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5455 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5456 logflags &= ~xfs_ilog_fbroot(whichfork);
5457 /*
5458 * Log the inode even in the error case; if the transaction
5459 * is dirty we'll need to shut down the filesystem.
5460 */
5461 if (logflags)
5462 xfs_trans_log_inode(tp, ip, logflags);
5463 if (cur) {
5464 if (!error) {
5465 *firstblock = cur->bc_private.b.firstblock;
5466 cur->bc_private.b.allocated = 0;
5467 }
5468 xfs_btree_del_cursor(cur,
5469 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5470 }
5471 return error;
5472 }
5473
5474 /*
5475 * Determine whether an extent shift can be accomplished by a merge with the
5476 * extent that precedes the target hole of the shift.
5477 */
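/*
 * For illustration (hypothetical extents): left = [startoff 0,
 * startblock 100, blockcount 10] and got = [startoff 15, startblock 110,
 * blockcount 5] are mergeable under a shift of 5, since got would then
 * start at file offset 10 and disk block 110, which is exactly where
 * left ends on both axes.
 */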
5478 STATIC bool
5479 xfs_bmse_can_merge(
5480 struct xfs_bmbt_irec *left, /* preceding extent */
5481 struct xfs_bmbt_irec *got, /* current extent to shift */
5482 xfs_fileoff_t shift) /* shift fsb */
5483 {
5484 xfs_fileoff_t startoff;
5485
5486 startoff = got->br_startoff - shift;
5487
5488 /*
5489 * The extent, once shifted, must be adjacent in-file and on-disk with
5490 * the preceding extent.
5491 */
5492 if ((left->br_startoff + left->br_blockcount != startoff) ||
5493 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5494 (left->br_state != got->br_state) ||
5495 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
5496 return false;
5497
5498 return true;
5499 }
5500
5501 /*
5502 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5503 * hole in the file. If an extent shift would result in the extent being fully
5504 * adjacent to the extent that currently precedes the hole, we can merge with
5505 * the preceding extent rather than do the shift.
5506 *
5507 * This function assumes the caller has verified a shift-by-merge is possible
5508 * with the provided extents via xfs_bmse_can_merge().
5509 */
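/*
 * Sketch of the expected call pattern (assumed shape, not a verbatim
 * caller):
 *
 *	if (xfs_bmse_can_merge(&left, &got, shift))
 *		error = xfs_bmse_merge(ip, whichfork, shift, current_ext,
 *				       gotp, leftp, cur, &logflags);
 */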
5510 STATIC int
5511 xfs_bmse_merge(
5512 struct xfs_inode *ip,
5513 int whichfork,
5514 xfs_fileoff_t shift, /* shift fsb */
5515 int current_ext, /* idx of gotp */
5516 struct xfs_bmbt_rec_host *gotp, /* extent to shift */
5517 struct xfs_bmbt_rec_host *leftp, /* preceding extent */
5518 struct xfs_btree_cur *cur,
5519 int *logflags) /* output */
5520 {
5521 struct xfs_bmbt_irec got;
5522 struct xfs_bmbt_irec left;
5523 xfs_filblks_t blockcount;
5524 int error, i;
5525 struct xfs_mount *mp = ip->i_mount;
5526
5527 xfs_bmbt_get_all(gotp, &got);
5528 xfs_bmbt_get_all(leftp, &left);
5529 blockcount = left.br_blockcount + got.br_blockcount;
5530
5531 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5532 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5533 ASSERT(xfs_bmse_can_merge(&left, &got, shift));
5534
5535 /*
5536 * Merge the in-core extents. Note that the host record pointers and
5537 * current_ext index are invalid once the extent has been removed via
5538 * xfs_iext_remove().
5539 */
5540 xfs_bmbt_set_blockcount(leftp, blockcount);
5541 xfs_iext_remove(ip, current_ext, 1, 0);
5542
5543 /*
5544 * Update the on-disk extent count, the btree if necessary and log the
5545 * inode.
5546 */
5547 XFS_IFORK_NEXT_SET(ip, whichfork,
5548 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5549 *logflags |= XFS_ILOG_CORE;
5550 if (!cur) {
5551 *logflags |= XFS_ILOG_DEXT;
5552 return 0;
5553 }
5554
5555 /* lookup and remove the extent to merge */
5556 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
5557 got.br_blockcount, &i);
5558 if (error)
5559 return error;
5560 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5561
5562 error = xfs_btree_delete(cur, &i);
5563 if (error)
5564 return error;
5565 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5566
5567 /* lookup and update size of the previous extent */
5568 error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
5569 left.br_blockcount, &i);
5570 if (error)
5571 return error;
5572 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5573
5574 left.br_blockcount = blockcount;
5575
5576 return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock,
5577 left.br_blockcount, left.br_state);
5578 }
5579
5580 /*
5581 * Shift a single extent.
5582 */
5583 STATIC int
5584 xfs_bmse_shift_one(
5585 struct xfs_inode *ip,
5586 int whichfork,
5587 xfs_fileoff_t offset_shift_fsb,
5588 int *current_ext,
5589 struct xfs_bmbt_rec_host *gotp,
5590 struct xfs_btree_cur *cur,
5591 int *logflags,
5592 enum shift_direction direction)
5593 {
5594 struct xfs_ifork *ifp;
5595 struct xfs_mount *mp;
5596 xfs_fileoff_t startoff;
5597 struct xfs_bmbt_rec_host *adj_irecp;
5598 struct xfs_bmbt_irec got;
5599 struct xfs_bmbt_irec adj_irec;
5600 int error;
5601 int i;
5602 int total_extents;
5603
5604 mp = ip->i_mount;
5605 ifp = XFS_IFORK_PTR(ip, whichfork);
5606 total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
5607
5608 xfs_bmbt_get_all(gotp, &got);
5609
5610 /* delalloc extents should be prevented by the caller */
5611 XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
5612
5613 if (direction == SHIFT_LEFT) {
5614 startoff = got.br_startoff - offset_shift_fsb;
5615
5616 /*
5617 * Check for merge if we've got an extent to the left,
5618 * otherwise make sure there's enough room at the start
5619 * of the file for the shift.
5620 */
5621 if (!*current_ext) {
5622 if (got.br_startoff < offset_shift_fsb)
5623 return -EINVAL;
5624 goto update_current_ext;
5625 }
5626 /*
5627 * Grab the left extent and check for a large
5628 * enough hole.
5629 */
5630 adj_irecp = xfs_iext_get_ext(ifp, *current_ext - 1);
5631 xfs_bmbt_get_all(adj_irecp, &adj_irec);
5632
5633 if (startoff <
5634 adj_irec.br_startoff + adj_irec.br_blockcount)
5635 return -EINVAL;
5636
5637 /* check whether to merge the extent or shift it down */
5638 if (xfs_bmse_can_merge(&adj_irec, &got,
5639 offset_shift_fsb)) {
5640 return xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
5641 *current_ext, gotp, adj_irecp,
5642 cur, logflags);
5643 }
5644 } else {
5645 startoff = got.br_startoff + offset_shift_fsb;
5646 /* nothing to move if this is the last extent */
5647 if (*current_ext >= (total_extents - 1))
5648 goto update_current_ext;
5649 /*
5650 * If this is not the last extent in the file, make sure there
5651 * is enough room between the current extent and the next one
5652 * to accommodate the shift.
5653 */
5654 adj_irecp = xfs_iext_get_ext(ifp, *current_ext + 1);
5655 xfs_bmbt_get_all(adj_irecp, &adj_irec);
5656 if (startoff + got.br_blockcount > adj_irec.br_startoff)
5657 return -EINVAL;
5658 /*
5659 * Unlike a left shift (which involves a hole punch),
5660 * a right shift does not modify extent neighbors
5661 * in any way. We should never find mergeable extents
5662 * in this scenario. Check anyway and warn if we
5663 * encounter two extents that could be one.
5664 */
5665 if (xfs_bmse_can_merge(&got, &adj_irec, offset_shift_fsb))
5666 WARN_ON_ONCE(1);
5667 }
5668 /*
5669 * Increment the extent index for the next iteration, update the start
5670 * offset of the in-core extent and update the btree if applicable.
5671 */
5672 update_current_ext:
5673 if (direction == SHIFT_LEFT)
5674 (*current_ext)++;
5675 else
5676 (*current_ext)--;
5677 xfs_bmbt_set_startoff(gotp, startoff);
5678 *logflags |= XFS_ILOG_CORE;
5679 if (!cur) {
5680 *logflags |= XFS_ILOG_DEXT;
5681 return 0;
5682 }
5683
5684 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
5685 got.br_blockcount, &i);
5686 if (error)
5687 return error;
5688 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5689
5690 got.br_startoff = startoff;
5691 return xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
5692 got.br_blockcount, got.br_state);
5693 }
5694
5695 /*
5696 * Shift extent records to the left/right to cover/create a hole.
5697 *
5698 * The maximum number of extents to be shifted in a single operation is
5699 * @num_exts. @stop_fsb specifies the file offset at which to stop the shift,
5700 * and the file offset where we've left off is returned in @next_fsb.
5701 * @offset_shift_fsb is the length by which each extent is shifted. If there
5702 * is no hole to shift the extents into, this is considered an invalid
5703 * operation and we abort immediately.
5704 */
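/*
 * A minimal caller sketch (hedged: names and values here are
 * hypothetical, and per-call transaction setup/teardown is elided).
 * Collapsing a hole of shift_fsb blocks would loop roughly as follows
 * until @done is set:
 *
 *	while (!done) {
 *		error = xfs_bmap_shift_extents(tp, ip, &next_fsb,
 *				shift_fsb, &done, stop_fsb, &firstblock,
 *				&flist, SHIFT_LEFT, num_exts);
 *		if (error)
 *			break;
 *	}
 */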
5705 int
5706 xfs_bmap_shift_extents(
5707 struct xfs_trans *tp,
5708 struct xfs_inode *ip,
5709 xfs_fileoff_t *next_fsb,
5710 xfs_fileoff_t offset_shift_fsb,
5711 int *done,
5712 xfs_fileoff_t stop_fsb,
5713 xfs_fsblock_t *firstblock,
5714 struct xfs_bmap_free *flist,
5715 enum shift_direction direction,
5716 int num_exts)
5717 {
5718 struct xfs_btree_cur *cur = NULL;
5719 struct xfs_bmbt_rec_host *gotp;
5720 struct xfs_bmbt_irec got;
5721 struct xfs_mount *mp = ip->i_mount;
5722 struct xfs_ifork *ifp;
5723 xfs_extnum_t nexts = 0;
5724 xfs_extnum_t current_ext;
5725 xfs_extnum_t total_extents;
5726 xfs_extnum_t stop_extent;
5727 int error = 0;
5728 int whichfork = XFS_DATA_FORK;
5729 int logflags = 0;
5730
5731 if (unlikely(XFS_TEST_ERROR(
5732 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5733 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5734 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
5735 XFS_ERROR_REPORT("xfs_bmap_shift_extents",
5736 XFS_ERRLEVEL_LOW, mp);
5737 return -EFSCORRUPTED;
5738 }
5739
5740 if (XFS_FORCED_SHUTDOWN(mp))
5741 return -EIO;
5742
5743 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5744 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5745 ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
5746 ASSERT(*next_fsb != NULLFSBLOCK || direction == SHIFT_RIGHT);
5747
5748 ifp = XFS_IFORK_PTR(ip, whichfork);
5749 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5750 /* Read in all the extents */
5751 error = xfs_iread_extents(tp, ip, whichfork);
5752 if (error)
5753 return error;
5754 }
5755
5756 if (ifp->if_flags & XFS_IFBROOT) {
5757 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5758 cur->bc_private.b.firstblock = *firstblock;
5759 cur->bc_private.b.flist = flist;
5760 cur->bc_private.b.flags = 0;
5761 }
5762
5763 /*
5764 * There may be delalloc extents in the data fork before the range we
5765 * are collapsing out, so we cannot use the count of real extents here.
5766 * Instead we have to calculate it from the incore fork.
5767 */
5768 total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
5769 if (total_extents == 0) {
5770 *done = 1;
5771 goto del_cursor;
5772 }
5773
5774 /*
5775 * For the first right shift, next_fsb is unset; initialize it here.
5776 */
5777 if (*next_fsb == NULLFSBLOCK) {
5778 gotp = xfs_iext_get_ext(ifp, total_extents - 1);
5779 xfs_bmbt_get_all(gotp, &got);
5780 *next_fsb = got.br_startoff;
5781 if (stop_fsb > *next_fsb) {
5782 *done = 1;
5783 goto del_cursor;
5784 }
5785 }
5786
5787 /* Look up the extent index at which we have to stop */
5788 if (direction == SHIFT_RIGHT) {
5789 gotp = xfs_iext_bno_to_ext(ifp, stop_fsb, &stop_extent);
5790 /* Make stop_extent exclusive of shift range */
5791 stop_extent--;
5792 } else
5793 stop_extent = total_extents;
5794
5795 /*
5796 * Look up the extent index for the fsb where we start shifting. We can
5797 * iterate with current_ext from here on, since changes to the extent
5798 * list are locked out via the ilock.
5799 *
5800 * gotp can be null in two cases: 1) there are no extents, or 2)
5801 * *next_fsb lies in a hole beyond which there are no extents. Either
5802 * way, we are done.
5803 */
5804 gotp = xfs_iext_bno_to_ext(ifp, *next_fsb, &current_ext);
5805 if (!gotp) {
5806 *done = 1;
5807 goto del_cursor;
5808 }
5809
5810 /* some sanity checking before we finally start shifting extents */
5811 if ((direction == SHIFT_LEFT && current_ext >= stop_extent) ||
5812 (direction == SHIFT_RIGHT && current_ext <= stop_extent)) {
5813 error = -EIO;
5814 goto del_cursor;
5815 }
5816
5817 while (nexts++ < num_exts) {
5818 error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
5819 &current_ext, gotp, cur, &logflags,
5820 direction);
5821 if (error)
5822 goto del_cursor;
5823 /*
5824 * If there was an extent merge during the shift, the extent
5825 * count can change. Update the total and grab the next record.
5826 */
5827 if (direction == SHIFT_LEFT) {
5828 total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
5829 stop_extent = total_extents;
5830 }
5831
5832 if (current_ext == stop_extent) {
5833 *done = 1;
5834 *next_fsb = NULLFSBLOCK;
5835 break;
5836 }
5837 gotp = xfs_iext_get_ext(ifp, current_ext);
5838 }
5839
5840 if (!*done) {
5841 xfs_bmbt_get_all(gotp, &got);
5842 *next_fsb = got.br_startoff;
5843 }
5844
5845 del_cursor:
5846 if (cur)
5847 xfs_btree_del_cursor(cur,
5848 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5849
5850 if (logflags)
5851 xfs_trans_log_inode(tp, ip, logflags);
5852
5853 return error;
5854 }
5855
5856 /*
5857 * Split an extent into two extents at the split_fsb block, so that
5858 * split_fsb becomes the first block of the new extent. @split_fsb is
5859 * the file offset block at which the extent is split.
5860 * If split_fsb lies in a hole or at the start of an extent, just return 0.
5861 */
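/*
 * Callers typically reach this through xfs_bmap_split_extent() below,
 * which wraps the split in a transaction; see that function for the
 * alloc/ijoin/finish/commit pattern.
 */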
5862 STATIC int
5863 xfs_bmap_split_extent_at(
5864 struct xfs_trans *tp,
5865 struct xfs_inode *ip,
5866 xfs_fileoff_t split_fsb,
5867 xfs_fsblock_t *firstfsb,
5868 struct xfs_bmap_free *free_list)
5869 {
5870 int whichfork = XFS_DATA_FORK;
5871 struct xfs_btree_cur *cur = NULL;
5872 struct xfs_bmbt_rec_host *gotp;
5873 struct xfs_bmbt_irec got;
5874 struct xfs_bmbt_irec new; /* split extent */
5875 struct xfs_mount *mp = ip->i_mount;
5876 struct xfs_ifork *ifp;
5877 xfs_fsblock_t gotblkcnt; /* new block count for got */
5878 xfs_extnum_t current_ext;
5879 int error = 0;
5880 int logflags = 0;
5881 int i = 0;
5882
5883 if (unlikely(XFS_TEST_ERROR(
5884 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5885 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5886 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
5887 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
5888 XFS_ERRLEVEL_LOW, mp);
5889 return -EFSCORRUPTED;
5890 }
5891
5892 if (XFS_FORCED_SHUTDOWN(mp))
5893 return -EIO;
5894
5895 ifp = XFS_IFORK_PTR(ip, whichfork);
5896 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5897 /* Read in all the extents */
5898 error = xfs_iread_extents(tp, ip, whichfork);
5899 if (error)
5900 return error;
5901 }
5902
5903 /*
5904 * gotp can be null in two cases: 1) there are no extents,
5905 * or 2) split_fsb lies in a hole beyond which there are
5906 * no extents. Either way, we are done.
5907 */
5908 gotp = xfs_iext_bno_to_ext(ifp, split_fsb, &current_ext);
5909 if (!gotp)
5910 return 0;
5911
5912 xfs_bmbt_get_all(gotp, &got);
5913
5914 /*
5915 * If split_fsb lies in a hole or at the start boundary offset
5916 * of the extent, there is nothing to split.
5917 */
5918 if (got.br_startoff >= split_fsb)
5919 return 0;
5920
5921 gotblkcnt = split_fsb - got.br_startoff;
5922 new.br_startoff = split_fsb;
5923 new.br_startblock = got.br_startblock + gotblkcnt;
5924 new.br_blockcount = got.br_blockcount - gotblkcnt;
5925 new.br_state = got.br_state;
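/*
 * Example with hypothetical values: splitting got = [startoff 100,
 * startblock 500, blockcount 10] at split_fsb = 104 gives gotblkcnt = 4,
 * so got will be trimmed to blocks 100..103 and new becomes
 * [startoff 104, startblock 504, blockcount 6].
 */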
5926
5927 if (ifp->if_flags & XFS_IFBROOT) {
5928 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5929 cur->bc_private.b.firstblock = *firstfsb;
5930 cur->bc_private.b.flist = free_list;
5931 cur->bc_private.b.flags = 0;
5932 error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
5933 got.br_startblock,
5934 got.br_blockcount,
5935 &i);
5936 if (error)
5937 goto del_cursor;
5938 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5939 }
5940
5941 xfs_bmbt_set_blockcount(gotp, gotblkcnt);
5942 got.br_blockcount = gotblkcnt;
5943
5944 logflags = XFS_ILOG_CORE;
5945 if (cur) {
5946 error = xfs_bmbt_update(cur, got.br_startoff,
5947 got.br_startblock,
5948 got.br_blockcount,
5949 got.br_state);
5950 if (error)
5951 goto del_cursor;
5952 } else
5953 logflags |= XFS_ILOG_DEXT;
5954
5955 /* Add new extent */
5956 current_ext++;
5957 xfs_iext_insert(ip, current_ext, 1, &new, 0);
5958 XFS_IFORK_NEXT_SET(ip, whichfork,
5959 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5960
5961 if (cur) {
5962 error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
5963 new.br_startblock, new.br_blockcount,
5964 &i);
5965 if (error)
5966 goto del_cursor;
5967 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
5968 cur->bc_rec.b.br_state = new.br_state;
5969
5970 error = xfs_btree_insert(cur, &i);
5971 if (error)
5972 goto del_cursor;
5973 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5974 }
5975
5976 /*
5977 * Convert to a btree if necessary.
5978 */
5979 if (xfs_bmap_needs_btree(ip, whichfork)) {
5980 int tmp_logflags; /* partial log flag return val */
5981
5982 ASSERT(cur == NULL);
5983 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, free_list,
5984 &cur, 0, &tmp_logflags, whichfork);
5985 logflags |= tmp_logflags;
5986 }
5987
5988 del_cursor:
5989 if (cur) {
5990 cur->bc_private.b.allocated = 0;
5991 xfs_btree_del_cursor(cur,
5992 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5993 }
5994
5995 if (logflags)
5996 xfs_trans_log_inode(tp, ip, logflags);
5997 return error;
5998 }
5999
6000 int
6001 xfs_bmap_split_extent(
6002 struct xfs_inode *ip,
6003 xfs_fileoff_t split_fsb)
6004 {
6005 struct xfs_mount *mp = ip->i_mount;
6006 struct xfs_trans *tp;
6007 struct xfs_bmap_free free_list;
6008 xfs_fsblock_t firstfsb;
6009 int error;
6010
6011 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
6012 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
6013 if (error)
6014 return error;
6015
6016 xfs_ilock(ip, XFS_ILOCK_EXCL);
6017 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
6018
6019 xfs_bmap_init(&free_list, &firstfsb);
6020
6021 error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
6022 &firstfsb, &free_list);
6023 if (error)
6024 goto out;
6025
6026 error = xfs_bmap_finish(&tp, &free_list, NULL);
6027 if (error)
6028 goto out;
6029
6030 return xfs_trans_commit(tp);
6031
6032 out:
6033 xfs_bmap_cancel(&free_list);
6034 xfs_trans_cancel(tp);
6035 return error;
6036 }