git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/commitdiff
Merge branch 'xfs-libxfs-restructure' into for-next
authorDave Chinner <david@fromorbit.com>
Mon, 14 Jul 2014 21:37:18 +0000 (07:37 +1000)
committerDave Chinner <david@fromorbit.com>
Mon, 14 Jul 2014 21:37:18 +0000 (07:37 +1000)
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/libxfs/xfs_btree.c
fs/xfs/libxfs/xfs_sb.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_iomap.c

index 0000000000000000000000000000000000000000,72a110eb1ddac5520f47261ccbe4f6967d61fccc..94ac88306fa68e8874a3c682478fec0e5575be18
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,5609 +1,5606 @@@
 -int
 -__xfs_bmapi_allocate(
+ /*
+  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+  * All Rights Reserved.
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License as
+  * published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it would be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, write the Free Software Foundation,
+  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+  */
+ #include "xfs.h"
+ #include "xfs_fs.h"
+ #include "xfs_shared.h"
+ #include "xfs_format.h"
+ #include "xfs_log_format.h"
+ #include "xfs_trans_resv.h"
+ #include "xfs_bit.h"
+ #include "xfs_inum.h"
+ #include "xfs_sb.h"
+ #include "xfs_ag.h"
+ #include "xfs_mount.h"
+ #include "xfs_da_format.h"
+ #include "xfs_da_btree.h"
+ #include "xfs_dir2.h"
+ #include "xfs_inode.h"
+ #include "xfs_btree.h"
+ #include "xfs_trans.h"
+ #include "xfs_inode_item.h"
+ #include "xfs_extfree_item.h"
+ #include "xfs_alloc.h"
+ #include "xfs_bmap.h"
+ #include "xfs_bmap_util.h"
+ #include "xfs_bmap_btree.h"
+ #include "xfs_rtalloc.h"
+ #include "xfs_error.h"
+ #include "xfs_quota.h"
+ #include "xfs_trans_space.h"
+ #include "xfs_buf_item.h"
+ #include "xfs_trace.h"
+ #include "xfs_symlink.h"
+ #include "xfs_attr_leaf.h"
+ #include "xfs_dinode.h"
+ #include "xfs_filestream.h"
+ kmem_zone_t           *xfs_bmap_free_item_zone;
+ /*
+  * Miscellaneous helper functions
+  */
+ /*
+  * Compute and fill in the value of the maximum depth of a bmap btree
+  * in this filesystem.  Done once, during mount.
+  */
+ void
+ xfs_bmap_compute_maxlevels(
+       xfs_mount_t     *mp,            /* file system mount structure */
+       int             whichfork)      /* data or attr fork */
+ {
+       int             level;          /* btree level */
+       uint            maxblocks;      /* max blocks at this level */
+       uint            maxleafents;    /* max leaf entries possible */
+       int             maxrootrecs;    /* max records in root block */
+       int             minleafrecs;    /* min records in leaf block */
+       int             minnoderecs;    /* min records in node block */
+       int             sz;             /* root block size */
+       /*
+        * The maximum number of extents in a file, hence the maximum
+        * number of leaf entries, is controlled by the type of di_nextents
+        * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
+        * (a signed 16-bit number, xfs_aextnum_t).
+        *
+        * Note that we can no longer assume that if we are in ATTR1 that
+        * the fork offset of all the inodes will be
+        * (xfs_default_attroffset(ip) >> 3) because we could have mounted
+        * with ATTR2 and then mounted back with ATTR1, keeping the
+        * di_forkoff's fixed but probably at various positions. Therefore,
+        * for both ATTR1 and ATTR2 we have to assume the worst case scenario
+        * of a minimum size available.
+        */
+       if (whichfork == XFS_DATA_FORK) {
+               maxleafents = MAXEXTNUM;
+               sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
+       } else {
+               maxleafents = MAXAEXTNUM;
+               sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
+       }
+       maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
+       minleafrecs = mp->m_bmap_dmnr[0];
+       minnoderecs = mp->m_bmap_dmnr[1];
+       maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
+       for (level = 1; maxblocks > 1; level++) {
+               if (maxblocks <= maxrootrecs)
+                       maxblocks = 1;
+               else
+                       maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
+       }
+       mp->m_bm_maxlevels[whichfork] = level;
+ }
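
For illustration, the loop above is nothing more than repeated rounding-up
division until everything fits in a root block. A minimal userspace sketch of
the same walk, where the 9/125/125 fan-out values are hypothetical stand-ins
for xfs_bmdr_maxrecs() and m_bmap_dmnr[] rather than numbers read from a real
mount:

#include <stdio.h>

static int
compute_maxlevels(
	unsigned	maxleafents,	/* max possible leaf entries */
	unsigned	maxrootrecs,	/* records that fit in the root */
	unsigned	minleafrecs,	/* min records per leaf block */
	unsigned	minnoderecs)	/* min records per node block */
{
	unsigned	maxblocks;
	int		level;

	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	return level;
}

int
main(void)
{
	/* a MAXEXTNUM-sized data fork (2^31 - 1 entries), made-up fan-outs */
	printf("%d\n", compute_maxlevels(0x7fffffffu, 9, 125, 125));
	return 0;
}
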
+ STATIC int                            /* error */
+ xfs_bmbt_lookup_eq(
+       struct xfs_btree_cur    *cur,
+       xfs_fileoff_t           off,
+       xfs_fsblock_t           bno,
+       xfs_filblks_t           len,
+       int                     *stat)  /* success/failure */
+ {
+       cur->bc_rec.b.br_startoff = off;
+       cur->bc_rec.b.br_startblock = bno;
+       cur->bc_rec.b.br_blockcount = len;
+       return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
+ }
+ STATIC int                            /* error */
+ xfs_bmbt_lookup_ge(
+       struct xfs_btree_cur    *cur,
+       xfs_fileoff_t           off,
+       xfs_fsblock_t           bno,
+       xfs_filblks_t           len,
+       int                     *stat)  /* success/failure */
+ {
+       cur->bc_rec.b.br_startoff = off;
+       cur->bc_rec.b.br_startblock = bno;
+       cur->bc_rec.b.br_blockcount = len;
+       return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
+ }
+ /*
+  * Check if the inode needs to be converted to btree format.
+  */
+ static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
+ {
+       return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
+               XFS_IFORK_NEXTENTS(ip, whichfork) >
+                       XFS_IFORK_MAXEXT(ip, whichfork);
+ }
+ /*
+  * Check if the inode should be converted to extent format.
+  */
+ static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
+ {
+       return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
+               XFS_IFORK_NEXTENTS(ip, whichfork) <=
+                       XFS_IFORK_MAXEXT(ip, whichfork);
+ }
+ /*
+  * Update the record referred to by cur to the value given
+  * by [off, bno, len, state].
+  * This either works (return 0) or gets an EFSCORRUPTED error.
+  */
+ STATIC int
+ xfs_bmbt_update(
+       struct xfs_btree_cur    *cur,
+       xfs_fileoff_t           off,
+       xfs_fsblock_t           bno,
+       xfs_filblks_t           len,
+       xfs_exntst_t            state)
+ {
+       union xfs_btree_rec     rec;
+       xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
+       return xfs_btree_update(cur, &rec);
+ }
+ /*
+  * Compute the worst-case number of indirect blocks that will be used
+  * for ip's delayed extent of length "len".
+  */
+ STATIC xfs_filblks_t
+ xfs_bmap_worst_indlen(
+       xfs_inode_t     *ip,            /* incore inode pointer */
+       xfs_filblks_t   len)            /* delayed extent length */
+ {
+       int             level;          /* btree level number */
+       int             maxrecs;        /* maximum record count at this level */
+       xfs_mount_t     *mp;            /* mount structure */
+       xfs_filblks_t   rval;           /* return value */
+       mp = ip->i_mount;
+       maxrecs = mp->m_bmap_dmxr[0];
+       for (level = 0, rval = 0;
+            level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
+            level++) {
+               len += maxrecs - 1;
+               do_div(len, maxrecs);
+               rval += len;
+               if (len == 1)
+                       return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
+                               level - 1;
+               if (level == 0)
+                       maxrecs = mp->m_bmap_dmxr[1];
+       }
+       return rval;
+ }
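
Restated: at each level the block count becomes the ceiling of the previous
count divided by the per-block record limit, and once a level collapses to a
single block, one further block is charged for each remaining level up to the
root. A standalone sketch of the same arithmetic, where maxlevels,
leaf_maxrecs and node_maxrecs are hypothetical stand-ins for
XFS_BM_MAXLEVELS() and m_bmap_dmxr[]:

static unsigned long long
worst_indlen(
	unsigned long long	len,		/* delayed extent length */
	int			maxlevels,
	int			leaf_maxrecs,
	int			node_maxrecs)
{
	unsigned long long	rval = 0;
	int			maxrecs = leaf_maxrecs;
	int			level;

	for (level = 0; level < maxlevels; level++) {
		len = (len + maxrecs - 1) / maxrecs;	/* blocks this level */
		rval += len;
		if (len == 1)
			return rval + maxlevels - level - 1;
		if (level == 0)
			maxrecs = node_maxrecs;
	}
	return rval;
}

/* e.g. worst_indlen(1024, 5, 125, 250) == 13: 9 leaf blocks, 1 node
 * block, plus one block for each of the 3 remaining levels. */
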
+ /*
+  * Calculate the default attribute fork offset for newly created inodes.
+  */
+ uint
+ xfs_default_attroffset(
+       struct xfs_inode        *ip)
+ {
+       struct xfs_mount        *mp = ip->i_mount;
+       uint                    offset;
+       if (mp->m_sb.sb_inodesize == 256) {
+               offset = XFS_LITINO(mp, ip->i_d.di_version) -
+                               XFS_BMDR_SPACE_CALC(MINABTPTRS);
+       } else {
+               offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
+       }
+       ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
+       return offset;
+ }
+ /*
+  * Helper routine to reset inode di_forkoff field when switching
+  * attribute fork from local to extent format - we reset it where
+  * possible to make space available for inline data fork extents.
+  */
+ STATIC void
+ xfs_bmap_forkoff_reset(
+       xfs_inode_t     *ip,
+       int             whichfork)
+ {
+       if (whichfork == XFS_ATTR_FORK &&
+           ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
+           ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
+           ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
+               uint    dfl_forkoff = xfs_default_attroffset(ip) >> 3;
+               if (dfl_forkoff > ip->i_d.di_forkoff)
+                       ip->i_d.di_forkoff = dfl_forkoff;
+       }
+ }
+ /*
+  * Debug/sanity checking code
+  */
+ STATIC int
+ xfs_bmap_sanity_check(
+       struct xfs_mount        *mp,
+       struct xfs_buf          *bp,
+       int                     level)
+ {
+       struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
+       if (block->bb_magic != cpu_to_be32(XFS_BMAP_CRC_MAGIC) &&
+           block->bb_magic != cpu_to_be32(XFS_BMAP_MAGIC))
+               return 0;
+       if (be16_to_cpu(block->bb_level) != level ||
+           be16_to_cpu(block->bb_numrecs) == 0 ||
+           be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
+               return 0;
+       return 1;
+ }
+ #ifdef DEBUG
+ STATIC struct xfs_buf *
+ xfs_bmap_get_bp(
+       struct xfs_btree_cur    *cur,
+       xfs_fsblock_t           bno)
+ {
+       struct xfs_log_item_desc *lidp;
+       int                     i;
+       if (!cur)
+               return NULL;
+       for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
+               if (!cur->bc_bufs[i])
+                       break;
+               if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
+                       return cur->bc_bufs[i];
+       }
+       /* Chase down all the log items to see if the bp is there */
+       list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
+               struct xfs_buf_log_item *bip;
+               bip = (struct xfs_buf_log_item *)lidp->lid_item;
+               if (bip->bli_item.li_type == XFS_LI_BUF &&
+                   XFS_BUF_ADDR(bip->bli_buf) == bno)
+                       return bip->bli_buf;
+       }
+       return NULL;
+ }
+ STATIC void
+ xfs_check_block(
+       struct xfs_btree_block  *block,
+       xfs_mount_t             *mp,
+       int                     root,
+       short                   sz)
+ {
+       int                     i, j, dmxr;
+       __be64                  *pp, *thispa;   /* pointer to block address */
+       xfs_bmbt_key_t          *prevp, *keyp;
+       ASSERT(be16_to_cpu(block->bb_level) > 0);
+       prevp = NULL;
+       for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
+               dmxr = mp->m_bmap_dmxr[0];
+               keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
+               if (prevp) {
+                       ASSERT(be64_to_cpu(prevp->br_startoff) <
+                              be64_to_cpu(keyp->br_startoff));
+               }
+               prevp = keyp;
+               /*
+                * Compare the block numbers to see if there are dups.
+                */
+               if (root)
+                       pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
+               else
+                       pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
+               for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
+                       if (root)
+                               thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
+                       else
+                               thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
+                       if (*thispa == *pp) {
+                               xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
+                                       __func__, j, i,
+                                       (unsigned long long)be64_to_cpu(*thispa));
+                               panic("%s: ptrs are equal in node\n",
+                                       __func__);
+                       }
+               }
+       }
+ }
+ /*
+  * Check that the extents for the inode ip are in the right order in all
+  * btree leaves.
+  */
+ STATIC void
+ xfs_bmap_check_leaf_extents(
+       xfs_btree_cur_t         *cur,   /* btree cursor or null */
+       xfs_inode_t             *ip,            /* incore inode pointer */
+       int                     whichfork)      /* data or attr fork */
+ {
+       struct xfs_btree_block  *block; /* current btree block */
+       xfs_fsblock_t           bno;    /* block # of "block" */
+       xfs_buf_t               *bp;    /* buffer for "block" */
+       int                     error;  /* error return value */
+       xfs_extnum_t            i = 0, j; /* index into the extents list */
+       xfs_ifork_t             *ifp;   /* fork structure */
+       int                     level;  /* btree level, for checking */
+       xfs_mount_t             *mp;    /* file system mount structure */
+       __be64                  *pp;    /* pointer to block address */
+       xfs_bmbt_rec_t          *ep;    /* pointer to current extent */
+       xfs_bmbt_rec_t          last = {0, 0}; /* last extent in prev block */
+       xfs_bmbt_rec_t          *nextp; /* pointer to next extent */
+       int                     bp_release = 0;
+       if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
+               return;
+       }
+       bno = NULLFSBLOCK;
+       mp = ip->i_mount;
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       block = ifp->if_broot;
+       /*
+        * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
+        */
+       level = be16_to_cpu(block->bb_level);
+       ASSERT(level > 0);
+       xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
+       pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
+       bno = be64_to_cpu(*pp);
+       ASSERT(bno != NULLDFSBNO);
+       ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
+       ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
+       /*
+        * Go down the tree until leaf level is reached, following the first
+        * pointer (leftmost) at each level.
+        */
+       while (level-- > 0) {
+               /* See if buf is in cur first */
+               bp_release = 0;
+               bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
+               if (!bp) {
+                       bp_release = 1;
+                       error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
+                                               XFS_BMAP_BTREE_REF,
+                                               &xfs_bmbt_buf_ops);
+                       if (error)
+                               goto error_norelse;
+               }
+               block = XFS_BUF_TO_BLOCK(bp);
+               XFS_WANT_CORRUPTED_GOTO(
+                       xfs_bmap_sanity_check(mp, bp, level),
+                       error0);
+               if (level == 0)
+                       break;
+               /*
+                * Check this block for basic sanity (increasing keys and
+                * no duplicate blocks).
+                */
+               xfs_check_block(block, mp, 0, 0);
+               pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
+               bno = be64_to_cpu(*pp);
+               XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
+               if (bp_release) {
+                       bp_release = 0;
+                       xfs_trans_brelse(NULL, bp);
+               }
+       }
+       /*
+        * Here with bp and block set to the leftmost leaf node in the tree.
+        */
+       i = 0;
+       /*
+        * Loop over all leaf nodes checking that all extents are in the right order.
+        */
+       for (;;) {
+               xfs_fsblock_t   nextbno;
+               xfs_extnum_t    num_recs;
+               num_recs = xfs_btree_get_numrecs(block);
+               /*
+                * Read-ahead the next leaf block, if any.
+                */
+               nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
+               /*
+                * Check all the extents to make sure they are OK.
+                * If we had a previous block, the last entry should
+                * conform with the first entry in this one.
+                */
+               ep = XFS_BMBT_REC_ADDR(mp, block, 1);
+               if (i) {
+                       ASSERT(xfs_bmbt_disk_get_startoff(&last) +
+                              xfs_bmbt_disk_get_blockcount(&last) <=
+                              xfs_bmbt_disk_get_startoff(ep));
+               }
+               for (j = 1; j < num_recs; j++) {
+                       nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
+                       ASSERT(xfs_bmbt_disk_get_startoff(ep) +
+                              xfs_bmbt_disk_get_blockcount(ep) <=
+                              xfs_bmbt_disk_get_startoff(nextp));
+                       ep = nextp;
+               }
+               last = *ep;
+               i += num_recs;
+               if (bp_release) {
+                       bp_release = 0;
+                       xfs_trans_brelse(NULL, bp);
+               }
+               bno = nextbno;
+               /*
+                * If we've reached the end, stop.
+                */
+               if (bno == NULLFSBLOCK)
+                       break;
+               bp_release = 0;
+               bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
+               if (!bp) {
+                       bp_release = 1;
+                       error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
+                                               XFS_BMAP_BTREE_REF,
+                                               &xfs_bmbt_buf_ops);
+                       if (error)
+                               goto error_norelse;
+               }
+               block = XFS_BUF_TO_BLOCK(bp);
+       }
+       if (bp_release) {
+               bp_release = 0;
+               xfs_trans_brelse(NULL, bp);
+       }
+       return;
+ error0:
+       xfs_warn(mp, "%s: at error0", __func__);
+       if (bp_release)
+               xfs_trans_brelse(NULL, bp);
+ error_norelse:
+       xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
+               __func__, i);
+       panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
+       return;
+ }
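
The invariant all of these assertions encode is that the extent records are
disjoint and ascending: each record must end at or before the next one
starts, both within a leaf and across the leaf chain. A self-contained
sketch of that check over a plain array, where extent_t and its fields are
hypothetical simplifications of xfs_bmbt_rec_t and its disk accessors:

typedef struct extent {
	unsigned long long	startoff;	/* file offset of extent */
	unsigned long long	blockcount;	/* length of extent */
} extent_t;

static int
extents_ordered(
	const extent_t	*ext,
	int		nextents)
{
	int		i;

	for (i = 1; i < nextents; i++) {
		if (ext[i - 1].startoff + ext[i - 1].blockcount >
		    ext[i].startoff)
			return 0;	/* overlap or out of order */
	}
	return 1;
}
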
+ /*
+  * Add bmap trace insert entries for all the contents of the extent records.
+  */
+ void
+ xfs_bmap_trace_exlist(
+       xfs_inode_t     *ip,            /* incore inode pointer */
+       xfs_extnum_t    cnt,            /* count of entries in the list */
+       int             whichfork,      /* data or attr fork */
+       unsigned long   caller_ip)
+ {
+       xfs_extnum_t    idx;            /* extent record index */
+       xfs_ifork_t     *ifp;           /* inode fork pointer */
+       int             state = 0;
+       if (whichfork == XFS_ATTR_FORK)
+               state |= BMAP_ATTRFORK;
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
+       for (idx = 0; idx < cnt; idx++)
+               trace_xfs_extlist(ip, idx, whichfork, caller_ip);
+ }
+ /*
+  * Validate that the bmbt_irecs being returned from bmapi are valid
+  * given the caller's original parameters.  Specifically check the
+  * ranges of the returned irecs to ensure that they only extend beyond
+  * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
+  */
+ STATIC void
+ xfs_bmap_validate_ret(
+       xfs_fileoff_t           bno,
+       xfs_filblks_t           len,
+       int                     flags,
+       xfs_bmbt_irec_t         *mval,
+       int                     nmap,
+       int                     ret_nmap)
+ {
+       int                     i;              /* index to map values */
+       ASSERT(ret_nmap <= nmap);
+       for (i = 0; i < ret_nmap; i++) {
+               ASSERT(mval[i].br_blockcount > 0);
+               if (!(flags & XFS_BMAPI_ENTIRE)) {
+                       ASSERT(mval[i].br_startoff >= bno);
+                       ASSERT(mval[i].br_blockcount <= len);
+                       ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
+                              bno + len);
+               } else {
+                       ASSERT(mval[i].br_startoff < bno + len);
+                       ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
+                              bno);
+               }
+               ASSERT(i == 0 ||
+                      mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
+                      mval[i].br_startoff);
+               ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
+                      mval[i].br_startblock != HOLESTARTBLOCK);
+               ASSERT(mval[i].br_state == XFS_EXT_NORM ||
+                      mval[i].br_state == XFS_EXT_UNWRITTEN);
+       }
+ }
+ #else
+ #define xfs_bmap_check_leaf_extents(cur, ip, whichfork)               do { } while (0)
+ #define       xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
+ #endif /* DEBUG */
+ /*
+  * bmap free list manipulation functions
+  */
+ /*
+  * Add the extent to the list of extents to be free at transaction end.
+  * The list is maintained sorted (by block number).
+  */
+ void
+ xfs_bmap_add_free(
+       xfs_fsblock_t           bno,            /* fs block number of extent */
+       xfs_filblks_t           len,            /* length of extent */
+       xfs_bmap_free_t         *flist,         /* list of extents */
+       xfs_mount_t             *mp)            /* mount point structure */
+ {
+       xfs_bmap_free_item_t    *cur;           /* current (next) element */
+       xfs_bmap_free_item_t    *new;           /* new element */
+       xfs_bmap_free_item_t    *prev;          /* previous element */
+ #ifdef DEBUG
+       xfs_agnumber_t          agno;
+       xfs_agblock_t           agbno;
+       ASSERT(bno != NULLFSBLOCK);
+       ASSERT(len > 0);
+       ASSERT(len <= MAXEXTLEN);
+       ASSERT(!isnullstartblock(bno));
+       agno = XFS_FSB_TO_AGNO(mp, bno);
+       agbno = XFS_FSB_TO_AGBNO(mp, bno);
+       ASSERT(agno < mp->m_sb.sb_agcount);
+       ASSERT(agbno < mp->m_sb.sb_agblocks);
+       ASSERT(len < mp->m_sb.sb_agblocks);
+       ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
+ #endif
+       ASSERT(xfs_bmap_free_item_zone != NULL);
+       new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
+       new->xbfi_startblock = bno;
+       new->xbfi_blockcount = (xfs_extlen_t)len;
+       for (prev = NULL, cur = flist->xbf_first;
+            cur != NULL;
+            prev = cur, cur = cur->xbfi_next) {
+               if (cur->xbfi_startblock >= bno)
+                       break;
+       }
+       if (prev)
+               prev->xbfi_next = new;
+       else
+               flist->xbf_first = new;
+       new->xbfi_next = cur;
+       flist->xbf_count++;
+ }
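
Structurally this is an ordinary sorted insert into a singly linked list:
walk with a trailing prev pointer, stop at the first element whose key is
not smaller, and splice in front of it. A generic sketch, where node_t,
key and next are hypothetical names rather than the xbfi_* fields above:

typedef struct node {
	unsigned long long	key;
	struct node		*next;
} node_t;

static void
sorted_insert(
	node_t		**head,
	node_t		*new)
{
	node_t		*prev = NULL;
	node_t		*cur;

	for (cur = *head; cur && cur->key < new->key; cur = cur->next)
		prev = cur;
	if (prev)
		prev->next = new;
	else
		*head = new;
	new->next = cur;
}
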
+ /*
+  * Remove the entry "free" from the free item list.  Prev points to the
+  * previous entry, unless "free" is the head of the list.
+  */
+ void
+ xfs_bmap_del_free(
+       xfs_bmap_free_t         *flist, /* free item list header */
+       xfs_bmap_free_item_t    *prev,  /* previous item on list, if any */
+       xfs_bmap_free_item_t    *free)  /* list item to be freed */
+ {
+       if (prev)
+               prev->xbfi_next = free->xbfi_next;
+       else
+               flist->xbf_first = free->xbfi_next;
+       flist->xbf_count--;
+       kmem_zone_free(xfs_bmap_free_item_zone, free);
+ }
+ /*
+  * Free up any items left in the list.
+  */
+ void
+ xfs_bmap_cancel(
+       xfs_bmap_free_t         *flist) /* list of bmap_free_items */
+ {
+       xfs_bmap_free_item_t    *free;  /* free list item */
+       xfs_bmap_free_item_t    *next;
+       if (flist->xbf_count == 0)
+               return;
+       ASSERT(flist->xbf_first != NULL);
+       for (free = flist->xbf_first; free; free = next) {
+               next = free->xbfi_next;
+               xfs_bmap_del_free(flist, NULL, free);
+       }
+       ASSERT(flist->xbf_count == 0);
+ }
+ /*
+  * Inode fork format manipulation functions
+  */
+ /*
+  * Transform a btree format file with only one leaf node, where the
+  * extents list will fit in the inode, into an extents format file.
+  * Since the file extents are already in-core, all we have to do is
+  * give up the space for the btree root and pitch the leaf block.
+  */
+ STATIC int                            /* error */
+ xfs_bmap_btree_to_extents(
+       xfs_trans_t             *tp,    /* transaction pointer */
+       xfs_inode_t             *ip,    /* incore inode pointer */
+       xfs_btree_cur_t         *cur,   /* btree cursor */
+       int                     *logflagsp, /* inode logging flags */
+       int                     whichfork)  /* data or attr fork */
+ {
+       /* REFERENCED */
+       struct xfs_btree_block  *cblock;/* child btree block */
+       xfs_fsblock_t           cbno;   /* child block number */
+       xfs_buf_t               *cbp;   /* child block's buffer */
+       int                     error;  /* error return value */
+       xfs_ifork_t             *ifp;   /* inode fork data */
+       xfs_mount_t             *mp;    /* mount point structure */
+       __be64                  *pp;    /* ptr to block address */
+       struct xfs_btree_block  *rblock;/* root btree block */
+       mp = ip->i_mount;
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       ASSERT(ifp->if_flags & XFS_IFEXTENTS);
+       ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
+       rblock = ifp->if_broot;
+       ASSERT(be16_to_cpu(rblock->bb_level) == 1);
+       ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
+       ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
+       pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
+       cbno = be64_to_cpu(*pp);
+       *logflagsp = 0;
+ #ifdef DEBUG
+       if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
+               return error;
+ #endif
+       error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
+                               &xfs_bmbt_buf_ops);
+       if (error)
+               return error;
+       cblock = XFS_BUF_TO_BLOCK(cbp);
+       if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
+               return error;
+       xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
+       ip->i_d.di_nblocks--;
+       xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
+       xfs_trans_binval(tp, cbp);
+       if (cur->bc_bufs[0] == cbp)
+               cur->bc_bufs[0] = NULL;
+       xfs_iroot_realloc(ip, -1, whichfork);
+       ASSERT(ifp->if_broot == NULL);
+       ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
+       XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+       *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
+       return 0;
+ }
+ /*
+  * Convert an extents-format file into a btree-format file.
+  * The new file will have a root block (in the inode) and a single child block.
+  */
+ STATIC int                                    /* error */
+ xfs_bmap_extents_to_btree(
+       xfs_trans_t             *tp,            /* transaction pointer */
+       xfs_inode_t             *ip,            /* incore inode pointer */
+       xfs_fsblock_t           *firstblock,    /* first-block-allocated */
+       xfs_bmap_free_t         *flist,         /* blocks freed in xaction */
+       xfs_btree_cur_t         **curp,         /* cursor returned to caller */
+       int                     wasdel,         /* converting a delayed alloc */
+       int                     *logflagsp,     /* inode logging flags */
+       int                     whichfork)      /* data or attr fork */
+ {
+       struct xfs_btree_block  *ablock;        /* allocated (child) bt block */
+       xfs_buf_t               *abp;           /* buffer for ablock */
+       xfs_alloc_arg_t         args;           /* allocation arguments */
+       xfs_bmbt_rec_t          *arp;           /* child record pointer */
+       struct xfs_btree_block  *block;         /* btree root block */
+       xfs_btree_cur_t         *cur;           /* bmap btree cursor */
+       xfs_bmbt_rec_host_t     *ep;            /* extent record pointer */
+       int                     error;          /* error return value */
+       xfs_extnum_t            i, cnt;         /* extent record index */
+       xfs_ifork_t             *ifp;           /* inode fork pointer */
+       xfs_bmbt_key_t          *kp;            /* root block key pointer */
+       xfs_mount_t             *mp;            /* mount structure */
+       xfs_extnum_t            nextents;       /* number of file extents */
+       xfs_bmbt_ptr_t          *pp;            /* root block address pointer */
+       mp = ip->i_mount;
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
+       /*
+        * Make space in the inode incore.
+        */
+       xfs_iroot_realloc(ip, 1, whichfork);
+       ifp->if_flags |= XFS_IFBROOT;
+       /*
+        * Fill in the root.
+        */
+       block = ifp->if_broot;
+       if (xfs_sb_version_hascrc(&mp->m_sb))
+               xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
+                                XFS_BMAP_CRC_MAGIC, 1, 1, ip->i_ino,
+                                XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
+       else
+               xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
+                                XFS_BMAP_MAGIC, 1, 1, ip->i_ino,
+                                XFS_BTREE_LONG_PTRS);
+       /*
+        * Need a cursor.  Can't allocate until bb_level is filled in.
+        */
+       cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
+       cur->bc_private.b.firstblock = *firstblock;
+       cur->bc_private.b.flist = flist;
+       cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
+       /*
+        * Convert to a btree with two levels, one record in root.
+        */
+       XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
+       memset(&args, 0, sizeof(args));
+       args.tp = tp;
+       args.mp = mp;
+       args.firstblock = *firstblock;
+       if (*firstblock == NULLFSBLOCK) {
+               args.type = XFS_ALLOCTYPE_START_BNO;
+               args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
+       } else if (flist->xbf_low) {
+               args.type = XFS_ALLOCTYPE_START_BNO;
+               args.fsbno = *firstblock;
+       } else {
+               args.type = XFS_ALLOCTYPE_NEAR_BNO;
+               args.fsbno = *firstblock;
+       }
+       args.minlen = args.maxlen = args.prod = 1;
+       args.wasdel = wasdel;
+       *logflagsp = 0;
+       if ((error = xfs_alloc_vextent(&args))) {
+               xfs_iroot_realloc(ip, -1, whichfork);
+               xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+               return error;
+       }
+       /*
+        * Allocation can't fail, the space was reserved.
+        */
+       ASSERT(args.fsbno != NULLFSBLOCK);
+       ASSERT(*firstblock == NULLFSBLOCK ||
+              args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
+              (flist->xbf_low &&
+               args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
+       *firstblock = cur->bc_private.b.firstblock = args.fsbno;
+       cur->bc_private.b.allocated++;
+       ip->i_d.di_nblocks++;
+       xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
+       abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
+       /*
+        * Fill in the child block.
+        */
+       abp->b_ops = &xfs_bmbt_buf_ops;
+       ablock = XFS_BUF_TO_BLOCK(abp);
+       if (xfs_sb_version_hascrc(&mp->m_sb))
+               xfs_btree_init_block_int(mp, ablock, abp->b_bn,
+                               XFS_BMAP_CRC_MAGIC, 0, 0, ip->i_ino,
+                               XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
+       else
+               xfs_btree_init_block_int(mp, ablock, abp->b_bn,
+                               XFS_BMAP_MAGIC, 0, 0, ip->i_ino,
+                               XFS_BTREE_LONG_PTRS);
+       arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
+       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+       for (cnt = i = 0; i < nextents; i++) {
+               ep = xfs_iext_get_ext(ifp, i);
+               if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
+                       arp->l0 = cpu_to_be64(ep->l0);
+                       arp->l1 = cpu_to_be64(ep->l1);
+                       arp++; cnt++;
+               }
+       }
+       ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
+       xfs_btree_set_numrecs(ablock, cnt);
+       /*
+        * Fill in the root key and pointer.
+        */
+       kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
+       arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
+       kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
+       pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
+                                               be16_to_cpu(block->bb_level)));
+       *pp = cpu_to_be64(args.fsbno);
+       /*
+        * Do all this logging at the end so that
+        * the root is at the right level.
+        */
+       xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
+       xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
+       ASSERT(*curp == NULL);
+       *curp = cur;
+       *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
+       return 0;
+ }
+ /*
+  * Convert a local file to an extents file.
+  * This code is out of bounds for data forks of regular files,
+  * since the file data needs to get logged so things will stay consistent.
+  * (The bmap-level manipulations are ok, though).
+  */
+ void
+ xfs_bmap_local_to_extents_empty(
+       struct xfs_inode        *ip,
+       int                     whichfork)
+ {
+       struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
+       ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
+       ASSERT(ifp->if_bytes == 0);
+       ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
+       xfs_bmap_forkoff_reset(ip, whichfork);
+       ifp->if_flags &= ~XFS_IFINLINE;
+       ifp->if_flags |= XFS_IFEXTENTS;
+       XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+ }
+ STATIC int                            /* error */
+ xfs_bmap_local_to_extents(
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_inode_t     *ip,            /* incore inode pointer */
+       xfs_fsblock_t   *firstblock,    /* first block allocated in xaction */
+       xfs_extlen_t    total,          /* total blocks needed by transaction */
+       int             *logflagsp,     /* inode logging flags */
+       int             whichfork,
+       void            (*init_fn)(struct xfs_trans *tp,
+                                  struct xfs_buf *bp,
+                                  struct xfs_inode *ip,
+                                  struct xfs_ifork *ifp))
+ {
+       int             error = 0;
+       int             flags;          /* logging flags returned */
+       xfs_ifork_t     *ifp;           /* inode fork pointer */
+       xfs_alloc_arg_t args;           /* allocation arguments */
+       xfs_buf_t       *bp;            /* buffer for extent block */
+       xfs_bmbt_rec_host_t *ep;        /* extent record pointer */
+       /*
+        * We don't want to deal with the case of keeping inode data inline yet.
+        * So sending the data fork of a regular inode is invalid.
+        */
+       ASSERT(!(S_ISREG(ip->i_d.di_mode) && whichfork == XFS_DATA_FORK));
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
+       if (!ifp->if_bytes) {
+               xfs_bmap_local_to_extents_empty(ip, whichfork);
+               flags = XFS_ILOG_CORE;
+               goto done;
+       }
+       flags = 0;
+       error = 0;
+       ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
+                                                               XFS_IFINLINE);
+       memset(&args, 0, sizeof(args));
+       args.tp = tp;
+       args.mp = ip->i_mount;
+       args.firstblock = *firstblock;
+       /*
+        * Allocate a block.  We know we need only one, since the
+        * file currently fits in an inode.
+        */
+       if (*firstblock == NULLFSBLOCK) {
+               args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
+               args.type = XFS_ALLOCTYPE_START_BNO;
+       } else {
+               args.fsbno = *firstblock;
+               args.type = XFS_ALLOCTYPE_NEAR_BNO;
+       }
+       args.total = total;
+       args.minlen = args.maxlen = args.prod = 1;
+       error = xfs_alloc_vextent(&args);
+       if (error)
+               goto done;
+       /* Can't fail, the space was reserved. */
+       ASSERT(args.fsbno != NULLFSBLOCK);
+       ASSERT(args.len == 1);
+       *firstblock = args.fsbno;
+       bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
+       /* initialise the block and copy the data */
+       init_fn(tp, bp, ip, ifp);
+       /* account for the change in fork size and log everything */
+       xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
+       xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
+       xfs_bmap_local_to_extents_empty(ip, whichfork);
+       flags |= XFS_ILOG_CORE;
+       xfs_iext_add(ifp, 0, 1);
+       ep = xfs_iext_get_ext(ifp, 0);
+       xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
+       trace_xfs_bmap_post_update(ip, 0,
+                       whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
+                       _THIS_IP_);
+       XFS_IFORK_NEXT_SET(ip, whichfork, 1);
+       ip->i_d.di_nblocks = 1;
+       xfs_trans_mod_dquot_byino(tp, ip,
+               XFS_TRANS_DQ_BCOUNT, 1L);
+       flags |= xfs_ilog_fext(whichfork);
+ done:
+       *logflagsp = flags;
+       return error;
+ }
+ /*
+  * Called from xfs_bmap_add_attrfork to handle btree format files.
+  */
+ STATIC int                                    /* error */
+ xfs_bmap_add_attrfork_btree(
+       xfs_trans_t             *tp,            /* transaction pointer */
+       xfs_inode_t             *ip,            /* incore inode pointer */
+       xfs_fsblock_t           *firstblock,    /* first block allocated */
+       xfs_bmap_free_t         *flist,         /* blocks to free at commit */
+       int                     *flags)         /* inode logging flags */
+ {
+       xfs_btree_cur_t         *cur;           /* btree cursor */
+       int                     error;          /* error return value */
+       xfs_mount_t             *mp;            /* file system mount struct */
+       int                     stat;           /* newroot status */
+       mp = ip->i_mount;
+       if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
+               *flags |= XFS_ILOG_DBROOT;
+       else {
+               cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
+               cur->bc_private.b.flist = flist;
+               cur->bc_private.b.firstblock = *firstblock;
+               if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
+                       goto error0;
+               /* must be at least one entry */
+               XFS_WANT_CORRUPTED_GOTO(stat == 1, error0);
+               if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
+                       goto error0;
+               if (stat == 0) {
+                       xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+                       return -ENOSPC;
+               }
+               *firstblock = cur->bc_private.b.firstblock;
+               cur->bc_private.b.allocated = 0;
+               xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+       }
+       return 0;
+ error0:
+       xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+       return error;
+ }
+ /*
+  * Called from xfs_bmap_add_attrfork to handle extents format files.
+  */
+ STATIC int                                    /* error */
+ xfs_bmap_add_attrfork_extents(
+       xfs_trans_t             *tp,            /* transaction pointer */
+       xfs_inode_t             *ip,            /* incore inode pointer */
+       xfs_fsblock_t           *firstblock,    /* first block allocated */
+       xfs_bmap_free_t         *flist,         /* blocks to free at commit */
+       int                     *flags)         /* inode logging flags */
+ {
+       xfs_btree_cur_t         *cur;           /* bmap btree cursor */
+       int                     error;          /* error return value */
+       if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
+               return 0;
+       cur = NULL;
+       error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
+               flags, XFS_DATA_FORK);
+       if (cur) {
+               cur->bc_private.b.allocated = 0;
+               xfs_btree_del_cursor(cur,
+                       error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+       }
+       return error;
+ }
+ /*
+  * Called from xfs_bmap_add_attrfork to handle local format files. Each
+  * different data fork content type needs a different callout to do the
+  * conversion. Some are basic and only require special block initialisation
+  * callouts for the data formatting; others (directories) are so specialised they
+  * handle everything themselves.
+  *
+  * XXX (dgc): investigate whether directory conversion can use the generic
+  * formatting callout. It should be possible - it's just a very complex
+  * formatter.
+  */
+ STATIC int                                    /* error */
+ xfs_bmap_add_attrfork_local(
+       xfs_trans_t             *tp,            /* transaction pointer */
+       xfs_inode_t             *ip,            /* incore inode pointer */
+       xfs_fsblock_t           *firstblock,    /* first block allocated */
+       xfs_bmap_free_t         *flist,         /* blocks to free at commit */
+       int                     *flags)         /* inode logging flags */
+ {
+       xfs_da_args_t           dargs;          /* args for dir/attr code */
+       if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
+               return 0;
+       if (S_ISDIR(ip->i_d.di_mode)) {
+               memset(&dargs, 0, sizeof(dargs));
+               dargs.geo = ip->i_mount->m_dir_geo;
+               dargs.dp = ip;
+               dargs.firstblock = firstblock;
+               dargs.flist = flist;
+               dargs.total = dargs.geo->fsbcount;
+               dargs.whichfork = XFS_DATA_FORK;
+               dargs.trans = tp;
+               return xfs_dir2_sf_to_block(&dargs);
+       }
+       if (S_ISLNK(ip->i_d.di_mode))
+               return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
+                                                flags, XFS_DATA_FORK,
+                                                xfs_symlink_local_to_remote);
+       /* should only be called for types that support local format data */
+       ASSERT(0);
+       return -EFSCORRUPTED;
+ }
+ /*
+  * Convert inode from non-attributed to attributed.
+  * Must not be in a transaction; ip must not be locked.
+  */
+ int                                           /* error code */
+ xfs_bmap_add_attrfork(
+       xfs_inode_t             *ip,            /* incore inode pointer */
+       int                     size,           /* space new attribute needs */
+       int                     rsvd)           /* xact may use reserved blks */
+ {
+       xfs_fsblock_t           firstblock;     /* 1st block/ag allocated */
+       xfs_bmap_free_t         flist;          /* freed extent records */
+       xfs_mount_t             *mp;            /* mount structure */
+       xfs_trans_t             *tp;            /* transaction pointer */
+       int                     blks;           /* space reservation */
+       int                     version = 1;    /* superblock attr version */
+       int                     committed;      /* xaction was committed */
+       int                     logflags;       /* logging flags */
+       int                     error;          /* error return value */
+       int                     cancel_flags = 0;
+       ASSERT(XFS_IFORK_Q(ip) == 0);
+       mp = ip->i_mount;
+       ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
+       tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
+       blks = XFS_ADDAFORK_SPACE_RES(mp);
+       if (rsvd)
+               tp->t_flags |= XFS_TRANS_RESERVE;
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_addafork, blks, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               return error;
+       }
+       cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
+                       XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
+                       XFS_QMOPT_RES_REGBLKS);
+       if (error)
+               goto trans_cancel;
+       cancel_flags |= XFS_TRANS_ABORT;
+       if (XFS_IFORK_Q(ip))
+               goto trans_cancel;
+       if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
+               /*
+                * For inodes coming from pre-6.2 filesystems.
+                */
+               ASSERT(ip->i_d.di_aformat == 0);
+               ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
+       }
+       ASSERT(ip->i_d.di_anextents == 0);
+       xfs_trans_ijoin(tp, ip, 0);
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+       switch (ip->i_d.di_format) {
+       case XFS_DINODE_FMT_DEV:
+               ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
+               break;
+       case XFS_DINODE_FMT_UUID:
+               ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
+               break;
+       case XFS_DINODE_FMT_LOCAL:
+       case XFS_DINODE_FMT_EXTENTS:
+       case XFS_DINODE_FMT_BTREE:
+               ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
+               if (!ip->i_d.di_forkoff)
+                       ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
+               else if (mp->m_flags & XFS_MOUNT_ATTR2)
+                       version = 2;
+               break;
+       default:
+               ASSERT(0);
+               error = -EINVAL;
+               goto trans_cancel;
+       }
+       ASSERT(ip->i_afp == NULL);
+       ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
+       ip->i_afp->if_flags = XFS_IFEXTENTS;
+       logflags = 0;
+       xfs_bmap_init(&flist, &firstblock);
+       switch (ip->i_d.di_format) {
+       case XFS_DINODE_FMT_LOCAL:
+               error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
+                       &logflags);
+               break;
+       case XFS_DINODE_FMT_EXTENTS:
+               error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
+                       &flist, &logflags);
+               break;
+       case XFS_DINODE_FMT_BTREE:
+               error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
+                       &logflags);
+               break;
+       default:
+               error = 0;
+               break;
+       }
+       if (logflags)
+               xfs_trans_log_inode(tp, ip, logflags);
+       if (error)
+               goto bmap_cancel;
+       if (!xfs_sb_version_hasattr(&mp->m_sb) ||
+          (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
+               __int64_t sbfields = 0;
+               spin_lock(&mp->m_sb_lock);
+               if (!xfs_sb_version_hasattr(&mp->m_sb)) {
+                       xfs_sb_version_addattr(&mp->m_sb);
+                       sbfields |= XFS_SB_VERSIONNUM;
+               }
+               if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
+                       xfs_sb_version_addattr2(&mp->m_sb);
+                       sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
+               }
+               if (sbfields) {
+                       spin_unlock(&mp->m_sb_lock);
+                       xfs_mod_sb(tp, sbfields);
+               } else
+                       spin_unlock(&mp->m_sb_lock);
+       }
+       error = xfs_bmap_finish(&tp, &flist, &committed);
+       if (error)
+               goto bmap_cancel;
+       error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       return error;
+ bmap_cancel:
+       xfs_bmap_cancel(&flist);
+ trans_cancel:
+       xfs_trans_cancel(tp, cancel_flags);
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       return error;
+ }
+ /*
+  * Internal and external extent tree search functions.
+  */
+ /*
+  * Read in the extents to if_extents.
+  * All inode fields are set up by caller, we just traverse the btree
+  * and copy the records in. If the file system cannot contain unwritten
+  * extents, the records are checked for no "state" flags.
+  */
+ int                                   /* error */
+ xfs_bmap_read_extents(
+       xfs_trans_t             *tp,    /* transaction pointer */
+       xfs_inode_t             *ip,    /* incore inode */
+       int                     whichfork) /* data or attr fork */
+ {
+       struct xfs_btree_block  *block; /* current btree block */
+       xfs_fsblock_t           bno;    /* block # of "block" */
+       xfs_buf_t               *bp;    /* buffer for "block" */
+       int                     error;  /* error return value */
+       xfs_exntfmt_t           exntf;  /* XFS_EXTFMT_NOSTATE, if checking */
+       xfs_extnum_t            i, j;   /* index into the extents list */
+       xfs_ifork_t             *ifp;   /* fork structure */
+       int                     level;  /* btree level, for checking */
+       xfs_mount_t             *mp;    /* file system mount structure */
+       __be64                  *pp;    /* pointer to block address */
+       /* REFERENCED */
+       xfs_extnum_t            room;   /* number of entries there's room for */
+       bno = NULLFSBLOCK;
+       mp = ip->i_mount;
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
+                                       XFS_EXTFMT_INODE(ip);
+       block = ifp->if_broot;
+       /*
+        * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
+        */
+       level = be16_to_cpu(block->bb_level);
+       ASSERT(level > 0);
+       pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
+       bno = be64_to_cpu(*pp);
+       ASSERT(bno != NULLDFSBNO);
+       ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
+       ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
+       /*
+        * Go down the tree until leaf level is reached, following the first
+        * pointer (leftmost) at each level.
+        */
+       while (level-- > 0) {
+               error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
+                               XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
+               if (error)
+                       return error;
+               block = XFS_BUF_TO_BLOCK(bp);
+               XFS_WANT_CORRUPTED_GOTO(
+                       xfs_bmap_sanity_check(mp, bp, level),
+                       error0);
+               if (level == 0)
+                       break;
+               pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
+               bno = be64_to_cpu(*pp);
+               XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
+               xfs_trans_brelse(tp, bp);
+       }
+       /*
+        * Here with bp and block set to the leftmost leaf node in the tree.
+        */
+       room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+       i = 0;
+       /*
+        * Loop over all leaf nodes.  Copy information to the extent records.
+        */
+       for (;;) {
+               xfs_bmbt_rec_t  *frp;
+               xfs_fsblock_t   nextbno;
+               xfs_extnum_t    num_recs;
+               xfs_extnum_t    start;
+               num_recs = xfs_btree_get_numrecs(block);
+               if (unlikely(i + num_recs > room)) {
+                       ASSERT(i + num_recs <= room);
+                       xfs_warn(ip->i_mount,
+                               "corrupt dinode %Lu, (btree extents).",
+                               (unsigned long long) ip->i_ino);
+                       XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
+                               XFS_ERRLEVEL_LOW, ip->i_mount, block);
+                       goto error0;
+               }
+               XFS_WANT_CORRUPTED_GOTO(
+                       xfs_bmap_sanity_check(mp, bp, 0),
+                       error0);
+               /*
+                * Read-ahead the next leaf block, if any.
+                */
+               nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
+               if (nextbno != NULLFSBLOCK)
+                       xfs_btree_reada_bufl(mp, nextbno, 1,
+                                            &xfs_bmbt_buf_ops);
+               /*
+                * Copy records into the extent records.
+                */
+               frp = XFS_BMBT_REC_ADDR(mp, block, 1);
+               start = i;
+               for (j = 0; j < num_recs; j++, i++, frp++) {
+                       xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
+                       trp->l0 = be64_to_cpu(frp->l0);
+                       trp->l1 = be64_to_cpu(frp->l1);
+               }
+               if (exntf == XFS_EXTFMT_NOSTATE) {
+                       /*
+                        * Check all attribute bmap btree records and
+                        * any "older" data bmap btree records for a
+                        * set bit in the "extent flag" position.
+                        */
+                       if (unlikely(xfs_check_nostate_extents(ifp,
+                                       start, num_recs))) {
+                               XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
+                                                XFS_ERRLEVEL_LOW,
+                                                ip->i_mount);
+                               goto error0;
+                       }
+               }
+               xfs_trans_brelse(tp, bp);
+               bno = nextbno;
+               /*
+                * If we've reached the end, stop.
+                */
+               if (bno == NULLFSBLOCK)
+                       break;
+               error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
+                               XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
+               if (error)
+                       return error;
+               block = XFS_BUF_TO_BLOCK(bp);
+       }
+       ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
+       ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
+       XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
+       return 0;
+ error0:
+       xfs_trans_brelse(tp, bp);
+       return -EFSCORRUPTED;
+ }
+ /*
+  * Search the extent records for the entry containing block bno.
+  * If bno lies in a hole, point to the next entry.  If bno lies
+  * past eof, *eofp will be set, and *prevp will contain the last
+  * entry (null if none).  Else, *lastxp will be set to the index
+  * of the found entry; *gotp will contain the entry.
+  */
+ STATIC xfs_bmbt_rec_host_t *          /* pointer to found extent entry */
+ xfs_bmap_search_multi_extents(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       xfs_fileoff_t   bno,            /* block number searched for */
+       int             *eofp,          /* out: end of file found */
+       xfs_extnum_t    *lastxp,        /* out: last extent index */
+       xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
+       xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
+ {
+       xfs_bmbt_rec_host_t *ep;                /* extent record pointer */
+       xfs_extnum_t    lastx;          /* last extent index */
+       /*
+        * Initialize the extent entry structure to catch access to
+        * uninitialized br_startblock field.
+        */
+       gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
+       gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
+       gotp->br_state = XFS_EXT_INVALID;
+ #if XFS_BIG_BLKNOS
+       gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
+ #else
+       gotp->br_startblock = 0xffffa5a5;
+ #endif
+       prevp->br_startoff = NULLFILEOFF;
+       ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
+       if (lastx > 0) {
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
+       }
+       if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
+               xfs_bmbt_get_all(ep, gotp);
+               *eofp = 0;
+       } else {
+               if (lastx > 0) {
+                       *gotp = *prevp;
+               }
+               *eofp = 1;
+               ep = NULL;
+       }
+       *lastxp = lastx;
+       return ep;
+ }
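xfs_iext_bno_to_ext() performs this lookup as a binary search over the in-core records; minus the poison-pattern initialization, the semantics boil down to the following userspace sketch (not XFS code; struct irec is a stand-in for xfs_bmbt_irec_t, and a linear scan keeps the sketch short):

#include <stdint.h>

struct irec {
        uint64_t br_startoff;   /* file offset of first block */
        uint64_t br_blockcount; /* length in blocks */
};

/*
 * Find the extent containing bno, or the next one if bno is in a hole.
 * Returns the record index (nextents when bno is past EOF).  The caller
 * should pre-initialize *prevp, as the kernel does with NULLFILEOFF.
 */
static int
irec_search(const struct irec *recs, int nextents, uint64_t bno,
            int *eofp, struct irec *gotp, struct irec *prevp)
{
        int     idx;

        for (idx = 0; idx < nextents; idx++)
                if (bno < recs[idx].br_startoff + recs[idx].br_blockcount)
                        break;
        if (idx > 0)
                *prevp = recs[idx - 1];         /* last entry before bno */
        if (idx < nextents) {
                *gotp = recs[idx];              /* containing or next extent */
                *eofp = 0;
        } else {
                if (idx > 0)
                        *gotp = *prevp;         /* past EOF: hand back last */
                *eofp = 1;
        }
        return idx;
}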
+ /*
+  * Search the extents list for the inode, for the extent containing bno.
+  * If bno lies in a hole, point to the next entry.  If bno lies past eof,
+  * *eofp will be set, and *prevp will contain the last entry (null if none).
+  * Else, *lastxp will be set to the index of the found
+  * entry; *gotp will contain the entry.
+  */
+ STATIC xfs_bmbt_rec_host_t *                 /* pointer to found extent entry */
+ xfs_bmap_search_extents(
+       xfs_inode_t     *ip,            /* incore inode pointer */
+       xfs_fileoff_t   bno,            /* block number searched for */
+       int             fork,           /* data or attr fork */
+       int             *eofp,          /* out: end of file found */
+       xfs_extnum_t    *lastxp,        /* out: last extent index */
+       xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
+       xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
+ {
+       xfs_ifork_t     *ifp;           /* inode fork pointer */
+       xfs_bmbt_rec_host_t  *ep;            /* extent record pointer */
+       XFS_STATS_INC(xs_look_exlist);
+       ifp = XFS_IFORK_PTR(ip, fork);
+       ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
+       if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
+                    !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
+               xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
+                               "Access to block zero in inode %llu "
+                               "start_block: %llx start_off: %llx "
+                               "blkcnt: %llx extent-state: %x lastx: %x",
+                       (unsigned long long)ip->i_ino,
+                       (unsigned long long)gotp->br_startblock,
+                       (unsigned long long)gotp->br_startoff,
+                       (unsigned long long)gotp->br_blockcount,
+                       gotp->br_state, *lastxp);
+               *lastxp = NULLEXTNUM;
+               *eofp = 1;
+               return NULL;
+       }
+       return ep;
+ }
+ /*
+  * Returns the file-relative block number of the first unused block(s)
+  * in the file with at least "len" logically contiguous blocks free.
+  * This is the lowest-address hole if the file has holes, else the first block
+  * past the end of file.
+  * Sets *first_unused to 0 if the file is currently local (in-inode).
+  */
+ int                                           /* error */
+ xfs_bmap_first_unused(
+       xfs_trans_t     *tp,                    /* transaction pointer */
+       xfs_inode_t     *ip,                    /* incore inode */
+       xfs_extlen_t    len,                    /* size of hole to find */
+       xfs_fileoff_t   *first_unused,          /* unused block */
+       int             whichfork)              /* data or attr fork */
+ {
+       int             error;                  /* error return value */
+       int             idx;                    /* extent record index */
+       xfs_ifork_t     *ifp;                   /* inode fork pointer */
+       xfs_fileoff_t   lastaddr;               /* last block number seen */
+       xfs_fileoff_t   lowest;                 /* lowest useful block */
+       xfs_fileoff_t   max;                    /* starting useful block */
+       xfs_fileoff_t   off;                    /* offset for this block */
+       xfs_extnum_t    nextents;               /* number of extent entries */
+       ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
+              XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
+              XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
+       if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
+               *first_unused = 0;
+               return 0;
+       }
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       if (!(ifp->if_flags & XFS_IFEXTENTS) &&
+           (error = xfs_iread_extents(tp, ip, whichfork)))
+               return error;
+       lowest = *first_unused;
+       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+       for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
+               xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
+               off = xfs_bmbt_get_startoff(ep);
+               /*
+                * See if the hole before this extent will work.
+                */
+               if (off >= lowest + len && off - max >= len) {
+                       *first_unused = max;
+                       return 0;
+               }
+               lastaddr = off + xfs_bmbt_get_blockcount(ep);
+               max = XFS_FILEOFF_MAX(lastaddr, lowest);
+       }
+       *first_unused = max;
+       return 0;
+ }
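Because the extent records are sorted by start offset and non-overlapping, the loop above is a first-fit scan over the holes between them. A standalone sketch of the same algorithm (not XFS code; names are illustrative):

#include <stdint.h>

struct irec {
        uint64_t br_startoff;
        uint64_t br_blockcount;
};

/* Return the start of the first hole of >= len blocks at or above lowest. */
static uint64_t
first_unused(const struct irec *recs, int nextents,
             uint64_t lowest, uint64_t len)
{
        uint64_t        lastaddr;       /* end of the previous extent */
        uint64_t        max = lowest;   /* candidate hole start */
        int             idx;

        for (idx = 0; idx < nextents; idx++) {
                uint64_t        off = recs[idx].br_startoff;

                /* does the hole before this extent fit? */
                if (off >= lowest + len && off - max >= len)
                        return max;
                lastaddr = off + recs[idx].br_blockcount;
                max = lastaddr > lowest ? lastaddr : lowest;
        }
        return max;     /* no hole fits; use the space past the last extent */
}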
+ /*
+  * Trims *last_block (an in/out value) down to the extent data: if block
+  * *last_block - 1 lies in a hole or past EOF, *last_block is set to the
+  * first block past the previous extent (0 if there is none).
+  * This is not based on i_size, it is based on the extent records.
+  * Sets *last_block to 0 for local files, as they do not have extent records.
+  */
+ int                                           /* error */
+ xfs_bmap_last_before(
+       xfs_trans_t     *tp,                    /* transaction pointer */
+       xfs_inode_t     *ip,                    /* incore inode */
+       xfs_fileoff_t   *last_block,            /* last block */
+       int             whichfork)              /* data or attr fork */
+ {
+       xfs_fileoff_t   bno;                    /* input file offset */
+       int             eof;                    /* hit end of file */
+       xfs_bmbt_rec_host_t *ep;                /* pointer to last extent */
+       int             error;                  /* error return value */
+       xfs_bmbt_irec_t got;                    /* current extent value */
+       xfs_ifork_t     *ifp;                   /* inode fork pointer */
+       xfs_extnum_t    lastx;                  /* last extent used */
+       xfs_bmbt_irec_t prev;                   /* previous extent value */
+       if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
+           XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+           XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
+              return -EIO;
+       if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
+               *last_block = 0;
+               return 0;
+       }
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       if (!(ifp->if_flags & XFS_IFEXTENTS) &&
+           (error = xfs_iread_extents(tp, ip, whichfork)))
+               return error;
+       bno = *last_block - 1;
+       ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
+               &prev);
+       if (eof || xfs_bmbt_get_startoff(ep) > bno) {
+               if (prev.br_startoff == NULLFILEOFF)
+                       *last_block = 0;
+               else
+                       *last_block = prev.br_startoff + prev.br_blockcount;
+       }
+       /*
+        * Otherwise *last_block is already the right answer.
+        */
+       return 0;
+ }
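+ /*
+  * Return the last extent in the selected fork of the inode in *rec,
+  * reading in the extent list first if necessary; set *is_empty if the
+  * fork has no extent records.
+  */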
+ int
+ xfs_bmap_last_extent(
+       struct xfs_trans        *tp,
+       struct xfs_inode        *ip,
+       int                     whichfork,
+       struct xfs_bmbt_irec    *rec,
+       int                     *is_empty)
+ {
+       struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
+       int                     error;
+       int                     nextents;
+       if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+               error = xfs_iread_extents(tp, ip, whichfork);
+               if (error)
+                       return error;
+       }
+       nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
+       if (nextents == 0) {
+               *is_empty = 1;
+               return 0;
+       }
+       xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
+       *is_empty = 0;
+       return 0;
+ }
+ /*
+  * Check the last inode extent to determine whether this allocation will result
+  * in blocks being allocated at the end of the file. When we allocate new data
+  * blocks at the end of the file which do not start at the previous data block,
+  * we will try to align the new blocks at stripe unit boundaries.
+  *
+  * Returns 1 in bma->aeof if the file (fork) is empty, as any new write will
+  * be at or past the EOF.
+  */
+ STATIC int
+ xfs_bmap_isaeof(
+       struct xfs_bmalloca     *bma,
+       int                     whichfork)
+ {
+       struct xfs_bmbt_irec    rec;
+       int                     is_empty;
+       int                     error;
+       bma->aeof = 0;
+       error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
+                                    &is_empty);
+       if (error)
+               return error;
+       if (is_empty) {
+               bma->aeof = 1;
+               return 0;
+       }
+       /*
+        * Check if we are allocating at or past the last extent, or at
+        * least into the last delayed allocated extent.
+        */
+       bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
+               (bma->offset >= rec.br_startoff &&
+                isnullstartblock(rec.br_startblock));
+       return 0;
+ }
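As a standalone predicate, the test reduces to: the new offset is at or past the end of the last extent, or lands inside it while that extent is still delayed. A sketch (not XFS code; last_is_delay stands in for isnullstartblock(rec.br_startblock)):

#include <stdint.h>

static int
alloc_is_at_eof(uint64_t offset, uint64_t last_startoff,
                uint64_t last_blockcount, int last_is_delay)
{
        /* allocating at or past the last extent ... */
        if (offset >= last_startoff + last_blockcount)
                return 1;
        /* ... or at least into a trailing delayed-allocation extent */
        return offset >= last_startoff && last_is_delay;
}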
+ /*
+  * Returns the file-relative block number of the first block past eof in
+  * the file.  This is not based on i_size, it is based on the extent records.
+  * Returns 0 for local files, as they do not have extent records.
+  */
+ int
+ xfs_bmap_last_offset(
+       struct xfs_inode        *ip,
+       xfs_fileoff_t           *last_block,
+       int                     whichfork)
+ {
+       struct xfs_bmbt_irec    rec;
+       int                     is_empty;
+       int                     error;
+       *last_block = 0;
+       if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
+               return 0;
+       if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
+           XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
+              return -EIO;
+       error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
+       if (error || is_empty)
+               return error;
+       *last_block = rec.br_startoff + rec.br_blockcount;
+       return 0;
+ }
+ /*
+  * Returns whether the selected fork of the inode has exactly one
+  * block.  For the data fork we also check that this matches di_size,
+  * implying the file's range is 0..bsize-1.
+  */
+ int                                   /* 1=>1 block, 0=>otherwise */
+ xfs_bmap_one_block(
+       xfs_inode_t     *ip,            /* incore inode */
+       int             whichfork)      /* data or attr fork */
+ {
+       xfs_bmbt_rec_host_t *ep;        /* ptr to fork's extent */
+       xfs_ifork_t     *ifp;           /* inode fork pointer */
+       int             rval;           /* return value */
+       xfs_bmbt_irec_t s;              /* internal version of extent */
+ #ifndef DEBUG
+       if (whichfork == XFS_DATA_FORK)
+               return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
+ #endif        /* !DEBUG */
+       if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
+               return 0;
+       if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
+               return 0;
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       ASSERT(ifp->if_flags & XFS_IFEXTENTS);
+       ep = xfs_iext_get_ext(ifp, 0);
+       xfs_bmbt_get_all(ep, &s);
+       rval = s.br_startoff == 0 && s.br_blockcount == 1;
+       if (rval && whichfork == XFS_DATA_FORK)
+               ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
+       return rval;
+ }
+ /*
+  * Extent tree manipulation functions used during allocation.
+  */
+ /*
+  * Convert a delayed allocation to a real allocation.
+  */
+ STATIC int                            /* error */
+ xfs_bmap_add_extent_delay_real(
+       struct xfs_bmalloca     *bma)
+ {
+       struct xfs_bmbt_irec    *new = &bma->got;
+       int                     diff;   /* temp value */
+       xfs_bmbt_rec_host_t     *ep;    /* extent entry for idx */
+       int                     error;  /* error return value */
+       int                     i;      /* temp state */
+       xfs_ifork_t             *ifp;   /* inode fork pointer */
+       xfs_fileoff_t           new_endoff;     /* end offset of new entry */
+       xfs_bmbt_irec_t         r[3];   /* neighbor extent entries */
+                                       /* left is 0, right is 1, prev is 2 */
+       int                     rval=0; /* return value (logging flags) */
+       int                     state = 0;/* state bits, accessed thru macros */
+       xfs_filblks_t           da_new; /* new count del alloc blocks used */
+       xfs_filblks_t           da_old; /* old count del alloc blocks used */
+       xfs_filblks_t           temp=0; /* value for da_new calculations */
+       xfs_filblks_t           temp2=0;/* value for da_new calculations */
+       int                     tmp_rval;       /* partial logging flags */
+       ifp = XFS_IFORK_PTR(bma->ip, XFS_DATA_FORK);
+       ASSERT(bma->idx >= 0);
+       ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+       ASSERT(!isnullstartblock(new->br_startblock));
+       ASSERT(!bma->cur ||
+              (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
+       XFS_STATS_INC(xs_add_exlist);
+ #define       LEFT            r[0]
+ #define       RIGHT           r[1]
+ #define       PREV            r[2]
+       /*
+        * Set up a bunch of variables to make the tests simpler.
+        */
+       ep = xfs_iext_get_ext(ifp, bma->idx);
+       xfs_bmbt_get_all(ep, &PREV);
+       new_endoff = new->br_startoff + new->br_blockcount;
+       ASSERT(PREV.br_startoff <= new->br_startoff);
+       ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
+       da_old = startblockval(PREV.br_startblock);
+       da_new = 0;
+       /*
+        * Set flags determining what part of the previous delayed allocation
+        * extent is being replaced by a real allocation.
+        */
+       if (PREV.br_startoff == new->br_startoff)
+               state |= BMAP_LEFT_FILLING;
+       if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
+               state |= BMAP_RIGHT_FILLING;
+       /*
+        * Check and set flags if this segment has a left neighbor.
+        * Don't set contiguous if the combined extent would be too large.
+        */
+       if (bma->idx > 0) {
+               state |= BMAP_LEFT_VALID;
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);
+               if (isnullstartblock(LEFT.br_startblock))
+                       state |= BMAP_LEFT_DELAY;
+       }
+       if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
+           LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
+           LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
+           LEFT.br_state == new->br_state &&
+           LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+               state |= BMAP_LEFT_CONTIG;
+       /*
+        * Check and set flags if this segment has a right neighbor.
+        * Don't set contiguous if the combined extent would be too large.
+        * Also check for all-three-contiguous being too large.
+        */
+       if (bma->idx < bma->ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
+               state |= BMAP_RIGHT_VALID;
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
+               if (isnullstartblock(RIGHT.br_startblock))
+                       state |= BMAP_RIGHT_DELAY;
+       }
+       if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
+           new_endoff == RIGHT.br_startoff &&
+           new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
+           new->br_state == RIGHT.br_state &&
+           new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
+           ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+                      BMAP_RIGHT_FILLING)) !=
+                     (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+                      BMAP_RIGHT_FILLING) ||
+            LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
+                       <= MAXEXTLEN))
+               state |= BMAP_RIGHT_CONTIG;
+       error = 0;
+       /*
+        * Switch out based on the FILLING and CONTIG state bits.
+        */
+       switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+                        BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
+       case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+            BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
+               /*
+                * Filling in all of a previously delayed allocation extent.
+                * The left and right neighbors are both contiguous with new.
+                */
+               bma->idx--;
+               trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
+                       LEFT.br_blockcount + PREV.br_blockcount +
+                       RIGHT.br_blockcount);
+               trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+               xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
+               bma->ip->i_d.di_nextents--;
+               if (bma->cur == NULL)
+                       rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+               else {
+                       rval = XFS_ILOG_CORE;
+                       error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
+                                       RIGHT.br_startblock,
+                                       RIGHT.br_blockcount, &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       error = xfs_btree_delete(bma->cur, &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       error = xfs_btree_decrement(bma->cur, 0, &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
+                                       LEFT.br_startblock,
+                                       LEFT.br_blockcount +
+                                       PREV.br_blockcount +
+                                       RIGHT.br_blockcount, LEFT.br_state);
+                       if (error)
+                               goto done;
+               }
+               break;
+       case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
+               /*
+                * Filling in all of a previously delayed allocation extent.
+                * The left neighbor is contiguous, the right is not.
+                */
+               bma->idx--;
+               trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
+                       LEFT.br_blockcount + PREV.br_blockcount);
+               trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+               xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
+               if (bma->cur == NULL)
+                       rval = XFS_ILOG_DEXT;
+               else {
+                       rval = 0;
+                       error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
+                                       LEFT.br_startblock, LEFT.br_blockcount,
+                                       &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
+                                       LEFT.br_startblock,
+                                       LEFT.br_blockcount +
+                                       PREV.br_blockcount, LEFT.br_state);
+                       if (error)
+                               goto done;
+               }
+               break;
+       case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
+               /*
+                * Filling in all of a previously delayed allocation extent.
+                * The right neighbor is contiguous, the left is not.
+                */
+               trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+               xfs_bmbt_set_startblock(ep, new->br_startblock);
+               xfs_bmbt_set_blockcount(ep,
+                       PREV.br_blockcount + RIGHT.br_blockcount);
+               trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+               xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
+               if (bma->cur == NULL)
+                       rval = XFS_ILOG_DEXT;
+               else {
+                       rval = 0;
+                       error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
+                                       RIGHT.br_startblock,
+                                       RIGHT.br_blockcount, &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
+                                       new->br_startblock,
+                                       PREV.br_blockcount +
+                                       RIGHT.br_blockcount, PREV.br_state);
+                       if (error)
+                               goto done;
+               }
+               break;
+       case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
+               /*
+                * Filling in all of a previously delayed allocation extent.
+                * Neither the left nor right neighbors are contiguous with
+                * the new one.
+                */
+               trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+               xfs_bmbt_set_startblock(ep, new->br_startblock);
+               trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+               bma->ip->i_d.di_nextents++;
+               if (bma->cur == NULL)
+                       rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+               else {
+                       rval = XFS_ILOG_CORE;
+                       error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
+                                       new->br_startblock, new->br_blockcount,
+                                       &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+                       bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
+                       error = xfs_btree_insert(bma->cur, &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+               }
+               break;
+       case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
+               /*
+                * Filling in the first part of a previous delayed allocation.
+                * The left neighbor is contiguous.
+                */
+               trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
+                       LEFT.br_blockcount + new->br_blockcount);
+               xfs_bmbt_set_startoff(ep,
+                       PREV.br_startoff + new->br_blockcount);
+               trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
+               temp = PREV.br_blockcount - new->br_blockcount;
+               trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(ep, temp);
+               if (bma->cur == NULL)
+                       rval = XFS_ILOG_DEXT;
+               else {
+                       rval = 0;
+                       error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
+                                       LEFT.br_startblock, LEFT.br_blockcount,
+                                       &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
+                                       LEFT.br_startblock,
+                                       LEFT.br_blockcount +
+                                       new->br_blockcount,
+                                       LEFT.br_state);
+                       if (error)
+                               goto done;
+               }
+               da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
+                       startblockval(PREV.br_startblock));
+               xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
+               trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+               bma->idx--;
+               break;
+       case BMAP_LEFT_FILLING:
+               /*
+                * Filling in the first part of a previous delayed allocation.
+                * The left neighbor is not contiguous.
+                */
+               trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+               xfs_bmbt_set_startoff(ep, new_endoff);
+               temp = PREV.br_blockcount - new->br_blockcount;
+               xfs_bmbt_set_blockcount(ep, temp);
+               xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
+               bma->ip->i_d.di_nextents++;
+               if (bma->cur == NULL)
+                       rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+               else {
+                       rval = XFS_ILOG_CORE;
+                       error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
+                                       new->br_startblock, new->br_blockcount,
+                                       &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+                       bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
+                       error = xfs_btree_insert(bma->cur, &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+               }
+               if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
+                       error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
+                                       bma->firstblock, bma->flist,
+                                       &bma->cur, 1, &tmp_rval, XFS_DATA_FORK);
+                       rval |= tmp_rval;
+                       if (error)
+                               goto done;
+               }
+               da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
+                       startblockval(PREV.br_startblock) -
+                       (bma->cur ? bma->cur->bc_private.b.allocated : 0));
+               ep = xfs_iext_get_ext(ifp, bma->idx + 1);
+               xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
+               trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
+               break;
+       case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
+               /*
+                * Filling in the last part of a previous delayed allocation.
+                * The right neighbor is contiguous with the new allocation.
+                */
+               temp = PREV.br_blockcount - new->br_blockcount;
+               trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(ep, temp);
+               xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
+                       new->br_startoff, new->br_startblock,
+                       new->br_blockcount + RIGHT.br_blockcount,
+                       RIGHT.br_state);
+               trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
+               if (bma->cur == NULL)
+                       rval = XFS_ILOG_DEXT;
+               else {
+                       rval = 0;
+                       error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
+                                       RIGHT.br_startblock,
+                                       RIGHT.br_blockcount, &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       error = xfs_bmbt_update(bma->cur, new->br_startoff,
+                                       new->br_startblock,
+                                       new->br_blockcount +
+                                       RIGHT.br_blockcount,
+                                       RIGHT.br_state);
+                       if (error)
+                               goto done;
+               }
+               da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
+                       startblockval(PREV.br_startblock));
+               trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+               xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
+               trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+               bma->idx++;
+               break;
+       case BMAP_RIGHT_FILLING:
+               /*
+                * Filling in the last part of a previous delayed allocation.
+                * The right neighbor is not contiguous.
+                */
+               temp = PREV.br_blockcount - new->br_blockcount;
+               trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(ep, temp);
+               xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
+               bma->ip->i_d.di_nextents++;
+               if (bma->cur == NULL)
+                       rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+               else {
+                       rval = XFS_ILOG_CORE;
+                       error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
+                                       new->br_startblock, new->br_blockcount,
+                                       &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+                       bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
+                       error = xfs_btree_insert(bma->cur, &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+               }
+               if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
+                       error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
+                               bma->firstblock, bma->flist, &bma->cur, 1,
+                               &tmp_rval, XFS_DATA_FORK);
+                       rval |= tmp_rval;
+                       if (error)
+                               goto done;
+               }
+               da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
+                       startblockval(PREV.br_startblock) -
+                       (bma->cur ? bma->cur->bc_private.b.allocated : 0));
+               ep = xfs_iext_get_ext(ifp, bma->idx);
+               xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
+               trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+               bma->idx++;
+               break;
+       case 0:
+               /*
+                * Filling in the middle part of a previous delayed allocation.
+                * Contiguity is impossible here.
+                * This case is avoided almost all the time.
+                *
+                * We start with a delayed allocation:
+                *
+                * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
+                *  PREV @ idx
+                *
+                * and we are allocating:
+                *                     +rrrrrrrrrrrrrrrrr+
+                *                            new
+                *
+                * and we set it up for insertion as:
+                * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
+                *                            new
+                *  PREV @ idx          LEFT              RIGHT
+                *                      inserted at idx + 1
+                */
+               temp = new->br_startoff - PREV.br_startoff;
+               temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
+               trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
+               xfs_bmbt_set_blockcount(ep, temp);      /* truncate PREV */
+               LEFT = *new;
+               RIGHT.br_state = PREV.br_state;
+               RIGHT.br_startblock = nullstartblock(
+                               (int)xfs_bmap_worst_indlen(bma->ip, temp2));
+               RIGHT.br_startoff = new_endoff;
+               RIGHT.br_blockcount = temp2;
+               /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
+               xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
+               bma->ip->i_d.di_nextents++;
+               if (bma->cur == NULL)
+                       rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+               else {
+                       rval = XFS_ILOG_CORE;
+                       error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
+                                       new->br_startblock, new->br_blockcount,
+                                       &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+                       bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
+                       error = xfs_btree_insert(bma->cur, &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+               }
+               if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
+                       error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
+                                       bma->firstblock, bma->flist, &bma->cur,
+                                       1, &tmp_rval, XFS_DATA_FORK);
+                       rval |= tmp_rval;
+                       if (error)
+                               goto done;
+               }
+               temp = xfs_bmap_worst_indlen(bma->ip, temp);
+               temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
+               diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
+                       (bma->cur ? bma->cur->bc_private.b.allocated : 0));
+               if (diff > 0) {
+                       error = xfs_icsb_modify_counters(bma->ip->i_mount,
+                                       XFS_SBS_FDBLOCKS,
+                                       -((int64_t)diff), 0);
+                       ASSERT(!error);
+                       if (error)
+                               goto done;
+               }
+               ep = xfs_iext_get_ext(ifp, bma->idx);
+               xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
+               trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
+               xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
+                       nullstartblock((int)temp2));
+               trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
+               bma->idx++;
+               da_new = temp + temp2;
+               break;
+       case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+       case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+       case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
+       case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
+       case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+       case BMAP_LEFT_CONTIG:
+       case BMAP_RIGHT_CONTIG:
+               /*
+                * These cases are all impossible.
+                */
+               ASSERT(0);
+       }
+       /* convert to a btree if necessary */
+       if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
+               int     tmp_logflags;   /* partial log flag return val */
+               ASSERT(bma->cur == NULL);
+               error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
+                               bma->firstblock, bma->flist, &bma->cur,
+                               da_old > 0, &tmp_logflags, XFS_DATA_FORK);
+               bma->logflags |= tmp_logflags;
+               if (error)
+                       goto done;
+       }
+       /* adjust for changes in reserved delayed indirect blocks */
+       if (da_old || da_new) {
+               temp = da_new;
+               if (bma->cur)
+                       temp += bma->cur->bc_private.b.allocated;
+               ASSERT(temp <= da_old);
+               if (temp < da_old)
+                       xfs_icsb_modify_counters(bma->ip->i_mount,
+                                       XFS_SBS_FDBLOCKS,
+                                       (int64_t)(da_old - temp), 0);
+       }
+       /* clear out the allocated field, done with it now in any case. */
+       if (bma->cur)
+               bma->cur->bc_private.b.allocated = 0;
+       xfs_bmap_check_leaf_extents(bma->cur, bma->ip, XFS_DATA_FORK);
+ done:
+       bma->logflags |= rval;
+       return error;
+ #undef        LEFT
+ #undef        RIGHT
+ #undef        PREV
+ }
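The four state bits span sixteen combinations; the switch above handles nine of them (the eight named merge cases plus case 0) and asserts on the seven where a CONTIG bit is set without its matching FILLING bit, since part of PREV would still separate the new extent from that neighbor. A small standalone program enumerating the split (not XFS code; bit values are illustrative, not the kernel's):

#include <stdio.h>

#define LEFT_FILLING    (1 << 0)        /* new starts where PREV starts */
#define RIGHT_FILLING   (1 << 1)        /* new ends where PREV ends */
#define LEFT_CONTIG     (1 << 2)        /* new merges with left neighbor */
#define RIGHT_CONTIG    (1 << 3)        /* new merges with right neighbor */

int
main(void)
{
        int     state;

        for (state = 0; state < 16; state++) {
                /* a CONTIG bit only makes sense on a FILLING edge */
                int possible =
                    (!(state & LEFT_CONTIG) || (state & LEFT_FILLING)) &&
                    (!(state & RIGHT_CONTIG) || (state & RIGHT_FILLING));

                printf("state 0x%x: %s\n", state,
                       possible ? "handled" : "impossible");
        }
        return 0;       /* prints 9 handled, 7 impossible */
}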
+ /*
+  * Convert an unwritten allocation to a real allocation or vice versa.
+  */
+ STATIC int                            /* error */
+ xfs_bmap_add_extent_unwritten_real(
+       struct xfs_trans        *tp,
+       xfs_inode_t             *ip,    /* incore inode pointer */
+       xfs_extnum_t            *idx,   /* extent number to update/insert */
+       xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
+       xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
+       xfs_fsblock_t           *first, /* pointer to firstblock variable */
+       xfs_bmap_free_t         *flist, /* list of extents to be freed */
+       int                     *logflagsp) /* inode logging flags */
+ {
+       xfs_btree_cur_t         *cur;   /* btree cursor */
+       xfs_bmbt_rec_host_t     *ep;    /* extent entry for idx */
+       int                     error;  /* error return value */
+       int                     i;      /* temp state */
+       xfs_ifork_t             *ifp;   /* inode fork pointer */
+       xfs_fileoff_t           new_endoff;     /* end offset of new entry */
+       xfs_exntst_t            newext; /* new extent state */
+       xfs_exntst_t            oldext; /* old extent state */
+       xfs_bmbt_irec_t         r[3];   /* neighbor extent entries */
+                                       /* left is 0, right is 1, prev is 2 */
+       int                     rval=0; /* return value (logging flags) */
+       int                     state = 0;/* state bits, accessed thru macros */
+       *logflagsp = 0;
+       cur = *curp;
+       ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+       ASSERT(*idx >= 0);
+       ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+       ASSERT(!isnullstartblock(new->br_startblock));
+       XFS_STATS_INC(xs_add_exlist);
+ #define       LEFT            r[0]
+ #define       RIGHT           r[1]
+ #define       PREV            r[2]
+       /*
+        * Set up a bunch of variables to make the tests simpler.
+        */
+       error = 0;
+       ep = xfs_iext_get_ext(ifp, *idx);
+       xfs_bmbt_get_all(ep, &PREV);
+       newext = new->br_state;
+       oldext = (newext == XFS_EXT_UNWRITTEN) ?
+               XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
+       ASSERT(PREV.br_state == oldext);
+       new_endoff = new->br_startoff + new->br_blockcount;
+       ASSERT(PREV.br_startoff <= new->br_startoff);
+       ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
+       /*
+        * Set flags determining what part of the previous oldext allocation
+        * extent is being replaced by a newext allocation.
+        */
+       if (PREV.br_startoff == new->br_startoff)
+               state |= BMAP_LEFT_FILLING;
+       if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
+               state |= BMAP_RIGHT_FILLING;
+       /*
+        * Check and set flags if this segment has a left neighbor.
+        * Don't set contiguous if the combined extent would be too large.
+        */
+       if (*idx > 0) {
+               state |= BMAP_LEFT_VALID;
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
+               if (isnullstartblock(LEFT.br_startblock))
+                       state |= BMAP_LEFT_DELAY;
+       }
+       if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
+           LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
+           LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
+           LEFT.br_state == newext &&
+           LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+               state |= BMAP_LEFT_CONTIG;
+       /*
+        * Check and set flags if this segment has a right neighbor.
+        * Don't set contiguous if the combined extent would be too large.
+        * Also check for all-three-contiguous being too large.
+        */
+       if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
+               state |= BMAP_RIGHT_VALID;
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
+               if (isnullstartblock(RIGHT.br_startblock))
+                       state |= BMAP_RIGHT_DELAY;
+       }
+       if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
+           new_endoff == RIGHT.br_startoff &&
+           new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
+           newext == RIGHT.br_state &&
+           new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
+           ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+                      BMAP_RIGHT_FILLING)) !=
+                     (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+                      BMAP_RIGHT_FILLING) ||
+            LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
+                       <= MAXEXTLEN))
+               state |= BMAP_RIGHT_CONTIG;
+       /*
+        * Switch out based on the FILLING and CONTIG state bits.
+        */
+       switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+                        BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
+       case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+            BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
+               /*
+                * Setting all of a previous oldext extent to newext.
+                * The left and right neighbors are both contiguous with new.
+                */
+               --*idx;
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
+                       LEFT.br_blockcount + PREV.br_blockcount +
+                       RIGHT.br_blockcount);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               xfs_iext_remove(ip, *idx + 1, 2, state);
+               ip->i_d.di_nextents -= 2;
+               if (cur == NULL)
+                       rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+               else {
+                       rval = XFS_ILOG_CORE;
+                       if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
+                                       RIGHT.br_startblock,
+                                       RIGHT.br_blockcount, &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_btree_delete(cur, &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_btree_decrement(cur, 0, &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_btree_delete(cur, &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_btree_decrement(cur, 0, &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
+                               LEFT.br_startblock,
+                               LEFT.br_blockcount + PREV.br_blockcount +
+                               RIGHT.br_blockcount, LEFT.br_state)))
+                               goto done;
+               }
+               break;
+       case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
+               /*
+                * Setting all of a previous oldext extent to newext.
+                * The left neighbor is contiguous, the right is not.
+                */
+               --*idx;
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
+                       LEFT.br_blockcount + PREV.br_blockcount);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               xfs_iext_remove(ip, *idx + 1, 1, state);
+               ip->i_d.di_nextents--;
+               if (cur == NULL)
+                       rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+               else {
+                       rval = XFS_ILOG_CORE;
+                       if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
+                                       PREV.br_startblock, PREV.br_blockcount,
+                                       &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_btree_delete(cur, &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_btree_decrement(cur, 0, &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
+                               LEFT.br_startblock,
+                               LEFT.br_blockcount + PREV.br_blockcount,
+                               LEFT.br_state)))
+                               goto done;
+               }
+               break;
+       case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
+               /*
+                * Setting all of a previous oldext extent to newext.
+                * The right neighbor is contiguous, the left is not.
+                */
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(ep,
+                       PREV.br_blockcount + RIGHT.br_blockcount);
+               xfs_bmbt_set_state(ep, newext);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               xfs_iext_remove(ip, *idx + 1, 1, state);
+               ip->i_d.di_nextents--;
+               if (cur == NULL)
+                       rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+               else {
+                       rval = XFS_ILOG_CORE;
+                       if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
+                                       RIGHT.br_startblock,
+                                       RIGHT.br_blockcount, &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_btree_delete(cur, &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_btree_decrement(cur, 0, &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_bmbt_update(cur, new->br_startoff,
+                               new->br_startblock,
+                               new->br_blockcount + RIGHT.br_blockcount,
+                               newext)))
+                               goto done;
+               }
+               break;
+       case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
+               /*
+                * Setting all of a previous oldext extent to newext.
+                * Neither the left nor right neighbors are contiguous with
+                * the new one.
+                */
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_state(ep, newext);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               if (cur == NULL)
+                       rval = XFS_ILOG_DEXT;
+               else {
+                       rval = 0;
+                       if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+                                       new->br_startblock, new->br_blockcount,
+                                       &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_bmbt_update(cur, new->br_startoff,
+                               new->br_startblock, new->br_blockcount,
+                               newext)))
+                               goto done;
+               }
+               break;
+       case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
+               /*
+                * Setting the first part of a previous oldext extent to newext.
+                * The left neighbor is contiguous.
+                */
+               trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
+                       LEFT.br_blockcount + new->br_blockcount);
+               xfs_bmbt_set_startoff(ep,
+                       PREV.br_startoff + new->br_blockcount);
+               trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_startblock(ep,
+                       new->br_startblock + new->br_blockcount);
+               xfs_bmbt_set_blockcount(ep,
+                       PREV.br_blockcount - new->br_blockcount);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               --*idx;
+               if (cur == NULL)
+                       rval = XFS_ILOG_DEXT;
+               else {
+                       rval = 0;
+                       if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
+                                       PREV.br_startblock, PREV.br_blockcount,
+                                       &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_bmbt_update(cur,
+                               PREV.br_startoff + new->br_blockcount,
+                               PREV.br_startblock + new->br_blockcount,
+                               PREV.br_blockcount - new->br_blockcount,
+                               oldext)))
+                               goto done;
+                       if ((error = xfs_btree_decrement(cur, 0, &i)))
+                               goto done;
+                       error = xfs_bmbt_update(cur, LEFT.br_startoff,
+                               LEFT.br_startblock,
+                               LEFT.br_blockcount + new->br_blockcount,
+                               LEFT.br_state);
+                       if (error)
+                               goto done;
+               }
+               break;
+       case BMAP_LEFT_FILLING:
+               /*
+                * Setting the first part of a previous oldext extent to newext.
+                * The left neighbor is not contiguous.
+                */
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
+               xfs_bmbt_set_startoff(ep, new_endoff);
+               xfs_bmbt_set_blockcount(ep,
+                       PREV.br_blockcount - new->br_blockcount);
+               xfs_bmbt_set_startblock(ep,
+                       new->br_startblock + new->br_blockcount);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               xfs_iext_insert(ip, *idx, 1, new, state);
+               ip->i_d.di_nextents++;
+               if (cur == NULL)
+                       rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+               else {
+                       rval = XFS_ILOG_CORE;
+                       if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
+                                       PREV.br_startblock, PREV.br_blockcount,
+                                       &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_bmbt_update(cur,
+                               PREV.br_startoff + new->br_blockcount,
+                               PREV.br_startblock + new->br_blockcount,
+                               PREV.br_blockcount - new->br_blockcount,
+                               oldext)))
+                               goto done;
+                       cur->bc_rec.b = *new;
+                       if ((error = xfs_btree_insert(cur, &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+               }
+               break;
+       case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
+               /*
+                * Setting the last part of a previous oldext extent to newext.
+                * The right neighbor is contiguous with the new allocation.
+                */
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(ep,
+                       PREV.br_blockcount - new->br_blockcount);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               ++*idx;
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
+                       new->br_startoff, new->br_startblock,
+                       new->br_blockcount + RIGHT.br_blockcount, newext);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               if (cur == NULL)
+                       rval = XFS_ILOG_DEXT;
+               else {
+                       rval = 0;
+                       if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
+                                       PREV.br_startblock,
+                                       PREV.br_blockcount, &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
+                               PREV.br_startblock,
+                               PREV.br_blockcount - new->br_blockcount,
+                               oldext)))
+                               goto done;
+                       if ((error = xfs_btree_increment(cur, 0, &i)))
+                               goto done;
+                       if ((error = xfs_bmbt_update(cur, new->br_startoff,
+                               new->br_startblock,
+                               new->br_blockcount + RIGHT.br_blockcount,
+                               newext)))
+                               goto done;
+               }
+               break;
+       case BMAP_RIGHT_FILLING:
+               /*
+                * Setting the last part of a previous oldext extent to newext.
+                * The right neighbor is not contiguous.
+                */
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(ep,
+                       PREV.br_blockcount - new->br_blockcount);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               ++*idx;
+               xfs_iext_insert(ip, *idx, 1, new, state);
+               ip->i_d.di_nextents++;
+               if (cur == NULL)
+                       rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+               else {
+                       rval = XFS_ILOG_CORE;
+                       if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
+                                       PREV.br_startblock, PREV.br_blockcount,
+                                       &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
+                               PREV.br_startblock,
+                               PREV.br_blockcount - new->br_blockcount,
+                               oldext)))
+                               goto done;
+                       if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+                                       new->br_startblock, new->br_blockcount,
+                                       &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+                       cur->bc_rec.b.br_state = XFS_EXT_NORM;
+                       if ((error = xfs_btree_insert(cur, &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+               }
+               break;
+       case 0:
+               /*
+                * Setting the middle part of a previous oldext extent to
+                * newext.  Contiguity is impossible here.
+                * One extent becomes three extents.
+                */
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(ep,
+                       new->br_startoff - PREV.br_startoff);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               r[0] = *new;
+               r[1].br_startoff = new_endoff;
+               r[1].br_blockcount =
+                       PREV.br_startoff + PREV.br_blockcount - new_endoff;
+               r[1].br_startblock = new->br_startblock + new->br_blockcount;
+               r[1].br_state = oldext;
+               ++*idx;
+               xfs_iext_insert(ip, *idx, 2, &r[0], state);
+               ip->i_d.di_nextents += 2;
+               if (cur == NULL)
+                       rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+               else {
+                       rval = XFS_ILOG_CORE;
+                       if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
+                                       PREV.br_startblock, PREV.br_blockcount,
+                                       &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       /* new right extent - oldext */
+                       if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
+                               r[1].br_startblock, r[1].br_blockcount,
+                               r[1].br_state)))
+                               goto done;
+                       /* new left extent - oldext */
+                       cur->bc_rec.b = PREV;
+                       cur->bc_rec.b.br_blockcount =
+                               new->br_startoff - PREV.br_startoff;
+                       if ((error = xfs_btree_insert(cur, &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       /*
+                        * Reset the cursor to the position of the new extent
+                        * we are about to insert as we can't trust it after
+                        * the previous insert.
+                        */
+                       if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+                                       new->br_startblock, new->br_blockcount,
+                                       &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+                       /* new middle extent - newext */
+                       cur->bc_rec.b.br_state = new->br_state;
+                       if ((error = xfs_btree_insert(cur, &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+               }
+               break;
+       case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+       case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+       case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
+       case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
+       case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+       case BMAP_LEFT_CONTIG:
+       case BMAP_RIGHT_CONTIG:
+               /*
+                * These cases are all impossible.
+                */
+               ASSERT(0);
+       }
+       /* convert to a btree if necessary */
+       if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
+               int     tmp_logflags;   /* partial log flag return val */
+               ASSERT(cur == NULL);
+               error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur,
+                               0, &tmp_logflags, XFS_DATA_FORK);
+               *logflagsp |= tmp_logflags;
+               if (error)
+                       goto done;
+       }
+       /* clear out the allocated field, done with it now in any case. */
+       if (cur) {
+               cur->bc_private.b.allocated = 0;
+               *curp = cur;
+       }
+       xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK);
+ done:
+       *logflagsp |= rval;
+       return error;
+ #undef        LEFT
+ #undef        RIGHT
+ #undef        PREV
+ }
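+ /*
+  * Example of the three-way split in case 0 above (illustrative values,
+  * not from this change): converting the middle 5 blocks at offset 10 of
+  * a 20 block extent at offset 0 leaves a 10 block and a 5 block extent
+  * in the old state around the new 5 block record, so two extent records
+  * are added to the fork.
+  */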
+ /*
+  * Convert a hole to a delayed allocation.
+  */
+ STATIC void
+ xfs_bmap_add_extent_hole_delay(
+       xfs_inode_t             *ip,    /* incore inode pointer */
+       xfs_extnum_t            *idx,   /* extent number to update/insert */
+       xfs_bmbt_irec_t         *new)   /* new data to add to file extents */
+ {
+       xfs_ifork_t             *ifp;   /* inode fork pointer */
+       xfs_bmbt_irec_t         left;   /* left neighbor extent entry */
+       xfs_filblks_t           newlen=0;       /* new indirect size */
+       xfs_filblks_t           oldlen=0;       /* old indirect size */
+       xfs_bmbt_irec_t         right;  /* right neighbor extent entry */
+       int                     state;  /* state bits, accessed thru macros */
+       xfs_filblks_t           temp=0; /* temp for indirect calculations */
+       ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+       state = 0;
+       ASSERT(isnullstartblock(new->br_startblock));
+       /*
+        * Check and set flags if this segment has a left neighbor
+        */
+       if (*idx > 0) {
+               state |= BMAP_LEFT_VALID;
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
+               if (isnullstartblock(left.br_startblock))
+                       state |= BMAP_LEFT_DELAY;
+       }
+       /*
+        * Check and set flags if the current (right) segment exists.
+        * If it doesn't exist, we're converting the hole at end-of-file.
+        */
+       if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
+               state |= BMAP_RIGHT_VALID;
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
+               if (isnullstartblock(right.br_startblock))
+                       state |= BMAP_RIGHT_DELAY;
+       }
+       /*
+        * Set contiguity flags on the left and right neighbors.
+        * Don't let extents get too large, even if the pieces are contiguous.
+        */
+       if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
+           left.br_startoff + left.br_blockcount == new->br_startoff &&
+           left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+               state |= BMAP_LEFT_CONTIG;
+       if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
+           new->br_startoff + new->br_blockcount == right.br_startoff &&
+           new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
+           (!(state & BMAP_LEFT_CONTIG) ||
+            (left.br_blockcount + new->br_blockcount +
+             right.br_blockcount <= MAXEXTLEN)))
+               state |= BMAP_RIGHT_CONTIG;
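+       /*
+        * Illustrative example with assumed values: left covers offsets
+        * 0-9, new covers 10-14 and right covers 15-19, all delalloc, so
+        * both CONTIG flags are set above and the three records are
+        * merged into a single 20 block delalloc extent below, since
+        * 20 <= MAXEXTLEN.
+        */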
+       /*
+        * Switch out based on the contiguity flags.
+        */
+       switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
+       case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+               /*
+                * New allocation is contiguous with delayed allocations
+                * on the left and on the right.
+                * Merge all three into a single extent record.
+                */
+               --*idx;
+               temp = left.br_blockcount + new->br_blockcount +
+                       right.br_blockcount;
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
+               oldlen = startblockval(left.br_startblock) +
+                       startblockval(new->br_startblock) +
+                       startblockval(right.br_startblock);
+               newlen = xfs_bmap_worst_indlen(ip, temp);
+               xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
+                       nullstartblock((int)newlen));
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               xfs_iext_remove(ip, *idx + 1, 1, state);
+               break;
+       case BMAP_LEFT_CONTIG:
+               /*
+                * New allocation is contiguous with a delayed allocation
+                * on the left.
+                * Merge the new allocation with the left neighbor.
+                */
+               --*idx;
+               temp = left.br_blockcount + new->br_blockcount;
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
+               oldlen = startblockval(left.br_startblock) +
+                       startblockval(new->br_startblock);
+               newlen = xfs_bmap_worst_indlen(ip, temp);
+               xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
+                       nullstartblock((int)newlen));
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               break;
+       case BMAP_RIGHT_CONTIG:
+               /*
+                * New allocation is contiguous with a delayed allocation
+                * on the right.
+                * Merge the new allocation with the right neighbor.
+                */
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               temp = new->br_blockcount + right.br_blockcount;
+               oldlen = startblockval(new->br_startblock) +
+                       startblockval(right.br_startblock);
+               newlen = xfs_bmap_worst_indlen(ip, temp);
+               xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
+                       new->br_startoff,
+                       nullstartblock((int)newlen), temp, right.br_state);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               break;
+       case 0:
+               /*
+                * New allocation is not contiguous with another
+                * delayed allocation.
+                * Insert a new entry.
+                */
+               oldlen = newlen = 0;
+               xfs_iext_insert(ip, *idx, 1, new, state);
+               break;
+       }
+       if (oldlen != newlen) {
+               ASSERT(oldlen > newlen);
+               xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
+                       (int64_t)(oldlen - newlen), 0);
+               /*
+                * Nothing to do for disk quota accounting here.
+                */
+       }
+ }
+ /*
+  * Convert a hole to a real allocation.
+  */
+ STATIC int                            /* error */
+ xfs_bmap_add_extent_hole_real(
+       struct xfs_bmalloca     *bma,
+       int                     whichfork)
+ {
+       struct xfs_bmbt_irec    *new = &bma->got;
+       int                     error;  /* error return value */
+       int                     i;      /* temp state */
+       xfs_ifork_t             *ifp;   /* inode fork pointer */
+       xfs_bmbt_irec_t         left;   /* left neighbor extent entry */
+       xfs_bmbt_irec_t         right;  /* right neighbor extent entry */
+       int                     rval=0; /* return value (logging flags) */
+       int                     state;  /* state bits, accessed thru macros */
+       ifp = XFS_IFORK_PTR(bma->ip, whichfork);
+       ASSERT(bma->idx >= 0);
+       ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+       ASSERT(!isnullstartblock(new->br_startblock));
+       ASSERT(!bma->cur ||
+              !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
+       XFS_STATS_INC(xs_add_exlist);
+       state = 0;
+       if (whichfork == XFS_ATTR_FORK)
+               state |= BMAP_ATTRFORK;
+       /*
+        * Check and set flags if this segment has a left neighbor.
+        */
+       if (bma->idx > 0) {
+               state |= BMAP_LEFT_VALID;
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left);
+               if (isnullstartblock(left.br_startblock))
+                       state |= BMAP_LEFT_DELAY;
+       }
+       /*
+        * Check and set flags if this segment has a current value.
+        * Not true if we're inserting into the "hole" at eof.
+        */
+       if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
+               state |= BMAP_RIGHT_VALID;
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
+               if (isnullstartblock(right.br_startblock))
+                       state |= BMAP_RIGHT_DELAY;
+       }
+       /*
+        * We're inserting a real allocation between "left" and "right".
+        * Set the contiguity flags.  Don't let extents get too large.
+        */
+       if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
+           left.br_startoff + left.br_blockcount == new->br_startoff &&
+           left.br_startblock + left.br_blockcount == new->br_startblock &&
+           left.br_state == new->br_state &&
+           left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+               state |= BMAP_LEFT_CONTIG;
+       if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
+           new->br_startoff + new->br_blockcount == right.br_startoff &&
+           new->br_startblock + new->br_blockcount == right.br_startblock &&
+           new->br_state == right.br_state &&
+           new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
+           (!(state & BMAP_LEFT_CONTIG) ||
+            left.br_blockcount + new->br_blockcount +
+            right.br_blockcount <= MAXEXTLEN))
+               state |= BMAP_RIGHT_CONTIG;
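+       /*
+        * Illustrative example with assumed values: if left covers file
+        * offsets 0-9 at disk block 100 and new covers 10-14 at disk
+        * block 110 with the same br_state, the extents are adjacent both
+        * in the file and on disk, so BMAP_LEFT_CONTIG is set and the two
+        * records merge below.
+        */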
+       error = 0;
+       /*
+        * Select which case we're in here, and implement it.
+        */
+       switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
+       case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+               /*
+                * New allocation is contiguous with real allocations on the
+                * left and on the right.
+                * Merge all three into a single extent record.
+                */
+               --bma->idx;
+               trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
+                       left.br_blockcount + new->br_blockcount +
+                       right.br_blockcount);
+               trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+               xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
+               XFS_IFORK_NEXT_SET(bma->ip, whichfork,
+                       XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1);
+               if (bma->cur == NULL) {
+                       rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
+               } else {
+                       rval = XFS_ILOG_CORE;
+                       error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff,
+                                       right.br_startblock, right.br_blockcount,
+                                       &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       error = xfs_btree_delete(bma->cur, &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       error = xfs_btree_decrement(bma->cur, 0, &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       error = xfs_bmbt_update(bma->cur, left.br_startoff,
+                                       left.br_startblock,
+                                       left.br_blockcount +
+                                               new->br_blockcount +
+                                               right.br_blockcount,
+                                       left.br_state);
+                       if (error)
+                               goto done;
+               }
+               break;
+       case BMAP_LEFT_CONTIG:
+               /*
+                * New allocation is contiguous with a real allocation
+                * on the left.
+                * Merge the new allocation with the left neighbor.
+                */
+               --bma->idx;
+               trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
+                       left.br_blockcount + new->br_blockcount);
+               trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+               if (bma->cur == NULL) {
+                       rval = xfs_ilog_fext(whichfork);
+               } else {
+                       rval = 0;
+                       error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff,
+                                       left.br_startblock, left.br_blockcount,
+                                       &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       error = xfs_bmbt_update(bma->cur, left.br_startoff,
+                                       left.br_startblock,
+                                       left.br_blockcount +
+                                               new->br_blockcount,
+                                       left.br_state);
+                       if (error)
+                               goto done;
+               }
+               break;
+       case BMAP_RIGHT_CONTIG:
+               /*
+                * New allocation is contiguous with a real allocation
+                * on the right.
+                * Merge the new allocation with the right neighbor.
+                */
+               trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+               xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx),
+                       new->br_startoff, new->br_startblock,
+                       new->br_blockcount + right.br_blockcount,
+                       right.br_state);
+               trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+               if (bma->cur == NULL) {
+                       rval = xfs_ilog_fext(whichfork);
+               } else {
+                       rval = 0;
+                       error = xfs_bmbt_lookup_eq(bma->cur,
+                                       right.br_startoff,
+                                       right.br_startblock,
+                                       right.br_blockcount, &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       error = xfs_bmbt_update(bma->cur, new->br_startoff,
+                                       new->br_startblock,
+                                       new->br_blockcount +
+                                               right.br_blockcount,
+                                       right.br_state);
+                       if (error)
+                               goto done;
+               }
+               break;
+       case 0:
+               /*
+                * New allocation is not contiguous with another
+                * real allocation.
+                * Insert a new entry.
+                */
+               xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
+               XFS_IFORK_NEXT_SET(bma->ip, whichfork,
+                       XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1);
+               if (bma->cur == NULL) {
+                       rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
+               } else {
+                       rval = XFS_ILOG_CORE;
+                       error = xfs_bmbt_lookup_eq(bma->cur,
+                                       new->br_startoff,
+                                       new->br_startblock,
+                                       new->br_blockcount, &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+                       bma->cur->bc_rec.b.br_state = new->br_state;
+                       error = xfs_btree_insert(bma->cur, &i);
+                       if (error)
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+               }
+               break;
+       }
+       /* convert to a btree if necessary */
+       if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
+               int     tmp_logflags;   /* partial log flag return val */
+               ASSERT(bma->cur == NULL);
+               error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
+                               bma->firstblock, bma->flist, &bma->cur,
+                               0, &tmp_logflags, whichfork);
+               bma->logflags |= tmp_logflags;
+               if (error)
+                       goto done;
+       }
+       /* clear out the allocated field, done with it now in any case. */
+       if (bma->cur)
+               bma->cur->bc_private.b.allocated = 0;
+       xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
+ done:
+       bma->logflags |= rval;
+       return error;
+ }
+ /*
+  * Functions used in the extent read, allocate and remove paths
+  */
+ /*
+  * Adjust the size of the new extent based on di_extsize and rt extsize.
+  */
+ int
+ xfs_bmap_extsize_align(
+       xfs_mount_t     *mp,
+       xfs_bmbt_irec_t *gotp,          /* next extent pointer */
+       xfs_bmbt_irec_t *prevp,         /* previous extent pointer */
+       xfs_extlen_t    extsz,          /* align to this extent size */
+       int             rt,             /* is this a realtime inode? */
+       int             eof,            /* is extent at end-of-file? */
+       int             delay,          /* creating delalloc extent? */
+       int             convert,        /* overwriting unwritten extent? */
+       xfs_fileoff_t   *offp,          /* in/out: aligned offset */
+       xfs_extlen_t    *lenp)          /* in/out: aligned length */
+ {
+       xfs_fileoff_t   orig_off;       /* original offset */
+       xfs_extlen_t    orig_alen;      /* original length */
+       xfs_fileoff_t   orig_end;       /* original off+len */
+       xfs_fileoff_t   nexto;          /* next file offset */
+       xfs_fileoff_t   prevo;          /* previous file offset */
+       xfs_fileoff_t   align_off;      /* temp for offset */
+       xfs_extlen_t    align_alen;     /* temp for length */
+       xfs_extlen_t    temp;           /* temp for calculations */
+       if (convert)
+               return 0;
+       orig_off = align_off = *offp;
+       orig_alen = align_alen = *lenp;
+       orig_end = orig_off + orig_alen;
+       /*
+        * If this request overlaps an existing extent, then don't
+        * attempt to perform any additional alignment.
+        */
+       if (!delay && !eof &&
+           (orig_off >= gotp->br_startoff) &&
+           (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
+               return 0;
+       }
+       /*
+        * If the file offset is unaligned vs. the extent size
+        * we need to align it.  This will be possible unless
+        * the file was previously written with a kernel that didn't
+        * perform this alignment, or if a truncate shot us in the
+        * foot.
+        */
+       temp = do_mod(orig_off, extsz);
+       if (temp) {
+               align_alen += temp;
+               align_off -= temp;
+       }
+       /*
+        * Same adjustment for the end of the requested area.
+        */
+       if ((temp = (align_alen % extsz))) {
+               align_alen += extsz - temp;
+       }
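+       /*
+        * Worked example with assumed values: extsz = 16, orig_off = 5,
+        * orig_alen = 10.  The start adjustment gives align_off = 0 and
+        * align_alen = 15, and rounding the end up gives align_alen = 16,
+        * i.e. one aligned 16 block chunk covering the original request.
+        */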
+       /*
+        * If the previous block overlaps with this proposed allocation
+        * then move the start forward without adjusting the length.
+        */
+       if (prevp->br_startoff != NULLFILEOFF) {
+               if (prevp->br_startblock == HOLESTARTBLOCK)
+                       prevo = prevp->br_startoff;
+               else
+                       prevo = prevp->br_startoff + prevp->br_blockcount;
+       } else
+               prevo = 0;
+       if (align_off != orig_off && align_off < prevo)
+               align_off = prevo;
+       /*
+        * If the next block overlaps with this proposed allocation
+        * then move the start back without adjusting the length,
+        * but not before offset 0.
+        * This may of course make the start overlap the previous block,
+        * and if we hit the offset 0 limit then the next block
+        * can still overlap too.
+        */
+       if (!eof && gotp->br_startoff != NULLFILEOFF) {
+               if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
+                   (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
+                       nexto = gotp->br_startoff + gotp->br_blockcount;
+               else
+                       nexto = gotp->br_startoff;
+       } else
+               nexto = NULLFILEOFF;
+       if (!eof &&
+           align_off + align_alen != orig_end &&
+           align_off + align_alen > nexto)
+               align_off = nexto > align_alen ? nexto - align_alen : 0;
+       /*
+        * If we're now overlapping the next or previous extent, that
+        * means we can't fit an extsz piece in this hole.  Just move
+        * the start forward to the first valid spot and set
+        * the length so we hit the end.
+        */
+       if (align_off != orig_off && align_off < prevo)
+               align_off = prevo;
+       if (align_off + align_alen != orig_end &&
+           align_off + align_alen > nexto &&
+           nexto != NULLFILEOFF) {
+               ASSERT(nexto > prevo);
+               align_alen = nexto - align_off;
+       }
+       /*
+        * If realtime, and the result isn't a multiple of the realtime
+        * extent size we need to remove blocks until it is.
+        */
+       if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
+               /*
+                * We're not covering the original request, or
+                * we won't be able to once we fix the length.
+                */
+               if (orig_off < align_off ||
+                   orig_end > align_off + align_alen ||
+                   align_alen - temp < orig_alen)
+                       return -EINVAL;
+               /*
+                * Try to fix it by moving the start up.
+                */
+               if (align_off + temp <= orig_off) {
+                       align_alen -= temp;
+                       align_off += temp;
+               }
+               /*
+                * Try to fix it by moving the end in.
+                */
+               else if (align_off + align_alen - temp >= orig_end)
+                       align_alen -= temp;
+               /*
+                * Set the start to the minimum then trim the length.
+                */
+               else {
+                       align_alen -= orig_off - align_off;
+                       align_off = orig_off;
+                       align_alen -= align_alen % mp->m_sb.sb_rextsize;
+               }
+               /*
+                * Result doesn't cover the request, fail it.
+                */
+               if (orig_off < align_off || orig_end > align_off + align_alen)
+                       return -EINVAL;
+       } else {
+               ASSERT(orig_off >= align_off);
+               ASSERT(orig_end <= align_off + align_alen);
+       }
+ #ifdef DEBUG
+       if (!eof && gotp->br_startoff != NULLFILEOFF)
+               ASSERT(align_off + align_alen <= gotp->br_startoff);
+       if (prevp->br_startoff != NULLFILEOFF)
+               ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
+ #endif
+       *lenp = align_alen;
+       *offp = align_off;
+       return 0;
+ }
+ #define XFS_ALLOC_GAP_UNITS   4
+ void
+ xfs_bmap_adjacent(
+       struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
+ {
+       xfs_fsblock_t   adjust;         /* adjustment to block numbers */
+       xfs_agnumber_t  fb_agno;        /* ag number of ap->firstblock */
+       xfs_mount_t     *mp;            /* mount point structure */
+       int             nullfb;         /* true if ap->firstblock isn't set */
+       int             rt;             /* true if inode is realtime */
+ #define       ISVALID(x,y)    \
+       (rt ? \
+               (x) < mp->m_sb.sb_rblocks : \
+               XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
+               XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
+               XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
+       mp = ap->ip->i_mount;
+       nullfb = *ap->firstblock == NULLFSBLOCK;
+       rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
+       fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
+       /*
+        * If allocating at eof, and there's a previous real block,
+        * try to use its last block as our starting point.
+        */
+       if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
+           !isnullstartblock(ap->prev.br_startblock) &&
+           ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
+                   ap->prev.br_startblock)) {
+               ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
+               /*
+                * Adjust for the gap between prevp and us.
+                */
+               adjust = ap->offset -
+                       (ap->prev.br_startoff + ap->prev.br_blockcount);
+               if (adjust &&
+                   ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
+                       ap->blkno += adjust;
+       }
+       /*
+        * If not at eof, then compare the two neighbor blocks.
+        * Figure out whether either one gives us a good starting point,
+        * and pick the better one.
+        */
+       else if (!ap->eof) {
+               xfs_fsblock_t   gotbno;         /* right side block number */
+               xfs_fsblock_t   gotdiff=0;      /* right side difference */
+               xfs_fsblock_t   prevbno;        /* left side block number */
+               xfs_fsblock_t   prevdiff=0;     /* left side difference */
+               /*
+                * If there's a previous (left) block, select a requested
+                * start block based on it.
+                */
+               if (ap->prev.br_startoff != NULLFILEOFF &&
+                   !isnullstartblock(ap->prev.br_startblock) &&
+                   (prevbno = ap->prev.br_startblock +
+                              ap->prev.br_blockcount) &&
+                   ISVALID(prevbno, ap->prev.br_startblock)) {
+                       /*
+                        * Calculate gap to end of previous block.
+                        */
+                       adjust = prevdiff = ap->offset -
+                               (ap->prev.br_startoff +
+                                ap->prev.br_blockcount);
+                       /*
+                        * Figure the startblock based on the previous block's
+                        * end and the gap size.
+                        * Heuristic!
+                        * If the gap is large relative to the piece we're
+                        * allocating, or using it gives us an invalid block
+                        * number, then just use the end of the previous block.
+                        */
+                       if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
+                           ISVALID(prevbno + prevdiff,
+                                   ap->prev.br_startblock))
+                               prevbno += adjust;
+                       else
+                               prevdiff += adjust;
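+                       /*
+                        * Example with assumed numbers: for an 8 block
+                        * allocation, a 16 block gap passes the test
+                        * (16 <= 4 * 8) and the target is projected past
+                        * the gap to keep the file layout linear; a 40
+                        * block gap fails it, so the end of the previous
+                        * extent is used and the gap penalises prevdiff
+                        * in the comparison below.
+                        */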
+                       /*
+                        * If the firstblock forbids it, can't use it,
+                        * must use default.
+                        */
+                       if (!rt && !nullfb &&
+                           XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
+                               prevbno = NULLFSBLOCK;
+               }
+               /*
+                * No previous block or can't follow it, just default.
+                */
+               else
+                       prevbno = NULLFSBLOCK;
+               /*
+                * If there's a following (right) block, select a requested
+                * start block based on it.
+                */
+               if (!isnullstartblock(ap->got.br_startblock)) {
+                       /*
+                        * Calculate gap to start of next block.
+                        */
+                       adjust = gotdiff = ap->got.br_startoff - ap->offset;
+                       /*
+                        * Figure the startblock based on the next block's
+                        * start and the gap size.
+                        */
+                       gotbno = ap->got.br_startblock;
+                       /*
+                        * Heuristic!
+                        * If the gap is large relative to the piece we're
+                        * allocating, or using it gives us an invalid block
+                        * number, then just use the start of the next block
+                        * offset by our length.
+                        */
+                       if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
+                           ISVALID(gotbno - gotdiff, gotbno))
+                               gotbno -= adjust;
+                       else if (ISVALID(gotbno - ap->length, gotbno)) {
+                               gotbno -= ap->length;
+                               gotdiff += adjust - ap->length;
+                       } else
+                               gotdiff += adjust;
+                       /*
+                        * If the firstblock forbids it, can't use it,
+                        * must use default.
+                        */
+                       if (!rt && !nullfb &&
+                           XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
+                               gotbno = NULLFSBLOCK;
+               }
+               /*
+                * No next block, just default.
+                */
+               else
+                       gotbno = NULLFSBLOCK;
+               /*
+                * If both valid, pick the better one, else the only good
+                * one, else ap->blkno is already set (to 0 or the inode block).
+                */
+               if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
+                       ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
+               else if (prevbno != NULLFSBLOCK)
+                       ap->blkno = prevbno;
+               else if (gotbno != NULLFSBLOCK)
+                       ap->blkno = gotbno;
+       }
+ #undef ISVALID
+ }
+ static int
+ xfs_bmap_longest_free_extent(
+       struct xfs_trans        *tp,
+       xfs_agnumber_t          ag,
+       xfs_extlen_t            *blen,
+       int                     *notinit)
+ {
+       struct xfs_mount        *mp = tp->t_mountp;
+       struct xfs_perag        *pag;
+       xfs_extlen_t            longest;
+       int                     error = 0;
+       pag = xfs_perag_get(mp, ag);
+       if (!pag->pagf_init) {
+               error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
+               if (error)
+                       goto out;
+               if (!pag->pagf_init) {
+                       *notinit = 1;
+                       goto out;
+               }
+       }
+       longest = xfs_alloc_longest_free_extent(mp, pag);
+       if (*blen < longest)
+               *blen = longest;
+ out:
+       xfs_perag_put(pag);
+       return error;
+ }
+ static void
+ xfs_bmap_select_minlen(
+       struct xfs_bmalloca     *ap,
+       struct xfs_alloc_arg    *args,
+       xfs_extlen_t            *blen,
+       int                     notinit)
+ {
+       if (notinit || *blen < ap->minlen) {
+               /*
+                * Since we used XFS_ALLOC_FLAG_TRYLOCK in
+                * xfs_bmap_longest_free_extent() above, some AGs may not
+                * have been examined, so it is still possible that there
+                * is space for this request.
+                */
+               args->minlen = ap->minlen;
+       } else if (*blen < args->maxlen) {
+               /*
+                * If the best seen length is less than the request length,
+                * use the best as the minimum.
+                */
+               args->minlen = *blen;
+       } else {
+               /*
+                * Otherwise we've seen an extent as big as maxlen, use that
+                * as the minimum.
+                */
+               args->minlen = args->maxlen;
+       }
+ }
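+ /*
+  * Example of the minlen selection above (assumed numbers): for a request
+  * with maxlen = 64 and minlen = 1, a best-seen free extent (blen) of 40
+  * makes the attempt ask for at least 40 blocks; blen >= 64 would ask for
+  * the full 64, and an AG we could not examine (notinit) falls back to
+  * the caller's minlen of 1.
+  */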
+ STATIC int
+ xfs_bmap_btalloc_nullfb(
+       struct xfs_bmalloca     *ap,
+       struct xfs_alloc_arg    *args,
+       xfs_extlen_t            *blen)
+ {
+       struct xfs_mount        *mp = ap->ip->i_mount;
+       xfs_agnumber_t          ag, startag;
+       int                     notinit = 0;
+       int                     error;
+       args->type = XFS_ALLOCTYPE_START_BNO;
+       args->total = ap->total;
+       startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
+       if (startag == NULLAGNUMBER)
+               startag = ag = 0;
+       while (*blen < args->maxlen) {
+               error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
+                                                    &notinit);
+               if (error)
+                       return error;
+               if (++ag == mp->m_sb.sb_agcount)
+                       ag = 0;
+               if (ag == startag)
+                       break;
+       }
+       xfs_bmap_select_minlen(ap, args, blen, notinit);
+       return 0;
+ }
+ STATIC int
+ xfs_bmap_btalloc_filestreams(
+       struct xfs_bmalloca     *ap,
+       struct xfs_alloc_arg    *args,
+       xfs_extlen_t            *blen)
+ {
+       struct xfs_mount        *mp = ap->ip->i_mount;
+       xfs_agnumber_t          ag;
+       int                     notinit = 0;
+       int                     error;
+       args->type = XFS_ALLOCTYPE_NEAR_BNO;
+       args->total = ap->total;
+       ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
+       if (ag == NULLAGNUMBER)
+               ag = 0;
+       error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
+       if (error)
+               return error;
+       if (*blen < args->maxlen) {
+               error = xfs_filestream_new_ag(ap, &ag);
+               if (error)
+                       return error;
+               error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
+                                                    &notinit);
+               if (error)
+                       return error;
+       }
+       xfs_bmap_select_minlen(ap, args, blen, notinit);
+       /*
+        * Set the failure fallback case to look in the selected AG as
+        * the stream may have moved.
+        */
+       ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
+       return 0;
+ }
+ STATIC int
+ xfs_bmap_btalloc(
+       struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
+ {
+       xfs_mount_t     *mp;            /* mount point structure */
+       xfs_alloctype_t atype = 0;      /* type for allocation routines */
+       xfs_extlen_t    align;          /* minimum allocation alignment */
+       xfs_agnumber_t  fb_agno;        /* ag number of ap->firstblock */
+       xfs_agnumber_t  ag;
+       xfs_alloc_arg_t args;
+       xfs_extlen_t    blen;
+       xfs_extlen_t    nextminlen = 0;
+       int             nullfb;         /* true if ap->firstblock isn't set */
+       int             isaligned;
+       int             tryagain;
+       int             error;
+       int             stripe_align;
+       ASSERT(ap->length);
+       mp = ap->ip->i_mount;
+       /* stripe alignment for allocation is determined by mount parameters */
+       stripe_align = 0;
+       if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
+               stripe_align = mp->m_swidth;
+       else if (mp->m_dalign)
+               stripe_align = mp->m_dalign;
+       align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
+       if (unlikely(align)) {
+               error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
+                                               align, 0, ap->eof, 0, ap->conv,
+                                               &ap->offset, &ap->length);
+               ASSERT(!error);
+               ASSERT(ap->length);
+       }
+       nullfb = *ap->firstblock == NULLFSBLOCK;
+       fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
+       if (nullfb) {
+               if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
+                       ag = xfs_filestream_lookup_ag(ap->ip);
+                       ag = (ag != NULLAGNUMBER) ? ag : 0;
+                       ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
+               } else {
+                       ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
+               }
+       } else
+               ap->blkno = *ap->firstblock;
+       xfs_bmap_adjacent(ap);
+       /*
+        * If allowed, use ap->blkno; otherwise must use firstblock since
+        * it's in the right allocation group.
+        */
+       if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
+               ;
+       else
+               ap->blkno = *ap->firstblock;
+       /*
+        * Normal allocation, done through xfs_alloc_vextent.
+        */
+       tryagain = isaligned = 0;
+       memset(&args, 0, sizeof(args));
+       args.tp = ap->tp;
+       args.mp = mp;
+       args.fsbno = ap->blkno;
+       /* Trim the allocation back to the maximum an AG can fit. */
+       args.maxlen = MIN(ap->length, XFS_ALLOC_AG_MAX_USABLE(mp));
+       args.firstblock = *ap->firstblock;
+       blen = 0;
+       if (nullfb) {
+               /*
+                * Search for an allocation group with a single extent large
+                * enough for the request.  If one isn't found, then adjust
+                * the minimum allocation size to the largest space found.
+                */
+               if (ap->userdata && xfs_inode_is_filestream(ap->ip))
+                       error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
+               else
+                       error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
+               if (error)
+                       return error;
+       } else if (ap->flist->xbf_low) {
+               if (xfs_inode_is_filestream(ap->ip))
+                       args.type = XFS_ALLOCTYPE_FIRST_AG;
+               else
+                       args.type = XFS_ALLOCTYPE_START_BNO;
+               args.total = args.minlen = ap->minlen;
+       } else {
+               args.type = XFS_ALLOCTYPE_NEAR_BNO;
+               args.total = ap->total;
+               args.minlen = ap->minlen;
+       }
+       /* apply extent size hints if obtained earlier */
+       if (unlikely(align)) {
+               args.prod = align;
+               if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
+                       args.mod = (xfs_extlen_t)(args.prod - args.mod);
+       } else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) {
+               args.prod = 1;
+               args.mod = 0;
+       } else {
+               args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog;
+               if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
+                       args.mod = (xfs_extlen_t)(args.prod - args.mod);
+       }
+       /*
+        * If we are not low on available data blocks, and the
+        * underlying logical volume manager is a stripe, and
+        * the file offset is zero, then try to allocate data
+        * blocks on a stripe unit boundary.
+        * NOTE: ap->aeof is only set if the allocation length
+        * is >= the stripe unit and the allocation offset is
+        * at the end of file.
+        */
+       if (!ap->flist->xbf_low && ap->aeof) {
+               if (!ap->offset) {
+                       args.alignment = stripe_align;
+                       atype = args.type;
+                       isaligned = 1;
+                       /*
+                        * Adjust for alignment
+                        */
+                       if (blen > args.alignment && blen <= args.maxlen)
+                               args.minlen = blen - args.alignment;
+                       args.minalignslop = 0;
+               } else {
+                       /*
+                        * First try an exact bno allocation.
+                        * If it fails then do a near or start bno
+                        * allocation with alignment turned on.
+                        */
+                       atype = args.type;
+                       tryagain = 1;
+                       args.type = XFS_ALLOCTYPE_THIS_BNO;
+                       args.alignment = 1;
+                       /*
+                        * Compute the minlen+alignment for the
+                        * next case.  Set slop so that the value
+                        * of minlen+alignment+slop doesn't go up
+                        * between the calls.
+                        */
+                       if (blen > stripe_align && blen <= args.maxlen)
+                               nextminlen = blen - stripe_align;
+                       else
+                               nextminlen = args.minlen;
+                       if (nextminlen + stripe_align > args.minlen + 1)
+                               args.minalignslop =
+                                       nextminlen + stripe_align -
+                                       args.minlen - 1;
+                       else
+                               args.minalignslop = 0;
+               }
+       } else {
+               args.alignment = 1;
+               args.minalignslop = 0;
+       }
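+       /*
+        * Worked example for the minalignslop computation above (assumed
+        * numbers): with stripe_align = 8, args.minlen = 4 and blen = 0,
+        * nextminlen = 4, and since 4 + 8 > 4 + 1 the slop is
+        * 4 + 8 - 4 - 1 = 7 blocks, so the aligned retry below can never
+        * need more space than this first attempt reserved.
+        */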
+       args.minleft = ap->minleft;
+       args.wasdel = ap->wasdel;
+       args.isfl = 0;
+       args.userdata = ap->userdata;
+       if ((error = xfs_alloc_vextent(&args)))
+               return error;
+       if (tryagain && args.fsbno == NULLFSBLOCK) {
+               /*
+                * Exact allocation failed. Now try with alignment
+                * turned on.
+                */
+               args.type = atype;
+               args.fsbno = ap->blkno;
+               args.alignment = stripe_align;
+               args.minlen = nextminlen;
+               args.minalignslop = 0;
+               isaligned = 1;
+               if ((error = xfs_alloc_vextent(&args)))
+                       return error;
+       }
+       if (isaligned && args.fsbno == NULLFSBLOCK) {
+               /*
+                * Allocation failed, so turn off alignment and
+                * try again.
+                */
+               args.type = atype;
+               args.fsbno = ap->blkno;
+               args.alignment = 0;
+               if ((error = xfs_alloc_vextent(&args)))
+                       return error;
+       }
+       if (args.fsbno == NULLFSBLOCK && nullfb &&
+           args.minlen > ap->minlen) {
+               args.minlen = ap->minlen;
+               args.type = XFS_ALLOCTYPE_START_BNO;
+               args.fsbno = ap->blkno;
+               if ((error = xfs_alloc_vextent(&args)))
+                       return error;
+       }
+       if (args.fsbno == NULLFSBLOCK && nullfb) {
+               args.fsbno = 0;
+               args.type = XFS_ALLOCTYPE_FIRST_AG;
+               args.total = ap->minlen;
+               args.minleft = 0;
+               if ((error = xfs_alloc_vextent(&args)))
+                       return error;
+               ap->flist->xbf_low = 1;
+       }
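+       /*
+        * The calls above form a fallback ladder: an exact-bno or aligned
+        * attempt first (depending on the flags set above), then an
+        * unaligned retry, then a retry with a relaxed minlen, and finally
+        * any AG with minleft relaxed; args.fsbno is NULLFSBLOCK only if
+        * every attempt failed.
+        */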
+       if (args.fsbno != NULLFSBLOCK) {
+               /*
+                * Check that the allocation happened in the same or a
+                * higher AG than the first block that was allocated.
+                */
+               ASSERT(*ap->firstblock == NULLFSBLOCK ||
+                      XFS_FSB_TO_AGNO(mp, *ap->firstblock) ==
+                      XFS_FSB_TO_AGNO(mp, args.fsbno) ||
+                      (ap->flist->xbf_low &&
+                       XFS_FSB_TO_AGNO(mp, *ap->firstblock) <
+                       XFS_FSB_TO_AGNO(mp, args.fsbno)));
+               ap->blkno = args.fsbno;
+               if (*ap->firstblock == NULLFSBLOCK)
+                       *ap->firstblock = args.fsbno;
+               ASSERT(nullfb || fb_agno == args.agno ||
+                      (ap->flist->xbf_low && fb_agno < args.agno));
+               ap->length = args.len;
+               ap->ip->i_d.di_nblocks += args.len;
+               xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
+               if (ap->wasdel)
+                       ap->ip->i_delayed_blks -= args.len;
+               /*
+                * Adjust the disk quota also. This was reserved
+                * earlier.
+                */
+               xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
+                       ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
+                                       XFS_TRANS_DQ_BCOUNT,
+                       (long) args.len);
+       } else {
+               ap->blkno = NULLFSBLOCK;
+               ap->length = 0;
+       }
+       return 0;
+ }
+ /*
+  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
+  * It figures out where to ask the underlying allocator to put the new extent.
+  */
+ STATIC int
+ xfs_bmap_alloc(
+       struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
+ {
+       if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata)
+               return xfs_bmap_rtalloc(ap);
+       return xfs_bmap_btalloc(ap);
+ }
+ /*
+  * Trim the returned map to the required bounds
+  */
+ STATIC void
+ xfs_bmapi_trim_map(
+       struct xfs_bmbt_irec    *mval,
+       struct xfs_bmbt_irec    *got,
+       xfs_fileoff_t           *bno,
+       xfs_filblks_t           len,
+       xfs_fileoff_t           obno,
+       xfs_fileoff_t           end,
+       int                     n,
+       int                     flags)
+ {
+       if ((flags & XFS_BMAPI_ENTIRE) ||
+           got->br_startoff + got->br_blockcount <= obno) {
+               *mval = *got;
+               if (isnullstartblock(got->br_startblock))
+                       mval->br_startblock = DELAYSTARTBLOCK;
+               return;
+       }
+       if (obno > *bno)
+               *bno = obno;
+       ASSERT((*bno >= obno) || (n == 0));
+       ASSERT(*bno < end);
+       mval->br_startoff = *bno;
+       if (isnullstartblock(got->br_startblock))
+               mval->br_startblock = DELAYSTARTBLOCK;
+       else
+               mval->br_startblock = got->br_startblock +
+                                       (*bno - got->br_startoff);
+       /*
+        * Return the minimum of what we got and what we asked for as
+        * the length.  We can use the len variable here because it is
+        * modified below and we could have been there before coming
+        * here if the first part of the allocation didn't overlap what
+        * was asked for.
+        */
+       mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
+                       got->br_blockcount - (*bno - got->br_startoff));
+       mval->br_state = got->br_state;
+       ASSERT(mval->br_blockcount <= len);
+       return;
+ }
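+ /*
+  * Trimming example for the function above (assumed values): if got maps
+  * file offsets 0-99 and the request covers offsets 40-59, the returned
+  * mval is the 20 block overlap at offset 40, starting at
+  * got->br_startblock + 40.
+  */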
+ /*
+  * Update and validate the extent map to return
+  */
+ STATIC void
+ xfs_bmapi_update_map(
+       struct xfs_bmbt_irec    **map,
+       xfs_fileoff_t           *bno,
+       xfs_filblks_t           *len,
+       xfs_fileoff_t           obno,
+       xfs_fileoff_t           end,
+       int                     *n,
+       int                     flags)
+ {
+       xfs_bmbt_irec_t *mval = *map;
+       ASSERT((flags & XFS_BMAPI_ENTIRE) ||
+              ((mval->br_startoff + mval->br_blockcount) <= end));
+       ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
+              (mval->br_startoff < obno));
+       *bno = mval->br_startoff + mval->br_blockcount;
+       *len = end - *bno;
+       if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
+               /* update previous map with new information */
+               ASSERT(mval->br_startblock == mval[-1].br_startblock);
+               ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
+               ASSERT(mval->br_state == mval[-1].br_state);
+               mval[-1].br_blockcount = mval->br_blockcount;
+               mval[-1].br_state = mval->br_state;
+       } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
+                  mval[-1].br_startblock != DELAYSTARTBLOCK &&
+                  mval[-1].br_startblock != HOLESTARTBLOCK &&
+                  mval->br_startblock == mval[-1].br_startblock +
+                                         mval[-1].br_blockcount &&
+                  ((flags & XFS_BMAPI_IGSTATE) ||
+                       mval[-1].br_state == mval->br_state)) {
+               ASSERT(mval->br_startoff ==
+                      mval[-1].br_startoff + mval[-1].br_blockcount);
+               mval[-1].br_blockcount += mval->br_blockcount;
+       } else if (*n > 0 &&
+                  mval->br_startblock == DELAYSTARTBLOCK &&
+                  mval[-1].br_startblock == DELAYSTARTBLOCK &&
+                  mval->br_startoff ==
+                  mval[-1].br_startoff + mval[-1].br_blockcount) {
+               mval[-1].br_blockcount += mval->br_blockcount;
+               mval[-1].br_state = mval->br_state;
+       } else if (!((*n == 0) &&
+                    ((mval->br_startoff + mval->br_blockcount) <=
+                     obno))) {
+               mval++;
+               (*n)++;
+       }
+       *map = mval;
+ }
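+ /*
+  * Coalescing example for the function above (assumed values): if the
+  * previous mval ends at file offset 50 mapping to disk block 150, and
+  * the new entry starts at offset 50 on disk block 150 with the same
+  * state, the two are merged into one record instead of consuming
+  * another map slot.
+  */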
+ /*
+  * Map file blocks to filesystem blocks without allocation.
+  */
+ int
+ xfs_bmapi_read(
+       struct xfs_inode        *ip,
+       xfs_fileoff_t           bno,
+       xfs_filblks_t           len,
+       struct xfs_bmbt_irec    *mval,
+       int                     *nmap,
+       int                     flags)
+ {
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_ifork        *ifp;
+       struct xfs_bmbt_irec    got;
+       struct xfs_bmbt_irec    prev;
+       xfs_fileoff_t           obno;
+       xfs_fileoff_t           end;
+       xfs_extnum_t            lastx;
+       int                     error;
+       int                     eof;
+       int                     n = 0;
+       int                     whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
+                                               XFS_ATTR_FORK : XFS_DATA_FORK;
+       ASSERT(*nmap >= 1);
+       ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
+                          XFS_BMAPI_IGSTATE)));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
+       if (unlikely(XFS_TEST_ERROR(
+           (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+            XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
+            mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
+               XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
+               return -EFSCORRUPTED;
+       }
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -EIO;
+       XFS_STATS_INC(xs_blk_mapr);
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+               error = xfs_iread_extents(NULL, ip, whichfork);
+               if (error)
+                       return error;
+       }
+       xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, &prev);
+       end = bno + len;
+       obno = bno;
+       while (bno < end && n < *nmap) {
+               /* Reading past eof, act as though there's a hole up to end. */
+               if (eof)
+                       got.br_startoff = end;
+               if (got.br_startoff > bno) {
+                       /* Reading in a hole.  */
+                       mval->br_startoff = bno;
+                       mval->br_startblock = HOLESTARTBLOCK;
+                       mval->br_blockcount =
+                               XFS_FILBLKS_MIN(len, got.br_startoff - bno);
+                       mval->br_state = XFS_EXT_NORM;
+                       bno += mval->br_blockcount;
+                       len -= mval->br_blockcount;
+                       mval++;
+                       n++;
+                       continue;
+               }
+               /* set up the extent map to return. */
+               xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
+               xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
+               /* If we're done, stop now. */
+               if (bno >= end || n >= *nmap)
+                       break;
+               /* Else go on to the next record. */
+               if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
+                       xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
+               else
+                       eof = 1;
+       }
+       *nmap = n;
+       return 0;
+ }
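+ /*
+  * Illustration only, not part of this change: a minimal sketch of how a
+  * caller might use xfs_bmapi_read() under the shared inode lock, in the
+  * style of the iomap paths.  The helper name and error handling are
+  * hypothetical.
+  */
+ #if 0
+ static int
+ example_read_one_mapping(
+       struct xfs_inode        *ip,
+       xfs_fileoff_t           offset_fsb,
+       xfs_filblks_t           count_fsb,
+       struct xfs_bmbt_irec    *imap)
+ {
+       int                     nimaps = 1;     /* only want the first mapping */
+       int                     error;
+       xfs_ilock(ip, XFS_ILOCK_SHARED);
+       error = xfs_bmapi_read(ip, offset_fsb, count_fsb, imap, &nimaps, 0);
+       xfs_iunlock(ip, XFS_ILOCK_SHARED);
+       /* a hole comes back as a mapping with br_startblock == HOLESTARTBLOCK */
+       return error;
+ }
+ #endif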
+ STATIC int
+ xfs_bmapi_reserve_delalloc(
+       struct xfs_inode        *ip,
+       xfs_fileoff_t           aoff,
+       xfs_filblks_t           len,
+       struct xfs_bmbt_irec    *got,
+       struct xfs_bmbt_irec    *prev,
+       xfs_extnum_t            *lastx,
+       int                     eof)
+ {
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+       xfs_extlen_t            alen;
+       xfs_extlen_t            indlen;
+       char                    rt = XFS_IS_REALTIME_INODE(ip);
+       xfs_extlen_t            extsz;
+       int                     error;
+       alen = XFS_FILBLKS_MIN(len, MAXEXTLEN);
+       if (!eof)
+               alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
+       /* Figure out the extent size, adjust alen */
+       extsz = xfs_get_extsz_hint(ip);
+       if (extsz) {
+               /*
+                * Make sure we don't exceed a single extent length when we
+                * align the extent by reducing the length we are going to
+                * allocate by the maximum amount extent size alignment may
+                * require.
+                */
+               alen = XFS_FILBLKS_MIN(len, MAXEXTLEN - (2 * extsz - 1));
+               error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof,
+                                              1, 0, &aoff, &alen);
+               ASSERT(!error);
+       }
+       if (rt)
+               extsz = alen / mp->m_sb.sb_rextsize;
+       /*
+        * Make a transaction-less quota reservation for delayed allocation
+        * blocks.  This number gets adjusted later.  If the reservation
+        * fails we return the error, since we haven't yet allocated any
+        * blocks inside this loop.
+        */
+       error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
+                       rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+       if (error)
+               return error;
+       /*
+        * Update the superblock counters for alen and indlen separately,
+        * since they can come from different pools (realtime extents vs.
+        * data blocks).
+        */
+       indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
+       ASSERT(indlen > 0);
+       if (rt) {
+               error = xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
+                                         -((int64_t)extsz), 0);
+       } else {
+               error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
+                                                -((int64_t)alen), 0);
+       }
+       if (error)
+               goto out_unreserve_quota;
+       error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
+                                        -((int64_t)indlen), 0);
+       if (error)
+               goto out_unreserve_blocks;
+       ip->i_delayed_blks += alen;
+       got->br_startoff = aoff;
+       got->br_startblock = nullstartblock(indlen);
+       got->br_blockcount = alen;
+       got->br_state = XFS_EXT_NORM;
+       xfs_bmap_add_extent_hole_delay(ip, lastx, got);
+       /*
+        * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
+        * might have merged it into one of the neighbouring ones.
+        */
+       xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
+       ASSERT(got->br_startoff <= aoff);
+       ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
+       ASSERT(isnullstartblock(got->br_startblock));
+       ASSERT(got->br_state == XFS_EXT_NORM);
+       return 0;
+ out_unreserve_blocks:
+       if (rt)
+               xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, extsz, 0);
+       else
+               xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, alen, 0);
+ out_unreserve_quota:
+       if (XFS_IS_QUOTA_ON(mp))
+               xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
+                               XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+       return error;
+ }
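+ /*
+  * Illustration only, not part of this change: the delayed extent created
+  * above carries no real disk address.  nullstartblock() encodes the
+  * worst-case indirect block count into br_startblock, and
+  * isnullstartblock()/startblockval() recognise and decode it.
+  */
+ #if 0
+ static void
+ example_delalloc_encoding(int indlen)
+ {
+       xfs_fsblock_t   sb = nullstartblock(indlen);
+       ASSERT(isnullstartblock(sb));
+       ASSERT(startblockval(sb) == indlen);
+ }
+ #endif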
+ /*
+  * Map file blocks to filesystem blocks, adding delayed allocations as needed.
+  */
+ int
+ xfs_bmapi_delay(
+       struct xfs_inode        *ip,    /* incore inode */
+       xfs_fileoff_t           bno,    /* starting file offs. mapped */
+       xfs_filblks_t           len,    /* length to map in file */
+       struct xfs_bmbt_irec    *mval,  /* output: map values */
+       int                     *nmap,  /* i/o: mval size/count */
+       int                     flags)  /* XFS_BMAPI_... */
+ {
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+       struct xfs_bmbt_irec    got;    /* current file extent record */
+       struct xfs_bmbt_irec    prev;   /* previous file extent record */
+       xfs_fileoff_t           obno;   /* old block number (offset) */
+       xfs_fileoff_t           end;    /* end of mapped file region */
+       xfs_extnum_t            lastx;  /* last useful extent number */
+       int                     eof;    /* we've hit the end of extents */
+       int                     n = 0;  /* current extent index */
+       int                     error = 0;
+       ASSERT(*nmap >= 1);
+       ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
+       ASSERT(!(flags & ~XFS_BMAPI_ENTIRE));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+       if (unlikely(XFS_TEST_ERROR(
+           (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
+            XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
+            mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
+               XFS_ERROR_REPORT("xfs_bmapi_delay", XFS_ERRLEVEL_LOW, mp);
+               return -EFSCORRUPTED;
+       }
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -EIO;
+       XFS_STATS_INC(xs_blk_mapw);
+       if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+               error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
+               if (error)
+                       return error;
+       }
+       xfs_bmap_search_extents(ip, bno, XFS_DATA_FORK, &eof, &lastx, &got, &prev);
+       end = bno + len;
+       obno = bno;
+       while (bno < end && n < *nmap) {
+               if (eof || got.br_startoff > bno) {
+                       error = xfs_bmapi_reserve_delalloc(ip, bno, len, &got,
+                                                          &prev, &lastx, eof);
+                       if (error) {
+                               if (n == 0) {
+                                       *nmap = 0;
+                                       return error;
+                               }
+                               break;
+                       }
+               }
+               /* set up the extent map to return. */
+               xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
+               xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
+               /* If we're done, stop now. */
+               if (bno >= end || n >= *nmap)
+                       break;
+               /* Else go on to the next record. */
+               prev = got;
+               if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
+                       xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
+               else
+                       eof = 1;
+       }
+       *nmap = n;
+       return 0;
+ }
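+ /*
+  * Illustration only, not part of this change: a buffered write path might
+  * reserve delayed allocation over a range roughly as below, in the style
+  * of xfs_iomap_write_delay().  The helper name is hypothetical and error
+  * handling is elided.
+  */
+ #if 0
+ static int
+ example_delay_reserve(
+       struct xfs_inode        *ip,
+       xfs_fileoff_t           offset_fsb,
+       xfs_fileoff_t           last_fsb,
+       struct xfs_bmbt_irec    *imap)
+ {
+       int                     nimaps = 1;
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+       return xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
+                              imap, &nimaps, XFS_BMAPI_ENTIRE);
+ }
+ #endif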
 -      if (flags & XFS_BMAPI_STACK_SWITCH)
 -              bma.stack_switch = 1;
 -
++static int
++xfs_bmapi_allocate(
+       struct xfs_bmalloca     *bma)
+ {
+       struct xfs_mount        *mp = bma->ip->i_mount;
+       int                     whichfork = (bma->flags & XFS_BMAPI_ATTRFORK) ?
+                                               XFS_ATTR_FORK : XFS_DATA_FORK;
+       struct xfs_ifork        *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
+       int                     tmp_logflags = 0;
+       int                     error;
+       ASSERT(bma->length > 0);
+       /*
+        * For the wasdelay case, we could also just allocate the stuff asked
+        * for in this bmap call, but allocating the whole delayed extent at
+        * once avoids fragmenting it.
+        */
+       if (bma->wasdel) {
+               bma->length = (xfs_extlen_t)bma->got.br_blockcount;
+               bma->offset = bma->got.br_startoff;
+               if (bma->idx != NULLEXTNUM && bma->idx) {
+                       xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
+                                        &bma->prev);
+               }
+       } else {
+               bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
+               if (!bma->eof)
+                       bma->length = XFS_FILBLKS_MIN(bma->length,
+                                       bma->got.br_startoff - bma->offset);
+       }
+       /*
+        * Indicate if this is the first user data in the file, or just any
+        * user data.
+        */
+       if (!(bma->flags & XFS_BMAPI_METADATA)) {
+               bma->userdata = (bma->offset == 0) ?
+                       XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA;
+       }
+       bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
+       /*
+        * Only want to do the alignment at the eof if it is userdata and
+        * the allocation length is at least a stripe unit.
+        */
+       if (mp->m_dalign && bma->length >= mp->m_dalign &&
+           !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
+               error = xfs_bmap_isaeof(bma, whichfork);
+               if (error)
+                       return error;
+       }
+       error = xfs_bmap_alloc(bma);
+       if (error)
+               return error;
+       if (bma->flist->xbf_low)
+               bma->minleft = 0;
+       if (bma->cur)
+               bma->cur->bc_private.b.firstblock = *bma->firstblock;
+       if (bma->blkno == NULLFSBLOCK)
+               return 0;
+       if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
+               bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
+               bma->cur->bc_private.b.firstblock = *bma->firstblock;
+               bma->cur->bc_private.b.flist = bma->flist;
+       }
+       /*
+        * Bump the number of extents we've allocated
+        * in this call.
+        */
+       bma->nallocs++;
+       if (bma->cur)
+               bma->cur->bc_private.b.flags =
+                       bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
+       bma->got.br_startoff = bma->offset;
+       bma->got.br_startblock = bma->blkno;
+       bma->got.br_blockcount = bma->length;
+       bma->got.br_state = XFS_EXT_NORM;
+       /*
+        * A wasdelay extent has been initialized, so shouldn't be flagged
+        * as unwritten.
+        */
+       if (!bma->wasdel && (bma->flags & XFS_BMAPI_PREALLOC) &&
+           xfs_sb_version_hasextflgbit(&mp->m_sb))
+               bma->got.br_state = XFS_EXT_UNWRITTEN;
+       if (bma->wasdel)
+               error = xfs_bmap_add_extent_delay_real(bma);
+       else
+               error = xfs_bmap_add_extent_hole_real(bma, whichfork);
+       bma->logflags |= tmp_logflags;
+       if (error)
+               return error;
+       /*
+        * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
+        * or xfs_bmap_add_extent_hole_real might have merged it into one of
+        * the neighbouring ones.
+        */
+       xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
+       ASSERT(bma->got.br_startoff <= bma->offset);
+       ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
+              bma->offset + bma->length);
+       ASSERT(bma->got.br_state == XFS_EXT_NORM ||
+              bma->got.br_state == XFS_EXT_UNWRITTEN);
+       return 0;
+ }
+ STATIC int
+ xfs_bmapi_convert_unwritten(
+       struct xfs_bmalloca     *bma,
+       struct xfs_bmbt_irec    *mval,
+       xfs_filblks_t           len,
+       int                     flags)
+ {
+       int                     whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
+                                               XFS_ATTR_FORK : XFS_DATA_FORK;
+       struct xfs_ifork        *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
+       int                     tmp_logflags = 0;
+       int                     error;
+       /* check if we need to do unwritten->real conversion */
+       if (mval->br_state == XFS_EXT_UNWRITTEN &&
+           (flags & XFS_BMAPI_PREALLOC))
+               return 0;
+       /* check if we need to do real->unwritten conversion */
+       if (mval->br_state == XFS_EXT_NORM &&
+           (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
+                       (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
+               return 0;
+       /*
+        * Toggle the extent state between written and unwritten.
+        */
+       ASSERT(mval->br_blockcount <= len);
+       if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
+               bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
+                                       bma->ip, whichfork);
+               bma->cur->bc_private.b.firstblock = *bma->firstblock;
+               bma->cur->bc_private.b.flist = bma->flist;
+       }
+       mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
+                               ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
+       error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx,
+                       &bma->cur, mval, bma->firstblock, bma->flist,
+                       &tmp_logflags);
+       bma->logflags |= tmp_logflags;
+       if (error)
+               return error;
+       /*
+        * Update our extent pointer, given that
+        * xfs_bmap_add_extent_unwritten_real might have merged it into one
+        * of the neighbouring ones.
+        */
+       xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
+       /*
+        * We may have combined previously unwritten space with written space,
+        * so generate another request.
+        */
+       if (mval->br_blockcount < len)
+               return -EAGAIN;
+       return 0;
+ }
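+ /*
+  * Illustration only, not part of this change: the two early returns above,
+  * restated as a single predicate.  Conversion happens for an unwritten
+  * extent being overwritten (no XFS_BMAPI_PREALLOC), or for a written
+  * extent when both XFS_BMAPI_PREALLOC and XFS_BMAPI_CONVERT are set.
+  */
+ #if 0
+ static bool
+ example_needs_conversion(xfs_exntst_t state, int flags)
+ {
+       if (state == XFS_EXT_UNWRITTEN)
+               return !(flags & XFS_BMAPI_PREALLOC);   /* unwritten -> real */
+       return (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) ==
+              (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT); /* real -> unwritten */
+ }
+ #endif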
+ /*
+  * Map file blocks to filesystem blocks, and allocate blocks or convert the
+  * extent state if necessary.  Detailed behaviour is controlled by the flags
+  * parameter.  Only allocates blocks from a single allocation group, to avoid
+  * locking problems.
+  *
+  * The returned value in "firstblock" from the first call in a transaction
+  * must be remembered and presented to subsequent calls in "firstblock".
+  * An upper bound for the number of blocks to be allocated is supplied to
+  * the first call in "total"; if no allocation group has that many free
+  * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
+  */
+ int
+ xfs_bmapi_write(
+       struct xfs_trans        *tp,            /* transaction pointer */
+       struct xfs_inode        *ip,            /* incore inode */
+       xfs_fileoff_t           bno,            /* starting file offs. mapped */
+       xfs_filblks_t           len,            /* length to map in file */
+       int                     flags,          /* XFS_BMAPI_... */
+       xfs_fsblock_t           *firstblock,    /* first allocated block
+                                                  controls a.g. for allocs */
+       xfs_extlen_t            total,          /* total blocks needed */
+       struct xfs_bmbt_irec    *mval,          /* output: map values */
+       int                     *nmap,          /* i/o: mval size/count */
+       struct xfs_bmap_free    *flist)         /* i/o: list extents to free */
+ {
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_ifork        *ifp;
+       struct xfs_bmalloca     bma = { NULL }; /* args for xfs_bmap_alloc */
+       xfs_fileoff_t           end;            /* end of mapped file region */
+       int                     eof;            /* after the end of extents */
+       int                     error;          /* error return */
+       int                     n;              /* current extent index */
+       xfs_fileoff_t           obno;           /* old block number (offset) */
+       int                     whichfork;      /* data or attr fork */
+       char                    inhole;         /* current location is hole in file */
+       char                    wasdelay;       /* old extent was delayed */
+ #ifdef DEBUG
+       xfs_fileoff_t           orig_bno;       /* original block number value */
+       int                     orig_flags;     /* original flags arg value */
+       xfs_filblks_t           orig_len;       /* original value of len arg */
+       struct xfs_bmbt_irec    *orig_mval;     /* original value of mval */
+       int                     orig_nmap;      /* original value of *nmap */
+       orig_bno = bno;
+       orig_len = len;
+       orig_flags = flags;
+       orig_mval = mval;
+       orig_nmap = *nmap;
+ #endif
+       whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
+               XFS_ATTR_FORK : XFS_DATA_FORK;
+       ASSERT(*nmap >= 1);
+       ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
+       ASSERT(!(flags & XFS_BMAPI_IGSTATE));
+       ASSERT(tp != NULL);
+       ASSERT(len > 0);
+       ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+       if (unlikely(XFS_TEST_ERROR(
+           (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+            XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
+            mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
+               XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
+               return -EFSCORRUPTED;
+       }
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -EIO;
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       XFS_STATS_INC(xs_blk_mapw);
+       if (*firstblock == NULLFSBLOCK) {
+               if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
+                       bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
+               else
+                       bma.minleft = 1;
+       } else {
+               bma.minleft = 0;
+       }
+       if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+               error = xfs_iread_extents(tp, ip, whichfork);
+               if (error)
+                       goto error0;
+       }
+       xfs_bmap_search_extents(ip, bno, whichfork, &eof, &bma.idx, &bma.got,
+                               &bma.prev);
+       n = 0;
+       end = bno + len;
+       obno = bno;
+       bma.tp = tp;
+       bma.ip = ip;
+       bma.total = total;
+       bma.userdata = 0;
+       bma.flist = flist;
+       bma.firstblock = firstblock;
+       while (bno < end && n < *nmap) {
+               inhole = eof || bma.got.br_startoff > bno;
+               wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
+               /*
+                * First, deal with the hole before the allocated space
+                * that we found, if any.
+                */
+               if (inhole || wasdelay) {
+                       bma.eof = eof;
+                       bma.conv = !!(flags & XFS_BMAPI_CONVERT);
+                       bma.wasdel = wasdelay;
+                       bma.offset = bno;
+                       bma.flags = flags;
+                       /*
+                        * There's a 32/64 bit type mismatch between the
+                        * allocation length request (which can be 64 bits in
+                        * length) and the bma length request, which is
+                        * xfs_extlen_t and therefore 32 bits. Hence we have to
+                        * check for 32-bit overflows and handle them here.
+                        */
+                       if (len > (xfs_filblks_t)MAXEXTLEN)
+                               bma.length = MAXEXTLEN;
+                       else
+                               bma.length = len;
+                       ASSERT(len > 0);
+                       ASSERT(bma.length > 0);
+                       error = xfs_bmapi_allocate(&bma);
+                       if (error)
+                               goto error0;
+                       if (bma.blkno == NULLFSBLOCK)
+                               break;
+               }
+               /* Deal with the allocated space we found.  */
+               xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
+                                                       end, n, flags);
+               /* Execute unwritten extent conversion if necessary */
+               error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
+               if (error == -EAGAIN)
+                       continue;
+               if (error)
+                       goto error0;
+               /* update the extent map to return */
+               xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
+               /*
+                * If we're done, stop now.  Stop when we've allocated
+                * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
+                * the transaction may get too big.
+                */
+               if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
+                       break;
+               /* Else go on to the next record. */
+               bma.prev = bma.got;
+               if (++bma.idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) {
+                       xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma.idx),
+                                        &bma.got);
+               } else
+                       eof = 1;
+       }
+       *nmap = n;
+       /*
+        * Transform from btree to extents, give it cur.
+        */
+       if (xfs_bmap_wants_extents(ip, whichfork)) {
+               int             tmp_logflags = 0;
+               ASSERT(bma.cur);
+               error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
+                       &tmp_logflags, whichfork);
+               bma.logflags |= tmp_logflags;
+               if (error)
+                       goto error0;
+       }
+       ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
+              XFS_IFORK_NEXTENTS(ip, whichfork) >
+               XFS_IFORK_MAXEXT(ip, whichfork));
+       error = 0;
+ error0:
+       /*
+        * Log everything.  Do this after conversion, there's no point in
+        * logging the extent records if we've converted to btree format.
+        */
+       if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
+           XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
+               bma.logflags &= ~xfs_ilog_fext(whichfork);
+       else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
+                XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
+               bma.logflags &= ~xfs_ilog_fbroot(whichfork);
+       /*
+        * Log whatever the flags say, even if error.  Otherwise we might miss
+        * detecting a case where the data is changed, there's an error,
+        * and it's not logged so we don't shutdown when we should.
+        */
+       if (bma.logflags)
+               xfs_trans_log_inode(tp, ip, bma.logflags);
+       if (bma.cur) {
+               if (!error) {
+                       ASSERT(*firstblock == NULLFSBLOCK ||
+                              XFS_FSB_TO_AGNO(mp, *firstblock) ==
+                              XFS_FSB_TO_AGNO(mp,
+                                      bma.cur->bc_private.b.firstblock) ||
+                              (flist->xbf_low &&
+                               XFS_FSB_TO_AGNO(mp, *firstblock) <
+                               XFS_FSB_TO_AGNO(mp,
+                                       bma.cur->bc_private.b.firstblock)));
+                       *firstblock = bma.cur->bc_private.b.firstblock;
+               }
+               xfs_btree_del_cursor(bma.cur,
+                       error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+       }
+       if (!error)
+               xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
+                       orig_nmap, *nmap);
+       return error;
+ }
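+ /*
+  * Illustration only, not part of this change: the usual calling pattern
+  * for xfs_bmapi_write() inside a transaction, in the style of
+  * xfs_iomap_write_direct(): initialise the free list and firstblock,
+  * allocate, then finish the free list.  The helper name is hypothetical
+  * and the transaction setup/commit is elided.
+  */
+ #if 0
+ static int
+ example_alloc_blocks(
+       struct xfs_trans        *tp,
+       struct xfs_inode        *ip,
+       xfs_fileoff_t           offset_fsb,
+       xfs_filblks_t           count_fsb,
+       xfs_extlen_t            resblks,
+       struct xfs_bmbt_irec    *imap)
+ {
+       struct xfs_bmap_free    free_list;
+       xfs_fsblock_t           firstfsb;
+       int                     nimaps = 1;
+       int                     committed;
+       int                     error;
+       xfs_bmap_init(&free_list, &firstfsb);
+       error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
+                               XFS_BMAPI_PREALLOC, &firstfsb, resblks,
+                               imap, &nimaps, &free_list);
+       if (error)
+               return error;
+       return xfs_bmap_finish(&tp, &free_list, &committed);
+ }
+ #endif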
+ /*
+  * Called by xfs_bmapi to update file extent records and the btree
+  * after removing space (or undoing a delayed allocation).
+  */
+ STATIC int                            /* error */
+ xfs_bmap_del_extent(
+       xfs_inode_t             *ip,    /* incore inode pointer */
+       xfs_trans_t             *tp,    /* current transaction pointer */
+       xfs_extnum_t            *idx,   /* extent number to update/delete */
+       xfs_bmap_free_t         *flist, /* list of extents to be freed */
+       xfs_btree_cur_t         *cur,   /* if null, not a btree */
+       xfs_bmbt_irec_t         *del,   /* data to remove from extents */
+       int                     *logflagsp, /* inode logging flags */
+       int                     whichfork) /* data or attr fork */
+ {
+       xfs_filblks_t           da_new; /* new delay-alloc indirect blocks */
+       xfs_filblks_t           da_old; /* old delay-alloc indirect blocks */
+       xfs_fsblock_t           del_endblock=0; /* first block past del */
+       xfs_fileoff_t           del_endoff;     /* first offset past del */
+       int                     delay;  /* current block is delayed allocated */
+       int                     do_fx;  /* free extent at end of routine */
+       xfs_bmbt_rec_host_t     *ep;    /* current extent entry pointer */
+       int                     error;  /* error return value */
+       int                     flags;  /* inode logging flags */
+       xfs_bmbt_irec_t         got;    /* current extent entry */
+       xfs_fileoff_t           got_endoff;     /* first offset past got */
+       int                     i;      /* temp state */
+       xfs_ifork_t             *ifp;   /* inode fork pointer */
+       xfs_mount_t             *mp;    /* mount structure */
+       xfs_filblks_t           nblks;  /* quota/sb block count */
+       xfs_bmbt_irec_t         new;    /* new record to be inserted */
+       /* REFERENCED */
+       uint                    qfield; /* quota field to update */
+       xfs_filblks_t           temp;   /* for indirect length calculations */
+       xfs_filblks_t           temp2;  /* for indirect length calculations */
+       int                     state = 0;
+       XFS_STATS_INC(xs_del_exlist);
+       if (whichfork == XFS_ATTR_FORK)
+               state |= BMAP_ATTRFORK;
+       mp = ip->i_mount;
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
+               (uint)sizeof(xfs_bmbt_rec_t)));
+       ASSERT(del->br_blockcount > 0);
+       ep = xfs_iext_get_ext(ifp, *idx);
+       xfs_bmbt_get_all(ep, &got);
+       ASSERT(got.br_startoff <= del->br_startoff);
+       del_endoff = del->br_startoff + del->br_blockcount;
+       got_endoff = got.br_startoff + got.br_blockcount;
+       ASSERT(got_endoff >= del_endoff);
+       delay = isnullstartblock(got.br_startblock);
+       ASSERT(isnullstartblock(del->br_startblock) == delay);
+       flags = 0;
+       qfield = 0;
+       error = 0;
+       /*
+        * If deleting a real allocation, must free up the disk space.
+        */
+       if (!delay) {
+               flags = XFS_ILOG_CORE;
+               /*
+                * Realtime allocation.  Free it and record di_nblocks update.
+                */
+               if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
+                       xfs_fsblock_t   bno;
+                       xfs_filblks_t   len;
+                       ASSERT(do_mod(del->br_blockcount,
+                                     mp->m_sb.sb_rextsize) == 0);
+                       ASSERT(do_mod(del->br_startblock,
+                                     mp->m_sb.sb_rextsize) == 0);
+                       bno = del->br_startblock;
+                       len = del->br_blockcount;
+                       do_div(bno, mp->m_sb.sb_rextsize);
+                       do_div(len, mp->m_sb.sb_rextsize);
+                       error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
+                       if (error)
+                               goto done;
+                       do_fx = 0;
+                       nblks = len * mp->m_sb.sb_rextsize;
+                       qfield = XFS_TRANS_DQ_RTBCOUNT;
+               }
+               /*
+                * Ordinary allocation.
+                */
+               else {
+                       do_fx = 1;
+                       nblks = del->br_blockcount;
+                       qfield = XFS_TRANS_DQ_BCOUNT;
+               }
+               /*
+                * Set up del_endblock and cur for later.
+                */
+               del_endblock = del->br_startblock + del->br_blockcount;
+               if (cur) {
+                       if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
+                                       got.br_startblock, got.br_blockcount,
+                                       &i)))
+                               goto done;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+               }
+               da_old = da_new = 0;
+       } else {
+               da_old = startblockval(got.br_startblock);
+               da_new = 0;
+               nblks = 0;
+               do_fx = 0;
+       }
+       /*
+        * Set flag value to use in switch statement.
+        * Bit 1 is set if the deleted range starts at the start of the
+        * extent (left edge matches), bit 0 if it ends at its end.
+        */
+       switch (((got.br_startoff == del->br_startoff) << 1) |
+               (got_endoff == del_endoff)) {
+       case 3:
+               /*
+                * Matches the whole extent.  Delete the entry.
+                */
+               xfs_iext_remove(ip, *idx, 1,
+                               whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
+               --*idx;
+               if (delay)
+                       break;
+               XFS_IFORK_NEXT_SET(ip, whichfork,
+                       XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
+               flags |= XFS_ILOG_CORE;
+               if (!cur) {
+                       flags |= xfs_ilog_fext(whichfork);
+                       break;
+               }
+               if ((error = xfs_btree_delete(cur, &i)))
+                       goto done;
+               XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+               break;
+       case 2:
+               /*
+                * Deleting the first part of the extent.
+                */
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_startoff(ep, del_endoff);
+               temp = got.br_blockcount - del->br_blockcount;
+               xfs_bmbt_set_blockcount(ep, temp);
+               if (delay) {
+                       temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+                               da_old);
+                       xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
+                       trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+                       da_new = temp;
+                       break;
+               }
+               xfs_bmbt_set_startblock(ep, del_endblock);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               if (!cur) {
+                       flags |= xfs_ilog_fext(whichfork);
+                       break;
+               }
+               if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
+                               got.br_blockcount - del->br_blockcount,
+                               got.br_state)))
+                       goto done;
+               break;
+       case 1:
+               /*
+                * Deleting the last part of the extent.
+                */
+               temp = got.br_blockcount - del->br_blockcount;
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(ep, temp);
+               if (delay) {
+                       temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+                               da_old);
+                       xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
+                       trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+                       da_new = temp;
+                       break;
+               }
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               if (!cur) {
+                       flags |= xfs_ilog_fext(whichfork);
+                       break;
+               }
+               if ((error = xfs_bmbt_update(cur, got.br_startoff,
+                               got.br_startblock,
+                               got.br_blockcount - del->br_blockcount,
+                               got.br_state)))
+                       goto done;
+               break;
+       case 0:
+               /*
+                * Deleting the middle of the extent.
+                */
+               temp = del->br_startoff - got.br_startoff;
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(ep, temp);
+               new.br_startoff = del_endoff;
+               temp2 = got_endoff - del_endoff;
+               new.br_blockcount = temp2;
+               new.br_state = got.br_state;
+               if (!delay) {
+                       new.br_startblock = del_endblock;
+                       flags |= XFS_ILOG_CORE;
+                       if (cur) {
+                               if ((error = xfs_bmbt_update(cur,
+                                               got.br_startoff,
+                                               got.br_startblock, temp,
+                                               got.br_state)))
+                                       goto done;
+                               if ((error = xfs_btree_increment(cur, 0, &i)))
+                                       goto done;
+                               cur->bc_rec.b = new;
+                               error = xfs_btree_insert(cur, &i);
+                               if (error && error != -ENOSPC)
+                                       goto done;
+                               /*
+                                * If we get no-space back from btree insert,
+                                * it tried a split, and we have a zero
+                                * block reservation.
+                                * Fix up our state and return the error.
+                                */
+                               if (error == -ENOSPC) {
+                                       /*
+                                        * Reset the cursor, don't trust
+                                        * it after any insert operation.
+                                        */
+                                       if ((error = xfs_bmbt_lookup_eq(cur,
+                                                       got.br_startoff,
+                                                       got.br_startblock,
+                                                       temp, &i)))
+                                               goto done;
+                                       XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                                       /*
+                                        * Update the btree record back
+                                        * to the original value.
+                                        */
+                                       if ((error = xfs_bmbt_update(cur,
+                                                       got.br_startoff,
+                                                       got.br_startblock,
+                                                       got.br_blockcount,
+                                                       got.br_state)))
+                                               goto done;
+                                       /*
+                                        * Reset the extent record back
+                                        * to the original value.
+                                        */
+                                       xfs_bmbt_set_blockcount(ep,
+                                               got.br_blockcount);
+                                       flags = 0;
+                                       error = -ENOSPC;
+                                       goto done;
+                               }
+                               XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+                       } else
+                               flags |= xfs_ilog_fext(whichfork);
+                       XFS_IFORK_NEXT_SET(ip, whichfork,
+                               XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
+               } else {
+                       ASSERT(whichfork == XFS_DATA_FORK);
+                       temp = xfs_bmap_worst_indlen(ip, temp);
+                       xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
+                       temp2 = xfs_bmap_worst_indlen(ip, temp2);
+                       new.br_startblock = nullstartblock((int)temp2);
+                       da_new = temp + temp2;
+                       while (da_new > da_old) {
+                               if (temp) {
+                                       temp--;
+                                       da_new--;
+                                       xfs_bmbt_set_startblock(ep,
+                                               nullstartblock((int)temp));
+                               }
+                               if (da_new == da_old)
+                                       break;
+                               if (temp2) {
+                                       temp2--;
+                                       da_new--;
+                                       new.br_startblock =
+                                               nullstartblock((int)temp2);
+                               }
+                       }
+               }
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               xfs_iext_insert(ip, *idx + 1, 1, &new, state);
+               ++*idx;
+               break;
+       }
+       /*
+        * If we need to, add to list of extents to delete.
+        */
+       if (do_fx)
+               xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist,
+                       mp);
+       /*
+        * Adjust inode # blocks in the file.
+        */
+       if (nblks)
+               ip->i_d.di_nblocks -= nblks;
+       /*
+        * Adjust quota data.
+        */
+       if (qfield)
+               xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
+       /*
+        * Account for change in delayed indirect blocks.
+        * Nothing to do for disk quota accounting here.
+        */
+       ASSERT(da_old >= da_new);
+       if (da_old > da_new) {
+               xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
+                       (int64_t)(da_old - da_new), 0);
+       }
+ done:
+       *logflagsp = flags;
+       return error;
+ }
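+ /*
+  * Illustration only, not part of this change: the switch above keys on a
+  * two-bit value built from how the deleted range lines up with the
+  * existing extent.  The helper name is hypothetical.
+  */
+ #if 0
+ static int
+ example_del_case(
+       const struct xfs_bmbt_irec      *got,
+       const struct xfs_bmbt_irec      *del)
+ {
+       xfs_fileoff_t   got_end = got->br_startoff + got->br_blockcount;
+       xfs_fileoff_t   del_end = del->br_startoff + del->br_blockcount;
+       /* 3: whole extent, 2: trim front, 1: trim back, 0: split middle */
+       return ((got->br_startoff == del->br_startoff) << 1) |
+               (got_end == del_end);
+ }
+ #endif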
+ /*
+  * Unmap (remove) blocks from a file.
+  * If nexts is nonzero then the number of extents to remove is limited to
+  * that value.  *done is set once the whole block range has been
+  * unmapped; if we stop early (e.g. due to the nexts limit) it stays
+  * clear.
+  */
+ int                                           /* error */
+ xfs_bunmapi(
+       xfs_trans_t             *tp,            /* transaction pointer */
+       struct xfs_inode        *ip,            /* incore inode */
+       xfs_fileoff_t           bno,            /* starting offset to unmap */
+       xfs_filblks_t           len,            /* length to unmap in file */
+       int                     flags,          /* misc flags */
+       xfs_extnum_t            nexts,          /* number of extents max */
+       xfs_fsblock_t           *firstblock,    /* first allocated block
+                                                  controls a.g. for allocs */
+       xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
+       int                     *done)          /* set when whole range unmapped */
+ {
+       xfs_btree_cur_t         *cur;           /* bmap btree cursor */
+       xfs_bmbt_irec_t         del;            /* extent being deleted */
+       int                     eof;            /* is deleting at eof */
+       xfs_bmbt_rec_host_t     *ep;            /* extent record pointer */
+       int                     error;          /* error return value */
+       xfs_extnum_t            extno;          /* extent number in list */
+       xfs_bmbt_irec_t         got;            /* current extent record */
+       xfs_ifork_t             *ifp;           /* inode fork pointer */
+       int                     isrt;           /* freeing in rt area */
+       xfs_extnum_t            lastx;          /* last extent index used */
+       int                     logflags;       /* transaction logging flags */
+       xfs_extlen_t            mod;            /* rt extent offset */
+       xfs_mount_t             *mp;            /* mount structure */
+       xfs_extnum_t            nextents;       /* number of file extents */
+       xfs_bmbt_irec_t         prev;           /* previous extent record */
+       xfs_fileoff_t           start;          /* first file offset deleted */
+       int                     tmp_logflags;   /* partial logging flags */
+       int                     wasdel;         /* was a delayed alloc extent */
+       int                     whichfork;      /* data or attribute fork */
+       xfs_fsblock_t           sum;
+       trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
+       whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
+               XFS_ATTR_FORK : XFS_DATA_FORK;
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       if (unlikely(
+           XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+           XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
+               XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
+                                ip->i_mount);
+               return -EFSCORRUPTED;
+       }
+       mp = ip->i_mount;
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -EIO;
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+       ASSERT(len > 0);
+       ASSERT(nexts >= 0);
+       if (!(ifp->if_flags & XFS_IFEXTENTS) &&
+           (error = xfs_iread_extents(tp, ip, whichfork)))
+               return error;
+       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+       if (nextents == 0) {
+               *done = 1;
+               return 0;
+       }
+       XFS_STATS_INC(xs_blk_unmap);
+       isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
+       start = bno;
+       bno = start + len - 1;
+       ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
+               &prev);
+       /*
+        * Check to see if the given block number is past the end of the
+        * file, back up to the last block if so...
+        */
+       if (eof) {
+               ep = xfs_iext_get_ext(ifp, --lastx);
+               xfs_bmbt_get_all(ep, &got);
+               bno = got.br_startoff + got.br_blockcount - 1;
+       }
+       logflags = 0;
+       if (ifp->if_flags & XFS_IFBROOT) {
+               ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
+               cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
+               cur->bc_private.b.firstblock = *firstblock;
+               cur->bc_private.b.flist = flist;
+               cur->bc_private.b.flags = 0;
+       } else
+               cur = NULL;
+       if (isrt) {
+               /*
+                * Synchronize by locking the bitmap inode.
+                */
+               xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+               xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+       }
+       extno = 0;
+       while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
+              (nexts == 0 || extno < nexts)) {
+               /*
+                * Is the found extent after a hole in which bno lives?
+                * Just back up to the previous extent, if so.
+                */
+               if (got.br_startoff > bno) {
+                       if (--lastx < 0)
+                               break;
+                       ep = xfs_iext_get_ext(ifp, lastx);
+                       xfs_bmbt_get_all(ep, &got);
+               }
+               /*
+                * Is the last block of this extent before the range
+                * we're supposed to delete?  If so, we're done.
+                */
+               bno = XFS_FILEOFF_MIN(bno,
+                       got.br_startoff + got.br_blockcount - 1);
+               if (bno < start)
+                       break;
+               /*
+                * Then deal with the (possibly delayed) allocated space
+                * we found.
+                */
+               ASSERT(ep != NULL);
+               del = got;
+               wasdel = isnullstartblock(del.br_startblock);
+               if (got.br_startoff < start) {
+                       del.br_startoff = start;
+                       del.br_blockcount -= start - got.br_startoff;
+                       if (!wasdel)
+                               del.br_startblock += start - got.br_startoff;
+               }
+               if (del.br_startoff + del.br_blockcount > bno + 1)
+                       del.br_blockcount = bno + 1 - del.br_startoff;
+               sum = del.br_startblock + del.br_blockcount;
+               if (isrt &&
+                   (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
+                       /*
+                        * Realtime extent not lined up at the end.
+                        * The extent could have been split into written
+                        * and unwritten pieces, or we could just be
+                        * unmapping part of it.  But we can't really
+                        * get rid of part of a realtime extent.
+                        */
+                       if (del.br_state == XFS_EXT_UNWRITTEN ||
+                           !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
+                               /*
+                                * This piece is unwritten, or we're not
+                                * using unwritten extents.  Skip over it.
+                                */
+                               ASSERT(bno >= mod);
+                               bno -= mod > del.br_blockcount ?
+                                       del.br_blockcount : mod;
+                               if (bno < got.br_startoff) {
+                                       if (--lastx >= 0)
+                                               xfs_bmbt_get_all(xfs_iext_get_ext(
+                                                       ifp, lastx), &got);
+                               }
+                               continue;
+                       }
+                       /*
+                        * It's written, turn it unwritten.
+                        * This is better than zeroing it.
+                        */
+                       ASSERT(del.br_state == XFS_EXT_NORM);
+                       ASSERT(xfs_trans_get_block_res(tp) > 0);
+                       /*
+                        * If this spans a realtime extent boundary,
+                        * chop it back to the start of the one we end at.
+                        */
+                       if (del.br_blockcount > mod) {
+                               del.br_startoff += del.br_blockcount - mod;
+                               del.br_startblock += del.br_blockcount - mod;
+                               del.br_blockcount = mod;
+                       }
+                       del.br_state = XFS_EXT_UNWRITTEN;
+                       error = xfs_bmap_add_extent_unwritten_real(tp, ip,
+                                       &lastx, &cur, &del, firstblock, flist,
+                                       &logflags);
+                       if (error)
+                               goto error0;
+                       goto nodelete;
+               }
+               if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
+                       /*
+                        * Realtime extent is lined up at the end but not
+                        * at the front.  We'll get rid of full extents if
+                        * we can.
+                        */
+                       mod = mp->m_sb.sb_rextsize - mod;
+                       if (del.br_blockcount > mod) {
+                               del.br_blockcount -= mod;
+                               del.br_startoff += mod;
+                               del.br_startblock += mod;
+                       } else if ((del.br_startoff == start &&
+                                   (del.br_state == XFS_EXT_UNWRITTEN ||
+                                    xfs_trans_get_block_res(tp) == 0)) ||
+                                  !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
+                               /*
+                                * Can't make it unwritten.  There isn't
+                                * a full extent here so just skip it.
+                                */
+                               ASSERT(bno >= del.br_blockcount);
+                               bno -= del.br_blockcount;
+                               if (got.br_startoff > bno) {
+                                       if (--lastx >= 0) {
+                                               ep = xfs_iext_get_ext(ifp,
+                                                                     lastx);
+                                               xfs_bmbt_get_all(ep, &got);
+                                       }
+                               }
+                               continue;
+                       } else if (del.br_state == XFS_EXT_UNWRITTEN) {
+                               /*
+                                * This one is already unwritten.
+                                * It must have a written left neighbor.
+                                * Unwrite the killed part of that one and
+                                * try again.
+                                */
+                               ASSERT(lastx > 0);
+                               xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
+                                               lastx - 1), &prev);
+                               ASSERT(prev.br_state == XFS_EXT_NORM);
+                               ASSERT(!isnullstartblock(prev.br_startblock));
+                               ASSERT(del.br_startblock ==
+                                      prev.br_startblock + prev.br_blockcount);
+                               if (prev.br_startoff < start) {
+                                       mod = start - prev.br_startoff;
+                                       prev.br_blockcount -= mod;
+                                       prev.br_startblock += mod;
+                                       prev.br_startoff = start;
+                               }
+                               prev.br_state = XFS_EXT_UNWRITTEN;
+                               lastx--;
+                               error = xfs_bmap_add_extent_unwritten_real(tp,
+                                               ip, &lastx, &cur, &prev,
+                                               firstblock, flist, &logflags);
+                               if (error)
+                                       goto error0;
+                               goto nodelete;
+                       } else {
+                               ASSERT(del.br_state == XFS_EXT_NORM);
+                               del.br_state = XFS_EXT_UNWRITTEN;
+                               error = xfs_bmap_add_extent_unwritten_real(tp,
+                                               ip, &lastx, &cur, &del,
+                                               firstblock, flist, &logflags);
+                               if (error)
+                                       goto error0;
+                               goto nodelete;
+                       }
+               }
+               if (wasdel) {
+                       ASSERT(startblockval(del.br_startblock) > 0);
+                       /* Update realtime/data freespace, unreserve quota */
+                       if (isrt) {
+                               xfs_filblks_t rtexts;
+                               rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
+                               do_div(rtexts, mp->m_sb.sb_rextsize);
+                               xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
+                                               (int64_t)rtexts, 0);
+                               (void)xfs_trans_reserve_quota_nblks(NULL,
+                                       ip, -((long)del.br_blockcount), 0,
+                                       XFS_QMOPT_RES_RTBLKS);
+                       } else {
+                               xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
+                                               (int64_t)del.br_blockcount, 0);
+                               (void)xfs_trans_reserve_quota_nblks(NULL,
+                                       ip, -((long)del.br_blockcount), 0,
+                                       XFS_QMOPT_RES_REGBLKS);
+                       }
+                       ip->i_delayed_blks -= del.br_blockcount;
+                       if (cur)
+                               cur->bc_private.b.flags |=
+                                       XFS_BTCUR_BPRV_WASDEL;
+               } else if (cur)
+                       cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
+               /*
+                * If it's the case where the directory code is running
+                * with no block reservation, and the deleted block is in
+                * the middle of its extent, and the resulting insert
+                * of an extent would cause transformation to btree format,
+                * then reject it.  The calling code will then swap
+                * blocks around instead.
+                * We have to do this now, rather than waiting for the
+                * conversion to btree format, since the transaction
+                * will be dirty.
+                */
+               if (!wasdel && xfs_trans_get_block_res(tp) == 0 &&
+                   XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
+                   XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
+                       XFS_IFORK_MAXEXT(ip, whichfork) &&
+                   del.br_startoff > got.br_startoff &&
+                   del.br_startoff + del.br_blockcount <
+                   got.br_startoff + got.br_blockcount) {
+                       error = -ENOSPC;
+                       goto error0;
+               }
+               error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del,
+                               &tmp_logflags, whichfork);
+               logflags |= tmp_logflags;
+               if (error)
+                       goto error0;
+               bno = del.br_startoff - 1;
+ nodelete:
+               /*
+                * If not done, go on to the next (previous) record.
+                */
+               if (bno != (xfs_fileoff_t)-1 && bno >= start) {
+                       if (lastx >= 0) {
+                               ep = xfs_iext_get_ext(ifp, lastx);
+                               if (xfs_bmbt_get_startoff(ep) > bno) {
+                                       if (--lastx >= 0)
+                                               ep = xfs_iext_get_ext(ifp,
+                                                                     lastx);
+                               }
+                               xfs_bmbt_get_all(ep, &got);
+                       }
+                       extno++;
+               }
+       }
+       *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
+       /*
+        * Convert to a btree if necessary.
+        */
+       if (xfs_bmap_needs_btree(ip, whichfork)) {
+               ASSERT(cur == NULL);
+               error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
+                       &cur, 0, &tmp_logflags, whichfork);
+               logflags |= tmp_logflags;
+               if (error)
+                       goto error0;
+       }
+       /*
+        * Convert from btree back to extents, using the cursor.
+        */
+       else if (xfs_bmap_wants_extents(ip, whichfork)) {
+               ASSERT(cur != NULL);
+               error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
+                       whichfork);
+               logflags |= tmp_logflags;
+               if (error)
+                       goto error0;
+       }
+       /*
+        * transform from extents to local?
+        */
+       error = 0;
+ error0:
+       /*
+        * Log everything.  Do this after conversion, there's no point in
+        * logging the extent records if we've converted to btree format.
+        */
+       if ((logflags & xfs_ilog_fext(whichfork)) &&
+           XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
+               logflags &= ~xfs_ilog_fext(whichfork);
+       else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
+                XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
+               logflags &= ~xfs_ilog_fbroot(whichfork);
+       /*
+        * Log the inode even in the error case: if the transaction is
+        * dirty we'll need to shut down the filesystem.
+        */
+       if (logflags)
+               xfs_trans_log_inode(tp, ip, logflags);
+       if (cur) {
+               if (!error) {
+                       *firstblock = cur->bc_private.b.firstblock;
+                       cur->bc_private.b.allocated = 0;
+               }
+               xfs_btree_del_cursor(cur,
+                       error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+       }
+       return error;
+ }
+ /*
+  * Shift extent records to the left to cover a hole.
+  *
+  * The maximum number of extents to be shifted in a single operation is
+  * @num_exts, and @current_ext keeps track of the current extent index we
+  * have shifted.  @offset_shift_fsb is the length by which each extent is
+  * shifted.  If there is no hole to shift the extents into, the operation
+  * is invalid and we abort immediately.
+  */
+ int
+ xfs_bmap_shift_extents(
+       struct xfs_trans        *tp,
+       struct xfs_inode        *ip,
+       int                     *done,
+       xfs_fileoff_t           start_fsb,
+       xfs_fileoff_t           offset_shift_fsb,
+       xfs_extnum_t            *current_ext,
+       xfs_fsblock_t           *firstblock,
+       struct xfs_bmap_free    *flist,
+       int                     num_exts)
+ {
+       struct xfs_btree_cur            *cur;
+       struct xfs_bmbt_rec_host        *gotp;
+       struct xfs_bmbt_irec            got;
+       struct xfs_bmbt_irec            left;
+       struct xfs_mount                *mp = ip->i_mount;
+       struct xfs_ifork                *ifp;
+       xfs_extnum_t                    nexts = 0;
+       xfs_fileoff_t                   startoff;
+       int                             error = 0;
+       int                             i;
+       int                             whichfork = XFS_DATA_FORK;
+       int                             logflags;
+       xfs_filblks_t                   blockcount = 0;
+       int                             total_extents;
+       if (unlikely(XFS_TEST_ERROR(
+           (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+            XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
+            mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
+               XFS_ERROR_REPORT("xfs_bmap_shift_extents",
+                                XFS_ERRLEVEL_LOW, mp);
+               return -EFSCORRUPTED;
+       }
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -EIO;
+       ASSERT(current_ext != NULL);
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+               /* Read in all the extents */
+               error = xfs_iread_extents(tp, ip, whichfork);
+               if (error)
+                       return error;
+       }
+       /*
+        * If *current_ext is 0, we need to look up the extent from which
+        * we start shifting and store it in gotp.
+        */
+       if (!*current_ext) {
+               gotp = xfs_iext_bno_to_ext(ifp, start_fsb, current_ext);
+               /*
+                * gotp can be null in 2 cases: 1) if there are no extents
+                * or 2) start_fsb lies in a hole beyond which there are
+                * no extents. Either way, we are done.
+                */
+               if (!gotp) {
+                       *done = 1;
+                       return 0;
+               }
+       }
+       /* We are going to change the core inode */
+       logflags = XFS_ILOG_CORE;
+       if (ifp->if_flags & XFS_IFBROOT) {
+               cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
+               cur->bc_private.b.firstblock = *firstblock;
+               cur->bc_private.b.flist = flist;
+               cur->bc_private.b.flags = 0;
+       } else {
+               cur = NULL;
+               logflags |= XFS_ILOG_DEXT;
+       }
+       /*
+        * There may be delalloc extents in the data fork before the range we
+        * are collapsing out, so we cannot use the count of real extents
+        * here.  Instead we have to calculate it from the incore fork.
+        */
+       total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
+       while (nexts++ < num_exts && *current_ext < total_extents) {
+               gotp = xfs_iext_get_ext(ifp, *current_ext);
+               xfs_bmbt_get_all(gotp, &got);
+               startoff = got.br_startoff - offset_shift_fsb;
+               /*
+                * Before shifting an extent into the hole, make sure that
+                * the hole is large enough to accommodate the shift.
+                */
+               if (*current_ext) {
+                       xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
+                                               *current_ext - 1), &left);
+                       if (startoff < left.br_startoff + left.br_blockcount)
+                               error = -EINVAL;
+               } else if (offset_shift_fsb > got.br_startoff) {
+                       /*
+                        * When the first extent is shifted, offset_shift_fsb
+                        * must not exceed the starting offset of the first
+                        * extent.
+                        */
+                       error = -EINVAL;
+               }
+               if (error)
+                       goto del_cursor;
+               if (cur) {
+                       error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
+                                                  got.br_startblock,
+                                                  got.br_blockcount,
+                                                  &i);
+                       if (error)
+                               goto del_cursor;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor);
+               }
+               /* Check if we can merge 2 adjacent extents */
+               if (*current_ext &&
+                   left.br_startoff + left.br_blockcount == startoff &&
+                   left.br_startblock + left.br_blockcount ==
+                               got.br_startblock &&
+                   left.br_state == got.br_state &&
+                   left.br_blockcount + got.br_blockcount <= MAXEXTLEN) {
+                       blockcount = left.br_blockcount +
+                               got.br_blockcount;
+                       xfs_iext_remove(ip, *current_ext, 1, 0);
+                       if (cur) {
+                               error = xfs_btree_delete(cur, &i);
+                               if (error)
+                                       goto del_cursor;
+                               XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor);
+                       }
+                       XFS_IFORK_NEXT_SET(ip, whichfork,
+                               XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
+                       gotp = xfs_iext_get_ext(ifp, --*current_ext);
+                       xfs_bmbt_get_all(gotp, &got);
+                       /* Make cursor point to the extent we will update */
+                       if (cur) {
+                               error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
+                                                          got.br_startblock,
+                                                          got.br_blockcount,
+                                                          &i);
+                               if (error)
+                                       goto del_cursor;
+                               XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor);
+                       }
+                       xfs_bmbt_set_blockcount(gotp, blockcount);
+                       got.br_blockcount = blockcount;
+               } else {
+                       /* We have to update the startoff */
+                       xfs_bmbt_set_startoff(gotp, startoff);
+                       got.br_startoff = startoff;
+               }
+               if (cur) {
+                       error = xfs_bmbt_update(cur, got.br_startoff,
+                                               got.br_startblock,
+                                               got.br_blockcount,
+                                               got.br_state);
+                       if (error)
+                               goto del_cursor;
+               }
+               (*current_ext)++;
+               total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
+       }
+       /* Check if we are done */
+       if (*current_ext == total_extents)
+               *done = 1;
+ del_cursor:
+       if (cur)
+               xfs_btree_del_cursor(cur,
+                       error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+       xfs_trans_log_inode(tp, ip, logflags);
+       return error;
+ }
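+ /*
+  * Illustrative sketch, not part of this change: a caller collapsing a
+  * hole would drive xfs_bmap_shift_extents() in a loop, one transaction
+  * per call, until *done is set.  Transaction setup, inode joining and
+  * commit are elided here, and the local variable names are only
+  * assumptions:
+  *
+  *     xfs_extnum_t    current_ext = 0;
+  *     int             done = 0;
+  *
+  *     while (!done && !error) {
+  *             (allocate and reserve a transaction, join the inode)
+  *             xfs_bmap_init(&free_list, &firstblock);
+  *             error = xfs_bmap_shift_extents(tp, ip, &done, start_fsb,
+  *                             shift_fsb, &current_ext, &firstblock,
+  *                             &free_list, XFS_BMAP_MAX_SHIFT_EXTENTS);
+  *             (finish the free list and commit the transaction)
+  *     }
+  */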
index 0000000000000000000000000000000000000000,38ba36e9b2f0c5616f018c0e5474da7ce9b42290..b879ca56a64ccfab5b2a42502a5b50f68b85f1df
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,188 +1,186 @@@
 -#define XFS_BMAPI_STACK_SWITCH        0x080
+ /*
+  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+  * All Rights Reserved.
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License as
+  * published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it would be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, write the Free Software Foundation,
+  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+  */
+ #ifndef __XFS_BMAP_H__
+ #define       __XFS_BMAP_H__
+ struct getbmap;
+ struct xfs_bmbt_irec;
+ struct xfs_ifork;
+ struct xfs_inode;
+ struct xfs_mount;
+ struct xfs_trans;
+ extern kmem_zone_t    *xfs_bmap_free_item_zone;
+ /*
+  * List of extents to be freed "later".
+  * The list is kept sorted on xbf_startblock.
+  */
+ typedef struct xfs_bmap_free_item
+ {
+       xfs_fsblock_t           xbfi_startblock;/* starting fs block number */
+       xfs_extlen_t            xbfi_blockcount;/* number of blocks in extent */
+       struct xfs_bmap_free_item *xbfi_next;   /* link to next entry */
+ } xfs_bmap_free_item_t;
+ /*
+  * Header for free extent list.
+  *
+  * xbf_low is used by the allocator to activate the lowspace algorithm -
+  * when free space is running low the extent allocator may choose to
+  * allocate an extent from an AG without leaving sufficient space for
+  * a btree split when inserting the new extent.  In this case the allocator
+  * will enable the lowspace algorithm which is supposed to allow further
+  * allocations (such as btree splits and newroots) to allocate from
+  * sequential AGs.  In order to avoid locking AGs out of order the lowspace
+  * algorithm will start searching for free space from AG 0.  If the correct
+  * transaction reservations have been made then this algorithm will eventually
+  * find all the space it needs.
+  */
+ typedef       struct xfs_bmap_free
+ {
+       xfs_bmap_free_item_t    *xbf_first;     /* list of to-be-freed extents */
+       int                     xbf_count;      /* count of items on list */
+       int                     xbf_low;        /* alloc in low mode */
+ } xfs_bmap_free_t;
+ #define       XFS_BMAP_MAX_NMAP       4
+ /*
+  * Flags for xfs_bmapi_*
+  */
+ #define XFS_BMAPI_ENTIRE      0x001   /* return entire extent, not trimmed */
+ #define XFS_BMAPI_METADATA    0x002   /* mapping metadata not user data */
+ #define XFS_BMAPI_ATTRFORK    0x004   /* use attribute fork not data */
+ #define XFS_BMAPI_PREALLOC    0x008   /* preallocation op: unwritten space */
+ #define XFS_BMAPI_IGSTATE     0x010   /* Ignore state - */
+                                       /* combine contig. space */
+ #define XFS_BMAPI_CONTIG      0x020   /* must allocate only one extent */
+ /*
+  * Unwritten extent conversion - this needs write cache flushing and no
+  * additional allocation alignment.  When specified with XFS_BMAPI_PREALLOC
+  * it converts from written to unwritten, otherwise from unwritten to written.
+  */
+ #define XFS_BMAPI_CONVERT     0x040
 -      { XFS_BMAPI_CONVERT,    "CONVERT" }, \
 -      { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" }
+ #define XFS_BMAPI_FLAGS \
+       { XFS_BMAPI_ENTIRE,     "ENTIRE" }, \
+       { XFS_BMAPI_METADATA,   "METADATA" }, \
+       { XFS_BMAPI_ATTRFORK,   "ATTRFORK" }, \
+       { XFS_BMAPI_PREALLOC,   "PREALLOC" }, \
+       { XFS_BMAPI_IGSTATE,    "IGSTATE" }, \
+       { XFS_BMAPI_CONTIG,     "CONTIG" }, \
++      { XFS_BMAPI_CONVERT,    "CONVERT" }
+ static inline int xfs_bmapi_aflag(int w)
+ {
+       return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0);
+ }
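+ /*
+  * Illustrative note, not part of this change: the XFS_BMAPI_* values
+  * are bit flags and combine by OR.  Per the conversion comment above,
+  * turning written space into unwritten passes CONVERT together with
+  * PREALLOC, e.g.:
+  *
+  *     int flags = XFS_BMAPI_CONVERT | XFS_BMAPI_PREALLOC |
+  *                 xfs_bmapi_aflag(whichfork);
+  */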
+ /*
+  * Special values for xfs_bmbt_irec_t br_startblock field.
+  */
+ #define       DELAYSTARTBLOCK         ((xfs_fsblock_t)-1LL)
+ #define       HOLESTARTBLOCK          ((xfs_fsblock_t)-2LL)
+ static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp)
+ {
+       ((flp)->xbf_first = NULL, (flp)->xbf_count = 0, \
+               (flp)->xbf_low = 0, *(fbp) = NULLFSBLOCK);
+ }
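+ /*
+  * Illustrative note, not part of this change: xfs_bmap_init() pairs the
+  * free list with the firstblock cookie before a mapping call; a minimal
+  * sketch using the xfs_bmapi_write() prototype below:
+  *
+  *     xfs_bmap_free_t free_list;
+  *     xfs_fsblock_t   firstblock;
+  *
+  *     xfs_bmap_init(&free_list, &firstblock);
+  *     error = xfs_bmapi_write(tp, ip, bno, len, flags, &firstblock,
+  *                             total, mval, &nmap, &free_list);
+  */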
+ /*
+  * Flags for xfs_bmap_add_extent*.
+  */
+ #define BMAP_LEFT_CONTIG      (1 << 0)
+ #define BMAP_RIGHT_CONTIG     (1 << 1)
+ #define BMAP_LEFT_FILLING     (1 << 2)
+ #define BMAP_RIGHT_FILLING    (1 << 3)
+ #define BMAP_LEFT_DELAY               (1 << 4)
+ #define BMAP_RIGHT_DELAY      (1 << 5)
+ #define BMAP_LEFT_VALID               (1 << 6)
+ #define BMAP_RIGHT_VALID      (1 << 7)
+ #define BMAP_ATTRFORK         (1 << 8)
+ #define XFS_BMAP_EXT_FLAGS \
+       { BMAP_LEFT_CONTIG,     "LC" }, \
+       { BMAP_RIGHT_CONTIG,    "RC" }, \
+       { BMAP_LEFT_FILLING,    "LF" }, \
+       { BMAP_RIGHT_FILLING,   "RF" }, \
+       { BMAP_ATTRFORK,        "ATTR" }
+ /*
+  * This macro determines how many extents will be shifted in one write
+  * transaction.  A shift can require two btree splits - one for the extent
+  * move and one for the following extent merge - so it is safest to shift
+  * only one extent per write transaction.
+  */
+ #define XFS_BMAP_MAX_SHIFT_EXTENTS    1
+ #ifdef DEBUG
+ void  xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
+               int whichfork, unsigned long caller_ip);
+ #define       XFS_BMAP_TRACE_EXLIST(ip,c,w)   \
+       xfs_bmap_trace_exlist(ip,c,w, _THIS_IP_)
+ #else
+ #define       XFS_BMAP_TRACE_EXLIST(ip,c,w)
+ #endif
+ int   xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
+ void  xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
+ void  xfs_bmap_add_free(xfs_fsblock_t bno, xfs_filblks_t len,
+               struct xfs_bmap_free *flist, struct xfs_mount *mp);
+ void  xfs_bmap_cancel(struct xfs_bmap_free *flist);
+ void  xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork);
+ int   xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
+               xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork);
+ int   xfs_bmap_last_before(struct xfs_trans *tp, struct xfs_inode *ip,
+               xfs_fileoff_t *last_block, int whichfork);
+ int   xfs_bmap_last_offset(struct xfs_inode *ip, xfs_fileoff_t *unused,
+               int whichfork);
+ int   xfs_bmap_one_block(struct xfs_inode *ip, int whichfork);
+ int   xfs_bmap_read_extents(struct xfs_trans *tp, struct xfs_inode *ip,
+               int whichfork);
+ int   xfs_bmapi_read(struct xfs_inode *ip, xfs_fileoff_t bno,
+               xfs_filblks_t len, struct xfs_bmbt_irec *mval,
+               int *nmap, int flags);
+ int   xfs_bmapi_delay(struct xfs_inode *ip, xfs_fileoff_t bno,
+               xfs_filblks_t len, struct xfs_bmbt_irec *mval,
+               int *nmap, int flags);
+ int   xfs_bmapi_write(struct xfs_trans *tp, struct xfs_inode *ip,
+               xfs_fileoff_t bno, xfs_filblks_t len, int flags,
+               xfs_fsblock_t *firstblock, xfs_extlen_t total,
+               struct xfs_bmbt_irec *mval, int *nmap,
+               struct xfs_bmap_free *flist);
+ int   xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
+               xfs_fileoff_t bno, xfs_filblks_t len, int flags,
+               xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
+               struct xfs_bmap_free *flist, int *done);
+ int   xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
+               xfs_extnum_t num);
+ uint  xfs_default_attroffset(struct xfs_inode *ip);
+ int   xfs_bmap_shift_extents(struct xfs_trans *tp, struct xfs_inode *ip,
+               int *done, xfs_fileoff_t start_fsb,
+               xfs_fileoff_t offset_shift_fsb, xfs_extnum_t *current_ext,
+               xfs_fsblock_t *firstblock, struct xfs_bmap_free *flist,
+               int num_exts);
+ #endif        /* __XFS_BMAP_H__ */
index 0000000000000000000000000000000000000000,0097c42f1f1042abcc2f309389dbde748797eb65..ba35c9ccb8f98e0947f3af99b595357fa2b5339f
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,3989 +1,4069 @@@
 -xfs_btree_split(
+ /*
+  * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
+  * All Rights Reserved.
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License as
+  * published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it would be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, write the Free Software Foundation,
+  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+  */
+ #include "xfs.h"
+ #include "xfs_fs.h"
+ #include "xfs_shared.h"
+ #include "xfs_format.h"
+ #include "xfs_log_format.h"
+ #include "xfs_trans_resv.h"
+ #include "xfs_bit.h"
+ #include "xfs_sb.h"
+ #include "xfs_ag.h"
+ #include "xfs_mount.h"
+ #include "xfs_inode.h"
+ #include "xfs_trans.h"
+ #include "xfs_inode_item.h"
+ #include "xfs_buf_item.h"
+ #include "xfs_btree.h"
+ #include "xfs_error.h"
+ #include "xfs_trace.h"
+ #include "xfs_cksum.h"
++#include "xfs_alloc.h"
+ /*
+  * Cursor allocation zone.
+  */
+ kmem_zone_t   *xfs_btree_cur_zone;
+ /*
+  * Btree magic numbers.
+  */
+ static const __uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
+       { XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
+         XFS_FIBT_MAGIC },
+       { XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC,
+         XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC }
+ };
+ #define xfs_btree_magic(cur) \
+       xfs_magics[!!((cur)->bc_flags & XFS_BTREE_CRC_BLOCKS)][cur->bc_btnum]
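+ /*
+  * Illustrative note, not part of this change: for a bmap btree cursor
+  * on a CRC-enabled filesystem XFS_BTREE_CRC_BLOCKS is set, so the macro
+  * resolves to xfs_magics[1][XFS_BTNUM_BMAP], i.e. XFS_BMAP_CRC_MAGIC.
+  */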
+ STATIC int                            /* error (0 or EFSCORRUPTED) */
+ xfs_btree_check_lblock(
+       struct xfs_btree_cur    *cur,   /* btree cursor */
+       struct xfs_btree_block  *block, /* btree long form block pointer */
+       int                     level,  /* level of the btree block */
+       struct xfs_buf          *bp)    /* buffer for block, if any */
+ {
+       int                     lblock_ok = 1; /* block passes checks */
+       struct xfs_mount        *mp;    /* file system mount point */
+       mp = cur->bc_mp;
+       if (xfs_sb_version_hascrc(&mp->m_sb)) {
+               lblock_ok = lblock_ok &&
+                       uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid) &&
+                       block->bb_u.l.bb_blkno == cpu_to_be64(
+                               bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
+       }
+       lblock_ok = lblock_ok &&
+               be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) &&
+               be16_to_cpu(block->bb_level) == level &&
+               be16_to_cpu(block->bb_numrecs) <=
+                       cur->bc_ops->get_maxrecs(cur, level) &&
+               block->bb_u.l.bb_leftsib &&
+               (block->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO) ||
+                XFS_FSB_SANITY_CHECK(mp,
+                       be64_to_cpu(block->bb_u.l.bb_leftsib))) &&
+               block->bb_u.l.bb_rightsib &&
+               (block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO) ||
+                XFS_FSB_SANITY_CHECK(mp,
+                       be64_to_cpu(block->bb_u.l.bb_rightsib)));
+       if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp,
+                       XFS_ERRTAG_BTREE_CHECK_LBLOCK,
+                       XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
+               if (bp)
+                       trace_xfs_btree_corrupt(bp, _RET_IP_);
+               XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+               return -EFSCORRUPTED;
+       }
+       return 0;
+ }
+ STATIC int                            /* error (0 or EFSCORRUPTED) */
+ xfs_btree_check_sblock(
+       struct xfs_btree_cur    *cur,   /* btree cursor */
+       struct xfs_btree_block  *block, /* btree short form block pointer */
+       int                     level,  /* level of the btree block */
+       struct xfs_buf          *bp)    /* buffer containing block */
+ {
+       struct xfs_mount        *mp;    /* file system mount point */
+       struct xfs_buf          *agbp;  /* buffer for ag. freespace struct */
+       struct xfs_agf          *agf;   /* ag. freespace structure */
+       xfs_agblock_t           agflen; /* native ag. freespace length */
+       int                     sblock_ok = 1; /* block passes checks */
+       mp = cur->bc_mp;
+       agbp = cur->bc_private.a.agbp;
+       agf = XFS_BUF_TO_AGF(agbp);
+       agflen = be32_to_cpu(agf->agf_length);
+       if (xfs_sb_version_hascrc(&mp->m_sb)) {
+               sblock_ok = sblock_ok &&
+                       uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid) &&
+                       block->bb_u.s.bb_blkno == cpu_to_be64(
+                               bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
+       }
+       sblock_ok = sblock_ok &&
+               be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) &&
+               be16_to_cpu(block->bb_level) == level &&
+               be16_to_cpu(block->bb_numrecs) <=
+                       cur->bc_ops->get_maxrecs(cur, level) &&
+               (block->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) ||
+                be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) &&
+               block->bb_u.s.bb_leftsib &&
+               (block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK) ||
+                be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) &&
+               block->bb_u.s.bb_rightsib;
+       if (unlikely(XFS_TEST_ERROR(!sblock_ok, mp,
+                       XFS_ERRTAG_BTREE_CHECK_SBLOCK,
+                       XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
+               if (bp)
+                       trace_xfs_btree_corrupt(bp, _RET_IP_);
+               XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+               return -EFSCORRUPTED;
+       }
+       return 0;
+ }
+ /*
+  * Debug routine: check that block header is ok.
+  */
+ int
+ xfs_btree_check_block(
+       struct xfs_btree_cur    *cur,   /* btree cursor */
+       struct xfs_btree_block  *block, /* generic btree block pointer */
+       int                     level,  /* level of the btree block */
+       struct xfs_buf          *bp)    /* buffer containing block, if any */
+ {
+       if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+               return xfs_btree_check_lblock(cur, block, level, bp);
+       else
+               return xfs_btree_check_sblock(cur, block, level, bp);
+ }
+ /*
+  * Check that (long) pointer is ok.
+  */
+ int                                   /* error (0 or EFSCORRUPTED) */
+ xfs_btree_check_lptr(
+       struct xfs_btree_cur    *cur,   /* btree cursor */
+       xfs_dfsbno_t            bno,    /* btree block disk address */
+       int                     level)  /* btree block level */
+ {
+       XFS_WANT_CORRUPTED_RETURN(
+               level > 0 &&
+               bno != NULLDFSBNO &&
+               XFS_FSB_SANITY_CHECK(cur->bc_mp, bno));
+       return 0;
+ }
+ #ifdef DEBUG
+ /*
+  * Check that (short) pointer is ok.
+  */
+ STATIC int                            /* error (0 or EFSCORRUPTED) */
+ xfs_btree_check_sptr(
+       struct xfs_btree_cur    *cur,   /* btree cursor */
+       xfs_agblock_t           bno,    /* btree block disk address */
+       int                     level)  /* btree block level */
+ {
+       xfs_agblock_t           agblocks = cur->bc_mp->m_sb.sb_agblocks;
+       XFS_WANT_CORRUPTED_RETURN(
+               level > 0 &&
+               bno != NULLAGBLOCK &&
+               bno != 0 &&
+               bno < agblocks);
+       return 0;
+ }
+ /*
+  * Check that block ptr is ok.
+  */
+ STATIC int                            /* error (0 or EFSCORRUPTED) */
+ xfs_btree_check_ptr(
+       struct xfs_btree_cur    *cur,   /* btree cursor */
+       union xfs_btree_ptr     *ptr,   /* btree block disk address */
+       int                     index,  /* offset from ptr to check */
+       int                     level)  /* btree block level */
+ {
+       if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
+               return xfs_btree_check_lptr(cur,
+                               be64_to_cpu((&ptr->l)[index]), level);
+       } else {
+               return xfs_btree_check_sptr(cur,
+                               be32_to_cpu((&ptr->s)[index]), level);
+       }
+ }
+ #endif
+ /*
+  * Calculate CRC on the whole btree block and stuff it into the
+  * long-form btree header.
+  *
+  * Prior to calculating the CRC, pull the LSN out of the buffer log item and
+  * put it into the buffer so recovery knows what the last modification was
+  * that made it to disk.
+  */
+ void
+ xfs_btree_lblock_calc_crc(
+       struct xfs_buf          *bp)
+ {
+       struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
+       struct xfs_buf_log_item *bip = bp->b_fspriv;
+       if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
+               return;
+       if (bip)
+               block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+       xfs_buf_update_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
+ }
+ bool
+ xfs_btree_lblock_verify_crc(
+       struct xfs_buf          *bp)
+ {
+       if (xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
+               return xfs_buf_verify_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
+       return true;
+ }
+ /*
+  * Calculate CRC on the whole btree block and stuff it into the
+  * short-form btree header.
+  *
+  * Prior to calculating the CRC, pull the LSN out of the buffer log item and
+  * put it into the buffer so recovery knows what the last modification was
+  * that made it to disk.
+  */
+ void
+ xfs_btree_sblock_calc_crc(
+       struct xfs_buf          *bp)
+ {
+       struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
+       struct xfs_buf_log_item *bip = bp->b_fspriv;
+       if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
+               return;
+       if (bip)
+               block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+       xfs_buf_update_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
+ }
+ bool
+ xfs_btree_sblock_verify_crc(
+       struct xfs_buf          *bp)
+ {
+       if (xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
+               return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
+       return true;
+ }
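+ /*
+  * Illustrative note, not part of this change: read verifiers typically
+  * check the CRC before validating the block contents, along the lines
+  * of (the function names here are only indicative):
+  *
+  *     if (!xfs_btree_lblock_verify_crc(bp))
+  *             xfs_buf_ioerror(bp, -EFSBADCRC);
+  *     else if (!xfs_bmbt_verify(bp))
+  *             xfs_buf_ioerror(bp, -EFSCORRUPTED);
+  */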
+ /*
+  * Delete the btree cursor.
+  */
+ void
+ xfs_btree_del_cursor(
+       xfs_btree_cur_t *cur,           /* btree cursor */
+       int             error)          /* del because of error */
+ {
+       int             i;              /* btree level */
+       /*
+        * Clear the buffer pointers, and release the buffers.
+        * If we're doing this in the face of an error, we
+        * need to make sure to inspect all of the entries
+        * in the bc_bufs array for buffers to be unlocked.
+        * This is because some of the btree code works from
+        * level n down to 0, and if we get an error along
+        * the way we won't have initialized all the entries
+        * down to 0.
+        */
+       for (i = 0; i < cur->bc_nlevels; i++) {
+               if (cur->bc_bufs[i])
+                       xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
+               else if (!error)
+                       break;
+       }
+       /*
+        * Can't free a bmap cursor without having dealt with the
+        * allocated indirect blocks' accounting.
+        */
+       ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP ||
+              cur->bc_private.b.allocated == 0);
+       /*
+        * Free the cursor.
+        */
+       kmem_zone_free(xfs_btree_cur_zone, cur);
+ }
+ /*
+  * Duplicate the btree cursor.
+  * Allocate a new one, copy the record, re-get the buffers.
+  */
+ int                                   /* error */
+ xfs_btree_dup_cursor(
+       xfs_btree_cur_t *cur,           /* input cursor */
+       xfs_btree_cur_t **ncur)         /* output cursor */
+ {
+       xfs_buf_t       *bp;            /* btree block's buffer pointer */
+       int             error;          /* error return value */
+       int             i;              /* level number of btree block */
+       xfs_mount_t     *mp;            /* mount structure for filesystem */
+       xfs_btree_cur_t *new;           /* new cursor value */
+       xfs_trans_t     *tp;            /* transaction pointer, can be NULL */
+       tp = cur->bc_tp;
+       mp = cur->bc_mp;
+       /*
+        * Allocate a new cursor like the old one.
+        */
+       new = cur->bc_ops->dup_cursor(cur);
+       /*
+        * Copy the record currently in the cursor.
+        */
+       new->bc_rec = cur->bc_rec;
+       /*
+        * For each level of the cursor, re-get the buffer and copy the ptr value.
+        */
+       for (i = 0; i < new->bc_nlevels; i++) {
+               new->bc_ptrs[i] = cur->bc_ptrs[i];
+               new->bc_ra[i] = cur->bc_ra[i];
+               bp = cur->bc_bufs[i];
+               if (bp) {
+                       error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+                                                  XFS_BUF_ADDR(bp), mp->m_bsize,
+                                                  0, &bp,
+                                                  cur->bc_ops->buf_ops);
+                       if (error) {
+                               xfs_btree_del_cursor(new, error);
+                               *ncur = NULL;
+                               return error;
+                       }
+               }
+               new->bc_bufs[i] = bp;
+       }
+       *ncur = new;
+       return 0;
+ }
+ /*
+  * XFS btree block layout and addressing:
+  *
+  * There are two types of blocks in the btree: leaf and non-leaf blocks.
+  *
+  * A leaf block starts with a header, followed by records containing
+  * the values.  A non-leaf block also starts with the same header, and
+  * then first contains lookup keys followed by an equal number of pointers
+  * to the btree blocks at the previous level.
+  *
+  *            +--------+-------+-------+-------+-------+-------+-------+
+  * Leaf:      | header | rec 1 | rec 2 | rec 3 | rec 4 | rec 5 | rec N |
+  *            +--------+-------+-------+-------+-------+-------+-------+
+  *
+  *            +--------+-------+-------+-------+-------+-------+-------+
+  * Non-Leaf:  | header | key 1 | key 2 | key N | ptr 1 | ptr 2 | ptr N |
+  *            +--------+-------+-------+-------+-------+-------+-------+
+  *
+  * The header is called struct xfs_btree_block for reasons better left unknown
+  * and comes in different versions for short (32bit) and long (64bit) block
+  * pointers.  The record and key structures are defined by the btree instances
+  * and opaque to the btree core.  The block pointers are simple disk endian
+  * integers, available in a short (32bit) and long (64bit) variant.
+  *
+  * The helpers below calculate the offset of a given record, key or pointer
+  * into a btree block (xfs_btree_*_offset) or return a pointer to the given
+  * record, key or pointer (xfs_btree_*_addr).  Note that all addressing
+  * inside the btree block is done using indices starting at one, not zero!
+  */
+ /*
+  * Return size of the btree block header for this btree instance.
+  */
+ static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
+ {
+       if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
+               if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
+                       return XFS_BTREE_LBLOCK_CRC_LEN;
+               return XFS_BTREE_LBLOCK_LEN;
+       }
+       if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
+               return XFS_BTREE_SBLOCK_CRC_LEN;
+       return XFS_BTREE_SBLOCK_LEN;
+ }
+ /*
+  * Return size of btree block pointers for this btree instance.
+  */
+ static inline size_t xfs_btree_ptr_len(struct xfs_btree_cur *cur)
+ {
+       return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
+               sizeof(__be64) : sizeof(__be32);
+ }
+ /*
+  * Calculate offset of the n-th record in a btree block.
+  */
+ STATIC size_t
+ xfs_btree_rec_offset(
+       struct xfs_btree_cur    *cur,
+       int                     n)
+ {
+       return xfs_btree_block_len(cur) +
+               (n - 1) * cur->bc_ops->rec_len;
+ }
+ /*
+  * Calculate offset of the n-th key in a btree block.
+  */
+ STATIC size_t
+ xfs_btree_key_offset(
+       struct xfs_btree_cur    *cur,
+       int                     n)
+ {
+       return xfs_btree_block_len(cur) +
+               (n - 1) * cur->bc_ops->key_len;
+ }
+ /*
+  * Calculate offset of the n-th block pointer in a btree block.
+  */
+ STATIC size_t
+ xfs_btree_ptr_offset(
+       struct xfs_btree_cur    *cur,
+       int                     n,
+       int                     level)
+ {
+       return xfs_btree_block_len(cur) +
+               cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len +
+               (n - 1) * xfs_btree_ptr_len(cur);
+ }
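+ /*
+  * Worked example, not part of this change: in a long-form CRC block
+  * xfs_btree_block_len() returns XFS_BTREE_LBLOCK_CRC_LEN, so record 3
+  * (indices start at one) lives at
+  *
+  *     XFS_BTREE_LBLOCK_CRC_LEN + (3 - 1) * cur->bc_ops->rec_len
+  *
+  * and block pointers begin only after a full complement of keys, hence
+  * the get_maxrecs() term in xfs_btree_ptr_offset().
+  */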
+ /*
+  * Return a pointer to the n-th record in the btree block.
+  */
+ STATIC union xfs_btree_rec *
+ xfs_btree_rec_addr(
+       struct xfs_btree_cur    *cur,
+       int                     n,
+       struct xfs_btree_block  *block)
+ {
+       return (union xfs_btree_rec *)
+               ((char *)block + xfs_btree_rec_offset(cur, n));
+ }
+ /*
+  * Return a pointer to the n-th key in the btree block.
+  */
+ STATIC union xfs_btree_key *
+ xfs_btree_key_addr(
+       struct xfs_btree_cur    *cur,
+       int                     n,
+       struct xfs_btree_block  *block)
+ {
+       return (union xfs_btree_key *)
+               ((char *)block + xfs_btree_key_offset(cur, n));
+ }
+ /*
+  * Return a pointer to the n-th block pointer in the btree block.
+  */
+ STATIC union xfs_btree_ptr *
+ xfs_btree_ptr_addr(
+       struct xfs_btree_cur    *cur,
+       int                     n,
+       struct xfs_btree_block  *block)
+ {
+       int                     level = xfs_btree_get_level(block);
+       ASSERT(block->bb_level != 0);
+       return (union xfs_btree_ptr *)
+               ((char *)block + xfs_btree_ptr_offset(cur, n, level));
+ }
+ /*
+  * Get the root block which is stored in the inode.
+  *
+  * For now this btree implementation assumes the btree root is always
+  * stored in the if_broot field of an inode fork.
+  */
+ STATIC struct xfs_btree_block *
+ xfs_btree_get_iroot(
+       struct xfs_btree_cur    *cur)
+ {
+       struct xfs_ifork        *ifp;
+       ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork);
+       return (struct xfs_btree_block *)ifp->if_broot;
+ }
+ /*
+  * Retrieve the block pointer from the cursor at the given level.
+  * This may be an inode btree root or from a buffer.
+  */
+ STATIC struct xfs_btree_block *               /* generic btree block pointer */
+ xfs_btree_get_block(
+       struct xfs_btree_cur    *cur,   /* btree cursor */
+       int                     level,  /* level in btree */
+       struct xfs_buf          **bpp)  /* buffer containing the block */
+ {
+       if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+           (level == cur->bc_nlevels - 1)) {
+               *bpp = NULL;
+               return xfs_btree_get_iroot(cur);
+       }
+       *bpp = cur->bc_bufs[level];
+       return XFS_BUF_TO_BLOCK(*bpp);
+ }
+ /*
+  * Get a buffer for the block, return it with no data read.
+  * Long-form addressing.
+  */
+ xfs_buf_t *                           /* buffer for fsbno */
+ xfs_btree_get_bufl(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_fsblock_t   fsbno,          /* file system block number */
+       uint            lock)           /* lock flags for get_buf */
+ {
+       xfs_daddr_t             d;              /* real disk block address */
+       ASSERT(fsbno != NULLFSBLOCK);
+       d = XFS_FSB_TO_DADDR(mp, fsbno);
+       return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
+ }
+ /*
+  * Get a buffer for the block, return it with no data read.
+  * Short-form addressing.
+  */
+ xfs_buf_t *                           /* buffer for agno/agbno */
+ xfs_btree_get_bufs(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_agnumber_t  agno,           /* allocation group number */
+       xfs_agblock_t   agbno,          /* allocation group block number */
+       uint            lock)           /* lock flags for get_buf */
+ {
+       xfs_daddr_t             d;              /* real disk block address */
+       ASSERT(agno != NULLAGNUMBER);
+       ASSERT(agbno != NULLAGBLOCK);
+       d = XFS_AGB_TO_DADDR(mp, agno, agbno);
+       return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
+ }
+ /*
+  * Check for the cursor referring to the last block at the given level.
+  */
+ int                                   /* 1=is last block, 0=not last block */
+ xfs_btree_islastblock(
+       xfs_btree_cur_t         *cur,   /* btree cursor */
+       int                     level)  /* level to check */
+ {
+       struct xfs_btree_block  *block; /* generic btree block pointer */
+       xfs_buf_t               *bp;    /* buffer containing block */
+       block = xfs_btree_get_block(cur, level, &bp);
+       xfs_btree_check_block(cur, block, level, bp);
+       if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+               return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO);
+       else
+               return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
+ }
+ /*
+  * Change the cursor to point to the first record at the given level.
+  * Other levels are unaffected.
+  */
+ STATIC int                            /* success=1, failure=0 */
+ xfs_btree_firstrec(
+       xfs_btree_cur_t         *cur,   /* btree cursor */
+       int                     level)  /* level to change */
+ {
+       struct xfs_btree_block  *block; /* generic btree block pointer */
+       xfs_buf_t               *bp;    /* buffer containing block */
+       /*
+        * Get the block pointer for this level.
+        */
+       block = xfs_btree_get_block(cur, level, &bp);
+       xfs_btree_check_block(cur, block, level, bp);
+       /*
+        * It's empty; there is no such record.
+        */
+       if (!block->bb_numrecs)
+               return 0;
+       /*
+        * Set the ptr value to 1; that's the first record/key.
+        */
+       cur->bc_ptrs[level] = 1;
+       return 1;
+ }
+ /*
+  * Change the cursor to point to the last record in the current block
+  * at the given level.  Other levels are unaffected.
+  */
+ STATIC int                            /* success=1, failure=0 */
+ xfs_btree_lastrec(
+       xfs_btree_cur_t         *cur,   /* btree cursor */
+       int                     level)  /* level to change */
+ {
+       struct xfs_btree_block  *block; /* generic btree block pointer */
+       xfs_buf_t               *bp;    /* buffer containing block */
+       /*
+        * Get the block pointer for this level.
+        */
+       block = xfs_btree_get_block(cur, level, &bp);
+       xfs_btree_check_block(cur, block, level, bp);
+       /*
+        * It's empty; there is no such record.
+        */
+       if (!block->bb_numrecs)
+               return 0;
+       /*
+        * Set the ptr value to numrecs; that's the last record/key.
+        */
+       cur->bc_ptrs[level] = be16_to_cpu(block->bb_numrecs);
+       return 1;
+ }
+ /*
+  * Compute first and last byte offsets for the fields given.
+  * Interprets the offsets table, which contains struct field offsets.
+  */
+ void
+ xfs_btree_offsets(
+       __int64_t       fields,         /* bitmask of fields */
+       const short     *offsets,       /* table of field offsets */
+       int             nbits,          /* number of bits to inspect */
+       int             *first,         /* output: first byte offset */
+       int             *last)          /* output: last byte offset */
+ {
+       int             i;              /* current bit number */
+       __int64_t       imask;          /* mask for current bit number */
+       ASSERT(fields != 0);
+       /*
+        * Find the lowest bit, which gives the first byte offset.
+        */
+       for (i = 0, imask = 1LL; ; i++, imask <<= 1) {
+               if (imask & fields) {
+                       *first = offsets[i];
+                       break;
+               }
+       }
+       /*
+        * Find the highest bit, which gives the last byte offset.
+        */
+       for (i = nbits - 1, imask = 1LL << i; ; i--, imask >>= 1) {
+               if (imask & fields) {
+                       *last = offsets[i + 1] - 1;
+                       break;
+               }
+       }
+ }
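+ /*
+  * Worked example, not part of this change: with nbits = 4 and
+  * fields = 0xa (bits 1 and 3 set), the loops above yield
+  * *first = offsets[1] and *last = offsets[4] - 1, i.e. the byte range
+  * covering fields 1 through 3 inclusive.  This assumes the offsets
+  * table carries nbits + 1 entries, the final one marking the end of
+  * the structure.
+  */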
+ /*
+  * Get a buffer for the block, return it read in.
+  * Long-form addressing.
+  */
+ int
+ xfs_btree_read_bufl(
+       struct xfs_mount        *mp,            /* file system mount point */
+       struct xfs_trans        *tp,            /* transaction pointer */
+       xfs_fsblock_t           fsbno,          /* file system block number */
+       uint                    lock,           /* lock flags for read_buf */
+       struct xfs_buf          **bpp,          /* buffer for fsbno */
+       int                     refval,         /* ref count value for buffer */
+       const struct xfs_buf_ops *ops)
+ {
+       struct xfs_buf          *bp;            /* return value */
+       xfs_daddr_t             d;              /* real disk block address */
+       int                     error;
+       ASSERT(fsbno != NULLFSBLOCK);
+       d = XFS_FSB_TO_DADDR(mp, fsbno);
+       error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
+                                  mp->m_bsize, lock, &bp, ops);
+       if (error)
+               return error;
+       if (bp)
+               xfs_buf_set_ref(bp, refval);
+       *bpp = bp;
+       return 0;
+ }
+ /*
+  * Read-ahead the block, don't wait for it, don't return a buffer.
+  * Long-form addressing.
+  */
+ /* ARGSUSED */
+ void
+ xfs_btree_reada_bufl(
+       struct xfs_mount        *mp,            /* file system mount point */
+       xfs_fsblock_t           fsbno,          /* file system block number */
+       xfs_extlen_t            count,          /* count of filesystem blocks */
+       const struct xfs_buf_ops *ops)
+ {
+       xfs_daddr_t             d;
+       ASSERT(fsbno != NULLFSBLOCK);
+       d = XFS_FSB_TO_DADDR(mp, fsbno);
+       xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
+ }
+ /*
+  * Read-ahead the block, don't wait for it, don't return a buffer.
+  * Short-form addressing.
+  */
+ /* ARGSUSED */
+ void
+ xfs_btree_reada_bufs(
+       struct xfs_mount        *mp,            /* file system mount point */
+       xfs_agnumber_t          agno,           /* allocation group number */
+       xfs_agblock_t           agbno,          /* allocation group block number */
+       xfs_extlen_t            count,          /* count of filesystem blocks */
+       const struct xfs_buf_ops *ops)
+ {
+       xfs_daddr_t             d;
+       ASSERT(agno != NULLAGNUMBER);
+       ASSERT(agbno != NULLAGBLOCK);
+       d = XFS_AGB_TO_DADDR(mp, agno, agbno);
+       xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
+ }
+ STATIC int
+ xfs_btree_readahead_lblock(
+       struct xfs_btree_cur    *cur,
+       int                     lr,
+       struct xfs_btree_block  *block)
+ {
+       int                     rval = 0;
+       xfs_dfsbno_t            left = be64_to_cpu(block->bb_u.l.bb_leftsib);
+       xfs_dfsbno_t            right = be64_to_cpu(block->bb_u.l.bb_rightsib);
+       if ((lr & XFS_BTCUR_LEFTRA) && left != NULLDFSBNO) {
+               xfs_btree_reada_bufl(cur->bc_mp, left, 1,
+                                    cur->bc_ops->buf_ops);
+               rval++;
+       }
+       if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLDFSBNO) {
+               xfs_btree_reada_bufl(cur->bc_mp, right, 1,
+                                    cur->bc_ops->buf_ops);
+               rval++;
+       }
+       return rval;
+ }
+ STATIC int
+ xfs_btree_readahead_sblock(
+       struct xfs_btree_cur    *cur,
+       int                     lr,
+       struct xfs_btree_block *block)
+ {
+       int                     rval = 0;
+       xfs_agblock_t           left = be32_to_cpu(block->bb_u.s.bb_leftsib);
+       xfs_agblock_t           right = be32_to_cpu(block->bb_u.s.bb_rightsib);
+       if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
+               xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
+                                    left, 1, cur->bc_ops->buf_ops);
+               rval++;
+       }
+       if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
+               xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
+                                    right, 1, cur->bc_ops->buf_ops);
+               rval++;
+       }
+       return rval;
+ }
+ /*
+  * Read-ahead btree blocks at the given level.
+  * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA.
+  */
+ STATIC int
+ xfs_btree_readahead(
+       struct xfs_btree_cur    *cur,           /* btree cursor */
+       int                     lev,            /* level in btree */
+       int                     lr)             /* left/right bits */
+ {
+       struct xfs_btree_block  *block;
+       /*
+        * No readahead needed if we are at the root level and the
+        * btree root is stored in the inode.
+        */
+       if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+           (lev == cur->bc_nlevels - 1))
+               return 0;
+       if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
+               return 0;
+       cur->bc_ra[lev] |= lr;
+       block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]);
+       if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+               return xfs_btree_readahead_lblock(cur, lr, block);
+       return xfs_btree_readahead_sblock(cur, lr, block);
+ }
+ STATIC xfs_daddr_t
+ xfs_btree_ptr_to_daddr(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_ptr     *ptr)
+ {
+       if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
+               ASSERT(ptr->l != cpu_to_be64(NULLDFSBNO));
+               return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
+       } else {
+               ASSERT(cur->bc_private.a.agno != NULLAGNUMBER);
+               ASSERT(ptr->s != cpu_to_be32(NULLAGBLOCK));
+               return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
+                                       be32_to_cpu(ptr->s));
+       }
+ }
+ /*
+  * Readahead @count btree blocks at the given @ptr location.
+  *
+  * We don't need to care about long or short form btrees here as we have a
+  * method of converting the ptr directly to a daddr available to us.
+  */
+ STATIC void
+ xfs_btree_readahead_ptr(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_ptr     *ptr,
+       xfs_extlen_t            count)
+ {
+       xfs_buf_readahead(cur->bc_mp->m_ddev_targp,
+                         xfs_btree_ptr_to_daddr(cur, ptr),
+                         cur->bc_mp->m_bsize * count, cur->bc_ops->buf_ops);
+ }
+ /*
+  * Set the buffer for level "lev" in the cursor to bp, releasing
+  * any previous buffer.
+  */
+ STATIC void
+ xfs_btree_setbuf(
+       xfs_btree_cur_t         *cur,   /* btree cursor */
+       int                     lev,    /* level in btree */
+       xfs_buf_t               *bp)    /* new buffer to set */
+ {
+       struct xfs_btree_block  *b;     /* btree block */
+       if (cur->bc_bufs[lev])
+               xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[lev]);
+       cur->bc_bufs[lev] = bp;
+       cur->bc_ra[lev] = 0;
+       b = XFS_BUF_TO_BLOCK(bp);
+       if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
+               if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO))
+                       cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
+               if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO))
+                       cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
+       } else {
+               if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK))
+                       cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
+               if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
+                       cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
+       }
+ }
+ STATIC int
+ xfs_btree_ptr_is_null(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_ptr     *ptr)
+ {
+       if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+               return ptr->l == cpu_to_be64(NULLDFSBNO);
+       else
+               return ptr->s == cpu_to_be32(NULLAGBLOCK);
+ }
+ STATIC void
+ xfs_btree_set_ptr_null(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_ptr     *ptr)
+ {
+       if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+               ptr->l = cpu_to_be64(NULLDFSBNO);
+       else
+               ptr->s = cpu_to_be32(NULLAGBLOCK);
+ }
+ /*
+  * Get/set/init sibling pointers
+  */
+ STATIC void
+ xfs_btree_get_sibling(
+       struct xfs_btree_cur    *cur,
+       struct xfs_btree_block  *block,
+       union xfs_btree_ptr     *ptr,
+       int                     lr)
+ {
+       ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
+       if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
+               if (lr == XFS_BB_RIGHTSIB)
+                       ptr->l = block->bb_u.l.bb_rightsib;
+               else
+                       ptr->l = block->bb_u.l.bb_leftsib;
+       } else {
+               if (lr == XFS_BB_RIGHTSIB)
+                       ptr->s = block->bb_u.s.bb_rightsib;
+               else
+                       ptr->s = block->bb_u.s.bb_leftsib;
+       }
+ }
+ STATIC void
+ xfs_btree_set_sibling(
+       struct xfs_btree_cur    *cur,
+       struct xfs_btree_block  *block,
+       union xfs_btree_ptr     *ptr,
+       int                     lr)
+ {
+       ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
+       if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
+               if (lr == XFS_BB_RIGHTSIB)
+                       block->bb_u.l.bb_rightsib = ptr->l;
+               else
+                       block->bb_u.l.bb_leftsib = ptr->l;
+       } else {
+               if (lr == XFS_BB_RIGHTSIB)
+                       block->bb_u.s.bb_rightsib = ptr->s;
+               else
+                       block->bb_u.s.bb_leftsib = ptr->s;
+       }
+ }
+ void
+ xfs_btree_init_block_int(
+       struct xfs_mount        *mp,
+       struct xfs_btree_block  *buf,
+       xfs_daddr_t             blkno,
+       __u32                   magic,
+       __u16                   level,
+       __u16                   numrecs,
+       __u64                   owner,
+       unsigned int            flags)
+ {
+       buf->bb_magic = cpu_to_be32(magic);
+       buf->bb_level = cpu_to_be16(level);
+       buf->bb_numrecs = cpu_to_be16(numrecs);
+       if (flags & XFS_BTREE_LONG_PTRS) {
+               buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
+               buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
+               if (flags & XFS_BTREE_CRC_BLOCKS) {
+                       buf->bb_u.l.bb_blkno = cpu_to_be64(blkno);
+                       buf->bb_u.l.bb_owner = cpu_to_be64(owner);
+                       uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid);
+                       buf->bb_u.l.bb_pad = 0;
+                       buf->bb_u.l.bb_lsn = 0;
+               }
+       } else {
+               /* owner is a 32 bit value on short blocks */
+               __u32 __owner = (__u32)owner;
+               buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
+               buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
+               if (flags & XFS_BTREE_CRC_BLOCKS) {
+                       buf->bb_u.s.bb_blkno = cpu_to_be64(blkno);
+                       buf->bb_u.s.bb_owner = cpu_to_be32(__owner);
+                       uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid);
+                       buf->bb_u.s.bb_lsn = 0;
+               }
+       }
+ }
+ void
+ xfs_btree_init_block(
+       struct xfs_mount *mp,
+       struct xfs_buf  *bp,
+       __u32           magic,
+       __u16           level,
+       __u16           numrecs,
+       __u64           owner,
+       unsigned int    flags)
+ {
+       xfs_btree_init_block_int(mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
+                                magic, level, numrecs, owner, flags);
+ }
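+ /*
+  * Illustrative note, not part of this change: a freshly created
+  * short-form root holding a single record (e.g. a by-block freespace
+  * btree root) could be set up as
+  *
+  *     xfs_btree_init_block(mp, bp, XFS_ABTB_MAGIC, 0, 1, agno, 0);
+  *
+  * i.e. level 0, one record, the AG number as owner and no CRC flags.
+  */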
+ STATIC void
+ xfs_btree_init_block_cur(
+       struct xfs_btree_cur    *cur,
+       struct xfs_buf          *bp,
+       int                     level,
+       int                     numrecs)
+ {
+       __u64 owner;
+       /*
+        * We can pull the owner from the cursor right now as the different
+        * owners align directly with the pointer size of the btree.  This may
+        * change in the future, but is safe for current users of the generic
+        * btree code.
+        */
+       if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+               owner = cur->bc_private.b.ip->i_ino;
+       else
+               owner = cur->bc_private.a.agno;
+       xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
+                                xfs_btree_magic(cur), level, numrecs,
+                                owner, cur->bc_flags);
+ }
+ /*
+  * Return true if ptr is the last record in the btree and
+  * we need to track updates to this record.  The decision
+  * will be further refined in the update_lastrec method.
+  */
+ STATIC int
+ xfs_btree_is_lastrec(
+       struct xfs_btree_cur    *cur,
+       struct xfs_btree_block  *block,
+       int                     level)
+ {
+       union xfs_btree_ptr     ptr;
+       if (level > 0)
+               return 0;
+       if (!(cur->bc_flags & XFS_BTREE_LASTREC_UPDATE))
+               return 0;
+       xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
+       if (!xfs_btree_ptr_is_null(cur, &ptr))
+               return 0;
+       return 1;
+ }
+ STATIC void
+ xfs_btree_buf_to_ptr(
+       struct xfs_btree_cur    *cur,
+       struct xfs_buf          *bp,
+       union xfs_btree_ptr     *ptr)
+ {
+       if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+               ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
+                                       XFS_BUF_ADDR(bp)));
+       else {
+               ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
+                                       XFS_BUF_ADDR(bp)));
+       }
+ }
+ STATIC void
+ xfs_btree_set_refs(
+       struct xfs_btree_cur    *cur,
+       struct xfs_buf          *bp)
+ {
+       switch (cur->bc_btnum) {
+       case XFS_BTNUM_BNO:
+       case XFS_BTNUM_CNT:
+               xfs_buf_set_ref(bp, XFS_ALLOC_BTREE_REF);
+               break;
+       case XFS_BTNUM_INO:
+       case XFS_BTNUM_FINO:
+               xfs_buf_set_ref(bp, XFS_INO_BTREE_REF);
+               break;
+       case XFS_BTNUM_BMAP:
+               xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
+               break;
+       default:
+               ASSERT(0);
+       }
+ }
+ STATIC int
+ xfs_btree_get_buf_block(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_ptr     *ptr,
+       int                     flags,
+       struct xfs_btree_block  **block,
+       struct xfs_buf          **bpp)
+ {
+       struct xfs_mount        *mp = cur->bc_mp;
+       xfs_daddr_t             d;
+       /* need to sort out how callers deal with failures first */
+       ASSERT(!(flags & XBF_TRYLOCK));
+       d = xfs_btree_ptr_to_daddr(cur, ptr);
+       *bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
+                                mp->m_bsize, flags);
+       if (!*bpp)
+               return -ENOMEM;
+       (*bpp)->b_ops = cur->bc_ops->buf_ops;
+       *block = XFS_BUF_TO_BLOCK(*bpp);
+       return 0;
+ }
+ /*
+  * Read in the buffer at the given ptr and return the buffer and
+  * the block pointer within the buffer.
+  */
+ STATIC int
+ xfs_btree_read_buf_block(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_ptr     *ptr,
+       int                     flags,
+       struct xfs_btree_block  **block,
+       struct xfs_buf          **bpp)
+ {
+       struct xfs_mount        *mp = cur->bc_mp;
+       xfs_daddr_t             d;
+       int                     error;
+       /* need to sort out how callers deal with failures first */
+       ASSERT(!(flags & XBF_TRYLOCK));
+       d = xfs_btree_ptr_to_daddr(cur, ptr);
+       error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
+                                  mp->m_bsize, flags, bpp,
+                                  cur->bc_ops->buf_ops);
+       if (error)
+               return error;
+       xfs_btree_set_refs(cur, *bpp);
+       *block = XFS_BUF_TO_BLOCK(*bpp);
+       return 0;
+ }
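+ /*
+  * Editor's note, not part of this commit: the two helpers above differ
+  * in one respect.  _get_buf_block grabs a buffer without reading it
+  * from disk, for blocks about to be initialised from scratch (e.g. the
+  * new right half of a split), while _read_buf_block reads existing
+  * contents through the verifier in cur->bc_ops->buf_ops.
+  */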
+ /*
+  * Copy keys from one btree block to another.
+  */
+ STATIC void
+ xfs_btree_copy_keys(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_key     *dst_key,
+       union xfs_btree_key     *src_key,
+       int                     numkeys)
+ {
+       ASSERT(numkeys >= 0);
+       memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len);
+ }
+ /*
+  * Copy records from one btree block to another.
+  */
+ STATIC void
+ xfs_btree_copy_recs(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_rec     *dst_rec,
+       union xfs_btree_rec     *src_rec,
+       int                     numrecs)
+ {
+       ASSERT(numrecs >= 0);
+       memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len);
+ }
+ /*
+  * Copy block pointers from one btree block to another.
+  */
+ STATIC void
+ xfs_btree_copy_ptrs(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_ptr     *dst_ptr,
+       union xfs_btree_ptr     *src_ptr,
+       int                     numptrs)
+ {
+       ASSERT(numptrs >= 0);
+       memcpy(dst_ptr, src_ptr, numptrs * xfs_btree_ptr_len(cur));
+ }
+ /*
+  * Shift keys one index left/right inside a single btree block.
+  */
+ STATIC void
+ xfs_btree_shift_keys(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_key     *key,
+       int                     dir,
+       int                     numkeys)
+ {
+       char                    *dst_key;
+       ASSERT(numkeys >= 0);
+       ASSERT(dir == 1 || dir == -1);
+       dst_key = (char *)key + (dir * cur->bc_ops->key_len);
+       memmove(dst_key, key, numkeys * cur->bc_ops->key_len);
+ }
+ /*
+  * Shift records one index left/right inside a single btree block.
+  */
+ STATIC void
+ xfs_btree_shift_recs(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_rec     *rec,
+       int                     dir,
+       int                     numrecs)
+ {
+       char                    *dst_rec;
+       ASSERT(numrecs >= 0);
+       ASSERT(dir == 1 || dir == -1);
+       dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len);
+       memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len);
+ }
+ /*
+  * Shift block pointers one index left/right inside a single btree block.
+  */
+ STATIC void
+ xfs_btree_shift_ptrs(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_ptr     *ptr,
+       int                     dir,
+       int                     numptrs)
+ {
+       char                    *dst_ptr;
+       ASSERT(numptrs >= 0);
+       ASSERT(dir == 1 || dir == -1);
+       dst_ptr = (char *)ptr + (dir * xfs_btree_ptr_len(cur));
+       memmove(dst_ptr, ptr, numptrs * xfs_btree_ptr_len(cur));
+ }
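+ /*
+  * Editor's sketch, not part of this commit: the three shift helpers
+  * above all reduce to one overlapping memmove, one slot left
+  * (dir == -1) or right (dir == +1).  A standalone model over
+  * fixed-size elements; shift_entries() is a hypothetical name:
+  */
+ STATIC void
+ shift_entries(
+       void            *base,          /* first element to shift */
+       size_t          elt_len,        /* size of one element */
+       int             dir,            /* -1 left, +1 right */
+       int             n)              /* number of elements to move */
+ {
+       /* src and dst overlap by one slot: memmove, not memcpy */
+       memmove((char *)base + dir * (ssize_t)elt_len, base, n * elt_len);
+ }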
+ /*
+  * Log key values from the btree block.
+  */
+ STATIC void
+ xfs_btree_log_keys(
+       struct xfs_btree_cur    *cur,
+       struct xfs_buf          *bp,
+       int                     first,
+       int                     last)
+ {
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);
+       if (bp) {
+               xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
+               xfs_trans_log_buf(cur->bc_tp, bp,
+                                 xfs_btree_key_offset(cur, first),
+                                 xfs_btree_key_offset(cur, last + 1) - 1);
+       } else {
+               xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
+                               xfs_ilog_fbroot(cur->bc_private.b.whichfork));
+       }
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ }
+ /*
+  * Log record values from the btree block.
+  */
+ void
+ xfs_btree_log_recs(
+       struct xfs_btree_cur    *cur,
+       struct xfs_buf          *bp,
+       int                     first,
+       int                     last)
+ {
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);
+       xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
+       xfs_trans_log_buf(cur->bc_tp, bp,
+                         xfs_btree_rec_offset(cur, first),
+                         xfs_btree_rec_offset(cur, last + 1) - 1);
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ }
+ /*
+  * Log block pointer fields from a btree block (nonleaf).
+  */
+ STATIC void
+ xfs_btree_log_ptrs(
+       struct xfs_btree_cur    *cur,   /* btree cursor */
+       struct xfs_buf          *bp,    /* buffer containing btree block */
+       int                     first,  /* index of first pointer to log */
+       int                     last)   /* index of last pointer to log */
+ {
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);
+       if (bp) {
+               struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
+               int                     level = xfs_btree_get_level(block);
+               xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
+               xfs_trans_log_buf(cur->bc_tp, bp,
+                               xfs_btree_ptr_offset(cur, first, level),
+                               xfs_btree_ptr_offset(cur, last + 1, level) - 1);
+       } else {
+               xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
+                       xfs_ilog_fbroot(cur->bc_private.b.whichfork));
+       }
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ }
+ /*
+  * Log fields from a btree block header.
+  */
+ void
+ xfs_btree_log_block(
+       struct xfs_btree_cur    *cur,   /* btree cursor */
+       struct xfs_buf          *bp,    /* buffer containing btree block */
+       int                     fields) /* mask of fields: XFS_BB_... */
+ {
+       int                     first;  /* first byte offset logged */
+       int                     last;   /* last byte offset logged */
+       static const short      soffsets[] = {  /* table of offsets (short) */
+               offsetof(struct xfs_btree_block, bb_magic),
+               offsetof(struct xfs_btree_block, bb_level),
+               offsetof(struct xfs_btree_block, bb_numrecs),
+               offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib),
+               offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib),
+               offsetof(struct xfs_btree_block, bb_u.s.bb_blkno),
+               offsetof(struct xfs_btree_block, bb_u.s.bb_lsn),
+               offsetof(struct xfs_btree_block, bb_u.s.bb_uuid),
+               offsetof(struct xfs_btree_block, bb_u.s.bb_owner),
+               offsetof(struct xfs_btree_block, bb_u.s.bb_crc),
+               XFS_BTREE_SBLOCK_CRC_LEN
+       };
+       static const short      loffsets[] = {  /* table of offsets (long) */
+               offsetof(struct xfs_btree_block, bb_magic),
+               offsetof(struct xfs_btree_block, bb_level),
+               offsetof(struct xfs_btree_block, bb_numrecs),
+               offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib),
+               offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib),
+               offsetof(struct xfs_btree_block, bb_u.l.bb_blkno),
+               offsetof(struct xfs_btree_block, bb_u.l.bb_lsn),
+               offsetof(struct xfs_btree_block, bb_u.l.bb_uuid),
+               offsetof(struct xfs_btree_block, bb_u.l.bb_owner),
+               offsetof(struct xfs_btree_block, bb_u.l.bb_crc),
+               offsetof(struct xfs_btree_block, bb_u.l.bb_pad),
+               XFS_BTREE_LBLOCK_CRC_LEN
+       };
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_TRACE_ARGBI(cur, bp, fields);
+       if (bp) {
+               int nbits;
+               if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
+                       /*
+                        * We don't log the CRC when updating a btree
+                        * block but instead recreate it during log
+                        * recovery.  As the log buffers have checksums
+                        * of their own, this is safe and avoids logging a
+                        * CRC update in a lot of places.
+                        */
+                       if (fields == XFS_BB_ALL_BITS)
+                               fields = XFS_BB_ALL_BITS_CRC;
+                       nbits = XFS_BB_NUM_BITS_CRC;
+               } else {
+                       nbits = XFS_BB_NUM_BITS;
+               }
+               xfs_btree_offsets(fields,
+                                 (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
+                                       loffsets : soffsets,
+                                 nbits, &first, &last);
+               xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
+               xfs_trans_log_buf(cur->bc_tp, bp, first, last);
+       } else {
+               xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
+                       xfs_ilog_fbroot(cur->bc_private.b.whichfork));
+       }
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ }
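+ /*
+  * Editor's sketch, not part of this commit: xfs_btree_offsets() maps
+  * the XFS_BB_* field bitmask to a single contiguous byte range using
+  * the tables above; each table carries one extra entry so the last
+  * field is bounded.  A simplified model assuming at least one bit is
+  * set (mask_to_byte_range is a hypothetical name):
+  */
+ STATIC void
+ mask_to_byte_range(
+       unsigned int    fields,         /* XFS_BB_* style bitmask */
+       const short     *offsets,       /* nbits + 1 byte offsets */
+       int             nbits,
+       int             *first,
+       int             *last)
+ {
+       int             i, lo = -1, hi = -1;
+       for (i = 0; i < nbits; i++) {
+               if (fields & (1U << i)) {
+                       if (lo < 0)
+                               lo = i;
+                       hi = i;
+               }
+       }
+       *first = offsets[lo];           /* start of lowest dirty field */
+       *last = offsets[hi + 1] - 1;    /* end of highest dirty field */
+ }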
+ /*
+  * Increment cursor by one record at the level.
+  * For nonzero levels the leaf-ward information is untouched.
+  */
+ int                                           /* error */
+ xfs_btree_increment(
+       struct xfs_btree_cur    *cur,
+       int                     level,
+       int                     *stat)          /* success/failure */
+ {
+       struct xfs_btree_block  *block;
+       union xfs_btree_ptr     ptr;
+       struct xfs_buf          *bp;
+       int                     error;          /* error return value */
+       int                     lev;
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_TRACE_ARGI(cur, level);
+       ASSERT(level < cur->bc_nlevels);
+       /* Read-ahead to the right at this level. */
+       xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
+       /* Get a pointer to the btree block. */
+       block = xfs_btree_get_block(cur, level, &bp);
+ #ifdef DEBUG
+       error = xfs_btree_check_block(cur, block, level, bp);
+       if (error)
+               goto error0;
+ #endif
+       /* We're done if we remain in the block after the increment. */
+       if (++cur->bc_ptrs[level] <= xfs_btree_get_numrecs(block))
+               goto out1;
+       /* Fail if we just went off the right edge of the tree. */
+       xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
+       if (xfs_btree_ptr_is_null(cur, &ptr))
+               goto out0;
+       XFS_BTREE_STATS_INC(cur, increment);
+       /*
+        * March up the tree incrementing pointers.
+        * Stop when we don't go off the right edge of a block.
+        */
+       for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
+               block = xfs_btree_get_block(cur, lev, &bp);
+ #ifdef DEBUG
+               error = xfs_btree_check_block(cur, block, lev, bp);
+               if (error)
+                       goto error0;
+ #endif
+               if (++cur->bc_ptrs[lev] <= xfs_btree_get_numrecs(block))
+                       break;
+               /* Read-ahead the right block for the next loop. */
+               xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
+       }
+       /*
+        * If we went off the root then we are either seriously
+        * confused or have the tree root in an inode.
+        */
+       if (lev == cur->bc_nlevels) {
+               if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
+                       goto out0;
+               ASSERT(0);
+               error = -EFSCORRUPTED;
+               goto error0;
+       }
+       ASSERT(lev < cur->bc_nlevels);
+       /*
+        * Now walk back down the tree, fixing up the cursor's buffer
+        * pointers and key numbers.
+        */
+       for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
+               union xfs_btree_ptr     *ptrp;
+               ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
+               --lev;
+               error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
+               if (error)
+                       goto error0;
+               xfs_btree_setbuf(cur, lev, bp);
+               cur->bc_ptrs[lev] = 1;
+       }
+ out1:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = 1;
+       return 0;
+ out0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = 0;
+       return 0;
+ error0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+       return error;
+ }
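+ /*
+  * Editor's sketch, not part of this commit: a typical caller pairs one
+  * lookup with repeated increments to walk every leaf record in order.
+  * walk_records() and its callback are hypothetical; the three btree
+  * calls are the real ones from this file:
+  */
+ STATIC int
+ walk_records(
+       struct xfs_btree_cur    *cur,
+       void                    (*fn)(union xfs_btree_rec *rec))
+ {
+       union xfs_btree_rec     *rec;
+       int                     stat;
+       int                     error;
+       /* position at the first record >= the cursor's search key */
+       error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, &stat);
+       while (!error && stat) {
+               error = xfs_btree_get_rec(cur, &rec, &stat);
+               if (error || !stat)
+                       break;
+               fn(rec);
+               /* stat drops to 0 once we step off the right edge */
+               error = xfs_btree_increment(cur, 0, &stat);
+       }
+       return error;
+ }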
+ /*
+  * Decrement cursor by one record at the level.
+  * For nonzero levels the leaf-ward information is untouched.
+  */
+ int                                           /* error */
+ xfs_btree_decrement(
+       struct xfs_btree_cur    *cur,
+       int                     level,
+       int                     *stat)          /* success/failure */
+ {
+       struct xfs_btree_block  *block;
+       xfs_buf_t               *bp;
+       int                     error;          /* error return value */
+       int                     lev;
+       union xfs_btree_ptr     ptr;
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_TRACE_ARGI(cur, level);
+       ASSERT(level < cur->bc_nlevels);
+       /* Read-ahead to the left at this level. */
+       xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);
+       /* We're done if we remain in the block after the decrement. */
+       if (--cur->bc_ptrs[level] > 0)
+               goto out1;
+       /* Get a pointer to the btree block. */
+       block = xfs_btree_get_block(cur, level, &bp);
+ #ifdef DEBUG
+       error = xfs_btree_check_block(cur, block, level, bp);
+       if (error)
+               goto error0;
+ #endif
+       /* Fail if we just went off the left edge of the tree. */
+       xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
+       if (xfs_btree_ptr_is_null(cur, &ptr))
+               goto out0;
+       XFS_BTREE_STATS_INC(cur, decrement);
+       /*
+        * March up the tree decrementing pointers.
+        * Stop when we don't go off the left edge of a block.
+        */
+       for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
+               if (--cur->bc_ptrs[lev] > 0)
+                       break;
+               /* Read-ahead the left block for the next loop. */
+               xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
+       }
+       /*
+        * If we went off the root then we are either seriously
+        * confused or have the tree root in an inode.
+        */
+       if (lev == cur->bc_nlevels) {
+               if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
+                       goto out0;
+               ASSERT(0);
+               error = -EFSCORRUPTED;
+               goto error0;
+       }
+       ASSERT(lev < cur->bc_nlevels);
+       /*
+        * Now walk back down the tree, fixing up the cursor's buffer
+        * pointers and key numbers.
+        */
+       for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
+               union xfs_btree_ptr     *ptrp;
+               ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
+               --lev;
+               error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
+               if (error)
+                       goto error0;
+               xfs_btree_setbuf(cur, lev, bp);
+               cur->bc_ptrs[lev] = xfs_btree_get_numrecs(block);
+       }
+ out1:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = 1;
+       return 0;
+ out0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = 0;
+       return 0;
+ error0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+       return error;
+ }
+ STATIC int
+ xfs_btree_lookup_get_block(
+       struct xfs_btree_cur    *cur,   /* btree cursor */
+       int                     level,  /* level in the btree */
+       union xfs_btree_ptr     *pp,    /* ptr to btree block */
+       struct xfs_btree_block  **blkp) /* return btree block */
+ {
+       struct xfs_buf          *bp;    /* buffer pointer for btree block */
+       int                     error = 0;
+       /* special case the root block if in an inode */
+       if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+           (level == cur->bc_nlevels - 1)) {
+               *blkp = xfs_btree_get_iroot(cur);
+               return 0;
+       }
+       /*
+        * If the old buffer at this level is for the disk address we are
+        * looking for, re-use it.
+        *
+        * Otherwise throw it away and get a new one.
+        */
+       bp = cur->bc_bufs[level];
+       if (bp && XFS_BUF_ADDR(bp) == xfs_btree_ptr_to_daddr(cur, pp)) {
+               *blkp = XFS_BUF_TO_BLOCK(bp);
+               return 0;
+       }
+       error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp);
+       if (error)
+               return error;
+       xfs_btree_setbuf(cur, level, bp);
+       return 0;
+ }
+ /*
+  * Get current search key.  For level 0 we don't actually have a key
+  * structure so we make one up from the record.  For all other levels
+  * we just return the right key.
+  */
+ STATIC union xfs_btree_key *
+ xfs_lookup_get_search_key(
+       struct xfs_btree_cur    *cur,
+       int                     level,
+       int                     keyno,
+       struct xfs_btree_block  *block,
+       union xfs_btree_key     *kp)
+ {
+       if (level == 0) {
+               cur->bc_ops->init_key_from_rec(kp,
+                               xfs_btree_rec_addr(cur, keyno, block));
+               return kp;
+       }
+       return xfs_btree_key_addr(cur, keyno, block);
+ }
+ /*
+  * Lookup the record.  The cursor is made to point to it, based on dir.
+  * stat is set to 0 if we can't find any such record, 1 for success.
+  */
+ int                                   /* error */
+ xfs_btree_lookup(
+       struct xfs_btree_cur    *cur,   /* btree cursor */
+       xfs_lookup_t            dir,    /* <=, ==, or >= */
+       int                     *stat)  /* success/failure */
+ {
+       struct xfs_btree_block  *block; /* current btree block */
+       __int64_t               diff;   /* difference for the current key */
+       int                     error;  /* error return value */
+       int                     keyno;  /* current key number */
+       int                     level;  /* level in the btree */
+       union xfs_btree_ptr     *pp;    /* ptr to btree block */
+       union xfs_btree_ptr     ptr;    /* ptr to btree block */
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_TRACE_ARGI(cur, dir);
+       XFS_BTREE_STATS_INC(cur, lookup);
+       block = NULL;
+       keyno = 0;
+       /* initialise start pointer from cursor */
+       cur->bc_ops->init_ptr_from_cur(cur, &ptr);
+       pp = &ptr;
+       /*
+        * Iterate over each level in the btree, starting at the root.
+        * For each level above the leaves, find the key we need, based
+        * on the lookup record, then follow the corresponding block
+        * pointer down to the next level.
+        */
+       for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
+               /* Get the block we need to do the lookup on. */
+               error = xfs_btree_lookup_get_block(cur, level, pp, &block);
+               if (error)
+                       goto error0;
+               if (diff == 0) {
+                       /*
+                        * If we already had a key match at a higher level, we
+                        * know we need to use the first entry in this block.
+                        */
+                       keyno = 1;
+               } else {
+                       /* Otherwise search this block. Do a binary search. */
+                       int     high;   /* high entry number */
+                       int     low;    /* low entry number */
+                       /* Set low and high entry numbers, 1-based. */
+                       low = 1;
+                       high = xfs_btree_get_numrecs(block);
+                       if (!high) {
+                               /* Block is empty, must be an empty leaf. */
+                               ASSERT(level == 0 && cur->bc_nlevels == 1);
+                               cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
+                               XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+                               *stat = 0;
+                               return 0;
+                       }
+                       /* Binary search the block. */
+                       while (low <= high) {
+                               union xfs_btree_key     key;
+                               union xfs_btree_key     *kp;
+                               XFS_BTREE_STATS_INC(cur, compare);
+                               /* keyno is average of low and high. */
+                               keyno = (low + high) >> 1;
+                               /* Get current search key */
+                               kp = xfs_lookup_get_search_key(cur, level,
+                                               keyno, block, &key);
+                               /*
+                                * Compute difference to get next direction:
+                                *  - less than, move right
+                                *  - greater than, move left
+                                *  - equal, we're done
+                                */
+                               diff = cur->bc_ops->key_diff(cur, kp);
+                               if (diff < 0)
+                                       low = keyno + 1;
+                               else if (diff > 0)
+                                       high = keyno - 1;
+                               else
+                                       break;
+                       }
+               }
+               /*
+                * If there are more levels, set up for the next level
+                * by getting the block number and filling in the cursor.
+                */
+               if (level > 0) {
+                       /*
+                        * If we moved left, we need the previous key number,
+                        * unless there isn't one.
+                        */
+                       if (diff > 0 && --keyno < 1)
+                               keyno = 1;
+                       pp = xfs_btree_ptr_addr(cur, keyno, block);
+ #ifdef DEBUG
+                       error = xfs_btree_check_ptr(cur, pp, 0, level);
+                       if (error)
+                               goto error0;
+ #endif
+                       cur->bc_ptrs[level] = keyno;
+               }
+       }
+       /* Done with the search. See if we need to adjust the results. */
+       if (dir != XFS_LOOKUP_LE && diff < 0) {
+               keyno++;
+               /*
+                * If it's a GE search and we went off the end of the block,
+                * but it's not the last block, we're in the wrong block.
+                */
+               xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
+               if (dir == XFS_LOOKUP_GE &&
+                   keyno > xfs_btree_get_numrecs(block) &&
+                   !xfs_btree_ptr_is_null(cur, &ptr)) {
+                       int     i;
+                       cur->bc_ptrs[0] = keyno;
+                       error = xfs_btree_increment(cur, 0, &i);
+                       if (error)
+                               goto error0;
+                       XFS_WANT_CORRUPTED_RETURN(i == 1);
+                       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+                       *stat = 1;
+                       return 0;
+               }
+       } else if (dir == XFS_LOOKUP_LE && diff > 0)
+               keyno--;
+       cur->bc_ptrs[0] = keyno;
+       /* Return if we succeeded or not. */
+       if (keyno == 0 || keyno > xfs_btree_get_numrecs(block))
+               *stat = 0;
+       else if (dir != XFS_LOOKUP_EQ || diff == 0)
+               *stat = 1;
+       else
+               *stat = 0;
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       return 0;
+ error0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+       return error;
+ }
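+ /*
+  * Editor's sketch, not part of this commit: each btree type supplies a
+  * key_diff method with the sign convention the binary search above
+  * relies on -- negative when the block key sorts before the search key
+  * (move right), positive when it sorts after (move left), zero on a
+  * match.  Modelled on the inode btree's implementation:
+  */
+ STATIC __int64_t
+ example_key_diff(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_key     *key)
+ {
+       return (__int64_t)be32_to_cpu(key->inobt.ir_startino) -
+                         cur->bc_rec.i.ir_startino;
+ }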
+ /*
+  * Update keys at all levels from here to the root along the cursor's path.
+  */
+ STATIC int
+ xfs_btree_updkey(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_key     *keyp,
+       int                     level)
+ {
+       struct xfs_btree_block  *block;
+       struct xfs_buf          *bp;
+       union xfs_btree_key     *kp;
+       int                     ptr;
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_TRACE_ARGIK(cur, level, keyp);
+       ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) || level >= 1);
+       /*
+        * Go up the tree from this level toward the root.
+        * At each level, update the key value to the value input.
+        * Stop when we reach a level where the cursor isn't pointing
+        * at the first entry in the block.
+        */
+       for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
+ #ifdef DEBUG
+               int             error;
+ #endif
+               block = xfs_btree_get_block(cur, level, &bp);
+ #ifdef DEBUG
+               error = xfs_btree_check_block(cur, block, level, bp);
+               if (error) {
+                       XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+                       return error;
+               }
+ #endif
+               ptr = cur->bc_ptrs[level];
+               kp = xfs_btree_key_addr(cur, ptr, block);
+               xfs_btree_copy_keys(cur, kp, keyp, 1);
+               xfs_btree_log_keys(cur, bp, ptr, ptr);
+       }
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       return 0;
+ }
+ /*
+  * Update the record referred to by cur to the value in the
+  * given record. This either works (return 0) or gets an
+  * EFSCORRUPTED error.
+  */
+ int
+ xfs_btree_update(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_rec     *rec)
+ {
+       struct xfs_btree_block  *block;
+       struct xfs_buf          *bp;
+       int                     error;
+       int                     ptr;
+       union xfs_btree_rec     *rp;
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_TRACE_ARGR(cur, rec);
+       /* Pick up the current block. */
+       block = xfs_btree_get_block(cur, 0, &bp);
+ #ifdef DEBUG
+       error = xfs_btree_check_block(cur, block, 0, bp);
+       if (error)
+               goto error0;
+ #endif
+       /* Get the address of the rec to be updated. */
+       ptr = cur->bc_ptrs[0];
+       rp = xfs_btree_rec_addr(cur, ptr, block);
+       /* Fill in the new contents and log them. */
+       xfs_btree_copy_recs(cur, rp, rec, 1);
+       xfs_btree_log_recs(cur, bp, ptr, ptr);
+       /*
+        * If we are tracking the last record in the tree and
+        * we are at the far right edge of the tree, update it.
+        */
+       if (xfs_btree_is_lastrec(cur, block, 0)) {
+               cur->bc_ops->update_lastrec(cur, block, rec,
+                                           ptr, LASTREC_UPDATE);
+       }
+       /* Updating first rec in leaf. Pass new key value up to our parent. */
+       if (ptr == 1) {
+               union xfs_btree_key     key;
+               cur->bc_ops->init_key_from_rec(&key, rec);
+               error = xfs_btree_updkey(cur, &key, 1);
+               if (error)
+                       goto error0;
+       }
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       return 0;
+ error0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+       return error;
+ }
+ /*
+  * Move 1 record left from cur/level if possible.
+  * Update cur to reflect the new path.
+  */
+ STATIC int                                    /* error */
+ xfs_btree_lshift(
+       struct xfs_btree_cur    *cur,
+       int                     level,
+       int                     *stat)          /* success/failure */
+ {
+       union xfs_btree_key     key;            /* btree key */
+       struct xfs_buf          *lbp;           /* left buffer pointer */
+       struct xfs_btree_block  *left;          /* left btree block */
+       int                     lrecs;          /* left record count */
+       struct xfs_buf          *rbp;           /* right buffer pointer */
+       struct xfs_btree_block  *right;         /* right btree block */
+       int                     rrecs;          /* right record count */
+       union xfs_btree_ptr     lptr;           /* left btree pointer */
+       union xfs_btree_key     *rkp = NULL;    /* right btree key */
+       union xfs_btree_ptr     *rpp = NULL;    /* right address pointer */
+       union xfs_btree_rec     *rrp = NULL;    /* right record pointer */
+       int                     error;          /* error return value */
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_TRACE_ARGI(cur, level);
+       if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+           level == cur->bc_nlevels - 1)
+               goto out0;
+       /* Set up variables for this block as "right". */
+       right = xfs_btree_get_block(cur, level, &rbp);
+ #ifdef DEBUG
+       error = xfs_btree_check_block(cur, right, level, rbp);
+       if (error)
+               goto error0;
+ #endif
+       /* If we've got no left sibling then we can't shift an entry left. */
+       xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
+       if (xfs_btree_ptr_is_null(cur, &lptr))
+               goto out0;
+       /*
+        * If the cursor entry is the one that would be moved, don't
+        * do it... it's too complicated.
+        */
+       if (cur->bc_ptrs[level] <= 1)
+               goto out0;
+       /* Set up the left neighbor as "left". */
+       error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
+       if (error)
+               goto error0;
+       /* If it's full, it can't take another entry. */
+       lrecs = xfs_btree_get_numrecs(left);
+       if (lrecs == cur->bc_ops->get_maxrecs(cur, level))
+               goto out0;
+       rrecs = xfs_btree_get_numrecs(right);
+       /*
+        * We add one entry to the left side and remove one from the right
+        * side.  Account for it here; the changes will be updated on disk
+        * and logged later.
+        */
+       lrecs++;
+       rrecs--;
+       XFS_BTREE_STATS_INC(cur, lshift);
+       XFS_BTREE_STATS_ADD(cur, moves, 1);
+       /*
+        * If non-leaf, copy a key and a ptr to the left block.
+        * Log the changes to the left block.
+        */
+       if (level > 0) {
+               /* It's a non-leaf.  Move keys and pointers. */
+               union xfs_btree_key     *lkp;   /* left btree key */
+               union xfs_btree_ptr     *lpp;   /* left address pointer */
+               lkp = xfs_btree_key_addr(cur, lrecs, left);
+               rkp = xfs_btree_key_addr(cur, 1, right);
+               lpp = xfs_btree_ptr_addr(cur, lrecs, left);
+               rpp = xfs_btree_ptr_addr(cur, 1, right);
+ #ifdef DEBUG
+               error = xfs_btree_check_ptr(cur, rpp, 0, level);
+               if (error)
+                       goto error0;
+ #endif
+               xfs_btree_copy_keys(cur, lkp, rkp, 1);
+               xfs_btree_copy_ptrs(cur, lpp, rpp, 1);
+               xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
+               xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);
+               ASSERT(cur->bc_ops->keys_inorder(cur,
+                       xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
+       } else {
+               /* It's a leaf.  Move records.  */
+               union xfs_btree_rec     *lrp;   /* left record pointer */
+               lrp = xfs_btree_rec_addr(cur, lrecs, left);
+               rrp = xfs_btree_rec_addr(cur, 1, right);
+               xfs_btree_copy_recs(cur, lrp, rrp, 1);
+               xfs_btree_log_recs(cur, lbp, lrecs, lrecs);
+               ASSERT(cur->bc_ops->recs_inorder(cur,
+                       xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
+       }
+       xfs_btree_set_numrecs(left, lrecs);
+       xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
+       xfs_btree_set_numrecs(right, rrecs);
+       xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
+       /*
+        * Slide the contents of right down one entry.
+        */
+       XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
+       if (level > 0) {
+               /* It's a nonleaf.  Operate on keys and ptrs. */
+ #ifdef DEBUG
+               int                     i;              /* loop index */
+               for (i = 0; i < rrecs; i++) {
+                       error = xfs_btree_check_ptr(cur, rpp, i + 1, level);
+                       if (error)
+                               goto error0;
+               }
+ #endif
+               xfs_btree_shift_keys(cur,
+                               xfs_btree_key_addr(cur, 2, right),
+                               -1, rrecs);
+               xfs_btree_shift_ptrs(cur,
+                               xfs_btree_ptr_addr(cur, 2, right),
+                               -1, rrecs);
+               xfs_btree_log_keys(cur, rbp, 1, rrecs);
+               xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
+       } else {
+               /* It's a leaf.  Operate on records. */
+               xfs_btree_shift_recs(cur,
+                       xfs_btree_rec_addr(cur, 2, right),
+                       -1, rrecs);
+               xfs_btree_log_recs(cur, rbp, 1, rrecs);
+               /*
+                * If it's the first record in the block, we'll need a key
+                * structure to pass up to the next level (updkey).
+                */
+               cur->bc_ops->init_key_from_rec(&key,
+                       xfs_btree_rec_addr(cur, 1, right));
+               rkp = &key;
+       }
+       /* Update the parent key values of right. */
+       error = xfs_btree_updkey(cur, rkp, level + 1);
+       if (error)
+               goto error0;
+       /* Slide the cursor value left one. */
+       cur->bc_ptrs[level]--;
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = 1;
+       return 0;
+ out0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = 0;
+       return 0;
+ error0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+       return error;
+ }
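+ /*
+  * Editor's illustration, not part of this commit: for a leaf, lshift
+  * moves the first entry of the cursor's block into its left sibling
+  * (rshift below mirrors it to the right):
+  *
+  *    before: left [A B C]    right [D E F]   cursor on E (ptr 2)
+  *    after:  left [A B C D]  right [E F]     cursor on E (ptr 1)
+  */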
+ /*
+  * Move 1 record right from cur/level if possible.
+  * Update cur to reflect the new path.
+  */
+ STATIC int                                    /* error */
+ xfs_btree_rshift(
+       struct xfs_btree_cur    *cur,
+       int                     level,
+       int                     *stat)          /* success/failure */
+ {
+       union xfs_btree_key     key;            /* btree key */
+       struct xfs_buf          *lbp;           /* left buffer pointer */
+       struct xfs_btree_block  *left;          /* left btree block */
+       struct xfs_buf          *rbp;           /* right buffer pointer */
+       struct xfs_btree_block  *right;         /* right btree block */
+       struct xfs_btree_cur    *tcur;          /* temporary btree cursor */
+       union xfs_btree_ptr     rptr;           /* right block pointer */
+       union xfs_btree_key     *rkp;           /* right btree key */
+       int                     rrecs;          /* right record count */
+       int                     lrecs;          /* left record count */
+       int                     error;          /* error return value */
+       int                     i;              /* loop counter */
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_TRACE_ARGI(cur, level);
+       if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+           (level == cur->bc_nlevels - 1))
+               goto out0;
+       /* Set up variables for this block as "left". */
+       left = xfs_btree_get_block(cur, level, &lbp);
+ #ifdef DEBUG
+       error = xfs_btree_check_block(cur, left, level, lbp);
+       if (error)
+               goto error0;
+ #endif
+       /* If we've got no right sibling then we can't shift an entry right. */
+       xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
+       if (xfs_btree_ptr_is_null(cur, &rptr))
+               goto out0;
+       /*
+        * If the cursor entry is the one that would be moved, don't
+        * do it... it's too complicated.
+        */
+       lrecs = xfs_btree_get_numrecs(left);
+       if (cur->bc_ptrs[level] >= lrecs)
+               goto out0;
+       /* Set up the right neighbor as "right". */
+       error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
+       if (error)
+               goto error0;
+       /* If it's full, it can't take another entry. */
+       rrecs = xfs_btree_get_numrecs(right);
+       if (rrecs == cur->bc_ops->get_maxrecs(cur, level))
+               goto out0;
+       XFS_BTREE_STATS_INC(cur, rshift);
+       XFS_BTREE_STATS_ADD(cur, moves, rrecs);
+       /*
+        * Make a hole at the start of the right neighbor block, then
+        * copy the last left block entry to the hole.
+        */
+       if (level > 0) {
+               /* It's a nonleaf.  Make a hole in the keys and ptrs. */
+               union xfs_btree_key     *lkp;
+               union xfs_btree_ptr     *lpp;
+               union xfs_btree_ptr     *rpp;
+               lkp = xfs_btree_key_addr(cur, lrecs, left);
+               lpp = xfs_btree_ptr_addr(cur, lrecs, left);
+               rkp = xfs_btree_key_addr(cur, 1, right);
+               rpp = xfs_btree_ptr_addr(cur, 1, right);
+ #ifdef DEBUG
+               for (i = rrecs - 1; i >= 0; i--) {
+                       error = xfs_btree_check_ptr(cur, rpp, i, level);
+                       if (error)
+                               goto error0;
+               }
+ #endif
+               xfs_btree_shift_keys(cur, rkp, 1, rrecs);
+               xfs_btree_shift_ptrs(cur, rpp, 1, rrecs);
+ #ifdef DEBUG
+               error = xfs_btree_check_ptr(cur, lpp, 0, level);
+               if (error)
+                       goto error0;
+ #endif
+               /* Now put the new data in, and log it. */
+               xfs_btree_copy_keys(cur, rkp, lkp, 1);
+               xfs_btree_copy_ptrs(cur, rpp, lpp, 1);
+               xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
+               xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);
+               ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
+                       xfs_btree_key_addr(cur, 2, right)));
+       } else {
+               /* It's a leaf.  Make a hole in the records. */
+               union xfs_btree_rec     *lrp;
+               union xfs_btree_rec     *rrp;
+               lrp = xfs_btree_rec_addr(cur, lrecs, left);
+               rrp = xfs_btree_rec_addr(cur, 1, right);
+               xfs_btree_shift_recs(cur, rrp, 1, rrecs);
+               /* Now put the new data in, and log it. */
+               xfs_btree_copy_recs(cur, rrp, lrp, 1);
+               xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);
+               cur->bc_ops->init_key_from_rec(&key, rrp);
+               rkp = &key;
+               ASSERT(cur->bc_ops->recs_inorder(cur, rrp,
+                       xfs_btree_rec_addr(cur, 2, right)));
+       }
+       /*
+        * Decrement and log left's numrecs, bump and log right's numrecs.
+        */
+       xfs_btree_set_numrecs(left, --lrecs);
+       xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
+       xfs_btree_set_numrecs(right, ++rrecs);
+       xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
+       /*
+        * Using a temporary cursor, update the parent key values of the
+        * block on the right.
+        */
+       error = xfs_btree_dup_cursor(cur, &tcur);
+       if (error)
+               goto error0;
+       i = xfs_btree_lastrec(tcur, level);
+       XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+       error = xfs_btree_increment(tcur, level, &i);
+       if (error)
+               goto error1;
+       error = xfs_btree_updkey(tcur, rkp, level + 1);
+       if (error)
+               goto error1;
+       xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = 1;
+       return 0;
+ out0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = 0;
+       return 0;
+ error0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+       return error;
+ error1:
+       XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR);
+       xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
+       return error;
+ }
+ /*
+  * Split cur/level block in half.
+  * Return new block number and the key to its first
+  * record (to be inserted into parent).
+  */
+ STATIC int                                    /* error */
++__xfs_btree_split(
+       struct xfs_btree_cur    *cur,
+       int                     level,
+       union xfs_btree_ptr     *ptrp,
+       union xfs_btree_key     *key,
+       struct xfs_btree_cur    **curp,
+       int                     *stat)          /* success/failure */
+ {
+       union xfs_btree_ptr     lptr;           /* left sibling block ptr */
+       struct xfs_buf          *lbp;           /* left buffer pointer */
+       struct xfs_btree_block  *left;          /* left btree block */
+       union xfs_btree_ptr     rptr;           /* right sibling block ptr */
+       struct xfs_buf          *rbp;           /* right buffer pointer */
+       struct xfs_btree_block  *right;         /* right btree block */
+       union xfs_btree_ptr     rrptr;          /* right-right sibling ptr */
+       struct xfs_buf          *rrbp;          /* right-right buffer pointer */
+       struct xfs_btree_block  *rrblock;       /* right-right btree block */
+       int                     lrecs;
+       int                     rrecs;
+       int                     src_index;
+       int                     error;          /* error return value */
+ #ifdef DEBUG
+       int                     i;
+ #endif
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_TRACE_ARGIPK(cur, level, *ptrp, key);
+       XFS_BTREE_STATS_INC(cur, split);
+       /* Set up left block (current one). */
+       left = xfs_btree_get_block(cur, level, &lbp);
+ #ifdef DEBUG
+       error = xfs_btree_check_block(cur, left, level, lbp);
+       if (error)
+               goto error0;
+ #endif
+       xfs_btree_buf_to_ptr(cur, lbp, &lptr);
+       /* Allocate the new block. If we can't do it, we're toast. Give up. */
+       error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, stat);
+       if (error)
+               goto error0;
+       if (*stat == 0)
+               goto out0;
+       XFS_BTREE_STATS_INC(cur, alloc);
+       /* Set up the new block as "right". */
+       error = xfs_btree_get_buf_block(cur, &rptr, 0, &right, &rbp);
+       if (error)
+               goto error0;
+       /* Fill in the btree header for the new right block. */
+       xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0);
+       /*
+        * Split the entries between the old and the new block evenly.
+        * Make sure that if there's an odd number of entries now, each
+        * new block will have the same number of entries.
+        */
+       lrecs = xfs_btree_get_numrecs(left);
+       rrecs = lrecs / 2;
+       if ((lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1)
+               rrecs++;
+       src_index = (lrecs - rrecs + 1);
+       XFS_BTREE_STATS_ADD(cur, moves, rrecs);
+       /*
+        * Copy btree block entries from the left block over to the
+        * new block, the right. Update the right block and log the
+        * changes.
+        */
+       if (level > 0) {
+               /* It's a non-leaf.  Move keys and pointers. */
+               union xfs_btree_key     *lkp;   /* left btree key */
+               union xfs_btree_ptr     *lpp;   /* left address pointer */
+               union xfs_btree_key     *rkp;   /* right btree key */
+               union xfs_btree_ptr     *rpp;   /* right address pointer */
+               lkp = xfs_btree_key_addr(cur, src_index, left);
+               lpp = xfs_btree_ptr_addr(cur, src_index, left);
+               rkp = xfs_btree_key_addr(cur, 1, right);
+               rpp = xfs_btree_ptr_addr(cur, 1, right);
+ #ifdef DEBUG
+               for (i = src_index; i < rrecs; i++) {
+                       error = xfs_btree_check_ptr(cur, lpp, i, level);
+                       if (error)
+                               goto error0;
+               }
+ #endif
+               xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
+               xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);
+               xfs_btree_log_keys(cur, rbp, 1, rrecs);
+               xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
+               /* Grab the keys to the entries moved to the right block */
+               xfs_btree_copy_keys(cur, key, rkp, 1);
+       } else {
+               /* It's a leaf.  Move records.  */
+               union xfs_btree_rec     *lrp;   /* left record pointer */
+               union xfs_btree_rec     *rrp;   /* right record pointer */
+               lrp = xfs_btree_rec_addr(cur, src_index, left);
+               rrp = xfs_btree_rec_addr(cur, 1, right);
+               xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
+               xfs_btree_log_recs(cur, rbp, 1, rrecs);
+               cur->bc_ops->init_key_from_rec(key,
+                       xfs_btree_rec_addr(cur, 1, right));
+       }
+       /*
+        * Find the left block number by looking in the buffer.
+        * Adjust numrecs and the sibling pointers.
+        */
+       xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
+       xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
+       xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
+       xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
+       lrecs -= rrecs;
+       xfs_btree_set_numrecs(left, lrecs);
+       xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);
+       xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
+       xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
+       /*
+        * If there's a block to the new block's right, make that block
+        * point back to right instead of to left.
+        */
+       if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
+               error = xfs_btree_read_buf_block(cur, &rrptr,
+                                                       0, &rrblock, &rrbp);
+               if (error)
+                       goto error0;
+               xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
+               xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
+       }
+       /*
+        * If the cursor is really in the right block, move it there.
+        * If it's just pointing past the last entry in left, then we'll
+        * insert there, so don't change anything in that case.
+        */
+       if (cur->bc_ptrs[level] > lrecs + 1) {
+               xfs_btree_setbuf(cur, level, rbp);
+               cur->bc_ptrs[level] -= lrecs;
+       }
+       /*
+        * If there are more levels, we'll need another cursor that refers
+        * to the right block, no matter where this cursor was.
+        */
+       if (level + 1 < cur->bc_nlevels) {
+               error = xfs_btree_dup_cursor(cur, curp);
+               if (error)
+                       goto error0;
+               (*curp)->bc_ptrs[level + 1]++;
+       }
+       *ptrp = rptr;
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = 1;
+       return 0;
+ out0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = 0;
+       return 0;
+ error0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+       return error;
+ }
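+ /*
+  * Editor's worked example, not part of this commit: with lrecs == 7,
+  * the arithmetic above sets rrecs = 3 (4 entries stay left, 3 move
+  * right).  If the pending insert will land in the left half
+  * (bc_ptrs[level] <= 4), rrecs is bumped to 4 instead, so the 3/4
+  * split becomes 4/4 once that insert goes in.
+  */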
++struct xfs_btree_split_args {
++      struct xfs_btree_cur    *cur;
++      int                     level;
++      union xfs_btree_ptr     *ptrp;
++      union xfs_btree_key     *key;
++      struct xfs_btree_cur    **curp;
++      int                     *stat;          /* success/failure */
++      int                     result;
++      bool                    kswapd; /* allocation in kswapd context */
++      struct completion       *done;
++      struct work_struct      work;
++};
++
++/*
++ * Stack switching interfaces for allocation
++ */
++static void
++xfs_btree_split_worker(
++      struct work_struct      *work)
++{
++      struct xfs_btree_split_args     *args = container_of(work,
++                                              struct xfs_btree_split_args, work);
++      unsigned long           pflags;
++      unsigned long           new_pflags = PF_FSTRANS;
++
++      /*
++       * We are in a transaction context here, but may also be doing work
++       * in kswapd context, and hence we may need to inherit that state
++       * temporarily to ensure that we don't block waiting for memory reclaim
++       * in any way.
++       */
++      if (args->kswapd)
++              new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
++
++      current_set_flags_nested(&pflags, new_pflags);
++
++      args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
++                                       args->key, args->curp, args->stat);
++      complete(args->done);
++
++      current_restore_flags_nested(&pflags, new_pflags);
++}
++
++/*
++ * BMBT split requests often come in with little stack to work on. Push
++ * them off to a worker thread so there is lots of stack to use. For the other
++ * btree types, just call directly to avoid the context switch overhead here.
++ */
++STATIC int                                    /* error */
++xfs_btree_split(
++      struct xfs_btree_cur    *cur,
++      int                     level,
++      union xfs_btree_ptr     *ptrp,
++      union xfs_btree_key     *key,
++      struct xfs_btree_cur    **curp,
++      int                     *stat)          /* success/failure */
++{
++      struct xfs_btree_split_args     args;
++      DECLARE_COMPLETION_ONSTACK(done);
++
++      if (cur->bc_btnum != XFS_BTNUM_BMAP)
++              return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
++
++      args.cur = cur;
++      args.level = level;
++      args.ptrp = ptrp;
++      args.key = key;
++      args.curp = curp;
++      args.stat = stat;
++      args.done = &done;
++      args.kswapd = current_is_kswapd();
++      INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
++      queue_work(xfs_alloc_wq, &args.work);
++      wait_for_completion(&done);
++      destroy_work_on_stack(&args.work);
++      return args.result;
++}
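++/*
++ * Editor's note, not part of this commit: the work item and completion
++ * both live on the submitter's stack, which is safe only because
++ * wait_for_completion() pins that frame until the worker has called
++ * complete(); destroy_work_on_stack() then pairs with
++ * INIT_WORK_ONSTACK() for the debug-objects machinery.
++ */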
++
++
+ /*
+  * Copy the old inode root contents into a real block and make the
+  * broot point to it.
+  */
+ int                                           /* error */
+ xfs_btree_new_iroot(
+       struct xfs_btree_cur    *cur,           /* btree cursor */
+       int                     *logflags,      /* logging flags for inode */
+       int                     *stat)          /* return status - 0 fail */
+ {
+       struct xfs_buf          *cbp;           /* buffer for cblock */
+       struct xfs_btree_block  *block;         /* btree block */
+       struct xfs_btree_block  *cblock;        /* child btree block */
+       union xfs_btree_key     *ckp;           /* child key pointer */
+       union xfs_btree_ptr     *cpp;           /* child ptr pointer */
+       union xfs_btree_key     *kp;            /* pointer to btree key */
+       union xfs_btree_ptr     *pp;            /* pointer to block addr */
+       union xfs_btree_ptr     nptr;           /* new block addr */
+       int                     level;          /* btree level */
+       int                     error;          /* error return code */
+ #ifdef DEBUG
+       int                     i;              /* loop counter */
+ #endif
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_STATS_INC(cur, newroot);
+       ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
+       level = cur->bc_nlevels - 1;
+       block = xfs_btree_get_iroot(cur);
+       pp = xfs_btree_ptr_addr(cur, 1, block);
+       /* Allocate the new block. If we can't do it, we're toast. Give up. */
+       error = cur->bc_ops->alloc_block(cur, pp, &nptr, stat);
+       if (error)
+               goto error0;
+       if (*stat == 0) {
+               XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+               return 0;
+       }
+       XFS_BTREE_STATS_INC(cur, alloc);
+       /* Copy the root into a real block. */
+       error = xfs_btree_get_buf_block(cur, &nptr, 0, &cblock, &cbp);
+       if (error)
+               goto error0;
+       /*
+        * We can't just memcpy() the root in for CRC-enabled btree blocks.
+        * In that case we also have to ensure the blkno remains correct.
+        */
+       memcpy(cblock, block, xfs_btree_block_len(cur));
+       if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
+               if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+                       cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn);
+               else
+                       cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn);
+       }
+       be16_add_cpu(&block->bb_level, 1);
+       xfs_btree_set_numrecs(block, 1);
+       cur->bc_nlevels++;
+       cur->bc_ptrs[level + 1] = 1;
+       kp = xfs_btree_key_addr(cur, 1, block);
+       ckp = xfs_btree_key_addr(cur, 1, cblock);
+       xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock));
+       cpp = xfs_btree_ptr_addr(cur, 1, cblock);
+ #ifdef DEBUG
+       for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
+               error = xfs_btree_check_ptr(cur, pp, i, level);
+               if (error)
+                       goto error0;
+       }
+ #endif
+       xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock));
+ #ifdef DEBUG
+       error = xfs_btree_check_ptr(cur, &nptr, 0, level);
+       if (error)
+               goto error0;
+ #endif
+       xfs_btree_copy_ptrs(cur, pp, &nptr, 1);
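+       /* Shrink the inode root to hold just the single remaining record. */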
+       xfs_iroot_realloc(cur->bc_private.b.ip,
+                         1 - xfs_btree_get_numrecs(cblock),
+                         cur->bc_private.b.whichfork);
+       xfs_btree_setbuf(cur, level, cbp);
+       /*
+        * Do all this logging at the end so that
+        * the root is at the right level.
+        */
+       xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
+       xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
+       xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
+       *logflags |=
+               XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork);
+       *stat = 1;
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       return 0;
+ error0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+       return error;
+ }
+ /*
+  * Allocate a new root block, fill it in.
+  */
+ STATIC int                            /* error */
+ xfs_btree_new_root(
+       struct xfs_btree_cur    *cur,   /* btree cursor */
+       int                     *stat)  /* success/failure */
+ {
+       struct xfs_btree_block  *block; /* one half of the old root block */
+       struct xfs_buf          *bp;    /* buffer containing block */
+       int                     error;  /* error return value */
+       struct xfs_buf          *lbp;   /* left buffer pointer */
+       struct xfs_btree_block  *left;  /* left btree block */
+       struct xfs_buf          *nbp;   /* new (root) buffer */
+       struct xfs_btree_block  *new;   /* new (root) btree block */
+       int                     nptr;   /* new value for key index, 1 or 2 */
+       struct xfs_buf          *rbp;   /* right buffer pointer */
+       struct xfs_btree_block  *right; /* right btree block */
+       union xfs_btree_ptr     rptr;
+       union xfs_btree_ptr     lptr;
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_STATS_INC(cur, newroot);
+       /* initialise our start point from the cursor */
+       cur->bc_ops->init_ptr_from_cur(cur, &rptr);
+       /* Allocate the new block. If we can't do it, we're toast. Give up. */
+       error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, stat);
+       if (error)
+               goto error0;
+       if (*stat == 0)
+               goto out0;
+       XFS_BTREE_STATS_INC(cur, alloc);
+       /* Set up the new block. */
+       error = xfs_btree_get_buf_block(cur, &lptr, 0, &new, &nbp);
+       if (error)
+               goto error0;
+       /* Set the root in the holding structure, increasing the level by 1. */
+       cur->bc_ops->set_root(cur, &lptr, 1);
+       /*
+        * At the previous root level there are now two blocks: the old root,
+        * and the new block generated when it was split.  We don't know which
+        * one the cursor is pointing at, so we set up variables "left" and
+        * "right" for each case.
+        */
+       block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp);
+ #ifdef DEBUG
+       error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp);
+       if (error)
+               goto error0;
+ #endif
+       xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
+       if (!xfs_btree_ptr_is_null(cur, &rptr)) {
+               /* Our block is left, pick up the right block. */
+               lbp = bp;
+               xfs_btree_buf_to_ptr(cur, lbp, &lptr);
+               left = block;
+               error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
+               if (error)
+                       goto error0;
+               bp = rbp;
+               nptr = 1;
+       } else {
+               /* Our block is right, pick up the left block. */
+               rbp = bp;
+               xfs_btree_buf_to_ptr(cur, rbp, &rptr);
+               right = block;
+               xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
+               error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
+               if (error)
+                       goto error0;
+               bp = lbp;
+               nptr = 2;
+       }
+       /* Fill in the new block's btree header and log it. */
+       xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2);
+       xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
+       ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) &&
+                       !xfs_btree_ptr_is_null(cur, &rptr));
+       /* Fill in the key data in the new root. */
+       if (xfs_btree_get_level(left) > 0) {
+               xfs_btree_copy_keys(cur,
+                               xfs_btree_key_addr(cur, 1, new),
+                               xfs_btree_key_addr(cur, 1, left), 1);
+               xfs_btree_copy_keys(cur,
+                               xfs_btree_key_addr(cur, 2, new),
+                               xfs_btree_key_addr(cur, 1, right), 1);
+       } else {
+               cur->bc_ops->init_key_from_rec(
+                               xfs_btree_key_addr(cur, 1, new),
+                               xfs_btree_rec_addr(cur, 1, left));
+               cur->bc_ops->init_key_from_rec(
+                               xfs_btree_key_addr(cur, 2, new),
+                               xfs_btree_rec_addr(cur, 1, right));
+       }
+       xfs_btree_log_keys(cur, nbp, 1, 2);
+       /* Fill in the pointer data in the new root. */
+       xfs_btree_copy_ptrs(cur,
+               xfs_btree_ptr_addr(cur, 1, new), &lptr, 1);
+       xfs_btree_copy_ptrs(cur,
+               xfs_btree_ptr_addr(cur, 2, new), &rptr, 1);
+       xfs_btree_log_ptrs(cur, nbp, 1, 2);
+       /* Fix up the cursor. */
+       xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
+       cur->bc_ptrs[cur->bc_nlevels] = nptr;
+       cur->bc_nlevels++;
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = 1;
+       return 0;
+ error0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+       return error;
+ out0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = 0;
+       return 0;
+ }
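+ /*
+  * Make room in a full btree block.  Try shifting an entry to the right
+  * sibling first, then to the left sibling, and finally split the block.
+  * An inode root is instead grown in place, or migrated out to a real
+  * block when it is already at its maximum size.
+  */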
+ STATIC int
+ xfs_btree_make_block_unfull(
+       struct xfs_btree_cur    *cur,   /* btree cursor */
+       int                     level,  /* btree level */
+       int                     numrecs,/* # of recs in block */
+       int                     *oindex,/* old tree index */
+       int                     *index, /* new tree index */
+       union xfs_btree_ptr     *nptr,  /* new btree ptr */
+       struct xfs_btree_cur    **ncur, /* new btree cursor */
+       union xfs_btree_rec     *nrec,  /* new record */
+       int                     *stat)
+ {
+       union xfs_btree_key     key;    /* new btree key value */
+       int                     error = 0;
+       if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+           level == cur->bc_nlevels - 1) {
+               struct xfs_inode *ip = cur->bc_private.b.ip;
+               if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
+                       /* A root block that can be made bigger. */
+                       xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
+               } else {
+                       /* A root block that needs replacing */
+                       int     logflags = 0;
+                       error = xfs_btree_new_iroot(cur, &logflags, stat);
+                       if (error || *stat == 0)
+                               return error;
+                       xfs_trans_log_inode(cur->bc_tp, ip, logflags);
+               }
+               return 0;
+       }
+       /* First, try shifting an entry to the right neighbor. */
+       error = xfs_btree_rshift(cur, level, stat);
+       if (error || *stat)
+               return error;
+       /* Next, try shifting an entry to the left neighbor. */
+       error = xfs_btree_lshift(cur, level, stat);
+       if (error)
+               return error;
+       if (*stat) {
+               *oindex = *index = cur->bc_ptrs[level];
+               return 0;
+       }
+       /*
+        * Next, try splitting the current block in half.
+        *
+        * If this works we have to re-set our variables because we
+        * could be in a different block now.
+        */
+       error = xfs_btree_split(cur, level, nptr, &key, ncur, stat);
+       if (error || *stat == 0)
+               return error;
+       *index = cur->bc_ptrs[level];
+       cur->bc_ops->init_rec_from_key(&key, nrec);
+       return 0;
+ }
+ /*
+  * Insert one record/level.  Return information to the caller
+  * allowing the next level up to proceed if necessary.
+  */
+ STATIC int
+ xfs_btree_insrec(
+       struct xfs_btree_cur    *cur,   /* btree cursor */
+       int                     level,  /* level to insert record at */
+       union xfs_btree_ptr     *ptrp,  /* i/o: block number inserted */
+       union xfs_btree_rec     *recp,  /* i/o: record data inserted */
+       struct xfs_btree_cur    **curp, /* output: new cursor replacing cur */
+       int                     *stat)  /* success/failure */
+ {
+       struct xfs_btree_block  *block; /* btree block */
+       struct xfs_buf          *bp;    /* buffer for block */
+       union xfs_btree_key     key;    /* btree key */
+       union xfs_btree_ptr     nptr;   /* new block ptr */
+       struct xfs_btree_cur    *ncur;  /* new btree cursor */
+       union xfs_btree_rec     nrec;   /* new record (from a split) */
+       int                     optr;   /* old key/record index */
+       int                     ptr;    /* key/record index */
+       int                     numrecs;/* number of records */
+       int                     error;  /* error return value */
+ #ifdef DEBUG
+       int                     i;
+ #endif
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, recp);
+       ncur = NULL;
+       /*
+        * If we have an external root pointer, and we've made it to the
+        * root level, allocate a new root block and we're done.
+        */
+       if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+           (level >= cur->bc_nlevels)) {
+               error = xfs_btree_new_root(cur, stat);
+               xfs_btree_set_ptr_null(cur, ptrp);
+               XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+               return error;
+       }
+       /* If we're off the left edge, return failure. */
+       ptr = cur->bc_ptrs[level];
+       if (ptr == 0) {
+               XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+               *stat = 0;
+               return 0;
+       }
+       /* Make a key out of the record data to be inserted, and save it. */
+       cur->bc_ops->init_key_from_rec(&key, recp);
+       optr = ptr;
+       XFS_BTREE_STATS_INC(cur, insrec);
+       /* Get pointers to the btree buffer and block. */
+       block = xfs_btree_get_block(cur, level, &bp);
+       numrecs = xfs_btree_get_numrecs(block);
+ #ifdef DEBUG
+       error = xfs_btree_check_block(cur, block, level, bp);
+       if (error)
+               goto error0;
+       /* Check that the new entry is being inserted in the right place. */
+       if (ptr <= numrecs) {
+               if (level == 0) {
+                       ASSERT(cur->bc_ops->recs_inorder(cur, recp,
+                               xfs_btree_rec_addr(cur, ptr, block)));
+               } else {
+                       ASSERT(cur->bc_ops->keys_inorder(cur, &key,
+                               xfs_btree_key_addr(cur, ptr, block)));
+               }
+       }
+ #endif
+       /*
+        * If the block is full, we can't insert the new entry until we
+        * make the block un-full.
+        */
+       xfs_btree_set_ptr_null(cur, &nptr);
+       if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
+               error = xfs_btree_make_block_unfull(cur, level, numrecs,
+                                       &optr, &ptr, &nptr, &ncur, &nrec, stat);
+               if (error || *stat == 0)
+                       goto error0;
+       }
+       /*
+        * The current block may have changed if the block was
+        * previously full and we have just made space in it.
+        */
+       block = xfs_btree_get_block(cur, level, &bp);
+       numrecs = xfs_btree_get_numrecs(block);
+ #ifdef DEBUG
+       error = xfs_btree_check_block(cur, block, level, bp);
+       if (error)
+               return error;
+ #endif
+       /*
+        * At this point we know there's room for our new entry in the block
+        * we're pointing at.
+        */
+       XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1);
+       if (level > 0) {
+               /* It's a nonleaf. Make a hole in the keys and ptrs. */
+               union xfs_btree_key     *kp;
+               union xfs_btree_ptr     *pp;
+               kp = xfs_btree_key_addr(cur, ptr, block);
+               pp = xfs_btree_ptr_addr(cur, ptr, block);
+ #ifdef DEBUG
+               for (i = numrecs - ptr; i >= 0; i--) {
+                       error = xfs_btree_check_ptr(cur, pp, i, level);
+                       if (error)
+                               return error;
+               }
+ #endif
+               xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
+               xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1);
+ #ifdef DEBUG
+               error = xfs_btree_check_ptr(cur, ptrp, 0, level);
+               if (error)
+                       goto error0;
+ #endif
+               /* Now put the new data in, bump numrecs and log it. */
+               xfs_btree_copy_keys(cur, kp, &key, 1);
+               xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
+               numrecs++;
+               xfs_btree_set_numrecs(block, numrecs);
+               xfs_btree_log_ptrs(cur, bp, ptr, numrecs);
+               xfs_btree_log_keys(cur, bp, ptr, numrecs);
+ #ifdef DEBUG
+               if (ptr < numrecs) {
+                       ASSERT(cur->bc_ops->keys_inorder(cur, kp,
+                               xfs_btree_key_addr(cur, ptr + 1, block)));
+               }
+ #endif
+       } else {
+               /* It's a leaf. Make a hole in the records. */
+               union xfs_btree_rec             *rp;
+               rp = xfs_btree_rec_addr(cur, ptr, block);
+               xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);
+               /* Now put the new data in, bump numrecs and log it. */
+               xfs_btree_copy_recs(cur, rp, recp, 1);
+               xfs_btree_set_numrecs(block, ++numrecs);
+               xfs_btree_log_recs(cur, bp, ptr, numrecs);
+ #ifdef DEBUG
+               if (ptr < numrecs) {
+                       ASSERT(cur->bc_ops->recs_inorder(cur, rp,
+                               xfs_btree_rec_addr(cur, ptr + 1, block)));
+               }
+ #endif
+       }
+       /* Log the new number of records in the btree header. */
+       xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
+       /* If we inserted at the start of a block, update the parents' keys. */
+       if (optr == 1) {
+               error = xfs_btree_updkey(cur, &key, level + 1);
+               if (error)
+                       goto error0;
+       }
+       /*
+        * If we are tracking the last record in the tree and
+        * we are at the far right edge of the tree, update it.
+        */
+       if (xfs_btree_is_lastrec(cur, block, level)) {
+               cur->bc_ops->update_lastrec(cur, block, recp,
+                                           ptr, LASTREC_INSREC);
+       }
+       /*
+        * Return the new block number, if any.
+        * If there is one, give back a record value and a cursor too.
+        */
+       *ptrp = nptr;
+       if (!xfs_btree_ptr_is_null(cur, &nptr)) {
+               *recp = nrec;
+               *curp = ncur;
+       }
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = 1;
+       return 0;
+ error0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+       return error;
+ }
+ /*
+  * Insert the record at the point referenced by cur.
+  *
+  * A multi-level split of the tree on insert will invalidate the original
+  * cursor.  All callers of this function should assume that the cursor is
+  * no longer valid and revalidate it.
+  */
+ int
+ xfs_btree_insert(
+       struct xfs_btree_cur    *cur,
+       int                     *stat)
+ {
+       int                     error;  /* error return value */
+       int                     i;      /* result value, 0 for failure */
+       int                     level;  /* current level number in btree */
+       union xfs_btree_ptr     nptr;   /* new block number (split result) */
+       struct xfs_btree_cur    *ncur;  /* new cursor (split result) */
+       struct xfs_btree_cur    *pcur;  /* previous level's cursor */
+       union xfs_btree_rec     rec;    /* record to insert */
+       level = 0;
+       ncur = NULL;
+       pcur = cur;
+       xfs_btree_set_ptr_null(cur, &nptr);
+       cur->bc_ops->init_rec_from_cur(cur, &rec);
+       /*
+        * Loop going up the tree, starting at the leaf level.
+        * Stop when we don't get a split block, that must mean that
+        * the insert is finished with this level.
+        */
+       do {
+               /*
+                * Insert nrec/nptr into this level of the tree.
+                * Note if we fail, nptr will be null.
+                */
+               error = xfs_btree_insrec(pcur, level, &nptr, &rec, &ncur, &i);
+               if (error) {
+                       if (pcur != cur)
+                               xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
+                       goto error0;
+               }
+               XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+               level++;
+               /*
+                * See if the cursor we just used is trash.
+                * We can't trash the caller's cursor, but otherwise we
+                * should trash it if ncur is a new cursor or we're about
+                * to be done.
+                */
+               if (pcur != cur &&
+                   (ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
+                       /* Save the state from the cursor before we trash it */
+                       if (cur->bc_ops->update_cursor)
+                               cur->bc_ops->update_cursor(pcur, cur);
+                       cur->bc_nlevels = pcur->bc_nlevels;
+                       xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
+               }
+               /* If we got a new cursor, switch to it. */
+               if (ncur) {
+                       pcur = ncur;
+                       ncur = NULL;
+               }
+       } while (!xfs_btree_ptr_is_null(cur, &nptr));
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = i;
+       return 0;
+ error0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+       return error;
+ }
+ /*
+  * Try to merge a non-leaf block back into the inode root.
+  *
+  * Note: the killroot name comes from the fact that we're effectively
+  * killing the old root block.  But because we can't just delete the
+  * inode, we have to copy the single block it was pointing to into the
+  * inode.
+  */
+ STATIC int
+ xfs_btree_kill_iroot(
+       struct xfs_btree_cur    *cur)
+ {
+       int                     whichfork = cur->bc_private.b.whichfork;
+       struct xfs_inode        *ip = cur->bc_private.b.ip;
+       struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
+       struct xfs_btree_block  *block;
+       struct xfs_btree_block  *cblock;
+       union xfs_btree_key     *kp;
+       union xfs_btree_key     *ckp;
+       union xfs_btree_ptr     *pp;
+       union xfs_btree_ptr     *cpp;
+       struct xfs_buf          *cbp;
+       int                     level;
+       int                     index;
+       int                     numrecs;
+ #ifdef DEBUG
+       union xfs_btree_ptr     ptr;
+       int                     i;
+ #endif
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
+       ASSERT(cur->bc_nlevels > 1);
+       /*
+        * Don't deal with the case where the root block needs to be a leaf.
+        * We're just going to turn the thing back into extents anyway.
+        */
+       level = cur->bc_nlevels - 1;
+       if (level == 1)
+               goto out0;
+       /*
+        * Give up if the root has multiple children.
+        */
+       block = xfs_btree_get_iroot(cur);
+       if (xfs_btree_get_numrecs(block) != 1)
+               goto out0;
+       cblock = xfs_btree_get_block(cur, level - 1, &cbp);
+       numrecs = xfs_btree_get_numrecs(cblock);
+       /*
+        * Only do this if the next level's records will fit in the inode
+        * root.  The data is then copied up into the inode, and instead of
+        * freeing the root we free the next level down.
+        */
+       if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level))
+               goto out0;
+       XFS_BTREE_STATS_INC(cur, killroot);
+ #ifdef DEBUG
+       xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
+       ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
+       xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
+       ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
+ #endif
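+       /*
+        * Grow the inode root so it can hold all of the child block's
+        * records; index is the number of extra record slots we need.
+        */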
+       index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
+       if (index) {
+               xfs_iroot_realloc(cur->bc_private.b.ip, index,
+                                 cur->bc_private.b.whichfork);
+               block = ifp->if_broot;
+       }
+       be16_add_cpu(&block->bb_numrecs, index);
+       ASSERT(block->bb_numrecs == cblock->bb_numrecs);
+       kp = xfs_btree_key_addr(cur, 1, block);
+       ckp = xfs_btree_key_addr(cur, 1, cblock);
+       xfs_btree_copy_keys(cur, kp, ckp, numrecs);
+       pp = xfs_btree_ptr_addr(cur, 1, block);
+       cpp = xfs_btree_ptr_addr(cur, 1, cblock);
+ #ifdef DEBUG
+       for (i = 0; i < numrecs; i++) {
+               int             error;
+               error = xfs_btree_check_ptr(cur, cpp, i, level - 1);
+               if (error) {
+                       XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+                       return error;
+               }
+       }
+ #endif
+       xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);
+       cur->bc_ops->free_block(cur, cbp);
+       XFS_BTREE_STATS_INC(cur, free);
+       cur->bc_bufs[level - 1] = NULL;
+       be16_add_cpu(&block->bb_level, -1);
+       xfs_trans_log_inode(cur->bc_tp, ip,
+               XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork));
+       cur->bc_nlevels--;
+ out0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       return 0;
+ }
+ /*
+  * Kill the current root node, and replace it with its only child node.
+  */
+ STATIC int
+ xfs_btree_kill_root(
+       struct xfs_btree_cur    *cur,
+       struct xfs_buf          *bp,
+       int                     level,
+       union xfs_btree_ptr     *newroot)
+ {
+       int                     error;
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_STATS_INC(cur, killroot);
+       /*
+        * Update the root pointer, decreasing the level by 1 and then
+        * free the old root.
+        */
+       cur->bc_ops->set_root(cur, newroot, -1);
+       error = cur->bc_ops->free_block(cur, bp);
+       if (error) {
+               XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+               return error;
+       }
+       XFS_BTREE_STATS_INC(cur, free);
+       cur->bc_bufs[level] = NULL;
+       cur->bc_ra[level] = 0;
+       cur->bc_nlevels--;
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       return 0;
+ }
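+ /*
+  * At non-leaf levels, step the cursor back one entry after a deletion
+  * so that it references a valid record, then report success.
+  */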
+ STATIC int
+ xfs_btree_dec_cursor(
+       struct xfs_btree_cur    *cur,
+       int                     level,
+       int                     *stat)
+ {
+       int                     error;
+       int                     i;
+       if (level > 0) {
+               error = xfs_btree_decrement(cur, level, &i);
+               if (error)
+                       return error;
+       }
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = 1;
+       return 0;
+ }
+ /*
+  * Single level of the btree record deletion routine.
+  * Delete record pointed to by cur/level.
+  * Remove the record from its block then rebalance the tree.
+  * *stat is 0 for failure, 1 for done, 2 to go on to the next level.
+  */
+ STATIC int                                    /* error */
+ xfs_btree_delrec(
+       struct xfs_btree_cur    *cur,           /* btree cursor */
+       int                     level,          /* level removing record from */
+       int                     *stat)          /* fail/done/go-on */
+ {
+       struct xfs_btree_block  *block;         /* btree block */
+       union xfs_btree_ptr     cptr;           /* current block ptr */
+       struct xfs_buf          *bp;            /* buffer for block */
+       int                     error;          /* error return value */
+       int                     i;              /* loop counter */
+       union xfs_btree_key     key;            /* storage for keyp */
+       union xfs_btree_key     *keyp = &key;   /* passed to the next level */
+       union xfs_btree_ptr     lptr;           /* left sibling block ptr */
+       struct xfs_buf          *lbp;           /* left buffer pointer */
+       struct xfs_btree_block  *left;          /* left btree block */
+       int                     lrecs = 0;      /* left record count */
+       int                     ptr;            /* key/record index */
+       union xfs_btree_ptr     rptr;           /* right sibling block ptr */
+       struct xfs_buf          *rbp;           /* right buffer pointer */
+       struct xfs_btree_block  *right;         /* right btree block */
+       struct xfs_btree_block  *rrblock;       /* right-right btree block */
+       struct xfs_buf          *rrbp;          /* right-right buffer pointer */
+       int                     rrecs = 0;      /* right record count */
+       struct xfs_btree_cur    *tcur;          /* temporary btree cursor */
+       int                     numrecs;        /* temporary numrec count */
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       XFS_BTREE_TRACE_ARGI(cur, level);
+       tcur = NULL;
+       /* Get the index of the entry being deleted, check for nothing there. */
+       ptr = cur->bc_ptrs[level];
+       if (ptr == 0) {
+               XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+               *stat = 0;
+               return 0;
+       }
+       /* Get the buffer & block containing the record or key/ptr. */
+       block = xfs_btree_get_block(cur, level, &bp);
+       numrecs = xfs_btree_get_numrecs(block);
+ #ifdef DEBUG
+       error = xfs_btree_check_block(cur, block, level, bp);
+       if (error)
+               goto error0;
+ #endif
+       /* Fail if we're off the end of the block. */
+       if (ptr > numrecs) {
+               XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+               *stat = 0;
+               return 0;
+       }
+       XFS_BTREE_STATS_INC(cur, delrec);
+       XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr);
+       /* Excise the entries being deleted. */
+       if (level > 0) {
+               /* It's a nonleaf. Operate on keys and ptrs. */
+               union xfs_btree_key     *lkp;
+               union xfs_btree_ptr     *lpp;
+               lkp = xfs_btree_key_addr(cur, ptr + 1, block);
+               lpp = xfs_btree_ptr_addr(cur, ptr + 1, block);
+ #ifdef DEBUG
+               for (i = 0; i < numrecs - ptr; i++) {
+                       error = xfs_btree_check_ptr(cur, lpp, i, level);
+                       if (error)
+                               goto error0;
+               }
+ #endif
+               if (ptr < numrecs) {
+                       xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr);
+                       xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr);
+                       xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
+                       xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
+               }
+               /*
+                * If it's the first record in the block, we'll need to pass a
+                * key up to the next level (updkey).
+                */
+               if (ptr == 1)
+                       keyp = xfs_btree_key_addr(cur, 1, block);
+       } else {
+               /* It's a leaf. Operate on records. */
+               if (ptr < numrecs) {
+                       xfs_btree_shift_recs(cur,
+                               xfs_btree_rec_addr(cur, ptr + 1, block),
+                               -1, numrecs - ptr);
+                       xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
+               }
+               /*
+                * If it's the first record in the block, we'll need a key
+                * structure to pass up to the next level (updkey).
+                */
+               if (ptr == 1) {
+                       cur->bc_ops->init_key_from_rec(&key,
+                                       xfs_btree_rec_addr(cur, 1, block));
+                       keyp = &key;
+               }
+       }
+       /*
+        * Decrement and log the number of entries in the block.
+        */
+       xfs_btree_set_numrecs(block, --numrecs);
+       xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
+       /*
+        * If we are tracking the last record in the tree and
+        * we are at the far right edge of the tree, update it.
+        */
+       if (xfs_btree_is_lastrec(cur, block, level)) {
+               cur->bc_ops->update_lastrec(cur, block, NULL,
+                                           ptr, LASTREC_DELREC);
+       }
+       /*
+        * We're at the root level.  First, shrink the root block in-memory.
+        * Try to get rid of the next level down.  If we can't then there's
+        * nothing left to do.
+        */
+       if (level == cur->bc_nlevels - 1) {
+               if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
+                       xfs_iroot_realloc(cur->bc_private.b.ip, -1,
+                                         cur->bc_private.b.whichfork);
+                       error = xfs_btree_kill_iroot(cur);
+                       if (error)
+                               goto error0;
+                       error = xfs_btree_dec_cursor(cur, level, stat);
+                       if (error)
+                               goto error0;
+                       *stat = 1;
+                       return 0;
+               }
+               /*
+                * If this is the root level, and there's only one entry left,
+                * and it's NOT the leaf level, then we can get rid of this
+                * level.
+                */
+               if (numrecs == 1 && level > 0) {
+                       union xfs_btree_ptr     *pp;
+                       /*
+                        * pp is still set to the first pointer in the block.
+                        * Make it the new root of the btree.
+                        */
+                       pp = xfs_btree_ptr_addr(cur, 1, block);
+                       error = xfs_btree_kill_root(cur, bp, level, pp);
+                       if (error)
+                               goto error0;
+               } else if (level > 0) {
+                       error = xfs_btree_dec_cursor(cur, level, stat);
+                       if (error)
+                               goto error0;
+               }
+               *stat = 1;
+               return 0;
+       }
+       /*
+        * If we deleted the leftmost entry in the block, update the
+        * key values above us in the tree.
+        */
+       if (ptr == 1) {
+               error = xfs_btree_updkey(cur, keyp, level + 1);
+               if (error)
+                       goto error0;
+       }
+       /*
+        * If the number of records remaining in the block is at least
+        * the minimum, we're done.
+        */
+       if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) {
+               error = xfs_btree_dec_cursor(cur, level, stat);
+               if (error)
+                       goto error0;
+               return 0;
+       }
+       /*
+        * Otherwise, we have to move some records around to keep the
+        * tree balanced.  Look at the left and right sibling blocks to
+        * see if we can re-balance by moving only one record.
+        */
+       xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
+       xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);
+       if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
+               /*
+                * One child of root, need to get a chance to copy its contents
+                * into the root and delete it. Can't go up to next level,
+                * there's nothing to delete there.
+                */
+               if (xfs_btree_ptr_is_null(cur, &rptr) &&
+                   xfs_btree_ptr_is_null(cur, &lptr) &&
+                   level == cur->bc_nlevels - 2) {
+                       error = xfs_btree_kill_iroot(cur);
+                       if (!error)
+                               error = xfs_btree_dec_cursor(cur, level, stat);
+                       if (error)
+                               goto error0;
+                       return 0;
+               }
+       }
+       ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) ||
+              !xfs_btree_ptr_is_null(cur, &lptr));
+       /*
+        * Duplicate the cursor so our btree manipulations here won't
+        * disrupt the next level up.
+        */
+       error = xfs_btree_dup_cursor(cur, &tcur);
+       if (error)
+               goto error0;
+       /*
+        * If there's a right sibling, see if it's ok to shift an entry
+        * out of it.
+        */
+       if (!xfs_btree_ptr_is_null(cur, &rptr)) {
+               /*
+                * Move the temp cursor to the last entry in the next block.
+                * Actually any entry but the first would suffice.
+                */
+               i = xfs_btree_lastrec(tcur, level);
+               XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+               error = xfs_btree_increment(tcur, level, &i);
+               if (error)
+                       goto error0;
+               XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+               i = xfs_btree_lastrec(tcur, level);
+               XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+               /* Grab a pointer to the block. */
+               right = xfs_btree_get_block(tcur, level, &rbp);
+ #ifdef DEBUG
+               error = xfs_btree_check_block(tcur, right, level, rbp);
+               if (error)
+                       goto error0;
+ #endif
+               /* Grab the current block number, for future use. */
+               xfs_btree_get_sibling(tcur, right, &cptr, XFS_BB_LEFTSIB);
+               /*
+                * If right block is full enough so that removing one entry
+                * won't make it too empty, and left-shifting an entry out
+                * of right to us works, we're done.
+                */
+               if (xfs_btree_get_numrecs(right) - 1 >=
+                   cur->bc_ops->get_minrecs(tcur, level)) {
+                       error = xfs_btree_lshift(tcur, level, &i);
+                       if (error)
+                               goto error0;
+                       if (i) {
+                               ASSERT(xfs_btree_get_numrecs(block) >=
+                                      cur->bc_ops->get_minrecs(tcur, level));
+                               xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+                               tcur = NULL;
+                               error = xfs_btree_dec_cursor(cur, level, stat);
+                               if (error)
+                                       goto error0;
+                               return 0;
+                       }
+               }
+               /*
+                * Otherwise, grab the number of records in right for
+                * future reference, and fix up the temp cursor to point
+                * to our block again (last record).
+                */
+               rrecs = xfs_btree_get_numrecs(right);
+               if (!xfs_btree_ptr_is_null(cur, &lptr)) {
+                       i = xfs_btree_firstrec(tcur, level);
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+                       error = xfs_btree_decrement(tcur, level, &i);
+                       if (error)
+                               goto error0;
+                       XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+               }
+       }
+       /*
+        * If there's a left sibling, see if it's ok to shift an entry
+        * out of it.
+        */
+       if (!xfs_btree_ptr_is_null(cur, &lptr)) {
+               /*
+                * Move the temp cursor to the first entry in the
+                * previous block.
+                */
+               i = xfs_btree_firstrec(tcur, level);
+               XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+               error = xfs_btree_decrement(tcur, level, &i);
+               if (error)
+                       goto error0;
+               i = xfs_btree_firstrec(tcur, level);
+               XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+               /* Grab a pointer to the block. */
+               left = xfs_btree_get_block(tcur, level, &lbp);
+ #ifdef DEBUG
+               error = xfs_btree_check_block(cur, left, level, lbp);
+               if (error)
+                       goto error0;
+ #endif
+               /* Grab the current block number, for future use. */
+               xfs_btree_get_sibling(tcur, left, &cptr, XFS_BB_RIGHTSIB);
+               /*
+                * If left block is full enough so that removing one entry
+                * won't make it too empty, and right-shifting an entry out
+                * of left to us works, we're done.
+                */
+               if (xfs_btree_get_numrecs(left) - 1 >=
+                   cur->bc_ops->get_minrecs(tcur, level)) {
+                       error = xfs_btree_rshift(tcur, level, &i);
+                       if (error)
+                               goto error0;
+                       if (i) {
+                               ASSERT(xfs_btree_get_numrecs(block) >=
+                                      cur->bc_ops->get_minrecs(tcur, level));
+                               xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+                               tcur = NULL;
+                               if (level == 0)
+                                       cur->bc_ptrs[0]++;
+                               XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+                               *stat = 1;
+                               return 0;
+                       }
+               }
+               /*
+                * Otherwise, grab the number of records in left for
+                * future reference.
+                */
+               lrecs = xfs_btree_get_numrecs(left);
+       }
+       /* Delete the temp cursor, we're done with it. */
+       xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+       tcur = NULL;
+       /* If here, we need to do a join to keep the tree balanced. */
+       ASSERT(!xfs_btree_ptr_is_null(cur, &cptr));
+       if (!xfs_btree_ptr_is_null(cur, &lptr) &&
+           lrecs + xfs_btree_get_numrecs(block) <=
+                       cur->bc_ops->get_maxrecs(cur, level)) {
+               /*
+                * Set "right" to be the starting block,
+                * "left" to be the left neighbor.
+                */
+               rptr = cptr;
+               right = block;
+               rbp = bp;
+               error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
+               if (error)
+                       goto error0;
+       /*
+        * If that won't work, see if we can join with the right neighbor block.
+        */
+       } else if (!xfs_btree_ptr_is_null(cur, &rptr) &&
+                  rrecs + xfs_btree_get_numrecs(block) <=
+                       cur->bc_ops->get_maxrecs(cur, level)) {
+               /*
+                * Set "left" to be the starting block,
+                * "right" to be the right neighbor.
+                */
+               lptr = cptr;
+               left = block;
+               lbp = bp;
+               error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
+               if (error)
+                       goto error0;
+       /*
+        * Otherwise, we can't fix the imbalance.
+        * Just return.  This is probably a logic error, but it's not fatal.
+        */
+       } else {
+               error = xfs_btree_dec_cursor(cur, level, stat);
+               if (error)
+                       goto error0;
+               return 0;
+       }
+       rrecs = xfs_btree_get_numrecs(right);
+       lrecs = xfs_btree_get_numrecs(left);
+       /*
+        * We're now going to join "left" and "right" by moving all the stuff
+        * in "right" to "left" and deleting "right".
+        */
+       XFS_BTREE_STATS_ADD(cur, moves, rrecs);
+       if (level > 0) {
+               /* It's a non-leaf.  Move keys and pointers. */
+               union xfs_btree_key     *lkp;   /* left btree key */
+               union xfs_btree_ptr     *lpp;   /* left address pointer */
+               union xfs_btree_key     *rkp;   /* right btree key */
+               union xfs_btree_ptr     *rpp;   /* right address pointer */
+               lkp = xfs_btree_key_addr(cur, lrecs + 1, left);
+               lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left);
+               rkp = xfs_btree_key_addr(cur, 1, right);
+               rpp = xfs_btree_ptr_addr(cur, 1, right);
+ #ifdef DEBUG
+               for (i = 1; i < rrecs; i++) {
+                       error = xfs_btree_check_ptr(cur, rpp, i, level);
+                       if (error)
+                               goto error0;
+               }
+ #endif
+               xfs_btree_copy_keys(cur, lkp, rkp, rrecs);
+               xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs);
+               xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
+               xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
+       } else {
+               /* It's a leaf.  Move records.  */
+               union xfs_btree_rec     *lrp;   /* left record pointer */
+               union xfs_btree_rec     *rrp;   /* right record pointer */
+               lrp = xfs_btree_rec_addr(cur, lrecs + 1, left);
+               rrp = xfs_btree_rec_addr(cur, 1, right);
+               xfs_btree_copy_recs(cur, lrp, rrp, rrecs);
+               xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
+       }
+       XFS_BTREE_STATS_INC(cur, join);
+       /*
+        * Fix up the number of records and right block pointer in the
+        * surviving block, and log it.
+        */
+       xfs_btree_set_numrecs(left, lrecs + rrecs);
+       xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB);
+       xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
+       xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
+       /* If there is a right sibling, point it to the remaining block. */
+       xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
+       if (!xfs_btree_ptr_is_null(cur, &cptr)) {
+               error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp);
+               if (error)
+                       goto error0;
+               xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB);
+               xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
+       }
+       /* Free the deleted block. */
+       error = cur->bc_ops->free_block(cur, rbp);
+       if (error)
+               goto error0;
+       XFS_BTREE_STATS_INC(cur, free);
+       /*
+        * If we joined with the left neighbor, set the buffer in the
+        * cursor to the left block, and fix up the index.
+        */
+       if (bp != lbp) {
+               cur->bc_bufs[level] = lbp;
+               cur->bc_ptrs[level] += lrecs;
+               cur->bc_ra[level] = 0;
+       }
+       /*
+        * If we joined with the right neighbor and there's a level above
+        * us, increment the cursor at that level.
+        */
+       else if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) ||
+                  (level + 1 < cur->bc_nlevels)) {
+               error = xfs_btree_increment(cur, level + 1, &i);
+               if (error)
+                       goto error0;
+       }
+       /*
+        * Readjust the ptr at this level if it's not a leaf, since it's
+        * still pointing at the deletion point, which makes the cursor
+        * inconsistent.  If this makes the ptr 0, the caller fixes it up.
+        * We can't use decrement because it would change the next level up.
+        */
+       if (level > 0)
+               cur->bc_ptrs[level]--;
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       /* Return value means the next level up has something to do. */
+       *stat = 2;
+       return 0;
+ error0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+       if (tcur)
+               xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
+       return error;
+ }
+ /*
+  * Delete the record pointed to by cur.
+  * The cursor refers to the place where the record was (where it could be
+  * reinserted) when the operation returns.
+  */
+ int                                   /* error */
+ xfs_btree_delete(
+       struct xfs_btree_cur    *cur,
+       int                     *stat)  /* success/failure */
+ {
+       int                     error;  /* error return value */
+       int                     level;
+       int                     i;
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+       /*
+        * Go up the tree, starting at leaf level.
+        *
+        * If 2 is returned then a join was done; go to the next level.
+        * Otherwise we are done.
+        */
+       for (level = 0, i = 2; i == 2; level++) {
+               error = xfs_btree_delrec(cur, level, &i);
+               if (error)
+                       goto error0;
+       }
+       if (i == 0) {
+               for (level = 1; level < cur->bc_nlevels; level++) {
+                       if (cur->bc_ptrs[level] == 0) {
+                               error = xfs_btree_decrement(cur, level, &i);
+                               if (error)
+                                       goto error0;
+                               break;
+                       }
+               }
+       }
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+       *stat = i;
+       return 0;
+ error0:
+       XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+       return error;
+ }
+ /*
+  * Get the data from the pointed-to record.
+  */
+ int                                   /* error */
+ xfs_btree_get_rec(
+       struct xfs_btree_cur    *cur,   /* btree cursor */
+       union xfs_btree_rec     **recp, /* output: btree record */
+       int                     *stat)  /* output: success/failure */
+ {
+       struct xfs_btree_block  *block; /* btree block */
+       struct xfs_buf          *bp;    /* buffer pointer */
+       int                     ptr;    /* record number */
+ #ifdef DEBUG
+       int                     error;  /* error return value */
+ #endif
+       ptr = cur->bc_ptrs[0];
+       block = xfs_btree_get_block(cur, 0, &bp);
+ #ifdef DEBUG
+       error = xfs_btree_check_block(cur, block, 0, bp);
+       if (error)
+               return error;
+ #endif
+       /*
+        * Off the right end or left end, return failure.
+        */
+       if (ptr > xfs_btree_get_numrecs(block) || ptr <= 0) {
+               *stat = 0;
+               return 0;
+       }
+       /*
+        * Point to the record and extract its data.
+        */
+       *recp = xfs_btree_rec_addr(cur, ptr, block);
+       *stat = 1;
+       return 0;
+ }
+ /*
+  * Change the owner of a btree.
+  *
+  * The mechanism we use here is ordered buffer logging. Because we don't know
+  * how many buffers we are going to need to modify, we don't really want
+  * to have to make transaction reservations for the worst case of every
+  * buffer in a full size btree as that may be more space than we can fit
+  * in the log....
+  *
+  * We do the btree walk in the most efficient manner possible - we have sibling
+  * pointers so we can just walk all the blocks on each level from left to right
+  * in a single pass, and then move to the next level and do the same. We can
+  * also do readahead on the sibling pointers to get IO moving more quickly,
+  * though for slow disks this is unlikely to make much difference to performance
+  * as the amount of CPU work we have to do before moving to the next block is
+  * relatively small.
+  *
+  * For each btree block that we load, modify the owner appropriately, set the
+  * buffer as an ordered buffer and log it appropriately. We need to ensure that
+  * we mark the region we change dirty so that if the buffer is relogged in
+  * a subsequent transaction the changes we make here as an ordered buffer are
+  * correctly relogged in that transaction.  If we are in recovery context, then
+  * just queue the modified buffer as delayed write buffer so the transaction
+  * recovery completion writes the changes to disk.
+  */
+ static int
+ xfs_btree_block_change_owner(
+       struct xfs_btree_cur    *cur,
+       int                     level,
+       __uint64_t              new_owner,
+       struct list_head        *buffer_list)
+ {
+       struct xfs_btree_block  *block;
+       struct xfs_buf          *bp;
+       union xfs_btree_ptr     rptr;
+       /* do right sibling readahead */
+       xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
+       /* modify the owner */
+       block = xfs_btree_get_block(cur, level, &bp);
+       if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+               block->bb_u.l.bb_owner = cpu_to_be64(new_owner);
+       else
+               block->bb_u.s.bb_owner = cpu_to_be32(new_owner);
+       /*
+        * If the block is a root block hosted in an inode, we might not have a
+        * buffer pointer here and we shouldn't attempt to log the change as the
+        * information is already held in the inode and discarded when the root
+        * block is formatted into the on-disk inode fork. We still change it,
+        * though, so everything is consistent in memory.
+        */
+       if (bp) {
+               if (cur->bc_tp) {
+                       xfs_trans_ordered_buf(cur->bc_tp, bp);
+                       xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
+               } else {
+                       xfs_buf_delwri_queue(bp, buffer_list);
+               }
+       } else {
+               ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
+               ASSERT(level == cur->bc_nlevels - 1);
+       }
+       /* now read the right sibling block for the next iteration */
+       xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
+       if (xfs_btree_ptr_is_null(cur, &rptr))
+               return -ENOENT;
+       return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
+ }
+ int
+ xfs_btree_change_owner(
+       struct xfs_btree_cur    *cur,
+       __uint64_t              new_owner,
+       struct list_head        *buffer_list)
+ {
+       union xfs_btree_ptr     lptr;
+       int                     level;
+       struct xfs_btree_block  *block = NULL;
+       int                     error = 0;
+       cur->bc_ops->init_ptr_from_cur(cur, &lptr);
+       /* for each level */
+       for (level = cur->bc_nlevels - 1; level >= 0; level--) {
+               /* grab the left hand block */
+               error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
+               if (error)
+                       return error;
+               /* readahead the left most block for the next level down */
+               if (level > 0) {
+                       union xfs_btree_ptr     *ptr;
+                       ptr = xfs_btree_ptr_addr(cur, 1, block);
+                       xfs_btree_readahead_ptr(cur, ptr, 1);
+                       /* save for the next iteration of the loop */
+                       lptr = *ptr;
+               }
+               /* for each buffer in the level */
+               do {
+                       error = xfs_btree_block_change_owner(cur, level,
+                                                            new_owner,
+                                                            buffer_list);
+               } while (!error);
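+               /* -ENOENT means we've walked off the right edge of the level. */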
+               if (error != -ENOENT)
+                       return error;
+       }
+       return 0;
+ }
index 0000000000000000000000000000000000000000,f5ca0286a0afc3ebaf7451d7bd0f135af18ef4c5..6e93b5ef0a6bc4aa138a2bfe5dca216bdb646321
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,819 +1,836 @@@
 -       * GQUOTINO and PQUOTINO cannot be used together in versions
 -       * of superblock that do not have pquotino. from->sb_flags
 -       * tells us which quota is active and should be copied to
 -       * disk.
+ /*
+  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+  * All Rights Reserved.
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License as
+  * published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it would be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, write the Free Software Foundation,
+  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+  */
+ #include "xfs.h"
+ #include "xfs_fs.h"
+ #include "xfs_shared.h"
+ #include "xfs_format.h"
+ #include "xfs_log_format.h"
+ #include "xfs_trans_resv.h"
+ #include "xfs_bit.h"
+ #include "xfs_sb.h"
+ #include "xfs_ag.h"
+ #include "xfs_mount.h"
+ #include "xfs_inode.h"
+ #include "xfs_ialloc.h"
+ #include "xfs_alloc.h"
+ #include "xfs_error.h"
+ #include "xfs_trace.h"
+ #include "xfs_cksum.h"
+ #include "xfs_trans.h"
+ #include "xfs_buf_item.h"
+ #include "xfs_dinode.h"
+ #include "xfs_bmap_btree.h"
+ #include "xfs_alloc_btree.h"
+ #include "xfs_ialloc_btree.h"
+ /*
+  * Physical superblock buffer manipulations. Shared with libxfs in userspace.
+  */
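+ /*
+  * Offset/type map of the on-disk superblock fields: type 0 entries are
+  * integers subject to endian translation, type 1 entries are binary
+  * data or strings that are copied verbatim.
+  */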
+ static const struct {
+       short offset;
+       short type;     /* 0 = integer
+                        * 1 = binary / string (no translation)
+                        */
+ } xfs_sb_info[] = {
+       { offsetof(xfs_sb_t, sb_magicnum),      0 },
+       { offsetof(xfs_sb_t, sb_blocksize),     0 },
+       { offsetof(xfs_sb_t, sb_dblocks),       0 },
+       { offsetof(xfs_sb_t, sb_rblocks),       0 },
+       { offsetof(xfs_sb_t, sb_rextents),      0 },
+       { offsetof(xfs_sb_t, sb_uuid),          1 },
+       { offsetof(xfs_sb_t, sb_logstart),      0 },
+       { offsetof(xfs_sb_t, sb_rootino),       0 },
+       { offsetof(xfs_sb_t, sb_rbmino),        0 },
+       { offsetof(xfs_sb_t, sb_rsumino),       0 },
+       { offsetof(xfs_sb_t, sb_rextsize),      0 },
+       { offsetof(xfs_sb_t, sb_agblocks),      0 },
+       { offsetof(xfs_sb_t, sb_agcount),       0 },
+       { offsetof(xfs_sb_t, sb_rbmblocks),     0 },
+       { offsetof(xfs_sb_t, sb_logblocks),     0 },
+       { offsetof(xfs_sb_t, sb_versionnum),    0 },
+       { offsetof(xfs_sb_t, sb_sectsize),      0 },
+       { offsetof(xfs_sb_t, sb_inodesize),     0 },
+       { offsetof(xfs_sb_t, sb_inopblock),     0 },
+       { offsetof(xfs_sb_t, sb_fname[0]),      1 },
+       { offsetof(xfs_sb_t, sb_blocklog),      0 },
+       { offsetof(xfs_sb_t, sb_sectlog),       0 },
+       { offsetof(xfs_sb_t, sb_inodelog),      0 },
+       { offsetof(xfs_sb_t, sb_inopblog),      0 },
+       { offsetof(xfs_sb_t, sb_agblklog),      0 },
+       { offsetof(xfs_sb_t, sb_rextslog),      0 },
+       { offsetof(xfs_sb_t, sb_inprogress),    0 },
+       { offsetof(xfs_sb_t, sb_imax_pct),      0 },
+       { offsetof(xfs_sb_t, sb_icount),        0 },
+       { offsetof(xfs_sb_t, sb_ifree),         0 },
+       { offsetof(xfs_sb_t, sb_fdblocks),      0 },
+       { offsetof(xfs_sb_t, sb_frextents),     0 },
+       { offsetof(xfs_sb_t, sb_uquotino),      0 },
+       { offsetof(xfs_sb_t, sb_gquotino),      0 },
+       { offsetof(xfs_sb_t, sb_qflags),        0 },
+       { offsetof(xfs_sb_t, sb_flags),         0 },
+       { offsetof(xfs_sb_t, sb_shared_vn),     0 },
+       { offsetof(xfs_sb_t, sb_inoalignmt),    0 },
+       { offsetof(xfs_sb_t, sb_unit),          0 },
+       { offsetof(xfs_sb_t, sb_width),         0 },
+       { offsetof(xfs_sb_t, sb_dirblklog),     0 },
+       { offsetof(xfs_sb_t, sb_logsectlog),    0 },
+       { offsetof(xfs_sb_t, sb_logsectsize),   0 },
+       { offsetof(xfs_sb_t, sb_logsunit),      0 },
+       { offsetof(xfs_sb_t, sb_features2),     0 },
+       { offsetof(xfs_sb_t, sb_bad_features2), 0 },
+       { offsetof(xfs_sb_t, sb_features_compat),       0 },
+       { offsetof(xfs_sb_t, sb_features_ro_compat),    0 },
+       { offsetof(xfs_sb_t, sb_features_incompat),     0 },
+       { offsetof(xfs_sb_t, sb_features_log_incompat), 0 },
+       { offsetof(xfs_sb_t, sb_crc),           0 },
+       { offsetof(xfs_sb_t, sb_pad),           0 },
+       { offsetof(xfs_sb_t, sb_pquotino),      0 },
+       { offsetof(xfs_sb_t, sb_lsn),           0 },
+       { sizeof(xfs_sb_t),                     0 }
+ };
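+ /*
+  * A sketch of how the table above is consumed (illustrative only, using
+  * the xfs_sb_field_t indices defined elsewhere): xfs_sb_to_disk() below
+  * derives each field's byte size from the difference between consecutive
+  * offsets, which is why the table is terminated with a sizeof(xfs_sb_t)
+  * sentinel.  For the xfs_sb_field_t index f of a given field:
+  *
+  *	size = xfs_sb_info[f + 1].offset - xfs_sb_info[f].offset;
+  *
+  * e.g. this yields 4 bytes for the __uint32_t sb_blocksize field.
+  */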
+ /*
+  * Reference counting access wrappers for the perag structures.
+  * Because we never free per-ag structures, the only thing we
+  * have to protect against concurrent changes is the tree structure itself.
+  */
+ struct xfs_perag *
+ xfs_perag_get(
+       struct xfs_mount        *mp,
+       xfs_agnumber_t          agno)
+ {
+       struct xfs_perag        *pag;
+       int                     ref = 0;
+       rcu_read_lock();
+       pag = radix_tree_lookup(&mp->m_perag_tree, agno);
+       if (pag) {
+               ASSERT(atomic_read(&pag->pag_ref) >= 0);
+               ref = atomic_inc_return(&pag->pag_ref);
+       }
+       rcu_read_unlock();
+       trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
+       return pag;
+ }
+ /*
+  * Search from @first to find the next perag with the given tag set.
+  */
+ struct xfs_perag *
+ xfs_perag_get_tag(
+       struct xfs_mount        *mp,
+       xfs_agnumber_t          first,
+       int                     tag)
+ {
+       struct xfs_perag        *pag;
+       int                     found;
+       int                     ref;
+       rcu_read_lock();
+       found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
+                                       (void **)&pag, first, 1, tag);
+       if (found <= 0) {
+               rcu_read_unlock();
+               return NULL;
+       }
+       ref = atomic_inc_return(&pag->pag_ref);
+       rcu_read_unlock();
+       trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
+       return pag;
+ }
+ void
+ xfs_perag_put(
+       struct xfs_perag        *pag)
+ {
+       int     ref;
+       ASSERT(atomic_read(&pag->pag_ref) > 0);
+       ref = atomic_dec_return(&pag->pag_ref);
+       trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
+ }
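+ /*
+  * Usage sketch (illustrative only): callers bracket any access to
+  * per-AG state with a get/put pair, e.g.:
+  *
+  *	pag = xfs_perag_get(mp, agno);
+  *	blocks = pag->pagf_freeblks;
+  *	xfs_perag_put(pag);
+  *
+  * xfs_initialize_perag_data() below follows exactly this pattern.
+  */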
+ /*
+  * Check the validity of the SB found.
+  */
+ STATIC int
+ xfs_mount_validate_sb(
+       xfs_mount_t     *mp,
+       xfs_sb_t        *sbp,
+       bool            check_inprogress,
+       bool            check_version)
+ {
+       /*
+        * If the log device and data device have the
+        * same device number, the log is internal.
+        * Consequently, the sb_logstart should be non-zero.  If
+        * we have a zero sb_logstart in this case, we may be trying to mount
+        * a volume filesystem in a non-volume manner.
+        */
+       if (sbp->sb_magicnum != XFS_SB_MAGIC) {
+               xfs_warn(mp, "bad magic number");
+               return -EWRONGFS;
+       }
+       if (!xfs_sb_good_version(sbp)) {
+               xfs_warn(mp, "bad version");
+               return -EWRONGFS;
+       }
+       /*
+        * Version 5 superblock feature mask validation. Reject combinations the
+        * kernel cannot support up front before checking anything else. For
+        * write validation, we don't need to check feature masks.
+        */
+       if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
+               if (xfs_sb_has_compat_feature(sbp,
+                                       XFS_SB_FEAT_COMPAT_UNKNOWN)) {
+                       xfs_warn(mp,
+ "Superblock has unknown compatible features (0x%x) enabled.\n"
+ "Using a more recent kernel is recommended.",
+                               (sbp->sb_features_compat &
+                                               XFS_SB_FEAT_COMPAT_UNKNOWN));
+               }
+               if (xfs_sb_has_ro_compat_feature(sbp,
+                                       XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
+                       xfs_alert(mp,
+ "Superblock has unknown read-only compatible features (0x%x) enabled.",
+                               (sbp->sb_features_ro_compat &
+                                               XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
+                       if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
+                               xfs_warn(mp,
+ "Attempted to mount read-only compatible filesystem read-write.\n"
+ "Filesystem can only be safely mounted read only.");
+                               return -EINVAL;
+                       }
+               }
+               if (xfs_sb_has_incompat_feature(sbp,
+                                       XFS_SB_FEAT_INCOMPAT_UNKNOWN)) {
+                       xfs_warn(mp,
+ "Superblock has unknown incompatible features (0x%x) enabled.\n"
+ "Filesystem can not be safely mounted by this kernel.",
+                               (sbp->sb_features_incompat &
+                                               XFS_SB_FEAT_INCOMPAT_UNKNOWN));
+                       return -EINVAL;
+               }
+       }
+       if (xfs_sb_version_has_pquotino(sbp)) {
+               if (sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) {
+                       xfs_notice(mp,
+                          "Version 5 superblock has XFS_OQUOTA bits.");
+                       return -EFSCORRUPTED;
+               }
+       } else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD |
+                               XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) {
+                       xfs_notice(mp,
+ "Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits.");
+                       return -EFSCORRUPTED;
+       }
+       if (unlikely(
+           sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
+               xfs_warn(mp,
+               "filesystem is marked as having an external log; "
+               "specify logdev on the mount command line.");
+               return -EINVAL;
+       }
+       if (unlikely(
+           sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
+               xfs_warn(mp,
+               "filesystem is marked as having an internal log; "
+               "do not specify logdev on the mount command line.");
+               return -EINVAL;
+       }
+       /*
+        * More sanity checking.  Most of these were stolen directly from
+        * xfs_repair.
+        */
+       if (unlikely(
+           sbp->sb_agcount <= 0                                        ||
+           sbp->sb_sectsize < XFS_MIN_SECTORSIZE                       ||
+           sbp->sb_sectsize > XFS_MAX_SECTORSIZE                       ||
+           sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG                    ||
+           sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG                    ||
+           sbp->sb_sectsize != (1 << sbp->sb_sectlog)                  ||
+           sbp->sb_blocksize < XFS_MIN_BLOCKSIZE                       ||
+           sbp->sb_blocksize > XFS_MAX_BLOCKSIZE                       ||
+           sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG                    ||
+           sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG                    ||
+           sbp->sb_blocksize != (1 << sbp->sb_blocklog)                ||
+           sbp->sb_inodesize < XFS_DINODE_MIN_SIZE                     ||
+           sbp->sb_inodesize > XFS_DINODE_MAX_SIZE                     ||
+           sbp->sb_inodelog < XFS_DINODE_MIN_LOG                       ||
+           sbp->sb_inodelog > XFS_DINODE_MAX_LOG                       ||
+           sbp->sb_inodesize != (1 << sbp->sb_inodelog)                ||
+           sbp->sb_inopblock != howmany(sbp->sb_blocksize,sbp->sb_inodesize) ||
+           (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog)   ||
+           (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE)  ||
+           (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE)  ||
+           (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */)    ||
+           sbp->sb_dblocks == 0                                        ||
+           sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp)                      ||
+           sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp)                      ||
+           sbp->sb_shared_vn != 0)) {
+               xfs_notice(mp, "SB sanity check failed");
+               return -EFSCORRUPTED;
+       }
+       /*
+        * Until this is fixed, only page-sized or smaller data blocks work.
+        */
+       if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
+               xfs_warn(mp,
+               "File system with blocksize %d bytes. "
+               "Only pagesize (%ld) or less will currently work.",
+                               sbp->sb_blocksize, PAGE_SIZE);
+               return -ENOSYS;
+       }
+       /*
+        * Currently, only a few inode sizes are supported.
+        */
+       switch (sbp->sb_inodesize) {
+       case 256:
+       case 512:
+       case 1024:
+       case 2048:
+               break;
+       default:
+               xfs_warn(mp, "inode size of %d bytes not supported",
+                               sbp->sb_inodesize);
+               return -ENOSYS;
+       }
+       if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
+           xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
+               xfs_warn(mp,
+               "file system too large to be mounted on this system.");
+               return -EFBIG;
+       }
+       if (check_inprogress && sbp->sb_inprogress) {
+               xfs_warn(mp, "Offline file system operation in progress!");
+               return -EFSCORRUPTED;
+       }
+       return 0;
+ }
+ void
+ xfs_sb_quota_from_disk(struct xfs_sb *sbp)
+ {
+       /*
+        * Older mkfs does not initialize quota inodes to NULLFSINO, which
+        * leaves two different in-core values meaning "invalid quota
+        * inode": 0 and NULLFSINO. Normalize them to the single value
+        * NULLFSINO.
+        *
+        * Note that this change affects only the in-core values. These
+        * values are not written back to disk unless quota information is
+        * written to the disk, and even then the sb_pquotino field is not
+        * written unless the superblock supports pquotino.
+        */
+       if (sbp->sb_uquotino == 0)
+               sbp->sb_uquotino = NULLFSINO;
+       if (sbp->sb_gquotino == 0)
+               sbp->sb_gquotino = NULLFSINO;
+       if (sbp->sb_pquotino == 0)
+               sbp->sb_pquotino = NULLFSINO;
+       /*
+        * We need to do these manipulations only if we are working
+        * with an older version of the on-disk superblock.
+        */
+       if (xfs_sb_version_has_pquotino(sbp))
+               return;
+       if (sbp->sb_qflags & XFS_OQUOTA_ENFD)
+               sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ?
+                                       XFS_PQUOTA_ENFD : XFS_GQUOTA_ENFD;
+       if (sbp->sb_qflags & XFS_OQUOTA_CHKD)
+               sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ?
+                                       XFS_PQUOTA_CHKD : XFS_GQUOTA_CHKD;
+       sbp->sb_qflags &= ~(XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD);
+       if (sbp->sb_qflags & XFS_PQUOTA_ACCT)  {
+               /*
+                * In older versions of the superblock, the on-disk
+                * superblock only has sb_gquotino, while the in-core
+                * superblock has both sb_gquotino and sb_pquotino; only
+                * one of them is in use at any point in time. So, if
+                * PQUOTA is set in the on-disk superblock, copy
+                * sb_gquotino over to sb_pquotino.
+                */
+               sbp->sb_pquotino = sbp->sb_gquotino;
+               sbp->sb_gquotino = NULLFSINO;
+       }
+ }
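+ /*
+  * Worked example of the translation above for a pre-pquotino,
+  * project-quota filesystem: an on-disk sb_qflags of
+  * (XFS_PQUOTA_ACCT | XFS_OQUOTA_ENFD) becomes the in-core
+  * (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD), the on-disk sb_gquotino value
+  * moves into the in-core sb_pquotino, and sb_gquotino is set to
+  * NULLFSINO.
+  */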
+ void
+ xfs_sb_from_disk(
+       struct xfs_sb   *to,
+       xfs_dsb_t       *from)
+ {
+       to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
+       to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
+       to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
+       to->sb_rblocks = be64_to_cpu(from->sb_rblocks);
+       to->sb_rextents = be64_to_cpu(from->sb_rextents);
+       memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
+       to->sb_logstart = be64_to_cpu(from->sb_logstart);
+       to->sb_rootino = be64_to_cpu(from->sb_rootino);
+       to->sb_rbmino = be64_to_cpu(from->sb_rbmino);
+       to->sb_rsumino = be64_to_cpu(from->sb_rsumino);
+       to->sb_rextsize = be32_to_cpu(from->sb_rextsize);
+       to->sb_agblocks = be32_to_cpu(from->sb_agblocks);
+       to->sb_agcount = be32_to_cpu(from->sb_agcount);
+       to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks);
+       to->sb_logblocks = be32_to_cpu(from->sb_logblocks);
+       to->sb_versionnum = be16_to_cpu(from->sb_versionnum);
+       to->sb_sectsize = be16_to_cpu(from->sb_sectsize);
+       to->sb_inodesize = be16_to_cpu(from->sb_inodesize);
+       to->sb_inopblock = be16_to_cpu(from->sb_inopblock);
+       memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
+       to->sb_blocklog = from->sb_blocklog;
+       to->sb_sectlog = from->sb_sectlog;
+       to->sb_inodelog = from->sb_inodelog;
+       to->sb_inopblog = from->sb_inopblog;
+       to->sb_agblklog = from->sb_agblklog;
+       to->sb_rextslog = from->sb_rextslog;
+       to->sb_inprogress = from->sb_inprogress;
+       to->sb_imax_pct = from->sb_imax_pct;
+       to->sb_icount = be64_to_cpu(from->sb_icount);
+       to->sb_ifree = be64_to_cpu(from->sb_ifree);
+       to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks);
+       to->sb_frextents = be64_to_cpu(from->sb_frextents);
+       to->sb_uquotino = be64_to_cpu(from->sb_uquotino);
+       to->sb_gquotino = be64_to_cpu(from->sb_gquotino);
+       to->sb_qflags = be16_to_cpu(from->sb_qflags);
+       to->sb_flags = from->sb_flags;
+       to->sb_shared_vn = from->sb_shared_vn;
+       to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt);
+       to->sb_unit = be32_to_cpu(from->sb_unit);
+       to->sb_width = be32_to_cpu(from->sb_width);
+       to->sb_dirblklog = from->sb_dirblklog;
+       to->sb_logsectlog = from->sb_logsectlog;
+       to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize);
+       to->sb_logsunit = be32_to_cpu(from->sb_logsunit);
+       to->sb_features2 = be32_to_cpu(from->sb_features2);
+       to->sb_bad_features2 = be32_to_cpu(from->sb_bad_features2);
+       to->sb_features_compat = be32_to_cpu(from->sb_features_compat);
+       to->sb_features_ro_compat = be32_to_cpu(from->sb_features_ro_compat);
+       to->sb_features_incompat = be32_to_cpu(from->sb_features_incompat);
+       to->sb_features_log_incompat =
+                               be32_to_cpu(from->sb_features_log_incompat);
+       to->sb_pad = 0;
+       to->sb_pquotino = be64_to_cpu(from->sb_pquotino);
+       to->sb_lsn = be64_to_cpu(from->sb_lsn);
+ }
+ static inline void
+ xfs_sb_quota_to_disk(
+       xfs_dsb_t       *to,
+       xfs_sb_t        *from,
+       __int64_t       *fields)
+ {
+       __uint16_t      qflags = from->sb_qflags;
+       /*
+        * We need to do these manipulations only if we are working
+        * with an older version of the on-disk superblock.
+        */
+       if (xfs_sb_version_has_pquotino(from))
+               return;
+       if (*fields & XFS_SB_QFLAGS) {
+               /*
+                * The in-core version of sb_qflags does not have
+                * XFS_OQUOTA_* flags, whereas the on-disk version
+                * does.  So, convert the in-core XFS_{P,G}QUOTA_* flags
+                * to the on-disk XFS_OQUOTA_* flags.
+                */
+               qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD |
+                               XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD);
+               if (from->sb_qflags &
+                               (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD))
+                       qflags |= XFS_OQUOTA_ENFD;
+               if (from->sb_qflags &
+                               (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))
+                       qflags |= XFS_OQUOTA_CHKD;
+               to->sb_qflags = cpu_to_be16(qflags);
+               *fields &= ~XFS_SB_QFLAGS;
+       }
+       /*
++       * GQUOTINO and PQUOTINO cannot be used together in superblock
++       * versions that do not have pquotino. from->sb_qflags tells us which
++       * quota is active and should be copied to disk. If neither is active,
++       * make sure we write NULLFSINO to the sb_gquotino field as a quota
++       * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature
++       * bit is set.
++       *
++       * Note that we don't need to handle the sb_uquotino or sb_pquotino here
++       * as they do not require any translation. Hence the main sb field loop
++       * will write them appropriately from the in-core superblock.
+        */
+       if ((*fields & XFS_SB_GQUOTINO) &&
+                               (from->sb_qflags & XFS_GQUOTA_ACCT))
+               to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
+       else if ((*fields & XFS_SB_PQUOTINO) &&
+                               (from->sb_qflags & XFS_PQUOTA_ACCT))
+               to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
++      else {
++              /*
++               * We can't rely on just the fields being logged to tell us
++               * that it is safe to write NULLFSINO - we should only do that
++               * if quotas are not actually enabled. Hence only write
++               * NULLFSINO if both in-core quota inodes are NULL.
++               */
++              if (from->sb_gquotino == NULLFSINO &&
++                  from->sb_pquotino == NULLFSINO)
++                      to->sb_gquotino = cpu_to_be64(NULLFSINO);
++      }
+       *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO);
+ }
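+ /*
+  * The write-side inverse of the example given above for
+  * xfs_sb_quota_from_disk(): for the same pre-pquotino, project-quota
+  * filesystem, the in-core (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD) is folded
+  * back into the on-disk (XFS_PQUOTA_ACCT | XFS_OQUOTA_ENFD), and the
+  * in-core sb_pquotino is written into the on-disk sb_gquotino slot.
+  */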
+ /*
+  * Copy the in-core superblock to the on-disk one.
+  *
+  * The fields argument is a mask of superblock fields to copy.
+  */
+ void
+ xfs_sb_to_disk(
+       xfs_dsb_t       *to,
+       xfs_sb_t        *from,
+       __int64_t       fields)
+ {
+       xfs_caddr_t     to_ptr = (xfs_caddr_t)to;
+       xfs_caddr_t     from_ptr = (xfs_caddr_t)from;
+       xfs_sb_field_t  f;
+       int             first;
+       int             size;
+       ASSERT(fields);
+       if (!fields)
+               return;
+       xfs_sb_quota_to_disk(to, from, &fields);
+       while (fields) {
+               f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
+               first = xfs_sb_info[f].offset;
+               size = xfs_sb_info[f + 1].offset - first;
+               ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);
+               if (size == 1 || xfs_sb_info[f].type == 1) {
+                       memcpy(to_ptr + first, from_ptr + first, size);
+               } else {
+                       switch (size) {
+                       case 2:
+                               *(__be16 *)(to_ptr + first) =
+                                     cpu_to_be16(*(__u16 *)(from_ptr + first));
+                               break;
+                       case 4:
+                               *(__be32 *)(to_ptr + first) =
+                                     cpu_to_be32(*(__u32 *)(from_ptr + first));
+                               break;
+                       case 8:
+                               *(__be64 *)(to_ptr + first) =
+                                     cpu_to_be64(*(__u64 *)(from_ptr + first));
+                               break;
+                       default:
+                               ASSERT(0);
+                       }
+               }
+               fields &= ~(1LL << f);
+       }
+ }
+ static int
+ xfs_sb_verify(
+       struct xfs_buf  *bp,
+       bool            check_version)
+ {
+       struct xfs_mount *mp = bp->b_target->bt_mount;
+       struct xfs_sb   sb;
+       xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp));
+       /*
+        * Only check the in-progress field for the primary superblock, as
+        * mkfs.xfs doesn't clear it from secondary superblocks.
+        */
+       return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
+                                    check_version);
+ }
+ /*
+  * If the superblock has the CRC feature bit set or the CRC field is non-zero,
+  * check that the CRC is valid.  We check for a non-zero CRC field because a
+  * single bit error could clear the feature bit while unused parts of the
+  * superblock are supposed to be zero. Hence a non-zero crc field indicates
+  * that we've potentially lost a feature bit and should check the CRC anyway.
+  *
+  * However, past bugs (i.e. in growfs) left non-zeroed regions beyond the
+  * last field in V4 secondary superblocks.  So for secondary superblocks,
+  * we are more forgiving, and ignore CRC failures if the primary doesn't
+  * indicate that the fs version is V5.
+  */
+ static void
+ xfs_sb_read_verify(
+       struct xfs_buf  *bp)
+ {
+       struct xfs_mount *mp = bp->b_target->bt_mount;
+       struct xfs_dsb  *dsb = XFS_BUF_TO_SBP(bp);
+       int             error;
+       /*
+        * Open-code the version check to avoid converting the entire
+        * superblock from disk order just to check the version number.
+        */
+       if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC) &&
+           (((be16_to_cpu(dsb->sb_versionnum) & XFS_SB_VERSION_NUMBITS) ==
+                                               XFS_SB_VERSION_5) ||
+            dsb->sb_crc != 0)) {
+               if (!xfs_buf_verify_cksum(bp, XFS_SB_CRC_OFF)) {
+                       /* Only fail bad secondaries on a known V5 filesystem */
+                       if (bp->b_bn == XFS_SB_DADDR ||
+                           xfs_sb_version_hascrc(&mp->m_sb)) {
+                               error = -EFSBADCRC;
+                               goto out_error;
+                       }
+               }
+       }
+       error = xfs_sb_verify(bp, true);
+ out_error:
+       if (error) {
+               xfs_buf_ioerror(bp, error);
+               if (error == -EFSCORRUPTED || error == -EFSBADCRC)
+                       xfs_verifier_error(bp);
+       }
+ }
+ /*
+  * We may be probed for a filesystem match, so we may not want to emit
+  * messages when the superblock buffer is not actually an XFS superblock.
+  * If we find an XFS superblock, then run a normal, noisy mount because we are
+  * really going to mount it and want to know about errors.
+  */
+ static void
+ xfs_sb_quiet_read_verify(
+       struct xfs_buf  *bp)
+ {
+       struct xfs_dsb  *dsb = XFS_BUF_TO_SBP(bp);
+       if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) {
+               /* XFS filesystem, verify noisily! */
+               xfs_sb_read_verify(bp);
+               return;
+       }
+       /* quietly fail */
+       xfs_buf_ioerror(bp, -EWRONGFS);
+ }
+ static void
+ xfs_sb_write_verify(
+       struct xfs_buf          *bp)
+ {
+       struct xfs_mount        *mp = bp->b_target->bt_mount;
+       struct xfs_buf_log_item *bip = bp->b_fspriv;
+       int                     error;
+       error = xfs_sb_verify(bp, false);
+       if (error) {
+               xfs_buf_ioerror(bp, error);
+               xfs_verifier_error(bp);
+               return;
+       }
+       if (!xfs_sb_version_hascrc(&mp->m_sb))
+               return;
+       if (bip)
+               XFS_BUF_TO_SBP(bp)->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+       xfs_buf_update_cksum(bp, XFS_SB_CRC_OFF);
+ }
+ const struct xfs_buf_ops xfs_sb_buf_ops = {
+       .verify_read = xfs_sb_read_verify,
+       .verify_write = xfs_sb_write_verify,
+ };
+ const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
+       .verify_read = xfs_sb_quiet_read_verify,
+       .verify_write = xfs_sb_write_verify,
+ };
+ /*
+  * xfs_sb_mount_common
+  *
+  * Mount initialization code establishing various mount
+  * fields from the superblock associated with the given
+  * mount structure.
+  */
+ void
+ xfs_sb_mount_common(
+       struct xfs_mount *mp,
+       struct xfs_sb   *sbp)
+ {
+       mp->m_agfrotor = mp->m_agirotor = 0;
+       spin_lock_init(&mp->m_agirotor_lock);
+       mp->m_maxagi = mp->m_sb.sb_agcount;
+       mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
+       mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
+       mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
+       mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
+       mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
+       mp->m_blockmask = sbp->sb_blocksize - 1;
+       mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
+       mp->m_blockwmask = mp->m_blockwsize - 1;
+       mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
+       mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
+       mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
+       mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;
+       mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
+       mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
+       mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
+       mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;
+       mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
+       mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
+       mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
+       mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;
+       mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
+       mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
+                                       sbp->sb_inopblock);
+       mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
+ }
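+ /*
+  * Worked example of the shift arithmetic above, assuming a common
+  * 4096 byte block, 512 byte sector geometry (sb_blocklog = 12,
+  * sb_sectlog = 9, XFS_NBBYLOG = 3, BBSHIFT = 9):
+  *
+  *	m_blkbit_log = 12 + 3 = 15	(2^15 bits per block)
+  *	m_blkbb_log  = 12 - 9 =  3	(2^3 basic blocks per block)
+  *	m_sectbb_log =  9 - 9 =  0	(one basic block per sector)
+  */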
+ /*
+  * xfs_initialize_perag_data
+  *
+  * Read in each per-ag structure so we can count up the number of
+  * allocated inodes, free inodes and used filesystem blocks as this
+  * information is no longer persistent in the superblock. Once we have
+  * this information, write it into the in-core superblock structure.
+  */
+ int
+ xfs_initialize_perag_data(
+       struct xfs_mount *mp,
+       xfs_agnumber_t  agcount)
+ {
+       xfs_agnumber_t  index;
+       xfs_perag_t     *pag;
+       xfs_sb_t        *sbp = &mp->m_sb;
+       uint64_t        ifree = 0;
+       uint64_t        ialloc = 0;
+       uint64_t        bfree = 0;
+       uint64_t        bfreelst = 0;
+       uint64_t        btree = 0;
+       int             error;
+       for (index = 0; index < agcount; index++) {
+               /*
+                * Read the agf, then the agi. This gets us
+                * all the information we need and populates the
+                * per-ag structures for us.
+                */
+               error = xfs_alloc_pagf_init(mp, NULL, index, 0);
+               if (error)
+                       return error;
+               error = xfs_ialloc_pagi_init(mp, NULL, index);
+               if (error)
+                       return error;
+               pag = xfs_perag_get(mp, index);
+               ifree += pag->pagi_freecount;
+               ialloc += pag->pagi_count;
+               bfree += pag->pagf_freeblks;
+               bfreelst += pag->pagf_flcount;
+               btree += pag->pagf_btreeblks;
+               xfs_perag_put(pag);
+       }
+       /*
+        * Overwrite incore superblock counters with just-read data
+        */
+       spin_lock(&mp->m_sb_lock);
+       sbp->sb_ifree = ifree;
+       sbp->sb_icount = ialloc;
+       sbp->sb_fdblocks = bfree + bfreelst + btree;
+       spin_unlock(&mp->m_sb_lock);
+       /* Fixup the per-cpu counters as well. */
+       xfs_icsb_reinit_counters(mp);
+       return 0;
+ }
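+ /*
+  * Note on the sb_fdblocks sum above: per-AG free space is tracked in
+  * three places (free extents in pagf_freeblks, AG free list blocks in
+  * pagf_flcount, and free space btree blocks in pagf_btreeblks), so all
+  * three are added into the global counter.
+  */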
+ /*
+  * xfs_mod_sb() can be used to copy arbitrary changes to the
+  * in-core superblock into the superblock buffer to be logged.
+  * It does not provide the higher level of locking that is
+  * needed to protect the in-core superblock from concurrent
+  * access.
+  */
+ void
+ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
+ {
+       xfs_buf_t       *bp;
+       int             first;
+       int             last;
+       xfs_mount_t     *mp;
+       xfs_sb_field_t  f;
+       ASSERT(fields);
+       if (!fields)
+               return;
+       mp = tp->t_mountp;
+       bp = xfs_trans_getsb(tp, mp, 0);
+       first = sizeof(xfs_sb_t);
+       last = 0;
+       /* translate/copy */
+       xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);
+       /* find modified range */
+       f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
+       ASSERT((1LL << f) & XFS_SB_MOD_BITS);
+       last = xfs_sb_info[f + 1].offset - 1;
+       f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
+       ASSERT((1LL << f) & XFS_SB_MOD_BITS);
+       first = xfs_sb_info[f].offset;
+       xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
+       xfs_trans_log_buf(tp, bp, first, last);
+ }
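+ /*
+  * Usage sketch (illustrative only): a transaction that has changed the
+  * in-core inode counters would log just those fields with:
+  *
+  *	xfs_mod_sb(tp, XFS_SB_ICOUNT | XFS_SB_IFREE);
+  *
+  * Only the contiguous byte range spanned by the lowest and highest set
+  * field bits is logged against the superblock buffer.
+  */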
Simple merge
Simple merge