/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"

/*
 * Prototypes for internal functions.
 */

STATIC void xfs_alloc_log_block(xfs_trans_t *, xfs_buf_t *, int);
STATIC void xfs_alloc_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
STATIC void xfs_alloc_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
STATIC void xfs_alloc_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
STATIC int xfs_alloc_newroot(xfs_btree_cur_t *, int *);

/*
 * Internal functions.
 */

/*
 * Single level of the xfs_alloc_delete record deletion routine.
 * Delete record pointed to by cur/level.
 * Remove the record from its block then rebalance the tree.
 * Return, in *stat: 0 for fail, 1 for done, 2 to go on to the next level.
 */
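/*
 * If the block underflows, the code below first tries to borrow a single
 * record from a sibling (xfs_btree_lshift/xfs_btree_rshift on a duplicated
 * cursor); only when neither sibling can spare one is the block merged into
 * a neighbor and *stat set to 2, telling the caller to delete the
 * corresponding key/ptr at the next level up.
 */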
STATIC int				/* error */
xfs_alloc_delrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level,	/* level removing record from */
	int			*stat)	/* fail/done/go-on */
{
	xfs_agf_t		*agf;	/* allocation group freelist header */
	xfs_alloc_block_t	*block;	/* btree block record/key lives in */
	xfs_agblock_t		bno;	/* btree block number */
	xfs_buf_t		*bp;	/* buffer for block */
	int			error;	/* error return value */
	int			i;	/* loop index */
	xfs_alloc_key_t		key;	/* kp points here if block is level 0 */
	xfs_agblock_t		lbno;	/* left block's block number */
	xfs_buf_t		*lbp;	/* left block's buffer pointer */
	xfs_alloc_block_t	*left;	/* left btree block */
	xfs_alloc_key_t		*lkp=NULL;	/* left block key pointer */
	xfs_alloc_ptr_t		*lpp=NULL;	/* left block address pointer */
	int			lrecs=0;	/* number of records in left block */
	xfs_alloc_rec_t		*lrp;	/* left block record pointer */
	xfs_mount_t		*mp;	/* mount structure */
	int			ptr;	/* index in btree block for this rec */
	xfs_agblock_t		rbno;	/* right block's block number */
	xfs_buf_t		*rbp;	/* right block's buffer pointer */
	xfs_alloc_block_t	*right;	/* right btree block */
	xfs_alloc_key_t		*rkp;	/* right block key pointer */
	xfs_alloc_ptr_t		*rpp;	/* right block address pointer */
	int			rrecs=0;	/* number of records in right block */
	int			numrecs;
	xfs_alloc_rec_t		*rrp;	/* right block record pointer */
	xfs_btree_cur_t		*tcur;	/* temporary btree cursor */

	/*
	 * Get the index of the entry being deleted, check for nothing there.
	 */
	ptr = cur->bc_ptrs[level];
	if (ptr == 0) {
		*stat = 0;
		return 0;
	}
	/*
	 * Get the buffer & block containing the record or key/ptr.
	 */
	bp = cur->bc_bufs[level];
	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
#ifdef DEBUG
	if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
		return error;
#endif
	/*
	 * Fail if we're off the end of the block.
	 */
	numrecs = be16_to_cpu(block->bb_numrecs);
	if (ptr > numrecs) {
		*stat = 0;
		return 0;
	}
	XFS_STATS_INC(xs_abt_delrec);
	/*
	 * It's a nonleaf.  Excise the key and ptr being deleted, by
	 * sliding the entries past them down one.
	 * Log the changed areas of the block.
	 */
	if (level > 0) {
		lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
		lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
#ifdef DEBUG
		for (i = ptr; i < numrecs; i++) {
			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
				return error;
		}
#endif
		if (ptr < numrecs) {
			memmove(&lkp[ptr - 1], &lkp[ptr],
				(numrecs - ptr) * sizeof(*lkp));
			memmove(&lpp[ptr - 1], &lpp[ptr],
				(numrecs - ptr) * sizeof(*lpp));
			xfs_alloc_log_ptrs(cur, bp, ptr, numrecs - 1);
			xfs_alloc_log_keys(cur, bp, ptr, numrecs - 1);
		}
	}
	/*
	 * It's a leaf.  Excise the record being deleted, by sliding the
	 * entries past it down one.  Log the changed areas of the block.
	 */
	else {
		lrp = XFS_ALLOC_REC_ADDR(block, 1, cur);
		if (ptr < numrecs) {
			memmove(&lrp[ptr - 1], &lrp[ptr],
				(numrecs - ptr) * sizeof(*lrp));
			xfs_alloc_log_recs(cur, bp, ptr, numrecs - 1);
		}
		/*
		 * If it's the first record in the block, we'll need a key
		 * structure to pass up to the next level (updkey).
		 */
		if (ptr == 1) {
			key.ar_startblock = lrp->ar_startblock;
			key.ar_blockcount = lrp->ar_blockcount;
			lkp = &key;
		}
	}
	/*
	 * Decrement and log the number of entries in the block.
	 */
	numrecs--;
	block->bb_numrecs = cpu_to_be16(numrecs);
	xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
	/*
	 * See if the longest free extent in the allocation group was
	 * changed by this operation.  True if it's the by-size btree, and
	 * this is the leaf level, and there is no right sibling block,
	 * and this was the last record.
	 */
	agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	mp = cur->bc_mp;

	if (level == 0 &&
	    cur->bc_btnum == XFS_BTNUM_CNT &&
	    be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
	    ptr > numrecs) {
		ASSERT(ptr == numrecs + 1);
		/*
		 * There are still records in the block.  Grab the size
		 * from the last one.
		 */
		if (numrecs) {
			rrp = XFS_ALLOC_REC_ADDR(block, numrecs, cur);
			agf->agf_longest = rrp->ar_blockcount;
		}
		/*
		 * No free extents left.
		 */
		else
			agf->agf_longest = 0;
		mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest =
			be32_to_cpu(agf->agf_longest);
		xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
			XFS_AGF_LONGEST);
	}
	/*
	 * Is this the root level?  If so, we're almost done.
	 */
	if (level == cur->bc_nlevels - 1) {
		/*
		 * If this is the root level,
		 * and there's only one entry left,
		 * and it's NOT the leaf level,
		 * then we can get rid of this level.
		 */
		if (numrecs == 1 && level > 0) {
			/*
			 * lpp is still set to the first pointer in the block.
			 * Make it the new root of the btree.
			 */
			bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]);
			agf->agf_roots[cur->bc_btnum] = *lpp;
			be32_add_cpu(&agf->agf_levels[cur->bc_btnum], -1);
			mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--;
			/*
			 * Put this buffer/block on the ag's freelist.
			 */
			error = xfs_alloc_put_freelist(cur->bc_tp,
					cur->bc_private.a.agbp, NULL, bno, 1);
			if (error)
				return error;
			/*
			 * Since blocks move to the free list without the
			 * coordination used in xfs_bmap_finish, we can't allow
			 * block to be available for reallocation and
			 * non-transaction writing (user data) until we know
			 * that the transaction that moved it to the free list
			 * is permanently on disk.  We track the blocks by
			 * declaring these blocks as "busy"; the busy list is
			 * maintained on a per-ag basis and each transaction
			 * records which entries should be removed when the
			 * iclog commits to disk.  If a busy block is
			 * allocated, the iclog is pushed up to the LSN
			 * that freed the block.
			 */
			xfs_alloc_mark_busy(cur->bc_tp,
				be32_to_cpu(agf->agf_seqno), bno, 1);

			xfs_trans_agbtree_delta(cur->bc_tp, -1);
			xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
				XFS_AGF_ROOTS | XFS_AGF_LEVELS);
			/*
			 * Update the cursor so there's one fewer level.
			 */
			xfs_btree_setbuf(cur, level, NULL);
			cur->bc_nlevels--;
		} else if (level > 0 &&
			   (error = xfs_btree_decrement(cur, level, &i)))
			return error;
		*stat = 1;
		return 0;
	}
	/*
	 * If we deleted the leftmost entry in the block, update the
	 * key values above us in the tree.
	 */
	if (ptr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)lkp, level + 1)))
		return error;
	/*
	 * If the number of records remaining in the block is at least
	 * the minimum, we're done.
	 */
	if (numrecs >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
		if (level > 0 && (error = xfs_btree_decrement(cur, level, &i)))
			return error;
		*stat = 1;
		return 0;
	}
	/*
	 * Otherwise, we have to move some records around to keep the
	 * tree balanced.  Look at the left and right sibling blocks to
	 * see if we can re-balance by moving only one record.
	 */
	rbno = be32_to_cpu(block->bb_rightsib);
	lbno = be32_to_cpu(block->bb_leftsib);
	bno = NULLAGBLOCK;
	ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK);
	/*
	 * Duplicate the cursor so our btree manipulations here won't
	 * disrupt the next level up.
	 */
	if ((error = xfs_btree_dup_cursor(cur, &tcur)))
		return error;
	/*
	 * If there's a right sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (rbno != NULLAGBLOCK) {
		/*
		 * Move the temp cursor to the last entry in the next block.
		 * Actually any entry but the first would suffice.
		 */
		i = xfs_btree_lastrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_increment(tcur, level, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		i = xfs_btree_lastrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Grab a pointer to the block.
		 */
		rbp = tcur->bc_bufs[level];
		right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
#ifdef DEBUG
		if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
			goto error0;
#endif
		/*
		 * Grab the current block number, for future use.
		 */
		bno = be32_to_cpu(right->bb_leftsib);
		/*
		 * If right block is full enough so that removing one entry
		 * won't make it too empty, and left-shifting an entry out
		 * of right to us works, we're done.
		 */
		if (be16_to_cpu(right->bb_numrecs) - 1 >=
		    XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
			if ((error = xfs_btree_lshift(tcur, level, &i)))
				goto error0;
			if (i) {
				ASSERT(be16_to_cpu(block->bb_numrecs) >=
				       XFS_ALLOC_BLOCK_MINRECS(level, cur));
				xfs_btree_del_cursor(tcur,
						     XFS_BTREE_NOERROR);
				if (level > 0 &&
				    (error = xfs_btree_decrement(cur, level,
					    &i)))
					return error;
				*stat = 1;
				return 0;
			}
		}
		/*
		 * Otherwise, grab the number of records in right for
		 * future reference, and fix up the temp cursor to point
		 * to our block again (last record).
		 */
		rrecs = be16_to_cpu(right->bb_numrecs);
		if (lbno != NULLAGBLOCK) {
			i = xfs_btree_firstrec(tcur, level);
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			if ((error = xfs_btree_decrement(tcur, level, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		}
	}
	/*
	 * If there's a left sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (lbno != NULLAGBLOCK) {
		/*
		 * Move the temp cursor to the first entry in the
		 * previous block.
		 */
		i = xfs_btree_firstrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_decrement(tcur, level, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		xfs_btree_firstrec(tcur, level);
		/*
		 * Grab a pointer to the block.
		 */
		lbp = tcur->bc_bufs[level];
		left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
#ifdef DEBUG
		if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
			goto error0;
#endif
		/*
		 * Grab the current block number, for future use.
		 */
		bno = be32_to_cpu(left->bb_rightsib);
		/*
		 * If left block is full enough so that removing one entry
		 * won't make it too empty, and right-shifting an entry out
		 * of left to us works, we're done.
		 */
		if (be16_to_cpu(left->bb_numrecs) - 1 >=
		    XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
			if ((error = xfs_btree_rshift(tcur, level, &i)))
				goto error0;
			if (i) {
				ASSERT(be16_to_cpu(block->bb_numrecs) >=
				       XFS_ALLOC_BLOCK_MINRECS(level, cur));
				xfs_btree_del_cursor(tcur,
						     XFS_BTREE_NOERROR);
				if (level == 0)
					cur->bc_ptrs[0]++;
				*stat = 1;
				return 0;
			}
		}
		/*
		 * Otherwise, grab the number of records in right for
		 * future reference.
		 */
		lrecs = be16_to_cpu(left->bb_numrecs);
	}
	/*
	 * Delete the temp cursor, we're done with it.
	 */
	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
	/*
	 * If here, we need to do a join to keep the tree balanced.
	 */
	ASSERT(bno != NULLAGBLOCK);
	/*
	 * See if we can join with the left neighbor block.
	 */
	if (lbno != NULLAGBLOCK &&
	    lrecs + numrecs <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
		/*
		 * Set "right" to be the starting block,
		 * "left" to be the left neighbor.
		 */
		rbno = bno;
		right = block;
		rrecs = be16_to_cpu(right->bb_numrecs);
		rbp = bp;
		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
				cur->bc_private.a.agno, lbno, 0, &lbp,
				XFS_ALLOC_BTREE_REF)))
			return error;
		left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
		lrecs = be16_to_cpu(left->bb_numrecs);
		if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
			return error;
	}
	/*
	 * If that won't work, see if we can join with the right neighbor block.
	 */
	else if (rbno != NULLAGBLOCK &&
		 rrecs + numrecs <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
		/*
		 * Set "left" to be the starting block,
		 * "right" to be the right neighbor.
		 */
		lbno = bno;
		left = block;
		lrecs = be16_to_cpu(left->bb_numrecs);
		lbp = bp;
		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
				cur->bc_private.a.agno, rbno, 0, &rbp,
				XFS_ALLOC_BTREE_REF)))
			return error;
		right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
		rrecs = be16_to_cpu(right->bb_numrecs);
		if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
			return error;
	}
	/*
	 * Otherwise, we can't fix the imbalance.
	 * Just return.  This is probably a logic error, but it's not fatal.
	 */
	else {
		if (level > 0 && (error = xfs_btree_decrement(cur, level, &i)))
			return error;
		*stat = 1;
		return 0;
	}
	/*
	 * We're now going to join "left" and "right" by moving all the stuff
	 * in "right" to "left" and deleting "right".
	 */
	if (level > 0) {
		/*
		 * It's a non-leaf.  Move keys and pointers.
		 */
		lkp = XFS_ALLOC_KEY_ADDR(left, lrecs + 1, cur);
		lpp = XFS_ALLOC_PTR_ADDR(left, lrecs + 1, cur);
		rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
		rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
#ifdef DEBUG
		for (i = 0; i < rrecs; i++) {
			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level)))
				return error;
		}
#endif
		memcpy(lkp, rkp, rrecs * sizeof(*lkp));
		memcpy(lpp, rpp, rrecs * sizeof(*lpp));
		xfs_alloc_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
		xfs_alloc_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
	} else {
		/*
		 * It's a leaf.  Move records.
		 */
		lrp = XFS_ALLOC_REC_ADDR(left, lrecs + 1, cur);
		rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
		memcpy(lrp, rrp, rrecs * sizeof(*lrp));
		xfs_alloc_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
	}
	/*
	 * If we joined with the left neighbor, set the buffer in the
	 * cursor to the left block, and fix up the index.
	 */
	if (bp != lbp) {
		xfs_btree_setbuf(cur, level, lbp);
		cur->bc_ptrs[level] += lrecs;
	}
	/*
	 * If we joined with the right neighbor and there's a level above
	 * us, increment the cursor at that level.
	 */
	else if (level + 1 < cur->bc_nlevels &&
		 (error = xfs_btree_increment(cur, level + 1, &i)))
		return error;
	/*
	 * Fix up the number of records in the surviving block.
	 */
	lrecs += rrecs;
	left->bb_numrecs = cpu_to_be16(lrecs);
	/*
	 * Fix up the right block pointer in the surviving block, and log it.
	 */
	left->bb_rightsib = right->bb_rightsib;
	xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
	/*
	 * If there is a right sibling now, make it point to the
	 * remaining block.
	 */
	if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) {
		xfs_alloc_block_t	*rrblock;
		xfs_buf_t		*rrbp;

		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
				cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0,
				&rrbp, XFS_ALLOC_BTREE_REF)))
			return error;
		rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp);
		if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
			return error;
		rrblock->bb_leftsib = cpu_to_be32(lbno);
		xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB);
	}
	/*
	 * Free the deleting block by putting it on the freelist.
	 */
	error = xfs_alloc_put_freelist(cur->bc_tp,
			cur->bc_private.a.agbp, NULL, rbno, 1);
	if (error)
		return error;
	/*
	 * Since blocks move to the free list without the coordination
	 * used in xfs_bmap_finish, we can't allow block to be available
	 * for reallocation and non-transaction writing (user data)
	 * until we know that the transaction that moved it to the free
	 * list is permanently on disk.  We track the blocks by declaring
	 * these blocks as "busy"; the busy list is maintained on a
	 * per-ag basis and each transaction records which entries
	 * should be removed when the iclog commits to disk.  If a
	 * busy block is allocated, the iclog is pushed up to the
	 * LSN that freed the block.
	 */
	xfs_alloc_mark_busy(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);

	/*
	 * Adjust the current level's cursor so that we're left referring
	 * to the right node, after we're done.
	 * If this leaves the ptr value 0 our caller will fix it up.
	 */
	if (level > 0)
		cur->bc_ptrs[level]--;
	/*
	 * Return value means the next level up has something to do.
	 */
	*stat = 2;
	return 0;

error0:
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Insert one record/level.  Return information to the caller
 * allowing the next level up to proceed if necessary.
 */
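/*
 * On a split, the new sibling's block number is handed back in *bnop, its
 * first record in *recp, and a cursor for the new block in *curp; the caller
 * (xfs_alloc_insert) then re-inserts that record at level + 1.
 */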
STATIC int				/* error */
xfs_alloc_insrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level,	/* level to insert record at */
	xfs_agblock_t		*bnop,	/* i/o: block number inserted */
	xfs_alloc_rec_t		*recp,	/* i/o: record data inserted */
	xfs_btree_cur_t		**curp,	/* output: new cursor replacing cur */
	int			*stat)	/* output: success/failure */
{
	xfs_agf_t		*agf;	/* allocation group freelist header */
	xfs_alloc_block_t	*block;	/* btree block record/key lives in */
	xfs_buf_t		*bp;	/* buffer for block */
	int			error;	/* error return value */
	int			i;	/* loop index */
	xfs_alloc_key_t		key;	/* key value being inserted */
	xfs_alloc_key_t		*kp;	/* pointer to btree keys */
	xfs_agblock_t		nbno;	/* block number of allocated block */
	xfs_btree_cur_t		*ncur;	/* new cursor to be used at next lvl */
	xfs_alloc_key_t		nkey;	/* new key value, from split */
	xfs_alloc_rec_t		nrec;	/* new record value, for caller */
	int			numrecs;
	int			optr;	/* old ptr value */
	xfs_alloc_ptr_t		*pp;	/* pointer to btree addresses */
	int			ptr;	/* index in btree block for this rec */
	xfs_alloc_rec_t		*rp;	/* pointer to btree records */

	ASSERT(be32_to_cpu(recp->ar_blockcount) > 0);

	/*
	 * GCC doesn't understand the (arguably complex) control flow in
	 * this function and complains about uninitialized structure fields
	 * without this.
	 */
	memset(&nrec, 0, sizeof(nrec));

	/*
	 * If we made it to the root level, allocate a new root block
	 * and we're done.
	 */
	if (level >= cur->bc_nlevels) {
		XFS_STATS_INC(xs_abt_insrec);
		if ((error = xfs_alloc_newroot(cur, &i)))
			return error;
		*bnop = NULLAGBLOCK;
		*stat = i;
		return 0;
	}
	/*
	 * Make a key out of the record data to be inserted, and save it.
	 */
	key.ar_startblock = recp->ar_startblock;
	key.ar_blockcount = recp->ar_blockcount;
	optr = ptr = cur->bc_ptrs[level];
	/*
	 * If we're off the left edge, return failure.
	 */
	if (ptr == 0) {
		*stat = 0;
		return 0;
	}
	XFS_STATS_INC(xs_abt_insrec);
	/*
	 * Get pointers to the btree buffer and block.
	 */
	bp = cur->bc_bufs[level];
	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
	numrecs = be16_to_cpu(block->bb_numrecs);
#ifdef DEBUG
	if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
		return error;
	/*
	 * Check that the new entry is being inserted in the right place.
	 */
	if (ptr <= numrecs) {
		if (level == 0) {
			rp = XFS_ALLOC_REC_ADDR(block, ptr, cur);
			xfs_btree_check_rec(cur->bc_btnum, recp, rp);
		} else {
			kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur);
			xfs_btree_check_key(cur->bc_btnum, &key, kp);
		}
	}
#endif
	nbno = NULLAGBLOCK;
	ncur = NULL;
1da177e4
LT
675 /*
676 * If the block is full, we can't insert the new entry until we
677 * make the block un-full.
678 */
91d87232 679 if (numrecs == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
1da177e4
LT
680 /*
681 * First, try shifting an entry to the right neighbor.
682 */
9eaead51 683 if ((error = xfs_btree_rshift(cur, level, &i)))
1da177e4
LT
684 return error;
685 if (i) {
686 /* nothing */
687 }
688 /*
689 * Next, try shifting an entry to the left neighbor.
690 */
691 else {
687b890a 692 if ((error = xfs_btree_lshift(cur, level, &i)))
1da177e4
LT
693 return error;
694 if (i)
695 optr = ptr = cur->bc_ptrs[level];
696 else {
f5eb8e7c 697 union xfs_btree_ptr bno = { .s = cpu_to_be32(nbno) };
1da177e4
LT
698 /*
699 * Next, try splitting the current block in
700 * half. If this works we have to re-set our
701 * variables because we could be in a
702 * different block now.
703 */
f5eb8e7c
CH
704 if ((error = xfs_btree_split(cur, level, &bno,
705 (union xfs_btree_key *)&nkey,
706 &ncur, &i)))
1da177e4 707 return error;
f5eb8e7c 708 nbno = be32_to_cpu(bno.s);
1da177e4
LT
709 if (i) {
710 bp = cur->bc_bufs[level];
711 block = XFS_BUF_TO_ALLOC_BLOCK(bp);
712#ifdef DEBUG
713 if ((error =
714 xfs_btree_check_sblock(cur,
715 block, level, bp)))
716 return error;
717#endif
718 ptr = cur->bc_ptrs[level];
16259e7d
CH
719 nrec.ar_startblock = nkey.ar_startblock;
720 nrec.ar_blockcount = nkey.ar_blockcount;
1da177e4
LT
721 }
722 /*
723 * Otherwise the insert fails.
724 */
725 else {
726 *stat = 0;
727 return 0;
728 }
729 }
730 }
731 }
732 /*
733 * At this point we know there's room for our new entry in the block
734 * we're pointing at.
735 */
91d87232 736 numrecs = be16_to_cpu(block->bb_numrecs);
1da177e4
LT
737 if (level > 0) {
738 /*
739 * It's a non-leaf entry. Make a hole for the new data
740 * in the key and ptr regions of the block.
741 */
742 kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
743 pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
744#ifdef DEBUG
91d87232 745 for (i = numrecs; i >= ptr; i--) {
16259e7d 746 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level)))
1da177e4
LT
747 return error;
748 }
749#endif
750 memmove(&kp[ptr], &kp[ptr - 1],
91d87232 751 (numrecs - ptr + 1) * sizeof(*kp));
1da177e4 752 memmove(&pp[ptr], &pp[ptr - 1],
91d87232 753 (numrecs - ptr + 1) * sizeof(*pp));
1da177e4
LT
754#ifdef DEBUG
755 if ((error = xfs_btree_check_sptr(cur, *bnop, level)))
756 return error;
757#endif
758 /*
759 * Now stuff the new data in, bump numrecs and log the new data.
760 */
761 kp[ptr - 1] = key;
16259e7d 762 pp[ptr - 1] = cpu_to_be32(*bnop);
91d87232
ES
763 numrecs++;
764 block->bb_numrecs = cpu_to_be16(numrecs);
765 xfs_alloc_log_keys(cur, bp, ptr, numrecs);
766 xfs_alloc_log_ptrs(cur, bp, ptr, numrecs);
1da177e4 767#ifdef DEBUG
91d87232 768 if (ptr < numrecs)
1da177e4
LT
769 xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1,
770 kp + ptr);
771#endif
772 } else {
773 /*
774 * It's a leaf entry. Make a hole for the new record.
775 */
776 rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
777 memmove(&rp[ptr], &rp[ptr - 1],
91d87232 778 (numrecs - ptr + 1) * sizeof(*rp));
1da177e4
LT
779 /*
780 * Now stuff the new record in, bump numrecs
781 * and log the new data.
782 */
c38e5e84 783 rp[ptr - 1] = *recp;
91d87232
ES
784 numrecs++;
785 block->bb_numrecs = cpu_to_be16(numrecs);
786 xfs_alloc_log_recs(cur, bp, ptr, numrecs);
1da177e4 787#ifdef DEBUG
91d87232 788 if (ptr < numrecs)
1da177e4
LT
789 xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1,
790 rp + ptr);
791#endif
792 }
793 /*
794 * Log the new number of records in the btree header.
795 */
796 xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
797 /*
798 * If we inserted at the start of a block, update the parents' keys.
799 */
38bb7423 800 if (optr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, level + 1)))
1da177e4
LT
801 return error;
802 /*
803 * Look to see if the longest extent in the allocation group
804 * needs to be updated.
805 */
806
807 agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
808 if (level == 0 &&
809 cur->bc_btnum == XFS_BTNUM_CNT &&
16259e7d
CH
810 be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
811 be32_to_cpu(recp->ar_blockcount) > be32_to_cpu(agf->agf_longest)) {
1da177e4
LT
812 /*
813 * If this is a leaf in the by-size btree and there
814 * is no right sibling block and this block is bigger
815 * than the previous longest block, update it.
816 */
16259e7d
CH
817 agf->agf_longest = recp->ar_blockcount;
818 cur->bc_mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest
819 = be32_to_cpu(recp->ar_blockcount);
1da177e4
LT
820 xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
821 XFS_AGF_LONGEST);
822 }
823 /*
824 * Return the new block number, if any.
825 * If there is one, give back a record value and a cursor too.
826 */
827 *bnop = nbno;
828 if (nbno != NULLAGBLOCK) {
c38e5e84
CH
829 *recp = nrec;
830 *curp = ncur;
1da177e4
LT
831 }
832 *stat = 1;
833 return 0;
834}

/*
 * Log header fields from a btree block.
 */
STATIC void
xfs_alloc_log_block(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_buf_t		*bp,	/* buffer containing btree block */
	int			fields)	/* mask of fields: XFS_BB_... */
{
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	static const short	offsets[] = {	/* table of offsets */
		offsetof(xfs_alloc_block_t, bb_magic),
		offsetof(xfs_alloc_block_t, bb_level),
		offsetof(xfs_alloc_block_t, bb_numrecs),
		offsetof(xfs_alloc_block_t, bb_leftsib),
		offsetof(xfs_alloc_block_t, bb_rightsib),
		sizeof(xfs_alloc_block_t)
	};

	xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, &last);
	xfs_trans_log_buf(tp, bp, first, last);
}

/*
 * Log keys from a btree block (nonleaf).
 */
STATIC void
xfs_alloc_log_keys(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	xfs_buf_t		*bp,	/* buffer containing btree block */
	int			kfirst,	/* index of first key to log */
	int			klast)	/* index of last key to log */
{
	xfs_alloc_block_t	*block;	/* btree block to log from */
	int			first;	/* first byte offset logged */
	xfs_alloc_key_t		*kp;	/* key pointer in btree block */
	int			last;	/* last byte offset logged */

	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
	kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
	first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block);
	last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block);
	xfs_trans_log_buf(cur->bc_tp, bp, first, last);
}

/*
 * Log block pointer fields from a btree block (nonleaf).
 */
STATIC void
xfs_alloc_log_ptrs(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	xfs_buf_t		*bp,	/* buffer containing btree block */
	int			pfirst,	/* index of first pointer to log */
	int			plast)	/* index of last pointer to log */
{
	xfs_alloc_block_t	*block;	/* btree block to log from */
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	xfs_alloc_ptr_t		*pp;	/* block-pointer pointer in btree blk */

	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
	pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
	first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block);
	last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block);
	xfs_trans_log_buf(cur->bc_tp, bp, first, last);
}

/*
 * Log records from a btree block (leaf).
 */
STATIC void
xfs_alloc_log_recs(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	xfs_buf_t		*bp,	/* buffer containing btree block */
	int			rfirst,	/* index of first record to log */
	int			rlast)	/* index of last record to log */
{
	xfs_alloc_block_t	*block;	/* btree block to log from */
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	xfs_alloc_rec_t		*rp;	/* record pointer for btree block */


	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
	rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
#ifdef DEBUG
	{
		xfs_agf_t	*agf;
		xfs_alloc_rec_t	*p;

		agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
		for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++)
			ASSERT(be32_to_cpu(p->ar_startblock) +
			       be32_to_cpu(p->ar_blockcount) <=
			       be32_to_cpu(agf->agf_length));
	}
#endif
	first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block);
	last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block);
	xfs_trans_log_buf(cur->bc_tp, bp, first, last);
}

/*
 * Allocate a new root block, fill it in.
 */
STATIC int				/* error */
xfs_alloc_newroot(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*stat)	/* success/failure */
{
	int			error;	/* error return value */
	xfs_agblock_t		lbno;	/* left block number */
	xfs_buf_t		*lbp;	/* left btree buffer */
	xfs_alloc_block_t	*left;	/* left btree block */
	xfs_mount_t		*mp;	/* mount structure */
	xfs_agblock_t		nbno;	/* new block number */
	xfs_buf_t		*nbp;	/* new (root) buffer */
	xfs_alloc_block_t	*new;	/* new (root) btree block */
	int			nptr;	/* new value for key index, 1 or 2 */
	xfs_agblock_t		rbno;	/* right block number */
	xfs_buf_t		*rbp;	/* right btree buffer */
	xfs_alloc_block_t	*right;	/* right btree block */

	mp = cur->bc_mp;

	ASSERT(cur->bc_nlevels < XFS_AG_MAXLEVELS(mp));
	/*
	 * Get a buffer from the freelist blocks, for the new root.
	 */
	error = xfs_alloc_get_freelist(cur->bc_tp,
			cur->bc_private.a.agbp, &nbno, 1);
	if (error)
		return error;
	/*
	 * None available, we fail.
	 */
	if (nbno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}
	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	nbp = xfs_btree_get_bufs(mp, cur->bc_tp, cur->bc_private.a.agno, nbno,
		0);
	new = XFS_BUF_TO_ALLOC_BLOCK(nbp);
	/*
	 * Set the root data in the a.g. freespace structure.
	 */
	{
		xfs_agf_t	*agf;	/* a.g. freespace header */
		xfs_agnumber_t	seqno;

		agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
		agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno);
		be32_add_cpu(&agf->agf_levels[cur->bc_btnum], 1);
		seqno = be32_to_cpu(agf->agf_seqno);
		mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++;
		xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
			XFS_AGF_ROOTS | XFS_AGF_LEVELS);
	}
	/*
	 * At the previous root level there are now two blocks: the old
	 * root, and the new block generated when it was split.
	 * We don't know which one the cursor is pointing at, so we
	 * set up variables "left" and "right" for each case.
	 */
	lbp = cur->bc_bufs[cur->bc_nlevels - 1];
	left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
#ifdef DEBUG
	if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp)))
		return error;
#endif
	if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) {
		/*
		 * Our block is left, pick up the right block.
		 */
		lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp));
		rbno = be32_to_cpu(left->bb_rightsib);
		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
				cur->bc_private.a.agno, rbno, 0, &rbp,
				XFS_ALLOC_BTREE_REF)))
			return error;
		right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
		if ((error = xfs_btree_check_sblock(cur, right,
				cur->bc_nlevels - 1, rbp)))
			return error;
		nptr = 1;
	} else {
		/*
		 * Our block is right, pick up the left block.
		 */
		rbp = lbp;
		right = left;
		rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp));
		lbno = be32_to_cpu(right->bb_leftsib);
		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
				cur->bc_private.a.agno, lbno, 0, &lbp,
				XFS_ALLOC_BTREE_REF)))
			return error;
		left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
		if ((error = xfs_btree_check_sblock(cur, left,
				cur->bc_nlevels - 1, lbp)))
			return error;
		nptr = 2;
	}
	/*
	 * Fill in the new block's btree header and log it.
	 */
	new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
	new->bb_level = cpu_to_be16(cur->bc_nlevels);
	new->bb_numrecs = cpu_to_be16(2);
	new->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
	new->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
	xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS);
	ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK);
	/*
	 * Fill in the key data in the new root.
	 */
	{
		xfs_alloc_key_t	*kp;	/* btree key pointer */

		kp = XFS_ALLOC_KEY_ADDR(new, 1, cur);
		if (be16_to_cpu(left->bb_level) > 0) {
			kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur);
			kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);
		} else {
			xfs_alloc_rec_t	*rp;	/* btree record pointer */

			rp = XFS_ALLOC_REC_ADDR(left, 1, cur);
			kp[0].ar_startblock = rp->ar_startblock;
			kp[0].ar_blockcount = rp->ar_blockcount;
			rp = XFS_ALLOC_REC_ADDR(right, 1, cur);
			kp[1].ar_startblock = rp->ar_startblock;
			kp[1].ar_blockcount = rp->ar_blockcount;
		}
	}
	xfs_alloc_log_keys(cur, nbp, 1, 2);
	/*
	 * Fill in the pointer data in the new root.
	 */
	{
		xfs_alloc_ptr_t	*pp;	/* btree address pointer */

		pp = XFS_ALLOC_PTR_ADDR(new, 1, cur);
		pp[0] = cpu_to_be32(lbno);
		pp[1] = cpu_to_be32(rbno);
	}
	xfs_alloc_log_ptrs(cur, nbp, 1, 2);
	/*
	 * Fix up the cursor.
	 */
	xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
	cur->bc_ptrs[cur->bc_nlevels] = nptr;
	cur->bc_nlevels++;
	*stat = 1;
	return 0;
}


/*
 * Externally visible routines.
 */

/*
 * Delete the record pointed to by cur.
 * The cursor refers to the place where the record was (could be inserted)
 * when the operation returns.
 */
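/*
 * xfs_alloc_delrec() reports a block join by setting *stat to 2, which is
 * what makes the loop below repeat at the next level up; a join can also
 * leave a bc_ptrs[] entry at zero (see the end of xfs_alloc_delrec()), which
 * the second loop fixes up by stepping the cursor back one position.
 */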
int					/* error */
xfs_alloc_delete(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*stat)	/* success/failure */
{
	int			error;	/* error return value */
	int			i;	/* result code */
	int			level;	/* btree level */

	/*
	 * Go up the tree, starting at leaf level.
	 * If 2 is returned then a join was done; go to the next level.
	 * Otherwise we are done.
	 */
	for (level = 0, i = 2; i == 2; level++) {
		if ((error = xfs_alloc_delrec(cur, level, &i)))
			return error;
	}
	if (i == 0) {
		for (level = 1; level < cur->bc_nlevels; level++) {
			if (cur->bc_ptrs[level] == 0) {
				if ((error = xfs_btree_decrement(cur, level, &i)))
					return error;
				break;
			}
		}
	}
	*stat = i;
	return 0;
}

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_alloc_get_rec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	xfs_alloc_block_t	*block;	/* btree block */
#ifdef DEBUG
	int			error;	/* error return value */
#endif
	int			ptr;	/* record number */

	ptr = cur->bc_ptrs[0];
	block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]);
#ifdef DEBUG
	if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0])))
		return error;
#endif
	/*
	 * Off the right end or left end, return failure.
	 */
	if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) {
		*stat = 0;
		return 0;
	}
	/*
	 * Point to the record and extract its data.
	 */
	{
		xfs_alloc_rec_t	*rec;	/* record data */

		rec = XFS_ALLOC_REC_ADDR(block, ptr, cur);
		*bno = be32_to_cpu(rec->ar_startblock);
		*len = be32_to_cpu(rec->ar_blockcount);
	}
	*stat = 1;
	return 0;
}

/*
 * Insert the current record at the point referenced by cur.
 * The cursor may be inconsistent on return if splits have been done.
 */
int					/* error */
xfs_alloc_insert(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*stat)	/* success/failure */
{
	int			error;	/* error return value */
	int			i;	/* result value, 0 for failure */
	int			level;	/* current level number in btree */
	xfs_agblock_t		nbno;	/* new block number (split result) */
	xfs_btree_cur_t		*ncur;	/* new cursor (split result) */
	xfs_alloc_rec_t		nrec;	/* record being inserted this level */
	xfs_btree_cur_t		*pcur;	/* previous level's cursor */

	level = 0;
	nbno = NULLAGBLOCK;
	nrec.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	nrec.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
	ncur = NULL;
	pcur = cur;
	/*
	 * Loop going up the tree, starting at the leaf level.
	 * Stop when we don't get a split block, that must mean that
	 * the insert is finished with this level.
	 */
	do {
		/*
		 * Insert nrec/nbno into this level of the tree.
		 * Note if we fail, nbno will be null.
		 */
		if ((error = xfs_alloc_insrec(pcur, level++, &nbno, &nrec, &ncur,
				&i))) {
			if (pcur != cur)
				xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
			return error;
		}
		/*
		 * See if the cursor we just used is trash.
		 * Can't trash the caller's cursor, but otherwise we should
		 * if ncur is a new cursor or we're about to be done.
		 */
		if (pcur != cur && (ncur || nbno == NULLAGBLOCK)) {
			cur->bc_nlevels = pcur->bc_nlevels;
			xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
		}
		/*
		 * If we got a new cursor, switch to it.
		 */
		if (ncur) {
			pcur = ncur;
			ncur = NULL;
		}
	} while (nbno != NULLAGBLOCK);
	*stat = i;
	return 0;
}
278d0ca1
CH
1238STATIC struct xfs_btree_cur *
1239xfs_allocbt_dup_cursor(
1240 struct xfs_btree_cur *cur)
1241{
1242 return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
1243 cur->bc_private.a.agbp, cur->bc_private.a.agno,
1244 cur->bc_btnum);
1245}
1246
f5eb8e7c
CH
1247STATIC int
1248xfs_allocbt_alloc_block(
1249 struct xfs_btree_cur *cur,
1250 union xfs_btree_ptr *start,
1251 union xfs_btree_ptr *new,
1252 int length,
1253 int *stat)
1254{
1255 int error;
1256 xfs_agblock_t bno;
1257
1258 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
1259
1260 /* Allocate the new block from the freelist. If we can't, give up. */
1261 error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
1262 &bno, 1);
1263 if (error) {
1264 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
1265 return error;
1266 }
1267
1268 if (bno == NULLAGBLOCK) {
1269 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
1270 *stat = 0;
1271 return 0;
1272 }
1273
1274 xfs_trans_agbtree_delta(cur->bc_tp, 1);
1275 new->s = cpu_to_be32(bno);
1276
1277 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
1278 *stat = 1;
1279 return 0;
1280}
1281
/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,
	int			reason)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	__be32			len;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	cur->bc_mp->m_perag[seqno].pagf_longest = be32_to_cpu(len);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}

STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	ASSERT(rec->alloc.ar_startblock != 0);

	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

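/*
 * Key comparison for the two allocation btrees: the by-block (BNO) tree is
 * ordered on ar_startblock alone, while the by-size (CNT) tree is ordered on
 * ar_blockcount with ar_startblock as the tie-breaker, which is what the
 * function below computes.
 */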
STATIC __int64_t
xfs_allocbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;
	__int64_t		diff;

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return (__int64_t)be32_to_cpu(kp->ar_startblock) -
				rec->ar_startblock;
	}

	diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

#ifdef XFS_BTREE_TRACE
ktrace_t	*xfs_allocbt_trace_buf;

STATIC void
xfs_allocbt_trace_enter(
	struct xfs_btree_cur	*cur,
	const char		*func,
	char			*s,
	int			type,
	int			line,
	__psunsigned_t		a0,
	__psunsigned_t		a1,
	__psunsigned_t		a2,
	__psunsigned_t		a3,
	__psunsigned_t		a4,
	__psunsigned_t		a5,
	__psunsigned_t		a6,
	__psunsigned_t		a7,
	__psunsigned_t		a8,
	__psunsigned_t		a9,
	__psunsigned_t		a10)
{
	ktrace_enter(xfs_allocbt_trace_buf, (void *)(__psint_t)type,
		(void *)func, (void *)s, NULL, (void *)cur,
		(void *)a0, (void *)a1, (void *)a2, (void *)a3,
		(void *)a4, (void *)a5, (void *)a6, (void *)a7,
		(void *)a8, (void *)a9, (void *)a10);
}

STATIC void
xfs_allocbt_trace_cursor(
	struct xfs_btree_cur	*cur,
	__uint32_t		*s0,
	__uint64_t		*l0,
	__uint64_t		*l1)
{
	*s0 = cur->bc_private.a.agno;
	*l0 = cur->bc_rec.a.ar_startblock;
	*l1 = cur->bc_rec.a.ar_blockcount;
}

STATIC void
xfs_allocbt_trace_key(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key,
	__uint64_t		*l0,
	__uint64_t		*l1)
{
	*l0 = be32_to_cpu(key->alloc.ar_startblock);
	*l1 = be32_to_cpu(key->alloc.ar_blockcount);
}

STATIC void
xfs_allocbt_trace_record(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	__uint64_t		*l0,
	__uint64_t		*l1,
	__uint64_t		*l2)
{
	*l0 = be32_to_cpu(rec->alloc.ar_startblock);
	*l1 = be32_to_cpu(rec->alloc.ar_blockcount);
	*l2 = 0;
}
#endif /* XFS_BTREE_TRACE */

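/*
 * The generic btree code in xfs_btree.c (xfs_btree_increment/decrement,
 * xfs_btree_lshift/rshift/split/updkey used above) operates on any btree
 * type through this ops vector, which supplies the allocation-btree record
 * and key sizes plus the callbacks it needs.
 */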
static const struct xfs_btree_ops xfs_allocbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.alloc_block		= xfs_allocbt_alloc_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_allocbt_key_diff,

#ifdef XFS_BTREE_TRACE
	.trace_enter		= xfs_allocbt_trace_enter,
	.trace_cursor		= xfs_allocbt_trace_cursor,
	.trace_key		= xfs_allocbt_trace_key,
	.trace_record		= xfs_allocbt_trace_record,
#endif
};

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,	/* file system mount point */
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_buf		*agbp,	/* buffer for agf structure */
	xfs_agnumber_t		agno,	/* allocation group number */
	xfs_btnum_t		btnum)	/* btree identifier */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[btnum]);
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	cur->bc_ops = &xfs_allocbt_ops;
	if (btnum == XFS_BTNUM_CNT)
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}
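
/*
 * Illustrative only (not part of this file): a caller such as xfs_alloc.c
 * typically uses these routines roughly as sketched below, where
 * xfs_alloc_lookup_ge() stands in for whatever lookup wrapper positions the
 * cursor (an assumed helper, not defined here):
 *
 *	cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
 *	error = xfs_alloc_lookup_ge(cur, 0, wanted_len, &i);
 *	if (!error && i)
 *		error = xfs_alloc_get_rec(cur, &bno, &len, &i);
 *	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 */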