/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"

struct workqueue_struct *xfs_alloc_wq;

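/* Absolute difference between two AG block numbers, used to judge locality. */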
#define	XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);

/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}

/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_alloc_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len)	/* length of extent */
{
	union xfs_btree_rec	rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (!error && *stat == 1) {
		*bno = be32_to_cpu(rec->alloc.ar_startblock);
		*len = be32_to_cpu(rec->alloc.ar_blockcount);
	}
	return error;
}

/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */
STATIC void
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,		/* allocation argument structure */
	xfs_agblock_t	foundbno,	/* starting block in found extent */
	xfs_extlen_t	foundlen,	/* length in found extent */
	xfs_agblock_t	*resbno,	/* result block number */
	xfs_extlen_t	*reslen)	/* result length */
{
	xfs_agblock_t	bno;
	xfs_extlen_t	len;

	/* Trim busy sections out of found extent */
	xfs_extent_busy_trim(args, foundbno, foundlen, &bno, &len);

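	/*
	 * If alignment is requested, round the start up and shrink the
	 * usable length by however many blocks were skipped over
	 * (possibly down to zero).
	 */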
	if (args->alignment > 1 && len >= args->minlen) {
		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);
		xfs_extlen_t	diff = aligned_bno - bno;

		*resbno = aligned_bno;
		*reslen = diff >= len ? 0 : len - diff;
	} else {
		*resbno = bno;
		*reslen = len;
	}
}

/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	char		userdata,	/* are we allocating data? */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	/*
	 * We want to allocate from the start of a free extent if it is past
	 * the desired block or if we are allocating user data and the free
	 * extent is before desired block. The second case is there to allow
	 * for contiguous allocation from the remaining free space if the file
	 * grows in the short term.
	 */
	if (freebno >= wantbno || (userdata && freeend < wantend)) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}

/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
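/*
 * Example: with prod = 4 and mod = 1, acceptable lengths are 5, 9, 13, ...
 * (any len of the form k * prod + mod).
 */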
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_extlen_t	k;
	xfs_extlen_t	rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod) {
		if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
			return;
	} else {
		if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
		    (int)args->minlen)
			return;
	}
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	args->len = rlen;
}

/*
 * Fix up length if there is too little space left in the a.g.
 * Return 1 if ok, 0 if too little, should give up.
 */
STATIC int
xfs_alloc_fix_minleft(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_agf_t	*agf;		/* a.g. freelist header */
	int		diff;		/* free space difference */

	if (args->minleft == 0)
		return 1;
	agf = XFS_BUF_TO_AGF(args->agbp);
	diff = be32_to_cpu(agf->agf_freeblks)
		- args->len - args->minleft;
	if (diff >= 0)
		return 1;
	args->len += diff;		/* shrink the allocated space */
	if (args->len >= args->minlen)
		return 1;
	args->agbno = NULLAGBLOCK;
	return 0;
}

/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1=0;	/* first new free length */
	xfs_extlen_t	nflen2=0;	/* second new free length */

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);

		XFS_WANT_CORRUPTED_RETURN(
			bnoblock->bb_numrecs == cntblock->bb_numrecs);
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	XFS_WANT_CORRUPTED_RETURN(i == 1);
	/*
	 * Add new by-size btree entry(s).
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	return 0;
}

static bool
xfs_agfl_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
	int		i;

	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_uuid))
		return false;
	if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
		return false;
	/*
	 * during growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we can't
	 * use it by using uncached buffers that don't have the perag attached
	 * so we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
		return false;

	for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
		if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
		    be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
			return false;
	}
	return true;
}

static void
xfs_agfl_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
	 * can't verify just those entries are valid.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
		xfs_buf_ioerror(bp, EFSBADCRC);
	else if (!xfs_agfl_verify(bp))
		xfs_buf_ioerror(bp, EFSCORRUPTED);

	if (bp->b_error)
		xfs_verifier_error(bp);
}

static void
xfs_agfl_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	/* no verification of non-crc AGFLs */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (!xfs_agfl_verify(bp)) {
		xfs_buf_ioerror(bp, EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}

	if (bip)
		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
}

const struct xfs_buf_ops xfs_agfl_buf_ops = {
	.verify_read = xfs_agfl_read_verify,
	.verify_write = xfs_agfl_write_verify,
};

/*
 * Read in the allocation group free block array.
 */
STATIC int				/* error */
xfs_alloc_read_agfl(
	xfs_mount_t	*mp,		/* mount point structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_buf_t	**bpp)		/* buffer for the ag free block array */
{
	xfs_buf_t	*bp;		/* return value */
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
	if (error)
		return error;
	xfs_buf_set_ref(bp, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}

STATIC int
xfs_alloc_update_counters(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	struct xfs_buf		*agbp,
	long			len)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);

	pag->pagf_freeblks += len;
	be32_add_cpu(&agf->agf_freeblks, len);

	xfs_trans_agblocks_delta(tp, len);
	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
		     be32_to_cpu(agf->agf_length)))
		return EFSCORRUPTED;

	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
	return 0;
}

/*
 * Allocation group level functions.
 */

/*
 * Allocate a variable extent in the allocation group agno.
 * Type and bno are used to determine where in the allocation group the
 * extent will start.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
{
	int		error=0;

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);
	/*
	 * Branch to correct routine based on the type.
	 */
	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}

	if (error || args->agbno == NULLAGBLOCK)
		return error;

	ASSERT(args->len >= args->minlen);
	ASSERT(args->len <= args->maxlen);
	ASSERT(!args->wasfromfl || !args->isfl);
	ASSERT(args->agbno % args->alignment == 0);

	if (!args->wasfromfl) {
		error = xfs_alloc_update_counters(args->tp, args->pag,
						  args->agbp,
						  -((long)(args->len)));
		if (error)
			return error;

		ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
					       args->agbno, args->len));
	}

	if (!args->isfl) {
		xfs_trans_mod_sb(args->tp, args->wasdel ?
				 XFS_TRANS_SB_RES_FDBLOCKS :
				 XFS_TRANS_SB_FDBLOCKS,
				 -((long)(args->len)));
	}

	XFS_STATS_INC(xs_allocx);
	XFS_STATS_ADD(xs_allocb, args->len);
	return error;
}

/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	xfs_agblock_t	tbno;	/* start block of trimmed extent */
	xfs_extlen_t	tlen;	/* length of trimmed extent */
	xfs_agblock_t	tend;	/* end block of trimmed extent */
	int		i;	/* success/failure of operation */

	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_BNO);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	ASSERT(fbno <= args->agbno);

	/*
	 * Check for overlapping busy extents.
	 */
	xfs_extent_busy_trim(args, fbno, flen, &tbno, &tlen);

	/*
	 * Give up if the start of the extent is busy, or the freespace isn't
	 * long enough for the minimum request.
	 */
	if (tbno > args->agbno)
		goto not_found;
	if (tlen < args->minlen)
		goto not_found;
	tend = tbno + tlen;
	if (tend < args->agbno + args->minlen)
		goto not_found;

	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
						- args->agbno;
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args))
		goto not_found;

	ASSERT(args->agbno + args->len <= tend);

	/*
	 * We are allocating agbno for args->len
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <=
		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}

/*
 * Search the btree in a given direction via the search cursor and compare
 * the records found against the good extent we've already found.
 */
STATIC int
xfs_alloc_find_best_extent(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	**gcur,	/* good cursor */
	struct xfs_btree_cur	**scur,	/* searching cursor */
	xfs_agblock_t		gdiff,	/* difference for search comparison */
	xfs_agblock_t		*sbno,	/* extent found by search */
	xfs_extlen_t		*slen,	/* extent length */
	xfs_agblock_t		*sbnoa,	/* aligned extent found by search */
	xfs_extlen_t		*slena,	/* aligned extent length */
	int			dir)	/* 0 = search right, 1 = search left */
{
	xfs_agblock_t		new;
	xfs_agblock_t		sdiff;
	int			error;
	int			i;

	/* The good extent is perfect, no need to search. */
	if (!gdiff)
		goto out_use_good;

	/*
	 * Look until we find a better one, run out of space or run off the end.
	 */
	do {
		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena);

		/*
		 * The good extent is closer than this one.
		 */
		if (!dir) {
			if (*sbnoa >= args->agbno + gdiff)
				goto out_use_good;
		} else {
			if (*sbnoa <= args->agbno - gdiff)
				goto out_use_good;
		}

		/*
		 * Same distance, compare length and pick the best.
		 */
		if (*slena >= args->minlen) {
			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
			xfs_alloc_fix_len(args);

			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
						       args->alignment,
						       args->userdata, *sbnoa,
						       *slena, &new);

			/*
			 * Choose closer size and invalidate other cursor.
			 */
			if (sdiff < gdiff)
				goto out_use_search;
			goto out_use_good;
		}

		if (!dir)
			error = xfs_btree_increment(*scur, 0, &i);
		else
			error = xfs_btree_decrement(*scur, 0, &i);
		if (error)
			goto error0;
	} while (i);

out_use_good:
	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
	*scur = NULL;
	return 0;

out_use_search:
	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
	*gcur = NULL;
	return 0;

error0:
	/* caller invalidates cursors */
	return error;
}

/*
 * Allocate a variable extent near bno in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int				/* error */
xfs_alloc_ag_vextent_near(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur_gt;	/* cursor for bno btree, right side */
	xfs_btree_cur_t	*bno_cur_lt;	/* cursor for bno btree, left side */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for count btree */
	xfs_agblock_t	gtbno;		/* start bno of right side entry */
	xfs_agblock_t	gtbnoa;		/* aligned ... */
	xfs_extlen_t	gtdiff;		/* difference to right side entry */
	xfs_extlen_t	gtlen;		/* length of right side entry */
	xfs_extlen_t	gtlena;		/* aligned ... */
	xfs_agblock_t	gtnew;		/* useful start bno of right side */
	int		error;		/* error code */
	int		i;		/* result code, temporary */
	int		j;		/* result code, temporary */
	xfs_agblock_t	ltbno;		/* start bno of left side entry */
	xfs_agblock_t	ltbnoa;		/* aligned ... */
	xfs_extlen_t	ltdiff;		/* difference to left side entry */
	xfs_extlen_t	ltlen;		/* length of left side entry */
	xfs_extlen_t	ltlena;		/* aligned ... */
	xfs_agblock_t	ltnew;		/* useful start bno of left side */
	xfs_extlen_t	rlen;		/* length of returned extent */
	int		forced = 0;
#ifdef DEBUG
	/*
	 * Randomly don't execute the first algorithm.
	 */
	int		dofirst;	/* set to do first algorithm */

	dofirst = prandom_u32() & 1;
#endif

restart:
	bno_cur_lt = NULL;
	bno_cur_gt = NULL;
	ltlen = 0;
	gtlena = 0;
	ltlena = 0;

	/*
	 * Get a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);

	/*
	 * See if there are any free extents as big as maxlen.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
		goto error0;
	/*
	 * If none, then pick up the last entry in the tree unless the
	 * tree is empty.
	 */
	if (!i) {
		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
				&ltlen, &i)))
			goto error0;
		if (i == 0 || ltlen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_near_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
	}
	args->wasfromfl = 0;

	/*
	 * First algorithm.
	 * If the requested extent is large wrt the freespaces available
	 * in this a.g., then the cursor will be pointing to a btree entry
	 * near the right edge of the tree.  If it's in the last btree leaf
	 * block, then we just examine all the entries in that block
	 * that are big enough, and pick the best one.
	 * This is written as a while loop so we can break out of it,
	 * but we never loop back to the top.
	 */
	while (xfs_btree_islastblock(cnt_cur, 0)) {
		xfs_extlen_t	bdiff;
		int		besti=0;
		xfs_extlen_t	blen=0;
		xfs_agblock_t	bnew=0;

#ifdef DEBUG
		if (dofirst)
			break;
#endif
		/*
		 * Start from the entry that lookup found, sequence through
		 * all larger free blocks.  If we're actually pointing at a
		 * record smaller than maxlen, go to the start of this block,
		 * and skip all those smaller than minlen.
		 */
		if (ltlen || args->alignment > 1) {
			cnt_cur->bc_ptrs[0] = 1;
			do {
				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
						&ltlen, &i)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
				if (ltlen >= args->minlen)
					break;
				if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
					goto error0;
			} while (i);
			ASSERT(ltlen >= args->minlen);
			if (!i)
				break;
		}
		i = cnt_cur->bc_ptrs[0];
		for (j = 1, blen = 0, bdiff = 0;
		     !error && j && (blen < args->maxlen || bdiff > 0);
		     error = xfs_btree_increment(cnt_cur, 0, &j)) {
			/*
			 * For each entry, decide if it's better than
			 * the previous best entry.
			 */
			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			xfs_alloc_compute_aligned(args, ltbno, ltlen,
						  &ltbnoa, &ltlena);
			if (ltlena < args->minlen)
				continue;
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ASSERT(args->len >= args->minlen);
			if (args->len < blen)
				continue;
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->userdata, ltbnoa,
				ltlena, &ltnew);
			if (ltnew != NULLAGBLOCK &&
			    (args->len > blen || ltdiff < bdiff)) {
				bdiff = ltdiff;
				bnew = ltnew;
				blen = args->len;
				besti = cnt_cur->bc_ptrs[0];
			}
		}
		/*
		 * It didn't work.  We COULD be in a case where
		 * there's a good record somewhere, so try again.
		 */
		if (blen == 0)
			break;
		/*
		 * Point at the best entry, and retrieve it again.
		 */
		cnt_cur->bc_ptrs[0] = besti;
		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
		args->len = blen;
		if (!xfs_alloc_fix_minleft(args)) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_near_nominleft(args);
			return 0;
		}
		blen = args->len;
		/*
		 * We are allocating starting at bnew for blen blocks.
		 */
		args->agbno = bnew;
		ASSERT(bnew >= ltbno);
		ASSERT(bnew + blen <= ltbno + ltlen);
		/*
		 * Set up a cursor for the by-bno tree.
		 */
		bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
			args->agbp, args->agno, XFS_BTNUM_BNO);
		/*
		 * Fix up the btree entries.
		 */
		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
			goto error0;
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);

		trace_xfs_alloc_near_first(args);
		return 0;
	}
	/*
	 * Second algorithm.
	 * Search in the by-bno tree to the left and to the right
	 * simultaneously, until in each case we find a space big enough,
	 * or run into the edge of the tree.  When we run into the edge,
	 * we deallocate that cursor.
	 * If both searches succeed, we compare the two spaces and pick
	 * the better one.
	 * With alignment, it's possible for both to fail; the upper
	 * level algorithm that picks allocation groups for allocations
	 * is not supposed to do this.
	 */
	/*
	 * Allocate and initialize the cursor for the leftward search.
	 */
	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	/*
	 * Lookup <= bno to find the leftward search's starting point.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
		goto error0;
	if (!i) {
		/*
		 * Didn't find anything; use this cursor for the rightward
		 * search.
		 */
		bno_cur_gt = bno_cur_lt;
		bno_cur_lt = NULL;
	}
	/*
	 * Found something.  Duplicate the cursor for the rightward search.
	 */
	else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
		goto error0;
	/*
	 * Increment the cursor, so we will point at the entry just right
	 * of the leftward entry if any, or to the leftmost entry.
	 */
	if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
		goto error0;
	if (!i) {
		/*
		 * It failed, there are no rightward entries.
		 */
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
		bno_cur_gt = NULL;
	}
	/*
	 * Loop going left with the leftward cursor, right with the
	 * rightward cursor, until either both directions give up or
	 * we find an entry at least as big as minlen.
	 */
	do {
		if (bno_cur_lt) {
			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			xfs_alloc_compute_aligned(args, ltbno, ltlen,
						  &ltbnoa, &ltlena);
			if (ltlena >= args->minlen)
				break;
			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
				goto error0;
			if (!i) {
				xfs_btree_del_cursor(bno_cur_lt,
						     XFS_BTREE_NOERROR);
				bno_cur_lt = NULL;
			}
		}
		if (bno_cur_gt) {
			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			xfs_alloc_compute_aligned(args, gtbno, gtlen,
						  &gtbnoa, &gtlena);
			if (gtlena >= args->minlen)
				break;
			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
				goto error0;
			if (!i) {
				xfs_btree_del_cursor(bno_cur_gt,
						     XFS_BTREE_NOERROR);
				bno_cur_gt = NULL;
			}
		}
	} while (bno_cur_lt || bno_cur_gt);

	/*
	 * Got both cursors still active, need to find better entry.
	 */
	if (bno_cur_lt && bno_cur_gt) {
		if (ltlena >= args->minlen) {
			/*
			 * Left side is good, look for a right side entry.
			 */
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->userdata, ltbnoa,
				ltlena, &ltnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_lt, &bno_cur_gt,
						ltdiff, &gtbno, &gtlen,
						&gtbnoa, &gtlena,
						0 /* search right */);
		} else {
			ASSERT(gtlena >= args->minlen);

			/*
			 * Right side is good, look for a left side entry.
			 */
			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
			xfs_alloc_fix_len(args);
			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->userdata, gtbnoa,
				gtlena, &gtnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_gt, &bno_cur_lt,
						gtdiff, &ltbno, &ltlen,
						&ltbnoa, &ltlena,
						1 /* search left */);
		}

		if (error)
			goto error0;
	}

	/*
	 * If we couldn't get anything, give up.
	 */
	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

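		/*
		 * Busy extents may be hiding all of the usable free space;
		 * force the log once to clear them and retry before giving up.
		 */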
		if (!forced++) {
			trace_xfs_alloc_near_busy(args);
			xfs_log_force(args->mp, XFS_LOG_SYNC);
			goto restart;
		}
		trace_xfs_alloc_size_neither(args);
		args->agbno = NULLAGBLOCK;
		return 0;
	}

	/*
	 * At this point we have selected a freespace entry, either to the
	 * left or to the right.  If it's on the right, copy all the
	 * useful variables to the "left" set so we only have one
	 * copy of this code.
	 */
	if (bno_cur_gt) {
		bno_cur_lt = bno_cur_gt;
		bno_cur_gt = NULL;
		ltbno = gtbno;
		ltbnoa = gtbnoa;
		ltlen = gtlen;
		ltlena = gtlena;
		j = 1;
	} else
		j = 0;

	/*
	 * Fix up the length and compute the useful address.
	 */
	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args)) {
		trace_xfs_alloc_near_nominleft(args);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		return 0;
	}
	rlen = args->len;
	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
				     args->userdata, ltbnoa, ltlena, &ltnew);
	ASSERT(ltnew >= ltbno);
	ASSERT(ltnew + rlen <= ltbnoa + ltlena);
	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	args->agbno = ltnew;

	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
		goto error0;

	if (j)
		trace_xfs_alloc_near_greater(args);
	else
		trace_xfs_alloc_near_lesser(args);

	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
	return 0;

 error0:
	trace_xfs_alloc_near_error(args);
	if (cnt_cur != NULL)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur_lt != NULL)
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
	if (bno_cur_gt != NULL)
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
	return error;
}

/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int				/* error */
xfs_alloc_ag_vextent_size(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
	int		error;		/* error result */
	xfs_agblock_t	fbno;		/* start of found freespace */
	xfs_extlen_t	flen;		/* length of found freespace */
	int		i;		/* temp status variable */
	xfs_agblock_t	rbno;		/* returned block number */
	xfs_extlen_t	rlen;		/* length of returned extent */
	int		forced = 0;

restart:
	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	bno_cur = NULL;

	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;

	/*
	 * If none or we have busy extents that we cannot allocate from, then
	 * we have to settle for a smaller extent. In the case that there are
	 * no large extents, this will return the last entry in the tree unless
	 * the tree is empty. In the case that there are only busy large
	 * extents, this will return the largest small extent unless there
	 * are no smaller extents available.
	 */
	if (!i || forced > 1) {
		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
						   &fbno, &flen, &i);
		if (error)
			goto error0;
		if (i == 0 || flen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
		xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen);
	} else {
		/*
		 * Search for a non-busy extent that is large enough.
		 * If we are at low space, don't check, or if we fall of
		 * the end of the btree, turn off the busy check and
		 * restart.
		 */
		for (;;) {
			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);

			xfs_alloc_compute_aligned(args, fbno, flen,
						  &rbno, &rlen);

			if (rlen >= args->maxlen)
				break;

			error = xfs_btree_increment(cnt_cur, 0, &i);
			if (error)
				goto error0;
			if (i == 0) {
				/*
				 * Our only valid extents must have been busy.
				 * Make it unbusy by forcing the log out and
				 * retrying. If we've been here before, forcing
				 * the log isn't making the extents available,
				 * which means they have probably been freed in
				 * this transaction. In that case, we have to
				 * give up on them and we'll attempt a minlen
				 * allocation the next time around.
				 */
				xfs_btree_del_cursor(cnt_cur,
						     XFS_BTREE_NOERROR);
				trace_xfs_alloc_size_busy(args);
				if (!forced++)
					xfs_log_force(args->mp, XFS_LOG_SYNC);
				goto restart;
			}
		}
	}

	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree.  Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;
		xfs_extlen_t	bestflen;
		xfs_agblock_t	bestrbno;
		xfs_extlen_t	bestrlen;

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		for (;;) {
			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			if (flen < bestrlen)
				break;
			xfs_alloc_compute_aligned(args, fbno, flen,
						  &rbno, &rlen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
				(rlen <= flen && rbno + rlen <= fbno + flen),
				error0);
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	if (rlen < args->minlen) {
		if (!forced++) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_busy(args);
			xfs_log_force(args->mp, XFS_LOG_SYNC);
			goto restart;
		}
		goto out_nominleft;
	}
	xfs_alloc_fix_len(args);

	if (!xfs_alloc_fix_minleft(args))
		goto out_nominleft;
	rlen = args->len;
	XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
	/*
	 * Allocate and initialize a cursor for the by-block tree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	XFS_WANT_CORRUPTED_GOTO(
		args->agbno + args->len <=
			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
		error0);
	trace_xfs_alloc_size_done(args);
	return 0;

error0:
	trace_xfs_alloc_size_error(args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;

out_nominleft:
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	trace_xfs_alloc_size_nominleft(args);
	args->agbno = NULLAGBLOCK;
	return 0;
}

/*
 * Deal with the case where only small freespaces remain.
 * Either return the contents of the last freespace record,
 * or allocate space from the freelist if there is nothing in the tree.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_small(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	xfs_btree_cur_t	*ccur,	/* by-size cursor */
	xfs_agblock_t	*fbnop,	/* result block number */
	xfs_extlen_t	*flenp,	/* result length */
	int		*stat)	/* status: 0-freelist, 1-normal/none */
{
	int		error;
	xfs_agblock_t	fbno;
	xfs_extlen_t	flen;
	int		i;

	if ((error = xfs_btree_decrement(ccur, 0, &i)))
		goto error0;
	if (i) {
		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	/*
	 * Nothing in the btree, try the freelist.  Make sure
	 * to respect minleft even when pulling from the
	 * freelist.
	 */
	else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
		  > args->minleft)) {
		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
		if (error)
			goto error0;
		if (fbno != NULLAGBLOCK) {
			xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
					      args->userdata);

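			/*
			 * A block pulled off the freelist may still have a
			 * dirty metadata buffer attached from an earlier use;
			 * when it is handed out as user data, invalidate that
			 * buffer so stale contents are never written back.
			 */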
			if (args->userdata) {
				xfs_buf_t	*bp;

				bp = xfs_btree_get_bufs(args->mp, args->tp,
					args->agno, fbno, 0);
				xfs_trans_binval(args->tp, bp);
			}
			args->len = 1;
			args->agbno = fbno;
			XFS_WANT_CORRUPTED_GOTO(
				args->agbno + args->len <=
				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
				error0);
			args->wasfromfl = 1;
			trace_xfs_alloc_small_freelist(args);
			*stat = 0;
			return 0;
		}
		/*
		 * Nothing in the freelist.
		 */
		else
			flen = 0;
	}
	/*
	 * Can't allocate from the freelist for some reason.
	 */
	else {
		fbno = NULLAGBLOCK;
		flen = 0;
	}
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	trace_xfs_alloc_small_done(args);
	return 0;

error0:
	trace_xfs_alloc_small_error(args);
	return error;
}

1540/*
1541 * Free the extent starting at agno/bno for length.
1542 */
1543STATIC int /* error */
1544xfs_free_ag_extent(
1545 xfs_trans_t *tp, /* transaction pointer */
1546 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
1547 xfs_agnumber_t agno, /* allocation group number */
1548 xfs_agblock_t bno, /* starting block number */
1549 xfs_extlen_t len, /* length of extent */
1550 int isfl) /* set if is freelist blocks - no sb acctg */
1551{
1552 xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */
1553 xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */
1554 int error; /* error return value */
1da177e4
LT
1555 xfs_agblock_t gtbno; /* start of right neighbor block */
1556 xfs_extlen_t gtlen; /* length of right neighbor block */
1557 int haveleft; /* have a left neighbor block */
1558 int haveright; /* have a right neighbor block */
1559 int i; /* temp, result code */
1560 xfs_agblock_t ltbno; /* start of left neighbor block */
1561 xfs_extlen_t ltlen; /* length of left neighbor block */
1562 xfs_mount_t *mp; /* mount point struct for filesystem */
1563 xfs_agblock_t nbno; /* new starting block of freespace */
1564 xfs_extlen_t nlen; /* new length of freespace */
ecb6928f 1565 xfs_perag_t *pag; /* per allocation group data */
1da177e4
LT
1566
1567 mp = tp->t_mountp;
1568 /*
1569 * Allocate and initialize a cursor for the by-block btree.
1570 */
561f7d17 1571 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
1da177e4
LT
1572 cnt_cur = NULL;
1573 /*
1574 * Look for a neighboring block on the left (lower block numbers)
1575 * that is contiguous with this space.
1576 */
1577 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1578 goto error0;
1579 if (haveleft) {
1580 /*
1581 * There is a block to our left.
1582 */
1583 if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1584 goto error0;
1585 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1586 /*
1587 * It's not contiguous, though.
1588 */
1589 if (ltbno + ltlen < bno)
1590 haveleft = 0;
1591 else {
1592 /*
1593 * If this failure happens the request to free this
1594 * space was invalid, it's (partly) already free.
1595 * Very bad.
1596 */
1597 XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
1598 }
1599 }
1600 /*
1601 * Look for a neighboring block on the right (higher block numbers)
1602 * that is contiguous with this space.
1603 */
637aa50f 1604 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1da177e4
LT
1605 goto error0;
1606 if (haveright) {
1607 /*
1608 * There is a block to our right.
1609 */
1610 if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1611 goto error0;
1612 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1613 /*
1614 * It's not contiguous, though.
1615 */
1616 if (bno + len < gtbno)
1617 haveright = 0;
1618 else {
1619 /*
1620 * If this failure happens the request to free this
1621 * space was invalid, it's (partly) already free.
1622 * Very bad.
1623 */
1624 XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
1625 }
1626 }
1627 /*
1628 * Now allocate and initialize a cursor for the by-size tree.
1629 */
561f7d17 1630 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1da177e4
LT
1631 /*
1632 * Have both left and right contiguous neighbors.
1633 * Merge all three into a single free block.
1634 */
1635 if (haveleft && haveright) {
1636 /*
1637 * Delete the old by-size entry on the left.
1638 */
1639 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1640 goto error0;
1641 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
91cca5df 1642 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4
LT
1643 goto error0;
1644 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1645 /*
1646 * Delete the old by-size entry on the right.
1647 */
1648 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1649 goto error0;
1650 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
91cca5df 1651 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4
LT
1652 goto error0;
1653 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1654 /*
1655 * Delete the old by-block entry for the right block.
1656 */
91cca5df 1657 if ((error = xfs_btree_delete(bno_cur, &i)))
1da177e4
LT
1658 goto error0;
1659 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1660 /*
1661 * Move the by-block cursor back to the left neighbor.
1662 */
8df4da4a 1663 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1da177e4
LT
1664 goto error0;
1665 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1666#ifdef DEBUG
1667 /*
1668 * Check that this is the right record: delete didn't
1669 * mangle the cursor.
1670 */
1671 {
1672 xfs_agblock_t xxbno;
1673 xfs_extlen_t xxlen;
1674
1675 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
1676 &i)))
1677 goto error0;
1678 XFS_WANT_CORRUPTED_GOTO(
1679 i == 1 && xxbno == ltbno && xxlen == ltlen,
1680 error0);
1681 }
1682#endif
1683 /*
1684 * Update remaining by-block entry to the new, joined block.
1685 */
1686 nbno = ltbno;
1687 nlen = len + ltlen + gtlen;
1688 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1689 goto error0;
1690 }
1691 /*
1692 * Have only a left contiguous neighbor.
1693 * Merge it together with the new freespace.
1694 */
1695 else if (haveleft) {
1696 /*
1697 * Delete the old by-size entry on the left.
1698 */
1699 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1700 goto error0;
1701 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
91cca5df 1702 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4
LT
1703 goto error0;
1704 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1705 /*
1706 * Back up the by-block cursor to the left neighbor, and
1707 * update its length.
1708 */
8df4da4a 1709 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1da177e4
LT
1710 goto error0;
1711 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1712 nbno = ltbno;
1713 nlen = len + ltlen;
1714 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1715 goto error0;
1716 }
1717 /*
1718 * Have only a right contiguous neighbor.
1719 * Merge it together with the new freespace.
1720 */
1721 else if (haveright) {
1722 /*
1723 * Delete the old by-size entry on the right.
1724 */
1725 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1726 goto error0;
1727 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
91cca5df 1728 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4
LT
1729 goto error0;
1730 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1731 /*
1732 * Update the starting block and length of the right
1733 * neighbor in the by-block tree.
1734 */
1735 nbno = bno;
1736 nlen = len + gtlen;
1737 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1738 goto error0;
1739 }
1740 /*
1741 * No contiguous neighbors.
1742 * Insert the new freespace into the by-block tree.
1743 */
1744 else {
1745 nbno = bno;
1746 nlen = len;
4b22a571 1747 if ((error = xfs_btree_insert(bno_cur, &i)))
1da177e4
LT
1748 goto error0;
1749 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1750 }
1751 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1752 bno_cur = NULL;
1753 /*
1754 * In all cases we need to insert the new freespace in the by-size tree.
1755 */
1756 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
1757 goto error0;
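	/*
	 * The lookup is expected to miss (i == 0): the merged by-size record
	 * must not exist yet, since we insert it at the cursor just below.
	 */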
1758 XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
4b22a571 1759 if ((error = xfs_btree_insert(cnt_cur, &i)))
1da177e4
LT
1760 goto error0;
1761 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1762 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1763 cnt_cur = NULL;
ecb6928f 1764
1da177e4
LT
1765 /*
1766 * Update the freespace totals in the ag and superblock.
1767 */
ecb6928f
CH
1768 pag = xfs_perag_get(mp, agno);
1769 error = xfs_alloc_update_counters(tp, pag, agbp, len);
1770 xfs_perag_put(pag);
1771 if (error)
1772 goto error0;
1773
1774 if (!isfl)
1775 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
1776 XFS_STATS_INC(xs_freex);
1777 XFS_STATS_ADD(xs_freeb, len);
0b1b213f
CH
1778
1779 trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
1da177e4 1780
1da177e4
LT
1781 return 0;
1782
1783 error0:
0b1b213f 1784 trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
1da177e4
LT
1785 if (bno_cur)
1786 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1787 if (cnt_cur)
1788 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1789 return error;
1790}
1791
1792/*
1793 * Visible (exported) allocation/free functions.
1794 * Some of these are used just by xfs_alloc_btree.c and this file.
1795 */
1796
1797/*
1798 * Compute and fill in value of m_ag_maxlevels.
1799 */
1800void
1801xfs_alloc_compute_maxlevels(
1802 xfs_mount_t *mp) /* file system mount structure */
1803{
1804 int level;
1805 uint maxblocks;
1806 uint maxleafents;
1807 int minleafrecs;
1808 int minnoderecs;
1809
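	/*
	 * Worst case: every other block in the AG is a single-block free
	 * extent, so the by-bno/by-cnt trees never need to hold more than
	 * (sb_agblocks + 1) / 2 leaf records.
	 */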
1810 maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
1811 minleafrecs = mp->m_alloc_mnr[0];
1812 minnoderecs = mp->m_alloc_mnr[1];
1813 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
1814 for (level = 1; maxblocks > 1; level++)
1815 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
1816 mp->m_ag_maxlevels = level;
1817}
1818
6cc87645
DC
1819/*
1820 * Find the length of the longest extent in an AG.
1821 */
1822xfs_extlen_t
1823xfs_alloc_longest_free_extent(
1824 struct xfs_mount *mp,
1825 struct xfs_perag *pag)
1826{
1827 xfs_extlen_t need, delta = 0;
1828
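	/*
	 * Blocks that will be pulled from free space to top up the AG
	 * freelist reduce the usable length of the longest extent.
	 */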
1829 need = XFS_MIN_FREELIST_PAG(pag, mp);
1830 if (need > pag->pagf_flcount)
1831 delta = need - pag->pagf_flcount;
1832
1833 if (pag->pagf_longest > delta)
1834 return pag->pagf_longest - delta;
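	/* Nothing usable left over; return 1 if the AG has any free space at all, else 0. */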
1835 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
1836}
1837
1da177e4
LT
1838/*
1839 * Decide whether to use this allocation group for this allocation.
1840 * If so, fix up the btree freelist's size.
1841 */
1842STATIC int /* error */
1843xfs_alloc_fix_freelist(
1844 xfs_alloc_arg_t *args, /* allocation argument structure */
1845 int flags) /* XFS_ALLOC_FLAG_... */
1846{
1847 xfs_buf_t *agbp; /* agf buffer pointer */
1848 xfs_agf_t *agf; /* a.g. freespace structure pointer */
1849 xfs_buf_t *agflbp;/* agfl buffer pointer */
1850 xfs_agblock_t bno; /* freelist block */
1851 xfs_extlen_t delta; /* new blocks needed in freelist */
1852 int error; /* error result code */
1853 xfs_extlen_t longest;/* longest extent in allocation group */
1854 xfs_mount_t *mp; /* file system mount point structure */
1855 xfs_extlen_t need; /* total blocks needed in freelist */
1856 xfs_perag_t *pag; /* per-ag information structure */
1857 xfs_alloc_arg_t targs; /* local allocation arguments */
1858 xfs_trans_t *tp; /* transaction pointer */
1859
1860 mp = args->mp;
1861
1862 pag = args->pag;
1863 tp = args->tp;
1864 if (!pag->pagf_init) {
1865 if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
1866 &agbp)))
1867 return error;
1868 if (!pag->pagf_init) {
0e1edbd9
NS
1869 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1870 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1da177e4
LT
1871 args->agbp = NULL;
1872 return 0;
1873 }
1874 } else
1875 agbp = NULL;
1876
0e1edbd9
NS
1877 /*
 1878	 * If this is a metadata preferred pag and we are allocating user data,
1da177e4
LT
 1879	 * then try somewhere else, unless we are being asked to
 1880	 * try harder at this point.
1881 */
0e1edbd9
NS
1882 if (pag->pagf_metadata && args->userdata &&
1883 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
1884 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1da177e4
LT
1885 args->agbp = NULL;
1886 return 0;
1887 }
1888
0e1edbd9 1889 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
0e1edbd9
NS
1890 /*
1891 * If it looks like there isn't a long enough extent, or enough
1892 * total blocks, reject it.
1893 */
6cc87645
DC
1894 need = XFS_MIN_FREELIST_PAG(pag, mp);
1895 longest = xfs_alloc_longest_free_extent(mp, pag);
0e1edbd9
NS
1896 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1897 longest ||
1898 ((int)(pag->pagf_freeblks + pag->pagf_flcount -
1899 need - args->total) < (int)args->minleft)) {
1900 if (agbp)
1901 xfs_trans_brelse(tp, agbp);
1902 args->agbp = NULL;
1903 return 0;
1904 }
1da177e4 1905 }
0e1edbd9 1906
1da177e4
LT
1907 /*
1908 * Get the a.g. freespace buffer.
 1909	 * Can fail if we're not blocking on locks and the buffer lock is already held.
1910 */
1911 if (agbp == NULL) {
1912 if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
1913 &agbp)))
1914 return error;
1915 if (agbp == NULL) {
0e1edbd9
NS
1916 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1917 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1da177e4
LT
1918 args->agbp = NULL;
1919 return 0;
1920 }
1921 }
1922 /*
1923 * Figure out how many blocks we should have in the freelist.
1924 */
1925 agf = XFS_BUF_TO_AGF(agbp);
1926 need = XFS_MIN_FREELIST(agf, mp);
1da177e4
LT
1927 /*
 1928	 * If there isn't enough total free space, or a long enough single extent, reject it.
1929 */
0e1edbd9
NS
1930 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1931 delta = need > be32_to_cpu(agf->agf_flcount) ?
1932 (need - be32_to_cpu(agf->agf_flcount)) : 0;
1933 longest = be32_to_cpu(agf->agf_longest);
1934 longest = (longest > delta) ? (longest - delta) :
1935 (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
1936 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1937 longest ||
1938 ((int)(be32_to_cpu(agf->agf_freeblks) +
1939 be32_to_cpu(agf->agf_flcount) - need - args->total) <
1940 (int)args->minleft)) {
1941 xfs_trans_brelse(tp, agbp);
1942 args->agbp = NULL;
1943 return 0;
1944 }
1da177e4
LT
1945 }
1946 /*
1947 * Make the freelist shorter if it's too long.
1948 */
16259e7d 1949 while (be32_to_cpu(agf->agf_flcount) > need) {
1da177e4
LT
1950 xfs_buf_t *bp;
1951
92821e2b
DC
1952 error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
1953 if (error)
1da177e4
LT
1954 return error;
1955 if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
1956 return error;
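		/*
		 * The block is free space again; grab its buffer and
		 * invalidate it so any stale contents are not written back.
		 */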
1957 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
1958 xfs_trans_binval(tp, bp);
1959 }
1960 /*
1961 * Initialize the args structure.
1962 */
a0041684 1963 memset(&targs, 0, sizeof(targs));
1da177e4
LT
1964 targs.tp = tp;
1965 targs.mp = mp;
1966 targs.agbp = agbp;
1967 targs.agno = args->agno;
1da177e4
LT
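	/*
	 * isfl is set: these allocations refill the AG freelist, so the
	 * in-core superblock free-block counter is left untouched.
	 */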
1968 targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
1969 targs.type = XFS_ALLOCTYPE_THIS_AG;
1970 targs.pag = pag;
1971 if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp)))
1972 return error;
1973 /*
1974 * Make the freelist longer if it's too short.
1975 */
16259e7d 1976 while (be32_to_cpu(agf->agf_flcount) < need) {
1da177e4 1977 targs.agbno = 0;
16259e7d 1978 targs.maxlen = need - be32_to_cpu(agf->agf_flcount);
1da177e4
LT
1979 /*
1980 * Allocate as many blocks as possible at once.
1981 */
e63a3690
NS
1982 if ((error = xfs_alloc_ag_vextent(&targs))) {
1983 xfs_trans_brelse(tp, agflbp);
1da177e4 1984 return error;
e63a3690 1985 }
1da177e4
LT
1986 /*
1987 * Stop if we run out. Won't happen if callers are obeying
1988 * the restrictions correctly. Can happen for free calls
1989 * on a completely full ag.
1990 */
d210a28c 1991 if (targs.agbno == NULLAGBLOCK) {
0e1edbd9
NS
1992 if (flags & XFS_ALLOC_FLAG_FREEING)
1993 break;
1994 xfs_trans_brelse(tp, agflbp);
1995 args->agbp = NULL;
1996 return 0;
d210a28c 1997 }
1da177e4
LT
1998 /*
1999 * Put each allocated block on the list.
2000 */
2001 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
92821e2b
DC
2002 error = xfs_alloc_put_freelist(tp, agbp,
2003 agflbp, bno, 0);
2004 if (error)
1da177e4
LT
2005 return error;
2006 }
2007 }
e63a3690 2008 xfs_trans_brelse(tp, agflbp);
1da177e4
LT
2009 args->agbp = agbp;
2010 return 0;
2011}
2012
2013/*
2014 * Get a block from the freelist.
 2015 * On success, *bnop is set to the block retrieved (or NULLAGBLOCK if the freelist is empty).
2016 */
2017int /* error */
2018xfs_alloc_get_freelist(
2019 xfs_trans_t *tp, /* transaction pointer */
2020 xfs_buf_t *agbp, /* buffer containing the agf structure */
92821e2b
DC
2021 xfs_agblock_t *bnop, /* block address retrieved from freelist */
 2022 	int			btreeblk) /* destination is an AGF btree */
1da177e4
LT
2023{
2024 xfs_agf_t *agf; /* a.g. freespace structure */
1da177e4
LT
2025 xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
2026 xfs_agblock_t bno; /* block number returned */
77c95bba 2027 __be32 *agfl_bno;
1da177e4 2028 int error;
92821e2b 2029 int logflags;
77c95bba 2030 xfs_mount_t *mp = tp->t_mountp;
1da177e4
LT
2031 xfs_perag_t *pag; /* per allocation group data */
2032
1da177e4
LT
2033 /*
2034 * Freelist is empty, give up.
2035 */
77c95bba 2036 agf = XFS_BUF_TO_AGF(agbp);
1da177e4
LT
2037 if (!agf->agf_flcount) {
2038 *bnop = NULLAGBLOCK;
2039 return 0;
2040 }
2041 /*
2042 * Read the array of free blocks.
2043 */
77c95bba
CH
2044 error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
2045 &agflbp);
2046 if (error)
1da177e4 2047 return error;
77c95bba
CH
2048
2049
1da177e4
LT
2050 /*
2051 * Get the block number and update the data structures.
2052 */
77c95bba
CH
2053 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2054 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
413d57c9 2055 be32_add_cpu(&agf->agf_flfirst, 1);
1da177e4 2056 xfs_trans_brelse(tp, agflbp);
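	/* The AGFL is used as a circular buffer; wrap the head index at the end. */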
16259e7d 2057 if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
1da177e4 2058 agf->agf_flfirst = 0;
a862e0fd
DC
2059
2060 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
413d57c9 2061 be32_add_cpu(&agf->agf_flcount, -1);
1da177e4
LT
2062 xfs_trans_agflist_delta(tp, -1);
2063 pag->pagf_flcount--;
a862e0fd 2064 xfs_perag_put(pag);
92821e2b
DC
2065
2066 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2067 if (btreeblk) {
413d57c9 2068 be32_add_cpu(&agf->agf_btreeblks, 1);
92821e2b
DC
2069 pag->pagf_btreeblks++;
2070 logflags |= XFS_AGF_BTREEBLKS;
2071 }
2072
92821e2b 2073 xfs_alloc_log_agf(tp, agbp, logflags);
1da177e4
LT
2074 *bnop = bno;
2075
1da177e4
LT
2076 return 0;
2077}
2078
2079/*
2080 * Log the given fields from the agf structure.
2081 */
2082void
2083xfs_alloc_log_agf(
2084 xfs_trans_t *tp, /* transaction pointer */
2085 xfs_buf_t *bp, /* buffer for a.g. freelist header */
2086 int fields) /* mask of fields to be logged (XFS_AGF_...) */
2087{
2088 int first; /* first byte offset */
2089 int last; /* last byte offset */
2090 static const short offsets[] = {
2091 offsetof(xfs_agf_t, agf_magicnum),
2092 offsetof(xfs_agf_t, agf_versionnum),
2093 offsetof(xfs_agf_t, agf_seqno),
2094 offsetof(xfs_agf_t, agf_length),
2095 offsetof(xfs_agf_t, agf_roots[0]),
2096 offsetof(xfs_agf_t, agf_levels[0]),
2097 offsetof(xfs_agf_t, agf_flfirst),
2098 offsetof(xfs_agf_t, agf_fllast),
2099 offsetof(xfs_agf_t, agf_flcount),
2100 offsetof(xfs_agf_t, agf_freeblks),
2101 offsetof(xfs_agf_t, agf_longest),
92821e2b 2102 offsetof(xfs_agf_t, agf_btreeblks),
4e0e6040 2103 offsetof(xfs_agf_t, agf_uuid),
1da177e4
LT
2104 sizeof(xfs_agf_t)
2105 };
2106
0b1b213f
CH
2107 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2108
61fe135c 2109 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
4e0e6040 2110
1da177e4
LT
2111 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2112 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2113}
2114
2115/*
2116 * Interface for inode allocation to force the pag data to be initialized.
2117 */
2118int /* error */
2119xfs_alloc_pagf_init(
2120 xfs_mount_t *mp, /* file system mount structure */
2121 xfs_trans_t *tp, /* transaction pointer */
2122 xfs_agnumber_t agno, /* allocation group number */
2123 int flags) /* XFS_ALLOC_FLAGS_... */
2124{
2125 xfs_buf_t *bp;
2126 int error;
2127
2128 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2129 return error;
2130 if (bp)
2131 xfs_trans_brelse(tp, bp);
2132 return 0;
2133}
2134
2135/*
2136 * Put the block on the freelist for the allocation group.
2137 */
2138int /* error */
2139xfs_alloc_put_freelist(
2140 xfs_trans_t *tp, /* transaction pointer */
2141 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
2142 xfs_buf_t *agflbp,/* buffer for a.g. free block array */
92821e2b
DC
2143 xfs_agblock_t bno, /* block being freed */
 2144 	xfs_agblock_t		bno,	/* block being freed */
 	int			btreeblk) /* block came from an AGF btree */
1da177e4
LT
2145{
2146 xfs_agf_t *agf; /* a.g. freespace structure */
e2101005 2147 __be32 *blockp;/* pointer to array entry */
1da177e4 2148 int error;
92821e2b 2149 int logflags;
1da177e4
LT
2150 xfs_mount_t *mp; /* mount structure */
2151 xfs_perag_t *pag; /* per allocation group data */
77c95bba
CH
2152 __be32 *agfl_bno;
2153 int startoff;
1da177e4
LT
2154
2155 agf = XFS_BUF_TO_AGF(agbp);
2156 mp = tp->t_mountp;
2157
2158 if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
16259e7d 2159 be32_to_cpu(agf->agf_seqno), &agflbp)))
1da177e4 2160 return error;
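	/* Advance the AGFL tail index, wrapping at the end of the circular array. */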
413d57c9 2161 be32_add_cpu(&agf->agf_fllast, 1);
16259e7d 2162 if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
1da177e4 2163 agf->agf_fllast = 0;
a862e0fd
DC
2164
2165 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
413d57c9 2166 be32_add_cpu(&agf->agf_flcount, 1);
1da177e4
LT
2167 xfs_trans_agflist_delta(tp, 1);
2168 pag->pagf_flcount++;
92821e2b
DC
2169
2170 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2171 if (btreeblk) {
413d57c9 2172 be32_add_cpu(&agf->agf_btreeblks, -1);
92821e2b
DC
2173 pag->pagf_btreeblks--;
2174 logflags |= XFS_AGF_BTREEBLKS;
2175 }
a862e0fd 2176 xfs_perag_put(pag);
92821e2b 2177
92821e2b
DC
2178 xfs_alloc_log_agf(tp, agbp, logflags);
2179
16259e7d 2180 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
77c95bba
CH
2181
2182 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2183 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
e2101005 2184 *blockp = cpu_to_be32(bno);
77c95bba
CH
2185 startoff = (char *)blockp - (char *)agflbp->b_addr;
2186
92821e2b 2187 xfs_alloc_log_agf(tp, agbp, logflags);
77c95bba 2188
61fe135c 2189 xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
77c95bba
CH
2190 xfs_trans_log_buf(tp, agflbp, startoff,
2191 startoff + sizeof(xfs_agblock_t) - 1);
1da177e4
LT
2192 return 0;
2193}
2194
4e0e6040 2195static bool
612cfbfe 2196xfs_agf_verify(
4e0e6040 2197 struct xfs_mount *mp,
5d5f527d
DC
2198 struct xfs_buf *bp)
2199 {
4e0e6040 2200 struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
5d5f527d 2201
4e0e6040
DC
2202 if (xfs_sb_version_hascrc(&mp->m_sb) &&
2203 !uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_uuid))
2204 return false;
5d5f527d 2205
4e0e6040
DC
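	/* Basic sanity checks: magic number, version, and freelist indices within bounds. */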
2206 if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
2207 XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2208 be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2209 be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
2210 be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
2211 be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
2212 return false;
5d5f527d
DC
2213
2214 /*
 2215	 * During growfs operations the perag is not fully initialised, so
 2216	 * we can't use it for any useful checking. growfs ensures we can't
 2217	 * use it by using uncached buffers that don't have the perag attached,
 2218	 * so we can detect and avoid this problem.
2219 */
4e0e6040
DC
2220 if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
2221 return false;
5d5f527d 2222
4e0e6040
DC
2223 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
2224 be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
2225 return false;
2226
 2227	return true;
5d5f527d 2228
612cfbfe
DC
2229}
2230
1813dd64
DC
2231static void
2232xfs_agf_read_verify(
612cfbfe
DC
2233 struct xfs_buf *bp)
2234{
4e0e6040 2235 struct xfs_mount *mp = bp->b_target->bt_mount;
4e0e6040 2236
ce5028cf
ES
2237 if (xfs_sb_version_hascrc(&mp->m_sb) &&
2238 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
2239 xfs_buf_ioerror(bp, EFSBADCRC);
2240 else if (XFS_TEST_ERROR(!xfs_agf_verify(mp, bp), mp,
2241 XFS_ERRTAG_ALLOC_READ_AGF,
2242 XFS_RANDOM_ALLOC_READ_AGF))
4e0e6040 2243 xfs_buf_ioerror(bp, EFSCORRUPTED);
ce5028cf
ES
2244
2245 if (bp->b_error)
2246 xfs_verifier_error(bp);
612cfbfe 2247}
5d5f527d 2248
b0f539de 2249static void
1813dd64 2250xfs_agf_write_verify(
612cfbfe
DC
2251 struct xfs_buf *bp)
2252{
4e0e6040
DC
2253 struct xfs_mount *mp = bp->b_target->bt_mount;
2254 struct xfs_buf_log_item *bip = bp->b_fspriv;
2255
2256 if (!xfs_agf_verify(mp, bp)) {
4e0e6040 2257 xfs_buf_ioerror(bp, EFSCORRUPTED);
ce5028cf 2258 xfs_verifier_error(bp);
4e0e6040
DC
2259 return;
2260 }
2261
2262 if (!xfs_sb_version_hascrc(&mp->m_sb))
2263 return;
2264
2265 if (bip)
2266 XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2267
f1dbcd7e 2268 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
5d5f527d
DC
2269}
2270
1813dd64
DC
2271const struct xfs_buf_ops xfs_agf_buf_ops = {
2272 .verify_read = xfs_agf_read_verify,
2273 .verify_write = xfs_agf_write_verify,
2274};
2275
1da177e4
LT
2276/*
2277 * Read in the allocation group header (free/alloc section).
2278 */
2279int /* error */
4805621a
CH
2280xfs_read_agf(
2281 struct xfs_mount *mp, /* mount point structure */
2282 struct xfs_trans *tp, /* transaction pointer */
2283 xfs_agnumber_t agno, /* allocation group number */
2284 int flags, /* XFS_BUF_ */
2285 struct xfs_buf **bpp) /* buffer for the ag freelist header */
1da177e4 2286{
1da177e4
LT
2287 int error;
2288
d123031a
DC
2289 trace_xfs_read_agf(mp, agno);
2290
1da177e4
LT
2291 ASSERT(agno != NULLAGNUMBER);
2292 error = xfs_trans_read_buf(
2293 mp, tp, mp->m_ddev_targp,
2294 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
1813dd64 2295 XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
1da177e4
LT
2296 if (error)
2297 return error;
4805621a 2298 if (!*bpp)
1da177e4 2299 return 0;
4805621a 2300
5a52c2a5 2301 ASSERT(!(*bpp)->b_error);
38f23232 2302 xfs_buf_set_ref(*bpp, XFS_AGF_REF);
4805621a
CH
2303 return 0;
2304}
2305
2306/*
2307 * Read in the allocation group header (free/alloc section).
2308 */
2309int /* error */
2310xfs_alloc_read_agf(
2311 struct xfs_mount *mp, /* mount point structure */
2312 struct xfs_trans *tp, /* transaction pointer */
2313 xfs_agnumber_t agno, /* allocation group number */
2314 int flags, /* XFS_ALLOC_FLAG_... */
2315 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2316{
2317 struct xfs_agf *agf; /* ag freelist header */
2318 struct xfs_perag *pag; /* per allocation group data */
2319 int error;
2320
d123031a 2321 trace_xfs_alloc_read_agf(mp, agno);
4805621a 2322
d123031a 2323 ASSERT(agno != NULLAGNUMBER);
4805621a 2324 error = xfs_read_agf(mp, tp, agno,
0cadda1c 2325 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
4805621a
CH
2326 bpp);
2327 if (error)
2328 return error;
2329 if (!*bpp)
2330 return 0;
5a52c2a5 2331 ASSERT(!(*bpp)->b_error);
4805621a
CH
2332
2333 agf = XFS_BUF_TO_AGF(*bpp);
a862e0fd 2334 pag = xfs_perag_get(mp, agno);
1da177e4 2335 if (!pag->pagf_init) {
16259e7d 2336 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
92821e2b 2337 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
16259e7d
CH
2338 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2339 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
1da177e4 2340 pag->pagf_levels[XFS_BTNUM_BNOi] =
16259e7d 2341 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
1da177e4 2342 pag->pagf_levels[XFS_BTNUM_CNTi] =
16259e7d 2343 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
007c61c6 2344 spin_lock_init(&pag->pagb_lock);
e57336ff 2345 pag->pagb_count = 0;
ed3b4d6c 2346 pag->pagb_tree = RB_ROOT;
1da177e4
LT
2347 pag->pagf_init = 1;
2348 }
2349#ifdef DEBUG
2350 else if (!XFS_FORCED_SHUTDOWN(mp)) {
16259e7d 2351 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
89b28393 2352 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
16259e7d
CH
2353 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
2354 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
1da177e4 2355 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
16259e7d 2356 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
1da177e4 2357 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
16259e7d 2358 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
1da177e4
LT
2359 }
2360#endif
a862e0fd 2361 xfs_perag_put(pag);
1da177e4
LT
2362 return 0;
2363}
2364
2365/*
2366 * Allocate an extent (variable-size).
2367 * Depending on the allocation type, we either look in a single allocation
2368 * group or loop over the allocation groups to find the result.
2369 */
2370int /* error */
e04426b9 2371xfs_alloc_vextent(
1da177e4
LT
2372 xfs_alloc_arg_t *args) /* allocation argument structure */
2373{
2374 xfs_agblock_t agsize; /* allocation group size */
2375 int error;
2376 int flags; /* XFS_ALLOC_FLAG_... locking flags */
1da177e4
LT
2377 xfs_extlen_t minleft;/* minimum left value, temp copy */
2378 xfs_mount_t *mp; /* mount structure pointer */
2379 xfs_agnumber_t sagno; /* starting allocation group number */
2380 xfs_alloctype_t type; /* input allocation type */
2381 int bump_rotor = 0;
2382 int no_min = 0;
2383 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2384
2385 mp = args->mp;
2386 type = args->otype = args->type;
2387 args->agbno = NULLAGBLOCK;
2388 /*
2389 * Just fix this up, for the case where the last a.g. is shorter
2390 * (or there's only one a.g.) and the caller couldn't easily figure
2391 * that out (xfs_bmap_alloc).
2392 */
2393 agsize = mp->m_sb.sb_agblocks;
2394 if (args->maxlen > agsize)
2395 args->maxlen = agsize;
2396 if (args->alignment == 0)
2397 args->alignment = 1;
2398 ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
2399 ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
2400 ASSERT(args->minlen <= args->maxlen);
2401 ASSERT(args->minlen <= agsize);
2402 ASSERT(args->mod < args->prod);
2403 if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
2404 XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
2405 args->minlen > args->maxlen || args->minlen > agsize ||
2406 args->mod >= args->prod) {
2407 args->fsbno = NULLFSBLOCK;
0b1b213f 2408 trace_xfs_alloc_vextent_badargs(args);
1da177e4
LT
2409 return 0;
2410 }
2411 minleft = args->minleft;
2412
2413 switch (type) {
2414 case XFS_ALLOCTYPE_THIS_AG:
2415 case XFS_ALLOCTYPE_NEAR_BNO:
2416 case XFS_ALLOCTYPE_THIS_BNO:
2417 /*
2418 * These three force us into a single a.g.
2419 */
2420 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
a862e0fd 2421 args->pag = xfs_perag_get(mp, args->agno);
1da177e4
LT
2422 args->minleft = 0;
2423 error = xfs_alloc_fix_freelist(args, 0);
2424 args->minleft = minleft;
2425 if (error) {
0b1b213f 2426 trace_xfs_alloc_vextent_nofix(args);
1da177e4
LT
2427 goto error0;
2428 }
2429 if (!args->agbp) {
0b1b213f 2430 trace_xfs_alloc_vextent_noagbp(args);
1da177e4
LT
2431 break;
2432 }
2433 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2434 if ((error = xfs_alloc_ag_vextent(args)))
2435 goto error0;
1da177e4
LT
2436 break;
2437 case XFS_ALLOCTYPE_START_BNO:
2438 /*
2439 * Try near allocation first, then anywhere-in-ag after
2440 * the first a.g. fails.
2441 */
2442 if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) &&
2443 (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2444 args->fsbno = XFS_AGB_TO_FSB(mp,
2445 ((mp->m_agfrotor / rotorstep) %
2446 mp->m_sb.sb_agcount), 0);
2447 bump_rotor = 1;
2448 }
2449 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2450 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2451 /* FALLTHROUGH */
2452 case XFS_ALLOCTYPE_ANY_AG:
2453 case XFS_ALLOCTYPE_START_AG:
2454 case XFS_ALLOCTYPE_FIRST_AG:
2455 /*
2456 * Rotate through the allocation groups looking for a winner.
2457 */
2458 if (type == XFS_ALLOCTYPE_ANY_AG) {
2459 /*
2460 * Start with the last place we left off.
2461 */
2462 args->agno = sagno = (mp->m_agfrotor / rotorstep) %
2463 mp->m_sb.sb_agcount;
2464 args->type = XFS_ALLOCTYPE_THIS_AG;
2465 flags = XFS_ALLOC_FLAG_TRYLOCK;
2466 } else if (type == XFS_ALLOCTYPE_FIRST_AG) {
2467 /*
2468 * Start with allocation group given by bno.
2469 */
2470 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2471 args->type = XFS_ALLOCTYPE_THIS_AG;
2472 sagno = 0;
2473 flags = 0;
2474 } else {
2475 if (type == XFS_ALLOCTYPE_START_AG)
2476 args->type = XFS_ALLOCTYPE_THIS_AG;
2477 /*
2478 * Start with the given allocation group.
2479 */
2480 args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2481 flags = XFS_ALLOC_FLAG_TRYLOCK;
2482 }
2483 /*
2484 * Loop over allocation groups twice; first time with
2485 * trylock set, second time without.
2486 */
1da177e4 2487 for (;;) {
a862e0fd 2488 args->pag = xfs_perag_get(mp, args->agno);
1da177e4
LT
2489 if (no_min) args->minleft = 0;
2490 error = xfs_alloc_fix_freelist(args, flags);
2491 args->minleft = minleft;
2492 if (error) {
0b1b213f 2493 trace_xfs_alloc_vextent_nofix(args);
1da177e4
LT
2494 goto error0;
2495 }
2496 /*
2497 * If we get a buffer back then the allocation will fly.
2498 */
2499 if (args->agbp) {
2500 if ((error = xfs_alloc_ag_vextent(args)))
2501 goto error0;
2502 break;
2503 }
0b1b213f
CH
2504
2505 trace_xfs_alloc_vextent_loopfailed(args);
2506
1da177e4
LT
2507 /*
2508 * Didn't work, figure out the next iteration.
2509 */
2510 if (args->agno == sagno &&
2511 type == XFS_ALLOCTYPE_START_BNO)
2512 args->type = XFS_ALLOCTYPE_THIS_AG;
d210a28c
YL
2513 /*
2514 * For the first allocation, we can try any AG to get
2515 * space. However, if we already have allocated a
2516 * block, we don't want to try AGs whose number is below
2517 * sagno. Otherwise, we may end up with out-of-order
2518 * locking of AGF, which might cause deadlock.
2519 */
2520 if (++(args->agno) == mp->m_sb.sb_agcount) {
2521 if (args->firstblock != NULLFSBLOCK)
2522 args->agno = sagno;
2523 else
2524 args->agno = 0;
2525 }
1da177e4
LT
2526 /*
2527 * Reached the starting a.g., must either be done
2528 * or switch to non-trylock mode.
2529 */
2530 if (args->agno == sagno) {
2531 if (no_min == 1) {
2532 args->agbno = NULLAGBLOCK;
0b1b213f 2533 trace_xfs_alloc_vextent_allfailed(args);
1da177e4
LT
2534 break;
2535 }
2536 if (flags == 0) {
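				/*
				 * Already scanning without trylock and still
				 * no AG works; the last resort is one more
				 * pass with minleft ignored.
				 */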
2537 no_min = 1;
2538 } else {
2539 flags = 0;
2540 if (type == XFS_ALLOCTYPE_START_BNO) {
2541 args->agbno = XFS_FSB_TO_AGBNO(mp,
2542 args->fsbno);
2543 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2544 }
2545 }
2546 }
a862e0fd 2547 xfs_perag_put(args->pag);
1da177e4 2548 }
1da177e4
LT
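		/*
		 * Remember which AG we ended up in so the rotor spreads
		 * subsequent allocations across the allocation groups.
		 */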
2549 if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
2550 if (args->agno == sagno)
2551 mp->m_agfrotor = (mp->m_agfrotor + 1) %
2552 (mp->m_sb.sb_agcount * rotorstep);
2553 else
2554 mp->m_agfrotor = (args->agno * rotorstep + 1) %
2555 (mp->m_sb.sb_agcount * rotorstep);
2556 }
2557 break;
2558 default:
2559 ASSERT(0);
2560 /* NOTREACHED */
2561 }
2562 if (args->agbno == NULLAGBLOCK)
2563 args->fsbno = NULLFSBLOCK;
2564 else {
2565 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
2566#ifdef DEBUG
2567 ASSERT(args->len >= args->minlen);
2568 ASSERT(args->len <= args->maxlen);
2569 ASSERT(args->agbno % args->alignment == 0);
2570 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
2571 args->len);
2572#endif
2573 }
a862e0fd 2574 xfs_perag_put(args->pag);
1da177e4
LT
2575 return 0;
2576error0:
a862e0fd 2577 xfs_perag_put(args->pag);
1da177e4
LT
2578 return error;
2579}
2580
2581/*
2582 * Free an extent.
2583 * Just break up the extent address and hand off to xfs_free_ag_extent
2584 * after fixing up the freelist.
2585 */
2586int /* error */
2587xfs_free_extent(
2588 xfs_trans_t *tp, /* transaction pointer */
2589 xfs_fsblock_t bno, /* starting block number of extent */
2590 xfs_extlen_t len) /* length of extent */
2591{
0e1edbd9 2592 xfs_alloc_arg_t args;
1da177e4
LT
2593 int error;
2594
2595 ASSERT(len != 0);
0e1edbd9 2596 memset(&args, 0, sizeof(xfs_alloc_arg_t));
1da177e4
LT
2597 args.tp = tp;
2598 args.mp = tp->t_mountp;
be65b18a
DC
2599
2600 /*
 2601	 * Validate that the block number is legal - this enables us to detect
 2602	 * and handle silent filesystem corruption rather than crashing.
2603 */
1da177e4 2604 args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
be65b18a
DC
2605 if (args.agno >= args.mp->m_sb.sb_agcount)
2606 return EFSCORRUPTED;
2607
1da177e4 2608 args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
be65b18a
DC
2609 if (args.agbno >= args.mp->m_sb.sb_agblocks)
2610 return EFSCORRUPTED;
2611
a862e0fd 2612 args.pag = xfs_perag_get(args.mp, args.agno);
be65b18a
DC
2613 ASSERT(args.pag);
2614
2615 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
2616 if (error)
1da177e4 2617 goto error0;
be65b18a
DC
2618
2619 /* validate the extent size is legal now we have the agf locked */
2620 if (args.agbno + len >
2621 be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
2622 error = EFSCORRUPTED;
2623 goto error0;
2624 }
2625
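	/*
	 * Free the extent, then mark it busy so it cannot be reused before
	 * the transaction that freed it commits to the log.
	 */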
0e1edbd9 2626 error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
a870acd9 2627 if (!error)
4ecbfe63 2628 xfs_extent_busy_insert(tp, args.agno, args.agbno, len, 0);
1da177e4 2629error0:
a862e0fd 2630 xfs_perag_put(args.pag);
1da177e4
LT
2631 return error;
2632}