/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2010 David Chinner.
 * Copyright (c) 2011 Christoph Hellwig.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
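
/*
 * Overview: a "busy" extent is one that has been freed, but whose freeing
 * transaction has not yet been committed to the on-disk log.  Until that
 * commit, the extent must not be reused for user data and must not be
 * handed to a discard.  Each AG tracks its busy extents in a red-black
 * tree (pag->pagb_tree, indexed by start block and protected by
 * pag->pagb_lock), and each transaction/CIL context keeps an immutable
 * list of the busy extents it owns (tp->t_busy).
 */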
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"

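/*
 * Insert a new busy extent record covering [bno, bno + len) in AG @agno:
 * allocate the record, add it to the per-AG rbtree (which must not already
 * contain an overlapping extent, as the ASSERTs below check), and link it
 * onto the transaction's busy list so it can be cleared once the
 * transaction commits to the log.
 */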
void
xfs_extent_busy_insert(
        struct xfs_trans        *tp,
        xfs_agnumber_t          agno,
        xfs_agblock_t           bno,
        xfs_extlen_t            len,
        unsigned int            flags)
{
        struct xfs_extent_busy  *new;
        struct xfs_extent_busy  *busyp;
        struct xfs_perag        *pag;
        struct rb_node          **rbp;
        struct rb_node          *parent = NULL;

        new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_SLEEP);
        new->agno = agno;
        new->bno = bno;
        new->length = len;
        INIT_LIST_HEAD(&new->list);
        new->flags = flags;

        /* trace before insert to be able to see failed inserts */
        trace_xfs_extent_busy(tp->t_mountp, agno, bno, len);

        pag = xfs_perag_get(tp->t_mountp, new->agno);
        spin_lock(&pag->pagb_lock);
        rbp = &pag->pagb_tree.rb_node;
        while (*rbp) {
                parent = *rbp;
                busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);

                if (new->bno < busyp->bno) {
                        rbp = &(*rbp)->rb_left;
                        ASSERT(new->bno + new->length <= busyp->bno);
                } else if (new->bno > busyp->bno) {
                        rbp = &(*rbp)->rb_right;
                        ASSERT(bno >= busyp->bno + busyp->length);
                } else {
                        ASSERT(0);
                }
        }

        rb_link_node(&new->rb_node, parent, rbp);
        rb_insert_color(&new->rb_node, &pag->pagb_tree);

        list_add(&new->list, &tp->t_busy);
        spin_unlock(&pag->pagb_lock);
        xfs_perag_put(pag);
}
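
/*
 * Illustrative pairing (hypothetical call site, condensed): the extent
 * freeing path is expected to mark blocks busy in the same transaction
 * that frees them, roughly:
 *
 *      error = xfs_free_extent(tp, fsbno, len);
 *      if (!error)
 *              xfs_extent_busy_insert(tp, agno, agbno, len, 0);
 *
 * The real call sites live inside the allocator's freeing code; the
 * variable names above are placeholders.
 */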

/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  The busy extent tree lock is taken internally, so callers of
 * xfs_extent_busy_search() need not hold it.  This function returns 0 for
 * no overlapping busy extent, -1 for an overlapping but not exact busy
 * extent, and 1 for an exact match.  This is done so that a non-zero
 * return indicates an overlap that will require a synchronous transaction,
 * but it can still be used to distinguish between a partial and an exact
 * match.
 */
int
xfs_extent_busy_search(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno,
        xfs_agblock_t           bno,
        xfs_extlen_t            len)
{
        struct xfs_perag        *pag;
        struct rb_node          *rbp;
        struct xfs_extent_busy  *busyp;
        int                     match = 0;

        pag = xfs_perag_get(mp, agno);
        spin_lock(&pag->pagb_lock);

        rbp = pag->pagb_tree.rb_node;

        /* find closest start bno overlap */
        while (rbp) {
                busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
                if (bno < busyp->bno) {
                        /* may overlap, but exact start block is lower */
                        if (bno + len > busyp->bno)
                                match = -1;
                        rbp = rbp->rb_left;
                } else if (bno > busyp->bno) {
                        /* may overlap, but exact start block is higher */
                        if (bno < busyp->bno + busyp->length)
                                match = -1;
                        rbp = rbp->rb_right;
                } else {
                        /* bno matches busyp, length determines exact match */
                        match = (busyp->length == len) ? 1 : -1;
                        break;
                }
        }
        spin_unlock(&pag->pagb_lock);
        xfs_perag_put(pag);
        return match;
}
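
/*
 * Illustrative interpretation of the tri-state return (hypothetical
 * caller; only the return-value handling is shown):
 *
 *      switch (xfs_extent_busy_search(mp, agno, bno, len)) {
 *      case 0:         // no overlapping busy extent, safe to use
 *              break;
 *      case 1:         // exact match: this extent itself is busy
 *      default:        // -1: partial overlap
 *              // reuse requires a synchronous transaction (log force)
 *              break;
 *      }
 */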

/*
 * The found free extent [fbno, fend] overlaps part or all of the given busy
 * extent.  If the overlap covers the beginning, the end, or all of the busy
 * extent, the overlapping portion can be made unbusy and used for the
 * allocation.  We can't split a busy extent because we can't modify a
 * transaction/CIL context busy list, but we can update an entry's block
 * number or length.
 *
 * Returns true if the extent can safely be reused, or false if the search
 * needs to be restarted.
 */
STATIC bool
xfs_extent_busy_update_extent(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag,
        struct xfs_extent_busy  *busyp,
        xfs_agblock_t           fbno,
        xfs_extlen_t            flen,
        bool                    userdata) __releases(&pag->pagb_lock)
                                          __acquires(&pag->pagb_lock)
{
        xfs_agblock_t           fend = fbno + flen;
        xfs_agblock_t           bbno = busyp->bno;
        xfs_agblock_t           bend = bbno + busyp->length;

        /*
         * This extent is currently being discarded.  Give the thread
         * performing the discard a chance to mark the extent unbusy
         * and retry.
         */
        if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
                spin_unlock(&pag->pagb_lock);
                delay(1);
                spin_lock(&pag->pagb_lock);
                return false;
        }

        /*
         * If there is a busy extent overlapping a user allocation, we have
         * no choice but to force the log and retry the search.
         *
         * Fortunately this does not happen during normal operation, but
         * only if the filesystem is very low on space and has to dip into
         * the AGFL for normal allocations.
         */
        if (userdata)
                goto out_force_log;

        if (bbno < fbno && bend > fend) {
                /*
                 * Case 1:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *        +---------+
                 *        fbno   fend
                 */

                /*
                 * We would have to split the busy extent to be able to
                 * track it correctly, which we cannot do because we would
                 * have to modify the list of busy extents attached to the
                 * transaction or CIL context, which is immutable.
                 *
                 * Force out the log to clear the busy extent and retry the
                 * search.
                 */
                goto out_force_log;
        } else if (bbno >= fbno && bend <= fend) {
                /*
                 * Case 2:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *    +-----------------+
                 *    fbno           fend
                 *
                 * Case 3:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *    +--------------------------+
                 *    fbno                    fend
                 *
                 * Case 4:
                 *             bbno           bend
                 *             +BBBBBBBBBBBBBBBBB+
                 *    +--------------------------+
                 *    fbno                    fend
                 *
                 * Case 5:
                 *             bbno           bend
                 *             +BBBBBBBBBBBBBBBBB+
                 *    +-----------------------------------+
                 *    fbno                             fend
                 *
                 */

                /*
                 * The busy extent is fully covered by the extent we are
                 * allocating, and can simply be removed from the rbtree.
                 * However we cannot remove it from the immutable list
                 * tracking busy extents in the transaction or CIL context,
                 * so set the length to zero to mark it invalid.
                 *
                 * We also need to restart the busy extent search from the
                 * tree root, because erasing the node can rearrange the
                 * tree topology.
                 */
                rb_erase(&busyp->rb_node, &pag->pagb_tree);
                busyp->length = 0;
                return false;
        } else if (fend < bend) {
                /*
                 * Case 6:
                 *              bbno           bend
                 *              +BBBBBBBBBBBBBBBBB+
                 *              +---------+
                 *              fbno   fend
                 *
                 * Case 7:
                 *             bbno           bend
                 *             +BBBBBBBBBBBBBBBBB+
                 *    +------------------+
                 *    fbno            fend
                 *
                 */
                busyp->bno = fend;
        } else if (bbno < fbno) {
                /*
                 * Case 8:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *        +-------------+
                 *        fbno       fend
                 *
                 * Case 9:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *        +----------------------+
                 *        fbno                fend
                 */
                busyp->length = fbno - busyp->bno;
        } else {
                ASSERT(0);
        }

        trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
        return true;

out_force_log:
        spin_unlock(&pag->pagb_lock);
        xfs_log_force(mp, XFS_LOG_SYNC);
        trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
        spin_lock(&pag->pagb_lock);
        return false;
}
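
/*
 * Note: the __releases()/__acquires() annotations above document for
 * sparse that xfs_extent_busy_update_extent() may temporarily drop
 * pagb_lock (in the discard-wait and log-force paths) before returning
 * with it held again.  That is also why a false return means "the tree
 * may have changed underneath us, restart the search from the root".
 */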

/*
 * For a given extent [fbno, flen], make sure we can reuse it safely.
 */
void
xfs_extent_busy_reuse(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno,
        xfs_agblock_t           fbno,
        xfs_extlen_t            flen,
        bool                    userdata)
{
        struct xfs_perag        *pag;
        struct rb_node          *rbp;

        ASSERT(flen > 0);

        pag = xfs_perag_get(mp, agno);
        spin_lock(&pag->pagb_lock);
restart:
        rbp = pag->pagb_tree.rb_node;
        while (rbp) {
                struct xfs_extent_busy *busyp =
                        rb_entry(rbp, struct xfs_extent_busy, rb_node);
                xfs_agblock_t   bbno = busyp->bno;
                xfs_agblock_t   bend = bbno + busyp->length;

                if (fbno + flen <= bbno) {
                        rbp = rbp->rb_left;
                        continue;
                } else if (fbno >= bend) {
                        rbp = rbp->rb_right;
                        continue;
                }

                if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
                                                  userdata))
                        goto restart;
        }
        spin_unlock(&pag->pagb_lock);
        xfs_perag_put(pag);
}
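
/*
 * Illustrative caller (hypothetical): a path about to hand out blocks it
 * knows were only just freed, such as a block taken from the AGFL, would
 * first make sure any overlapping busy extents are trimmed back or forced
 * out of the log:
 *
 *      xfs_extent_busy_reuse(mp, agno, bno, 1, false);
 *      // [bno, bno + 1) is now safe to reallocate for metadata
 */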

/*
 * For a given extent [fbno, flen], search the busy extent list to find a
 * subset of the extent that is not busy.  If *rlen is smaller than
 * args->minlen no suitable extent could be found, and the higher level
 * code needs to force out the log and retry the allocation.
 */
void
xfs_extent_busy_trim(
        struct xfs_alloc_arg    *args,
        xfs_agblock_t           bno,
        xfs_extlen_t            len,
        xfs_agblock_t           *rbno,
        xfs_extlen_t            *rlen)
{
        xfs_agblock_t           fbno;
        xfs_extlen_t            flen;
        struct rb_node          *rbp;

        ASSERT(len > 0);

        spin_lock(&args->pag->pagb_lock);
restart:
        fbno = bno;
        flen = len;
        rbp = args->pag->pagb_tree.rb_node;
        while (rbp && flen >= args->minlen) {
                struct xfs_extent_busy *busyp =
                        rb_entry(rbp, struct xfs_extent_busy, rb_node);
                xfs_agblock_t   fend = fbno + flen;
                xfs_agblock_t   bbno = busyp->bno;
                xfs_agblock_t   bend = bbno + busyp->length;

                if (fend <= bbno) {
                        rbp = rbp->rb_left;
                        continue;
                } else if (fbno >= bend) {
                        rbp = rbp->rb_right;
                        continue;
                }

                /*
                 * If this is a metadata allocation, try to reuse the busy
                 * extent instead of trimming the allocation.
                 */
                if (!xfs_alloc_is_userdata(args->datatype) &&
                    !(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
                        if (!xfs_extent_busy_update_extent(args->mp, args->pag,
                                                          busyp, fbno, flen,
                                                          false))
                                goto restart;
                        continue;
                }

                if (bbno <= fbno) {
                        /* start overlap */

                        /*
                         * Case 1:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *        +---------+
                         *        fbno   fend
                         *
                         * Case 2:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *    +-------------+
                         *    fbno       fend
                         *
                         * Case 3:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *        +-------------+
                         *        fbno       fend
                         *
                         * Case 4:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *    +-----------------+
                         *    fbno           fend
                         *
                         * No unbusy region in extent, return failure.
                         */
                        if (fend <= bend)
                                goto fail;

                        /*
                         * Case 5:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *        +----------------------+
                         *        fbno                fend
                         *
                         * Case 6:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *    +--------------------------+
                         *    fbno                    fend
                         *
                         * Needs to be trimmed to:
                         *                       +-------+
                         *                       fbno fend
                         */
                        fbno = bend;
                } else if (bend >= fend) {
                        /* end overlap */

                        /*
                         * Case 7:
                         *             bbno           bend
                         *             +BBBBBBBBBBBBBBBBB+
                         *    +------------------+
                         *    fbno            fend
                         *
                         * Case 8:
                         *             bbno           bend
                         *             +BBBBBBBBBBBBBBBBB+
                         *    +--------------------------+
                         *    fbno                    fend
                         *
                         * Needs to be trimmed to:
                         *    +-------+
                         *    fbno fend
                         */
                        fend = bbno;
                } else {
                        /* middle overlap */

                        /*
                         * Case 9:
                         *             bbno           bend
                         *             +BBBBBBBBBBBBBBBBB+
                         *    +-----------------------------------+
                         *    fbno                             fend
                         *
                         * Can be trimmed to:
                         *    +-------+        OR         +-------+
                         *    fbno fend                   fbno fend
                         *
                         * Backward allocation leads to significant
                         * fragmentation of directories, which degrades
                         * directory performance, therefore we always want to
                         * choose the option that produces forward allocation
                         * patterns.
                         * Preferring the lower bno extent will make the next
                         * request use "fend" as the start of the next
                         * allocation; if the segment is no longer busy at
                         * that point, we'll get a contiguous allocation, but
                         * even if it is still busy, we will get a forward
                         * allocation.
                         * We try to avoid choosing the segment at "bend",
                         * because that can lead to the next allocation
                         * taking the segment at "fbno", which would be a
                         * backward allocation.  We only use the segment at
                         * "fbno" if it is much larger than the current
                         * requested size, because in that case there's a
                         * good chance subsequent allocations will be
                         * contiguous.
                         */
                        if (bbno - fbno >= args->maxlen) {
                                /* left candidate fits perfectly */
                                fend = bbno;
                        } else if (fend - bend >= args->maxlen * 4) {
                                /* right candidate has enough free space */
                                fbno = bend;
                        } else if (bbno - fbno >= args->minlen) {
                                /* left candidate fits minimum requirement */
                                fend = bbno;
                        } else {
                                goto fail;
                        }
                }

                flen = fend - fbno;
        }
        spin_unlock(&args->pag->pagb_lock);

        if (fbno != bno || flen != len) {
                trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len,
                                          fbno, flen);
        }
        *rbno = fbno;
        *rlen = flen;
        return;
fail:
        /*
         * Return a zero extent length as the failure indication.  All
         * callers re-check whether the trimmed extent satisfies the minlen
         * requirement.
         */
        spin_unlock(&args->pag->pagb_lock);
        trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
        *rbno = fbno;
        *rlen = 0;
}
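
/*
 * Illustrative use (hypothetical caller, condensed): the allocator passes
 * a candidate free extent through the trim and falls back to a log force
 * when nothing large enough survives:
 *
 *      xfs_extent_busy_trim(args, fbno, flen, &tbno, &tlen);
 *      if (tlen < args->minlen) {
 *              // force the log and retry the allocation
 *      }
 */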

/*
 * Drop one busy extent: remove it from the per-AG rbtree if it is still
 * live (a zero length marks entries already invalidated by
 * xfs_extent_busy_update_extent()), then unlink and free the record.
 */
STATIC void
xfs_extent_busy_clear_one(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag,
        struct xfs_extent_busy  *busyp)
{
        if (busyp->length) {
                trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
                                                busyp->length);
                rb_erase(&busyp->rb_node, &pag->pagb_tree);
        }

        list_del_init(&busyp->list);
        kmem_free(busyp);
}

/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set, skip extents that need to be discarded and mark
 * these as undergoing a discard operation instead.
 */
void
xfs_extent_busy_clear(
        struct xfs_mount        *mp,
        struct list_head        *list,
        bool                    do_discard)
{
        struct xfs_extent_busy  *busyp, *n;
        struct xfs_perag        *pag = NULL;
        xfs_agnumber_t          agno = NULLAGNUMBER;

        list_for_each_entry_safe(busyp, n, list, list) {
                if (busyp->agno != agno) {
                        if (pag) {
                                spin_unlock(&pag->pagb_lock);
                                xfs_perag_put(pag);
                        }
                        pag = xfs_perag_get(mp, busyp->agno);
                        spin_lock(&pag->pagb_lock);
                        agno = busyp->agno;
                }

                if (do_discard && busyp->length &&
                    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD))
                        busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
                else
                        xfs_extent_busy_clear_one(mp, pag, busyp);
        }

        if (pag) {
                spin_unlock(&pag->pagb_lock);
                xfs_perag_put(pag);
        }
}

/*
 * Callback for list_sort to sort busy extents by the AG they reside in.
 */
int
xfs_extent_busy_ag_cmp(
        void                    *priv,
        struct list_head        *a,
        struct list_head        *b)
{
        return container_of(a, struct xfs_extent_busy, list)->agno -
                container_of(b, struct xfs_extent_busy, list)->agno;
}
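
/*
 * Illustrative use with the kernel's list_sort() API: sorting a busy
 * extent list so all extents in the same AG are adjacent, e.g.:
 *
 *      list_sort(NULL, &extent_list, xfs_extent_busy_ag_cmp);
 *
 * Grouping by AG lets a consumer take each pag->pagb_lock once per AG
 * instead of once per extent.
 */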