/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2010 David Chinner.
 * Copyright (c) 2011 Christoph Hellwig.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"

void
xfs_extent_busy_insert(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	unsigned int		flags)
{
	struct xfs_extent_busy	*new;
	struct xfs_extent_busy	*busyp;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent = NULL;

	new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_MAYFAIL);
	if (!new) {
		/*
		 * No memory!  Since it is now not possible to track the free
		 * block, make this a synchronous transaction to ensure that
		 * the block is not reused before this transaction commits.
		 */
		trace_xfs_extent_busy_enomem(tp->t_mountp, agno, bno, len);
		xfs_trans_set_sync(tp);
		return;
	}

	new->agno = agno;
	new->bno = bno;
	new->length = len;
	INIT_LIST_HEAD(&new->list);
	new->flags = flags;

	/* trace before insert to be able to see failed inserts */
	trace_xfs_extent_busy(tp->t_mountp, agno, bno, len);

	pag = xfs_perag_get(tp->t_mountp, new->agno);
	spin_lock(&pag->pagb_lock);
	rbp = &pag->pagb_tree.rb_node;
	while (*rbp) {
		parent = *rbp;
		busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);

		if (new->bno < busyp->bno) {
			rbp = &(*rbp)->rb_left;
			ASSERT(new->bno + new->length <= busyp->bno);
		} else if (new->bno > busyp->bno) {
			rbp = &(*rbp)->rb_right;
			ASSERT(bno >= busyp->bno + busyp->length);
		} else {
			ASSERT(0);
		}
	}

	rb_link_node(&new->rb_node, parent, rbp);
	rb_insert_color(&new->rb_node, &pag->pagb_tree);

	list_add(&new->list, &tp->t_busy);
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}
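
/*
 * Illustrative sketch, not part of the upstream file: how a hypothetical
 * caller that has just freed an extent within a transaction might record it
 * as busy so it cannot be reallocated and overwritten before the free
 * commits to the log.  example_record_freed_extent() and its use are
 * assumptions for illustration; only xfs_extent_busy_insert() is real.
 */
static inline void
example_record_freed_extent(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	/* No special flags: the extent is eligible for discard if enabled. */
	xfs_extent_busy_insert(tp, agno, bno, len, 0);
}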

/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  You need to be holding the busy extent tree lock when calling
 * xfs_extent_busy_search().  This function returns 0 for no overlapping busy
 * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
 * match.  This is done so that a non-zero return indicates an overlap that
 * will require a synchronous transaction, while still allowing the caller to
 * distinguish between a partial and an exact match.
 */
int
xfs_extent_busy_search(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;
	struct xfs_extent_busy	*busyp;
	int			match = 0;

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);

	rbp = pag->pagb_tree.rb_node;

	/* find closest start bno overlap */
	while (rbp) {
		busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
		if (bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			if (bno + len > busyp->bno)
				match = -1;
			rbp = rbp->rb_left;
		} else if (bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			if (bno < busyp->bno + busyp->length)
				match = -1;
			rbp = rbp->rb_right;
		} else {
			/* bno matches busyp, length determines exact match */
			match = (busyp->length == len) ? 1 : -1;
			break;
		}
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
	return match;
}
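
/*
 * Illustrative sketch, not part of the upstream file: interpreting the
 * tristate result of xfs_extent_busy_search().  A hypothetical caller that
 * intends to hand out [bno, bno + len) again could force the transaction to
 * commit synchronously whenever any overlap is reported, mirroring the
 * ENOMEM fallback in xfs_extent_busy_insert() above.
 */
static inline void
example_check_busy_overlap(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	int	busy = xfs_extent_busy_search(tp->t_mountp, agno, bno, len);

	if (busy) {
		/*
		 * busy == 1 is an exact match, busy == -1 a partial overlap;
		 * either way the freeing transaction may not have reached
		 * the log yet, so make this transaction synchronous before
		 * the blocks are reused.
		 */
		xfs_trans_set_sync(tp);
	}
}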

/*
 * The found free extent [fbno, fend] overlaps part or all of the given busy
 * extent.  If the overlap covers the beginning, the end, or all of the busy
 * extent, the overlapping portion can be made unbusy and used for the
 * allocation.  We can't split a busy extent because we can't modify a
 * transaction/CIL context busy list, but we can update an entry's block
 * number or length.
 *
 * Returns true if the extent can safely be reused, or false if the search
 * needs to be restarted.
 */
STATIC bool
xfs_extent_busy_update_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata) __releases(&pag->pagb_lock)
					  __acquires(&pag->pagb_lock)
{
	xfs_agblock_t		fend = fbno + flen;
	xfs_agblock_t		bbno = busyp->bno;
	xfs_agblock_t		bend = bbno + busyp->length;

	/*
	 * This extent is currently being discarded.  Give the thread
	 * performing the discard a chance to mark the extent unbusy
	 * and retry.
	 */
	if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
		spin_unlock(&pag->pagb_lock);
		delay(1);
		spin_lock(&pag->pagb_lock);
		return false;
	}

	/*
	 * If there is a busy extent overlapping a user allocation, we have
	 * no choice but to force the log and retry the search.
	 *
	 * Fortunately this does not happen during normal operation, but
	 * only if the filesystem is very low on space and has to dip into
	 * the AGFL for normal allocations.
	 */
	if (userdata)
		goto out_force_log;

	if (bbno < fbno && bend > fend) {
		/*
		 * Case 1:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +---------+
		 *        fbno   fend
		 */

		/*
		 * We would have to split the busy extent to be able to track
		 * it correctly, which we cannot do because we would have to
		 * modify the list of busy extents attached to the transaction
		 * or CIL context, which is immutable.
		 *
		 * Force out the log to clear the busy extent and retry the
		 * search.
		 */
		goto out_force_log;
	} else if (bbno >= fbno && bend <= fend) {
		/*
		 * Case 2:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +-----------------+
		 *    fbno           fend
		 *
		 * Case 3:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 4:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 5:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +-----------------------------------+
		 *    fbno                             fend
		 *
		 */

		/*
		 * The busy extent is fully covered by the extent we are
		 * allocating, and can simply be removed from the rbtree.
		 * However we cannot remove it from the immutable list
		 * tracking busy extents in the transaction or CIL context,
		 * so set the length to zero to mark it invalid.
		 *
		 * We also need to restart the busy extent search from the
		 * tree root, because erasing the node can rearrange the
		 * tree topology.
		 */
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
		busyp->length = 0;
		return false;
	} else if (fend < bend) {
		/*
		 * Case 6:
		 *              bbno           bend
		 *              +BBBBBBBBBBBBBBBBB+
		 *              +---------+
		 *              fbno   fend
		 *
		 * Case 7:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +------------------+
		 *    fbno            fend
		 *
		 */
		busyp->bno = fend;
	} else if (bbno < fbno) {
		/*
		 * Case 8:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +-------------+
		 *        fbno       fend
		 *
		 * Case 9:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +----------------------+
		 *        fbno                fend
		 */
		busyp->length = fbno - busyp->bno;
	} else {
		ASSERT(0);
	}

	trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
	return true;

out_force_log:
	spin_unlock(&pag->pagb_lock);
	xfs_log_force(mp, XFS_LOG_SYNC);
	trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
	spin_lock(&pag->pagb_lock);
	return false;
}


/*
 * For a given extent [fbno, flen], make sure we can reuse it safely.
 */
void
xfs_extent_busy_reuse(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;

	ASSERT(flen > 0);

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);
restart:
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fbno + flen <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
						  userdata))
			goto restart;
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}
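
/*
 * Illustrative sketch, not part of the upstream file: a hypothetical caller
 * that takes a single known block (for example one pulled off a free list)
 * and must make sure it is safe to reuse for metadata before handing it out.
 * xfs_extent_busy_reuse() either trims/erases the conflicting busy extents
 * or forces the log and spins until the range is safe.
 */
static inline void
example_reuse_single_block(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno)
{
	/* Metadata reuse: userdata == false, so busy state can be trimmed. */
	xfs_extent_busy_reuse(mp, agno, bno, 1, false);
}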

/*
 * For a given extent [fbno, flen], search the busy extent list to find a
 * subset of the extent that is not busy.  If *rlen is smaller than
 * args->minlen, no suitable extent could be found, and the higher level
 * code needs to force out the log and retry the allocation.
 */
void
xfs_extent_busy_trim(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	xfs_agblock_t		*rbno,
	xfs_extlen_t		*rlen)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	struct rb_node		*rbp;

	ASSERT(len > 0);

	spin_lock(&args->pag->pagb_lock);
restart:
	fbno = bno;
	flen = len;
	rbp = args->pag->pagb_tree.rb_node;
	while (rbp && flen >= args->minlen) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	fend = fbno + flen;
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fend <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		/*
		 * If this is a metadata allocation, try to reuse the busy
		 * extent instead of trimming the allocation.
		 */
		if (!args->userdata &&
		    !(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
			if (!xfs_extent_busy_update_extent(args->mp, args->pag,
							  busyp, fbno, flen,
							  false))
				goto restart;
			continue;
		}

		if (bbno <= fbno) {
			/* start overlap */

			/*
			 * Case 1:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +---------+
			 *        fbno   fend
			 *
			 * Case 2:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-------------+
			 *    fbno       fend
			 *
			 * Case 3:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +-------------+
			 *        fbno       fend
			 *
			 * Case 4:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-----------------+
			 *    fbno           fend
			 *
			 * No unbusy region in extent, return failure.
			 */
			if (fend <= bend)
				goto fail;

			/*
			 * Case 5:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +----------------------+
			 *        fbno                fend
			 *
			 * Case 6:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *                       +-------+
			 *                       fbno fend
			 */
			fbno = bend;
		} else if (bend >= fend) {
			/* end overlap */

			/*
			 * Case 7:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +------------------+
			 *    fbno            fend
			 *
			 * Case 8:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *    +-------+
			 *    fbno fend
			 */
			fend = bbno;
		} else {
			/* middle overlap */

			/*
			 * Case 9:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +-----------------------------------+
			 *    fbno                             fend
			 *
			 * Can be trimmed to:
			 *    +-------+        OR         +-------+
			 *    fbno fend                   fbno fend
			 *
			 * Backward allocation leads to significant
			 * fragmentation of directories, which degrades
			 * directory performance, therefore we always want to
			 * choose the option that produces forward allocation
			 * patterns.
			 * Preferring the lower bno extent will make the next
			 * request use "fend" as the start of the next
			 * allocation; if the segment is no longer busy at
			 * that point, we'll get a contiguous allocation, but
			 * even if it is still busy, we will get a forward
			 * allocation.
			 * We try to avoid choosing the segment at "bend",
			 * because that can lead to the next allocation
			 * taking the segment at "fbno", which would be a
			 * backward allocation.  We only use the segment at
			 * "fbno" if it is much larger than the current
			 * requested size, because in that case there's a
			 * good chance subsequent allocations will be
			 * contiguous.
			 */
			if (bbno - fbno >= args->maxlen) {
				/* left candidate fits perfect */
				fend = bbno;
			} else if (fend - bend >= args->maxlen * 4) {
				/* right candidate has enough free space */
				fbno = bend;
			} else if (bbno - fbno >= args->minlen) {
				/* left candidate fits minimum requirement */
				fend = bbno;
			} else {
				goto fail;
			}
		}

		flen = fend - fbno;
	}
	spin_unlock(&args->pag->pagb_lock);

	if (fbno != bno || flen != len) {
		trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len,
					  fbno, flen);
	}
	*rbno = fbno;
	*rlen = flen;
	return;
fail:
	/*
	 * Return a zero extent length as failure indication.  All callers
	 * re-check if the trimmed extent satisfies the minlen requirement.
	 */
	spin_unlock(&args->pag->pagb_lock);
	trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
	*rbno = fbno;
	*rlen = 0;
}
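
/*
 * Illustrative sketch, not part of the upstream file: how a hypothetical
 * allocator path might use xfs_extent_busy_trim() and fall back to a
 * synchronous log force when nothing large enough survives the trim.
 * example_trim_candidate() and its out parameters are assumptions for
 * illustration.
 */
static inline bool
example_trim_candidate(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	xfs_agblock_t		*out_bno,
	xfs_extlen_t		*out_len)
{
	xfs_extent_busy_trim(args, bno, len, out_bno, out_len);

	if (*out_len < args->minlen) {
		/*
		 * Nothing usable survived the trim; force the log so the
		 * busy extents get cleared, then let the caller retry the
		 * allocation.
		 */
		xfs_log_force(args->mp, XFS_LOG_SYNC);
		return false;
	}
	return true;
}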

STATIC void
xfs_extent_busy_clear_one(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp)
{
	if (busyp->length) {
		trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
						busyp->length);
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
	}

	list_del_init(&busyp->list);
	kmem_free(busyp);
}

/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set, skip extents that need to be discarded, and mark
 * these as undergoing a discard operation instead.
 */
void
xfs_extent_busy_clear(
	struct xfs_mount	*mp,
	struct list_head	*list,
	bool			do_discard)
{
	struct xfs_extent_busy	*busyp, *n;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno = NULLAGNUMBER;

	list_for_each_entry_safe(busyp, n, list, list) {
		if (busyp->agno != agno) {
			if (pag) {
				spin_unlock(&pag->pagb_lock);
				xfs_perag_put(pag);
			}
			pag = xfs_perag_get(mp, busyp->agno);
			spin_lock(&pag->pagb_lock);
			agno = busyp->agno;
		}

		if (do_discard && busyp->length &&
		    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD))
			busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
		else
			xfs_extent_busy_clear_one(mp, pag, busyp);
	}

	if (pag) {
		spin_unlock(&pag->pagb_lock);
		xfs_perag_put(pag);
	}
}
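
/*
 * Illustrative sketch, not part of the upstream file: clearing a busy list
 * once the transactions that own it have reached the log.  Whether the
 * still-busy extents are handed over to discard depends on the mount
 * options; the "discard_enabled" flag here is a hypothetical stand-in for
 * that decision.
 */
static inline void
example_clear_after_commit(
	struct xfs_mount	*mp,
	struct list_head	*busy_list,
	bool			discard_enabled)
{
	xfs_extent_busy_clear(mp, busy_list, discard_enabled);
}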

/*
 * Callback for list_sort to sort busy extents by the AG they reside in.
 */
int
xfs_extent_busy_ag_cmp(
	void			*priv,
	struct list_head	*a,
	struct list_head	*b)
{
	return container_of(a, struct xfs_extent_busy, list)->agno -
	       container_of(b, struct xfs_extent_busy, list)->agno;
}
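
/*
 * Illustrative sketch, not part of the upstream file: sorting a list of
 * busy extents by AG with list_sort() before walking it, so per-AG locks
 * are always taken in ascending AG order.  Assumes <linux/list_sort.h> is
 * available; the list head "busy_list" is hypothetical.
 */
static inline void
example_sort_busy_list(
	struct list_head	*busy_list)
{
	list_sort(NULL, busy_list, xfs_extent_busy_ag_cmp);
}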