/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zio.h>
#include <sys/range_tree.h>

/*
 * Range trees are tree-based data structures that can be used to
 * track free space or generally any space allocation information.
 * A range tree keeps track of individual segments and automatically
 * provides facilities such as adjacent extent merging and extent
 * splitting in response to range add/remove requests.
 *
 * A range tree starts out completely empty, with no segments in it.
 * Adding an allocation via range_tree_add to the range tree can:
 * 1) create a new extent
 * 2) extend an adjacent extent
 * 3) merge two adjacent extents
 * Conversely, removing an allocation via range_tree_remove can:
 * 1) completely remove an extent
 * 2) shorten an extent (if the allocation was near one of its ends)
 * 3) split an extent into two extents, in effect punching a hole
 *
 * A range tree is also capable of 'bridging' gaps when adding
 * allocations. This is useful for cases where close proximity of
 * allocations is an important detail that needs to be represented
 * in the range tree. See range_tree_create_impl(), which takes the
 * maximum allowed gap size as an argument. The default behavior is
 * not to bridge gaps (i.e. the maximum allowed gap size is 0).
 *
 * In order to traverse a range tree, use either the range_tree_walk()
 * or range_tree_vacate() functions.
 *
 * To obtain more accurate information on individual segment
 * operations that the range tree performs "under the hood", you can
 * specify a set of callbacks by passing a range_tree_ops_t structure
 * to the range_tree_create function. Any callbacks that are non-NULL
 * are then called at the appropriate times.
 *
 * The range tree code also supports a special variant of range trees
 * that can bridge small gaps between segments. This kind of tree is used
 * by the dsl scanning code to group I/Os into mostly sequential chunks to
 * optimize disk performance. The code here attempts to do this with as
 * little memory and computational overhead as possible. One limitation of
 * this implementation is that range trees with gaps only support the
 * removal of complete segments.
 */

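/*
 * As a quick illustration (a sketch, not compiled into this module;
 * the offsets are arbitrary and the lock protecting the tree is
 * assumed to be held by the caller):
 *
 *	range_tree_t *rt = range_tree_create(NULL, NULL);
 *
 *	range_tree_add(rt, 0x1000, 0x1000);	 creates [0x1000, 0x2000)
 *	range_tree_add(rt, 0x2000, 0x1000);	 extends it to [0x1000, 0x3000)
 *	range_tree_remove(rt, 0x1800, 0x800);	 punches a hole, leaving
 *						 [0x1000, 0x1800) and
 *						 [0x2000, 0x3000)
 *
 *	range_tree_vacate(rt, NULL, NULL);	 empties the tree
 *	range_tree_destroy(rt);
 */
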
kmem_cache_t *range_seg_cache;

/* Generic ops for managing an AVL tree alongside a range tree */
struct range_tree_ops rt_avl_ops = {
	.rtop_create = rt_avl_create,
	.rtop_destroy = rt_avl_destroy,
	.rtop_add = rt_avl_add,
	.rtop_remove = rt_avl_remove,
	.rtop_vacate = rt_avl_vacate,
};

void
range_tree_init(void)
{
	ASSERT(range_seg_cache == NULL);
	range_seg_cache = kmem_cache_create("range_seg_cache",
	    sizeof (range_seg_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
range_tree_fini(void)
{
	kmem_cache_destroy(range_seg_cache);
	range_seg_cache = NULL;
}

void
range_tree_stat_verify(range_tree_t *rt)
{
	range_seg_t *rs;
	uint64_t hist[RANGE_TREE_HISTOGRAM_SIZE] = { 0 };
	int i;

	for (rs = avl_first(&rt->rt_root); rs != NULL;
	    rs = AVL_NEXT(&rt->rt_root, rs)) {
		uint64_t size = rs->rs_end - rs->rs_start;
		int idx = highbit64(size) - 1;

		hist[idx]++;
		ASSERT3U(hist[idx], !=, 0);
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
		if (hist[i] != rt->rt_histogram[i]) {
			zfs_dbgmsg("i=%d, hist=%px, hist=%llu, rt_hist=%llu",
			    i, hist, hist[i], rt->rt_histogram[i]);
		}
		VERIFY3U(hist[i], ==, rt->rt_histogram[i]);
	}
}

static void
range_tree_stat_incr(range_tree_t *rt, range_seg_t *rs)
{
	uint64_t size = rs->rs_end - rs->rs_start;
	int idx = highbit64(size) - 1;

	ASSERT(size != 0);
	ASSERT3U(idx, <,
	    sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));

	rt->rt_histogram[idx]++;
	ASSERT3U(rt->rt_histogram[idx], !=, 0);
}

static void
range_tree_stat_decr(range_tree_t *rt, range_seg_t *rs)
{
	uint64_t size = rs->rs_end - rs->rs_start;
	int idx = highbit64(size) - 1;

	ASSERT(size != 0);
	ASSERT3U(idx, <,
	    sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));

	ASSERT3U(rt->rt_histogram[idx], !=, 0);
	rt->rt_histogram[idx]--;
}

/*
 * NOTE: caller is responsible for all locking.
 */
static int
range_tree_seg_compare(const void *x1, const void *x2)
{
	const range_seg_t *r1 = (const range_seg_t *)x1;
	const range_seg_t *r2 = (const range_seg_t *)x2;

	ASSERT3U(r1->rs_start, <=, r1->rs_end);
	ASSERT3U(r2->rs_start, <=, r2->rs_end);

	/*
	 * Segments are ordered by offset, and any two segments that
	 * overlap compare as equal; this is what allows avl_find() to
	 * return a segment overlapping the search range.
	 */
	return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}

range_tree_t *
range_tree_create_impl(range_tree_ops_t *ops, void *arg,
    int (*avl_compare) (const void *, const void *), uint64_t gap)
{
	range_tree_t *rt = kmem_zalloc(sizeof (range_tree_t), KM_SLEEP);

	avl_create(&rt->rt_root, range_tree_seg_compare,
	    sizeof (range_seg_t), offsetof(range_seg_t, rs_node));

	rt->rt_ops = ops;
	rt->rt_gap = gap;
	rt->rt_arg = arg;
	rt->rt_avl_compare = avl_compare;

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_create != NULL)
		rt->rt_ops->rtop_create(rt, rt->rt_arg);

	return (rt);
}

range_tree_t *
range_tree_create(range_tree_ops_t *ops, void *arg)
{
	return (range_tree_create_impl(ops, arg, NULL, 0));
}

void
range_tree_destroy(range_tree_t *rt)
{
	VERIFY0(rt->rt_space);

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_destroy != NULL)
		rt->rt_ops->rtop_destroy(rt, rt->rt_arg);

	avl_destroy(&rt->rt_root);
	kmem_free(rt, sizeof (*rt));
}

void
range_tree_adjust_fill(range_tree_t *rt, range_seg_t *rs, int64_t delta)
{
	ASSERT3U(rs->rs_fill + delta, !=, 0);
	ASSERT3U(rs->rs_fill + delta, <=, rs->rs_end - rs->rs_start);

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
		rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
	rs->rs_fill += delta;
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
		rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
}

static void
range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
{
	range_tree_t *rt = arg;
	avl_index_t where;
	range_seg_t rsearch, *rs_before, *rs_after, *rs;
	uint64_t end = start + size, gap = rt->rt_gap;
	uint64_t bridge_size = 0;
	boolean_t merge_before, merge_after;

	ASSERT3U(size, !=, 0);
	ASSERT3U(fill, <=, size);

	rsearch.rs_start = start;
	rsearch.rs_end = end;
	rs = avl_find(&rt->rt_root, &rsearch, &where);

	if (gap == 0 && rs != NULL &&
	    rs->rs_start <= start && rs->rs_end >= end) {
		zfs_panic_recover("zfs: allocating allocated segment "
		    "(offset=%llu size=%llu) of (offset=%llu size=%llu)\n",
		    (longlong_t)start, (longlong_t)size,
		    (longlong_t)rs->rs_start,
		    (longlong_t)rs->rs_end - rs->rs_start);
		return;
	}

	/*
	 * If this is a gap-supporting range tree, it is possible that we
	 * are inserting into an existing segment. In this case simply
	 * bump the fill count and call the remove / add callbacks. If the
	 * new range will extend an existing segment, we remove the
	 * existing one, apply the new extent to it and re-insert it using
	 * the normal code paths.
	 */
	if (rs != NULL) {
		ASSERT3U(gap, !=, 0);
		if (rs->rs_start <= start && rs->rs_end >= end) {
			range_tree_adjust_fill(rt, rs, fill);
			return;
		}

		avl_remove(&rt->rt_root, rs);
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
			rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);

		range_tree_stat_decr(rt, rs);
		rt->rt_space -= rs->rs_end - rs->rs_start;

		fill += rs->rs_fill;
		start = MIN(start, rs->rs_start);
		end = MAX(end, rs->rs_end);
		size = end - start;

		range_tree_add_impl(rt, start, size, fill);

		kmem_cache_free(range_seg_cache, rs);
		return;
	}

	ASSERT3P(rs, ==, NULL);

	/*
	 * Determine whether or not we will have to merge with our neighbors.
	 * If gap != 0, we might need to merge with our neighbors even if we
	 * aren't directly touching.
	 */
	rs_before = avl_nearest(&rt->rt_root, where, AVL_BEFORE);
	rs_after = avl_nearest(&rt->rt_root, where, AVL_AFTER);

	merge_before = (rs_before != NULL && rs_before->rs_end >= start - gap);
	merge_after = (rs_after != NULL && rs_after->rs_start <= end + gap);

	if (merge_before && gap != 0)
		bridge_size += start - rs_before->rs_end;
	if (merge_after && gap != 0)
		bridge_size += rs_after->rs_start - end;

	if (merge_before && merge_after) {
		avl_remove(&rt->rt_root, rs_before);
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) {
			rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);
			rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);
		}

		range_tree_stat_decr(rt, rs_before);
		range_tree_stat_decr(rt, rs_after);

		rs_after->rs_fill += rs_before->rs_fill + fill;
		rs_after->rs_start = rs_before->rs_start;
		kmem_cache_free(range_seg_cache, rs_before);
		rs = rs_after;
	} else if (merge_before) {
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
			rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);

		range_tree_stat_decr(rt, rs_before);

		rs_before->rs_fill += fill;
		rs_before->rs_end = end;
		rs = rs_before;
	} else if (merge_after) {
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
			rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);

		range_tree_stat_decr(rt, rs_after);

		rs_after->rs_fill += fill;
		rs_after->rs_start = start;
		rs = rs_after;
	} else {
		rs = kmem_cache_alloc(range_seg_cache, KM_SLEEP);

		rs->rs_fill = fill;
		rs->rs_start = start;
		rs->rs_end = end;
		avl_insert(&rt->rt_root, rs, where);
	}

	if (gap != 0)
		ASSERT3U(rs->rs_fill, <=, rs->rs_end - rs->rs_start);
	else
		ASSERT3U(rs->rs_fill, ==, rs->rs_end - rs->rs_start);

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
		rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);

	range_tree_stat_incr(rt, rs);
	rt->rt_space += size + bridge_size;
}

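/*
 * Gap-bridging illustration (a sketch; assumes a tree created with
 * range_tree_create_impl(..., gap = 0x200) and arbitrary offsets):
 *
 *	range_tree_add(rt, 0x10000, 0x100);	 creates [0x10000, 0x10100)
 *	range_tree_add(rt, 0x10300, 0x100);	 within gap reach; the two
 *						 merge into [0x10000, 0x10400)
 *						 with rs_fill == 0x200
 *
 * Note that rt_space then accounts for the full bridged span (0x400),
 * not just the 0x200 of fill that was actually added.
 */
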
void
range_tree_add(void *arg, uint64_t start, uint64_t size)
{
	range_tree_add_impl(arg, start, size, size);
}

static void
range_tree_remove_impl(range_tree_t *rt, uint64_t start, uint64_t size,
    boolean_t do_fill)
{
	avl_index_t where;
	range_seg_t rsearch, *rs, *newseg;
	uint64_t end = start + size;
	boolean_t left_over, right_over;

	VERIFY3U(size, !=, 0);
	VERIFY3U(size, <=, rt->rt_space);

	rsearch.rs_start = start;
	rsearch.rs_end = end;
	rs = avl_find(&rt->rt_root, &rsearch, &where);

	/* Make sure we completely overlap with someone */
	if (rs == NULL) {
		zfs_panic_recover("zfs: freeing free segment "
		    "(offset=%llu size=%llu)",
		    (longlong_t)start, (longlong_t)size);
		return;
	}

	/*
	 * Range trees with gap support must only remove complete segments
	 * from the tree. This allows us to maintain accurate fill accounting
	 * and to ensure that bridged sections are not leaked. If we need to
	 * remove less than the full segment, we can only adjust the fill
	 * count.
	 */
	if (rt->rt_gap != 0) {
		if (do_fill) {
			if (rs->rs_fill == size) {
				start = rs->rs_start;
				end = rs->rs_end;
				size = end - start;
			} else {
				range_tree_adjust_fill(rt, rs, -size);
				return;
			}
		} else if (rs->rs_start != start || rs->rs_end != end) {
			zfs_panic_recover("zfs: freeing partial segment of "
			    "gap tree (offset=%llu size=%llu) of "
			    "(offset=%llu size=%llu)",
			    (longlong_t)start, (longlong_t)size,
			    (longlong_t)rs->rs_start,
			    (longlong_t)rs->rs_end - rs->rs_start);
			return;
		}
	}

	VERIFY3U(rs->rs_start, <=, start);
	VERIFY3U(rs->rs_end, >=, end);

	left_over = (rs->rs_start != start);
	right_over = (rs->rs_end != end);

	range_tree_stat_decr(rt, rs);

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
		rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);

	if (left_over && right_over) {
		newseg = kmem_cache_alloc(range_seg_cache, KM_SLEEP);
		newseg->rs_start = end;
		newseg->rs_end = rs->rs_end;
		newseg->rs_fill = newseg->rs_end - newseg->rs_start;
		range_tree_stat_incr(rt, newseg);

		rs->rs_end = start;

		avl_insert_here(&rt->rt_root, newseg, rs, AVL_AFTER);
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
			rt->rt_ops->rtop_add(rt, newseg, rt->rt_arg);
	} else if (left_over) {
		rs->rs_end = start;
	} else if (right_over) {
		rs->rs_start = end;
	} else {
		avl_remove(&rt->rt_root, rs);
		kmem_cache_free(range_seg_cache, rs);
		rs = NULL;
	}

	if (rs != NULL) {
		/*
		 * The fill of the leftover segment will always be equal to
		 * the size, since we do not support removing partial segments
		 * of range trees with gaps.
		 */
		rs->rs_fill = rs->rs_end - rs->rs_start;
		range_tree_stat_incr(rt, rs);

		if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
			rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
	}

	rt->rt_space -= size;
}

void
range_tree_remove(void *arg, uint64_t start, uint64_t size)
{
	range_tree_remove_impl(arg, start, size, B_FALSE);
}

void
range_tree_remove_fill(range_tree_t *rt, uint64_t start, uint64_t size)
{
	range_tree_remove_impl(rt, start, size, B_TRUE);
}

void
range_tree_resize_segment(range_tree_t *rt, range_seg_t *rs,
    uint64_t newstart, uint64_t newsize)
{
	int64_t delta = newsize - (rs->rs_end - rs->rs_start);

	range_tree_stat_decr(rt, rs);
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
		rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);

	rs->rs_start = newstart;
	rs->rs_end = newstart + newsize;

	range_tree_stat_incr(rt, rs);
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
		rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);

	rt->rt_space += delta;
}

static range_seg_t *
range_tree_find_impl(range_tree_t *rt, uint64_t start, uint64_t size)
{
	range_seg_t rsearch;
	uint64_t end = start + size;

	VERIFY(size != 0);

	rsearch.rs_start = start;
	rsearch.rs_end = end;
	return (avl_find(&rt->rt_root, &rsearch, NULL));
}

range_seg_t *
range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size)
{
	range_seg_t *rs = range_tree_find_impl(rt, start, size);
	if (rs != NULL && rs->rs_start <= start && rs->rs_end >= start + size)
		return (rs);
	return (NULL);
}

void
range_tree_verify_not_present(range_tree_t *rt, uint64_t off, uint64_t size)
{
	range_seg_t *rs = range_tree_find(rt, off, size);
	if (rs != NULL)
		panic("segment already in tree; rs=%p", (void *)rs);
}

boolean_t
range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size)
{
	return (range_tree_find(rt, start, size) != NULL);
}

/*
 * Ensure that this range is not in the tree, regardless of whether
 * it is currently in the tree.
 */
void
range_tree_clear(range_tree_t *rt, uint64_t start, uint64_t size)
{
	range_seg_t *rs;

	if (size == 0)
		return;

	while ((rs = range_tree_find_impl(rt, start, size)) != NULL) {
		uint64_t free_start = MAX(rs->rs_start, start);
		uint64_t free_end = MIN(rs->rs_end, start + size);
		range_tree_remove(rt, free_start, free_end - free_start);
	}
}

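/*
 * For example (illustrative): if the tree holds [0x1000, 0x2000), then
 * range_tree_clear(rt, 0x1800, 0x1000) trims the segment to
 * [0x1000, 0x1800); the part of the cleared range beyond the segment
 * is not present and so needs no work.
 */
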
void
range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst)
{
	range_tree_t *rt;

	ASSERT0(range_tree_space(*rtdst));
	ASSERT0(avl_numnodes(&(*rtdst)->rt_root));

	rt = *rtsrc;
	*rtsrc = *rtdst;
	*rtdst = rt;
}

void
range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
	range_seg_t *rs;
	void *cookie = NULL;

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_vacate != NULL)
		rt->rt_ops->rtop_vacate(rt, rt->rt_arg);

	while ((rs = avl_destroy_nodes(&rt->rt_root, &cookie)) != NULL) {
		if (func != NULL)
			func(arg, rs->rs_start, rs->rs_end - rs->rs_start);
		kmem_cache_free(range_seg_cache, rs);
	}

	bzero(rt->rt_histogram, sizeof (rt->rt_histogram));
	rt->rt_space = 0;
}

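/*
 * Since range_tree_add() matches the range_tree_func_t signature, a
 * common idiom is to drain one tree into another (rt_src and rt_dst
 * are placeholders here):
 *
 *	range_tree_vacate(rt_src, range_tree_add, rt_dst);
 */
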
void
range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
	range_seg_t *rs;

	for (rs = avl_first(&rt->rt_root); rs; rs = AVL_NEXT(&rt->rt_root, rs))
		func(arg, rs->rs_start, rs->rs_end - rs->rs_start);
}

range_seg_t *
range_tree_first(range_tree_t *rt)
{
	return (avl_first(&rt->rt_root));
}

uint64_t
range_tree_space(range_tree_t *rt)
{
	return (rt->rt_space);
}

boolean_t
range_tree_is_empty(range_tree_t *rt)
{
	ASSERT(rt != NULL);
	return (range_tree_space(rt) == 0);
}

/* Generic range tree functions for maintaining segments in an AVL tree. */
void
rt_avl_create(range_tree_t *rt, void *arg)
{
	avl_tree_t *tree = arg;

	avl_create(tree, rt->rt_avl_compare, sizeof (range_seg_t),
	    offsetof(range_seg_t, rs_pp_node));
}

void
rt_avl_destroy(range_tree_t *rt, void *arg)
{
	avl_tree_t *tree = arg;

	ASSERT0(avl_numnodes(tree));
	avl_destroy(tree);
}

void
rt_avl_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	avl_tree_t *tree = arg;
	avl_add(tree, rs);
}

void
rt_avl_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	avl_tree_t *tree = arg;
	avl_remove(tree, rs);
}

void
rt_avl_vacate(range_tree_t *rt, void *arg)
{
	/*
	 * Normally one would walk the tree freeing nodes along the way.
	 * Since the nodes are shared with the range trees we can avoid
	 * walking all nodes and just reinitialize the avl tree. The nodes
	 * will be freed by the range tree, so we don't want to free them
	 * here.
	 */
	rt_avl_create(rt, arg);
}
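
/*
 * Consumers wire up rt_avl_ops by handing range_tree_create_impl() a
 * caller-owned avl_tree_t and comparator. A sketch (my_size_compare
 * is a hypothetical comparator):
 *
 *	avl_tree_t by_size;
 *	range_tree_t *rt = range_tree_create_impl(&rt_avl_ops, &by_size,
 *	    my_size_compare, 0);
 *
 * Each segment is then kept in both trees at no extra allocation cost,
 * since range_seg_t embeds both AVL nodes (rs_node and rs_pp_node).
 */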

uint64_t
range_tree_min(range_tree_t *rt)
{
	range_seg_t *rs = avl_first(&rt->rt_root);
	return (rs != NULL ? rs->rs_start : 0);
}

uint64_t
range_tree_max(range_tree_t *rt)
{
	range_seg_t *rs = avl_last(&rt->rt_root);
	return (rs != NULL ? rs->rs_end : 0);
}

uint64_t
range_tree_span(range_tree_t *rt)
{
	return (range_tree_max(rt) - range_tree_min(rt));
}