/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zio.h>
#include <sys/space_map.h>

/*
 * Space map routines.
 * NOTE: caller is responsible for all locking.
 */

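/*
 * AVL comparator for space map segments: overlapping segments compare
 * equal, so avl_find() on a probe segment returns any existing overlap.
 */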
static int
space_map_seg_compare(const void *x1, const void *x2)
{
	const space_seg_t *s1 = x1;
	const space_seg_t *s2 = x2;

	if (s1->ss_start < s2->ss_start) {
		if (s1->ss_end > s2->ss_start)
			return (0);
		return (-1);
	}
	if (s1->ss_start > s2->ss_start) {
		if (s1->ss_start < s2->ss_end)
			return (0);
		return (1);
	}
	return (0);
}

void
space_map_create(space_map_t *sm, uint64_t start, uint64_t size, uint8_t shift,
    kmutex_t *lp)
{
	bzero(sm, sizeof (*sm));

	cv_init(&sm->sm_load_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&sm->sm_root, space_map_seg_compare,
	    sizeof (space_seg_t), offsetof(struct space_seg, ss_node));

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_lock = lp;
}

void
space_map_destroy(space_map_t *sm)
{
	ASSERT(!sm->sm_loaded && !sm->sm_loading);
	VERIFY3U(sm->sm_space, ==, 0);
	avl_destroy(&sm->sm_root);
	cv_destroy(&sm->sm_load_cv);
}

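/*
 * Add the range [start, start + size) to the map, merging with the
 * neighboring segment on either side when the new range abuts it, so
 * the tree always holds maximal non-adjacent segments.
 */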
void
space_map_add(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss_before, *ss_after, *ss;
	uint64_t end = start + size;
	int merge_before, merge_after;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY3U(start, >=, sm->sm_start);
	VERIFY3U(end, <=, sm->sm_start + sm->sm_size);
	VERIFY(sm->sm_space + size <= sm->sm_size);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	if (ss != NULL && ss->ss_start <= start && ss->ss_end >= end) {
		zfs_panic_recover("zfs: allocating allocated segment"
		    "(offset=%llu size=%llu)\n",
		    (longlong_t)start, (longlong_t)size);
		return;
	}

	/* Make sure we don't overlap with either of our neighbors */
	VERIFY(ss == NULL);

	ss_before = avl_nearest(&sm->sm_root, where, AVL_BEFORE);
	ss_after = avl_nearest(&sm->sm_root, where, AVL_AFTER);

	merge_before = (ss_before != NULL && ss_before->ss_end == start);
	merge_after = (ss_after != NULL && ss_after->ss_start == end);

	if (merge_before && merge_after) {
		avl_remove(&sm->sm_root, ss_before);
		if (sm->sm_pp_root) {
			avl_remove(sm->sm_pp_root, ss_before);
			avl_remove(sm->sm_pp_root, ss_after);
		}
		ss_after->ss_start = ss_before->ss_start;
		kmem_free(ss_before, sizeof (*ss_before));
		ss = ss_after;
	} else if (merge_before) {
		ss_before->ss_end = end;
		if (sm->sm_pp_root)
			avl_remove(sm->sm_pp_root, ss_before);
		ss = ss_before;
	} else if (merge_after) {
		ss_after->ss_start = start;
		if (sm->sm_pp_root)
			avl_remove(sm->sm_pp_root, ss_after);
		ss = ss_after;
	} else {
		ss = kmem_alloc(sizeof (*ss), KM_PUSHPAGE);
		ss->ss_start = start;
		ss->ss_end = end;
		avl_insert(&sm->sm_root, ss, where);
	}

	if (sm->sm_pp_root)
		avl_add(sm->sm_pp_root, ss);

	sm->sm_space += size;
}

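/*
 * Remove the range [start, start + size) from the map.  The range must
 * lie entirely within one existing segment; trimming its interior
 * splits that segment into two.
 */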
void
space_map_remove(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss, *newseg;
	uint64_t end = start + size;
	int left_over, right_over;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	/* Make sure we completely overlap with someone */
	if (ss == NULL) {
		zfs_panic_recover("zfs: freeing free segment "
		    "(offset=%llu size=%llu)",
		    (longlong_t)start, (longlong_t)size);
		return;
	}
	VERIFY3U(ss->ss_start, <=, start);
	VERIFY3U(ss->ss_end, >=, end);
	VERIFY(sm->sm_space - size <= sm->sm_size);

	left_over = (ss->ss_start != start);
	right_over = (ss->ss_end != end);

	if (sm->sm_pp_root)
		avl_remove(sm->sm_pp_root, ss);

	if (left_over && right_over) {
		newseg = kmem_alloc(sizeof (*newseg), KM_PUSHPAGE);
		newseg->ss_start = end;
		newseg->ss_end = ss->ss_end;
		ss->ss_end = start;
		avl_insert_here(&sm->sm_root, newseg, ss, AVL_AFTER);
		if (sm->sm_pp_root)
			avl_add(sm->sm_pp_root, newseg);
	} else if (left_over) {
		ss->ss_end = start;
	} else if (right_over) {
		ss->ss_start = end;
	} else {
		avl_remove(&sm->sm_root, ss);
		kmem_free(ss, sizeof (*ss));
		ss = NULL;
	}

	if (sm->sm_pp_root && ss != NULL)
		avl_add(sm->sm_pp_root, ss);

	sm->sm_space -= size;
}

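/*
 * Return B_TRUE iff the entire range [start, start + size) is covered
 * by a single segment in the map.
 */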
boolean_t
space_map_contains(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss;
	uint64_t end = start + size;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	return (ss != NULL && ss->ss_start <= start && ss->ss_end >= end);
}

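/*
 * Destroy every segment in the map, applying func(mdest, start, size)
 * to each one first when func is non-NULL.
 */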
void
space_map_vacate(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		if (func != NULL)
			func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
		kmem_free(ss, sizeof (*ss));
	}
	sm->sm_space = 0;
}

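/*
 * Apply func(mdest, start, size) to each segment, leaving the map intact.
 */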
void
space_map_walk(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
}

/*
 * Wait for any in-progress space_map_load() to complete.
 */
void
space_map_load_wait(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	while (sm->sm_loading) {
		ASSERT(!sm->sm_loaded);
		cv_wait(&sm->sm_load_cv, sm->sm_lock);
	}
}

/*
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
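/*
 * Each on-disk entry is one 64-bit word: either a debug entry (recording
 * the action, sync pass, and txg of the write) or a run of blocks encoded
 * as an offset and run length in sm_shift units plus an alloc/free type
 * bit.  See the SM_*_ENCODE/DECODE macros in space_map.h for the layout.
 */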
int
space_map_load(space_map_t *sm, space_map_ops_t *ops, uint8_t maptype,
    space_map_obj_t *smo, objset_t *os)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	uint64_t mapstart = sm->sm_start;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT(!sm->sm_loaded);
	ASSERT(!sm->sm_loading);

	sm->sm_loading = B_TRUE;
	end = smo->smo_objsize;
	space = smo->smo_alloc;

	ASSERT(sm->sm_ops == NULL);
	VERIFY3U(sm->sm_space, ==, 0);

	if (maptype == SM_FREE) {
		space_map_add(sm, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = 1ULL << SPACE_MAP_BLOCKSHIFT;
	entry_map = zio_buf_alloc(bufsize);

	mutex_exit(sm->sm_lock);
	if (end > bufsize)
		dmu_prefetch(os, smo->smo_object, bufsize, end - bufsize);
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    smo->smo_object, offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(os, smo->smo_object, offset, size, entry_map,
		    DMU_READ_PREFETCH);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

			(SM_TYPE_DECODE(e) == maptype ?
			    space_map_add : space_map_remove)(sm,
			    (SM_OFFSET_DECODE(e) << sm->sm_shift) + mapstart,
			    SM_RUN_DECODE(e) << sm->sm_shift);
		}
	}

	if (error == 0) {
		VERIFY3U(sm->sm_space, ==, space);

		sm->sm_loaded = B_TRUE;
		sm->sm_ops = ops;
		if (ops != NULL)
			ops->smop_load(sm);
	} else {
		space_map_vacate(sm, NULL, NULL);
	}

	zio_buf_free(entry_map, bufsize);

	sm->sm_loading = B_FALSE;

	cv_broadcast(&sm->sm_load_cv);

	return (error);
}

void
space_map_unload(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_loaded && sm->sm_ops != NULL)
		sm->sm_ops->smop_unload(sm);

	sm->sm_loaded = B_FALSE;
	sm->sm_ops = NULL;

	space_map_vacate(sm, NULL, NULL);
}

uint64_t
space_map_maxsize(space_map_t *sm)
{
	ASSERT(sm->sm_ops != NULL);
	return (sm->sm_ops->smop_max(sm));
}

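/*
 * The three wrappers below delegate placement policy to the loaded
 * sm_ops vector, then update the in-core map to match.
 */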
uint64_t
space_map_alloc(space_map_t *sm, uint64_t size)
{
	uint64_t start;

	start = sm->sm_ops->smop_alloc(sm, size);
	if (start != -1ULL)
		space_map_remove(sm, start, size);
	return (start);
}

void
space_map_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	sm->sm_ops->smop_claim(sm, start, size);
	space_map_remove(sm, start, size);
}

void
space_map_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	space_map_add(sm, start, size);
	sm->sm_ops->smop_free(sm, start, size);
}

/*
 * Note: space_map_sync() will drop sm_lock across dmu_write() calls.
 */
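/*
 * Append the contents of the map to the on-disk object: one leading
 * debug entry, then one entry per run, splitting runs longer than
 * SM_RUN_MAX and flushing the staging buffer whenever it fills.
 * The map is emptied as a side effect.
 */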
void
space_map_sync(space_map_t *sm, uint8_t maptype,
    space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	void *cookie = NULL;
	space_seg_t *ss;
	uint64_t bufsize, start, size, run_len;
	uint64_t *entry, *entry_map, *entry_map_end;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_space == 0)
		return;

	dprintf("object %4llu, txg %llu, pass %d, %c, count %lu, space %llx\n",
	    smo->smo_object, dmu_tx_get_txg(tx), spa_sync_pass(spa),
	    maptype == SM_ALLOC ? 'A' : 'F', avl_numnodes(&sm->sm_root),
	    sm->sm_space);

	if (maptype == SM_ALLOC)
		smo->smo_alloc += sm->sm_space;
	else
		smo->smo_alloc -= sm->sm_space;

	bufsize = (8 + avl_numnodes(&sm->sm_root)) * sizeof (uint64_t);
	bufsize = MIN(bufsize, 1ULL << SPACE_MAP_BLOCKSHIFT);
	entry_map = zio_buf_alloc(bufsize);
	entry_map_end = entry_map + (bufsize / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		size = ss->ss_end - ss->ss_start;
		start = (ss->ss_start - sm->sm_start) >> sm->sm_shift;

		sm->sm_space -= size;
		size >>= sm->sm_shift;

		while (size) {
			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				mutex_exit(sm->sm_lock);
				dmu_write(os, smo->smo_object,
				    smo->smo_objsize, bufsize, entry_map, tx);
				mutex_enter(sm->sm_lock);
				smo->smo_objsize += bufsize;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
		}
		kmem_free(ss, sizeof (*ss));
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(sm->sm_lock);
		dmu_write(os, smo->smo_object, smo->smo_objsize,
		    size, entry_map, tx);
		mutex_enter(sm->sm_lock);
		smo->smo_objsize += size;
	}

	zio_buf_free(entry_map, bufsize);

	VERIFY3U(sm->sm_space, ==, 0);
}

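/*
 * Discard the on-disk representation entirely, resetting the object
 * to zero size and zero allocated space.
 */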
void
space_map_truncate(space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	VERIFY(dmu_free_range(os, smo->smo_object, 0, -1ULL, tx) == 0);

	smo->smo_objsize = 0;
	smo->smo_alloc = 0;
}

/*
 * Space map reference trees.
 *
 * A space map is a collection of integers.  Every integer is either
 * in the map, or it's not.  A space map reference tree generalizes
 * the idea: it allows its members to have arbitrary reference counts,
 * as opposed to the implicit reference count of 0 or 1 in a space map.
 * This representation comes in handy when computing the union or
 * intersection of multiple space maps.  For example, the union of
 * N space maps is the subset of the reference tree with refcnt >= 1.
 * The intersection of N space maps is the subset with refcnt >= N.
 *
 * [It's very much like a Fourier transform.  Unions and intersections
 * are hard to perform in the 'space map domain', so we convert the maps
 * into the 'reference count domain', where it's trivial, then invert.]
 *
 * vdev_dtl_reassess() uses computations of this form to determine
 * DTL_MISSING and DTL_OUTAGE for interior vdevs -- e.g. a RAID-Z vdev
 * has an outage wherever refcnt >= vdev_nparity + 1, and a mirror vdev
 * has an outage wherever refcnt >= vdev_children.
 */
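/*
 * For example, adding segments [10, 20) and [15, 30), each with refcnt 1,
 * produces the deltas +1@10, +1@15, -1@20, -1@30.  Sweeping left to
 * right, the running refcnt is >= 1 over [10, 30) (the union) and >= 2
 * over [15, 20) (the intersection).
 */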
static int
space_map_ref_compare(const void *x1, const void *x2)
{
	const space_ref_t *sr1 = x1;
	const space_ref_t *sr2 = x2;

	if (sr1->sr_offset < sr2->sr_offset)
		return (-1);
	if (sr1->sr_offset > sr2->sr_offset)
		return (1);

	if (sr1 < sr2)
		return (-1);
	if (sr1 > sr2)
		return (1);

	return (0);
}

void
space_map_ref_create(avl_tree_t *t)
{
	avl_create(t, space_map_ref_compare,
	    sizeof (space_ref_t), offsetof(space_ref_t, sr_node));
}

void
space_map_ref_destroy(avl_tree_t *t)
{
	space_ref_t *sr;
	void *cookie = NULL;

	while ((sr = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(sr, sizeof (*sr));

	avl_destroy(t);
}

static void
space_map_ref_add_node(avl_tree_t *t, uint64_t offset, int64_t refcnt)
{
	space_ref_t *sr;

	sr = kmem_alloc(sizeof (*sr), KM_PUSHPAGE);
	sr->sr_offset = offset;
	sr->sr_refcnt = refcnt;

	avl_add(t, sr);
}

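/*
 * Represent a segment as a pair of reference count deltas: +refcnt at
 * start and -refcnt at end, so a left-to-right sweep of the tree sees
 * the count raised over exactly [start, end).
 */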
void
space_map_ref_add_seg(avl_tree_t *t, uint64_t start, uint64_t end,
    int64_t refcnt)
{
	space_map_ref_add_node(t, start, refcnt);
	space_map_ref_add_node(t, end, -refcnt);
}

/*
 * Convert (or add) a space map into a reference tree.
 */
void
space_map_ref_add_map(avl_tree_t *t, space_map_t *sm, int64_t refcnt)
{
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		space_map_ref_add_seg(t, ss->ss_start, ss->ss_end, refcnt);
}

/*
 * Convert a reference tree into a space map.  The space map will contain
 * all members of the reference tree for which refcnt >= minref.
 */
void
space_map_ref_generate_map(avl_tree_t *t, space_map_t *sm, int64_t minref)
{
	uint64_t start = -1ULL;
	int64_t refcnt = 0;
	space_ref_t *sr;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	space_map_vacate(sm, NULL, NULL);

	for (sr = avl_first(t); sr != NULL; sr = AVL_NEXT(t, sr)) {
		refcnt += sr->sr_refcnt;
		if (refcnt >= minref) {
			if (start == -1ULL)
				start = sr->sr_offset;
		} else {
			if (start != -1ULL) {
				uint64_t end = sr->sr_offset;
				ASSERT(start <= end);
				if (end > start)
					space_map_add(sm, start, end - start);
				start = -1ULL;
			}
		}
	}
	ASSERT(refcnt == 0);
	ASSERT(start == -1ULL);
}
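
/*
 * A minimal sketch of how these routines compose to take the union of
 * two space maps (vdev_dtl_reassess() does something similar for DTLs).
 * The maps and their locking are the caller's responsibility, and
 * `sm_union' is just an illustrative name, not a symbol in this file:
 *
 *	avl_tree_t t;
 *
 *	space_map_ref_create(&t);
 *	space_map_ref_add_map(&t, sm1, 1);
 *	space_map_ref_add_map(&t, sm2, 1);
 *	space_map_ref_generate_map(&t, sm_union, 1);	(keeps refcnt >= 1)
 *	space_map_ref_destroy(&t);
 */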