/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zio.h>
#include <sys/space_map.h>

/*
 * Space map routines.
 * NOTE: caller is responsible for all locking.
 */
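/*
 * AVL comparator for space segments.  Any two overlapping segments
 * compare as equal, so avl_find() on a search segment locates a
 * segment it intersects, if one exists.
 */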
static int
space_map_seg_compare(const void *x1, const void *x2)
{
	const space_seg_t *s1 = x1;
	const space_seg_t *s2 = x2;

	if (s1->ss_start < s2->ss_start) {
		if (s1->ss_end > s2->ss_start)
			return (0);
		return (-1);
	}
	if (s1->ss_start > s2->ss_start) {
		if (s1->ss_start < s2->ss_end)
			return (0);
		return (1);
	}
	return (0);
}
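
/*
 * Create an empty in-core space map covering [start, start + size).
 * All offsets and sizes are quantized to 1 << shift units, and all
 * operations are serialized by the caller-supplied lock.
 */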
void
space_map_create(space_map_t *sm, uint64_t start, uint64_t size, uint8_t shift,
	kmutex_t *lp)
{
	bzero(sm, sizeof (*sm));

	avl_create(&sm->sm_root, space_map_seg_compare,
	    sizeof (space_seg_t), offsetof(struct space_seg, ss_node));

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_lock = lp;
}

void
space_map_destroy(space_map_t *sm)
{
	ASSERT(!sm->sm_loaded && !sm->sm_loading);
	VERIFY3U(sm->sm_space, ==, 0);
	avl_destroy(&sm->sm_root);
}
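
/*
 * Add the segment [start, start + size) to the space map, coalescing
 * it with adjacent neighbors where possible.  Adding a range that is
 * already present triggers zfs_panic_recover().
 */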
void
space_map_add(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss_before, *ss_after, *ss;
	uint64_t end = start + size;
	int merge_before, merge_after;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY3U(start, >=, sm->sm_start);
	VERIFY3U(end, <=, sm->sm_start + sm->sm_size);
	VERIFY(sm->sm_space + size <= sm->sm_size);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	if (ss != NULL && ss->ss_start <= start && ss->ss_end >= end) {
		zfs_panic_recover("zfs: allocating allocated segment"
		    "(offset=%llu size=%llu)\n",
		    (longlong_t)start, (longlong_t)size);
		return;
	}

	/* Make sure we don't overlap with either of our neighbors */
	VERIFY(ss == NULL);

	ss_before = avl_nearest(&sm->sm_root, where, AVL_BEFORE);
	ss_after = avl_nearest(&sm->sm_root, where, AVL_AFTER);

	merge_before = (ss_before != NULL && ss_before->ss_end == start);
	merge_after = (ss_after != NULL && ss_after->ss_start == end);

	if (merge_before && merge_after) {
		avl_remove(&sm->sm_root, ss_before);
		ss_after->ss_start = ss_before->ss_start;
		kmem_free(ss_before, sizeof (*ss_before));
	} else if (merge_before) {
		ss_before->ss_end = end;
	} else if (merge_after) {
		ss_after->ss_start = start;
	} else {
		ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
		ss->ss_start = start;
		ss->ss_end = end;
		avl_insert(&sm->sm_root, ss, where);
	}

	sm->sm_space += size;
}
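
/*
 * Remove [start, start + size) from the space map.  The range must be
 * entirely contained in a single segment; removing from its middle
 * splits that segment in two.
 */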
void
space_map_remove(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss, *newseg;
	uint64_t end = start + size;
	int left_over, right_over;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	/* Make sure we completely overlap with someone */
	if (ss == NULL) {
		zfs_panic_recover("zfs: freeing free segment "
		    "(offset=%llu size=%llu)",
		    (longlong_t)start, (longlong_t)size);
		return;
	}
	VERIFY3U(ss->ss_start, <=, start);
	VERIFY3U(ss->ss_end, >=, end);
	VERIFY(sm->sm_space - size <= sm->sm_size);

	left_over = (ss->ss_start != start);
	right_over = (ss->ss_end != end);

	if (left_over && right_over) {
		newseg = kmem_alloc(sizeof (*newseg), KM_SLEEP);
		newseg->ss_start = end;
		newseg->ss_end = ss->ss_end;
		ss->ss_end = start;
		avl_insert_here(&sm->sm_root, newseg, ss, AVL_AFTER);
	} else if (left_over) {
		ss->ss_end = start;
	} else if (right_over) {
		ss->ss_start = end;
	} else {
		avl_remove(&sm->sm_root, ss);
		kmem_free(ss, sizeof (*ss));
	}

	sm->sm_space -= size;
}
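
/*
 * Return nonzero if [start, start + size) is entirely contained in a
 * single segment of the space map.
 */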
int
space_map_contains(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss;
	uint64_t end = start + size;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	return (ss != NULL && ss->ss_start <= start && ss->ss_end >= end);
}
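
/*
 * Empty the space map, applying func (if non-NULL) to each segment as
 * it is removed, e.g. to transfer the segments into mdest.
 */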
void
space_map_vacate(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		if (func != NULL)
			func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
		kmem_free(ss, sizeof (*ss));
	}

	sm->sm_space = 0;
}
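
/*
 * Apply func to every segment in the space map without modifying it.
 */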
void
space_map_walk(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
}
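
/*
 * Remove any portion of [start, start + size) that is present in the
 * space map, leaving the rest of the map untouched.
 */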
void
space_map_excise(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	avl_index_t where;
	space_seg_t *ss, search;
	uint64_t end = start + size;
	uint64_t rm_start, rm_end;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	search.ss_start = start;
	search.ss_end = start;

	for (;;) {
		ss = avl_find(t, &search, &where);

		if (ss == NULL)
			ss = avl_nearest(t, where, AVL_AFTER);

		if (ss == NULL || ss->ss_start >= end)
			break;

		rm_start = MAX(ss->ss_start, start);
		rm_end = MIN(ss->ss_end, end);

		space_map_remove(sm, rm_start, rm_end - rm_start);
	}
}

/*
 * Replace smd with the union of smd and sms.
 */
void
space_map_union(space_map_t *smd, space_map_t *sms)
{
	avl_tree_t *t = &sms->sm_root;
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(smd->sm_lock));

	/*
	 * For each source segment, remove any intersections with the
	 * destination, then add the source segment to the destination.
	 */
	for (ss = avl_first(t); ss != NULL; ss = AVL_NEXT(t, ss)) {
		space_map_excise(smd, ss->ss_start, ss->ss_end - ss->ss_start);
		space_map_add(smd, ss->ss_start, ss->ss_end - ss->ss_start);
	}
}

/*
 * Wait for any in-progress space_map_load() to complete.
 */
void
space_map_load_wait(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	while (sm->sm_loading)
		cv_wait(&sm->sm_load_cv, sm->sm_lock);
}

/*
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
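/*
 * Each on-disk map entry is a 64-bit word.  Debug entries (identified
 * by SM_DEBUG_DECODE()) are skipped; every other entry decodes to an
 * offset, a run length, and an alloc/free type that is replayed
 * against the in-core map.
 */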
int
space_map_load(space_map_t *sm, space_map_ops_t *ops, uint8_t maptype,
	space_map_obj_t *smo, objset_t *os)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	uint64_t mapstart = sm->sm_start;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	space_map_load_wait(sm);

	if (sm->sm_loaded)
		return (0);

	sm->sm_loading = B_TRUE;
	end = smo->smo_objsize;
	space = smo->smo_alloc;

	ASSERT(sm->sm_ops == NULL);
	VERIFY3U(sm->sm_space, ==, 0);

	if (maptype == SM_FREE) {
		space_map_add(sm, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = 1ULL << SPACE_MAP_BLOCKSHIFT;
	entry_map = zio_buf_alloc(bufsize);

	mutex_exit(sm->sm_lock);
	if (end > bufsize)
		dmu_prefetch(os, smo->smo_object, bufsize, end - bufsize);
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    smo->smo_object, offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(os, smo->smo_object, offset, size, entry_map);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

			(SM_TYPE_DECODE(e) == maptype ?
			    space_map_add : space_map_remove)(sm,
			    (SM_OFFSET_DECODE(e) << sm->sm_shift) + mapstart,
			    SM_RUN_DECODE(e) << sm->sm_shift);
		}
	}

	if (error == 0) {
		VERIFY3U(sm->sm_space, ==, space);

		sm->sm_loaded = B_TRUE;
		sm->sm_ops = ops;
		if (ops != NULL)
			ops->smop_load(sm);
	} else {
		space_map_vacate(sm, NULL, NULL);
	}

	zio_buf_free(entry_map, bufsize);

	sm->sm_loading = B_FALSE;

	cv_broadcast(&sm->sm_load_cv);

	return (error);
}
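
/*
 * Discard the in-core contents of a loaded space map and detach its
 * ops vector, notifying the ops implementation via smop_unload().
 */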
void
space_map_unload(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_loaded && sm->sm_ops != NULL)
		sm->sm_ops->smop_unload(sm);

	sm->sm_loaded = B_FALSE;
	sm->sm_ops = NULL;

	space_map_vacate(sm, NULL, NULL);
}
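
/*
 * The three wrappers below delegate allocation policy to the ops
 * vector installed by space_map_load(), then keep the in-core map in
 * sync: allocated and claimed ranges are removed from the map, freed
 * ranges are added back.
 */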
uint64_t
space_map_alloc(space_map_t *sm, uint64_t size)
{
	uint64_t start;

	start = sm->sm_ops->smop_alloc(sm, size);
	if (start != -1ULL)
		space_map_remove(sm, start, size);
	return (start);
}

void
space_map_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	sm->sm_ops->smop_claim(sm, start, size);
	space_map_remove(sm, start, size);
}

void
space_map_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	space_map_add(sm, start, size);
	sm->sm_ops->smop_free(sm, start, size);
}

/*
 * Note: space_map_sync() will drop sm_lock across dmu_write() calls.
 */
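/*
 * Appends one debug entry followed by one entry per run to the space
 * map object, consuming the in-core segments as it goes; runs longer
 * than SM_RUN_MAX are emitted as multiple entries.
 */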
void
space_map_sync(space_map_t *sm, uint8_t maptype,
	space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	void *cookie = NULL;
	space_seg_t *ss;
	uint64_t bufsize, start, size, run_len;
	uint64_t *entry, *entry_map, *entry_map_end;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_space == 0)
		return;

	dprintf("object %4llu, txg %llu, pass %d, %c, count %lu, space %llx\n",
	    smo->smo_object, dmu_tx_get_txg(tx), spa_sync_pass(spa),
	    maptype == SM_ALLOC ? 'A' : 'F', avl_numnodes(&sm->sm_root),
	    sm->sm_space);

	if (maptype == SM_ALLOC)
		smo->smo_alloc += sm->sm_space;
	else
		smo->smo_alloc -= sm->sm_space;

	bufsize = (8 + avl_numnodes(&sm->sm_root)) * sizeof (uint64_t);
	bufsize = MIN(bufsize, 1ULL << SPACE_MAP_BLOCKSHIFT);
	entry_map = zio_buf_alloc(bufsize);
	entry_map_end = entry_map + (bufsize / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		size = ss->ss_end - ss->ss_start;
		start = (ss->ss_start - sm->sm_start) >> sm->sm_shift;

		sm->sm_space -= size;
		size >>= sm->sm_shift;

		while (size) {
			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				mutex_exit(sm->sm_lock);
				dmu_write(os, smo->smo_object, smo->smo_objsize,
				    bufsize, entry_map, tx);
				mutex_enter(sm->sm_lock);
				smo->smo_objsize += bufsize;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
		}
		kmem_free(ss, sizeof (*ss));
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(sm->sm_lock);
		dmu_write(os, smo->smo_object, smo->smo_objsize,
		    size, entry_map, tx);
		mutex_enter(sm->sm_lock);
		smo->smo_objsize += size;
	}

	zio_buf_free(entry_map, bufsize);

	VERIFY3U(sm->sm_space, ==, 0);
}
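
/*
 * Free the entire on-disk space map object and reset its bookkeeping.
 */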
void
space_map_truncate(space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	VERIFY(dmu_free_range(os, smo->smo_object, 0, -1ULL, tx) == 0);

	smo->smo_objsize = 0;
	smo->smo_alloc = 0;
}