/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

/*
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer i/o operations, but they also cause the
 * DMU to keep more data in-core, and to waste more i/o bandwidth
 * when only a few blocks have changed since the last transaction group.
 */
int space_map_blksz = (1 << 12);

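/*
 * For reference, each space map entry is a single 64-bit word. A debug
 * entry (bit 63 set) records the sync action, sync pass, and txg; a
 * regular entry packs the segment offset (47 bits, in units of
 * 1 << sm_shift), the entry type (alloc or free, 1 bit), and the run
 * length (15 bits, so at most SM_RUN_MAX units per entry). See the
 * SM_*_ENCODE/DECODE macros in sys/space_map.h for the exact layout.
 */
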
/*
 * Iterate through the space map, invoking the callback on each (non-debug)
 * space map entry.
 */
int
space_map_iterate(space_map_t *sm, sm_cb_t callback, void *arg)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end;
	int error = 0;

	end = space_map_length(sm);

	bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	entry_map = vmem_alloc(bufsize, KM_SLEEP);

	if (end > bufsize) {
		dmu_prefetch(sm->sm_os, space_map_object(sm), 0, bufsize,
		    end - bufsize, ZIO_PRIORITY_SYNC_READ);
	}

	for (offset = 0; offset < end && error == 0; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);
		ASSERT3U(sm->sm_blksz, !=, 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    space_map_object(sm), offset, size);

		error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
		    entry_map, DMU_READ_PREFETCH);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end && error == 0;
		    entry++) {
			uint64_t e = *entry;
			uint64_t offset, size;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

			offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
			    sm->sm_start;
			size = SM_RUN_DECODE(e) << sm->sm_shift;

			VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
			VERIFY3U(offset, >=, sm->sm_start);
			VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
			error = callback(SM_TYPE_DECODE(e), offset, size, arg);
		}
	}

	vmem_free(entry_map, bufsize);

	return (error);
}

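/*
 * Illustrative sketch (not part of this file): a caller can total the
 * free space recorded in a space map with a callback such as the
 * hypothetical space_map_sum_cb() below.
 *
 *	static int
 *	space_map_sum_cb(maptype_t type, uint64_t offset, uint64_t size,
 *	    void *arg)
 *	{
 *		uint64_t *sum = arg;
 *		if (type == SM_FREE)
 *			*sum += size;
 *		return (0);
 *	}
 *
 *	uint64_t sum = 0;
 *	error = space_map_iterate(sm, space_map_sum_cb, &sum);
 */
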
typedef struct space_map_load_arg {
	space_map_t	*smla_sm;
	range_tree_t	*smla_rt;
	maptype_t	smla_type;
} space_map_load_arg_t;

static int
space_map_load_callback(maptype_t type, uint64_t offset, uint64_t size,
    void *arg)
{
	space_map_load_arg_t *smla = arg;
	if (type == smla->smla_type) {
		VERIFY3U(range_tree_space(smla->smla_rt) + size, <=,
		    smla->smla_sm->sm_size);
		range_tree_add(smla->smla_rt, offset, size);
	} else {
		range_tree_remove(smla->smla_rt, offset, size);
	}

	return (0);
}

/*
 * Load the space map from disk into the specified range tree. Segments of
 * maptype are added to the range tree; other segment types are removed.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t space;
	int err;
	space_map_load_arg_t smla;

	VERIFY0(range_tree_space(rt));
	space = space_map_allocated(sm);

	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	smla.smla_rt = rt;
	smla.smla_sm = sm;
	smla.smla_type = maptype;
	err = space_map_iterate(sm, space_map_load_callback, &smla);

	if (err == 0) {
		VERIFY3U(range_tree_space(rt), ==, space);
	} else {
		range_tree_vacate(rt, NULL, NULL);
	}

	return (err);
}

void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the contents of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets ranging
	 * from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example, given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. Assume
		 * the range tree had a count of 5 in the 2^44 (16TB) bucket;
		 * the calculation below would normalize this to 5 * 2^4 (16).
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}

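/*
 * A concrete mapping for the loop above, assuming sm_shift = 9: range
 * tree bucket i = 9 feeds space map bucket idx = 0 (2^9 byte ranges),
 * i = 10 feeds idx = 1, and so on up to i = 40 and idx = 31; every
 * larger bucket also lands in idx = 31, with its count scaled by
 * 2^(i - 31 - 9) as computed by the shift in the loop body.
 */
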
uint64_t
space_map_entries(space_map_t *sm, range_tree_t *rt)
{
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, entries;

	/*
	 * All space_maps always have a debug entry so account for it here.
	 */
	entries = 1;

	/*
	 * Traverse the range tree and calculate the number of space map
	 * entries that would be required to write out the range tree.
	 */
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		entries += howmany(size, SM_RUN_MAX);
	}
	return (entries);
}

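/*
 * Worked example (assuming sm_shift = 9 and SM_RUN_MAX = 2^15): a single
 * 1GB segment covers 2^30 >> 9 = 2^21 allocatable units, so it needs
 * howmany(2^21, 2^15) = 64 space map entries; with the debug entry the
 * total for a range tree holding just that segment would be 65.
 */
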
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, total, rt_space, nodes;
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t expected_entries, actual_entries = 1;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number, but it is maintained for backwards
	 * compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_space(rt) == 0) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	expected_entries = space_map_entries(sm, rt);

	entry_map = vmem_alloc(sm->sm_blksz, KM_SLEEP);
	entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	total = 0;
	nodes = avl_numnodes(&rt->rt_root);
	rt_space = range_tree_space(rt);
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t start;

		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;

		total += size << sm->sm_shift;

		while (size != 0) {
			uint64_t run_len;

			run_len = MIN(size, SM_RUN_MAX);

			/* Flush the entry buffer when it fills a block. */
			if (entry == entry_map_end) {
				dmu_write(os, space_map_object(sm),
				    sm->sm_phys->smp_objsize, sm->sm_blksz,
				    entry_map, tx);
				sm->sm_phys->smp_objsize += sm->sm_blksz;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
			actual_entries++;
		}
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
		    size, entry_map, tx);
		sm->sm_phys->smp_objsize += size;
	}
	ASSERT3U(expected_entries, ==, actual_entries);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
	VERIFY3U(range_tree_space(rt), ==, total);

	vmem_free(entry_map, sm->sm_blksz);
}

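/*
 * Note on the write path above: entries are staged in a single
 * block-sized buffer and flushed with dmu_write() each time the buffer
 * fills, so one sync pass issues roughly expected_entries /
 * (sm_blksz / 8) full-block writes plus one partial write for the
 * remainder.
 */
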
static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;

	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_alloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_os = os;
	sm->sm_object = object;
	sm->sm_length = 0;
	sm->sm_alloc = 0;
	sm->sm_blksz = 0;
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}

	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}

void
space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != space_map_blksz) {
		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
		    "object[%llu]: old bonus %u, old blocksz %u",
		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
		    doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset. Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}

/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
	if (sm == NULL)
		return;

	sm->sm_alloc = sm->sm_phys->smp_alloc;
	sm->sm_length = sm->sm_phys->smp_objsize;
}

uint64_t
space_map_alloc(objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc(os,
	    DMU_OT_SPACE_MAP, space_map_blksz,
	    DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(os, smobj, &doi));
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY0(dmu_object_free(os, smobj, tx));
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	if (sm == NULL)
		return;

	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
	sm->sm_object = 0;
}

uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	ASSERT(sm->sm_dbuf != NULL);
	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}