/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>

#define	WITH_DF_BLOCK_ALLOCATOR
/*
 * Allow allocations to switch to gang blocks quickly. We do this to
 * avoid having to load lots of space_maps in a given txg. There are,
 * however, some cases where we want to avoid "fast" ganging and instead
 * we want to do an exhaustive search of all metaslabs on this device.
 * Currently we don't allow any gang, zil, or dump device related allocations
 * to "fast" gang.
 */
#define	CAN_FASTGANG(flags) \
    (!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
    METASLAB_GANG_AVOID)))
uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space_map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;
/*
 * This value defines the number of allowed allocation failures per vdev.
 * If a device reaches this threshold in a given txg then we consider skipping
 * allocations on that device. The value of zfs_mg_alloc_failures is computed
 * in zio_init() unless it has been overridden in /etc/system.
 */
int zfs_mg_alloc_failures = 0;

/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;
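/*
 * Illustrative example (editorial note, not from the original source): with
 * zfs_mg_noalloc_threshold = 30, a metaslab group whose free capacity has
 * fallen to 25% is skipped by non-gang allocations as long as some other
 * group in the pool still reports more than 30% free; once every group is
 * at or below 30%, all of them accept allocations again.
 */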
/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;
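/*
 * Illustrative note (editorial, not from the original source): the dynamic
 * (DF) allocator stays first-fit only while both conditions hold, i.e. the
 * largest free segment is at least metaslab_df_alloc_threshold and at least
 * metaslab_df_free_pct percent of the space map is free; see
 * metaslab_df_alloc() below for the actual check.
 */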
/*
 * A metaslab is considered "free" if it contains a contiguous
 * segment which is greater than metaslab_min_alloc_size.
 */
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;

/*
 * Max number of space_maps to prefetch.
 */
int metaslab_prefetch_limit = SPA_DVAS_PER_BP;

/*
 * Percentage bonus multiplier for metaslabs that are in the bonus area.
 */
int metaslab_smo_bonus_pct = 150;

/*
 * Should we be willing to write data to degraded vdevs?
 */
boolean_t zfs_write_to_degraded = B_FALSE;
/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
    metaslab_class_t *mc;

    mc = kmem_zalloc(sizeof (metaslab_class_t), KM_PUSHPAGE);

    mutex_init(&mc->mc_fastwrite_lock, NULL, MUTEX_DEFAULT, NULL);

metaslab_class_destroy(metaslab_class_t *mc)
    ASSERT(mc->mc_rotor == NULL);
    ASSERT(mc->mc_alloc == 0);
    ASSERT(mc->mc_deferred == 0);
    ASSERT(mc->mc_space == 0);
    ASSERT(mc->mc_dspace == 0);

    mutex_destroy(&mc->mc_fastwrite_lock);
    kmem_free(mc, sizeof (metaslab_class_t));

metaslab_class_validate(metaslab_class_t *mc)
    metaslab_group_t *mg;

    /*
     * Must hold one of the spa_config locks.
     */
    ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
        spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

    if ((mg = mc->mc_rotor) == NULL)
        return;

    do {
        ASSERT(vd->vdev_mg != NULL);
        ASSERT3P(vd->vdev_top, ==, vd);
        ASSERT3P(mg->mg_class, ==, mc);
        ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
    } while ((mg = mg->mg_next) != mc->mc_rotor);
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
    atomic_add_64(&mc->mc_alloc, alloc_delta);
    atomic_add_64(&mc->mc_deferred, defer_delta);
    atomic_add_64(&mc->mc_space, space_delta);
    atomic_add_64(&mc->mc_dspace, dspace_delta);

metaslab_class_get_alloc(metaslab_class_t *mc)
    return (mc->mc_alloc);

metaslab_class_get_deferred(metaslab_class_t *mc)
    return (mc->mc_deferred);

metaslab_class_get_space(metaslab_class_t *mc)
    return (mc->mc_space);

metaslab_class_get_dspace(metaslab_class_t *mc)
    return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
metaslab_compare(const void *x1, const void *x2)
    const metaslab_t *m1 = x1;
    const metaslab_t *m2 = x2;

    if (m1->ms_weight < m2->ms_weight)
    if (m1->ms_weight > m2->ms_weight)

    /*
     * If the weights are identical, use the offset to force uniqueness.
     */
    if (m1->ms_map->sm_start < m2->ms_map->sm_start)
    if (m1->ms_map->sm_start > m2->ms_map->sm_start)

    ASSERT3P(m1, ==, m2);
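/*
 * Editorial note (not in the original source): metaslab_compare() is the
 * ordering function for mg_metaslab_tree, so the elided return statements
 * above sort metaslabs by ms_weight (heaviest first in the upstream code) and
 * fall back to sm_start to keep tree entries unique.
 */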
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the group's free capacity
 * is greater than the zfs_mg_noalloc_threshold. If a metaslab group
 * transitions from allocatable to non-allocatable or vice versa then
 * the metaslab group's class is updated to reflect the transition.
 */
metaslab_group_alloc_update(metaslab_group_t *mg)
    vdev_t *vd = mg->mg_vd;
    metaslab_class_t *mc = mg->mg_class;
    vdev_stat_t *vs = &vd->vdev_stat;
    boolean_t was_allocatable;

    ASSERT(vd == vd->vdev_top);

    mutex_enter(&mg->mg_lock);
    was_allocatable = mg->mg_allocatable;

    mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
        (vs->vs_space + 1);

    mg->mg_allocatable = (mg->mg_free_capacity > zfs_mg_noalloc_threshold);

    /*
     * mc_alloc_groups maintains a count of the number of
     * groups in this metaslab class that are still above the
     * zfs_mg_noalloc_threshold. This is used by the allocating
     * threads to determine if they should avoid allocations to
     * a given group. The allocator will avoid allocations to a group
     * if that group has reached or is below the zfs_mg_noalloc_threshold
     * and there are still other groups that are above the threshold.
     * When a group transitions from allocatable to non-allocatable or
     * vice versa we update the metaslab class to reflect that change.
     * When the mc_alloc_groups value drops to 0 that means that all
     * groups have reached the zfs_mg_noalloc_threshold making all groups
     * eligible for allocations. This effectively means that all devices
     * are balanced again.
     */
    if (was_allocatable && !mg->mg_allocatable)
        mc->mc_alloc_groups--;
    else if (!was_allocatable && mg->mg_allocatable)
        mc->mc_alloc_groups++;
    mutex_exit(&mg->mg_lock);
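/*
 * Illustrative example (editorial): a 1 TB top-level vdev with 250 GB still
 * free reports mg_free_capacity of roughly 25; with
 * zfs_mg_noalloc_threshold = 30 that group would be marked non-allocatable
 * and mc_alloc_groups would be decremented exactly once for the transition.
 */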
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
    metaslab_group_t *mg;

    mg = kmem_zalloc(sizeof (metaslab_group_t), KM_PUSHPAGE);
    mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
    avl_create(&mg->mg_metaslab_tree, metaslab_compare,
        sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
    mg->mg_activation_count = 0;

metaslab_group_destroy(metaslab_group_t *mg)
    ASSERT(mg->mg_prev == NULL);
    ASSERT(mg->mg_next == NULL);
    /*
     * We may have gone below zero with the activation count
     * either because we never activated in the first place or
     * because we're done, and possibly removing the vdev.
     */
    ASSERT(mg->mg_activation_count <= 0);

    avl_destroy(&mg->mg_metaslab_tree);
    mutex_destroy(&mg->mg_lock);
    kmem_free(mg, sizeof (metaslab_group_t));
metaslab_group_activate(metaslab_group_t *mg)
    metaslab_class_t *mc = mg->mg_class;
    metaslab_group_t *mgprev, *mgnext;

    ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

    ASSERT(mc->mc_rotor != mg);
    ASSERT(mg->mg_prev == NULL);
    ASSERT(mg->mg_next == NULL);
    ASSERT(mg->mg_activation_count <= 0);

    if (++mg->mg_activation_count <= 0)
        return;

    mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
    metaslab_group_alloc_update(mg);

    if ((mgprev = mc->mc_rotor) == NULL) {
        mg->mg_prev = mg;
        mg->mg_next = mg;
    } else {
        mgnext = mgprev->mg_next;
        mg->mg_prev = mgprev;
        mg->mg_next = mgnext;
        mgprev->mg_next = mg;
        mgnext->mg_prev = mg;
    }
    mc->mc_rotor = mg;

metaslab_group_passivate(metaslab_group_t *mg)
    metaslab_class_t *mc = mg->mg_class;
    metaslab_group_t *mgprev, *mgnext;

    ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

    if (--mg->mg_activation_count != 0) {
        ASSERT(mc->mc_rotor != mg);
        ASSERT(mg->mg_prev == NULL);
        ASSERT(mg->mg_next == NULL);
        ASSERT(mg->mg_activation_count < 0);
        return;
    }

    mgprev = mg->mg_prev;
    mgnext = mg->mg_next;

    if (mg == mgnext) {
        mc->mc_rotor = NULL;
    } else {
        mc->mc_rotor = mgnext;
        mgprev->mg_next = mgnext;
        mgnext->mg_prev = mgprev;
    }
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
    mutex_enter(&mg->mg_lock);
    ASSERT(msp->ms_group == NULL);
    avl_add(&mg->mg_metaslab_tree, msp);
    mutex_exit(&mg->mg_lock);

metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
    mutex_enter(&mg->mg_lock);
    ASSERT(msp->ms_group == mg);
    avl_remove(&mg->mg_metaslab_tree, msp);
    msp->ms_group = NULL;
    mutex_exit(&mg->mg_lock);

metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
    /*
     * Although in principle the weight can be any value, in
     * practice we do not use values in the range [1, 510].
     */
    ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
    ASSERT(MUTEX_HELD(&msp->ms_lock));

    mutex_enter(&mg->mg_lock);
    ASSERT(msp->ms_group == mg);
    avl_remove(&mg->mg_metaslab_tree, msp);
    msp->ms_weight = weight;
    avl_add(&mg->mg_metaslab_tree, msp);
    mutex_exit(&mg->mg_lock);
/*
 * Determine if a given metaslab group should skip allocations. A metaslab
 * group should avoid allocations if its free capacity is less than or
 * equal to the zfs_mg_noalloc_threshold and there is at least one metaslab
 * group that can still handle allocations.
 */
metaslab_group_allocatable(metaslab_group_t *mg)
    vdev_t *vd = mg->mg_vd;
    spa_t *spa = vd->vdev_spa;
    metaslab_class_t *mc = mg->mg_class;

    /*
     * A metaslab group is considered allocatable if its free capacity
     * is greater than the set value of zfs_mg_noalloc_threshold, it's
     * associated with a slog, or there are no other metaslab groups
     * with free capacity greater than zfs_mg_noalloc_threshold.
     */
    return (mg->mg_free_capacity > zfs_mg_noalloc_threshold ||
        mc != spa_normal_class(spa) || mc->mc_alloc_groups == 0);
/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */
metaslab_segsize_compare(const void *x1, const void *x2)
    const space_seg_t *s1 = x1;
    const space_seg_t *s2 = x2;
    uint64_t ss_size1 = s1->ss_end - s1->ss_start;
    uint64_t ss_size2 = s2->ss_end - s2->ss_start;

    if (ss_size1 < ss_size2)
    if (ss_size1 > ss_size2)

    if (s1->ss_start < s2->ss_start)
    if (s1->ss_start > s2->ss_start)
#if defined(WITH_FF_BLOCK_ALLOCATOR) || \
    defined(WITH_DF_BLOCK_ALLOCATOR) || \
    defined(WITH_CDF_BLOCK_ALLOCATOR)
/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate. This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
    space_seg_t *ss, ssearch;

    ssearch.ss_start = *cursor;
    ssearch.ss_end = *cursor + size;

    ss = avl_find(t, &ssearch, &where);
    if (ss == NULL)
        ss = avl_nearest(t, where, AVL_AFTER);

    while (ss != NULL) {
        uint64_t offset = P2ROUNDUP(ss->ss_start, align);

        if (offset + size <= ss->ss_end) {
            *cursor = offset + size;
            return (offset);
        }
        ss = AVL_NEXT(t, ss);
    }

    /*
     * If we know we've searched the whole map (*cursor == 0), give up.
     * Otherwise, reset the cursor to the beginning and try again.
     */
    if (*cursor == 0)
        return (-1ULL);

    *cursor = 0;
    return (metaslab_block_picker(t, cursor, size, align));
#endif /* WITH_FF/DF/CDF_BLOCK_ALLOCATOR */
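/*
 * Editorial note (not part of the original source): each allocator keeps a
 * per-alignment cursor in sm_ppd, so metaslab_block_picker() resumes the
 * first-fit scan where the previous allocation of the same alignment left
 * off and only restarts from offset 0 once per full pass over the map.
 */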
metaslab_pp_load(space_map_t *sm)
    ASSERT(sm->sm_ppd == NULL);
    sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_PUSHPAGE);

    sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_PUSHPAGE);
    avl_create(sm->sm_pp_root, metaslab_segsize_compare,
        sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));

    for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
        avl_add(sm->sm_pp_root, ss);

metaslab_pp_unload(space_map_t *sm)
    kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));

    while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
        /* tear down the tree */
    }

    avl_destroy(sm->sm_pp_root);
    kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
    sm->sm_pp_root = NULL;

metaslab_pp_claim(space_map_t *sm, uint64_t start, uint64_t size)
    /* No need to update cursor */

metaslab_pp_free(space_map_t *sm, uint64_t start, uint64_t size)
    /* No need to update cursor */

/*
 * Return the maximum contiguous segment within the metaslab.
 */
metaslab_pp_maxsize(space_map_t *sm)
    avl_tree_t *t = sm->sm_pp_root;

    if (t == NULL || (ss = avl_last(t)) == NULL)
        return (0);

    return (ss->ss_end - ss->ss_start);
#if defined(WITH_FF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
metaslab_ff_alloc(space_map_t *sm, uint64_t size)
    avl_tree_t *t = &sm->sm_root;
    uint64_t align = size & -size;
    uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;

    return (metaslab_block_picker(t, cursor, size, align));

metaslab_ff_fragmented(space_map_t *sm)

static space_map_ops_t metaslab_ff_ops = {
    metaslab_pp_load,
    metaslab_pp_unload,
    metaslab_ff_alloc,
    metaslab_pp_claim,
    metaslab_pp_free,
    metaslab_pp_maxsize,
    metaslab_ff_fragmented
};

space_map_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
#endif /* WITH_FF_BLOCK_ALLOCATOR */
#if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first-fit allocation scheme until space gets low and then
 * adjusts to a best-fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
metaslab_df_alloc(space_map_t *sm, uint64_t size)
    avl_tree_t *t = &sm->sm_root;
    uint64_t align = size & -size;
    uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
    uint64_t max_size = metaslab_pp_maxsize(sm);
    int free_pct = sm->sm_space * 100 / sm->sm_size;

    ASSERT(MUTEX_HELD(sm->sm_lock));
    ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

    /*
     * If we're running low on space switch to using the size
     * sorted AVL tree (best-fit).
     */
    if (max_size < metaslab_df_alloc_threshold ||
        free_pct < metaslab_df_free_pct) {
        t = sm->sm_pp_root;
        *cursor = 0;
    }

    return (metaslab_block_picker(t, cursor, size, 1ULL));
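/*
 * Illustrative example (editorial): with the defaults above, a 128 KB
 * request in a space map whose largest free segment has dropped below
 * SPA_MAXBLOCKSIZE, or whose overall free space is under 4%, is served from
 * the size-sorted tree (best-fit) instead of the offset-sorted tree.
 */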
metaslab_df_fragmented(space_map_t *sm)
    uint64_t max_size = metaslab_pp_maxsize(sm);
    int free_pct = sm->sm_space * 100 / sm->sm_size;

    if (max_size >= metaslab_df_alloc_threshold &&
        free_pct >= metaslab_df_free_pct)
        return (B_FALSE);

    return (B_TRUE);

static space_map_ops_t metaslab_df_ops = {
    metaslab_pp_load,
    metaslab_pp_unload,
    metaslab_df_alloc,
    metaslab_pp_claim,
    metaslab_pp_free,
    metaslab_pp_maxsize,
    metaslab_df_fragmented
};

space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
#endif /* WITH_DF_BLOCK_ALLOCATOR */
/*
 * ==========================================================================
 * Other experimental allocators
 * ==========================================================================
 */
#if defined(WITH_CDF_BLOCK_ALLOCATOR)
metaslab_cdf_alloc(space_map_t *sm, uint64_t size)
    avl_tree_t *t = &sm->sm_root;
    uint64_t *cursor = (uint64_t *)sm->sm_ppd;
    uint64_t *extent_end = (uint64_t *)sm->sm_ppd + 1;
    uint64_t max_size = metaslab_pp_maxsize(sm);
    uint64_t rsize = size;

    ASSERT(MUTEX_HELD(sm->sm_lock));
    ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

    ASSERT3U(*extent_end, >=, *cursor);

    /*
     * If we're running low on space switch to using the size
     * sorted AVL tree (best-fit).
     */
    if ((*cursor + size) > *extent_end) {
        *cursor = *extent_end = 0;

        if (max_size > 2 * SPA_MAXBLOCKSIZE)
            rsize = MIN(metaslab_min_alloc_size, max_size);
        offset = metaslab_block_picker(t, extent_end, rsize, 1ULL);
        *cursor = offset + size;
    } else {
        offset = metaslab_block_picker(t, cursor, rsize, 1ULL);
    }
    ASSERT3U(*cursor, <=, *extent_end);

metaslab_cdf_fragmented(space_map_t *sm)
    uint64_t max_size = metaslab_pp_maxsize(sm);

    if (max_size > (metaslab_min_alloc_size * 10))
        return (B_FALSE);
    return (B_TRUE);

static space_map_ops_t metaslab_cdf_ops = {
    metaslab_pp_load,
    metaslab_pp_unload,
    metaslab_cdf_alloc,
    metaslab_pp_claim,
    metaslab_pp_free,
    metaslab_pp_maxsize,
    metaslab_cdf_fragmented
};

space_map_ops_t *zfs_metaslab_ops = &metaslab_cdf_ops;
#endif /* WITH_CDF_BLOCK_ALLOCATOR */
#if defined(WITH_NDF_BLOCK_ALLOCATOR)
uint64_t metaslab_ndf_clump_shift = 4;

metaslab_ndf_alloc(space_map_t *sm, uint64_t size)
    avl_tree_t *t = &sm->sm_root;
    space_seg_t *ss, ssearch;
    uint64_t hbit = highbit(size);
    uint64_t *cursor = (uint64_t *)sm->sm_ppd + hbit - 1;
    uint64_t max_size = metaslab_pp_maxsize(sm);

    ASSERT(MUTEX_HELD(sm->sm_lock));
    ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

    ssearch.ss_start = *cursor;
    ssearch.ss_end = *cursor + size;

    ss = avl_find(t, &ssearch, &where);
    if (ss == NULL || (ss->ss_start + size > ss->ss_end)) {
        ssearch.ss_start = 0;
        ssearch.ss_end = MIN(max_size,
            1ULL << (hbit + metaslab_ndf_clump_shift));
        ss = avl_find(t, &ssearch, &where);
        if (ss == NULL)
            ss = avl_nearest(t, where, AVL_AFTER);
    }

    if (ss->ss_start + size <= ss->ss_end) {
        *cursor = ss->ss_start + size;
        return (ss->ss_start);
    }
    return (-1ULL);

metaslab_ndf_fragmented(space_map_t *sm)
    uint64_t max_size = metaslab_pp_maxsize(sm);

    if (max_size > (metaslab_min_alloc_size << metaslab_ndf_clump_shift))
        return (B_FALSE);
    return (B_TRUE);

static space_map_ops_t metaslab_ndf_ops = {
    metaslab_pp_load,
    metaslab_pp_unload,
    metaslab_ndf_alloc,
    metaslab_pp_claim,
    metaslab_pp_free,
    metaslab_pp_maxsize,
    metaslab_ndf_fragmented
};

space_map_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
#endif /* WITH_NDF_BLOCK_ALLOCATOR */
/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
    uint64_t start, uint64_t size, uint64_t txg)
    vdev_t *vd = mg->mg_vd;

    msp = kmem_zalloc(sizeof (metaslab_t), KM_PUSHPAGE);
    mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);

    msp->ms_smo_syncing = *smo;

    /*
     * We create the main space map here, but we don't create the
     * allocmaps and freemaps until metaslab_sync_done(). This serves
     * two purposes: it allows metaslab_sync_done() to detect the
     * addition of new space; and for debugging, it ensures that we'd
     * data fault on any attempt to use this metaslab before it's ready.
     */
    msp->ms_map = kmem_zalloc(sizeof (space_map_t), KM_PUSHPAGE);
    space_map_create(msp->ms_map, start, size,
        vd->vdev_ashift, &msp->ms_lock);

    metaslab_group_add(mg, msp);

    if (metaslab_debug_load && smo->smo_object != 0) {
        mutex_enter(&msp->ms_lock);
        VERIFY(space_map_load(msp->ms_map, mg->mg_class->mc_ops,
            SM_FREE, smo, spa_meta_objset(vd->vdev_spa)) == 0);
        mutex_exit(&msp->ms_lock);
    }

    /*
     * If we're opening an existing pool (txg == 0) or creating
     * a new one (txg == TXG_INITIAL), all space is available now.
     * If we're adding space to an existing pool, the new space
     * does not become available until after this txg has synced.
     */
    if (txg <= TXG_INITIAL)
        metaslab_sync_done(msp, 0);

    if (txg != 0) {
        vdev_dirty(vd, 0, NULL, txg);
        vdev_dirty(vd, VDD_METASLAB, msp, txg);
    }
metaslab_fini(metaslab_t *msp)
    metaslab_group_t *mg = msp->ms_group;

    vdev_space_update(mg->mg_vd,
        -msp->ms_smo.smo_alloc, 0, -msp->ms_map->sm_size);

    metaslab_group_remove(mg, msp);

    mutex_enter(&msp->ms_lock);

    space_map_unload(msp->ms_map);
    space_map_destroy(msp->ms_map);
    kmem_free(msp->ms_map, sizeof (*msp->ms_map));

    for (t = 0; t < TXG_SIZE; t++) {
        space_map_destroy(msp->ms_allocmap[t]);
        space_map_destroy(msp->ms_freemap[t]);
        kmem_free(msp->ms_allocmap[t], sizeof (*msp->ms_allocmap[t]));
        kmem_free(msp->ms_freemap[t], sizeof (*msp->ms_freemap[t]));
    }

    for (t = 0; t < TXG_DEFER_SIZE; t++) {
        space_map_destroy(msp->ms_defermap[t]);
        kmem_free(msp->ms_defermap[t], sizeof (*msp->ms_defermap[t]));
    }

    ASSERT0(msp->ms_deferspace);

    mutex_exit(&msp->ms_lock);
    mutex_destroy(&msp->ms_lock);

    kmem_free(msp, sizeof (metaslab_t));

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
    (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
metaslab_weight(metaslab_t *msp)
    metaslab_group_t *mg = msp->ms_group;
    space_map_t *sm = msp->ms_map;
    space_map_obj_t *smo = &msp->ms_smo;
    vdev_t *vd = mg->mg_vd;
    uint64_t weight, space;

    ASSERT(MUTEX_HELD(&msp->ms_lock));

    /*
     * This vdev is in the process of being removed so there is nothing
     * for us to do here.
     */
    if (vd->vdev_removing) {
        ASSERT0(smo->smo_alloc);
        ASSERT0(vd->vdev_ms_shift);
        return (0);
    }

    /*
     * The baseline weight is the metaslab's free space.
     */
    space = sm->sm_size - smo->smo_alloc;
    weight = space;

    /*
     * Modern disks have uniform bit density and constant angular velocity.
     * Therefore, the outer recording zones are faster (higher bandwidth)
     * than the inner zones by the ratio of outer to inner track diameter,
     * which is typically around 2:1. We account for this by assigning
     * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
     * In effect, this means that we'll select the metaslab with the most
     * free bandwidth rather than simply the one with the most free space.
     */
    weight = 2 * weight -
        ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
    ASSERT(weight >= space && weight <= 2 * space);
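    /*
     * Illustrative example (editorial): for a vdev with 100 metaslabs,
     * metaslab 0 keeps weight = 2 * space, the middle metaslab gets roughly
     * 1.5 * space, and the last one approaches 1 * space, which is exactly
     * the range the ASSERT above checks.
     */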
    /*
     * For locality, assign higher weight to metaslabs which have
     * a lower offset than what we've already activated.
     */
    if (sm->sm_start <= mg->mg_bonus_area)
        weight *= (metaslab_smo_bonus_pct / 100);
    ASSERT(weight >= space &&
        weight <= 2 * (metaslab_smo_bonus_pct / 100) * space);

    if (sm->sm_loaded && !sm->sm_ops->smop_fragmented(sm)) {
        /*
         * If this metaslab is one we're actively using, adjust its
         * weight to make it preferable to any inactive metaslab so
         * we'll polish it off.
         */
        weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
    }

    return (weight);
metaslab_prefetch(metaslab_group_t *mg)
    spa_t *spa = mg->mg_vd->vdev_spa;
    avl_tree_t *t = &mg->mg_metaslab_tree;

    mutex_enter(&mg->mg_lock);

    /*
     * Prefetch the next potential metaslabs
     */
    for (msp = avl_first(t), m = 0; msp; msp = AVL_NEXT(t, msp), m++) {
        space_map_t *sm = msp->ms_map;
        space_map_obj_t *smo = &msp->ms_smo;

        /* If we have reached our prefetch limit then we're done */
        if (m >= metaslab_prefetch_limit)
            break;

        if (!sm->sm_loaded && smo->smo_object != 0) {
            mutex_exit(&mg->mg_lock);
            dmu_prefetch(spa_meta_objset(spa), smo->smo_object,
                0ULL, smo->smo_objsize);
            mutex_enter(&mg->mg_lock);
        }
    }
    mutex_exit(&mg->mg_lock);
metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
    metaslab_group_t *mg = msp->ms_group;
    space_map_t *sm = msp->ms_map;
    space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops;

    ASSERT(MUTEX_HELD(&msp->ms_lock));

    if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
        space_map_load_wait(sm);
        if (!sm->sm_loaded) {
            space_map_obj_t *smo = &msp->ms_smo;

            int error = space_map_load(sm, sm_ops, SM_FREE, smo,
                spa_meta_objset(msp->ms_group->mg_vd->vdev_spa));
            if (error) {
                metaslab_group_sort(msp->ms_group, msp, 0);
                return (error);
            }
            for (t = 0; t < TXG_DEFER_SIZE; t++)
                space_map_walk(msp->ms_defermap[t],
                    space_map_claim, sm);
        }

        /*
         * Track the bonus area as we activate new metaslabs.
         */
        if (sm->sm_start > mg->mg_bonus_area) {
            mutex_enter(&mg->mg_lock);
            mg->mg_bonus_area = sm->sm_start;
            mutex_exit(&mg->mg_lock);
        }

        metaslab_group_sort(msp->ms_group, msp,
            msp->ms_weight | activation_weight);
    }
    ASSERT(sm->sm_loaded);
    ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

metaslab_passivate(metaslab_t *msp, uint64_t size)
    /*
     * If size < SPA_MINBLOCKSIZE, then we will not allocate from
     * this metaslab again. In that case, it had better be empty,
     * or we would be leaving space on the table.
     */
    ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map->sm_space == 0);
    metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
    ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
/*
 * Determine if the in-core space map representation can be condensed on-disk.
 * We would like to use the following criteria to make our decision:
 *
 * 1. The size of the space map object should not dramatically increase as a
 * result of writing out our in-core free map.
 *
 * 2. The on-disk space map representation is at least zfs_condense_pct/100
 * times the size of the space map's minimal form (i.e. with
 * zfs_condense_pct = 110, an on-disk object of 1.1MB or more whose minimal
 * form is 1MB qualifies).
 *
 * Checking the first condition is tricky since we don't want to walk
 * the entire AVL tree calculating the estimated on-disk size. Instead we
 * use the size-ordered AVL tree in the space map and calculate the
 * size required for the largest segment in our in-core free map. If the
 * size required to represent that segment on disk is larger than the space
 * map object then we avoid condensing this map.
 *
 * To determine the second criterion we use a best-case estimate and assume
 * each segment can be represented on-disk as a single 64-bit entry. We refer
 * to this best-case estimate as the space map's minimal form.
 */
metaslab_should_condense(metaslab_t *msp)
    space_map_t *sm = msp->ms_map;
    space_map_obj_t *smo = &msp->ms_smo_syncing;
    uint64_t size, entries, segsz;

    ASSERT(MUTEX_HELD(&msp->ms_lock));
    ASSERT(sm->sm_loaded);

    /*
     * Use the sm_pp_root AVL tree, which is ordered by size, to obtain
     * the largest segment in the in-core free map. If the tree is
     * empty then we should condense the map.
     */
    ss = avl_last(sm->sm_pp_root);
    if (ss == NULL)
        return (B_TRUE);

    /*
     * Calculate the number of 64-bit entries this segment would
     * require when written to disk. If this single segment would be
     * larger on-disk than the entire current on-disk structure, then
     * clearly condensing will increase the on-disk structure size.
     */
    size = (ss->ss_end - ss->ss_start) >> sm->sm_shift;
    entries = size / (MIN(size, SM_RUN_MAX));
    segsz = entries * sizeof (uint64_t);

    return (segsz <= smo->smo_objsize &&
        smo->smo_objsize >= (zfs_condense_pct *
        sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) / 100);
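/*
 * Illustrative example (editorial): with zfs_condense_pct = 200 and an
 * in-core free map of 10,000 segments, the minimal form is 10,000 * 8 bytes,
 * roughly 80 KB, so the on-disk object must already be at least ~160 KB (and
 * no smaller than the largest single segment's on-disk size) before
 * metaslab_should_condense() returns B_TRUE.
 */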
/*
 * Condense the on-disk space map representation to its minimized form.
 * The minimized form consists of a small number of allocations followed by
 * the in-core free map.
 */
metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
    spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
    space_map_t *freemap = msp->ms_freemap[txg & TXG_MASK];
    space_map_t condense_map;
    space_map_t *sm = msp->ms_map;
    objset_t *mos = spa_meta_objset(spa);
    space_map_obj_t *smo = &msp->ms_smo_syncing;

    ASSERT(MUTEX_HELD(&msp->ms_lock));
    ASSERT3U(spa_sync_pass(spa), ==, 1);
    ASSERT(sm->sm_loaded);

    spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, "
        "smo size %llu, segments %lu", txg,
        (msp->ms_map->sm_start / msp->ms_map->sm_size), msp,
        smo->smo_objsize, avl_numnodes(&sm->sm_root));

    /*
     * Create a map that is 100% allocated. We remove segments
     * that have been freed in this txg, any deferred frees that exist,
     * and any allocation in the future. Removing segments should be
     * a relatively inexpensive operation since we expect these maps to
     * contain a small number of nodes.
     */
    space_map_create(&condense_map, sm->sm_start, sm->sm_size,
        sm->sm_shift, sm->sm_lock);
    space_map_add(&condense_map, condense_map.sm_start,
        condense_map.sm_size);

    /*
     * Remove what's been freed in this txg from the condense_map.
     * Since we're in sync_pass 1, we know that all the frees from
     * this txg are in the freemap.
     */
    space_map_walk(freemap, space_map_remove, &condense_map);

    for (t = 0; t < TXG_DEFER_SIZE; t++)
        space_map_walk(msp->ms_defermap[t],
            space_map_remove, &condense_map);

    for (t = 1; t < TXG_CONCURRENT_STATES; t++)
        space_map_walk(msp->ms_allocmap[(txg + t) & TXG_MASK],
            space_map_remove, &condense_map);

    /*
     * We're about to drop the metaslab's lock thus allowing
     * other consumers to change its content. Set the
     * space_map's sm_condensing flag to ensure that
     * allocations on this metaslab do not occur while we're
     * in the middle of committing it to disk. This is only critical
     * for the ms_map as all other space_maps use per-txg
     * views of their content.
     */
    sm->sm_condensing = B_TRUE;

    mutex_exit(&msp->ms_lock);
    space_map_truncate(smo, mos, tx);
    mutex_enter(&msp->ms_lock);

    /*
     * While we would ideally like to create a space_map representation
     * that consists only of allocation records, doing so can be
     * prohibitively expensive because the in-core free map can be
     * large, and therefore computationally expensive to subtract
     * from the condense_map. Instead we sync out two maps, a cheap
     * allocation-only map followed by the in-core free map. While not
     * optimal, this is typically close to optimal, and much cheaper to
     * compute.
     */
    space_map_sync(&condense_map, SM_ALLOC, smo, mos, tx);
    space_map_vacate(&condense_map, NULL, NULL);
    space_map_destroy(&condense_map);

    space_map_sync(sm, SM_FREE, smo, mos, tx);
    sm->sm_condensing = B_FALSE;

    spa_dbgmsg(spa, "condensed: txg %llu, msp[%llu] %p, "
        "smo size %llu", txg,
        (msp->ms_map->sm_start / msp->ms_map->sm_size), msp,
        smo->smo_objsize);
/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
metaslab_sync(metaslab_t *msp, uint64_t txg)
    vdev_t *vd = msp->ms_group->mg_vd;
    spa_t *spa = vd->vdev_spa;
    objset_t *mos = spa_meta_objset(spa);
    space_map_t *allocmap = msp->ms_allocmap[txg & TXG_MASK];
    space_map_t **freemap = &msp->ms_freemap[txg & TXG_MASK];
    space_map_t **freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
    space_map_t *sm = msp->ms_map;
    space_map_obj_t *smo = &msp->ms_smo_syncing;

    ASSERT(!vd->vdev_ishole);

    /*
     * This metaslab has just been added so there's no work to do now.
     */
    if (*freemap == NULL) {
        ASSERT3P(allocmap, ==, NULL);
        return;
    }

    ASSERT3P(allocmap, !=, NULL);
    ASSERT3P(*freemap, !=, NULL);
    ASSERT3P(*freed_map, !=, NULL);

    if (allocmap->sm_space == 0 && (*freemap)->sm_space == 0)
        return;

    /*
     * The only state that can actually be changing concurrently with
     * metaslab_sync() is the metaslab's ms_map. No other thread can
     * be modifying this txg's allocmap, freemap, freed_map, or smo.
     * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
     * We drop it whenever we call into the DMU, because the DMU
     * can call down to us (e.g. via zio_free()) at any time.
     */
    tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

    if (smo->smo_object == 0) {
        ASSERT(smo->smo_objsize == 0);
        ASSERT(smo->smo_alloc == 0);
        smo->smo_object = dmu_object_alloc(mos,
            DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
            DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
        ASSERT(smo->smo_object != 0);
        dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
            (sm->sm_start >> vd->vdev_ms_shift),
            sizeof (uint64_t), &smo->smo_object, tx);
    }

    mutex_enter(&msp->ms_lock);

    if (sm->sm_loaded && spa_sync_pass(spa) == 1 &&
        metaslab_should_condense(msp)) {
        metaslab_condense(msp, txg, tx);
    } else {
        space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
        space_map_sync(*freemap, SM_FREE, smo, mos, tx);
    }

    space_map_vacate(allocmap, NULL, NULL);

    /*
     * For sync pass 1, we avoid walking the entire space map and
     * instead will just swap the pointers for freemap and
     * freed_map. We can safely do this since the freed_map is
     * guaranteed to be empty on the initial pass.
     */
    if (spa_sync_pass(spa) == 1) {
        ASSERT0((*freed_map)->sm_space);
        ASSERT0(avl_numnodes(&(*freed_map)->sm_root));
        space_map_swap(freemap, freed_map);
    } else {
        space_map_vacate(*freemap, space_map_add, *freed_map);
    }

    ASSERT0(msp->ms_allocmap[txg & TXG_MASK]->sm_space);
    ASSERT0(msp->ms_freemap[txg & TXG_MASK]->sm_space);

    mutex_exit(&msp->ms_lock);

    VERIFY0(dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
    dmu_buf_will_dirty(db, tx);
    ASSERT3U(db->db_size, >=, sizeof (*smo));
    bcopy(smo, db->db_data, sizeof (*smo));
    dmu_buf_rele(db, FTAG);

    dmu_tx_commit(tx);
/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
    space_map_obj_t *smo = &msp->ms_smo;
    space_map_obj_t *smosync = &msp->ms_smo_syncing;
    space_map_t *sm = msp->ms_map;
    space_map_t **freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
    space_map_t **defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];
    metaslab_group_t *mg = msp->ms_group;
    vdev_t *vd = mg->mg_vd;
    int64_t alloc_delta, defer_delta;

    ASSERT(!vd->vdev_ishole);

    mutex_enter(&msp->ms_lock);

    /*
     * If this metaslab is just becoming available, initialize its
     * allocmaps, freemaps, and defermap and add its capacity to the vdev.
     */
    if (*freed_map == NULL) {
        ASSERT(*defer_map == NULL);
        for (t = 0; t < TXG_SIZE; t++) {
            msp->ms_allocmap[t] = kmem_zalloc(sizeof (space_map_t),
                KM_PUSHPAGE);
            space_map_create(msp->ms_allocmap[t], sm->sm_start,
                sm->sm_size, sm->sm_shift, sm->sm_lock);
            msp->ms_freemap[t] = kmem_zalloc(sizeof (space_map_t),
                KM_PUSHPAGE);
            space_map_create(msp->ms_freemap[t], sm->sm_start,
                sm->sm_size, sm->sm_shift, sm->sm_lock);
        }

        for (t = 0; t < TXG_DEFER_SIZE; t++) {
            msp->ms_defermap[t] = kmem_zalloc(sizeof (space_map_t),
                KM_PUSHPAGE);
            space_map_create(msp->ms_defermap[t], sm->sm_start,
                sm->sm_size, sm->sm_shift, sm->sm_lock);
        }

        freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
        defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];

        vdev_space_update(vd, 0, 0, sm->sm_size);
    }

    alloc_delta = smosync->smo_alloc - smo->smo_alloc;
    defer_delta = (*freed_map)->sm_space - (*defer_map)->sm_space;

    vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);

    ASSERT(msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0);
    ASSERT(msp->ms_freemap[txg & TXG_MASK]->sm_space == 0);

    /*
     * If there's a space_map_load() in progress, wait for it to complete
     * so that we have a consistent view of the in-core space map.
     */
    space_map_load_wait(sm);

    /*
     * Move the frees from the defer_map to this map (if it's loaded).
     * Swap the freed_map and the defer_map -- this is safe to do
     * because we've just emptied out the defer_map.
     */
    space_map_vacate(*defer_map, sm->sm_loaded ? space_map_free : NULL, sm);
    ASSERT0((*defer_map)->sm_space);
    ASSERT0(avl_numnodes(&(*defer_map)->sm_root));
    space_map_swap(freed_map, defer_map);

    msp->ms_deferspace += defer_delta;
    ASSERT3S(msp->ms_deferspace, >=, 0);
    ASSERT3S(msp->ms_deferspace, <=, sm->sm_size);
    if (msp->ms_deferspace != 0) {
        /*
         * Keep syncing this metaslab until all deferred frees
         * are back in circulation.
         */
        vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
    }

    /*
     * If the map is loaded but no longer active, evict it as soon as all
     * future allocations have synced. (If we unloaded it now and then
     * loaded a moment later, the map wouldn't reflect those allocations.)
     */
    if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
        int evictable = 1;

        for (t = 1; t < TXG_CONCURRENT_STATES; t++)
            if (msp->ms_allocmap[(txg + t) & TXG_MASK]->sm_space)
                evictable = 0;

        if (evictable && !metaslab_debug_unload)
            space_map_unload(sm);
    }

    metaslab_group_sort(mg, msp, metaslab_weight(msp));

    mutex_exit(&msp->ms_lock);
metaslab_sync_reassess(metaslab_group_t *mg)
    vdev_t *vd = mg->mg_vd;
    int64_t failures = mg->mg_alloc_failures;

    metaslab_group_alloc_update(mg);

    /*
     * Re-evaluate all metaslabs which have lower offsets than the
     * bonus area.
     */
    for (m = 0; m < vd->vdev_ms_count; m++) {
        metaslab_t *msp = vd->vdev_ms[m];

        if (msp->ms_map->sm_start > mg->mg_bonus_area)
            break;

        mutex_enter(&msp->ms_lock);
        metaslab_group_sort(mg, msp, metaslab_weight(msp));
        mutex_exit(&msp->ms_lock);
    }

    atomic_add_64(&mg->mg_alloc_failures, -failures);

    /*
     * Prefetch the next potential metaslabs
     */
    metaslab_prefetch(mg);
metaslab_distance(metaslab_t *msp, dva_t *dva)
    uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
    uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
    uint64_t start = msp->ms_map->sm_start >> ms_shift;

    if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
        return (1ULL << 63);

    if (offset < start)
        return ((start - offset) << ms_shift);
    if (offset > start)
        return ((offset - start) << ms_shift);
    return (0);
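/*
 * Illustrative note (editorial): metaslab_distance() returns the absolute
 * distance, rounded to metaslab boundaries, between this metaslab and an
 * existing DVA on the same top-level vdev; DVAs on other vdevs report the
 * maximal distance (1ULL << 63) so that spreading across vdevs always wins.
 */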
metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
    uint64_t txg, uint64_t min_distance, dva_t *dva, int d, int flags)
    spa_t *spa = mg->mg_vd->vdev_spa;
    metaslab_t *msp = NULL;
    uint64_t offset = -1ULL;
    avl_tree_t *t = &mg->mg_metaslab_tree;
    uint64_t activation_weight;
    uint64_t target_distance;

    activation_weight = METASLAB_WEIGHT_PRIMARY;
    for (i = 0; i < d; i++) {
        if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
            activation_weight = METASLAB_WEIGHT_SECONDARY;
            break;
        }
    }

    for (;;) {
        boolean_t was_active;

        mutex_enter(&mg->mg_lock);
        for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
            if (msp->ms_weight < asize) {
                spa_dbgmsg(spa, "%s: failed to meet weight "
                    "requirement: vdev %llu, txg %llu, mg %p, "
                    "msp %p, psize %llu, asize %llu, "
                    "failures %llu, weight %llu",
                    spa_name(spa), mg->mg_vd->vdev_id, txg,
                    mg, msp, psize, asize,
                    mg->mg_alloc_failures, msp->ms_weight);
                mutex_exit(&mg->mg_lock);
                return (-1ULL);
            }

            /*
             * If the selected metaslab is condensing, skip it.
             */
            if (msp->ms_map->sm_condensing)
                continue;

            was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
            if (activation_weight == METASLAB_WEIGHT_PRIMARY)
                break;

            target_distance = min_distance +
                (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);

            for (i = 0; i < d; i++)
                if (metaslab_distance(msp, &dva[i]) <
                    target_distance)
                    break;
            if (i == d)
                break;
        }
        mutex_exit(&mg->mg_lock);
        if (msp == NULL)
            return (-1ULL);

        mutex_enter(&msp->ms_lock);

        /*
         * If we've already reached the allowable number of failed
         * allocation attempts on this metaslab group then we
         * consider skipping it. We skip it only if we're allowed
         * to "fast" gang, the physical size is larger than
         * a gang block, and we're attempting to allocate from
         * the primary metaslab.
         */
        if (mg->mg_alloc_failures > zfs_mg_alloc_failures &&
            CAN_FASTGANG(flags) && psize > SPA_GANGBLOCKSIZE &&
            activation_weight == METASLAB_WEIGHT_PRIMARY) {
            spa_dbgmsg(spa, "%s: skipping metaslab group: "
                "vdev %llu, txg %llu, mg %p, psize %llu, "
                "asize %llu, failures %llu", spa_name(spa),
                mg->mg_vd->vdev_id, txg, mg, psize, asize,
                mg->mg_alloc_failures);
            mutex_exit(&msp->ms_lock);
            return (-1ULL);
        }

        /*
         * Ensure that the metaslab we have selected is still
         * capable of handling our request. It's possible that
         * another thread may have changed the weight while we
         * were blocked on the metaslab lock.
         */
        if (msp->ms_weight < asize || (was_active &&
            !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
            activation_weight == METASLAB_WEIGHT_PRIMARY)) {
            mutex_exit(&msp->ms_lock);
            continue;
        }

        if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
            activation_weight == METASLAB_WEIGHT_PRIMARY) {
            metaslab_passivate(msp,
                msp->ms_weight & ~METASLAB_ACTIVE_MASK);
            mutex_exit(&msp->ms_lock);
            continue;
        }

        if (metaslab_activate(msp, activation_weight) != 0) {
            mutex_exit(&msp->ms_lock);
            continue;
        }

        /*
         * If this metaslab is currently condensing then pick again as
         * we can't manipulate this metaslab until it's committed
         * to disk.
         */
        if (msp->ms_map->sm_condensing) {
            mutex_exit(&msp->ms_lock);
            continue;
        }

        if ((offset = space_map_alloc(msp->ms_map, asize)) != -1ULL)
            break;

        atomic_inc_64(&mg->mg_alloc_failures);

        metaslab_passivate(msp, space_map_maxsize(msp->ms_map));

        mutex_exit(&msp->ms_lock);
    }

    if (msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0)
        vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

    space_map_add(msp->ms_allocmap[txg & TXG_MASK], offset, asize);

    mutex_exit(&msp->ms_lock);

    return (offset);
/*
 * Allocate a block for the specified i/o.
 */
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
    metaslab_group_t *mg, *fast_mg, *rotor;
    int zio_lock = B_FALSE;
    boolean_t allocatable;
    uint64_t offset = -1ULL;

    ASSERT(!DVA_IS_VALID(&dva[d]));

    /*
     * For testing, make some blocks above a certain size be gang blocks.
     */
    if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
        return (SET_ERROR(ENOSPC));

    if (flags & METASLAB_FASTWRITE)
        mutex_enter(&mc->mc_fastwrite_lock);

    /*
     * Start at the rotor and loop through all mgs until we find something.
     * Note that there's no locking on mc_rotor or mc_aliquot because
     * nothing actually breaks if we miss a few updates -- we just won't
     * allocate quite as evenly. It all balances out over time.
     *
     * If we are doing ditto or log blocks, try to spread them across
     * consecutive vdevs. If we're forced to reuse a vdev before we've
     * allocated all of our ditto blocks, then try and spread them out on
     * that vdev as much as possible. If it turns out to not be possible,
     * gradually lower our standards until anything becomes acceptable.
     * Also, allocating on consecutive vdevs (as opposed to random vdevs)
     * gives us hope of containing our fault domains to something we're
     * able to reason about. Otherwise, any two top-level vdev failures
     * will guarantee the loss of data. With consecutive allocation,
     * only two adjacent top-level vdev failures will result in data loss.
     *
     * If we are doing gang blocks (hintdva is non-NULL), try to keep
     * ourselves on the same vdev as our gang block header. That
     * way, we can hope for locality in vdev_cache, plus it makes our
     * fault domains something tractable.
     */
    if (hintdva) {
        vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

        /*
         * It's possible the vdev we're using as the hint no
         * longer exists (i.e. removed). Consult the rotor when
         * this happens.
         */
        if (vd != NULL) {
            mg = vd->vdev_mg;

            if (flags & METASLAB_HINTBP_AVOID &&
                mg->mg_next != NULL)
                mg = mg->mg_next;
        } else {
            mg = mc->mc_rotor;
        }
    } else if (d != 0) {
        vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
        mg = vd->vdev_mg->mg_next;
    } else if (flags & METASLAB_FASTWRITE) {
        mg = fast_mg = mc->mc_rotor;

        do {
            if (fast_mg->mg_vd->vdev_pending_fastwrite <
                mg->mg_vd->vdev_pending_fastwrite)
                mg = fast_mg;
        } while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);
    } else {
        mg = mc->mc_rotor;
    }

    /*
     * If the hint put us into the wrong metaslab class, or into a
     * metaslab group that has been passivated, just follow the rotor.
     */
    if (mg->mg_class != mc || mg->mg_activation_count <= 0)
        mg = mc->mc_rotor;

    rotor = mg;
top:
    do {
        ASSERT(mg->mg_activation_count == 1);

        vd = mg->mg_vd;

        /*
         * Don't allocate from faulted devices.
         */
        if (zio_lock) {
            spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
            allocatable = vdev_allocatable(vd);
            spa_config_exit(spa, SCL_ZIO, FTAG);
        } else {
            allocatable = vdev_allocatable(vd);
        }

        /*
         * Determine if the selected metaslab group is eligible
         * for allocations. If we're ganging or have requested
         * an allocation for the smallest gang block size
         * then we don't want to avoid allocating to this
         * metaslab group. If we're in this condition we should
         * try to allocate from any device possible so that we
         * don't inadvertently return ENOSPC and suspend the pool
         * even though space is still available.
         */
        if (allocatable && CAN_FASTGANG(flags) &&
            psize > SPA_GANGBLOCKSIZE)
            allocatable = metaslab_group_allocatable(mg);

        if (!allocatable)
            goto next;

        /*
         * Avoid writing single-copy data to a failing vdev
         * unless the user instructs us that it is okay.
         */
        if ((vd->vdev_stat.vs_write_errors > 0 ||
            vd->vdev_state < VDEV_STATE_HEALTHY) &&
            d == 0 && dshift == 3 &&
            !(zfs_write_to_degraded && vd->vdev_state ==
            VDEV_STATE_DEGRADED)) {
            goto next;
        }

        ASSERT(mg->mg_class == mc);

        distance = vd->vdev_asize >> dshift;
        if (distance <= (1ULL << vd->vdev_ms_shift))
            distance = 0;
        else
            distance *= d;

        asize = vdev_psize_to_asize(vd, psize);
        ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

        offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
            dva, d, flags);
        if (offset != -1ULL) {
            /*
             * If we've just selected this metaslab group,
             * figure out whether the corresponding vdev is
             * over- or under-used relative to the pool,
             * and set an allocation bias to even it out.
             */
            if (mc->mc_aliquot == 0) {
                vdev_stat_t *vs = &vd->vdev_stat;

                vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
                cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);

                /*
                 * Calculate how much more or less we should
                 * try to allocate from this device during
                 * this iteration around the rotor.
                 * For example, if a device is 80% full
                 * and the pool is 20% full then we should
                 * reduce allocations by 60% on this device.
                 *
                 * mg_bias = (20 - 80) * 512K / 100 = -307K
                 *
                 * This reduces allocations by 307K for this
                 * iteration.
                 */
                mg->mg_bias = ((cu - vu) *
                    (int64_t)mg->mg_aliquot) / 100;
            }

            if ((flags & METASLAB_FASTWRITE) ||
                atomic_add_64_nv(&mc->mc_aliquot, asize) >=
                mg->mg_aliquot + mg->mg_bias) {
                mc->mc_rotor = mg->mg_next;
                mc->mc_aliquot = 0;
            }

            DVA_SET_VDEV(&dva[d], vd->vdev_id);
            DVA_SET_OFFSET(&dva[d], offset);
            DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
            DVA_SET_ASIZE(&dva[d], asize);

            if (flags & METASLAB_FASTWRITE) {
                atomic_add_64(&vd->vdev_pending_fastwrite,
                    psize);
                mutex_exit(&mc->mc_fastwrite_lock);
            }

            return (0);
        }
next:
        mc->mc_rotor = mg->mg_next;
        mc->mc_aliquot = 0;
    } while ((mg = mg->mg_next) != rotor);

    ASSERT(dshift < 64);

    if (!allocatable && !zio_lock) {
        dshift = 3;
        zio_lock = B_TRUE;
        goto top;
    }

    bzero(&dva[d], sizeof (dva_t));

    if (flags & METASLAB_FASTWRITE)
        mutex_exit(&mc->mc_fastwrite_lock);

    return (SET_ERROR(ENOSPC));
/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
    uint64_t vdev = DVA_GET_VDEV(dva);
    uint64_t offset = DVA_GET_OFFSET(dva);
    uint64_t size = DVA_GET_ASIZE(dva);

    ASSERT(DVA_IS_VALID(dva));

    if (txg > spa_freeze_txg(spa))
        return;

    if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
        (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
        cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
            (u_longlong_t)vdev, (u_longlong_t)offset);
        return;
    }

    msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

    if (DVA_GET_GANG(dva))
        size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

    mutex_enter(&msp->ms_lock);

    if (now) {
        space_map_remove(msp->ms_allocmap[txg & TXG_MASK],
            offset, size);
        space_map_free(msp->ms_map, offset, size);
    } else {
        if (msp->ms_freemap[txg & TXG_MASK]->sm_space == 0)
            vdev_dirty(vd, VDD_METASLAB, msp, txg);
        space_map_add(msp->ms_freemap[txg & TXG_MASK], offset, size);
    }

    mutex_exit(&msp->ms_lock);
/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
    uint64_t vdev = DVA_GET_VDEV(dva);
    uint64_t offset = DVA_GET_OFFSET(dva);
    uint64_t size = DVA_GET_ASIZE(dva);

    ASSERT(DVA_IS_VALID(dva));

    if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
        (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
        return (SET_ERROR(ENXIO));

    msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

    if (DVA_GET_GANG(dva))
        size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

    mutex_enter(&msp->ms_lock);

    if ((txg != 0 && spa_writeable(spa)) || !msp->ms_map->sm_loaded)
        error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);

    if (error == 0 && !space_map_contains(msp->ms_map, offset, size))
        error = SET_ERROR(ENOENT);

    if (error || txg == 0) {	/* txg == 0 indicates dry run */
        mutex_exit(&msp->ms_lock);
        return (error);
    }

    space_map_claim(msp->ms_map, offset, size);

    if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
        if (msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0)
            vdev_dirty(vd, VDD_METASLAB, msp, txg);
        space_map_add(msp->ms_allocmap[txg & TXG_MASK], offset, size);
    }

    mutex_exit(&msp->ms_lock);

    return (0);
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
    dva_t *dva = bp->blk_dva;
    dva_t *hintdva = hintbp->blk_dva;

    ASSERT(bp->blk_birth == 0);
    ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

    spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

    if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
        spa_config_exit(spa, SCL_ALLOC, FTAG);
        return (SET_ERROR(ENOSPC));
    }

    ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
    ASSERT(BP_GET_NDVAS(bp) == 0);
    ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

    for (d = 0; d < ndvas; d++) {
        error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
            txg, flags);
        if (error != 0) {
            for (d--; d >= 0; d--) {
                metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
                bzero(&dva[d], sizeof (dva_t));
            }
            spa_config_exit(spa, SCL_ALLOC, FTAG);
            return (error);
        }
    }
    ASSERT(error == 0);
    ASSERT(BP_GET_NDVAS(bp) == ndvas);

    spa_config_exit(spa, SCL_ALLOC, FTAG);

    BP_SET_BIRTH(bp, txg, txg);

    return (0);
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
    const dva_t *dva = bp->blk_dva;
    int d, ndvas = BP_GET_NDVAS(bp);

    ASSERT(!BP_IS_HOLE(bp));
    ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

    spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

    for (d = 0; d < ndvas; d++)
        metaslab_free_dva(spa, &dva[d], txg, now);

    spa_config_exit(spa, SCL_FREE, FTAG);
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
    const dva_t *dva = bp->blk_dva;
    int ndvas = BP_GET_NDVAS(bp);

    ASSERT(!BP_IS_HOLE(bp));

    if (txg != 0) {
        /*
         * First do a dry run to make sure all DVAs are claimable,
         * so we don't have to unwind from partial failures below.
         */
        if ((error = metaslab_claim(spa, bp, 0)) != 0)
            return (error);
    }

    spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

    for (d = 0; d < ndvas; d++)
        if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
            break;

    spa_config_exit(spa, SCL_ALLOC, FTAG);

    ASSERT(error == 0 || txg == 0);

    return (error);
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
    const dva_t *dva = bp->blk_dva;
    int ndvas = BP_GET_NDVAS(bp);
    uint64_t psize = BP_GET_PSIZE(bp);

    ASSERT(!BP_IS_HOLE(bp));

    spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

    for (d = 0; d < ndvas; d++) {
        if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
            continue;
        atomic_add_64(&vd->vdev_pending_fastwrite, psize);
    }

    spa_config_exit(spa, SCL_VDEV, FTAG);

metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
    const dva_t *dva = bp->blk_dva;
    int ndvas = BP_GET_NDVAS(bp);
    uint64_t psize = BP_GET_PSIZE(bp);

    ASSERT(!BP_IS_HOLE(bp));

    spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

    for (d = 0; d < ndvas; d++) {
        if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
            continue;
        ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
        atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
    }

    spa_config_exit(spa, SCL_VDEV, FTAG);
checkmap(space_map_t *sm, uint64_t off, uint64_t size)
    mutex_enter(sm->sm_lock);
    ss = space_map_find(sm, off, size, &where);
    if (ss != NULL)
        panic("freeing free block; ss=%p", (void *)ss);
    mutex_exit(sm->sm_lock);

metaslab_check_free(spa_t *spa, const blkptr_t *bp)
    if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
        return;

    spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
    for (i = 0; i < BP_GET_NDVAS(bp); i++) {
        uint64_t vdid = DVA_GET_VDEV(&bp->blk_dva[i]);
        vdev_t *vd = vdev_lookup_top(spa, vdid);
        uint64_t off = DVA_GET_OFFSET(&bp->blk_dva[i]);
        uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
        metaslab_t *ms = vd->vdev_ms[off >> vd->vdev_ms_shift];

        if (ms->ms_map->sm_loaded)
            checkmap(ms->ms_map, off, size);

        for (j = 0; j < TXG_SIZE; j++)
            checkmap(ms->ms_freemap[j], off, size);
        for (j = 0; j < TXG_DEFER_SIZE; j++)
            checkmap(ms->ms_defermap[j], off, size);
    }
    spa_config_exit(spa, SCL_VDEV, FTAG);
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(metaslab_debug_load, int, 0644);
MODULE_PARM_DESC(metaslab_debug_load, "load all metaslabs during pool import");

module_param(metaslab_debug_unload, int, 0644);
MODULE_PARM_DESC(metaslab_debug_unload,
    "prevent metaslabs from being unloaded");

module_param(zfs_mg_noalloc_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
    "percentage of free space for metaslab group to allow allocation");
#endif /* _KERNEL && HAVE_SPL */
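/*
 * Usage note (editorial): the parameters exported above can be set at module
 * load time, e.g. "modprobe zfs zfs_mg_noalloc_threshold=30", or adjusted at
 * runtime through /sys/module/zfs/parameters/ on Linux builds.
 */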