/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#define	WITH_DF_BLOCK_ALLOCATOR
/*
 * Allow allocations to switch to gang blocks quickly. We do this to
 * avoid having to load lots of space_maps in a given txg. There are,
 * however, some cases where we want to avoid "fast" ganging and instead
 * we want to do an exhaustive search of all metaslabs on this device.
 * Currently we don't allow any gang, zil, or dump device related allocations
 * to "fast" gang.
 */
#define	CAN_FASTGANG(flags) \
	(!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
	METASLAB_GANG_AVOID)))
#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK \
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space_map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;
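/*
 * Illustrative note (not in the original source): with the default
 * zfs_condense_pct = 200, a loaded metaslab becomes a candidate for
 * condensing once its on-disk space map is at least twice the size of
 * its minimal in-core form (one 64-bit entry per free segment); see
 * metaslab_should_condense() below.
 */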
/*
 * This value defines the number of allowed allocation failures per vdev.
 * If a device reaches this threshold in a given txg then we consider skipping
 * allocations on that device. The value of zfs_mg_alloc_failures is computed
 * in zio_init() unless it has been overridden in /etc/system.
 */
int zfs_mg_alloc_failures = 0;
/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;
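/*
 * Illustrative example (not in the original source): with
 * zfs_mg_noalloc_threshold = 30, a metaslab group with only 25% free
 * space is skipped by non-gang allocations as long as at least one other
 * group in the pool still has more than 30% free.
 */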
/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;
/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;
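/*
 * Illustrative example (not in the original source): with the defaults
 * above, metaslab_df_alloc() keeps doing first-fit (offset-ordered)
 * searches until either the largest free segment drops below
 * metaslab_df_alloc_threshold or less than metaslab_df_free_pct of the
 * metaslab is free, at which point it switches to the size-ordered
 * (best-fit) tree.
 */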
/*
 * A metaslab is considered "free" if it contains a contiguous
 * segment which is greater than metaslab_min_alloc_size.
 */
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;

/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;
/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;
/*
 * Should we be willing to write data to degraded vdevs?
 */
boolean_t zfs_write_to_degraded = B_FALSE;

/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;

/*
 * Enable/disable preloading of metaslab.
 */
boolean_t metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable additional weight factor for each metaslab.
 */
boolean_t metaslab_weight_factor_enable = B_FALSE;
/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_PUSHPAGE);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_fastwrite_lock, NULL, MUTEX_DEFAULT, NULL);

	return (mc);
}
void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	mutex_destroy(&mc->mc_fastwrite_lock);
	kmem_free(mc, sizeof (metaslab_class_t));
}
188 metaslab_class_validate(metaslab_class_t
*mc
)
190 metaslab_group_t
*mg
;
194 * Must hold one of the spa_config locks.
196 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALL
, RW_READER
) ||
197 spa_config_held(mc
->mc_spa
, SCL_ALL
, RW_WRITER
));
199 if ((mg
= mc
->mc_rotor
) == NULL
)
204 ASSERT(vd
->vdev_mg
!= NULL
);
205 ASSERT3P(vd
->vdev_top
, ==, vd
);
206 ASSERT3P(mg
->mg_class
, ==, mc
);
207 ASSERT3P(vd
->vdev_ops
, !=, &vdev_hole_ops
);
208 } while ((mg
= mg
->mg_next
) != mc
->mc_rotor
);
214 metaslab_class_space_update(metaslab_class_t
*mc
, int64_t alloc_delta
,
215 int64_t defer_delta
, int64_t space_delta
, int64_t dspace_delta
)
217 atomic_add_64(&mc
->mc_alloc
, alloc_delta
);
218 atomic_add_64(&mc
->mc_deferred
, defer_delta
);
219 atomic_add_64(&mc
->mc_space
, space_delta
);
220 atomic_add_64(&mc
->mc_dspace
, dspace_delta
);
224 metaslab_class_get_alloc(metaslab_class_t
*mc
)
226 return (mc
->mc_alloc
);
230 metaslab_class_get_deferred(metaslab_class_t
*mc
)
232 return (mc
->mc_deferred
);
236 metaslab_class_get_space(metaslab_class_t
*mc
)
238 return (mc
->mc_space
);
242 metaslab_class_get_dspace(metaslab_class_t
*mc
)
244 return (spa_deflate(mc
->mc_spa
) ? mc
->mc_dspace
: mc
->mc_space
);
/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
253 metaslab_compare(const void *x1
, const void *x2
)
255 const metaslab_t
*m1
= x1
;
256 const metaslab_t
*m2
= x2
;
258 if (m1
->ms_weight
< m2
->ms_weight
)
260 if (m1
->ms_weight
> m2
->ms_weight
)
264 * If the weights are identical, use the offset to force uniqueness.
266 if (m1
->ms_start
< m2
->ms_start
)
268 if (m1
->ms_start
> m2
->ms_start
)
271 ASSERT3P(m1
, ==, m2
);
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the group's free capacity
 * is greater than the zfs_mg_noalloc_threshold. If a metaslab group
 * transitions from allocatable to non-allocatable or vice versa then
 * the metaslab group's class is updated to reflect the transition.
 */
284 metaslab_group_alloc_update(metaslab_group_t
*mg
)
286 vdev_t
*vd
= mg
->mg_vd
;
287 metaslab_class_t
*mc
= mg
->mg_class
;
288 vdev_stat_t
*vs
= &vd
->vdev_stat
;
289 boolean_t was_allocatable
;
291 ASSERT(vd
== vd
->vdev_top
);
293 mutex_enter(&mg
->mg_lock
);
294 was_allocatable
= mg
->mg_allocatable
;
296 mg
->mg_free_capacity
= ((vs
->vs_space
- vs
->vs_alloc
) * 100) /
299 mg
->mg_allocatable
= (mg
->mg_free_capacity
> zfs_mg_noalloc_threshold
);
302 * The mc_alloc_groups maintains a count of the number of
303 * groups in this metaslab class that are still above the
304 * zfs_mg_noalloc_threshold. This is used by the allocating
305 * threads to determine if they should avoid allocations to
306 * a given group. The allocator will avoid allocations to a group
307 * if that group has reached or is below the zfs_mg_noalloc_threshold
308 * and there are still other groups that are above the threshold.
309 * When a group transitions from allocatable to non-allocatable or
310 * vice versa we update the metaslab class to reflect that change.
311 * When the mc_alloc_groups value drops to 0 that means that all
312 * groups have reached the zfs_mg_noalloc_threshold making all groups
313 * eligible for allocations. This effectively means that all devices
314 * are balanced again.
316 if (was_allocatable
&& !mg
->mg_allocatable
)
317 mc
->mc_alloc_groups
--;
318 else if (!was_allocatable
&& mg
->mg_allocatable
)
319 mc
->mc_alloc_groups
++;
320 mutex_exit(&mg
->mg_lock
);
324 metaslab_group_create(metaslab_class_t
*mc
, vdev_t
*vd
)
326 metaslab_group_t
*mg
;
328 mg
= kmem_zalloc(sizeof (metaslab_group_t
), KM_PUSHPAGE
);
329 mutex_init(&mg
->mg_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
330 avl_create(&mg
->mg_metaslab_tree
, metaslab_compare
,
331 sizeof (metaslab_t
), offsetof(struct metaslab
, ms_group_node
));
334 mg
->mg_activation_count
= 0;
336 mg
->mg_taskq
= taskq_create("metaslab_group_taskq", metaslab_load_pct
,
337 minclsyspri
, 10, INT_MAX
, TASKQ_THREADS_CPU_PCT
);
343 metaslab_group_destroy(metaslab_group_t
*mg
)
345 ASSERT(mg
->mg_prev
== NULL
);
346 ASSERT(mg
->mg_next
== NULL
);
348 * We may have gone below zero with the activation count
349 * either because we never activated in the first place or
350 * because we're done, and possibly removing the vdev.
352 ASSERT(mg
->mg_activation_count
<= 0);
354 taskq_destroy(mg
->mg_taskq
);
355 avl_destroy(&mg
->mg_metaslab_tree
);
356 mutex_destroy(&mg
->mg_lock
);
357 kmem_free(mg
, sizeof (metaslab_group_t
));
361 metaslab_group_activate(metaslab_group_t
*mg
)
363 metaslab_class_t
*mc
= mg
->mg_class
;
364 metaslab_group_t
*mgprev
, *mgnext
;
366 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALLOC
, RW_WRITER
));
368 ASSERT(mc
->mc_rotor
!= mg
);
369 ASSERT(mg
->mg_prev
== NULL
);
370 ASSERT(mg
->mg_next
== NULL
);
371 ASSERT(mg
->mg_activation_count
<= 0);
373 if (++mg
->mg_activation_count
<= 0)
376 mg
->mg_aliquot
= metaslab_aliquot
* MAX(1, mg
->mg_vd
->vdev_children
);
377 metaslab_group_alloc_update(mg
);
379 if ((mgprev
= mc
->mc_rotor
) == NULL
) {
383 mgnext
= mgprev
->mg_next
;
384 mg
->mg_prev
= mgprev
;
385 mg
->mg_next
= mgnext
;
386 mgprev
->mg_next
= mg
;
387 mgnext
->mg_prev
= mg
;
393 metaslab_group_passivate(metaslab_group_t
*mg
)
395 metaslab_class_t
*mc
= mg
->mg_class
;
396 metaslab_group_t
*mgprev
, *mgnext
;
398 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALLOC
, RW_WRITER
));
400 if (--mg
->mg_activation_count
!= 0) {
401 ASSERT(mc
->mc_rotor
!= mg
);
402 ASSERT(mg
->mg_prev
== NULL
);
403 ASSERT(mg
->mg_next
== NULL
);
404 ASSERT(mg
->mg_activation_count
< 0);
408 taskq_wait(mg
->mg_taskq
);
410 mgprev
= mg
->mg_prev
;
411 mgnext
= mg
->mg_next
;
416 mc
->mc_rotor
= mgnext
;
417 mgprev
->mg_next
= mgnext
;
418 mgnext
->mg_prev
= mgprev
;
426 metaslab_group_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
428 mutex_enter(&mg
->mg_lock
);
429 ASSERT(msp
->ms_group
== NULL
);
432 avl_add(&mg
->mg_metaslab_tree
, msp
);
433 mutex_exit(&mg
->mg_lock
);
437 metaslab_group_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
439 mutex_enter(&mg
->mg_lock
);
440 ASSERT(msp
->ms_group
== mg
);
441 avl_remove(&mg
->mg_metaslab_tree
, msp
);
442 msp
->ms_group
= NULL
;
443 mutex_exit(&mg
->mg_lock
);
447 metaslab_group_sort(metaslab_group_t
*mg
, metaslab_t
*msp
, uint64_t weight
)
450 * Although in principle the weight can be any value, in
451 * practice we do not use values in the range [1, 510].
453 ASSERT(weight
>= SPA_MINBLOCKSIZE
-1 || weight
== 0);
454 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
456 mutex_enter(&mg
->mg_lock
);
457 ASSERT(msp
->ms_group
== mg
);
458 avl_remove(&mg
->mg_metaslab_tree
, msp
);
459 msp
->ms_weight
= weight
;
460 avl_add(&mg
->mg_metaslab_tree
, msp
);
461 mutex_exit(&mg
->mg_lock
);
465 * Determine if a given metaslab group should skip allocations. A metaslab
466 * group should avoid allocations if its used capacity has crossed the
467 * zfs_mg_noalloc_threshold and there is at least one metaslab group
468 * that can still handle allocations.
471 metaslab_group_allocatable(metaslab_group_t
*mg
)
473 vdev_t
*vd
= mg
->mg_vd
;
474 spa_t
*spa
= vd
->vdev_spa
;
475 metaslab_class_t
*mc
= mg
->mg_class
;
478 * A metaslab group is considered allocatable if its free capacity
479 * is greater than the set value of zfs_mg_noalloc_threshold, it's
480 * associated with a slog, or there are no other metaslab groups
481 * with free capacity greater than zfs_mg_noalloc_threshold.
483 return (mg
->mg_free_capacity
> zfs_mg_noalloc_threshold
||
484 mc
!= spa_normal_class(spa
) || mc
->mc_alloc_groups
== 0);
488 * ==========================================================================
489 * Range tree callbacks
490 * ==========================================================================
494 * Comparison function for the private size-ordered tree. Tree is sorted
495 * by size, larger sizes at the end of the tree.
498 metaslab_rangesize_compare(const void *x1
, const void *x2
)
500 const range_seg_t
*r1
= x1
;
501 const range_seg_t
*r2
= x2
;
502 uint64_t rs_size1
= r1
->rs_end
- r1
->rs_start
;
503 uint64_t rs_size2
= r2
->rs_end
- r2
->rs_start
;
505 if (rs_size1
< rs_size2
)
507 if (rs_size1
> rs_size2
)
510 if (r1
->rs_start
< r2
->rs_start
)
513 if (r1
->rs_start
> r2
->rs_start
)
520 * Create any block allocator specific components. The current allocators
521 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
524 metaslab_rt_create(range_tree_t
*rt
, void *arg
)
526 metaslab_t
*msp
= arg
;
528 ASSERT3P(rt
->rt_arg
, ==, msp
);
529 ASSERT(msp
->ms_tree
== NULL
);
531 avl_create(&msp
->ms_size_tree
, metaslab_rangesize_compare
,
532 sizeof (range_seg_t
), offsetof(range_seg_t
, rs_pp_node
));
536 * Destroy the block allocator specific components.
539 metaslab_rt_destroy(range_tree_t
*rt
, void *arg
)
541 metaslab_t
*msp
= arg
;
543 ASSERT3P(rt
->rt_arg
, ==, msp
);
544 ASSERT3P(msp
->ms_tree
, ==, rt
);
545 ASSERT0(avl_numnodes(&msp
->ms_size_tree
));
547 avl_destroy(&msp
->ms_size_tree
);
551 metaslab_rt_add(range_tree_t
*rt
, range_seg_t
*rs
, void *arg
)
553 metaslab_t
*msp
= arg
;
555 ASSERT3P(rt
->rt_arg
, ==, msp
);
556 ASSERT3P(msp
->ms_tree
, ==, rt
);
557 VERIFY(!msp
->ms_condensing
);
558 avl_add(&msp
->ms_size_tree
, rs
);
562 metaslab_rt_remove(range_tree_t
*rt
, range_seg_t
*rs
, void *arg
)
564 metaslab_t
*msp
= arg
;
566 ASSERT3P(rt
->rt_arg
, ==, msp
);
567 ASSERT3P(msp
->ms_tree
, ==, rt
);
568 VERIFY(!msp
->ms_condensing
);
569 avl_remove(&msp
->ms_size_tree
, rs
);
573 metaslab_rt_vacate(range_tree_t
*rt
, void *arg
)
575 metaslab_t
*msp
= arg
;
577 ASSERT3P(rt
->rt_arg
, ==, msp
);
578 ASSERT3P(msp
->ms_tree
, ==, rt
);
581 * Normally one would walk the tree freeing nodes along the way.
582 * Since the nodes are shared with the range trees we can avoid
583 * walking all nodes and just reinitialize the avl tree. The nodes
584 * will be freed by the range tree, so we don't want to free them here.
586 avl_create(&msp
->ms_size_tree
, metaslab_rangesize_compare
,
587 sizeof (range_seg_t
), offsetof(range_seg_t
, rs_pp_node
));
590 static range_tree_ops_t metaslab_rt_ops
= {
599 * ==========================================================================
600 * Metaslab block operations
601 * ==========================================================================
605 * Return the maximum contiguous segment within the metaslab.
608 metaslab_block_maxsize(metaslab_t
*msp
)
610 avl_tree_t
*t
= &msp
->ms_size_tree
;
613 if (t
== NULL
|| (rs
= avl_last(t
)) == NULL
)
616 return (rs
->rs_end
- rs
->rs_start
);
620 metaslab_block_alloc(metaslab_t
*msp
, uint64_t size
)
623 range_tree_t
*rt
= msp
->ms_tree
;
625 VERIFY(!msp
->ms_condensing
);
627 start
= msp
->ms_ops
->msop_alloc(msp
, size
);
628 if (start
!= -1ULL) {
629 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
631 VERIFY0(P2PHASE(start
, 1ULL << vd
->vdev_ashift
));
632 VERIFY0(P2PHASE(size
, 1ULL << vd
->vdev_ashift
));
633 VERIFY3U(range_tree_space(rt
) - size
, <=, msp
->ms_size
);
634 range_tree_remove(rt
, start
, size
);
640 * ==========================================================================
641 * Common allocator routines
642 * ==========================================================================
645 #if defined(WITH_FF_BLOCK_ALLOCATOR) || \
646 defined(WITH_DF_BLOCK_ALLOCATOR) || \
647 defined(WITH_CF_BLOCK_ALLOCATOR)
649 * This is a helper function that can be used by the allocator to find
650 * a suitable block to allocate. This will search the specified AVL
651 * tree looking for a block that matches the specified criteria.
654 metaslab_block_picker(avl_tree_t
*t
, uint64_t *cursor
, uint64_t size
,
657 range_seg_t
*rs
, rsearch
;
660 rsearch
.rs_start
= *cursor
;
661 rsearch
.rs_end
= *cursor
+ size
;
663 rs
= avl_find(t
, &rsearch
, &where
);
665 rs
= avl_nearest(t
, where
, AVL_AFTER
);
668 uint64_t offset
= P2ROUNDUP(rs
->rs_start
, align
);
670 if (offset
+ size
<= rs
->rs_end
) {
671 *cursor
= offset
+ size
;
674 rs
= AVL_NEXT(t
, rs
);
678 * If we know we've searched the whole map (*cursor == 0), give up.
679 * Otherwise, reset the cursor to the beginning and try again.
685 return (metaslab_block_picker(t
, cursor
, size
, align
));
687 #endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */
689 #if defined(WITH_FF_BLOCK_ALLOCATOR)
691 * ==========================================================================
692 * The first-fit block allocator
693 * ==========================================================================
static uint64_t
metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket), but it does not guarantee that other allocation sizes
	 * may exist in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit(align) - 1];
	avl_tree_t *t = &msp->ms_tree->rt_root;

	return (metaslab_block_picker(t, cursor, size, align));
}
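/*
 * Illustrative note (not in the original source): for a 12K (0x3000)
 * request above, size & -size isolates the lowest set bit, giving
 * align = 0x1000 (4K); highbit(0x1000) - 1 = 12, so the allocation uses
 * the 4K-alignment cursor in ms_lbas[12].
 */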
714 metaslab_ff_fragmented(metaslab_t
*msp
)
719 static metaslab_ops_t metaslab_ff_ops
= {
721 metaslab_ff_fragmented
724 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_ff_ops
;
725 #endif /* WITH_FF_BLOCK_ALLOCATOR */
727 #if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first-fit allocation scheme until space gets low and then
 * adjusts to a best-fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
737 metaslab_df_alloc(metaslab_t
*msp
, uint64_t size
)
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket), but it does not guarantee that other allocation sizes
	 * may exist in the same region.
	 */
746 uint64_t align
= size
& -size
;
747 uint64_t *cursor
= &msp
->ms_lbas
[highbit(align
) - 1];
748 range_tree_t
*rt
= msp
->ms_tree
;
749 avl_tree_t
*t
= &rt
->rt_root
;
750 uint64_t max_size
= metaslab_block_maxsize(msp
);
751 int free_pct
= range_tree_space(rt
) * 100 / msp
->ms_size
;
753 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
754 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&msp
->ms_size_tree
));
760 * If we're running low on space switch to using the size
761 * sorted AVL tree (best-fit).
763 if (max_size
< metaslab_df_alloc_threshold
||
764 free_pct
< metaslab_df_free_pct
) {
765 t
= &msp
->ms_size_tree
;
769 return (metaslab_block_picker(t
, cursor
, size
, 1ULL));
773 metaslab_df_fragmented(metaslab_t
*msp
)
775 range_tree_t
*rt
= msp
->ms_tree
;
776 uint64_t max_size
= metaslab_block_maxsize(msp
);
777 int free_pct
= range_tree_space(rt
) * 100 / msp
->ms_size
;
779 if (max_size
>= metaslab_df_alloc_threshold
&&
780 free_pct
>= metaslab_df_free_pct
)
787 static metaslab_ops_t metaslab_df_ops
= {
789 metaslab_df_fragmented
792 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_df_ops
;
793 #endif /* WITH_DF_BLOCK_ALLOCATOR */
795 #if defined(WITH_CF_BLOCK_ALLOCATOR)
797 * ==========================================================================
798 * Cursor fit block allocator -
799 * Select the largest region in the metaslab, set the cursor to the beginning
800 * of the range and the cursor_end to the end of the range. As allocations
801 * are made advance the cursor. Continue allocating from the cursor until
802 * the range is exhausted and then find a new range.
803 * ==========================================================================
806 metaslab_cf_alloc(metaslab_t
*msp
, uint64_t size
)
808 range_tree_t
*rt
= msp
->ms_tree
;
809 avl_tree_t
*t
= &msp
->ms_size_tree
;
810 uint64_t *cursor
= &msp
->ms_lbas
[0];
811 uint64_t *cursor_end
= &msp
->ms_lbas
[1];
814 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
815 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&rt
->rt_root
));
817 ASSERT3U(*cursor_end
, >=, *cursor
);
819 if ((*cursor
+ size
) > *cursor_end
) {
822 rs
= avl_last(&msp
->ms_size_tree
);
823 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
)
826 *cursor
= rs
->rs_start
;
827 *cursor_end
= rs
->rs_end
;
837 metaslab_cf_fragmented(metaslab_t
*msp
)
839 return (metaslab_block_maxsize(msp
) < metaslab_min_alloc_size
);
842 static metaslab_ops_t metaslab_cf_ops
= {
844 metaslab_cf_fragmented
847 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_cf_ops
;
848 #endif /* WITH_CF_BLOCK_ALLOCATOR */
850 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
852 * ==========================================================================
853 * New dynamic fit allocator -
854 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
855 * contiguous blocks. If no region is found then just use the largest segment
857 * ==========================================================================
861 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
862 * to request from the allocator.
864 uint64_t metaslab_ndf_clump_shift
= 4;
867 metaslab_ndf_alloc(metaslab_t
*msp
, uint64_t size
)
869 avl_tree_t
*t
= &msp
->ms_tree
->rt_root
;
871 range_seg_t
*rs
, rsearch
;
872 uint64_t hbit
= highbit(size
);
873 uint64_t *cursor
= &msp
->ms_lbas
[hbit
- 1];
874 uint64_t max_size
= metaslab_block_maxsize(msp
);
876 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
877 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&msp
->ms_size_tree
));
882 rsearch
.rs_start
= *cursor
;
883 rsearch
.rs_end
= *cursor
+ size
;
885 rs
= avl_find(t
, &rsearch
, &where
);
886 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
) {
887 t
= &msp
->ms_size_tree
;
889 rsearch
.rs_start
= 0;
890 rsearch
.rs_end
= MIN(max_size
,
891 1ULL << (hbit
+ metaslab_ndf_clump_shift
));
892 rs
= avl_find(t
, &rsearch
, &where
);
894 rs
= avl_nearest(t
, where
, AVL_AFTER
);
898 if ((rs
->rs_end
- rs
->rs_start
) >= size
) {
899 *cursor
= rs
->rs_start
+ size
;
900 return (rs
->rs_start
);
906 metaslab_ndf_fragmented(metaslab_t
*msp
)
908 return (metaslab_block_maxsize(msp
) <=
909 (metaslab_min_alloc_size
<< metaslab_ndf_clump_shift
));
912 static metaslab_ops_t metaslab_ndf_ops
= {
914 metaslab_ndf_fragmented
917 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_ndf_ops
;
918 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
928 * Wait for any in-progress metaslab loads to complete.
931 metaslab_load_wait(metaslab_t
*msp
)
933 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
935 while (msp
->ms_loading
) {
936 ASSERT(!msp
->ms_loaded
);
937 cv_wait(&msp
->ms_load_cv
, &msp
->ms_lock
);
942 metaslab_load(metaslab_t
*msp
)
947 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
948 ASSERT(!msp
->ms_loaded
);
949 ASSERT(!msp
->ms_loading
);
951 msp
->ms_loading
= B_TRUE
;
954 * If the space map has not been allocated yet, then treat
955 * all the space in the metaslab as free and add it to the
958 if (msp
->ms_sm
!= NULL
)
959 error
= space_map_load(msp
->ms_sm
, msp
->ms_tree
, SM_FREE
);
961 range_tree_add(msp
->ms_tree
, msp
->ms_start
, msp
->ms_size
);
963 msp
->ms_loaded
= (error
== 0);
964 msp
->ms_loading
= B_FALSE
;
966 if (msp
->ms_loaded
) {
967 for (t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
968 range_tree_walk(msp
->ms_defertree
[t
],
969 range_tree_remove
, msp
->ms_tree
);
972 cv_broadcast(&msp
->ms_load_cv
);
977 metaslab_unload(metaslab_t
*msp
)
979 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
980 range_tree_vacate(msp
->ms_tree
, NULL
, NULL
);
981 msp
->ms_loaded
= B_FALSE
;
982 msp
->ms_weight
&= ~METASLAB_ACTIVE_MASK
;
986 metaslab_init(metaslab_group_t
*mg
, uint64_t id
, uint64_t object
, uint64_t txg
)
988 vdev_t
*vd
= mg
->mg_vd
;
989 objset_t
*mos
= vd
->vdev_spa
->spa_meta_objset
;
992 msp
= kmem_zalloc(sizeof (metaslab_t
), KM_PUSHPAGE
);
993 mutex_init(&msp
->ms_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
994 cv_init(&msp
->ms_load_cv
, NULL
, CV_DEFAULT
, NULL
);
996 msp
->ms_start
= id
<< vd
->vdev_ms_shift
;
997 msp
->ms_size
= 1ULL << vd
->vdev_ms_shift
;
1000 * We only open space map objects that already exist. All others
1001 * will be opened when we finally allocate an object for it.
1004 VERIFY0(space_map_open(&msp
->ms_sm
, mos
, object
, msp
->ms_start
,
1005 msp
->ms_size
, vd
->vdev_ashift
, &msp
->ms_lock
));
1006 ASSERT(msp
->ms_sm
!= NULL
);
1010 * We create the main range tree here, but we don't create the
1011 * alloctree and freetree until metaslab_sync_done(). This serves
1012 * two purposes: it allows metaslab_sync_done() to detect the
1013 * addition of new space; and for debugging, it ensures that we'd
1014 * data fault on any attempt to use this metaslab before it's ready.
1016 msp
->ms_tree
= range_tree_create(&metaslab_rt_ops
, msp
, &msp
->ms_lock
);
1017 metaslab_group_add(mg
, msp
);
1019 msp
->ms_ops
= mg
->mg_class
->mc_ops
;
1022 * If we're opening an existing pool (txg == 0) or creating
1023 * a new one (txg == TXG_INITIAL), all space is available now.
1024 * If we're adding space to an existing pool, the new space
1025 * does not become available until after this txg has synced.
1027 if (txg
<= TXG_INITIAL
)
1028 metaslab_sync_done(msp
, 0);
	/*
	 * If metaslab_debug_load is set and we're initializing a metaslab
	 * that has an allocated space_map object then load its space
	 * map so that we can verify frees.
	 */
1035 if (metaslab_debug_load
&& msp
->ms_sm
!= NULL
) {
1036 mutex_enter(&msp
->ms_lock
);
1037 VERIFY0(metaslab_load(msp
));
1038 mutex_exit(&msp
->ms_lock
);
1042 vdev_dirty(vd
, 0, NULL
, txg
);
1043 vdev_dirty(vd
, VDD_METASLAB
, msp
, txg
);
1050 metaslab_fini(metaslab_t
*msp
)
1054 metaslab_group_t
*mg
= msp
->ms_group
;
1056 metaslab_group_remove(mg
, msp
);
1058 mutex_enter(&msp
->ms_lock
);
1060 VERIFY(msp
->ms_group
== NULL
);
1061 vdev_space_update(mg
->mg_vd
, -space_map_allocated(msp
->ms_sm
),
1063 space_map_close(msp
->ms_sm
);
1065 metaslab_unload(msp
);
1066 range_tree_destroy(msp
->ms_tree
);
1068 for (t
= 0; t
< TXG_SIZE
; t
++) {
1069 range_tree_destroy(msp
->ms_alloctree
[t
]);
1070 range_tree_destroy(msp
->ms_freetree
[t
]);
1073 for (t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1074 range_tree_destroy(msp
->ms_defertree
[t
]);
1077 ASSERT0(msp
->ms_deferspace
);
1079 mutex_exit(&msp
->ms_lock
);
1080 cv_destroy(&msp
->ms_load_cv
);
1081 mutex_destroy(&msp
->ms_lock
);
1083 kmem_free(msp
, sizeof (metaslab_t
));
1087 * Apply a weighting factor based on the histogram information for this
1088 * metaslab. The current weighting factor is somewhat arbitrary and requires
1089 * additional investigation. The implementation provides a measure of
1090 * "weighted" free space and gives a higher weighting for larger contiguous
1091 * regions. The weighting factor is determined by counting the number of
1092 * sm_shift sectors that exist in each region represented by the histogram.
1093 * That value is then multiplied by the power of 2 exponent and the sm_shift
1096 * For example, assume the 2^21 histogram bucket has 4 2MB regions and the
1097 * metaslab has an sm_shift value of 9 (512B):
1099 * 1) calculate the number of sm_shift sectors in the region:
1100 * 2^21 / 2^9 = 2^12 = 4096 * 4 (number of regions) = 16384
1101 * 2) multiply by the power of 2 exponent and the sm_shift value:
1102 * 16384 * 21 * 9 = 3096576
1103 * This value will be added to the weighting of the metaslab.
1106 metaslab_weight_factor(metaslab_t
*msp
)
1108 uint64_t factor
= 0;
1113 * A null space map means that the entire metaslab is free,
1114 * calculate a weight factor that spans the entire size of the
1117 if (msp
->ms_sm
== NULL
) {
1118 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
1120 i
= highbit(msp
->ms_size
) - 1;
1121 sectors
= msp
->ms_size
>> vd
->vdev_ashift
;
1122 return (sectors
* i
* vd
->vdev_ashift
);
1125 if (msp
->ms_sm
->sm_dbuf
->db_size
!= sizeof (space_map_phys_t
))
1128 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE(msp
->ms_sm
); i
++) {
1129 if (msp
->ms_sm
->sm_phys
->smp_histogram
[i
] == 0)
1133 * Determine the number of sm_shift sectors in the region
1134 * indicated by the histogram. For example, given an
1135 * sm_shift value of 9 (512 bytes) and i = 4 then we know
1136 * that we're looking at an 8K region in the histogram
1137 * (i.e. 9 + 4 = 13, 2^13 = 8192). To figure out the
1138 * number of sm_shift sectors (512 bytes in this example),
1139 * we would take 8192 / 512 = 16. Since the histogram
1140 * is offset by sm_shift we can simply use the value of
1141 * of i to calculate this (i.e. 2^i = 16 where i = 4).
1143 sectors
= msp
->ms_sm
->sm_phys
->smp_histogram
[i
] << i
;
1144 factor
+= (i
+ msp
->ms_sm
->sm_shift
) * sectors
;
1146 return (factor
* msp
->ms_sm
->sm_shift
);
1150 metaslab_weight(metaslab_t
*msp
)
1152 metaslab_group_t
*mg
= msp
->ms_group
;
1153 vdev_t
*vd
= mg
->mg_vd
;
1154 uint64_t weight
, space
;
1156 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1159 * This vdev is in the process of being removed so there is nothing
1160 * for us to do here.
1162 if (vd
->vdev_removing
) {
1163 ASSERT0(space_map_allocated(msp
->ms_sm
));
1164 ASSERT0(vd
->vdev_ms_shift
);
	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = msp->ms_size - space_map_allocated(msp->ms_sm);
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1. We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
	ASSERT(weight >= space && weight <= 2 * space);
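	/*
	 * Illustrative example (not in the original source): with the formula
	 * above, the first metaslab on a vdev (ms_id == 0) is weighted at
	 * 2 * space, while the last metaslab's weight approaches space,
	 * matching the 2x..1x multiplier described in the comment.
	 */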
1186 msp
->ms_factor
= metaslab_weight_factor(msp
);
1187 if (metaslab_weight_factor_enable
)
1188 weight
+= msp
->ms_factor
;
1190 if (msp
->ms_loaded
&& !msp
->ms_ops
->msop_fragmented(msp
)) {
1192 * If this metaslab is one we're actively using, adjust its
1193 * weight to make it preferable to any inactive metaslab so
1194 * we'll polish it off.
1196 weight
|= (msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
1203 metaslab_activate(metaslab_t
*msp
, uint64_t activation_weight
)
1205 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1207 if ((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) == 0) {
1208 metaslab_load_wait(msp
);
1209 if (!msp
->ms_loaded
) {
1210 int error
= metaslab_load(msp
);
1212 metaslab_group_sort(msp
->ms_group
, msp
, 0);
1217 metaslab_group_sort(msp
->ms_group
, msp
,
1218 msp
->ms_weight
| activation_weight
);
1220 ASSERT(msp
->ms_loaded
);
1221 ASSERT(msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
1227 metaslab_passivate(metaslab_t
*msp
, uint64_t size
)
1230 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1231 * this metaslab again. In that case, it had better be empty,
1232 * or we would be leaving space on the table.
1234 ASSERT(size
>= SPA_MINBLOCKSIZE
|| range_tree_space(msp
->ms_tree
) == 0);
1235 metaslab_group_sort(msp
->ms_group
, msp
, MIN(msp
->ms_weight
, size
));
1236 ASSERT((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) == 0);
1240 metaslab_preload(void *arg
)
1242 metaslab_t
*msp
= arg
;
1243 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1245 ASSERT(!MUTEX_HELD(&msp
->ms_group
->mg_lock
));
1247 mutex_enter(&msp
->ms_lock
);
1248 metaslab_load_wait(msp
);
1249 if (!msp
->ms_loaded
)
1250 (void) metaslab_load(msp
);
1253 * Set the ms_access_txg value so that we don't unload it right away.
1255 msp
->ms_access_txg
= spa_syncing_txg(spa
) + metaslab_unload_delay
+ 1;
1256 mutex_exit(&msp
->ms_lock
);
1260 metaslab_group_preload(metaslab_group_t
*mg
)
1262 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
1264 avl_tree_t
*t
= &mg
->mg_metaslab_tree
;
1267 if (spa_shutting_down(spa
) || !metaslab_preload_enabled
) {
1268 taskq_wait(mg
->mg_taskq
);
1272 mutex_enter(&mg
->mg_lock
);
1274 * Load the next potential metaslabs
1277 while (msp
!= NULL
) {
1278 metaslab_t
*msp_next
= AVL_NEXT(t
, msp
);
1280 /* If we have reached our preload limit then we're done */
1281 if (++m
> metaslab_preload_limit
)
1285 * We must drop the metaslab group lock here to preserve
1286 * lock ordering with the ms_lock (when grabbing both
1287 * the mg_lock and the ms_lock, the ms_lock must be taken
1288 * first). As a result, it is possible that the ordering
1289 * of the metaslabs within the avl tree may change before
1290 * we reacquire the lock. The metaslab cannot be removed from
1291 * the tree while we're in syncing context so it is safe to
1292 * drop the mg_lock here. If the metaslabs are reordered
1293 * nothing will break -- we just may end up loading a
1294 * less than optimal one.
1296 mutex_exit(&mg
->mg_lock
);
1297 VERIFY(taskq_dispatch(mg
->mg_taskq
, metaslab_preload
,
1298 msp
, TQ_PUSHPAGE
) != 0);
1299 mutex_enter(&mg
->mg_lock
);
1302 mutex_exit(&mg
->mg_lock
);
/*
 * Determine if the space map's on-disk footprint is past our tolerance
 * for inefficiency. We would like to use the following criteria to make
 * our decision:
 *
 * 1. The size of the space map object should not dramatically increase as a
 * result of writing out the free space range tree.
 *
 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
 * times the size of the free space range tree representation
 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
 *
 * Checking the first condition is tricky since we don't want to walk
 * the entire AVL tree calculating the estimated on-disk size. Instead we
 * use the size-ordered range tree in the metaslab and calculate the
 * size required to write out the largest segment in our free tree. If the
 * size required to represent that segment on disk is larger than the space
 * map object then we avoid condensing this map.
 *
 * To determine the second criterion we use a best-case estimate and assume
 * each segment can be represented on-disk as a single 64-bit entry. We refer
 * to this best-case estimate as the space map's minimal form.
 */
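/*
 * Illustrative example (not in the original source): with the default
 * zfs_condense_pct = 200 and an in-core free tree holding 1000 segments,
 * the minimal form is 1000 * 8 bytes = 8000 bytes, so condensing is only
 * considered once the on-disk space map has grown to at least 16000 bytes
 * (and writing out just the largest free segment would not by itself
 * exceed the current on-disk length).
 */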
1329 metaslab_should_condense(metaslab_t
*msp
)
1331 space_map_t
*sm
= msp
->ms_sm
;
1333 uint64_t size
, entries
, segsz
;
1335 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1336 ASSERT(msp
->ms_loaded
);
1339 * Use the ms_size_tree range tree, which is ordered by size, to
1340 * obtain the largest segment in the free tree. If the tree is empty
1341 * then we should condense the map.
1343 rs
= avl_last(&msp
->ms_size_tree
);
1348 * Calculate the number of 64-bit entries this segment would
1349 * require when written to disk. If this single segment would be
1350 * larger on-disk than the entire current on-disk structure, then
1351 * clearly condensing will increase the on-disk structure size.
1353 size
= (rs
->rs_end
- rs
->rs_start
) >> sm
->sm_shift
;
1354 entries
= size
/ (MIN(size
, SM_RUN_MAX
));
1355 segsz
= entries
* sizeof (uint64_t);
1357 return (segsz
<= space_map_length(msp
->ms_sm
) &&
1358 space_map_length(msp
->ms_sm
) >= (zfs_condense_pct
*
1359 sizeof (uint64_t) * avl_numnodes(&msp
->ms_tree
->rt_root
)) / 100);
1363 * Condense the on-disk space map representation to its minimized form.
1364 * The minimized form consists of a small number of allocations followed by
1365 * the entries of the free range tree.
1368 metaslab_condense(metaslab_t
*msp
, uint64_t txg
, dmu_tx_t
*tx
)
1370 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1371 range_tree_t
*freetree
= msp
->ms_freetree
[txg
& TXG_MASK
];
1372 range_tree_t
*condense_tree
;
1373 space_map_t
*sm
= msp
->ms_sm
;
1376 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1377 ASSERT3U(spa_sync_pass(spa
), ==, 1);
1378 ASSERT(msp
->ms_loaded
);
1380 spa_dbgmsg(spa
, "condensing: txg %llu, msp[%llu] %p, "
1381 "smp size %llu, segments %lu", txg
, msp
->ms_id
, msp
,
1382 space_map_length(msp
->ms_sm
), avl_numnodes(&msp
->ms_tree
->rt_root
));
	/*
	 * Create a range tree that is 100% allocated. We remove segments
	 * that have been freed in this txg, any deferred frees that exist,
	 * and any allocation in the future. Removing segments should be
	 * a relatively inexpensive operation since we expect these trees to
	 * have a small number of nodes.
	 */
1391 condense_tree
= range_tree_create(NULL
, NULL
, &msp
->ms_lock
);
1392 range_tree_add(condense_tree
, msp
->ms_start
, msp
->ms_size
);
1395 * Remove what's been freed in this txg from the condense_tree.
1396 * Since we're in sync_pass 1, we know that all the frees from
1397 * this txg are in the freetree.
1399 range_tree_walk(freetree
, range_tree_remove
, condense_tree
);
1401 for (t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1402 range_tree_walk(msp
->ms_defertree
[t
],
1403 range_tree_remove
, condense_tree
);
1406 for (t
= 1; t
< TXG_CONCURRENT_STATES
; t
++) {
1407 range_tree_walk(msp
->ms_alloctree
[(txg
+ t
) & TXG_MASK
],
1408 range_tree_remove
, condense_tree
);
	/*
	 * We're about to drop the metaslab's lock thus allowing
	 * other consumers to change its content. Set the
	 * metaslab's ms_condensing flag to ensure that
	 * allocations on this metaslab do not occur while we're
	 * in the middle of committing it to disk. This is only critical
	 * for the ms_tree as all other range trees use per txg
	 * views of their content.
	 */
1420 msp
->ms_condensing
= B_TRUE
;
1422 mutex_exit(&msp
->ms_lock
);
1423 space_map_truncate(sm
, tx
);
1424 mutex_enter(&msp
->ms_lock
);
1427 * While we would ideally like to create a space_map representation
1428 * that consists only of allocation records, doing so can be
1429 * prohibitively expensive because the in-core free tree can be
1430 * large, and therefore computationally expensive to subtract
1431 * from the condense_tree. Instead we sync out two trees, a cheap
1432 * allocation only tree followed by the in-core free tree. While not
1433 * optimal, this is typically close to optimal, and much cheaper to
1436 space_map_write(sm
, condense_tree
, SM_ALLOC
, tx
);
1437 range_tree_vacate(condense_tree
, NULL
, NULL
);
1438 range_tree_destroy(condense_tree
);
1440 space_map_write(sm
, msp
->ms_tree
, SM_FREE
, tx
);
1441 msp
->ms_condensing
= B_FALSE
;
1445 * Write a metaslab to disk in the context of the specified transaction group.
1448 metaslab_sync(metaslab_t
*msp
, uint64_t txg
)
1450 metaslab_group_t
*mg
= msp
->ms_group
;
1451 vdev_t
*vd
= mg
->mg_vd
;
1452 spa_t
*spa
= vd
->vdev_spa
;
1453 objset_t
*mos
= spa_meta_objset(spa
);
1454 range_tree_t
*alloctree
= msp
->ms_alloctree
[txg
& TXG_MASK
];
1455 range_tree_t
**freetree
= &msp
->ms_freetree
[txg
& TXG_MASK
];
1456 range_tree_t
**freed_tree
=
1457 &msp
->ms_freetree
[TXG_CLEAN(txg
) & TXG_MASK
];
1459 uint64_t object
= space_map_object(msp
->ms_sm
);
1461 ASSERT(!vd
->vdev_ishole
);
1464 * This metaslab has just been added so there's no work to do now.
1466 if (*freetree
== NULL
) {
1467 ASSERT3P(alloctree
, ==, NULL
);
1471 ASSERT3P(alloctree
, !=, NULL
);
1472 ASSERT3P(*freetree
, !=, NULL
);
1473 ASSERT3P(*freed_tree
, !=, NULL
);
1475 if (range_tree_space(alloctree
) == 0 &&
1476 range_tree_space(*freetree
) == 0)
	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_tree. No other thread can
	 * be modifying this txg's alloctree, freetree, freed_tree, or
	 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
	 * space_map ASSERTs. We drop it whenever we call into the DMU,
	 * because the DMU can call down to us (e.g. via zio_free()) at
	 * any time.
	 */
1489 tx
= dmu_tx_create_assigned(spa_get_dsl(spa
), txg
);
1491 if (msp
->ms_sm
== NULL
) {
1492 uint64_t new_object
;
1494 new_object
= space_map_alloc(mos
, tx
);
1495 VERIFY3U(new_object
, !=, 0);
1497 VERIFY0(space_map_open(&msp
->ms_sm
, mos
, new_object
,
1498 msp
->ms_start
, msp
->ms_size
, vd
->vdev_ashift
,
1500 ASSERT(msp
->ms_sm
!= NULL
);
1503 mutex_enter(&msp
->ms_lock
);
1505 if (msp
->ms_loaded
&& spa_sync_pass(spa
) == 1 &&
1506 metaslab_should_condense(msp
)) {
1507 metaslab_condense(msp
, txg
, tx
);
1509 space_map_write(msp
->ms_sm
, alloctree
, SM_ALLOC
, tx
);
1510 space_map_write(msp
->ms_sm
, *freetree
, SM_FREE
, tx
);
1513 range_tree_vacate(alloctree
, NULL
, NULL
);
1515 if (msp
->ms_loaded
) {
		/*
		 * When the space map is loaded, we have an accurate
		 * histogram in the range tree. This gives us an opportunity
		 * to bring the space map's histogram up-to-date so we clear
		 * it first before updating it.
		 */
1522 space_map_histogram_clear(msp
->ms_sm
);
1523 space_map_histogram_add(msp
->ms_sm
, msp
->ms_tree
, tx
);
		/*
		 * Since the space map is not loaded we simply update the
		 * existing histogram with what was freed in this txg. This
		 * means that the on-disk histogram may not have an accurate
		 * view of the free space but it's close enough to allow
		 * us to make allocation decisions.
		 */
1532 space_map_histogram_add(msp
->ms_sm
, *freetree
, tx
);
1536 * For sync pass 1, we avoid traversing this txg's free range tree
1537 * and instead will just swap the pointers for freetree and
1538 * freed_tree. We can safely do this since the freed_tree is
1539 * guaranteed to be empty on the initial pass.
1541 if (spa_sync_pass(spa
) == 1) {
1542 range_tree_swap(freetree
, freed_tree
);
1544 range_tree_vacate(*freetree
, range_tree_add
, *freed_tree
);
1547 ASSERT0(range_tree_space(msp
->ms_alloctree
[txg
& TXG_MASK
]));
1548 ASSERT0(range_tree_space(msp
->ms_freetree
[txg
& TXG_MASK
]));
1550 mutex_exit(&msp
->ms_lock
);
1552 if (object
!= space_map_object(msp
->ms_sm
)) {
1553 object
= space_map_object(msp
->ms_sm
);
1554 dmu_write(mos
, vd
->vdev_ms_array
, sizeof (uint64_t) *
1555 msp
->ms_id
, sizeof (uint64_t), &object
, tx
);
1561 * Called after a transaction group has completely synced to mark
1562 * all of the metaslab's free space as usable.
1565 metaslab_sync_done(metaslab_t
*msp
, uint64_t txg
)
1567 metaslab_group_t
*mg
= msp
->ms_group
;
1568 vdev_t
*vd
= mg
->mg_vd
;
1569 range_tree_t
**freed_tree
;
1570 range_tree_t
**defer_tree
;
1571 int64_t alloc_delta
, defer_delta
;
1574 ASSERT(!vd
->vdev_ishole
);
1576 mutex_enter(&msp
->ms_lock
);
1579 * If this metaslab is just becoming available, initialize its
1580 * alloctrees, freetrees, and defertree and add its capacity to
1583 if (msp
->ms_freetree
[TXG_CLEAN(txg
) & TXG_MASK
] == NULL
) {
1584 for (t
= 0; t
< TXG_SIZE
; t
++) {
1585 ASSERT(msp
->ms_alloctree
[t
] == NULL
);
1586 ASSERT(msp
->ms_freetree
[t
] == NULL
);
1588 msp
->ms_alloctree
[t
] = range_tree_create(NULL
, msp
,
1590 msp
->ms_freetree
[t
] = range_tree_create(NULL
, msp
,
1594 for (t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1595 ASSERT(msp
->ms_defertree
[t
] == NULL
);
1597 msp
->ms_defertree
[t
] = range_tree_create(NULL
, msp
,
1601 vdev_space_update(vd
, 0, 0, msp
->ms_size
);
1604 freed_tree
= &msp
->ms_freetree
[TXG_CLEAN(txg
) & TXG_MASK
];
1605 defer_tree
= &msp
->ms_defertree
[txg
% TXG_DEFER_SIZE
];
1607 alloc_delta
= space_map_alloc_delta(msp
->ms_sm
);
1608 defer_delta
= range_tree_space(*freed_tree
) -
1609 range_tree_space(*defer_tree
);
1611 vdev_space_update(vd
, alloc_delta
+ defer_delta
, defer_delta
, 0);
1613 ASSERT0(range_tree_space(msp
->ms_alloctree
[txg
& TXG_MASK
]));
1614 ASSERT0(range_tree_space(msp
->ms_freetree
[txg
& TXG_MASK
]));
1617 * If there's a metaslab_load() in progress, wait for it to complete
1618 * so that we have a consistent view of the in-core space map.
1620 metaslab_load_wait(msp
);
	/*
	 * Move the frees from the defer_tree back to the free
	 * range tree (if it's loaded). Swap the freed_tree and the
	 * defer_tree -- this is safe to do because we've just emptied out
	 * the defer_tree.
	 */
1628 range_tree_vacate(*defer_tree
,
1629 msp
->ms_loaded
? range_tree_add
: NULL
, msp
->ms_tree
);
1630 range_tree_swap(freed_tree
, defer_tree
);
1632 space_map_update(msp
->ms_sm
);
1634 msp
->ms_deferspace
+= defer_delta
;
1635 ASSERT3S(msp
->ms_deferspace
, >=, 0);
1636 ASSERT3S(msp
->ms_deferspace
, <=, msp
->ms_size
);
1637 if (msp
->ms_deferspace
!= 0) {
1639 * Keep syncing this metaslab until all deferred frees
1640 * are back in circulation.
1642 vdev_dirty(vd
, VDD_METASLAB
, msp
, txg
+ 1);
1645 if (msp
->ms_loaded
&& msp
->ms_access_txg
< txg
) {
1646 for (t
= 1; t
< TXG_CONCURRENT_STATES
; t
++) {
1647 VERIFY0(range_tree_space(
1648 msp
->ms_alloctree
[(txg
+ t
) & TXG_MASK
]));
1651 if (!metaslab_debug_unload
)
1652 metaslab_unload(msp
);
1655 metaslab_group_sort(mg
, msp
, metaslab_weight(msp
));
1656 mutex_exit(&msp
->ms_lock
);
1661 metaslab_sync_reassess(metaslab_group_t
*mg
)
1663 int64_t failures
= mg
->mg_alloc_failures
;
1665 metaslab_group_alloc_update(mg
);
1666 atomic_add_64(&mg
->mg_alloc_failures
, -failures
);
1669 * Preload the next potential metaslabs
1671 metaslab_group_preload(mg
);
1675 metaslab_distance(metaslab_t
*msp
, dva_t
*dva
)
1677 uint64_t ms_shift
= msp
->ms_group
->mg_vd
->vdev_ms_shift
;
1678 uint64_t offset
= DVA_GET_OFFSET(dva
) >> ms_shift
;
1679 uint64_t start
= msp
->ms_id
;
1681 if (msp
->ms_group
->mg_vd
->vdev_id
!= DVA_GET_VDEV(dva
))
1682 return (1ULL << 63);
1685 return ((start
- offset
) << ms_shift
);
1687 return ((offset
- start
) << ms_shift
);
1692 metaslab_group_alloc(metaslab_group_t
*mg
, uint64_t psize
, uint64_t asize
,
1693 uint64_t txg
, uint64_t min_distance
, dva_t
*dva
, int d
, int flags
)
1695 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
1696 metaslab_t
*msp
= NULL
;
1697 uint64_t offset
= -1ULL;
1698 avl_tree_t
*t
= &mg
->mg_metaslab_tree
;
1699 uint64_t activation_weight
;
1700 uint64_t target_distance
;
1703 activation_weight
= METASLAB_WEIGHT_PRIMARY
;
1704 for (i
= 0; i
< d
; i
++) {
1705 if (DVA_GET_VDEV(&dva
[i
]) == mg
->mg_vd
->vdev_id
) {
1706 activation_weight
= METASLAB_WEIGHT_SECONDARY
;
1712 boolean_t was_active
;
1714 mutex_enter(&mg
->mg_lock
);
1715 for (msp
= avl_first(t
); msp
; msp
= AVL_NEXT(t
, msp
)) {
1716 if (msp
->ms_weight
< asize
) {
1717 spa_dbgmsg(spa
, "%s: failed to meet weight "
1718 "requirement: vdev %llu, txg %llu, mg %p, "
1719 "msp %p, psize %llu, asize %llu, "
1720 "failures %llu, weight %llu",
1721 spa_name(spa
), mg
->mg_vd
->vdev_id
, txg
,
1722 mg
, msp
, psize
, asize
,
1723 mg
->mg_alloc_failures
, msp
->ms_weight
);
1724 mutex_exit(&mg
->mg_lock
);
1729 * If the selected metaslab is condensing, skip it.
1731 if (msp
->ms_condensing
)
1734 was_active
= msp
->ms_weight
& METASLAB_ACTIVE_MASK
;
1735 if (activation_weight
== METASLAB_WEIGHT_PRIMARY
)
1738 target_distance
= min_distance
+
1739 (space_map_allocated(msp
->ms_sm
) != 0 ? 0 :
1742 for (i
= 0; i
< d
; i
++)
1743 if (metaslab_distance(msp
, &dva
[i
]) <
1749 mutex_exit(&mg
->mg_lock
);
1753 mutex_enter(&msp
->ms_lock
);
1756 * If we've already reached the allowable number of failed
1757 * allocation attempts on this metaslab group then we
1758 * consider skipping it. We skip it only if we're allowed
1759 * to "fast" gang, the physical size is larger than
1760 * a gang block, and we're attempting to allocate from
1761 * the primary metaslab.
1763 if (mg
->mg_alloc_failures
> zfs_mg_alloc_failures
&&
1764 CAN_FASTGANG(flags
) && psize
> SPA_GANGBLOCKSIZE
&&
1765 activation_weight
== METASLAB_WEIGHT_PRIMARY
) {
1766 spa_dbgmsg(spa
, "%s: skipping metaslab group: "
1767 "vdev %llu, txg %llu, mg %p, msp[%llu] %p, "
1768 "psize %llu, asize %llu, failures %llu",
1769 spa_name(spa
), mg
->mg_vd
->vdev_id
, txg
, mg
,
1770 msp
->ms_id
, msp
, psize
, asize
,
1771 mg
->mg_alloc_failures
);
1772 mutex_exit(&msp
->ms_lock
);
1777 * Ensure that the metaslab we have selected is still
1778 * capable of handling our request. It's possible that
1779 * another thread may have changed the weight while we
1780 * were blocked on the metaslab lock.
1782 if (msp
->ms_weight
< asize
|| (was_active
&&
1783 !(msp
->ms_weight
& METASLAB_ACTIVE_MASK
) &&
1784 activation_weight
== METASLAB_WEIGHT_PRIMARY
)) {
1785 mutex_exit(&msp
->ms_lock
);
1789 if ((msp
->ms_weight
& METASLAB_WEIGHT_SECONDARY
) &&
1790 activation_weight
== METASLAB_WEIGHT_PRIMARY
) {
1791 metaslab_passivate(msp
,
1792 msp
->ms_weight
& ~METASLAB_ACTIVE_MASK
);
1793 mutex_exit(&msp
->ms_lock
);
1797 if (metaslab_activate(msp
, activation_weight
) != 0) {
1798 mutex_exit(&msp
->ms_lock
);
1803 * If this metaslab is currently condensing then pick again as
1804 * we can't manipulate this metaslab until it's committed
1807 if (msp
->ms_condensing
) {
1808 mutex_exit(&msp
->ms_lock
);
1812 if ((offset
= metaslab_block_alloc(msp
, asize
)) != -1ULL)
1815 atomic_inc_64(&mg
->mg_alloc_failures
);
1817 metaslab_passivate(msp
, metaslab_block_maxsize(msp
));
1818 mutex_exit(&msp
->ms_lock
);
1821 if (range_tree_space(msp
->ms_alloctree
[txg
& TXG_MASK
]) == 0)
1822 vdev_dirty(mg
->mg_vd
, VDD_METASLAB
, msp
, txg
);
1824 range_tree_add(msp
->ms_alloctree
[txg
& TXG_MASK
], offset
, asize
);
1825 msp
->ms_access_txg
= txg
+ metaslab_unload_delay
;
1827 mutex_exit(&msp
->ms_lock
);
1833 * Allocate a block for the specified i/o.
1836 metaslab_alloc_dva(spa_t
*spa
, metaslab_class_t
*mc
, uint64_t psize
,
1837 dva_t
*dva
, int d
, dva_t
*hintdva
, uint64_t txg
, int flags
)
1839 metaslab_group_t
*mg
, *fast_mg
, *rotor
;
1843 int zio_lock
= B_FALSE
;
1844 boolean_t allocatable
;
1845 uint64_t offset
= -1ULL;
1849 ASSERT(!DVA_IS_VALID(&dva
[d
]));
1852 * For testing, make some blocks above a certain size be gang blocks.
1854 if (psize
>= metaslab_gang_bang
&& (ddi_get_lbolt() & 3) == 0)
1855 return (SET_ERROR(ENOSPC
));
1857 if (flags
& METASLAB_FASTWRITE
)
1858 mutex_enter(&mc
->mc_fastwrite_lock
);
1861 * Start at the rotor and loop through all mgs until we find something.
1862 * Note that there's no locking on mc_rotor or mc_aliquot because
1863 * nothing actually breaks if we miss a few updates -- we just won't
1864 * allocate quite as evenly. It all balances out over time.
1866 * If we are doing ditto or log blocks, try to spread them across
1867 * consecutive vdevs. If we're forced to reuse a vdev before we've
1868 * allocated all of our ditto blocks, then try and spread them out on
1869 * that vdev as much as possible. If it turns out to not be possible,
1870 * gradually lower our standards until anything becomes acceptable.
1871 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
1872 * gives us hope of containing our fault domains to something we're
1873 * able to reason about. Otherwise, any two top-level vdev failures
1874 * will guarantee the loss of data. With consecutive allocation,
1875 * only two adjacent top-level vdev failures will result in data loss.
1877 * If we are doing gang blocks (hintdva is non-NULL), try to keep
1878 * ourselves on the same vdev as our gang block header. That
1879 * way, we can hope for locality in vdev_cache, plus it makes our
1880 * fault domains something tractable.
1883 vd
= vdev_lookup_top(spa
, DVA_GET_VDEV(&hintdva
[d
]));
1886 * It's possible the vdev we're using as the hint no
1887 * longer exists (i.e. removed). Consult the rotor when
1893 if (flags
& METASLAB_HINTBP_AVOID
&&
1894 mg
->mg_next
!= NULL
)
1899 } else if (d
!= 0) {
1900 vd
= vdev_lookup_top(spa
, DVA_GET_VDEV(&dva
[d
- 1]));
1901 mg
= vd
->vdev_mg
->mg_next
;
1902 } else if (flags
& METASLAB_FASTWRITE
) {
1903 mg
= fast_mg
= mc
->mc_rotor
;
1906 if (fast_mg
->mg_vd
->vdev_pending_fastwrite
<
1907 mg
->mg_vd
->vdev_pending_fastwrite
)
1909 } while ((fast_mg
= fast_mg
->mg_next
) != mc
->mc_rotor
);
1916 * If the hint put us into the wrong metaslab class, or into a
1917 * metaslab group that has been passivated, just follow the rotor.
1919 if (mg
->mg_class
!= mc
|| mg
->mg_activation_count
<= 0)
1926 ASSERT(mg
->mg_activation_count
== 1);
1931 * Don't allocate from faulted devices.
1934 spa_config_enter(spa
, SCL_ZIO
, FTAG
, RW_READER
);
1935 allocatable
= vdev_allocatable(vd
);
1936 spa_config_exit(spa
, SCL_ZIO
, FTAG
);
1938 allocatable
= vdev_allocatable(vd
);
		/*
		 * Determine if the selected metaslab group is eligible
		 * for allocations. If we're ganging or have requested
		 * an allocation for the smallest gang block size
		 * then we don't want to avoid allocating to this
		 * metaslab group. If we're in this condition we should
		 * try to allocate from any device possible so that we
		 * don't inadvertently return ENOSPC and suspend the pool
		 * even though space is still available.
		 */
1951 if (allocatable
&& CAN_FASTGANG(flags
) &&
1952 psize
> SPA_GANGBLOCKSIZE
)
1953 allocatable
= metaslab_group_allocatable(mg
);
1959 * Avoid writing single-copy data to a failing vdev
1960 * unless the user instructs us that it is okay.
1962 if ((vd
->vdev_stat
.vs_write_errors
> 0 ||
1963 vd
->vdev_state
< VDEV_STATE_HEALTHY
) &&
1964 d
== 0 && dshift
== 3 &&
1965 !(zfs_write_to_degraded
&& vd
->vdev_state
==
1966 VDEV_STATE_DEGRADED
)) {
1971 ASSERT(mg
->mg_class
== mc
);
1973 distance
= vd
->vdev_asize
>> dshift
;
1974 if (distance
<= (1ULL << vd
->vdev_ms_shift
))
1979 asize
= vdev_psize_to_asize(vd
, psize
);
1980 ASSERT(P2PHASE(asize
, 1ULL << vd
->vdev_ashift
) == 0);
1982 offset
= metaslab_group_alloc(mg
, psize
, asize
, txg
, distance
,
1984 if (offset
!= -1ULL) {
1986 * If we've just selected this metaslab group,
1987 * figure out whether the corresponding vdev is
1988 * over- or under-used relative to the pool,
1989 * and set an allocation bias to even it out.
1991 if (mc
->mc_aliquot
== 0) {
1992 vdev_stat_t
*vs
= &vd
->vdev_stat
;
1995 vu
= (vs
->vs_alloc
* 100) / (vs
->vs_space
+ 1);
1996 cu
= (mc
->mc_alloc
* 100) / (mc
->mc_space
+ 1);
				/*
				 * Calculate how much more or less we should
				 * try to allocate from this device during
				 * this iteration around the rotor.
				 * For example, if a device is 80% full
				 * and the pool is 20% full then we should
				 * reduce allocations by 60% on this device.
				 *
				 * mg_bias = (20 - 80) * 512K / 100 = -307K
				 *
				 * This reduces allocations by 307K for this
				 * iteration around the rotor.
				 */
2011 mg
->mg_bias
= ((cu
- vu
) *
2012 (int64_t)mg
->mg_aliquot
) / 100;
2015 if ((flags
& METASLAB_FASTWRITE
) ||
2016 atomic_add_64_nv(&mc
->mc_aliquot
, asize
) >=
2017 mg
->mg_aliquot
+ mg
->mg_bias
) {
2018 mc
->mc_rotor
= mg
->mg_next
;
2022 DVA_SET_VDEV(&dva
[d
], vd
->vdev_id
);
2023 DVA_SET_OFFSET(&dva
[d
], offset
);
2024 DVA_SET_GANG(&dva
[d
], !!(flags
& METASLAB_GANG_HEADER
));
2025 DVA_SET_ASIZE(&dva
[d
], asize
);
2027 if (flags
& METASLAB_FASTWRITE
) {
2028 atomic_add_64(&vd
->vdev_pending_fastwrite
,
2030 mutex_exit(&mc
->mc_fastwrite_lock
);
2036 mc
->mc_rotor
= mg
->mg_next
;
2038 } while ((mg
= mg
->mg_next
) != rotor
);
2042 ASSERT(dshift
< 64);
2046 if (!allocatable
&& !zio_lock
) {
2052 bzero(&dva
[d
], sizeof (dva_t
));
2054 if (flags
& METASLAB_FASTWRITE
)
2055 mutex_exit(&mc
->mc_fastwrite_lock
);
2057 return (SET_ERROR(ENOSPC
));
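
/*
 * Worked example of the rotor bias computed above, using the same
 * hypothetical numbers as the comment in metaslab_alloc_dva() and
 * assuming an mg_aliquot of 512K: with the vdev 80% full (vu = 80)
 * and the pool 20% full (cu = 20),
 *
 *	mg_bias = (20 - 80) * 512K / 100 = -307K
 *
 * so the comparison against mg_aliquot + mg_bias advances the rotor
 * after only about 512K - 307K = 205K has been allocated from this
 * group, while an under-used vdev gets a positive bias and holds the
 * rotor longer.  Over many iterations this evens out space usage
 * across the vdevs in the class.
 */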
/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;

	ASSERT(DVA_IS_VALID(dva));

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset);
		ASSERT(0);
		return;
	}

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if (now) {
		range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
		    offset, size);

		VERIFY(!msp->ms_condensing);
		VERIFY3U(offset, >=, msp->ms_start);
		VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
		VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
		    msp->ms_size);
		VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
		range_tree_add(msp->ms_tree, offset, size);
	} else {
		if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_freetree[txg & TXG_MASK],
		    offset, size);
	}

	mutex_exit(&msp->ms_lock);
}
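
/*
 * A minimal usage sketch, assuming a dva[] entry previously filled in by
 * metaslab_alloc_dva() in this txg: freeing with now == B_TRUE undoes the
 * allocation immediately and returns the segment to ms_tree, while
 * now == B_FALSE queues it in ms_freetree[txg & TXG_MASK] so the space is
 * only reclaimed when that txg syncs:
 *
 *	metaslab_free_dva(spa, &dva[d], txg, B_TRUE);	(unwind this txg)
 *	metaslab_free_dva(spa, &dva[d], txg, B_FALSE);	(normal deferred free)
 */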
/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;
	int error = 0;

	ASSERT(DVA_IS_VALID(dva));

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
		return (SET_ERROR(ENXIO));

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
		error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);

	if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
		error = SET_ERROR(ENOENT);

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	VERIFY(!msp->ms_condensing);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
	range_tree_remove(msp->ms_tree, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);

	return (0);
}
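
/*
 * A txg of 0 is a dry run: the segment is only checked against ms_tree
 * (loading and activating the metaslab if necessary) and nothing is
 * modified.  A hypothetical caller replaying one DVA of an intent log
 * block would therefore do something like:
 *
 *	error = metaslab_claim_dva(spa, &dva[d], 0);	(claimable?)
 *	...
 *	error = metaslab_claim_dva(spa, &dva[d], txg);	(move it to
 *							 ms_alloctree)
 *
 * which is the two-pass pattern metaslab_claim() uses below.
 */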
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int d, error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

	for (d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags);
		if (error != 0) {
			for (d--; d >= 0; d--) {
				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, txg);

	return (0);
}
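
/*
 * A minimal usage sketch with hypothetical arguments, in the style of an
 * intent log allocation: ask for a single copy of "size" bytes from the
 * log class, hint away from the previous log block, and account it as a
 * pending fastwrite so the next log allocation prefers a less busy vdev:
 *
 *	error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
 *	    txg, old_bp, METASLAB_HINTBP_AVOID | METASLAB_FASTWRITE);
 *
 * On failure every DVA already placed in new_bp has been freed and zeroed
 * again, so the caller never has to unwind a partial allocation.
 */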
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int d, ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++)
		metaslab_free_dva(spa, &dva[d], txg, now);

	spa_config_exit(spa, SCL_FREE, FTAG);
}
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int d, error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			break;

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}
void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		atomic_add_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}
void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
		atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}
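
/*
 * A sketch of how the fastwrite accounting fits together, assuming a
 * hypothetical caller tracking one in-flight log block "bp": every vdev
 * named by the block pointer has vdev_pending_fastwrite raised by the
 * block's psize, metaslab_alloc_dva() consults those counters to steer
 * METASLAB_FASTWRITE allocations toward the least loaded vdev, and the
 * caller must drop the accounting once the write is no longer pending:
 *
 *	metaslab_fastwrite_mark(spa, bp);
 *	... issue and complete the write ...
 *	metaslab_fastwrite_unmark(spa, bp);
 */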
void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
	int i, j;

	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
		metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

		if (msp->ms_loaded)
			range_tree_verify(msp->ms_tree, offset, size);

		for (j = 0; j < TXG_SIZE; j++)
			range_tree_verify(msp->ms_freetree[j], offset, size);
		for (j = 0; j < TXG_DEFER_SIZE; j++)
			range_tree_verify(msp->ms_defertree[j], offset, size);
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
}
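
/*
 * The loop above is a double-free check: a block that is about to be freed
 * must not already be present in the allocatable tree (ms_tree), in any
 * per-txg freetree, or in any defer tree.  (This assumes range_tree_verify()
 * complains when it does find the segment in the tree.)
 */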
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(metaslab_debug_load, int, 0644);
module_param(metaslab_debug_unload, int, 0644);
MODULE_PARM_DESC(metaslab_debug_load,
	"load all metaslabs when pool is first opened");
MODULE_PARM_DESC(metaslab_debug_unload,
	"prevent metaslabs from being unloaded");

module_param(zfs_mg_noalloc_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
	"percentage of free space for metaslab group to allow allocation");
#endif /* _KERNEL && HAVE_SPL */
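
/*
 * Hypothetical examples of setting these tunables on a Linux system, either
 * at module load time or afterwards through sysfs:
 *
 *	modprobe zfs zfs_mg_noalloc_threshold=10
 *	echo 1 > /sys/module/zfs/parameters/metaslab_debug_load
 */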