/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#define	WITH_DF_BLOCK_ALLOCATOR

#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
/*
 * Metaslab granularity, in bytes. This is roughly similar to what would be
 * referred to as the "stripe size" in traditional RAID arrays. In normal
 * operation, we will try to write this amount of data to a top-level vdev
 * before moving on to the next one.
 */
unsigned long metaslab_aliquot = 512 << 10;
/*
 * For testing, make some blocks above a certain size be gang blocks.
 */
unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
/*
 * Since we can touch multiple metaslabs (and their respective space maps)
 * with each transaction group, we benefit from having a smaller space map
 * block size since it allows us to issue more I/O operations scattered
 * around the disk.
 */
int zfs_metaslab_sm_blksz = (1 << 12);
/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;
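
/*
 * For example, with the default of 200, an in-core representation that
 * would condense down to 1MB on disk is not rewritten until the existing
 * on-disk space map has grown to at least 2MB, i.e. 200% of the optimal
 * size.
 */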
/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
int zfs_metaslab_condense_block_threshold = 4;
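
/*
 * Concretely: with a 4K space map block size, the default threshold of 4
 * means a space map is only worth condensing once its uncondensed form
 * occupies more than four such blocks (16K) on disk.
 */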
/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;
/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or equal to
 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
 * then it will be skipped unless all metaslab groups within the metaslab
 * class have also crossed this threshold.
 */
int zfs_mg_fragmentation_threshold = 85;
/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;
/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = 0;
/*
 * When set will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;
/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;
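
/*
 * For example, with the default of 4, a 10GB metaslab keeps allocating
 * first-fit until less than roughly 400MB of it remains free, at which
 * point metaslab_df_alloc() below switches to the size-sorted (best-fit)
 * tree.
 */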
/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;
/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;
/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;
/*
 * Enable/disable preloading of metaslabs.
 */
int metaslab_preload_enabled = B_TRUE;
/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
int metaslab_fragmentation_factor_enabled = B_TRUE;
/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
int metaslab_lba_weighting_enabled = B_TRUE;
/*
 * Enable/disable metaslab group biasing.
 */
int metaslab_bias_enabled = B_TRUE;
/*
 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
 */
boolean_t zfs_remap_blkptr_enable = B_TRUE;
/*
 * Enable/disable segment-based metaslab selection.
 */
int zfs_metaslab_segment_weight_enabled = B_TRUE;
/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
int zfs_metaslab_switch_threshold = 2;
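
/*
 * Roughly speaking, with the default of 2 an allocator stays on its active
 * metaslab until allocations have drained about two of the power-of-two
 * histogram buckets encoded in the metaslab's segment-based weight, rather
 * than reselecting a "better" metaslab after every allocation.
 */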
/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
#ifdef _METASLAB_TRACING
boolean_t metaslab_trace_enabled = B_TRUE;
#endif
/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached allowing for further investigation.
 */
#ifdef _METASLAB_TRACING
uint64_t metaslab_trace_max_entries = 5000;
#endif
static uint64_t metaslab_weight(metaslab_t *);
static void metaslab_set_fragmentation(metaslab_t *);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);

static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);

#ifdef _METASLAB_TRACING
kmem_cache_t *metaslab_alloc_trace_cache;
#endif
/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
	mc->mc_alloc_slots = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (zfs_refcount_t), KM_SLEEP);
	mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (uint64_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++)
		zfs_refcount_create_tracked(&mc->mc_alloc_slots[i]);

	return (mc);
}
void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
		zfs_refcount_destroy(&mc->mc_alloc_slots[i]);
	kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
	    sizeof (zfs_refcount_t));
	kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
	    sizeof (uint64_t));
	mutex_destroy(&mc->mc_lock);
	kmem_free(mc, sizeof (metaslab_class_t));
}
int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}
void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}
uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}
void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);

	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels,
		 * or vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}
/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * Calculate if we have enough space to add additional
		 * metaslabs. We report the expandable space in terms
		 * of the metaslab size since that's the unit of expansion.
		 */
		space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
		    1ULL << tvd->vdev_ms_shift);
	}
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (space);
}
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = (const metaslab_t *)x1;
	const metaslab_t *m2 = (const metaslab_t *)x2;

	int sort1 = 0;
	int sort2 = 0;
	if (m1->ms_allocator != -1 && m1->ms_primary)
		sort1 = 1;
	else if (m1->ms_allocator != -1 && !m1->ms_primary)
		sort1 = 2;
	if (m2->ms_allocator != -1 && m2->ms_primary)
		sort2 = 1;
	else if (m2->ms_allocator != -1 && !m2->ms_primary)
		sort2 = 2;

	/*
	 * Sort inactive metaslabs first, then primaries, then secondaries. When
	 * selecting a metaslab to allocate from, an allocator first tries its
	 * primary, then secondary active metaslab. If it doesn't have active
	 * metaslabs, or can't allocate from them, it searches for an inactive
	 * metaslab to activate. If it can't find a suitable one, it will steal
	 * a primary or secondary metaslab from another allocator.
	 */
	if (sort1 < sort2)
		return (-1);
	if (sort1 > sort2)
		return (1);

	int cmp = AVL_CMP(m2->ms_weight, m1->ms_weight);
	if (likely(cmp))
		return (cmp);

	IMPLY(AVL_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);

	return (AVL_CMP(m1->ms_start, m2->ms_start));
}
/*
 * Verify that the space accounting on disk matches the in-core range_trees.
 */
void
metaslab_verify_space(metaslab_t *msp, uint64_t txg)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	uint64_t allocated = 0;
	uint64_t sm_free_space, msp_free_space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
		return;

	/*
	 * We can only verify the metaslab space when we're called
	 * from syncing context with a loaded metaslab that has an allocated
	 * space map. Calling this in non-syncing context does not
	 * provide a consistent view of the metaslab since we're performing
	 * allocations in the future.
	 */
	if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
	    !msp->ms_loaded)
		return;

	sm_free_space = msp->ms_size - space_map_allocated(msp->ms_sm) -
	    space_map_alloc_delta(msp->ms_sm);

	/*
	 * Account for future allocations since we would have already
	 * deducted that space from the ms_freetree.
	 */
	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
		allocated +=
		    range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
	}

	msp_free_space = range_tree_space(msp->ms_allocatable) + allocated +
	    msp->ms_deferspace + range_tree_space(msp->ms_freed);

	VERIFY3U(sm_free_space, ==, msp_free_space);
}
/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the capacity is above
 * the zfs_mg_noalloc_threshold and the group's fragmentation value
 * (if valid) does not exceed zfs_mg_fragmentation_threshold. If a
 * metaslab group transitions from allocatable to non-allocatable or
 * vice versa then the metaslab group's class is updated to reflect
 * the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;
	boolean_t was_initialized;

	ASSERT(vd == vd->vdev_top);
	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
	    SCL_ALLOC);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;
	was_initialized = mg->mg_initialized;

	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);

	mutex_enter(&mc->mc_lock);

	/*
	 * If the metaslab group was just added then it won't
	 * have any space until we finish syncing out this txg.
	 * At that point we will consider it initialized and available
	 * for allocations. We also don't consider non-activated
	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
	 * to be initialized, because they can't be used for allocation.
	 */
	mg->mg_initialized = metaslab_group_initialized(mg);
	if (!was_initialized && mg->mg_initialized) {
		mc->mc_groups++;
	} else if (was_initialized && !mg->mg_initialized) {
		ASSERT3U(mc->mc_groups, >, 0);
		mc->mc_groups--;
	}
	if (mg->mg_initialized)
		mg->mg_no_free_space = B_FALSE;

	/*
	 * A metaslab group is considered allocatable if it has plenty
	 * of free space or is not heavily fragmented. We only take
	 * fragmentation into account if the metaslab group has a valid
	 * fragmentation metric (i.e. a value between 0 and 100).
	 */
	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

	/*
	 * The mc_alloc_groups maintains a count of the number of
	 * groups in this metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;
	mutex_exit(&mc->mc_lock);

	mutex_exit(&mg->mg_lock);
}
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&mg->mg_ms_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mg->mg_ms_initialize_cv, NULL, CV_DEFAULT, NULL);
	mg->mg_primaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
	    KM_SLEEP);
	mg->mg_secondaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
	    KM_SLEEP);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;
	mg->mg_initialized = B_FALSE;
	mg->mg_no_free_space = B_TRUE;
	mg->mg_allocators = allocators;

	mg->mg_alloc_queue_depth = kmem_zalloc(allocators *
	    sizeof (zfs_refcount_t), KM_SLEEP);
	mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
	    sizeof (uint64_t), KM_SLEEP);
	for (int i = 0; i < allocators; i++) {
		zfs_refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
		mg->mg_cur_max_alloc_queue_depth[i] = 0;
	}

	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
	    maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);

	return (mg);
}
void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	taskq_destroy(mg->mg_taskq);
	avl_destroy(&mg->mg_metaslab_tree);
	kmem_free(mg->mg_primaries, mg->mg_allocators * sizeof (metaslab_t *));
	kmem_free(mg->mg_secondaries, mg->mg_allocators *
	    sizeof (metaslab_t *));
	mutex_destroy(&mg->mg_lock);
	mutex_destroy(&mg->mg_ms_initialize_lock);
	cv_destroy(&mg->mg_ms_initialize_cv);

	for (int i = 0; i < mg->mg_allocators; i++) {
		zfs_refcount_destroy(&mg->mg_alloc_queue_depth[i]);
		mg->mg_cur_max_alloc_queue_depth[i] = 0;
	}
	kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
	    sizeof (zfs_refcount_t));
	kmem_free(mg->mg_cur_max_alloc_queue_depth, mg->mg_allocators *
	    sizeof (uint64_t));

	kmem_free(mg, sizeof (metaslab_group_t));
}
void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER), !=, 0);

	ASSERT(mc->mc_rotor != mg);
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
}
/*
 * Passivate a metaslab group and remove it from the allocation rotor.
 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
 * a metaslab group. This function will momentarily drop spa_config_locks
 * that are lower than the SCL_ALLOC lock (see comment below).
 */
void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;
	int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);

	ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
	    (SCL_ALLOC | SCL_ZIO));

	if (--mg->mg_activation_count != 0) {
		ASSERT(mc->mc_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	/*
	 * The spa_config_lock is an array of rwlocks, ordered as
	 * follows (from highest to lowest):
	 *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
	 *	SCL_ZIO > SCL_FREE > SCL_VDEV
	 * (For more information about the spa_config_lock see spa_misc.c)
	 * The higher the lock, the broader its coverage. When we passivate
	 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
	 * config locks. However, the metaslab group's taskq might be trying
	 * to preload metaslabs so we must drop the SCL_ZIO lock and any
	 * lower locks to allow the I/O to complete. At a minimum,
	 * we continue to hold the SCL_ALLOC lock, which prevents any future
	 * allocations from taking place and any changes to the vdev tree.
	 */
	spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
	taskq_wait_outstanding(mg->mg_taskq, 0);
	spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
	metaslab_group_alloc_update(mg);
	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_t *msp = mg->mg_primaries[i];

		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
		msp = mg->mg_secondaries[i];
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
	}

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}
boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	vdev_stat_t *vs = &vd->vdev_stat;

	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}
uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
	return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
}
void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
	uint64_t *mg_hist;
	vdev_t *vd = mg->mg_vd;
	uint64_t ashift = vd->vdev_ashift;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
	    SPACE_MAP_HISTOGRAM_SIZE + ashift);

	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		/* skip if not active or not a member */
		if (msp->ms_sm == NULL || msp->ms_group != mg)
			continue;

		for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
			mg_hist[i + ashift] +=
			    msp->ms_sm->sm_phys->smp_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);

	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		mg->mg_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		ASSERT3U(mg->mg_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		ASSERT3U(mc->mc_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);

		mg->mg_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	ASSERT(msp->ms_group == NULL);
	mutex_enter(&mg->mg_lock);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);

	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_add(mg, msp);
	mutex_exit(&msp->ms_lock);
}
static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_remove(mg, msp);
	mutex_exit(&msp->ms_lock);

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp,
    uint64_t weight)
{
	ASSERT(MUTEX_HELD(&mg->mg_lock));
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
}
static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 511].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	metaslab_group_sort_impl(mg, msp, weight);
	mutex_exit(&mg->mg_lock);
}
/*
 * Calculate the fragmentation for a given metaslab group. We can use
 * a simple average here since all metaslabs within the group must have
 * the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in
 * this group have a fragmentation metric.
 */
static uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	uint64_t fragmentation = 0;
	uint64_t valid_ms = 0;

	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
			continue;
		if (msp->ms_group != mg)
			continue;

		valid_ms++;
		fragmentation += msp->ms_fragmentation;
	}

	if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
		return (ZFS_FRAG_INVALID);

	fragmentation /= valid_ms;
	ASSERT3U(fragmentation, <=, 100);
	return (fragmentation);
}
/*
 * Determine if a given metaslab group should skip allocations. A metaslab
 * group should avoid allocations if its free capacity is less than the
 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
 * that can still handle allocations. If the allocation throttle is enabled
 * then we skip allocations to devices that have reached their maximum
 * allocation queue depth unless the selected metaslab group is the only
 * eligible group remaining.
 */
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
    uint64_t psize, int allocator, int d)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_class_t *mc = mg->mg_class;

	/*
	 * We can only consider skipping this metaslab group if it's
	 * in the normal metaslab class and there are other metaslab
	 * groups to select from. Otherwise, we always consider it eligible
	 * for allocations.
	 */
	if ((mc != spa_normal_class(spa) &&
	    mc != spa_special_class(spa) &&
	    mc != spa_dedup_class(spa)) ||
	    mc->mc_groups <= 1)
		return (B_TRUE);

	/*
	 * If the metaslab group's mg_allocatable flag is set (see comments
	 * in metaslab_group_alloc_update() for more information) and
	 * the allocation throttle is disabled then allow allocations to this
	 * device. However, if the allocation throttle is enabled then
	 * check if we have reached our allocation limit (mg_alloc_queue_depth)
	 * to determine if we should allow allocations to this metaslab group.
	 * If all metaslab groups are no longer considered allocatable
	 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
	 * gang block size then we allow allocations on this metaslab group
	 * regardless of the mg_allocatable or throttle settings.
	 */
	if (mg->mg_allocatable) {
		metaslab_group_t *mgp;
		int64_t qdepth;
		uint64_t qmax = mg->mg_cur_max_alloc_queue_depth[allocator];

		if (!mc->mc_alloc_throttle_enabled)
			return (B_TRUE);

		/*
		 * If this metaslab group does not have any free space, then
		 * there is no point in looking further.
		 */
		if (mg->mg_no_free_space)
			return (B_FALSE);

		/*
		 * Relax allocation throttling for ditto blocks. Due to
		 * random imbalances in allocation it tends to push copies
		 * to one vdev, that looks a bit better at the moment.
		 */
		qmax = qmax * (4 + d) / 4;

		qdepth = zfs_refcount_count(
		    &mg->mg_alloc_queue_depth[allocator]);

		/*
		 * If this metaslab group is below its qmax or it's
		 * the only allocatable metaslab group, then attempt
		 * to allocate from it.
		 */
		if (qdepth < qmax || mc->mc_alloc_groups == 1)
			return (B_TRUE);
		ASSERT3U(mc->mc_alloc_groups, >, 1);

		/*
		 * Since this metaslab group is at or over its qmax, we
		 * need to determine if there are metaslab groups after this
		 * one that might be able to handle this allocation. This is
		 * racy since we can't hold the locks for all metaslab
		 * groups at the same time when we make this check.
		 */
		for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
			qmax = mgp->mg_cur_max_alloc_queue_depth[allocator];
			qmax = qmax * (4 + d) / 4;
			qdepth = zfs_refcount_count(
			    &mgp->mg_alloc_queue_depth[allocator]);

			/*
			 * If there is another metaslab group that
			 * might be able to handle the allocation, then
			 * we return false so that we skip this group.
			 */
			if (qdepth < qmax && !mgp->mg_no_free_space)
				return (B_FALSE);
		}

		/*
		 * We didn't find another group to handle the allocation
		 * so we can't skip this metaslab group even though
		 * we are at or over our qmax.
		 */
		return (B_TRUE);
	} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
		return (B_TRUE);
	}
	return (B_FALSE);
}
/*
 * ==========================================================================
 * Range tree callbacks
 * ==========================================================================
 */

/*
 * Comparison function for the private size-ordered tree. Tree is sorted
 * by size, larger sizes at the end of the tree.
 */
static int
metaslab_rangesize_compare(const void *x1, const void *x2)
{
	const range_seg_t *r1 = x1;
	const range_seg_t *r2 = x2;
	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
	uint64_t rs_size2 = r2->rs_end - r2->rs_start;

	int cmp = AVL_CMP(rs_size1, rs_size2);
	if (likely(cmp))
		return (cmp);

	return (AVL_CMP(r1->rs_start, r2->rs_start));
}
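
/*
 * Equal-size segments fall back to comparing start offsets, so the order
 * is total: e.g. the 4K segments [0, 4K) and [8K, 12K) compare by start
 * address, which lets avl_find() locate an exact segment in this tree.
 */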
/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */

/*
 * Return the maximum contiguous segment within the metaslab.
 */
uint64_t
metaslab_block_maxsize(metaslab_t *msp)
{
	avl_tree_t *t = &msp->ms_allocatable_by_size;
	range_seg_t *rs;

	if (t == NULL || (rs = avl_last(t)) == NULL)
		return (0ULL);

	return (rs->rs_end - rs->rs_start);
}
static range_seg_t *
metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
{
	range_seg_t *rs, rsearch;
	avl_index_t where;

	rsearch.rs_start = start;
	rsearch.rs_end = start + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL) {
		rs = avl_nearest(t, where, AVL_AFTER);
	}

	return (rs);
}
#if defined(WITH_FF_BLOCK_ALLOCATOR) || \
    defined(WITH_DF_BLOCK_ALLOCATOR) || \
    defined(WITH_CF_BLOCK_ALLOCATOR)
/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate. This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
	range_seg_t *rs = metaslab_block_find(t, *cursor, size);

	while (rs != NULL) {
		uint64_t offset = P2ROUNDUP(rs->rs_start, align);

		if (offset + size <= rs->rs_end) {
			*cursor = offset + size;
			return (offset);
		}
		rs = AVL_NEXT(t, rs);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_block_picker(t, cursor, size, align));
}
#endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */
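
/*
 * As a worked example: with *cursor = 0x20000, size = 0x2000 and
 * align = 0x2000, the picker starts at the first free segment at or after
 * 0x20000, rounds each candidate's start up to a 0x2000 boundary, returns
 * the first aligned offset that still fits within its segment, and leaves
 * the cursor pointing just past the allocated range.
 */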
#if defined(WITH_FF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static uint64_t
metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not guarantee that other allocation sizes
	 * may exist in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	avl_tree_t *t = &msp->ms_allocatable->rt_root;

	return (metaslab_block_picker(t, cursor, size, align));
}
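
/*
 * Note that (size & -size) isolates the lowest set bit of size: a 24K
 * (0x6000) request yields an 8K (0x2000) alignment, while a power-of-two
 * request such as 128K aligns to its own size. Each power-of-two alignment
 * therefore maintains its own cursor in ms_lbas[].
 */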
static metaslab_ops_t metaslab_ff_ops = {
	metaslab_ff_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
#endif /* WITH_FF_BLOCK_ALLOCATOR */
#if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first fit allocation scheme until space gets low and then
 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not guarantee that other allocation sizes
	 * may exist in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	range_tree_t *rt = msp->ms_allocatable;
	avl_tree_t *t = &rt->rt_root;
	uint64_t max_size = metaslab_block_maxsize(msp);
	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==,
	    avl_numnodes(&msp->ms_allocatable_by_size));

	if (max_size < size)
		return (-1ULL);

	/*
	 * If we're running low on space switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		t = &msp->ms_allocatable_by_size;
		*cursor = 0;
	}

	return (metaslab_block_picker(t, cursor, size, 1ULL));
}
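
/*
 * For example, with metaslab_df_alloc_threshold at 128K (the old maximum
 * block size) and metaslab_df_free_pct at 4: as long as a 128K contiguous
 * segment exists and at least 4% of the metaslab is free, the offset-ordered
 * tree keeps the allocator in first-fit mode; otherwise it drops into the
 * size-ordered tree and effectively becomes best-fit.
 */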
static metaslab_ops_t metaslab_df_ops = {
	metaslab_df_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
#endif /* WITH_DF_BLOCK_ALLOCATOR */
#if defined(WITH_CF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Cursor fit block allocator -
 * Select the largest region in the metaslab, set the cursor to the beginning
 * of the range and the cursor_end to the end of the range. As allocations
 * are made advance the cursor. Continue allocating from the cursor until
 * the range is exhausted and then find a new range.
 * ==========================================================================
 */
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
	range_tree_t *rt = msp->ms_allocatable;
	avl_tree_t *t = &msp->ms_allocatable_by_size;
	uint64_t *cursor = &msp->ms_lbas[0];
	uint64_t *cursor_end = &msp->ms_lbas[1];
	uint64_t offset = 0;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));

	ASSERT3U(*cursor_end, >=, *cursor);

	if ((*cursor + size) > *cursor_end) {
		range_seg_t *rs;

		rs = avl_last(&msp->ms_allocatable_by_size);
		if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
			return (-1ULL);

		*cursor = rs->rs_start;
		*cursor_end = rs->rs_end;
	}

	offset = *cursor;
	*cursor += size;

	return (offset);
}
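
/*
 * In other words, once the cached range is exhausted the allocator grabs
 * the current largest free segment (avl_last() of the size-sorted tree)
 * and subsequent calls simply carve sequential chunks out of it.
 */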
static metaslab_ops_t metaslab_cf_ops = {
	metaslab_cf_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
#endif /* WITH_CF_BLOCK_ALLOCATOR */
#if defined(WITH_NDF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * New dynamic fit allocator -
 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
 * contiguous blocks. If no region is found then just use the largest segment
 * that remains.
 * ==========================================================================
 */

/*
 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
 * to request from the allocator.
 */
uint64_t metaslab_ndf_clump_shift = 4;

static uint64_t
metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
{
	avl_tree_t *t = &msp->ms_allocatable->rt_root;
	avl_index_t where;
	range_seg_t *rs, rsearch;
	uint64_t hbit = highbit64(size);
	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
	uint64_t max_size = metaslab_block_maxsize(msp);

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==,
	    avl_numnodes(&msp->ms_allocatable_by_size));

	if (max_size < size)
		return (-1ULL);

	rsearch.rs_start = *cursor;
	rsearch.rs_end = *cursor + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
		t = &msp->ms_allocatable_by_size;

		rsearch.rs_start = 0;
		rsearch.rs_end = MIN(max_size,
		    1ULL << (hbit + metaslab_ndf_clump_shift));
		rs = avl_find(t, &rsearch, &where);
		if (rs == NULL)
			rs = avl_nearest(t, where, AVL_AFTER);
		ASSERT(rs != NULL);
	}

	if ((rs->rs_end - rs->rs_start) >= size) {
		*cursor = rs->rs_start + size;
		return (rs->rs_start);
	}
	return (-1ULL);
}

static metaslab_ops_t metaslab_ndf_ops = {
	metaslab_ndf_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
#endif /* WITH_NDF_BLOCK_ALLOCATOR */

/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */

/*
 * Wait for any in-progress metaslab loads to complete.
 */
static void
metaslab_load_wait(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	while (msp->ms_loading) {
		ASSERT(!msp->ms_loaded);
		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
	}
}

int
metaslab_load(metaslab_t *msp)
{
	int error = 0;
	boolean_t success = B_FALSE;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(!msp->ms_loaded);
	ASSERT(!msp->ms_loading);

	msp->ms_loading = B_TRUE;
	/*
	 * Nobody else can manipulate a loading metaslab, so it's now safe
	 * to drop the lock. This way we don't have to hold the lock while
	 * reading the spacemap from disk.
	 */
	mutex_exit(&msp->ms_lock);

	/*
	 * If the space map has not been allocated yet, then treat
	 * all the space in the metaslab as free and add it to ms_allocatable.
	 */
	if (msp->ms_sm != NULL) {
		error = space_map_load(msp->ms_sm, msp->ms_allocatable,
		    SM_FREE);
	} else {
		range_tree_add(msp->ms_allocatable,
		    msp->ms_start, msp->ms_size);
	}

	success = (error == 0);

	mutex_enter(&msp->ms_lock);
	msp->ms_loading = B_FALSE;

	if (success) {
		ASSERT3P(msp->ms_group, !=, NULL);
		msp->ms_loaded = B_TRUE;

		/*
		 * If the metaslab already has a spacemap, then we need to
		 * remove all segments from the defer tree; otherwise, the
		 * metaslab is completely empty and we can skip this.
		 */
		if (msp->ms_sm != NULL) {
			for (int t = 0; t < TXG_DEFER_SIZE; t++) {
				range_tree_walk(msp->ms_defer[t],
				    range_tree_remove, msp->ms_allocatable);
			}
		}
		msp->ms_max_size = metaslab_block_maxsize(msp);
	}
	cv_broadcast(&msp->ms_load_cv);
	return (error);
}
void
metaslab_unload(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	range_tree_vacate(msp->ms_allocatable, NULL, NULL);
	msp->ms_loaded = B_FALSE;

	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
	msp->ms_max_size = 0;
}
static void
metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta)
{
	vdev_space_update(vd, alloc_delta, defer_delta, space_delta);

	ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
	ASSERT(vd->vdev_ms_count != 0);

	metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
	    vdev_deflated_space(vd, space_delta));
}
int
metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
    metaslab_t **msp)
{
	vdev_t *vd = mg->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	metaslab_t *ms;
	int error;

	ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
	mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);

	ms->ms_id = id;
	ms->ms_start = id << vd->vdev_ms_shift;
	ms->ms_size = 1ULL << vd->vdev_ms_shift;
	ms->ms_allocator = -1;
	ms->ms_new = B_TRUE;

	/*
	 * We only open space map objects that already exist. All others
	 * will be opened when we finally allocate an object for it.
	 */
	if (object != 0) {
		error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
		    ms->ms_size, vd->vdev_ashift);

		if (error != 0) {
			kmem_free(ms, sizeof (metaslab_t));
			return (error);
		}

		ASSERT(ms->ms_sm != NULL);
	}

	/*
	 * We create the main range tree here, but we don't create the
	 * other range trees until metaslab_sync_done(). This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	ms->ms_allocatable = range_tree_create_impl(&rt_avl_ops,
	    &ms->ms_allocatable_by_size, metaslab_rangesize_compare, 0);
	metaslab_group_add(mg, ms);

	metaslab_set_fragmentation(ms);

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 * The metaslab's weight will also be initialized when we sync
	 * out this txg. This ensures that we don't attempt to allocate
	 * from it before we have initialized it completely.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(ms, 0);

	/*
	 * If metaslab_debug_load is set and we're initializing a metaslab
	 * that has an allocated space map object then load the space map
	 * so that we can verify frees.
	 */
	if (metaslab_debug_load && ms->ms_sm != NULL) {
		mutex_enter(&ms->ms_lock);
		VERIFY0(metaslab_load(ms));
		mutex_exit(&ms->ms_lock);
	}

	if (txg != 0) {
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, ms, txg);
	}

	*msp = ms;

	return (0);
}
void
metaslab_fini(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);
	VERIFY(msp->ms_group == NULL);
	metaslab_space_update(vd, mg->mg_class,
	    -space_map_allocated(msp->ms_sm), 0, -msp->ms_size);

	space_map_close(msp->ms_sm);

	metaslab_unload(msp);

	range_tree_destroy(msp->ms_allocatable);
	range_tree_destroy(msp->ms_freeing);
	range_tree_destroy(msp->ms_freed);

	for (int t = 0; t < TXG_SIZE; t++) {
		range_tree_destroy(msp->ms_allocating[t]);
	}

	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
		range_tree_destroy(msp->ms_defer[t]);
	}
	ASSERT0(msp->ms_deferspace);

	range_tree_destroy(msp->ms_checkpointing);

	mutex_exit(&msp->ms_lock);
	cv_destroy(&msp->ms_load_cv);
	mutex_destroy(&msp->ms_lock);
	mutex_destroy(&msp->ms_sync_lock);
	ASSERT3U(msp->ms_allocator, ==, -1);

	kmem_free(msp, sizeof (metaslab_t));
}
#define	FRAGMENTATION_TABLE_SIZE	17

/*
 * This table defines a segment size based fragmentation metric that will
 * allow each metaslab to derive its own fragmentation value. This is done
 * by calculating the space in each bucket of the spacemap histogram and
 * multiplying that by the fragmentation metric in this table. Doing
 * this for all buckets and dividing it by the total amount of free
 * space in this metaslab (i.e. the total free space in all buckets) gives
 * us the fragmentation metric. This means that a high fragmentation metric
 * equates to most of the free space being comprised of small segments.
 * Conversely, if the metric is low, then most of the free space is in
 * large segments. A 10% change in fragmentation equates to approximately
 * double the number of segments.
 *
 * This table defines 0% fragmented space using 16MB segments. Testing has
 * shown that segments that are greater than or equal to 16MB do not suffer
 * from drastic performance problems. Using this value, we derive the rest
 * of the table. Since the fragmentation value is never stored on disk, it
 * is possible to change these calculations in the future.
 */
int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
	100,	/* 512B	*/
	100,	/* 1K	*/
	98,	/* 2K	*/
	95,	/* 4K	*/
	90,	/* 8K	*/
	80,	/* 16K	*/
	70,	/* 32K	*/
	60,	/* 64K	*/
	50,	/* 128K	*/
	40,	/* 256K	*/
	30,	/* 512K	*/
	20,	/* 1M	*/
	15,	/* 2M	*/
	10,	/* 4M	*/
	5,	/* 8M	*/
	0,	/* 16M	*/
};
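
/*
 * As a worked example: if half of a metaslab's free space sits in 8K
 * segments (factor 90) and the other half in 16M segments (factor 0),
 * the resulting fragmentation metric is (50% * 90) + (50% * 0) = 45.
 */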
/*
 * Calculate the metaslab's fragmentation metric. A return value
 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
 * not support this metric. Otherwise, the return value should be in the
 * range [0, 100].
 */
static void
metaslab_set_fragmentation(metaslab_t *msp)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	uint64_t fragmentation = 0;
	uint64_t total = 0;
	boolean_t feature_enabled = spa_feature_is_enabled(spa,
	    SPA_FEATURE_SPACEMAP_HISTOGRAM);

	if (!feature_enabled) {
		msp->ms_fragmentation = ZFS_FRAG_INVALID;
		return;
	}

	/*
	 * A null space map means that the entire metaslab is free
	 * and thus is not fragmented.
	 */
	if (msp->ms_sm == NULL) {
		msp->ms_fragmentation = 0;
		return;
	}

	/*
	 * If this metaslab's space map has not been upgraded, flag it
	 * so that we upgrade next time we encounter it.
	 */
	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
		uint64_t txg = spa_syncing_txg(spa);
		vdev_t *vd = msp->ms_group->mg_vd;

		/*
		 * If we've reached the final dirty txg, then we must
		 * be shutting down the pool. We don't want to dirty
		 * any data past this point so skip setting the condense
		 * flag. We can retry this action the next time the pool
		 * is imported.
		 */
		if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
			msp->ms_condense_wanted = B_TRUE;
			vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
			zfs_dbgmsg("txg %llu, requesting force condense: "
			    "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
			    vd->vdev_id);
		}
		msp->ms_fragmentation = ZFS_FRAG_INVALID;
		return;
	}

	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		uint64_t space = 0;
		uint8_t shift = msp->ms_sm->sm_shift;

		int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
		    FRAGMENTATION_TABLE_SIZE - 1);

		if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
			continue;

		space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
		total += space;

		ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
		fragmentation += space * zfs_frag_table[idx];
	}

	if (total > 0)
		fragmentation /= total;
	ASSERT3U(fragmentation, <=, 100);

	msp->ms_fragmentation = fragmentation;
}
/*
 * Compute a weight -- a selection preference value -- for the given metaslab.
 * This is based on the amount of free space, the level of fragmentation,
 * the LBA range, and whether the metaslab is loaded.
 */
static uint64_t
metaslab_space_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(!vd->vdev_removing);

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = msp->ms_size - space_map_allocated(msp->ms_sm);

	if (metaslab_fragmentation_factor_enabled &&
	    msp->ms_fragmentation != ZFS_FRAG_INVALID) {
		/*
		 * Use the fragmentation information to inversely scale
		 * down the baseline weight. We need to ensure that we
		 * don't exclude this metaslab completely when it's 100%
		 * fragmented. To avoid this we reduce the fragmented value
		 * by 1.
		 */
		space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;

		/*
		 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
		 * this metaslab again. The fragmentation metric may have
		 * decreased the space to something smaller than
		 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
		 * so that we can consume any remaining space.
		 */
		if (space > 0 && space < SPA_MINBLOCKSIZE)
			space = SPA_MINBLOCKSIZE;
	}
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1. We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
		weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
		ASSERT(weight >= space && weight <= 2 * space);
	}

	/*
	 * If this metaslab is one we're actively using, adjust its
	 * weight to make it preferable to any inactive metaslab so
	 * we'll polish it off. If the fragmentation on this metaslab
	 * has exceeded our threshold, then don't mark it active.
	 */
	if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
	    msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
	}

	WEIGHT_SET_SPACEBASED(weight);
	return (weight);
}
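
/*
 * For example, on a rotational vdev with 200 metaslabs the LBA weighting
 * above gives metaslab 0 a weight of 2 * space, metaslab 100 a weight of
 * 1.5 * space, and the innermost metaslab roughly 1 * space.
 */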
/*
 * Return the weight of the specified metaslab, according to the segment-based
 * weighting algorithm. The metaslab must be loaded. This function can
 * be called within a sync pass since it relies only on the metaslab's
 * range tree which is always accurate when the metaslab is loaded.
 */
static uint64_t
metaslab_weight_from_range_tree(metaslab_t *msp)
{
	uint64_t weight = 0;
	uint32_t segments = 0;

	ASSERT(msp->ms_loaded);

	for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
	    i--) {
		uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;

		segments <<= 1;
		segments += msp->ms_allocatable->rt_histogram[i];

		/*
		 * The range tree provides more precision than the space map
		 * and must be downgraded so that all values fit within the
		 * space map's histogram. This allows us to compare loaded
		 * vs. unloaded metaslabs to determine which metaslab is
		 * considered "best".
		 */
		if (i > max_idx)
			continue;

		if (segments != 0) {
			WEIGHT_SET_COUNT(weight, segments);
			WEIGHT_SET_INDEX(weight, i);
			WEIGHT_SET_ACTIVE(weight, 0);
			break;
		}
	}
	return (weight);
}
/*
 * Calculate the weight based on the on-disk histogram. This should only
 * be called after a sync pass has completely finished since the on-disk
 * information is updated in metaslab_sync().
 */
static uint64_t
metaslab_weight_from_spacemap(metaslab_t *msp)
{
	uint64_t weight = 0;

	for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
		if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) {
			WEIGHT_SET_COUNT(weight,
			    msp->ms_sm->sm_phys->smp_histogram[i]);
			WEIGHT_SET_INDEX(weight, i +
			    msp->ms_sm->sm_shift);
			WEIGHT_SET_ACTIVE(weight, 0);
			break;
		}
	}
	return (weight);
}
/*
 * Compute a segment-based weight for the specified metaslab. The weight
 * is determined by highest bucket in the histogram. The information
 * for the highest bucket is encoded into the weight value.
 */
static uint64_t
metaslab_segment_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	uint64_t weight = 0;
	uint8_t shift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * The metaslab is completely free.
	 */
	if (space_map_allocated(msp->ms_sm) == 0) {
		int idx = highbit64(msp->ms_size) - 1;
		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;

		if (idx < max_idx) {
			WEIGHT_SET_COUNT(weight, 1ULL);
			WEIGHT_SET_INDEX(weight, idx);
		} else {
			WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
			WEIGHT_SET_INDEX(weight, max_idx);
		}
		WEIGHT_SET_ACTIVE(weight, 0);
		ASSERT(!WEIGHT_IS_SPACEBASED(weight));

		return (weight);
	}

	ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));

	/*
	 * If the metaslab is fully allocated then just make the weight 0.
	 */
	if (space_map_allocated(msp->ms_sm) == msp->ms_size)
		return (0);
	/*
	 * If the metaslab is already loaded, then use the range tree to
	 * determine the weight. Otherwise, we rely on the space map information
	 * to generate the weight.
	 */
	if (msp->ms_loaded) {
		weight = metaslab_weight_from_range_tree(msp);
	} else {
		weight = metaslab_weight_from_spacemap(msp);
	}

	/*
	 * If the metaslab was active the last time we calculated its weight
	 * then keep it active. We want to consume the entire region that
	 * is associated with this weight.
	 */
	if (msp->ms_activation_weight != 0 && weight != 0)
		WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
	return (weight);
}
/*
 * Determine if we should attempt to allocate from this metaslab. If the
 * metaslab has a maximum size then we can quickly determine if the desired
 * allocation size can be satisfied. Otherwise, if we're using segment-based
 * weighting then we can determine the maximum allocation that this metaslab
 * can accommodate based on the index encoded in the weight. If we're using
 * space-based weights then rely on the entire weight (excluding the weight
 * type bit).
 */
boolean_t
metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
{
	boolean_t should_allocate;

	if (msp->ms_max_size != 0)
		return (msp->ms_max_size >= asize);

	if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
		/*
		 * The metaslab segment weight indicates segments in the
		 * range [2^i, 2^(i+1)), where i is the index in the weight.
		 * Since the asize might be in the middle of the range, we
		 * should attempt the allocation if asize < 2^(i+1).
		 */
		should_allocate = (asize <
		    1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
	} else {
		should_allocate = (asize <=
		    (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
	}

	return (should_allocate);
}
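/*
 * Worked example (illustrative, not from the original source): if the
 * segment-based weight encodes index 17, the metaslab is known to hold
 * free segments somewhere in [128K, 256K), so any request smaller than
 * 256K is worth attempting:
 *
 *	uint64_t idx = WEIGHT_GET_INDEX(msp->ms_weight);	// e.g. 17
 *	boolean_t try = (asize < (1ULL << (idx + 1)));		// asize < 256K
 */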
static uint64_t
metaslab_weight(metaslab_t *msp)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	uint64_t weight;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * If this vdev is in the process of being removed, there is nothing
	 * for us to do here.
	 */
	if (vd->vdev_removing)
		return (0);

	metaslab_set_fragmentation(msp);

	/*
	 * Update the maximum size if the metaslab is loaded. This will
	 * ensure that we get an accurate maximum size if newly freed space
	 * has been added back into the free tree.
	 */
	if (msp->ms_loaded)
		msp->ms_max_size = metaslab_block_maxsize(msp);

	/*
	 * Segment-based weighting requires space map histogram support.
	 */
	if (zfs_metaslab_segment_weight_enabled &&
	    spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
	    sizeof (space_map_phys_t))) {
		weight = metaslab_segment_weight(msp);
	} else {
		weight = metaslab_space_weight(msp);
	}
	return (weight);
}
static int
metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
    int allocator, uint64_t activation_weight)
{
	/*
	 * If we're activating for the claim code, we don't want to actually
	 * set the metaslab up for a specific allocator.
	 */
	if (activation_weight == METASLAB_WEIGHT_CLAIM)
		return (0);

	metaslab_t **arr = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
	    mg->mg_primaries : mg->mg_secondaries);

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	mutex_enter(&mg->mg_lock);
	if (arr[allocator] != NULL) {
		mutex_exit(&mg->mg_lock);
		return (EEXIST);
	}

	arr[allocator] = msp;
	ASSERT3S(msp->ms_allocator, ==, -1);
	msp->ms_allocator = allocator;
	msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
	mutex_exit(&mg->mg_lock);

	return (0);
}
static int
metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
{
	int error = 0;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		metaslab_load_wait(msp);
		if (!msp->ms_loaded) {
			if ((error = metaslab_load(msp)) != 0) {
				metaslab_group_sort(msp->ms_group, msp, 0);
				return (error);
			}
		}
		if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
			/*
			 * The metaslab was activated for another allocator
			 * while we were waiting, we should reselect.
			 */
			return (SET_ERROR(EBUSY));
		}
		if ((error = metaslab_activate_allocator(msp->ms_group, msp,
		    allocator, activation_weight)) != 0) {
			return (error);
		}

		msp->ms_activation_weight = msp->ms_weight;
		metaslab_group_sort(msp->ms_group, msp,
		    msp->ms_weight | activation_weight);
	}
	ASSERT(msp->ms_loaded);
	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (0);
}
static void
metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
    uint64_t weight)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
		metaslab_group_sort(mg, msp, weight);
		return;
	}

	mutex_enter(&mg->mg_lock);
	ASSERT3P(msp->ms_group, ==, mg);
	if (msp->ms_primary) {
		ASSERT3U(0, <=, msp->ms_allocator);
		ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
		ASSERT3P(mg->mg_primaries[msp->ms_allocator], ==, msp);
		ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
		mg->mg_primaries[msp->ms_allocator] = NULL;
	} else {
		ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
		ASSERT3P(mg->mg_secondaries[msp->ms_allocator], ==, msp);
		mg->mg_secondaries[msp->ms_allocator] = NULL;
	}
	msp->ms_allocator = -1;
	metaslab_group_sort_impl(mg, msp, weight);
	mutex_exit(&mg->mg_lock);
}
static void
metaslab_passivate(metaslab_t *msp, uint64_t weight)
{
	ASSERTV(uint64_t size = weight & ~METASLAB_WEIGHT_TYPE);

	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again. In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
	    size >= SPA_MINBLOCKSIZE ||
	    range_tree_space(msp->ms_allocatable) == 0);
	ASSERT0(weight & METASLAB_ACTIVE_MASK);

	msp->ms_activation_weight = 0;
	metaslab_passivate_allocator(msp->ms_group, msp, weight);
	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}
/*
 * Segment-based metaslabs are activated once and remain active until
 * we either fail an allocation attempt (similar to space-based metaslabs)
 * or have exhausted the free space in zfs_metaslab_switch_threshold
 * buckets since the metaslab was activated. This function checks to see
 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
 * metaslab and passivates it proactively. This will allow us to select a
 * metaslab with a larger contiguous region, if any, remaining within this
 * metaslab group. If we're in sync pass > 1, then we continue using this
 * metaslab so that we don't dirty more blocks and cause more sync passes.
 */
void
metaslab_segment_may_passivate(metaslab_t *msp)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;

	if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
		return;

	/*
	 * Since we are in the middle of a sync pass, the most accurate
	 * information that is accessible to us is the in-core range tree
	 * histogram; calculate the new weight based on that information.
	 */
	uint64_t weight = metaslab_weight_from_range_tree(msp);
	int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
	int current_idx = WEIGHT_GET_INDEX(weight);

	if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
		metaslab_passivate(msp, weight);
}
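/*
 * Illustrative arithmetic (not from the original source): assuming
 * zfs_metaslab_switch_threshold is 2 and the metaslab was activated
 * with weight index 18 (segments in [256K, 512K)), allocations may
 * continue until the in-core histogram shows the largest remaining
 * segments at index 16 or below, at which point 16 <= 18 - 2 holds
 * and the metaslab is proactively passivated.
 */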
static void
metaslab_preload(void *arg)
{
	metaslab_t *msp = arg;
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));

	mutex_enter(&msp->ms_lock);
	metaslab_load_wait(msp);
	if (!msp->ms_loaded)
		(void) metaslab_load(msp);
	msp->ms_selected_txg = spa_syncing_txg(spa);
	mutex_exit(&msp->ms_lock);
	spl_fstrans_unmark(cookie);
}
static void
metaslab_group_preload(metaslab_group_t *mg)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	int m = 0;

	if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
		taskq_wait_outstanding(mg->mg_taskq, 0);
		return;
	}

	mutex_enter(&mg->mg_lock);

	/*
	 * Load the next potential metaslabs
	 */
	for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
		ASSERT3P(msp->ms_group, ==, mg);

		/*
		 * We preload only the maximum number of metaslabs specified
		 * by metaslab_preload_limit. If a metaslab is being forced
		 * to condense then we preload it too. This will ensure
		 * that force condensing happens in the next txg.
		 */
		if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
			continue;
		}

		VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
		    msp, TQ_SLEEP) != TASKQID_INVALID);
	}
	mutex_exit(&mg->mg_lock);
}
/*
 * Determine if the space map's on-disk footprint is past our tolerance
 * for inefficiency. We would like to use the following criteria to make
 * our decision:
 *
 * 1. The size of the space map object should not dramatically increase as a
 *    result of writing out the free space range tree.
 *
 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
 *    times the size of the free space range tree representation
 *    (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
 *
 * 3. The on-disk size of the space map should actually decrease.
 *
 * Unfortunately, we cannot compute the on-disk size of the space map in this
 * context because we cannot accurately compute the effects of compression, etc.
 * Instead, we apply the heuristic described in the block comment for
 * zfs_metaslab_condense_block_threshold - we only condense if the space used
 * is greater than a threshold number of blocks.
 */
static boolean_t
metaslab_should_condense(metaslab_t *msp)
{
	space_map_t *sm = msp->ms_sm;
	vdev_t *vd = msp->ms_group->mg_vd;
	uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
	uint64_t current_txg = spa_syncing_txg(vd->vdev_spa);

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(msp->ms_loaded);

	/*
	 * Allocations and frees in early passes are generally more space
	 * efficient (in terms of blocks described in space map entries)
	 * than the ones in later passes (e.g. we don't compress after
	 * sync pass 5) and condensing a metaslab multiple times in a txg
	 * could degrade performance.
	 *
	 * Thus we prefer condensing each metaslab at most once every txg at
	 * the earliest sync pass possible. If a metaslab is eligible for
	 * condensing again after being considered for condensing within the
	 * same txg, it will hopefully be dirty in the next txg where it will
	 * be condensed at an earlier pass.
	 */
	if (msp->ms_condense_checked_txg == current_txg)
		return (B_FALSE);
	msp->ms_condense_checked_txg = current_txg;

	/*
	 * We always condense metaslabs that are empty and metaslabs for
	 * which a condense request has been made.
	 */
	if (avl_is_empty(&msp->ms_allocatable_by_size) ||
	    msp->ms_condense_wanted)
		return (B_TRUE);

	uint64_t object_size = space_map_length(msp->ms_sm);
	uint64_t optimal_size = space_map_estimate_optimal_size(sm,
	    msp->ms_allocatable, SM_NO_VDEVID);

	dmu_object_info_t doi;
	dmu_object_info_from_db(sm->sm_dbuf, &doi);
	uint64_t record_size = MAX(doi.doi_data_block_size, vdev_blocksize);

	return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
	    object_size > zfs_metaslab_condense_block_threshold * record_size);
}
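/*
 * Illustrative arithmetic (not from the original source): with
 * zfs_condense_pct = 200, an estimated optimal size of 1MB, a 4K
 * record size on an ashift-12 vdev, and
 * zfs_metaslab_condense_block_threshold = 4, we condense only when the
 * on-disk space map is at least 2MB (200% of 1MB) and larger than
 * 16K (4 blocks of 4K) -- the first test guarantees a worthwhile
 * shrink, the second that the map spans enough blocks to matter.
 */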
/*
 * Condense the on-disk space map representation to its minimized form.
 * The minimized form consists of a small number of allocations followed by
 * the entries of the free range tree.
 */
static void
metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
{
	range_tree_t *condense_tree;
	space_map_t *sm = msp->ms_sm;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(msp->ms_loaded);

	zfs_dbgmsg("condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
	    "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
	    msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
	    msp->ms_group->mg_vd->vdev_spa->spa_name,
	    space_map_length(msp->ms_sm),
	    avl_numnodes(&msp->ms_allocatable->rt_root),
	    msp->ms_condense_wanted ? "TRUE" : "FALSE");

	msp->ms_condense_wanted = B_FALSE;

	/*
	 * Create a range tree that is 100% allocated. We remove segments
	 * that have been freed in this txg, any deferred frees that exist,
	 * and any allocation in the future. Removing segments should be
	 * a relatively inexpensive operation since we expect these trees to
	 * have a small number of nodes.
	 */
	condense_tree = range_tree_create(NULL, NULL);
	range_tree_add(condense_tree, msp->ms_start, msp->ms_size);

	range_tree_walk(msp->ms_freeing, range_tree_remove, condense_tree);
	range_tree_walk(msp->ms_freed, range_tree_remove, condense_tree);

	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
		range_tree_walk(msp->ms_defer[t],
		    range_tree_remove, condense_tree);
	}

	for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
		range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
		    range_tree_remove, condense_tree);
	}

	/*
	 * We're about to drop the metaslab's lock thus allowing
	 * other consumers to change its content. Set the
	 * metaslab's ms_condensing flag to ensure that
	 * allocations on this metaslab do not occur while we're
	 * in the middle of committing it to disk. This is only critical
	 * for ms_allocatable as all other range trees use per txg
	 * views of their content.
	 */
	msp->ms_condensing = B_TRUE;

	mutex_exit(&msp->ms_lock);
	space_map_truncate(sm, zfs_metaslab_sm_blksz, tx);

	/*
	 * While we would ideally like to create a space map representation
	 * that consists only of allocation records, doing so can be
	 * prohibitively expensive because the in-core free tree can be
	 * large, and therefore computationally expensive to subtract
	 * from the condense_tree. Instead we sync out two trees, a cheap
	 * allocation only tree followed by the in-core free tree. While not
	 * optimal, this is typically close to optimal, and much cheaper to
	 * compute.
	 */
	space_map_write(sm, condense_tree, SM_ALLOC, SM_NO_VDEVID, tx);
	range_tree_vacate(condense_tree, NULL, NULL);
	range_tree_destroy(condense_tree);

	space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
	mutex_enter(&msp->ms_lock);
	msp->ms_condensing = B_FALSE;
}
/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa_meta_objset(spa);
	range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
	dmu_tx_t *tx;
	uint64_t object = space_map_object(msp->ms_sm);

	ASSERT(!vd->vdev_ishole);

	/*
	 * This metaslab has just been added so there's no work to do now.
	 */
	if (msp->ms_freeing == NULL) {
		ASSERT3P(alloctree, ==, NULL);
		return;
	}

	ASSERT3P(alloctree, !=, NULL);
	ASSERT3P(msp->ms_freeing, !=, NULL);
	ASSERT3P(msp->ms_freed, !=, NULL);
	ASSERT3P(msp->ms_checkpointing, !=, NULL);

	/*
	 * Normally, we don't want to process a metaslab if there are no
	 * allocations or frees to perform. However, if the metaslab is being
	 * forced to condense and it's loaded, we need to let it through.
	 */
	if (range_tree_is_empty(alloctree) &&
	    range_tree_is_empty(msp->ms_freeing) &&
	    range_tree_is_empty(msp->ms_checkpointing) &&
	    !(msp->ms_loaded && msp->ms_condense_wanted))
		return;

	VERIFY(txg <= spa_final_dirty_txg(spa));

	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_allocatable. No other
	 * thread can be modifying this txg's alloc, freeing,
	 * freed, or space_map_phys_t. We drop ms_lock whenever we
	 * could call into the DMU, because the DMU can call down to us
	 * (e.g. via zio_free()) at any time.
	 *
	 * The spa_vdev_remove_thread() can be reading metaslab state
	 * concurrently, and it is locked out by the ms_sync_lock. Note
	 * that the ms_lock is insufficient for this, because it is dropped
	 * by space_map_write().
	 */
	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	if (msp->ms_sm == NULL) {
		uint64_t new_object;

		new_object = space_map_alloc(mos, zfs_metaslab_sm_blksz, tx);
		VERIFY3U(new_object, !=, 0);

		VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
		    msp->ms_start, msp->ms_size, vd->vdev_ashift));
		ASSERT(msp->ms_sm != NULL);
	}

	if (!range_tree_is_empty(msp->ms_checkpointing) &&
	    vd->vdev_checkpoint_sm == NULL) {
		ASSERT(spa_has_checkpoint(spa));

		uint64_t new_object = space_map_alloc(mos,
		    vdev_standard_sm_blksz, tx);
		VERIFY3U(new_object, !=, 0);

		VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
		    mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);

		/*
		 * We save the space map object as an entry in vdev_top_zap
		 * so it can be retrieved when the pool is reopened after an
		 * export or through zdb.
		 */
		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
		    sizeof (new_object), 1, &new_object, tx));
	}

	mutex_enter(&msp->ms_sync_lock);
	mutex_enter(&msp->ms_lock);

	/*
	 * Note: metaslab_condense() clears the space map's histogram.
	 * Therefore we must verify and remove this histogram before
	 * condensing.
	 */
	metaslab_group_histogram_verify(mg);
	metaslab_class_histogram_verify(mg->mg_class);
	metaslab_group_histogram_remove(mg, msp);

	if (msp->ms_loaded && metaslab_should_condense(msp)) {
		metaslab_condense(msp, txg, tx);
	} else {
		mutex_exit(&msp->ms_lock);
		space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
		    SM_NO_VDEVID, tx);
		space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
		    SM_NO_VDEVID, tx);
		mutex_enter(&msp->ms_lock);
	}

	if (!range_tree_is_empty(msp->ms_checkpointing)) {
		ASSERT(spa_has_checkpoint(spa));
		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);

		/*
		 * Since we are doing writes to disk and the ms_checkpointing
		 * tree won't be changing during that time, we drop the
		 * ms_lock while writing to the checkpoint space map.
		 */
		mutex_exit(&msp->ms_lock);
		space_map_write(vd->vdev_checkpoint_sm,
		    msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
		mutex_enter(&msp->ms_lock);
		space_map_update(vd->vdev_checkpoint_sm);

		spa->spa_checkpoint_info.sci_dspace +=
		    range_tree_space(msp->ms_checkpointing);
		vd->vdev_stat.vs_checkpoint_space +=
		    range_tree_space(msp->ms_checkpointing);
		ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
		    -vd->vdev_checkpoint_sm->sm_alloc);

		range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
	}

	if (msp->ms_loaded) {
		/*
		 * When the space map is loaded, we have an accurate
		 * histogram in the range tree. This gives us an opportunity
		 * to bring the space map's histogram up-to-date so we clear
		 * it first before updating it.
		 */
		space_map_histogram_clear(msp->ms_sm);
		space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);

		/*
		 * Since we've cleared the histogram we need to add back
		 * any free space that has already been processed, plus
		 * any deferred space. This allows the on-disk histogram
		 * to accurately reflect all free space even if some space
		 * is not yet available for allocation (i.e. deferred).
		 */
		space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);

		/*
		 * Add back any deferred free space that has not been
		 * added back into the in-core free tree yet. This will
		 * ensure that we don't end up with a space map histogram
		 * that is completely empty unless the metaslab is fully
		 * allocated.
		 */
		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			space_map_histogram_add(msp->ms_sm,
			    msp->ms_defer[t], tx);
		}
	}

	/*
	 * Always add the free space from this sync pass to the space
	 * map histogram. We want to make sure that the on-disk histogram
	 * accounts for all free space. If the space map is not loaded,
	 * then we will lose some accuracy but will correct it the next
	 * time we load the space map.
	 */
	space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);

	metaslab_group_histogram_add(mg, msp);
	metaslab_group_histogram_verify(mg);
	metaslab_class_histogram_verify(mg->mg_class);

	/*
	 * For sync pass 1, we avoid traversing this txg's free range tree
	 * and instead will just swap the pointers for freeing and
	 * freed. We can safely do this since the freed_tree is
	 * guaranteed to be empty on the initial pass.
	 */
	if (spa_sync_pass(spa) == 1) {
		range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
	} else {
		range_tree_vacate(msp->ms_freeing,
		    range_tree_add, msp->ms_freed);
	}
	range_tree_vacate(alloctree, NULL, NULL);

	ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
	ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
	    & TXG_MASK]));
	ASSERT0(range_tree_space(msp->ms_freeing));
	ASSERT0(range_tree_space(msp->ms_checkpointing));

	mutex_exit(&msp->ms_lock);

	if (object != space_map_object(msp->ms_sm)) {
		object = space_map_object(msp->ms_sm);
		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
		    msp->ms_id, sizeof (uint64_t), &object, tx);
	}
	mutex_exit(&msp->ms_sync_lock);
	dmu_tx_commit(tx);
}
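/*
 * Illustrative note (not from the original source): the per-txg range
 * trees are indexed with txg & TXG_MASK, a wheel of TXG_SIZE (4) slots,
 * so at most TXG_CONCURRENT_STATES txgs are tracked at once:
 *
 *	range_tree_t *rt = msp->ms_allocating[txg & TXG_MASK];
 *	// txg 100 -> slot 0, txg 101 -> slot 1, txg 102 -> slot 2, ...
 */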
/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	spa_t *spa = vd->vdev_spa;
	range_tree_t **defer_tree;
	int64_t alloc_delta, defer_delta;
	boolean_t defer_allowed = B_TRUE;

	ASSERT(!vd->vdev_ishole);

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * range trees and add its capacity to the vdev.
	 */
	if (msp->ms_freed == NULL) {
		for (int t = 0; t < TXG_SIZE; t++) {
			ASSERT(msp->ms_allocating[t] == NULL);

			msp->ms_allocating[t] = range_tree_create(NULL, NULL);
		}

		ASSERT3P(msp->ms_freeing, ==, NULL);
		msp->ms_freeing = range_tree_create(NULL, NULL);

		ASSERT3P(msp->ms_freed, ==, NULL);
		msp->ms_freed = range_tree_create(NULL, NULL);

		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			ASSERT(msp->ms_defer[t] == NULL);

			msp->ms_defer[t] = range_tree_create(NULL, NULL);
		}

		ASSERT3P(msp->ms_checkpointing, ==, NULL);
		msp->ms_checkpointing = range_tree_create(NULL, NULL);

		metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
	}
	ASSERT0(range_tree_space(msp->ms_freeing));
	ASSERT0(range_tree_space(msp->ms_checkpointing));

	defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];

	uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
	    metaslab_class_get_alloc(spa_normal_class(spa));
	if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
		defer_allowed = B_FALSE;
	}

	defer_delta = 0;
	alloc_delta = space_map_alloc_delta(msp->ms_sm);
	if (defer_allowed) {
		defer_delta = range_tree_space(msp->ms_freed) -
		    range_tree_space(*defer_tree);
	} else {
		defer_delta -= range_tree_space(*defer_tree);
	}

	metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
	    defer_delta, 0);

	/*
	 * If there's a metaslab_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 */
	metaslab_load_wait(msp);

	/*
	 * Move the frees from the defer_tree back to the free
	 * range tree (if it's loaded). Swap the freed_tree and
	 * the defer_tree -- this is safe to do because we've
	 * just emptied out the defer_tree.
	 */
	range_tree_vacate(*defer_tree,
	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
	if (defer_allowed) {
		range_tree_swap(&msp->ms_freed, defer_tree);
	} else {
		range_tree_vacate(msp->ms_freed,
		    msp->ms_loaded ? range_tree_add : NULL,
		    msp->ms_allocatable);
	}
	space_map_update(msp->ms_sm);

	msp->ms_deferspace += defer_delta;
	ASSERT3S(msp->ms_deferspace, >=, 0);
	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
	if (msp->ms_deferspace != 0) {
		/*
		 * Keep syncing this metaslab until all deferred frees
		 * are back in circulation.
		 */
		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
	}

	if (msp->ms_new) {
		msp->ms_new = B_FALSE;
		mutex_enter(&mg->mg_lock);
		mg->mg_ms_ready++;
		mutex_exit(&mg->mg_lock);
	}
	/*
	 * Calculate the new weights before unloading any metaslabs.
	 * This will give us the most accurate weighting.
	 */
	metaslab_group_sort(mg, msp, metaslab_weight(msp) |
	    (msp->ms_weight & METASLAB_ACTIVE_MASK));

	/*
	 * If the metaslab is loaded and we've not tried to load or allocate
	 * from it in 'metaslab_unload_delay' txgs, then unload it.
	 */
	if (msp->ms_loaded &&
	    msp->ms_initializing == 0 &&
	    msp->ms_selected_txg + metaslab_unload_delay < txg) {

		for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
			VERIFY0(range_tree_space(
			    msp->ms_allocating[(txg + t) & TXG_MASK]));
		}
		if (msp->ms_allocator != -1) {
			metaslab_passivate(msp, msp->ms_weight &
			    ~METASLAB_ACTIVE_MASK);
		}

		if (!metaslab_debug_unload)
			metaslab_unload(msp);
	}

	ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
	ASSERT0(range_tree_space(msp->ms_freeing));
	ASSERT0(range_tree_space(msp->ms_freed));
	ASSERT0(range_tree_space(msp->ms_checkpointing));

	mutex_exit(&msp->ms_lock);
}
void
metaslab_sync_reassess(metaslab_group_t *mg)
{
	spa_t *spa = mg->mg_class->mc_spa;

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
	metaslab_group_alloc_update(mg);
	mg->mg_fragmentation = metaslab_group_fragmentation(mg);

	/*
	 * Preload the next potential metaslabs but only on active
	 * metaslab groups. We can get into a state where the metaslab
	 * is no longer active since we dirty metaslabs as we remove a
	 * device, thus potentially making the metaslab group eligible
	 * for preloading.
	 */
	if (mg->mg_activation_count > 0) {
		metaslab_group_preload(mg);
	}
	spa_config_exit(spa, SCL_ALLOC, FTAG);
}
/*
 * When writing a ditto block (i.e. more than one DVA for a given BP) on
 * the same vdev as an existing DVA of this BP, then try to allocate it
 * on a different metaslab than existing DVAs (i.e. a unique metaslab).
 */
static boolean_t
metaslab_is_unique(metaslab_t *msp, dva_t *dva)
{
	uint64_t dva_ms_id;

	if (DVA_GET_ASIZE(dva) == 0)
		return (B_TRUE);

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (B_TRUE);

	dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;

	return (msp->ms_id != dva_ms_id);
}
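/*
 * Illustrative example (not from the original source): vdev_ms_shift is
 * log2 of the metaslab size, so the metaslab id is just the DVA offset
 * divided by the metaslab size. With 512M metaslabs (ms_shift == 29),
 * an offset of 0x60000000 (1.5G) maps to metaslab 3:
 *
 *	uint64_t ms_id = 0x60000000ULL >> 29;	// == 3
 */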
/*
 * ==========================================================================
 * Metaslab allocation tracing facility
 * ==========================================================================
 */
#ifdef _METASLAB_TRACING
kstat_t *metaslab_trace_ksp;
kstat_named_t metaslab_trace_over_limit;

void
metaslab_alloc_trace_init(void)
{
	ASSERT(metaslab_alloc_trace_cache == NULL);
	metaslab_alloc_trace_cache = kmem_cache_create(
	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
	    "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
	if (metaslab_trace_ksp != NULL) {
		metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
		kstat_named_init(&metaslab_trace_over_limit,
		    "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
		kstat_install(metaslab_trace_ksp);
	}
}

void
metaslab_alloc_trace_fini(void)
{
	if (metaslab_trace_ksp != NULL) {
		kstat_delete(metaslab_trace_ksp);
		metaslab_trace_ksp = NULL;
	}
	kmem_cache_destroy(metaslab_alloc_trace_cache);
	metaslab_alloc_trace_cache = NULL;
}

/*
 * Add an allocation trace element to the allocation tracing list.
 */
static void
metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
    metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
    int allocator)
{
	metaslab_alloc_trace_t *mat;

	if (!metaslab_trace_enabled)
		return;

	/*
	 * When the tracing list reaches its maximum we remove
	 * the second element in the list before adding a new one.
	 * By removing the second element we preserve the original
	 * entry as a clue to what allocation steps have already been
	 * performed.
	 */
	if (zal->zal_size == metaslab_trace_max_entries) {
		metaslab_alloc_trace_t *mat_next;
#ifdef DEBUG
		panic("too many entries in allocation list");
#endif
		atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
		zal->zal_size--;
		mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
		list_remove(&zal->zal_list, mat_next);
		kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
	}

	mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
	list_link_init(&mat->mat_list_node);
	mat->mat_mg = mg;
	mat->mat_msp = msp;
	mat->mat_size = psize;
	mat->mat_dva_id = dva_id;
	mat->mat_offset = offset;
	mat->mat_weight = 0;
	mat->mat_allocator = allocator;

	if (msp != NULL)
		mat->mat_weight = msp->ms_weight;

	/*
	 * The list is part of the zio so locking is not required. Only
	 * a single thread will perform allocations for a given zio.
	 */
	list_insert_tail(&zal->zal_list, mat);
	zal->zal_size++;

	ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
}

void
metaslab_trace_init(zio_alloc_list_t *zal)
{
	list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
	    offsetof(metaslab_alloc_trace_t, mat_list_node));
	zal->zal_size = 0;
}

void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
	metaslab_alloc_trace_t *mat;

	while ((mat = list_remove_head(&zal->zal_list)) != NULL)
		kmem_cache_free(metaslab_alloc_trace_cache, mat);
	list_destroy(&zal->zal_list);
}
#else

#define	metaslab_trace_add(zal, mg, msp, psize, id, off, alloc)

void
metaslab_alloc_trace_init(void)
{
}

void
metaslab_alloc_trace_fini(void)
{
}

void
metaslab_trace_init(zio_alloc_list_t *zal)
{
}

void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
}

#endif /* _METASLAB_TRACING */
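/*
 * Usage sketch (illustrative, not from the original source): the tracing
 * list is owned by a single zio, so a caller would typically do:
 *
 *	zio_alloc_list_t zal;
 *
 *	metaslab_trace_init(&zal);
 *	// ... allocation attempts append entries via metaslab_trace_add()
 *	metaslab_trace_fini(&zal);	// frees all trace entries
 *
 * Under _METASLAB_TRACING the entries record each metaslab tried and
 * why it was rejected; otherwise all of these compile away.
 */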
/*
 * ==========================================================================
 * Metaslab block operations
 * ==========================================================================
 */
static void
metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
    int allocator)
{
	if (!(flags & METASLAB_ASYNC_ALLOC) ||
	    (flags & METASLAB_DONT_THROTTLE))
		return;

	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
	if (!mg->mg_class->mc_alloc_throttle_enabled)
		return;

	(void) zfs_refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
}

static void
metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
{
	uint64_t max = mg->mg_max_alloc_queue_depth;
	uint64_t cur = mg->mg_cur_max_alloc_queue_depth[allocator];
	while (cur < max) {
		if (atomic_cas_64(&mg->mg_cur_max_alloc_queue_depth[allocator],
		    cur, cur + 1) == cur) {
			atomic_inc_64(
			    &mg->mg_class->mc_alloc_max_slots[allocator]);
			return;
		}
		cur = mg->mg_cur_max_alloc_queue_depth[allocator];
	}
}

void
metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
    int allocator, boolean_t io_complete)
{
	if (!(flags & METASLAB_ASYNC_ALLOC) ||
	    (flags & METASLAB_DONT_THROTTLE))
		return;

	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
	if (!mg->mg_class->mc_alloc_throttle_enabled)
		return;

	(void) zfs_refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
	if (io_complete)
		metaslab_group_increment_qdepth(mg, allocator);
}
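/*
 * Illustrative note (not from the original source): the compare-and-swap
 * loop in metaslab_group_increment_qdepth() above grows the per-allocator
 * queue-depth ceiling by exactly one per completed I/O even under
 * contention -- if another thread races ahead, atomic_cas_64() fails,
 * cur is re-read, and the attempt is retried only while cur is still
 * below mg_max_alloc_queue_depth.
 */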
void
metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
    int allocator)
{
#ifdef ZFS_DEBUG
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	for (int d = 0; d < ndvas; d++) {
		uint64_t vdev = DVA_GET_VDEV(&dva[d]);
		metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
		VERIFY(zfs_refcount_not_held(
		    &mg->mg_alloc_queue_depth[allocator], tag));
	}
#endif
}
static uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
{
	uint64_t start;
	range_tree_t *rt = msp->ms_allocatable;
	metaslab_class_t *mc = msp->ms_group->mg_class;

	VERIFY(!msp->ms_condensing);
	VERIFY0(msp->ms_initializing);

	start = mc->mc_ops->msop_alloc(msp, size);
	if (start != -1ULL) {
		metaslab_group_t *mg = msp->ms_group;
		vdev_t *vd = mg->mg_vd;

		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
		range_tree_remove(rt, start, size);

		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
			vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

		range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);

		/* Track the last successful allocation */
		msp->ms_alloc_txg = txg;
		metaslab_verify_space(msp, txg);
	}

	/*
	 * Now that we've attempted the allocation we need to update the
	 * metaslab's maximum block size since it may have changed.
	 */
	msp->ms_max_size = metaslab_block_maxsize(msp);
	return (start);
}
/*
 * Find the metaslab with the highest weight that is less than what we've
 * already tried. In the common case, this means that we will examine each
 * metaslab at most once. Note that concurrent callers could reorder metaslabs
 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
 * activated by another thread, and we fail to allocate from the metaslab we
 * have selected, we may not try the newly-activated metaslab, and instead
 * activate another metaslab. This is not optimal, but generally does not cause
 * any problems (a possible exception being if every metaslab is completely full
 * except for the newly-activated metaslab which we fail to examine).
 */
static metaslab_t *
find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
    dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
    zio_alloc_list_t *zal, metaslab_t *search, boolean_t *was_active)
{
	avl_index_t idx;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	metaslab_t *msp = avl_find(t, search, &idx);
	if (msp == NULL)
		msp = avl_nearest(t, idx, AVL_AFTER);

	int i;
	for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
		if (!metaslab_should_allocate(msp, asize)) {
			metaslab_trace_add(zal, mg, msp, asize, d,
			    TRACE_TOO_SMALL, allocator);
			continue;
		}

		/*
		 * If the selected metaslab is condensing or being
		 * initialized, skip it.
		 */
		if (msp->ms_condensing || msp->ms_initializing > 0)
			continue;

		*was_active = msp->ms_allocator != -1;
		/*
		 * If we're activating as primary, this is our first allocation
		 * from this disk, so we don't need to check how close we are.
		 * If the metaslab under consideration was already active,
		 * we're getting desperate enough to steal another allocator's
		 * metaslab, so we still don't care about distances.
		 */
		if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
			break;

		for (i = 0; i < d; i++) {
			if (want_unique &&
			    !metaslab_is_unique(msp, &dva[i]))
				break;	/* try another metaslab */
		}
		if (i == d)
			break;
	}

	if (msp != NULL) {
		search->ms_weight = msp->ms_weight;
		search->ms_start = msp->ms_start + 1;
		search->ms_allocator = msp->ms_allocator;
		search->ms_primary = msp->ms_primary;
	}
	return (msp);
}
static uint64_t
metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
    uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva,
    int d, int allocator)
{
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	uint64_t activation_weight;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (int i = 0; i < d; i++) {
		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_SECONDARY;
		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_CLAIM;
			break;
		}
	}

	/*
	 * If we don't have enough metaslabs active to fill the entire array, we
	 * just use the 0th slot.
	 */
	if (mg->mg_ms_ready < mg->mg_allocators * 3)
		allocator = 0;

	ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);

	metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
	search->ms_weight = UINT64_MAX;
	search->ms_start = 0;
	/*
	 * At the end of the metaslab tree are the already-active metaslabs,
	 * first the primaries, then the secondaries. When we resume searching
	 * through the tree, we need to consider ms_allocator and ms_primary so
	 * we start in the location right after where we left off, and don't
	 * accidentally loop forever considering the same metaslabs.
	 */
	search->ms_allocator = -1;
	search->ms_primary = B_TRUE;
	for (;;) {
		boolean_t was_active = B_FALSE;

		mutex_enter(&mg->mg_lock);

		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
		    mg->mg_primaries[allocator] != NULL) {
			msp = mg->mg_primaries[allocator];
			was_active = B_TRUE;
		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
		    mg->mg_secondaries[allocator] != NULL) {
			msp = mg->mg_secondaries[allocator];
			was_active = B_TRUE;
		} else {
			msp = find_valid_metaslab(mg, activation_weight, dva, d,
			    want_unique, asize, allocator, zal, search,
			    &was_active);
		}

		mutex_exit(&mg->mg_lock);
		if (msp == NULL) {
			kmem_free(search, sizeof (*search));
			return (-1ULL);
		}

		mutex_enter(&msp->ms_lock);
		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request. It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock. We check the
		 * active status first to see if we need to reselect
		 * a new metaslab.
		 */
		if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If the metaslab is freshly activated for an allocator that
		 * isn't the one we're allocating from, or if it's a primary and
		 * we're seeking a secondary (or vice versa), we go back and
		 * select a new metaslab.
		 */
		if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
		    (msp->ms_allocator != -1) &&
		    (msp->ms_allocator != allocator || ((activation_weight ==
		    METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
		    activation_weight != METASLAB_WEIGHT_CLAIM) {
			metaslab_passivate(msp, msp->ms_weight &
			    ~METASLAB_WEIGHT_CLAIM);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (metaslab_activate(msp, allocator, activation_weight) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		msp->ms_selected_txg = txg;

		/*
		 * Now that we have the lock, recheck to see if we should
		 * continue to use this metaslab for this allocation. The
		 * metaslab is now loaded so metaslab_should_allocate() can
		 * accurately determine if the allocation attempt should
		 * proceed.
		 */
		if (!metaslab_should_allocate(msp, asize)) {
			/* Passivate this metaslab and select a new one. */
			metaslab_trace_add(zal, mg, msp, asize, d,
			    TRACE_TOO_SMALL, allocator);
			goto next;
		}

		/*
		 * If this metaslab is currently condensing then pick again as
		 * we can't manipulate this metaslab until it's committed
		 * to disk. If this metaslab is being initialized, we shouldn't
		 * allocate from it since the allocated region might be
		 * overwritten after allocation.
		 */
		if (msp->ms_condensing) {
			metaslab_trace_add(zal, mg, msp, asize, d,
			    TRACE_CONDENSING, allocator);
			metaslab_passivate(msp, msp->ms_weight &
			    ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		} else if (msp->ms_initializing > 0) {
			metaslab_trace_add(zal, mg, msp, asize, d,
			    TRACE_INITIALIZING, allocator);
			metaslab_passivate(msp, msp->ms_weight &
			    ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		offset = metaslab_block_alloc(msp, asize, txg);
		metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);

		if (offset != -1ULL) {
			/* Proactively passivate the metaslab, if needed */
			metaslab_segment_may_passivate(msp);
			break;
		}
next:
		ASSERT(msp->ms_loaded);

		/*
		 * We were unable to allocate from this metaslab so determine
		 * a new weight for this metaslab. Now that we have loaded
		 * the metaslab we can provide a better hint to the metaslab
		 * allocator.
		 *
		 * For space-based metaslabs, we use the maximum block size.
		 * This information is only available when the metaslab
		 * is loaded and is more accurate than the generic free
		 * space weight that was calculated by metaslab_weight().
		 * This information allows us to quickly compare the maximum
		 * available allocation in the metaslab to the allocation
		 * size being requested.
		 *
		 * For segment-based metaslabs, determine the new weight
		 * based on the highest bucket in the range tree. We
		 * explicitly use the loaded segment weight (i.e. the range
		 * tree histogram) since it contains the space that is
		 * currently available for allocation and is accurate
		 * even within a sync pass.
		 */
		if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
			uint64_t weight = metaslab_block_maxsize(msp);
			WEIGHT_SET_SPACEBASED(weight);
			metaslab_passivate(msp, weight);
		} else {
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
		}

		/*
		 * We have just failed an allocation attempt, check
		 * that metaslab_should_allocate() agrees. Otherwise,
		 * we may end up in an infinite loop retrying the same
		 * metaslab.
		 */
		ASSERT(!metaslab_should_allocate(msp, asize));

		mutex_exit(&msp->ms_lock);
	}
	mutex_exit(&msp->ms_lock);
	kmem_free(search, sizeof (*search));
	return (offset);
}
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
    uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva,
    int d, int allocator)
{
	uint64_t offset;

	ASSERT(mg->mg_initialized);

	offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
	    dva, d, allocator);

	mutex_enter(&mg->mg_lock);
	if (offset == -1ULL) {
		mg->mg_failed_allocations++;
		metaslab_trace_add(zal, mg, NULL, asize, d,
		    TRACE_GROUP_FAILURE, allocator);
		if (asize == SPA_GANGBLOCKSIZE) {
			/*
			 * This metaslab group was unable to allocate
			 * the minimum gang block size so it must be out of
			 * space. We must notify the allocation throttle
			 * to start skipping allocation attempts to this
			 * metaslab group until more space becomes available.
			 * Note: this failure cannot be caused by the
			 * allocation throttle since the allocation throttle
			 * is only responsible for skipping devices and
			 * not failing block allocations.
			 */
			mg->mg_no_free_space = B_TRUE;
		}
	}
	mg->mg_allocations++;
	mutex_exit(&mg->mg_lock);
	return (offset);
}
/*
 * Allocate a block for the specified i/o.
 */
int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
    zio_alloc_list_t *zal, int allocator)
{
	metaslab_group_t *mg, *fast_mg, *rotor;
	vdev_t *vd;
	boolean_t try_hard = B_FALSE;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 * This will result in more split blocks when using device removal,
	 * and a large number of split blocks coupled with ztest-induced
	 * damage can result in extremely long reconstruction times. This
	 * will also test spilling from special to normal.
	 */
	if (psize >= metaslab_force_ganging && (spa_get_random(100) < 3)) {
		metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
		    allocator);
		return (SET_ERROR(ENOSPC));
	}

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_aliquot because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly. It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs. If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible. If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about. Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data. With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header. That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

		/*
		 * It's possible the vdev we're using as the hint no
		 * longer exists or its mg has been closed (e.g. by
		 * device removal). Consult the rotor when
		 * all else fails.
		 */
		if (vd != NULL && vd->vdev_mg != NULL) {
			mg = vd->vdev_mg;

			if (flags & METASLAB_HINTBP_AVOID &&
			    mg->mg_next != NULL)
				mg = mg->mg_next;
		} else {
			mg = mc->mc_rotor;
		}
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else if (flags & METASLAB_FASTWRITE) {
		mg = fast_mg = mc->mc_rotor;

		do {
			if (fast_mg->mg_vd->vdev_pending_fastwrite <
			    mg->mg_vd->vdev_pending_fastwrite)
				mg = fast_mg;
		} while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);
	} else {
		ASSERT(mc->mc_rotor != NULL);
		mg = mc->mc_rotor;
	}

	/*
	 * If the hint put us into the wrong metaslab class, or into a
	 * metaslab group that has been passivated, just follow the rotor.
	 */
	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
		mg = mc->mc_rotor;

	rotor = mg;
top:
	do {
		boolean_t allocatable;

		ASSERT(mg->mg_activation_count == 1);
		vd = mg->mg_vd;

		/*
		 * Don't allocate from faulted devices.
		 */
		if (try_hard) {
			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
			allocatable = vdev_allocatable(vd);
			spa_config_exit(spa, SCL_ZIO, FTAG);
		} else {
			allocatable = vdev_allocatable(vd);
		}

		/*
		 * Determine if the selected metaslab group is eligible
		 * for allocations. If we're ganging then don't allow
		 * this metaslab group to skip allocations since that would
		 * inadvertently return ENOSPC and suspend the pool
		 * even though space is still available.
		 */
		if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
			allocatable = metaslab_group_allocatable(mg, rotor,
			    psize, allocator, d);
		}

		if (!allocatable) {
			metaslab_trace_add(zal, mg, NULL, psize, d,
			    TRACE_NOT_ALLOCATABLE, allocator);
			goto next;
		}

		ASSERT(mg->mg_initialized);

		/*
		 * Avoid writing single-copy data to a failing,
		 * non-redundant vdev, unless we've already tried all
		 * other vdevs.
		 */
		if ((vd->vdev_stat.vs_write_errors > 0 ||
		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
		    d == 0 && !try_hard && vd->vdev_children == 0) {
			metaslab_trace_add(zal, mg, NULL, psize, d,
			    TRACE_VDEV_ERROR, allocator);
			goto next;
		}

		ASSERT(mg->mg_class == mc);

		uint64_t asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		/*
		 * If we don't need to try hard, then require that the
		 * block be on a different metaslab from any other DVAs
		 * in this BP (unique=true). If we are trying hard, then
		 * allow any metaslab to be used (unique=false).
		 */
		uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
		    !try_hard, dva, d, allocator);

		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 *
			 * Bias is also used to compensate for unequally
			 * sized vdevs so that space is allocated fairly.
			 */
			if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
				vdev_stat_t *vs = &vd->vdev_stat;
				int64_t vs_free = vs->vs_space - vs->vs_alloc;
				int64_t mc_free = mc->mc_space - mc->mc_alloc;
				int64_t ratio;

				/*
				 * Calculate how much more or less we should
				 * try to allocate from this device during
				 * this iteration around the rotor.
				 *
				 * This basically introduces a zero-centered
				 * bias towards the devices with the most
				 * free space, while compensating for vdev
				 * size differences.
				 *
				 * Examples:
				 *  vdev V1 = 16M/128M
				 *  vdev V2 = 16M/128M
				 *  ratio(V1) = 100% ratio(V2) = 100%
				 *
				 *  vdev V1 = 16M/128M
				 *  vdev V2 = 64M/128M
				 *  ratio(V1) = 127% ratio(V2) = 72%
				 *
				 *  vdev V1 = 16M/128M
				 *  vdev V2 = 64M/512M
				 *  ratio(V1) = 40% ratio(V2) = 160%
				 */
				ratio = (vs_free * mc->mc_alloc_groups * 100) /
				    (mc_free + 1);
				mg->mg_bias = ((ratio - 100) *
				    (int64_t)mg->mg_aliquot) / 100;
			} else if (!metaslab_bias_enabled) {
				mg->mg_bias = 0;
			}

			if ((flags & METASLAB_FASTWRITE) ||
			    atomic_add_64_nv(&mc->mc_aliquot, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_aliquot = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d],
			    ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
			DVA_SET_ASIZE(&dva[d], asize);

			if (flags & METASLAB_FASTWRITE) {
				atomic_add_64(&vd->vdev_pending_fastwrite,
				    psize);
			}

			return (0);
		}
next:
		mc->mc_rotor = mg->mg_next;
		mc->mc_aliquot = 0;
	} while ((mg = mg->mg_next) != rotor);

	/*
	 * If we haven't tried hard, do so now.
	 */
	if (!try_hard) {
		try_hard = B_TRUE;
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
	return (SET_ERROR(ENOSPC));
}
void
metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
    boolean_t checkpoint)
{
	metaslab_t *msp;
	spa_t *spa = vd->vdev_spa;

	ASSERT(vdev_is_concrete(vd));
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	VERIFY(!msp->ms_condensing);
	VERIFY3U(offset, >=, msp->ms_start);
	VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));

	metaslab_check_free_impl(vd, offset, asize);

	mutex_enter(&msp->ms_lock);
	if (range_tree_is_empty(msp->ms_freeing) &&
	    range_tree_is_empty(msp->ms_checkpointing)) {
		vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
	}

	if (checkpoint) {
		ASSERT(spa_has_checkpoint(spa));
		range_tree_add(msp->ms_checkpointing, offset, asize);
	} else {
		range_tree_add(msp->ms_freeing, offset, asize);
	}
	mutex_exit(&msp->ms_lock);
}
/* ARGSUSED */
void
metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	boolean_t *checkpoint = arg;

	ASSERT3P(checkpoint, !=, NULL);

	if (vd->vdev_ops->vdev_op_remap != NULL)
		vdev_indirect_mark_obsolete(vd, offset, size);
	else
		metaslab_free_impl(vd, offset, size, *checkpoint);
}

static void
metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
    boolean_t checkpoint)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
		return;

	if (spa->spa_vdev_removal != NULL &&
	    spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
	    vdev_is_concrete(vd)) {
		/*
		 * Note: we check if the vdev is concrete because when
		 * we complete the removal, we first change the vdev to be
		 * an indirect vdev (in open context), and then (in syncing
		 * context) clear spa_vdev_removal.
		 */
		free_from_removing_vdev(vd, offset, size);
	} else if (vd->vdev_ops->vdev_op_remap != NULL) {
		vdev_indirect_mark_obsolete(vd, offset, size);
		vd->vdev_ops->vdev_op_remap(vd, offset, size,
		    metaslab_free_impl_cb, &checkpoint);
	} else {
		metaslab_free_concrete(vd, offset, size, checkpoint);
	}
}
typedef struct remap_blkptr_cb_arg {
	blkptr_t *rbca_bp;
	spa_remap_cb_t rbca_cb;
	vdev_t *rbca_remap_vd;
	uint64_t rbca_remap_offset;
	void *rbca_cb_arg;
} remap_blkptr_cb_arg_t;

void
remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	remap_blkptr_cb_arg_t *rbca = arg;
	blkptr_t *bp = rbca->rbca_bp;

	/* We can not remap split blocks. */
	if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
		return;
	ASSERT0(inner_offset);

	if (rbca->rbca_cb != NULL) {
		/*
		 * At this point we know that we are not handling split
		 * blocks and we invoke the callback on the previous
		 * vdev which must be indirect.
		 */
		ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);

		rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
		    rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);

		/* set up remap_blkptr_cb_arg for the next call */
		rbca->rbca_remap_vd = vd;
		rbca->rbca_remap_offset = offset;
	}

	/*
	 * The phys birth time is that of dva[0]. This ensures that we know
	 * when each dva was written, so that resilver can determine which
	 * blocks need to be scrubbed (i.e. those written during the time
	 * the vdev was offline). It also ensures that the key used in
	 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If
	 * we didn't change the phys_birth, a lookup in the ARC for a
	 * remapped BP could find the data that was previously stored at
	 * this vdev + offset.
	 */
	vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
	    DVA_GET_VDEV(&bp->blk_dva[0]));
	vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
	bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
	    DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));

	DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
	DVA_SET_OFFSET(&bp->blk_dva[0], offset);
}
/*
 * If the block pointer contains any indirect DVAs, modify them to refer to
 * concrete DVAs. Note that this will sometimes not be possible, leaving
 * the indirect DVA in place. This happens if the indirect DVA spans multiple
 * segments in the mapping (i.e. it is a "split block").
 *
 * If the BP was remapped, calls the callback on the original dva (note the
 * callback can be called multiple times if the original indirect DVA refers
 * to another indirect DVA, etc).
 *
 * Returns TRUE if the BP was remapped.
 */
boolean_t
spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
{
	remap_blkptr_cb_arg_t rbca;

	if (!zfs_remap_blkptr_enable)
		return (B_FALSE);

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
		return (B_FALSE);

	/*
	 * Dedup BP's can not be remapped, because ddt_phys_select() depends
	 * on DVA[0] being the same in the BP as in the DDT (dedup table).
	 */
	if (BP_GET_DEDUP(bp))
		return (B_FALSE);

	/*
	 * Gang blocks can not be remapped, because
	 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
	 * the BP used to read the gang block header (GBH) being the same
	 * as the DVA[0] that we allocated for the GBH.
	 */
	if (BP_IS_GANG(bp))
		return (B_FALSE);

	/*
	 * Embedded BP's have no DVA to remap.
	 */
	if (BP_GET_NDVAS(bp) < 1)
		return (B_FALSE);

	/*
	 * Note: we only remap dva[0]. If we remapped other dvas, we
	 * would no longer know what their phys birth txg is.
	 */
	dva_t *dva = &bp->blk_dva[0];

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));

	if (vd->vdev_ops->vdev_op_remap == NULL)
		return (B_FALSE);

	rbca.rbca_bp = bp;
	rbca.rbca_cb = callback;
	rbca.rbca_remap_vd = vd;
	rbca.rbca_remap_offset = offset;
	rbca.rbca_cb_arg = arg;

	/*
	 * remap_blkptr_cb() will be called in order for each level of
	 * indirection, until a concrete vdev is reached or a split block is
	 * encountered. old_vd and old_offset are updated within the callback
	 * as we go from the one indirect vdev to the next one (either concrete
	 * or indirect again) in that order.
	 */
	vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);

	/* Check if the DVA wasn't remapped because it is a split block */
	if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
		return (B_FALSE);

	return (B_TRUE);
}
/*
 * Undo the allocation of a DVA which happened in the given transaction group.
 */
void
metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	metaslab_t *msp;
	vdev_t *vd;
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);

	ASSERT(DVA_IS_VALID(dva));
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset,
		    (u_longlong_t)size);
		return;
	}

	ASSERT(!vd->vdev_removing);
	ASSERT(vdev_is_concrete(vd));
	ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
	ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	mutex_enter(&msp->ms_lock);
	range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
	    offset, size);

	VERIFY(!msp->ms_condensing);
	VERIFY3U(offset, >=, msp->ms_start);
	VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
	VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
	    msp->ms_size);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	range_tree_add(msp->ms_allocatable, offset, size);
	mutex_exit(&msp->ms_lock);
}
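/*
 * Worked example (added for illustration): ms_allocating[] is a ring of
 * TXG_SIZE (4) range trees indexed by txg & TXG_MASK, so an allocation made
 * in txg 37 was recorded in ms_allocating[37 & 3] == ms_allocating[1].
 * Unallocating with the same txg therefore removes the range from exactly
 * the tree that the allocation path added it to.
 */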
/*
 * Free the block represented by the given DVA.
 */
void
metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd = vdev_lookup_top(spa, vdev);

	ASSERT(DVA_IS_VALID(dva));
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	if (DVA_GET_GANG(dva)) {
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
	}

	metaslab_free_impl(vd, offset, size, checkpoint);
}
/*
 * Reserve some allocation slots. The reservation system must be called
 * before we call into the allocator. If there aren't any available slots
 * then the I/O will be throttled until an I/O completes and its slots are
 * freed up. The function returns true if it was successful in placing
 * the reservation.
 */
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
    zio_t *zio, int flags)
{
	uint64_t available_slots = 0;
	boolean_t slot_reserved = B_FALSE;
	uint64_t max = mc->mc_alloc_max_slots[allocator];

	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);

	uint64_t reserved_slots =
	    zfs_refcount_count(&mc->mc_alloc_slots[allocator]);
	if (reserved_slots < max)
		available_slots = max - reserved_slots;

	if (slots <= available_slots || GANG_ALLOCATION(flags) ||
	    flags & METASLAB_MUST_RESERVE) {
		/*
		 * We reserve the slots individually so that we can unreserve
		 * them individually when an I/O completes.
		 */
		for (int d = 0; d < slots; d++) {
			zfs_refcount_add(&mc->mc_alloc_slots[allocator],
			    zio);
		}
		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
		slot_reserved = B_TRUE;
	}

	mutex_exit(&mc->mc_lock);
	return (slot_reserved);
}
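/*
 * Usage sketch (added for illustration; hypothetical caller, with field
 * names as assumed here): the throttle is consulted before an allocating
 * zio is issued, and the same slots are released when that zio completes.
 *
 *	if (metaslab_class_throttle_reserve(mc, zio->io_prop.zp_copies,
 *	    zio->io_allocator, zio, 0)) {
 *		// proceed to allocate DVAs for this zio
 *	} else {
 *		// throttled: requeue the zio until a slot frees up
 *	}
 *
 *	// later, when the zio completes:
 *	metaslab_class_throttle_unreserve(mc, zio->io_prop.zp_copies,
 *	    zio->io_allocator, zio);
 */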
void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
    int allocator, zio_t *zio)
{
	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);
	for (int d = 0; d < slots; d++) {
		(void) zfs_refcount_remove(&mc->mc_alloc_slots[allocator],
		    zio);
	}
	mutex_exit(&mc->mc_lock);
}
static int
metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
    uint64_t txg)
{
	metaslab_t *msp;
	int error = 0;
	spa_t *spa = vd->vdev_spa;

	if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
		return (SET_ERROR(ENXIO));

	ASSERT3P(vd->vdev_ms, !=, NULL);
	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	mutex_enter(&msp->ms_lock);

	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
		error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
		if (error == EBUSY) {
			ASSERT(msp->ms_loaded);
			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
			error = 0;
		}
	}

	if (error == 0 &&
	    !range_tree_contains(msp->ms_allocatable, offset, size))
		error = SET_ERROR(ENOENT);

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	VERIFY(!msp->ms_condensing);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
	    msp->ms_size);
	range_tree_remove(msp->ms_allocatable, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_allocating[txg & TXG_MASK],
		    offset, size);
	}

	mutex_exit(&msp->ms_lock);

	return (0);
}
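/*
 * Note (added for illustration): txg == 0 requests a dry run. For example,
 *
 *	error = metaslab_claim_concrete(vd, offset, size, 0);
 *
 * verifies that the range is still present in ms_allocatable (loading and
 * activating the metaslab if necessary), but drops ms_lock without removing
 * the range or dirtying the vdev; a later call with a real txg performs the
 * claim.
 */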
typedef struct metaslab_claim_cb_arg_t {
	uint64_t	mcca_txg;
	int		mcca_error;
} metaslab_claim_cb_arg_t;
static void
metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	metaslab_claim_cb_arg_t *mcca_arg = arg;

	if (mcca_arg->mcca_error == 0) {
		mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
		    size, mcca_arg->mcca_txg);
	}
}
int
metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
	if (vd->vdev_ops->vdev_op_remap != NULL) {
		metaslab_claim_cb_arg_t arg;

		/*
		 * Only zdb(1M) can claim on indirect vdevs. This is used
		 * to detect leaks of mapped space (that are not accounted
		 * for in the obsolete counts, spacemap, or bpobj).
		 */
		ASSERT(!spa_writeable(vd->vdev_spa));
		arg.mcca_error = 0;
		arg.mcca_txg = txg;

		vd->vdev_ops->vdev_op_remap(vd, offset, size,
		    metaslab_claim_impl_cb, &arg);

		if (arg.mcca_error == 0) {
			arg.mcca_error = metaslab_claim_concrete(vd,
			    offset, size, txg);
		}
		return (arg.mcca_error);
	} else {
		return (metaslab_claim_concrete(vd, offset, size, txg));
	}
}
/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
		return (SET_ERROR(ENXIO));
	}

	ASSERT(DVA_IS_VALID(dva));

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	return (metaslab_claim_impl(vd, offset, size, txg));
}
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
    zio_alloc_list_t *zal, zio_t *zio, int allocator)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
	ASSERT3P(zal, !=, NULL);

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags, zal, allocator);
		if (error != 0) {
			for (d--; d >= 0; d--) {
				metaslab_unalloc_dva(spa, &dva[d], txg);
				metaslab_group_alloc_decrement(spa,
				    DVA_GET_VDEV(&dva[d]), zio, flags,
				    allocator, B_FALSE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		} else {
			/*
			 * Update the metaslab group's queue depth
			 * based on the newly allocated dva.
			 */
			metaslab_group_alloc_increment(spa,
			    DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
		}
	}
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, 0);

	return (0);
}
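/*
 * Usage sketch (added for illustration; hypothetical caller and values):
 * a write wanting two copies of a psize-byte block in the normal class
 * would call
 *
 *	error = metaslab_alloc(spa, spa_normal_class(spa), psize, bp, 2,
 *	    txg, NULL, 0, &zio->io_alloc_list, zio, zio->io_allocator);
 *
 * On success, bp holds two DVAs (ideally on different top-level vdevs) and
 * its birth txg is set. On failure, every DVA allocated so far has already
 * been unwound, so the bp is left with no DVAs.
 */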
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	/*
	 * If we have a checkpoint for the pool we need to make sure that
	 * the blocks that we free that are part of the checkpoint won't be
	 * reused until the checkpoint is discarded or we revert to it.
	 *
	 * The checkpoint flag is passed down the metaslab_free code path
	 * and is set whenever we want to add a block to the checkpoint's
	 * accounting. That is, we "checkpoint" blocks that existed at the
	 * time the checkpoint was created and are therefore referenced by
	 * the checkpointed uberblock.
	 *
	 * Note that we don't checkpoint any blocks if the current
	 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
	 * normally as they will be referenced by the checkpointed uberblock.
	 */
	boolean_t checkpoint = B_FALSE;
	if (bp->blk_birth <= spa->spa_checkpoint_txg &&
	    spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
		/*
		 * At this point, if the block is part of the checkpoint
		 * there is no way it was created in the current txg.
		 */
		ASSERT3U(spa_syncing_txg(spa), ==, txg);
		checkpoint = B_TRUE;
	}

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++) {
		if (now) {
			metaslab_unalloc_dva(spa, &dva[d], txg);
		} else {
			ASSERT3U(txg, ==, spa_syncing_txg(spa));
			metaslab_free_dva(spa, &dva[d], checkpoint);
		}
	}

	spa_config_exit(spa, SCL_FREE, FTAG);
}
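/*
 * Worked example (added for illustration): suppose a checkpoint exists with
 * spa_checkpoint_txg == 100 and we are syncing txg 120. Freeing a block
 * born in txg 90 sets checkpoint = B_TRUE, so its space is tracked in
 * ms_checkpointing and won't be reused while the checkpoint exists.
 * Freeing a block born in txg 110 takes the normal path, since a rewind
 * to the checkpoint could never reference it.
 */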
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_claim_dva(spa, &dva[d], txg);
		if (error != 0)
			break;
	}

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}
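/*
 * Usage sketch (added for illustration; hypothetical caller): during pool
 * open after a crash, the intent-log replay claims each log block it walks
 * so the allocator cannot hand the block out again:
 *
 *	if (metaslab_claim(spa, bp, first_txg) != 0) {
 *		// the block is no longer free; the log chain ends here
 *	}
 *
 * The internal dry run (txg == 0) guarantees we never have to unwind a
 * partially claimed bp.
 */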
void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!BP_IS_EMBEDDED(bp));

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		atomic_add_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}
void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!BP_IS_EMBEDDED(bp));

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
		atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}
static void
metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	if (vd->vdev_ops == &vdev_indirect_ops)
		return;

	metaslab_check_free_impl(vd, offset, size);
}
static void
metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
{
	metaslab_t *msp;
	ASSERTV(spa_t *spa = vd->vdev_spa);

	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	if (vd->vdev_ops->vdev_op_remap != NULL) {
		vd->vdev_ops->vdev_op_remap(vd, offset, size,
		    metaslab_check_free_impl_cb, NULL);
		return;
	}

	ASSERT(vdev_is_concrete(vd));
	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	mutex_enter(&msp->ms_lock);
	if (msp->ms_loaded)
		range_tree_verify(msp->ms_allocatable, offset, size);

	range_tree_verify(msp->ms_freeing, offset, size);
	range_tree_verify(msp->ms_checkpointing, offset, size);
	range_tree_verify(msp->ms_freed, offset, size);
	for (int j = 0; j < TXG_DEFER_SIZE; j++)
		range_tree_verify(msp->ms_defer[j], offset, size);
	mutex_exit(&msp->ms_lock);
}
void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);

		if (DVA_GET_GANG(&bp->blk_dva[i]))
			size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

		ASSERT3P(vd, !=, NULL);

		metaslab_check_free_impl(vd, offset, size);
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
}
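/*
 * Note (added for illustration): these checks are compiled in but inert
 * unless the ZFS_DEBUG_ZIO_FREE bit (defined in sys/zfs_debug.h) is set in
 * the zfs_flags module parameter (on Linux, writable at
 * /sys/module/zfs/parameters/zfs_flags). With the bit set, every free
 * verifies that the range is absent from ms_allocatable and from all
 * pending-free trees, which catches double frees at the moment they happen.
 */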
#if defined(_KERNEL)
module_param(metaslab_aliquot, ulong, 0644);
MODULE_PARM_DESC(metaslab_aliquot,
	"allocation granularity (a.k.a. stripe size)");

module_param(metaslab_debug_load, int, 0644);
MODULE_PARM_DESC(metaslab_debug_load,
	"load all metaslabs when pool is first opened");

module_param(metaslab_debug_unload, int, 0644);
MODULE_PARM_DESC(metaslab_debug_unload,
	"prevent metaslabs from being unloaded");

module_param(metaslab_preload_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_preload_enabled,
	"preload potential metaslabs during reassessment");

module_param(zfs_mg_noalloc_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
	"percentage of free space for metaslab group to allow allocation");

module_param(zfs_mg_fragmentation_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
	"fragmentation for metaslab group to allow allocation");

module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
	"fragmentation for metaslab to allow allocation");

module_param(metaslab_fragmentation_factor_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
	"use the fragmentation metric to prefer less fragmented metaslabs");

module_param(metaslab_lba_weighting_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
	"prefer metaslabs with lower LBAs");

module_param(metaslab_bias_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_bias_enabled,
	"enable metaslab group biasing");

module_param(zfs_metaslab_segment_weight_enabled, int, 0644);
MODULE_PARM_DESC(zfs_metaslab_segment_weight_enabled,
	"enable segment-based metaslab selection");

module_param(zfs_metaslab_switch_threshold, int, 0644);
MODULE_PARM_DESC(zfs_metaslab_switch_threshold,
	"segment-based metaslab selection maximum buckets before switching");

module_param(metaslab_force_ganging, ulong, 0644);
MODULE_PARM_DESC(metaslab_force_ganging,
	"blocks larger than this size are forced to be gang blocks");
#endif