 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
#include <sys/zfs_context.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#define	WITH_DF_BLOCK_ALLOCATOR

#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))

/*
 * Metaslab granularity, in bytes. This is roughly similar to what would be
 * referred to as the "stripe size" in traditional RAID arrays. In normal
 * operation, we will try to write this amount of data to a top-level vdev
 * before moving on to the next one.
 */
unsigned long metaslab_aliquot = 512 << 10;
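/*
 * For example, with the default aliquot of 512K (512 << 10), a long
 * sequential write to a pool with three top-level vdevs is handed out in
 * roughly 512K chunks: vdev 0, then vdev 1, then vdev 2, and back to
 * vdev 0 again.
 */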
/*
 * For testing, make some blocks above a certain size be gang blocks.
 */
unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;

/*
 * Since we can touch multiple metaslabs (and their respective space maps)
 * with each transaction group, we benefit from having a smaller space map
 * block size since it allows us to issue more I/O operations scattered
 * around the disk.
 */
int zfs_metaslab_sm_blksz = (1 << 12);

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;

/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
int zfs_metaslab_condense_block_threshold = 4;
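/*
 * As a rough sketch of the combined heuristic described above: with the
 * defaults (zfs_condense_pct = 200, block threshold = 4, and a 4K space
 * map block size), a metaslab is only condensed when its on-disk space map
 * is at least twice the size of its in-core representation and occupies
 * more than 4 * 4K = 16K worth of space map blocks on disk.
 */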
/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or equal to
 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
 * then it will be skipped unless all metaslab groups within the metaslab
 * class have also crossed this threshold.
 */
int zfs_mg_fragmentation_threshold = 85;

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status, allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;

/*
 * When set will load all metaslabs when the pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;
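/*
 * For example, with the defaults above a metaslab keeps using first-fit
 * until either its largest free segment drops below
 * metaslab_df_alloc_threshold (SPA_OLD_MAXBLOCKSIZE, i.e. 128K) or less
 * than 4% of the metaslab is free, at which point metaslab_df_alloc()
 * switches to the size-sorted (best-fit) tree.
 */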
/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;

/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;
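/*
 * TXG_SIZE is 4, so with the default of TXG_SIZE * 2 a loaded metaslab
 * that has seen no allocations for 8 txgs becomes a candidate for
 * unloading.
 */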
/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;

/*
 * Enable/disable preloading of metaslabs.
 */
int metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
int metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
int metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
int metaslab_bias_enabled = B_TRUE;

/*
 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
 */
boolean_t zfs_remap_blkptr_enable = B_TRUE;

/*
 * Enable/disable segment-based metaslab selection.
 */
int zfs_metaslab_segment_weight_enabled = B_TRUE;

/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
int zfs_metaslab_switch_threshold = 2;

/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
#ifdef _METASLAB_TRACING
boolean_t metaslab_trace_enabled = B_TRUE;
#endif

/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached allowing for further investigation.
 */
#ifdef _METASLAB_TRACING
uint64_t metaslab_trace_max_entries = 5000;
#endif

/*
 * Maximum number of metaslabs per group that can be disabled
 * simultaneously.
 */
int max_disabled_ms = 3;
static uint64_t metaslab_weight(metaslab_t *);
static void metaslab_set_fragmentation(metaslab_t *);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);

static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);

#ifdef _METASLAB_TRACING
kmem_cache_t *metaslab_alloc_trace_cache;
#endif
239 * ==========================================================================
241 * ==========================================================================
244 metaslab_class_create(spa_t
*spa
, metaslab_ops_t
*ops
)
246 metaslab_class_t
*mc
;
248 mc
= kmem_zalloc(sizeof (metaslab_class_t
), KM_SLEEP
);
253 mutex_init(&mc
->mc_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
254 mc
->mc_alloc_slots
= kmem_zalloc(spa
->spa_alloc_count
*
255 sizeof (zfs_refcount_t
), KM_SLEEP
);
256 mc
->mc_alloc_max_slots
= kmem_zalloc(spa
->spa_alloc_count
*
257 sizeof (uint64_t), KM_SLEEP
);
258 for (int i
= 0; i
< spa
->spa_alloc_count
; i
++)
259 zfs_refcount_create_tracked(&mc
->mc_alloc_slots
[i
]);
265 metaslab_class_destroy(metaslab_class_t
*mc
)
267 ASSERT(mc
->mc_rotor
== NULL
);
268 ASSERT(mc
->mc_alloc
== 0);
269 ASSERT(mc
->mc_deferred
== 0);
270 ASSERT(mc
->mc_space
== 0);
271 ASSERT(mc
->mc_dspace
== 0);
273 for (int i
= 0; i
< mc
->mc_spa
->spa_alloc_count
; i
++)
274 zfs_refcount_destroy(&mc
->mc_alloc_slots
[i
]);
275 kmem_free(mc
->mc_alloc_slots
, mc
->mc_spa
->spa_alloc_count
*
276 sizeof (zfs_refcount_t
));
277 kmem_free(mc
->mc_alloc_max_slots
, mc
->mc_spa
->spa_alloc_count
*
279 mutex_destroy(&mc
->mc_lock
);
280 kmem_free(mc
, sizeof (metaslab_class_t
));
284 metaslab_class_validate(metaslab_class_t
*mc
)
286 metaslab_group_t
*mg
;
290 * Must hold one of the spa_config locks.
292 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALL
, RW_READER
) ||
293 spa_config_held(mc
->mc_spa
, SCL_ALL
, RW_WRITER
));
295 if ((mg
= mc
->mc_rotor
) == NULL
)
300 ASSERT(vd
->vdev_mg
!= NULL
);
301 ASSERT3P(vd
->vdev_top
, ==, vd
);
302 ASSERT3P(mg
->mg_class
, ==, mc
);
303 ASSERT3P(vd
->vdev_ops
, !=, &vdev_hole_ops
);
304 } while ((mg
= mg
->mg_next
) != mc
->mc_rotor
);
310 metaslab_class_space_update(metaslab_class_t
*mc
, int64_t alloc_delta
,
311 int64_t defer_delta
, int64_t space_delta
, int64_t dspace_delta
)
313 atomic_add_64(&mc
->mc_alloc
, alloc_delta
);
314 atomic_add_64(&mc
->mc_deferred
, defer_delta
);
315 atomic_add_64(&mc
->mc_space
, space_delta
);
316 atomic_add_64(&mc
->mc_dspace
, dspace_delta
);
320 metaslab_class_get_alloc(metaslab_class_t
*mc
)
322 return (mc
->mc_alloc
);
326 metaslab_class_get_deferred(metaslab_class_t
*mc
)
328 return (mc
->mc_deferred
);
332 metaslab_class_get_space(metaslab_class_t
*mc
)
334 return (mc
->mc_space
);
338 metaslab_class_get_dspace(metaslab_class_t
*mc
)
340 return (spa_deflate(mc
->mc_spa
) ? mc
->mc_dspace
: mc
->mc_space
);
344 metaslab_class_histogram_verify(metaslab_class_t
*mc
)
346 spa_t
*spa
= mc
->mc_spa
;
347 vdev_t
*rvd
= spa
->spa_root_vdev
;
351 if ((zfs_flags
& ZFS_DEBUG_HISTOGRAM_VERIFY
) == 0)
354 mc_hist
= kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
,
357 for (int c
= 0; c
< rvd
->vdev_children
; c
++) {
358 vdev_t
*tvd
= rvd
->vdev_child
[c
];
359 metaslab_group_t
*mg
= tvd
->vdev_mg
;
362 * Skip any holes, uninitialized top-levels, or
363 * vdevs that are not in this metaslab class.
365 if (!vdev_is_concrete(tvd
) || tvd
->vdev_ms_shift
== 0 ||
366 mg
->mg_class
!= mc
) {
370 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
371 mc_hist
[i
] += mg
->mg_histogram
[i
];
374 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
375 VERIFY3U(mc_hist
[i
], ==, mc
->mc_histogram
[i
]);
377 kmem_free(mc_hist
, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
);
381 * Calculate the metaslab class's fragmentation metric. The metric
382 * is weighted based on the space contribution of each metaslab group.
383 * The return value will be a number between 0 and 100 (inclusive), or
384 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
385 * zfs_frag_table for more information about the metric.
388 metaslab_class_fragmentation(metaslab_class_t
*mc
)
390 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
391 uint64_t fragmentation
= 0;
393 spa_config_enter(mc
->mc_spa
, SCL_VDEV
, FTAG
, RW_READER
);
395 for (int c
= 0; c
< rvd
->vdev_children
; c
++) {
396 vdev_t
*tvd
= rvd
->vdev_child
[c
];
397 metaslab_group_t
*mg
= tvd
->vdev_mg
;
400 * Skip any holes, uninitialized top-levels,
401 * or vdevs that are not in this metaslab class.
403 if (!vdev_is_concrete(tvd
) || tvd
->vdev_ms_shift
== 0 ||
404 mg
->mg_class
!= mc
) {
409 * If a metaslab group does not contain a fragmentation
410 * metric then just bail out.
412 if (mg
->mg_fragmentation
== ZFS_FRAG_INVALID
) {
413 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
414 return (ZFS_FRAG_INVALID
);
418 * Determine how much this metaslab_group is contributing
419 * to the overall pool fragmentation metric.
421 fragmentation
+= mg
->mg_fragmentation
*
422 metaslab_group_get_space(mg
);
424 fragmentation
/= metaslab_class_get_space(mc
);
426 ASSERT3U(fragmentation
, <=, 100);
427 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
428 return (fragmentation
);
432 * Calculate the amount of expandable space that is available in
433 * this metaslab class. If a device is expanded then its expandable
434 * space will be the amount of allocatable space that is currently not
435 * part of this metaslab class.
438 metaslab_class_expandable_space(metaslab_class_t
*mc
)
440 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
443 spa_config_enter(mc
->mc_spa
, SCL_VDEV
, FTAG
, RW_READER
);
444 for (int c
= 0; c
< rvd
->vdev_children
; c
++) {
445 vdev_t
*tvd
= rvd
->vdev_child
[c
];
446 metaslab_group_t
*mg
= tvd
->vdev_mg
;
448 if (!vdev_is_concrete(tvd
) || tvd
->vdev_ms_shift
== 0 ||
449 mg
->mg_class
!= mc
) {
454 * Calculate if we have enough space to add additional
455 * metaslabs. We report the expandable space in terms
456 * of the metaslab size since that's the unit of expansion.
458 space
+= P2ALIGN(tvd
->vdev_max_asize
- tvd
->vdev_asize
,
459 1ULL << tvd
->vdev_ms_shift
);
461 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
466 metaslab_compare(const void *x1
, const void *x2
)
468 const metaslab_t
*m1
= (const metaslab_t
*)x1
;
469 const metaslab_t
*m2
= (const metaslab_t
*)x2
;
473 if (m1
->ms_allocator
!= -1 && m1
->ms_primary
)
475 else if (m1
->ms_allocator
!= -1 && !m1
->ms_primary
)
477 if (m2
->ms_allocator
!= -1 && m2
->ms_primary
)
479 else if (m2
->ms_allocator
!= -1 && !m2
->ms_primary
)
483 * Sort inactive metaslabs first, then primaries, then secondaries. When
484 * selecting a metaslab to allocate from, an allocator first tries its
485 * primary, then secondary active metaslab. If it doesn't have active
486 * metaslabs, or can't allocate from them, it searches for an inactive
487 * metaslab to activate. If it can't find a suitable one, it will steal
488 * a primary or secondary metaslab from another allocator.
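 * Within each of those bands the tree is kept in descending-weight order,
 * with ties broken by the metaslab's start offset (see the comparisons
 * below), so the ordering is total.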
495 int cmp
= AVL_CMP(m2
->ms_weight
, m1
->ms_weight
);
499 IMPLY(AVL_CMP(m1
->ms_start
, m2
->ms_start
) == 0, m1
== m2
);
501 return (AVL_CMP(m1
->ms_start
, m2
->ms_start
));
505 metaslab_allocated_space(metaslab_t
*msp
)
507 return (msp
->ms_allocated_space
);
511 * Verify that the space accounting on disk matches the in-core range_trees.
514 metaslab_verify_space(metaslab_t
*msp
, uint64_t txg
)
516 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
517 uint64_t allocating
= 0;
518 uint64_t sm_free_space
, msp_free_space
;
520 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
521 ASSERT(!msp
->ms_condensing
);
523 if ((zfs_flags
& ZFS_DEBUG_METASLAB_VERIFY
) == 0)
527 * We can only verify the metaslab space when we're called
528 * from syncing context with a loaded metaslab that has an
529 * allocated space map. Calling this in non-syncing context
530 * does not provide a consistent view of the metaslab since
531 * we're performing allocations in the future.
533 if (txg
!= spa_syncing_txg(spa
) || msp
->ms_sm
== NULL
||
538 * Even though the smp_alloc field can get negative (e.g.
539 * see vdev_checkpoint_sm), that should never be the case
540 * when it comes to a metaslab's space map.
542 ASSERT3S(space_map_allocated(msp
->ms_sm
), >=, 0);
544 sm_free_space
= msp
->ms_size
- metaslab_allocated_space(msp
);
547 * Account for future allocations since we would have
548 * already deducted that space from the ms_allocatable.
550 for (int t
= 0; t
< TXG_CONCURRENT_STATES
; t
++) {
552 range_tree_space(msp
->ms_allocating
[(txg
+ t
) & TXG_MASK
]);
555 ASSERT3U(msp
->ms_deferspace
, ==,
556 range_tree_space(msp
->ms_defer
[0]) +
557 range_tree_space(msp
->ms_defer
[1]));
559 msp_free_space
= range_tree_space(msp
->ms_allocatable
) + allocating
+
560 msp
->ms_deferspace
+ range_tree_space(msp
->ms_freed
);
562 VERIFY3U(sm_free_space
, ==, msp_free_space
);
566 * ==========================================================================
568 * ==========================================================================
571 * Update the allocatable flag and the metaslab group's capacity.
572 * The allocatable flag is set to true if the free capacity is above
573 * the zfs_mg_noalloc_threshold and the fragmentation value is less than
574 * or equal to zfs_mg_fragmentation_threshold. If a metaslab group
575 * transitions from allocatable to non-allocatable or vice versa then the
576 * metaslab group's class is updated to reflect the transition.
579 metaslab_group_alloc_update(metaslab_group_t
*mg
)
581 vdev_t
*vd
= mg
->mg_vd
;
582 metaslab_class_t
*mc
= mg
->mg_class
;
583 vdev_stat_t
*vs
= &vd
->vdev_stat
;
584 boolean_t was_allocatable
;
585 boolean_t was_initialized
;
587 ASSERT(vd
== vd
->vdev_top
);
588 ASSERT3U(spa_config_held(mc
->mc_spa
, SCL_ALLOC
, RW_READER
), ==,
591 mutex_enter(&mg
->mg_lock
);
592 was_allocatable
= mg
->mg_allocatable
;
593 was_initialized
= mg
->mg_initialized
;
595 mg
->mg_free_capacity
= ((vs
->vs_space
- vs
->vs_alloc
) * 100) /
598 mutex_enter(&mc
->mc_lock
);
601 * If the metaslab group was just added then it won't
602 * have any space until we finish syncing out this txg.
603 * At that point we will consider it initialized and available
604 * for allocations. We also don't consider non-activated
605 * metaslab groups (e.g. vdevs that are in the middle of being removed)
606 * to be initialized, because they can't be used for allocation.
608 mg
->mg_initialized
= metaslab_group_initialized(mg
);
609 if (!was_initialized
&& mg
->mg_initialized
) {
611 } else if (was_initialized
&& !mg
->mg_initialized
) {
612 ASSERT3U(mc
->mc_groups
, >, 0);
615 if (mg
->mg_initialized
)
616 mg
->mg_no_free_space
= B_FALSE
;
619 * A metaslab group is considered allocatable if it has plenty
620 * of free space or is not heavily fragmented. We only take
621 * fragmentation into account if the metaslab group has a valid
622 * fragmentation metric (i.e. a value between 0 and 100).
624 mg
->mg_allocatable
= (mg
->mg_activation_count
> 0 &&
625 mg
->mg_free_capacity
> zfs_mg_noalloc_threshold
&&
626 (mg
->mg_fragmentation
== ZFS_FRAG_INVALID
||
627 mg
->mg_fragmentation
<= zfs_mg_fragmentation_threshold
));
630 * The mc_alloc_groups maintains a count of the number of
631 * groups in this metaslab class that are still above the
632 * zfs_mg_noalloc_threshold. This is used by the allocating
633 * threads to determine if they should avoid allocations to
634 * a given group. The allocator will avoid allocations to a group
635 * if that group has reached or is below the zfs_mg_noalloc_threshold
636 * and there are still other groups that are above the threshold.
637 * When a group transitions from allocatable to non-allocatable or
638 * vice versa we update the metaslab class to reflect that change.
639 * When the mc_alloc_groups value drops to 0 that means that all
640 * groups have reached the zfs_mg_noalloc_threshold making all groups
641 * eligible for allocations. This effectively means that all devices
642 * are balanced again.
644 if (was_allocatable
&& !mg
->mg_allocatable
)
645 mc
->mc_alloc_groups
--;
646 else if (!was_allocatable
&& mg
->mg_allocatable
)
647 mc
->mc_alloc_groups
++;
648 mutex_exit(&mc
->mc_lock
);
650 mutex_exit(&mg
->mg_lock
);
654 metaslab_group_create(metaslab_class_t
*mc
, vdev_t
*vd
, int allocators
)
656 metaslab_group_t
*mg
;
658 mg
= kmem_zalloc(sizeof (metaslab_group_t
), KM_SLEEP
);
659 mutex_init(&mg
->mg_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
660 mutex_init(&mg
->mg_ms_disabled_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
661 cv_init(&mg
->mg_ms_disabled_cv
, NULL
, CV_DEFAULT
, NULL
);
662 mg
->mg_primaries
= kmem_zalloc(allocators
* sizeof (metaslab_t
*),
664 mg
->mg_secondaries
= kmem_zalloc(allocators
* sizeof (metaslab_t
*),
666 avl_create(&mg
->mg_metaslab_tree
, metaslab_compare
,
667 sizeof (metaslab_t
), offsetof(struct metaslab
, ms_group_node
));
670 mg
->mg_activation_count
= 0;
671 mg
->mg_initialized
= B_FALSE
;
672 mg
->mg_no_free_space
= B_TRUE
;
673 mg
->mg_allocators
= allocators
;
675 mg
->mg_alloc_queue_depth
= kmem_zalloc(allocators
*
676 sizeof (zfs_refcount_t
), KM_SLEEP
);
677 mg
->mg_cur_max_alloc_queue_depth
= kmem_zalloc(allocators
*
678 sizeof (uint64_t), KM_SLEEP
);
679 for (int i
= 0; i
< allocators
; i
++) {
680 zfs_refcount_create_tracked(&mg
->mg_alloc_queue_depth
[i
]);
681 mg
->mg_cur_max_alloc_queue_depth
[i
] = 0;
684 mg
->mg_taskq
= taskq_create("metaslab_group_taskq", metaslab_load_pct
,
685 maxclsyspri
, 10, INT_MAX
, TASKQ_THREADS_CPU_PCT
| TASKQ_DYNAMIC
);
691 metaslab_group_destroy(metaslab_group_t
*mg
)
693 ASSERT(mg
->mg_prev
== NULL
);
694 ASSERT(mg
->mg_next
== NULL
);
696 * We may have gone below zero with the activation count
697 * either because we never activated in the first place or
698 * because we're done, and possibly removing the vdev.
700 ASSERT(mg
->mg_activation_count
<= 0);
702 taskq_destroy(mg
->mg_taskq
);
703 avl_destroy(&mg
->mg_metaslab_tree
);
704 kmem_free(mg
->mg_primaries
, mg
->mg_allocators
* sizeof (metaslab_t
*));
705 kmem_free(mg
->mg_secondaries
, mg
->mg_allocators
*
706 sizeof (metaslab_t
*));
707 mutex_destroy(&mg
->mg_lock
);
708 mutex_destroy(&mg
->mg_ms_disabled_lock
);
709 cv_destroy(&mg
->mg_ms_disabled_cv
);
711 for (int i
= 0; i
< mg
->mg_allocators
; i
++) {
712 zfs_refcount_destroy(&mg
->mg_alloc_queue_depth
[i
]);
713 mg
->mg_cur_max_alloc_queue_depth
[i
] = 0;
715 kmem_free(mg
->mg_alloc_queue_depth
, mg
->mg_allocators
*
716 sizeof (zfs_refcount_t
));
717 kmem_free(mg
->mg_cur_max_alloc_queue_depth
, mg
->mg_allocators
*
720 kmem_free(mg
, sizeof (metaslab_group_t
));
724 metaslab_group_activate(metaslab_group_t
*mg
)
726 metaslab_class_t
*mc
= mg
->mg_class
;
727 metaslab_group_t
*mgprev
, *mgnext
;
729 ASSERT3U(spa_config_held(mc
->mc_spa
, SCL_ALLOC
, RW_WRITER
), !=, 0);
731 ASSERT(mc
->mc_rotor
!= mg
);
732 ASSERT(mg
->mg_prev
== NULL
);
733 ASSERT(mg
->mg_next
== NULL
);
734 ASSERT(mg
->mg_activation_count
<= 0);
736 if (++mg
->mg_activation_count
<= 0)
739 mg
->mg_aliquot
= metaslab_aliquot
* MAX(1, mg
->mg_vd
->vdev_children
);
740 metaslab_group_alloc_update(mg
);
742 if ((mgprev
= mc
->mc_rotor
) == NULL
) {
746 mgnext
= mgprev
->mg_next
;
747 mg
->mg_prev
= mgprev
;
748 mg
->mg_next
= mgnext
;
749 mgprev
->mg_next
= mg
;
750 mgnext
->mg_prev
= mg
;
756 * Passivate a metaslab group and remove it from the allocation rotor.
757 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
758 * a metaslab group. This function will momentarily drop spa_config_locks
759 * that are lower than the SCL_ALLOC lock (see comment below).
762 metaslab_group_passivate(metaslab_group_t
*mg
)
764 metaslab_class_t
*mc
= mg
->mg_class
;
765 spa_t
*spa
= mc
->mc_spa
;
766 metaslab_group_t
*mgprev
, *mgnext
;
767 int locks
= spa_config_held(spa
, SCL_ALL
, RW_WRITER
);
769 ASSERT3U(spa_config_held(spa
, SCL_ALLOC
| SCL_ZIO
, RW_WRITER
), ==,
770 (SCL_ALLOC
| SCL_ZIO
));
772 if (--mg
->mg_activation_count
!= 0) {
773 ASSERT(mc
->mc_rotor
!= mg
);
774 ASSERT(mg
->mg_prev
== NULL
);
775 ASSERT(mg
->mg_next
== NULL
);
776 ASSERT(mg
->mg_activation_count
< 0);
781 * The spa_config_lock is an array of rwlocks, ordered as
782 * follows (from highest to lowest):
783 * SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
784 * SCL_ZIO > SCL_FREE > SCL_VDEV
785 * (For more information about the spa_config_lock see spa_misc.c)
786 * The higher the lock, the broader its coverage. When we passivate
787 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
788 * config locks. However, the metaslab group's taskq might be trying
789 * to preload metaslabs so we must drop the SCL_ZIO lock and any
790 * lower locks to allow the I/O to complete. At a minimum,
791 * we continue to hold the SCL_ALLOC lock, which prevents any future
792 * allocations from taking place and any changes to the vdev tree.
794 spa_config_exit(spa
, locks
& ~(SCL_ZIO
- 1), spa
);
795 taskq_wait_outstanding(mg
->mg_taskq
, 0);
796 spa_config_enter(spa
, locks
& ~(SCL_ZIO
- 1), spa
, RW_WRITER
);
797 metaslab_group_alloc_update(mg
);
798 for (int i
= 0; i
< mg
->mg_allocators
; i
++) {
799 metaslab_t
*msp
= mg
->mg_primaries
[i
];
801 mutex_enter(&msp
->ms_lock
);
802 metaslab_passivate(msp
,
803 metaslab_weight_from_range_tree(msp
));
804 mutex_exit(&msp
->ms_lock
);
806 msp
= mg
->mg_secondaries
[i
];
808 mutex_enter(&msp
->ms_lock
);
809 metaslab_passivate(msp
,
810 metaslab_weight_from_range_tree(msp
));
811 mutex_exit(&msp
->ms_lock
);
815 mgprev
= mg
->mg_prev
;
816 mgnext
= mg
->mg_next
;
821 mc
->mc_rotor
= mgnext
;
822 mgprev
->mg_next
= mgnext
;
823 mgnext
->mg_prev
= mgprev
;
831 metaslab_group_initialized(metaslab_group_t
*mg
)
833 vdev_t
*vd
= mg
->mg_vd
;
834 vdev_stat_t
*vs
= &vd
->vdev_stat
;
836 return (vs
->vs_space
!= 0 && mg
->mg_activation_count
> 0);
840 metaslab_group_get_space(metaslab_group_t
*mg
)
842 return ((1ULL << mg
->mg_vd
->vdev_ms_shift
) * mg
->mg_vd
->vdev_ms_count
);
846 metaslab_group_histogram_verify(metaslab_group_t
*mg
)
849 vdev_t
*vd
= mg
->mg_vd
;
850 uint64_t ashift
= vd
->vdev_ashift
;
853 if ((zfs_flags
& ZFS_DEBUG_HISTOGRAM_VERIFY
) == 0)
856 mg_hist
= kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
,
859 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE
, >=,
860 SPACE_MAP_HISTOGRAM_SIZE
+ ashift
);
862 for (int m
= 0; m
< vd
->vdev_ms_count
; m
++) {
863 metaslab_t
*msp
= vd
->vdev_ms
[m
];
866 /* skip if not active or not a member */
867 if (msp
->ms_sm
== NULL
|| msp
->ms_group
!= mg
)
870 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++)
871 mg_hist
[i
+ ashift
] +=
872 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
875 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
876 VERIFY3U(mg_hist
[i
], ==, mg
->mg_histogram
[i
]);
878 kmem_free(mg_hist
, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
);
882 metaslab_group_histogram_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
884 metaslab_class_t
*mc
= mg
->mg_class
;
885 uint64_t ashift
= mg
->mg_vd
->vdev_ashift
;
887 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
888 if (msp
->ms_sm
== NULL
)
891 mutex_enter(&mg
->mg_lock
);
892 for (int i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
893 mg
->mg_histogram
[i
+ ashift
] +=
894 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
895 mc
->mc_histogram
[i
+ ashift
] +=
896 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
898 mutex_exit(&mg
->mg_lock
);
902 metaslab_group_histogram_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
904 metaslab_class_t
*mc
= mg
->mg_class
;
905 uint64_t ashift
= mg
->mg_vd
->vdev_ashift
;
907 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
908 if (msp
->ms_sm
== NULL
)
911 mutex_enter(&mg
->mg_lock
);
912 for (int i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
913 ASSERT3U(mg
->mg_histogram
[i
+ ashift
], >=,
914 msp
->ms_sm
->sm_phys
->smp_histogram
[i
]);
915 ASSERT3U(mc
->mc_histogram
[i
+ ashift
], >=,
916 msp
->ms_sm
->sm_phys
->smp_histogram
[i
]);
918 mg
->mg_histogram
[i
+ ashift
] -=
919 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
920 mc
->mc_histogram
[i
+ ashift
] -=
921 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
923 mutex_exit(&mg
->mg_lock
);
927 metaslab_group_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
929 ASSERT(msp
->ms_group
== NULL
);
930 mutex_enter(&mg
->mg_lock
);
933 avl_add(&mg
->mg_metaslab_tree
, msp
);
934 mutex_exit(&mg
->mg_lock
);
936 mutex_enter(&msp
->ms_lock
);
937 metaslab_group_histogram_add(mg
, msp
);
938 mutex_exit(&msp
->ms_lock
);
942 metaslab_group_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
944 mutex_enter(&msp
->ms_lock
);
945 metaslab_group_histogram_remove(mg
, msp
);
946 mutex_exit(&msp
->ms_lock
);
948 mutex_enter(&mg
->mg_lock
);
949 ASSERT(msp
->ms_group
== mg
);
950 avl_remove(&mg
->mg_metaslab_tree
, msp
);
951 msp
->ms_group
= NULL
;
952 mutex_exit(&mg
->mg_lock
);
956 metaslab_group_sort_impl(metaslab_group_t
*mg
, metaslab_t
*msp
, uint64_t weight
)
958 ASSERT(MUTEX_HELD(&mg
->mg_lock
));
959 ASSERT(msp
->ms_group
== mg
);
960 avl_remove(&mg
->mg_metaslab_tree
, msp
);
961 msp
->ms_weight
= weight
;
962 avl_add(&mg
->mg_metaslab_tree
, msp
);
967 metaslab_group_sort(metaslab_group_t
*mg
, metaslab_t
*msp
, uint64_t weight
)
970 * Although in principle the weight can be any value, in
971 * practice we do not use values in the range [1, 511].
973 ASSERT(weight
>= SPA_MINBLOCKSIZE
|| weight
== 0);
974 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
976 mutex_enter(&mg
->mg_lock
);
977 metaslab_group_sort_impl(mg
, msp
, weight
);
978 mutex_exit(&mg
->mg_lock
);
982 * Calculate the fragmentation for a given metaslab group. We can use
983 * a simple average here since all metaslabs within the group must have
984 * the same size. The return value will be a value between 0 and 100
985 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
986 * group have a fragmentation metric.
989 metaslab_group_fragmentation(metaslab_group_t
*mg
)
991 vdev_t
*vd
= mg
->mg_vd
;
992 uint64_t fragmentation
= 0;
993 uint64_t valid_ms
= 0;
995 for (int m
= 0; m
< vd
->vdev_ms_count
; m
++) {
996 metaslab_t
*msp
= vd
->vdev_ms
[m
];
998 if (msp
->ms_fragmentation
== ZFS_FRAG_INVALID
)
1000 if (msp
->ms_group
!= mg
)
1004 fragmentation
+= msp
->ms_fragmentation
;
1007 if (valid_ms
<= mg
->mg_vd
->vdev_ms_count
/ 2)
1008 return (ZFS_FRAG_INVALID
);
1010 fragmentation
/= valid_ms
;
1011 ASSERT3U(fragmentation
, <=, 100);
1012 return (fragmentation
);
1016 * Determine if a given metaslab group should skip allocations. A metaslab
1017 * group should avoid allocations if its free capacity is less than the
1018 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
1019 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
1020 * that can still handle allocations. If the allocation throttle is enabled
1021 * then we skip allocations to devices that have reached their maximum
1022 * allocation queue depth unless the selected metaslab group is the only
1023 * eligible group remaining.
1026 metaslab_group_allocatable(metaslab_group_t
*mg
, metaslab_group_t
*rotor
,
1027 uint64_t psize
, int allocator
, int d
)
1029 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
1030 metaslab_class_t
*mc
= mg
->mg_class
;
1033 * We can only consider skipping this metaslab group if it's
1034 * in the normal metaslab class and there are other metaslab
1035 * groups to select from. Otherwise, we always consider it eligible for allocations.
1038 if ((mc
!= spa_normal_class(spa
) &&
1039 mc
!= spa_special_class(spa
) &&
1040 mc
!= spa_dedup_class(spa
)) ||
1045 * If the metaslab group's mg_allocatable flag is set (see comments
1046 * in metaslab_group_alloc_update() for more information) and
1047 * the allocation throttle is disabled then allow allocations to this
1048 * device. However, if the allocation throttle is enabled then
1049 * check if we have reached our allocation limit (mg_alloc_queue_depth)
1050 * to determine if we should allow allocations to this metaslab group.
1051 * If all metaslab groups are no longer considered allocatable
1052 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
1053 * gang block size then we allow allocations on this metaslab group
1054 * regardless of the mg_allocatable or throttle settings.
1056 if (mg
->mg_allocatable
) {
1057 metaslab_group_t
*mgp
;
1059 uint64_t qmax
= mg
->mg_cur_max_alloc_queue_depth
[allocator
];
1061 if (!mc
->mc_alloc_throttle_enabled
)
1065 * If this metaslab group does not have any free space, then
1066 * there is no point in looking further.
1068 if (mg
->mg_no_free_space
)
1072 * Relax allocation throttling for ditto blocks. Due to
1073 * random imbalances in allocation it tends to push copies
1074 * to the one vdev that looks a bit better at the moment.
1076 qmax
= qmax
* (4 + d
) / 4;
1078 qdepth
= zfs_refcount_count(
1079 &mg
->mg_alloc_queue_depth
[allocator
]);
1082 * If this metaslab group is below its qmax or it's
1083 * the only allocatable metaslab group, then attempt
1084 * to allocate from it.
1086 if (qdepth
< qmax
|| mc
->mc_alloc_groups
== 1)
1088 ASSERT3U(mc
->mc_alloc_groups
, >, 1);
1091 * Since this metaslab group is at or over its qmax, we
1092 * need to determine if there are metaslab groups after this
1093 * one that might be able to handle this allocation. This is
1094 * racy since we can't hold the locks for all metaslab
1095 * groups at the same time when we make this check.
1097 for (mgp
= mg
->mg_next
; mgp
!= rotor
; mgp
= mgp
->mg_next
) {
1098 qmax
= mgp
->mg_cur_max_alloc_queue_depth
[allocator
];
1099 qmax
= qmax
* (4 + d
) / 4;
1100 qdepth
= zfs_refcount_count(
1101 &mgp
->mg_alloc_queue_depth
[allocator
]);
1104 * If there is another metaslab group that
1105 * might be able to handle the allocation, then
1106 * we return false so that we skip this group.
1108 if (qdepth
< qmax
&& !mgp
->mg_no_free_space
)
1113 * We didn't find another group to handle the allocation
1114 * so we can't skip this metaslab group even though
1115 * we are at or over our qmax.
1119 } else if (mc
->mc_alloc_groups
== 0 || psize
== SPA_MINBLOCKSIZE
) {
1126 * ==========================================================================
1127 * Range tree callbacks
1128 * ==========================================================================
1132 * Comparison function for the private size-ordered tree. Tree is sorted
1133 * by size, larger sizes at the end of the tree.
1136 metaslab_rangesize_compare(const void *x1
, const void *x2
)
1138 const range_seg_t
*r1
= x1
;
1139 const range_seg_t
*r2
= x2
;
1140 uint64_t rs_size1
= r1
->rs_end
- r1
->rs_start
;
1141 uint64_t rs_size2
= r2
->rs_end
- r2
->rs_start
;
1143 int cmp
= AVL_CMP(rs_size1
, rs_size2
);
1147 return (AVL_CMP(r1
->rs_start
, r2
->rs_start
));
1151 * ==========================================================================
1152 * Common allocator routines
1153 * ==========================================================================
1157 * Return the maximum contiguous segment within the metaslab.
1160 metaslab_block_maxsize(metaslab_t
*msp
)
1162 avl_tree_t
*t
= &msp
->ms_allocatable_by_size
;
1165 if (t
== NULL
|| (rs
= avl_last(t
)) == NULL
)
1168 return (rs
->rs_end
- rs
->rs_start
);
1171 static range_seg_t
*
1172 metaslab_block_find(avl_tree_t
*t
, uint64_t start
, uint64_t size
)
1174 range_seg_t
*rs
, rsearch
;
1177 rsearch
.rs_start
= start
;
1178 rsearch
.rs_end
= start
+ size
;
1180 rs
= avl_find(t
, &rsearch
, &where
);
1182 rs
= avl_nearest(t
, where
, AVL_AFTER
);
1188 #if defined(WITH_FF_BLOCK_ALLOCATOR) || \
1189 defined(WITH_DF_BLOCK_ALLOCATOR) || \
1190 defined(WITH_CF_BLOCK_ALLOCATOR)
1192 * This is a helper function that can be used by the allocator to find
1193 * a suitable block to allocate. This will search the specified AVL
1194 * tree looking for a block that matches the specified criteria.
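 *
 * As a sketch: the search starts at *cursor, returns the first free
 * segment whose (aligned) start still leaves room for the requested size,
 * and advances the cursor past the allocation; if nothing is found the
 * cursor is reset to the start of the metaslab and the search is retried
 * once.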
1197 metaslab_block_picker(avl_tree_t
*t
, uint64_t *cursor
, uint64_t size
,
1200 range_seg_t
*rs
= metaslab_block_find(t
, *cursor
, size
);
1202 while (rs
!= NULL
) {
1203 uint64_t offset
= P2ROUNDUP(rs
->rs_start
, align
);
1205 if (offset
+ size
<= rs
->rs_end
) {
1206 *cursor
= offset
+ size
;
1209 rs
= AVL_NEXT(t
, rs
);
1213 * If we know we've searched the whole map (*cursor == 0), give up.
1214 * Otherwise, reset the cursor to the beginning and try again.
1220 return (metaslab_block_picker(t
, cursor
, size
, align
));
1222 #endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */
1224 #if defined(WITH_FF_BLOCK_ALLOCATOR)
1226 * ==========================================================================
1227 * The first-fit block allocator
1228 * ==========================================================================
1231 metaslab_ff_alloc(metaslab_t
*msp
, uint64_t size
)
1234 * Find the largest power of 2 block size that evenly divides the
1235 * requested size. This is used to try to allocate blocks with similar
1236 * alignment from the same area of the metaslab (i.e. same cursor
1237 * bucket) but it does not guarantee that other allocation sizes
1238 * may exist in the same region.
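 * For example, a 24K (0x6000) request gives size & -size == 8K, so the
 * cursor for the 8K alignment bucket (ms_lbas[highbit64(8K) - 1]) is used.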
1240 uint64_t align
= size
& -size
;
1241 uint64_t *cursor
= &msp
->ms_lbas
[highbit64(align
) - 1];
1242 avl_tree_t
*t
= &msp
->ms_allocatable
->rt_root
;
1244 return (metaslab_block_picker(t
, cursor
, size
, align
));
1247 static metaslab_ops_t metaslab_ff_ops
= {
1251 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_ff_ops
;
1252 #endif /* WITH_FF_BLOCK_ALLOCATOR */
1254 #if defined(WITH_DF_BLOCK_ALLOCATOR)
1256 * ==========================================================================
1257 * Dynamic block allocator -
1258 * Uses the first fit allocation scheme until space gets low and then
1259 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
1260 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1261 * ==========================================================================
1264 metaslab_df_alloc(metaslab_t
*msp
, uint64_t size
)
1267 * Find the largest power of 2 block size that evenly divides the
1268 * requested size. This is used to try to allocate blocks with similar
1269 * alignment from the same area of the metaslab (i.e. same cursor
1270 * bucket) but it does not guarantee that other allocation sizes
1271 * may exist in the same region.
1273 uint64_t align
= size
& -size
;
1274 uint64_t *cursor
= &msp
->ms_lbas
[highbit64(align
) - 1];
1275 range_tree_t
*rt
= msp
->ms_allocatable
;
1276 avl_tree_t
*t
= &rt
->rt_root
;
1277 uint64_t max_size
= metaslab_block_maxsize(msp
);
1278 int free_pct
= range_tree_space(rt
) * 100 / msp
->ms_size
;
1280 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1281 ASSERT3U(avl_numnodes(t
), ==,
1282 avl_numnodes(&msp
->ms_allocatable_by_size
));
1284 if (max_size
< size
)
1288 * If we're running low on space switch to using the size
1289 * sorted AVL tree (best-fit).
1291 if (max_size
< metaslab_df_alloc_threshold
||
1292 free_pct
< metaslab_df_free_pct
) {
1293 t
= &msp
->ms_allocatable_by_size
;
1297 return (metaslab_block_picker(t
, cursor
, size
, 1ULL));
1300 static metaslab_ops_t metaslab_df_ops
= {
1304 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_df_ops
;
1305 #endif /* WITH_DF_BLOCK_ALLOCATOR */
1307 #if defined(WITH_CF_BLOCK_ALLOCATOR)
1309 * ==========================================================================
1310 * Cursor fit block allocator -
1311 * Select the largest region in the metaslab, set the cursor to the beginning
1312 * of the range and the cursor_end to the end of the range. As allocations
1313 * are made advance the cursor. Continue allocating from the cursor until
1314 * the range is exhausted and then find a new range.
1315 * ==========================================================================
1318 metaslab_cf_alloc(metaslab_t
*msp
, uint64_t size
)
1320 range_tree_t
*rt
= msp
->ms_allocatable
;
1321 avl_tree_t
*t
= &msp
->ms_allocatable_by_size
;
1322 uint64_t *cursor
= &msp
->ms_lbas
[0];
1323 uint64_t *cursor_end
= &msp
->ms_lbas
[1];
1324 uint64_t offset
= 0;
1326 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1327 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&rt
->rt_root
));
1329 ASSERT3U(*cursor_end
, >=, *cursor
);
1331 if ((*cursor
+ size
) > *cursor_end
) {
1334 rs
= avl_last(&msp
->ms_allocatable_by_size
);
1335 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
)
1338 *cursor
= rs
->rs_start
;
1339 *cursor_end
= rs
->rs_end
;
1348 static metaslab_ops_t metaslab_cf_ops
= {
1352 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_cf_ops
;
1353 #endif /* WITH_CF_BLOCK_ALLOCATOR */
1355 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
1357 * ==========================================================================
1358 * New dynamic fit allocator -
1359 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1360 * contiguous blocks. If no region is found then just use the largest segment
1362 * ==========================================================================
1366 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1367 * to request from the allocator.
1369 uint64_t metaslab_ndf_clump_shift
= 4;
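/*
 * For example, with the default clump shift of 4 the allocator first looks
 * near the cursor for a free region big enough to hold about 2^4 = 16
 * allocations of the requested size before falling back to the largest
 * remaining segment.
 */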
1372 metaslab_ndf_alloc(metaslab_t
*msp
, uint64_t size
)
1374 avl_tree_t
*t
= &msp
->ms_allocatable
->rt_root
;
1376 range_seg_t
*rs
, rsearch
;
1377 uint64_t hbit
= highbit64(size
);
1378 uint64_t *cursor
= &msp
->ms_lbas
[hbit
- 1];
1379 uint64_t max_size
= metaslab_block_maxsize(msp
);
1381 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1382 ASSERT3U(avl_numnodes(t
), ==,
1383 avl_numnodes(&msp
->ms_allocatable_by_size
));
1385 if (max_size
< size
)
1388 rsearch
.rs_start
= *cursor
;
1389 rsearch
.rs_end
= *cursor
+ size
;
1391 rs
= avl_find(t
, &rsearch
, &where
);
1392 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
) {
1393 t
= &msp
->ms_allocatable_by_size
;
1395 rsearch
.rs_start
= 0;
1396 rsearch
.rs_end
= MIN(max_size
,
1397 1ULL << (hbit
+ metaslab_ndf_clump_shift
));
1398 rs
= avl_find(t
, &rsearch
, &where
);
1400 rs
= avl_nearest(t
, where
, AVL_AFTER
);
1404 if ((rs
->rs_end
- rs
->rs_start
) >= size
) {
1405 *cursor
= rs
->rs_start
+ size
;
1406 return (rs
->rs_start
);
1411 static metaslab_ops_t metaslab_ndf_ops
= {
1415 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_ndf_ops
;
1416 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
1420 * ==========================================================================
1422 * ==========================================================================
1426 metaslab_aux_histograms_clear(metaslab_t
*msp
)
1429 * Auxiliary histograms are only cleared when resetting them,
1430 * which can only happen while the metaslab is loaded.
1432 ASSERT(msp
->ms_loaded
);
1434 bzero(msp
->ms_synchist
, sizeof (msp
->ms_synchist
));
1435 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++)
1436 bzero(msp
->ms_deferhist
[t
], sizeof (msp
->ms_deferhist
[t
]));
1440 metaslab_aux_histogram_add(uint64_t *histogram
, uint64_t shift
,
1444 * This is modeled after space_map_histogram_add(), so refer to that
1445 * function for implementation details. We want this to work like
1446 * the space map histogram, and not the range tree histogram, as we
1447 * are essentially constructing a delta that will be later subtracted
1448 * from the space map histogram.
1451 for (int i
= shift
; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++) {
1452 ASSERT3U(i
, >=, idx
+ shift
);
1453 histogram
[idx
] += rt
->rt_histogram
[i
] << (i
- idx
- shift
);
1455 if (idx
< SPACE_MAP_HISTOGRAM_SIZE
- 1) {
1456 ASSERT3U(idx
+ shift
, ==, i
);
1458 ASSERT3U(idx
, <, SPACE_MAP_HISTOGRAM_SIZE
);
1464 * Called at every sync pass that the metaslab gets synced.
1466 * The reason is that we want our auxiliary histograms to be updated
1467 * wherever the metaslab's space map histogram is updated. This way
1468 * we stay consistent on which parts of the metaslab space map's
1469 * histogram are currently not available for allocations (e.g because
1470 * they are in the defer, freed, and freeing trees).
1473 metaslab_aux_histograms_update(metaslab_t
*msp
)
1475 space_map_t
*sm
= msp
->ms_sm
;
1479 * This is similar to the metaslab's space map histogram updates
1480 * that take place in metaslab_sync(). The only difference is that
1481 * we only care about segments that haven't made it into the
1482 * ms_allocatable tree yet.
1484 if (msp
->ms_loaded
) {
1485 metaslab_aux_histograms_clear(msp
);
1487 metaslab_aux_histogram_add(msp
->ms_synchist
,
1488 sm
->sm_shift
, msp
->ms_freed
);
1490 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1491 metaslab_aux_histogram_add(msp
->ms_deferhist
[t
],
1492 sm
->sm_shift
, msp
->ms_defer
[t
]);
1496 metaslab_aux_histogram_add(msp
->ms_synchist
,
1497 sm
->sm_shift
, msp
->ms_freeing
);
1501 * Called every time we are done syncing (writing to) the metaslab,
1502 * i.e. at the end of each sync pass.
1503 * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
1506 metaslab_aux_histograms_update_done(metaslab_t
*msp
, boolean_t defer_allowed
)
1508 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1509 space_map_t
*sm
= msp
->ms_sm
;
1513 * We came here from metaslab_init() when creating/opening a
1514 * pool, looking at a metaslab that hasn't had any allocations
1521 * This is similar to the actions that we take for the ms_freed
1522 * and ms_defer trees in metaslab_sync_done().
1524 uint64_t hist_index
= spa_syncing_txg(spa
) % TXG_DEFER_SIZE
;
1525 if (defer_allowed
) {
1526 bcopy(msp
->ms_synchist
, msp
->ms_deferhist
[hist_index
],
1527 sizeof (msp
->ms_synchist
));
1529 bzero(msp
->ms_deferhist
[hist_index
],
1530 sizeof (msp
->ms_deferhist
[hist_index
]));
1532 bzero(msp
->ms_synchist
, sizeof (msp
->ms_synchist
));
1536 * Ensure that the metaslab's weight and fragmentation are consistent
1537 * with the contents of the histogram (either the range tree's histogram
1538 * or the space map's, depending on whether the metaslab is loaded).
1541 metaslab_verify_weight_and_frag(metaslab_t
*msp
)
1543 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1545 if ((zfs_flags
& ZFS_DEBUG_METASLAB_VERIFY
) == 0)
1548 /* see comment in metaslab_verify_unflushed_changes() */
1549 if (msp
->ms_group
== NULL
)
1553 * Devices being removed always return a weight of 0 and leave
1554 * fragmentation and ms_max_size as is - there is nothing for
1555 * us to verify here.
1557 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
1558 if (vd
->vdev_removing
)
1562 * If the metaslab is dirty it probably means that we've done
1563 * some allocations or frees that have changed our histograms
1564 * and thus the weight.
1566 for (int t
= 0; t
< TXG_SIZE
; t
++) {
1567 if (txg_list_member(&vd
->vdev_ms_list
, msp
, t
))
1572 * This verification checks that our in-memory state is consistent
1573 * with what's on disk. If the pool is read-only then there aren't
1574 * any changes and we just have the initially-loaded state.
1576 if (!spa_writeable(msp
->ms_group
->mg_vd
->vdev_spa
))
1579 /* some extra verification for in-core tree if you can */
1580 if (msp
->ms_loaded
) {
1581 range_tree_stat_verify(msp
->ms_allocatable
);
1582 VERIFY(space_map_histogram_verify(msp
->ms_sm
,
1583 msp
->ms_allocatable
));
1586 uint64_t weight
= msp
->ms_weight
;
1587 uint64_t was_active
= msp
->ms_weight
& METASLAB_ACTIVE_MASK
;
1588 boolean_t space_based
= WEIGHT_IS_SPACEBASED(msp
->ms_weight
);
1589 uint64_t frag
= msp
->ms_fragmentation
;
1590 uint64_t max_segsize
= msp
->ms_max_size
;
1593 msp
->ms_fragmentation
= 0;
1594 msp
->ms_max_size
= 0;
1597 * This function is used for verification purposes. Regardless of
1598 * whether metaslab_weight() thinks this metaslab should be active or
1599 * not, we want to ensure that the actual weight (and therefore the
1600 * value of ms_weight) would be the same if it was to be recalculated
1603 msp
->ms_weight
= metaslab_weight(msp
) | was_active
;
1605 VERIFY3U(max_segsize
, ==, msp
->ms_max_size
);
1608 * If the weight type changed then there is no point in doing
1609 * verification. Revert fields to their original values.
1611 if ((space_based
&& !WEIGHT_IS_SPACEBASED(msp
->ms_weight
)) ||
1612 (!space_based
&& WEIGHT_IS_SPACEBASED(msp
->ms_weight
))) {
1613 msp
->ms_fragmentation
= frag
;
1614 msp
->ms_weight
= weight
;
1618 VERIFY3U(msp
->ms_fragmentation
, ==, frag
);
1619 VERIFY3U(msp
->ms_weight
, ==, weight
);
1623 * Wait for any in-progress metaslab loads to complete.
1626 metaslab_load_wait(metaslab_t
*msp
)
1628 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1630 while (msp
->ms_loading
) {
1631 ASSERT(!msp
->ms_loaded
);
1632 cv_wait(&msp
->ms_load_cv
, &msp
->ms_lock
);
1637 metaslab_load_impl(metaslab_t
*msp
)
1641 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1642 ASSERT(msp
->ms_loading
);
1643 ASSERT(!msp
->ms_condensing
);
1646 * We temporarily drop the lock to unblock other operations while we
1647 * are reading the space map. Therefore, metaslab_sync() and
1648 * metaslab_sync_done() can run at the same time as we do.
1650 * metaslab_sync() can append to the space map while we are loading.
1651 * Therefore we load only entries that existed when we started the
1652 * load. Additionally, metaslab_sync_done() has to wait for the load
1653 * to complete because there are potential races like metaslab_load()
1654 * loading parts of the space map that are currently being appended
1655 * by metaslab_sync(). If we didn't, the ms_allocatable would have
1656 * entries that metaslab_sync_done() would try to re-add later.
1658 * That's why before dropping the lock we remember the synced length
1659 * of the metaslab and read up to that point of the space map,
1660 * ignoring entries appended by metaslab_sync() that happen after we
1663 uint64_t length
= msp
->ms_synced_length
;
1664 mutex_exit(&msp
->ms_lock
);
1666 if (msp
->ms_sm
!= NULL
) {
1667 error
= space_map_load_length(msp
->ms_sm
, msp
->ms_allocatable
,
1671 * The space map has not been allocated yet, so treat
1672 * all the space in the metaslab as free and add it to the
1673 * ms_allocatable tree.
1675 range_tree_add(msp
->ms_allocatable
,
1676 msp
->ms_start
, msp
->ms_size
);
1680 * We need to grab the ms_sync_lock to prevent metaslab_sync() from
1681 * changing the ms_sm and the metaslab's range trees while we are
1682 * about to use them and populate the ms_allocatable. The ms_lock
1683 * is insufficient for this because metaslab_sync() doesn't hold
1684 * the ms_lock while writing the ms_checkpointing tree to disk.
1686 mutex_enter(&msp
->ms_sync_lock
);
1687 mutex_enter(&msp
->ms_lock
);
1688 ASSERT(!msp
->ms_condensing
);
1691 mutex_exit(&msp
->ms_sync_lock
);
1695 ASSERT3P(msp
->ms_group
, !=, NULL
);
1696 msp
->ms_loaded
= B_TRUE
;
1699 * The ms_allocatable contains the segments that exist in the
1700 * ms_defer trees [see ms_synced_length]. Thus we need to remove
1701 * them from ms_allocatable as they will be added again in
1702 * metaslab_sync_done().
1704 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1705 range_tree_walk(msp
->ms_defer
[t
],
1706 range_tree_remove
, msp
->ms_allocatable
);
1710 * Call metaslab_recalculate_weight_and_sort() now that the
1711 * metaslab is loaded so we get the metaslab's real weight.
1713 * Unless this metaslab was created with older software and
1714 * has not yet been converted to use segment-based weight, we
1715 * expect the new weight to be better or equal to the weight
1716 * that the metaslab had while it was not loaded. This is
1717 * because the old weight does not take into account the
1718 * consolidation of adjacent segments between TXGs. [see
1719 * comment for ms_synchist and ms_deferhist[] for more info]
1721 uint64_t weight
= msp
->ms_weight
;
1722 metaslab_recalculate_weight_and_sort(msp
);
1723 if (!WEIGHT_IS_SPACEBASED(weight
))
1724 ASSERT3U(weight
, <=, msp
->ms_weight
);
1725 msp
->ms_max_size
= metaslab_block_maxsize(msp
);
1727 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1728 metaslab_verify_space(msp
, spa_syncing_txg(spa
));
1729 mutex_exit(&msp
->ms_sync_lock
);
1735 metaslab_load(metaslab_t
*msp
)
1737 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1740 * There may be another thread loading the same metaslab, if that's
1741 * the case just wait until the other thread is done and return.
1743 metaslab_load_wait(msp
);
1746 VERIFY(!msp
->ms_loading
);
1747 ASSERT(!msp
->ms_condensing
);
1749 msp
->ms_loading
= B_TRUE
;
1750 int error
= metaslab_load_impl(msp
);
1751 msp
->ms_loading
= B_FALSE
;
1752 cv_broadcast(&msp
->ms_load_cv
);
1758 metaslab_unload(metaslab_t
*msp
)
1760 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1762 metaslab_verify_weight_and_frag(msp
);
1764 range_tree_vacate(msp
->ms_allocatable
, NULL
, NULL
);
1765 msp
->ms_loaded
= B_FALSE
;
1767 msp
->ms_weight
&= ~METASLAB_ACTIVE_MASK
;
1768 msp
->ms_max_size
= 0;
1771 * We explicitly recalculate the metaslab's weight based on its space
1772 * map (as it is now not loaded). We want unloaded metaslabs to always
1773 * have their weights calculated from the space map histograms, while
1774 * loaded ones have it calculated from their in-core range tree
1775 * [see metaslab_load()]. This way, the weight reflects the information
1776 * available in-core, whether it is loaded or not.
1778 * If ms_group == NULL, it means we came here from metaslab_fini(),
1779 * at which point it doesn't make sense for us to do the recalculation
1782 if (msp
->ms_group
!= NULL
)
1783 metaslab_recalculate_weight_and_sort(msp
);
1787 metaslab_space_update(vdev_t
*vd
, metaslab_class_t
*mc
, int64_t alloc_delta
,
1788 int64_t defer_delta
, int64_t space_delta
)
1790 vdev_space_update(vd
, alloc_delta
, defer_delta
, space_delta
);
1792 ASSERT3P(vd
->vdev_spa
->spa_root_vdev
, ==, vd
->vdev_parent
);
1793 ASSERT(vd
->vdev_ms_count
!= 0);
1795 metaslab_class_space_update(mc
, alloc_delta
, defer_delta
, space_delta
,
1796 vdev_deflated_space(vd
, space_delta
));
1800 metaslab_init(metaslab_group_t
*mg
, uint64_t id
, uint64_t object
, uint64_t txg
,
1803 vdev_t
*vd
= mg
->mg_vd
;
1804 spa_t
*spa
= vd
->vdev_spa
;
1805 objset_t
*mos
= spa
->spa_meta_objset
;
1809 ms
= kmem_zalloc(sizeof (metaslab_t
), KM_SLEEP
);
1810 mutex_init(&ms
->ms_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1811 mutex_init(&ms
->ms_sync_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1812 cv_init(&ms
->ms_load_cv
, NULL
, CV_DEFAULT
, NULL
);
1815 ms
->ms_start
= id
<< vd
->vdev_ms_shift
;
1816 ms
->ms_size
= 1ULL << vd
->vdev_ms_shift
;
1817 ms
->ms_allocator
= -1;
1818 ms
->ms_new
= B_TRUE
;
1821 * We only open space map objects that already exist. All others
1822 * will be opened when we finally allocate an object for it.
1825 * When called from vdev_expand(), we can't call into the DMU as
1826 * we are holding the spa_config_lock as a writer and we would
1827 * deadlock [see relevant comment in vdev_metaslab_init()]. In
1828 * that case, the object parameter is zero though, so we won't
1829 * call into the DMU.
1832 error
= space_map_open(&ms
->ms_sm
, mos
, object
, ms
->ms_start
,
1833 ms
->ms_size
, vd
->vdev_ashift
);
1836 kmem_free(ms
, sizeof (metaslab_t
));
1840 ASSERT(ms
->ms_sm
!= NULL
);
1841 ms
->ms_allocated_space
= space_map_allocated(ms
->ms_sm
);
1845 * We create the ms_allocatable here, but we don't create the
1846 * other range trees until metaslab_sync_done(). This serves
1847 * two purposes: it allows metaslab_sync_done() to detect the
1848 * addition of new space; and for debugging, it ensures that
1849 * we'd data fault on any attempt to use this metaslab before
1852 ms
->ms_allocatable
= range_tree_create_impl(&rt_avl_ops
,
1853 &ms
->ms_allocatable_by_size
, metaslab_rangesize_compare
, 0);
1855 ms
->ms_trim
= range_tree_create(NULL
, NULL
);
1857 metaslab_group_add(mg
, ms
);
1858 metaslab_set_fragmentation(ms
);
1861 * If we're opening an existing pool (txg == 0) or creating
1862 * a new one (txg == TXG_INITIAL), all space is available now.
1863 * If we're adding space to an existing pool, the new space
1864 * does not become available until after this txg has synced.
1865 * The metaslab's weight will also be initialized when we sync
1866 * out this txg. This ensures that we don't attempt to allocate
1867 * from it before we have initialized it completely.
1869 if (txg
<= TXG_INITIAL
) {
1870 metaslab_sync_done(ms
, 0);
1871 metaslab_space_update(vd
, mg
->mg_class
,
1872 metaslab_allocated_space(ms
), 0, 0);
1876 * If metaslab_debug_load is set and we're initializing a metaslab
1877 * that has an allocated space map object then load the space map
1878 * so that we can verify frees.
1880 if (metaslab_debug_load
&& ms
->ms_sm
!= NULL
) {
1881 mutex_enter(&ms
->ms_lock
);
1882 VERIFY0(metaslab_load(ms
));
1883 mutex_exit(&ms
->ms_lock
);
1887 vdev_dirty(vd
, 0, NULL
, txg
);
1888 vdev_dirty(vd
, VDD_METASLAB
, ms
, txg
);
1897 metaslab_fini(metaslab_t
*msp
)
1899 metaslab_group_t
*mg
= msp
->ms_group
;
1900 vdev_t
*vd
= mg
->mg_vd
;
1902 metaslab_group_remove(mg
, msp
);
1904 mutex_enter(&msp
->ms_lock
);
1905 VERIFY(msp
->ms_group
== NULL
);
1906 metaslab_space_update(vd
, mg
->mg_class
,
1907 -metaslab_allocated_space(msp
), 0, -msp
->ms_size
);
1909 space_map_close(msp
->ms_sm
);
1911 metaslab_unload(msp
);
1913 range_tree_destroy(msp
->ms_allocatable
);
1914 range_tree_destroy(msp
->ms_freeing
);
1915 range_tree_destroy(msp
->ms_freed
);
1917 for (int t
= 0; t
< TXG_SIZE
; t
++) {
1918 range_tree_destroy(msp
->ms_allocating
[t
]);
1921 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1922 range_tree_destroy(msp
->ms_defer
[t
]);
1924 ASSERT0(msp
->ms_deferspace
);
1926 range_tree_destroy(msp
->ms_checkpointing
);
1928 for (int t
= 0; t
< TXG_SIZE
; t
++)
1929 ASSERT(!txg_list_member(&vd
->vdev_ms_list
, msp
, t
));
1931 range_tree_vacate(msp
->ms_trim
, NULL
, NULL
);
1932 range_tree_destroy(msp
->ms_trim
);
1934 mutex_exit(&msp
->ms_lock
);
1935 cv_destroy(&msp
->ms_load_cv
);
1936 mutex_destroy(&msp
->ms_lock
);
1937 mutex_destroy(&msp
->ms_sync_lock
);
1938 ASSERT3U(msp
->ms_allocator
, ==, -1);
1940 kmem_free(msp
, sizeof (metaslab_t
));
#define	FRAGMENTATION_TABLE_SIZE	17

/*
 * This table defines a segment size based fragmentation metric that will
 * allow each metaslab to derive its own fragmentation value. This is done
 * by calculating the space in each bucket of the spacemap histogram and
 * multiplying that by the fragmentation metric in this table. Doing
 * this for all buckets and dividing it by the total amount of free
 * space in this metaslab (i.e. the total free space in all buckets) gives
 * us the fragmentation metric. This means that a high fragmentation metric
 * equates to most of the free space being comprised of small segments.
 * Conversely, if the metric is low, then most of the free space is in
 * large segments. A 10% change in fragmentation equates to approximately
 * double the number of segments.
 *
 * This table defines 0% fragmented space using 16MB segments. Testing has
 * shown that segments that are greater than or equal to 16MB do not suffer
 * from drastic performance problems. Using this value, we derive the rest
 * of the table. Since the fragmentation value is never stored on disk, it
 * is possible to change these calculations in the future.
 */
int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
	100,	/* 512B	*/
	100,	/* 1K	*/
	98,	/* 2K	*/
	95,	/* 4K	*/
	90,	/* 8K	*/
	80,	/* 16K	*/
	70,	/* 32K	*/
	60,	/* 64K	*/
	50,	/* 128K	*/
	40,	/* 256K	*/
	30,	/* 512K	*/
	20,	/* 1M	*/
	15,	/* 2M	*/
	10,	/* 4M	*/
	5,	/* 8M	*/
	0,	/* 16M	*/
	0	/* 32M	*/
};
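/*
 * Illustrative example of the metric above (hypothetical numbers, not from
 * this file): a metaslab with 60MB of free space in 8K segments and 40MB in
 * 1M segments would score roughly (60MB * 90 + 40MB * 20) / 100MB = 62,
 * i.e. it is reported as fairly fragmented because most of its free space
 * is made up of small segments. If all 100MB were in segments of 16MB or
 * larger, the metric would be 0.
 */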
/*
 * Calculate the metaslab's fragmentation metric and set ms_fragmentation.
 * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
 * been upgraded and does not support this metric. Otherwise, the return
 * value should be in the range [0, 100].
 */
static void
metaslab_set_fragmentation(metaslab_t *msp)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	uint64_t fragmentation = 0;
	uint64_t total = 0;
	boolean_t feature_enabled = spa_feature_is_enabled(spa,
	    SPA_FEATURE_SPACEMAP_HISTOGRAM);

	if (!feature_enabled) {
		msp->ms_fragmentation = ZFS_FRAG_INVALID;
		return;
	}

	/*
	 * A null space map means that the entire metaslab is free
	 * and thus is not fragmented.
	 */
	if (msp->ms_sm == NULL) {
		msp->ms_fragmentation = 0;
		return;
	}

	/*
	 * If this metaslab's space map has not been upgraded, flag it
	 * so that we upgrade next time we encounter it.
	 */
	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
		uint64_t txg = spa_syncing_txg(spa);
		vdev_t *vd = msp->ms_group->mg_vd;

		/*
		 * If we've reached the final dirty txg, then we must
		 * be shutting down the pool. We don't want to dirty
		 * any data past this point so skip setting the condense
		 * flag. We can retry this action the next time the pool
		 * is imported.
		 */
		if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
			msp->ms_condense_wanted = B_TRUE;
			vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
			zfs_dbgmsg("txg %llu, requesting force condense: "
			    "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
			    vd->vdev_id);
		}
		msp->ms_fragmentation = ZFS_FRAG_INVALID;
		return;
	}

	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		uint64_t space = 0;
		uint8_t shift = msp->ms_sm->sm_shift;

		int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
		    FRAGMENTATION_TABLE_SIZE - 1);

		if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
			continue;

		space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
		total += space;

		ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
		fragmentation += space * zfs_frag_table[idx];
	}

	if (total > 0)
		fragmentation /= total;
	ASSERT3U(fragmentation, <=, 100);

	msp->ms_fragmentation = fragmentation;
}
/*
 * Compute a weight -- a selection preference value -- for the given metaslab.
 * This is based on the amount of free space, the level of fragmentation,
 * the LBA range, and whether the metaslab is loaded.
 */
static uint64_t
metaslab_space_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(!vd->vdev_removing);

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = msp->ms_size - metaslab_allocated_space(msp);

	if (metaslab_fragmentation_factor_enabled &&
	    msp->ms_fragmentation != ZFS_FRAG_INVALID) {
		/*
		 * Use the fragmentation information to inversely scale
		 * down the baseline weight. We need to ensure that we
		 * don't exclude this metaslab completely when it's 100%
		 * fragmented. To avoid this we reduce the fragmented value
		 * by 1.
		 */
		space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;

		/*
		 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
		 * this metaslab again. The fragmentation metric may have
		 * decreased the space to something smaller than
		 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
		 * so that we can consume any remaining space.
		 */
		if (space > 0 && space < SPA_MINBLOCKSIZE)
			space = SPA_MINBLOCKSIZE;
	}
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1. We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
		weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
		ASSERT(weight >= space && weight <= 2 * space);
	}

	/*
	 * If this metaslab is one we're actively using, adjust its
	 * weight to make it preferable to any inactive metaslab so
	 * we'll polish it off. If the fragmentation on this metaslab
	 * has exceeded our threshold, then don't mark it active.
	 */
	if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
	    msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
	}

	WEIGHT_SET_SPACEBASED(weight);
	return (weight);
}
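/*
 * Example of the LBA weighting above (hypothetical values): with
 * vdev_ms_count = 200, metaslab 0 gets weight = 2 * space, metaslab 100
 * gets 1.5 * space, and metaslab 199 gets just over 1 * space, so the
 * outer (lower-numbered) metaslabs are preferred when free space is equal.
 */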
/*
 * Return the weight of the specified metaslab, according to the segment-based
 * weighting algorithm. The metaslab must be loaded. This function can
 * be called within a sync pass since it relies only on the metaslab's
 * range tree which is always accurate when the metaslab is loaded.
 */
static uint64_t
metaslab_weight_from_range_tree(metaslab_t *msp)
{
	uint64_t weight = 0;
	uint32_t segments = 0;

	ASSERT(msp->ms_loaded);

	for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
	    i--) {
		uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;

		segments <<= 1;
		segments += msp->ms_allocatable->rt_histogram[i];

		/*
		 * The range tree provides more precision than the space map
		 * and must be downgraded so that all values fit within the
		 * space map's histogram. This allows us to compare loaded
		 * vs. unloaded metaslabs to determine which metaslab is
		 * considered "best".
		 */
		if (i > max_idx)
			continue;

		if (segments != 0) {
			WEIGHT_SET_COUNT(weight, segments);
			WEIGHT_SET_INDEX(weight, i);
			WEIGHT_SET_ACTIVE(weight, 0);
			break;
		}
	}
	return (weight);
}
/*
 * Calculate the weight based on the on-disk histogram. This should only
 * be called after a sync pass has completely finished since the on-disk
 * information is updated in metaslab_sync().
 */
static uint64_t
metaslab_weight_from_spacemap(metaslab_t *msp)
{
	space_map_t *sm = msp->ms_sm;
	ASSERT(!msp->ms_loaded);
	ASSERT(sm != NULL);
	ASSERT3U(space_map_object(sm), !=, 0);
	ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));

	/*
	 * Create a joint histogram from all the segments that have made
	 * it to the metaslab's space map histogram, that are not yet
	 * available for allocation because they are still in the freeing
	 * pipeline (e.g. freeing, freed, and defer trees). Then subtract
	 * these segments from the space map's histogram to get a more
	 * accurate weight.
	 */
	uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
		deferspace_histogram[i] += msp->ms_synchist[i];
	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
		for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
			deferspace_histogram[i] += msp->ms_deferhist[t][i];
		}
	}

	uint64_t weight = 0;
	for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
		ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
		    deferspace_histogram[i]);
		uint64_t count =
		    sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
		if (count != 0) {
			WEIGHT_SET_COUNT(weight, count);
			WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
			WEIGHT_SET_ACTIVE(weight, 0);
			break;
		}
	}
	return (weight);
}
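/*
 * For illustration (hypothetical histogram): if bucket 7 of the on-disk
 * histogram holds 10 segments but 4 of them are still in the
 * freeing/freed/defer pipeline, the weight is computed from the remaining
 * 6, so an unloaded metaslab is not over-weighted by space that cannot
 * yet be allocated.
 */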
/*
 * Compute a segment-based weight for the specified metaslab. The weight
 * is determined by highest bucket in the histogram. The information
 * for the highest bucket is encoded into the weight value.
 */
static uint64_t
metaslab_segment_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	uint64_t weight = 0;
	uint8_t shift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * The metaslab is completely free.
	 */
	if (metaslab_allocated_space(msp) == 0) {
		int idx = highbit64(msp->ms_size) - 1;
		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;

		if (idx < max_idx) {
			WEIGHT_SET_COUNT(weight, 1ULL);
			WEIGHT_SET_INDEX(weight, idx);
		} else {
			WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
			WEIGHT_SET_INDEX(weight, max_idx);
		}
		WEIGHT_SET_ACTIVE(weight, 0);
		ASSERT(!WEIGHT_IS_SPACEBASED(weight));
		return (weight);
	}

	ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));

	/*
	 * If the metaslab is fully allocated then just make the weight 0.
	 */
	if (metaslab_allocated_space(msp) == msp->ms_size)
		return (0);
	/*
	 * If the metaslab is already loaded, then use the range tree to
	 * determine the weight. Otherwise, we rely on the space map information
	 * to generate the weight.
	 */
	if (msp->ms_loaded) {
		weight = metaslab_weight_from_range_tree(msp);
	} else {
		weight = metaslab_weight_from_spacemap(msp);
	}

	/*
	 * If the metaslab was active the last time we calculated its weight
	 * then keep it active. We want to consume the entire region that
	 * is associated with this weight.
	 */
	if (msp->ms_activation_weight != 0 && weight != 0)
		WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
	return (weight);
}
/*
 * Determine if we should attempt to allocate from this metaslab. If the
 * metaslab has a maximum size then we can quickly determine if the desired
 * allocation size can be satisfied. Otherwise, if we're using segment-based
 * weighting then we can determine the maximum allocation that this metaslab
 * can accommodate based on the index encoded in the weight. If we're using
 * space-based weights then rely on the entire weight (excluding the weight
 * type bit).
 */
static boolean_t
metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
{
	boolean_t should_allocate;

	if (msp->ms_max_size != 0)
		return (msp->ms_max_size >= asize);

	if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
		/*
		 * The metaslab segment weight indicates segments in the
		 * range [2^i, 2^(i+1)), where i is the index in the weight.
		 * Since the asize might be in the middle of the range, we
		 * should attempt the allocation if asize < 2^(i+1).
		 */
		should_allocate = (asize <
		    1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
	} else {
		should_allocate = (asize <=
		    (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
	}
	return (should_allocate);
}
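/*
 * Example (hypothetical): a segment-based weight with index i = 17
 * advertises free segments in the range [128K, 256K). An allocation of
 * asize = 200K is attempted (200K < 2^18 = 256K), while an allocation of
 * 300K is not.
 */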
static uint64_t
metaslab_weight(metaslab_t *msp)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	uint64_t weight;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * If this vdev is in the process of being removed, there is nothing
	 * for us to do here.
	 */
	if (vd->vdev_removing)
		return (0);

	metaslab_set_fragmentation(msp);

	/*
	 * Update the maximum size if the metaslab is loaded. This will
	 * ensure that we get an accurate maximum size if newly freed space
	 * has been added back into the free tree.
	 */
	if (msp->ms_loaded)
		msp->ms_max_size = metaslab_block_maxsize(msp);
	else
		ASSERT0(msp->ms_max_size);

	/*
	 * Segment-based weighting requires space map histogram support.
	 */
	if (zfs_metaslab_segment_weight_enabled &&
	    spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
	    sizeof (space_map_phys_t))) {
		weight = metaslab_segment_weight(msp);
	} else {
		weight = metaslab_space_weight(msp);
	}
	return (weight);
}

void
metaslab_recalculate_weight_and_sort(metaslab_t *msp)
{
	/* note: we preserve the mask (e.g. indication of primary, etc..) */
	uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
	metaslab_group_sort(msp->ms_group, msp,
	    metaslab_weight(msp) | was_active);
}
static int
metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
    int allocator, uint64_t activation_weight)
{
	/*
	 * If we're activating for the claim code, we don't want to actually
	 * set the metaslab up for a specific allocator.
	 */
	if (activation_weight == METASLAB_WEIGHT_CLAIM)
		return (0);

	metaslab_t **arr = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
	    mg->mg_primaries : mg->mg_secondaries);

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	mutex_enter(&mg->mg_lock);
	if (arr[allocator] != NULL) {
		mutex_exit(&mg->mg_lock);
		return (EEXIST);
	}

	arr[allocator] = msp;
	ASSERT3S(msp->ms_allocator, ==, -1);
	msp->ms_allocator = allocator;
	msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
	mutex_exit(&mg->mg_lock);

	return (0);
}

static int
metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		int error = metaslab_load(msp);
		if (error != 0) {
			metaslab_group_sort(msp->ms_group, msp, 0);
			return (error);
		}
		if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
			/*
			 * The metaslab was activated for another allocator
			 * while we were waiting, we should reselect.
			 */
			return (SET_ERROR(EBUSY));
		}
		if ((error = metaslab_activate_allocator(msp->ms_group, msp,
		    allocator, activation_weight)) != 0) {
			return (error);
		}

		msp->ms_activation_weight = msp->ms_weight;
		metaslab_group_sort(msp->ms_group, msp,
		    msp->ms_weight | activation_weight);
	}
	ASSERT(msp->ms_loaded);
	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (0);
}
static void
metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
    uint64_t weight)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
		metaslab_group_sort(mg, msp, weight);
		return;
	}

	mutex_enter(&mg->mg_lock);
	ASSERT3P(msp->ms_group, ==, mg);
	if (msp->ms_primary) {
		ASSERT3U(0, <=, msp->ms_allocator);
		ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
		ASSERT3P(mg->mg_primaries[msp->ms_allocator], ==, msp);
		ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
		mg->mg_primaries[msp->ms_allocator] = NULL;
	} else {
		ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
		ASSERT3P(mg->mg_secondaries[msp->ms_allocator], ==, msp);
		mg->mg_secondaries[msp->ms_allocator] = NULL;
	}
	msp->ms_allocator = -1;
	metaslab_group_sort_impl(mg, msp, weight);
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_passivate(metaslab_t *msp, uint64_t weight)
{
	ASSERTV(uint64_t size = weight & ~METASLAB_WEIGHT_TYPE);

	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again. In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
	    size >= SPA_MINBLOCKSIZE ||
	    range_tree_space(msp->ms_allocatable) == 0);
	ASSERT0(weight & METASLAB_ACTIVE_MASK);

	msp->ms_activation_weight = 0;
	metaslab_passivate_allocator(msp->ms_group, msp, weight);
	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}
/*
 * Segment-based metaslabs are activated once and remain active until
 * we either fail an allocation attempt (similar to space-based metaslabs)
 * or have exhausted the free space in zfs_metaslab_switch_threshold
 * buckets since the metaslab was activated. This function checks to see
 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
 * metaslab and passivates it proactively. This will allow us to select a
 * metaslab with a larger contiguous region, if any, remaining within this
 * metaslab group. If we're in sync pass > 1, then we continue using this
 * metaslab so that we don't dirty more blocks and cause more sync passes.
 */
static void
metaslab_segment_may_passivate(metaslab_t *msp)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;

	if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
		return;

	/*
	 * Since we are in the middle of a sync pass, the most accurate
	 * information that is accessible to us is the in-core range tree
	 * histogram; calculate the new weight based on that information.
	 */
	uint64_t weight = metaslab_weight_from_range_tree(msp);
	int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
	int current_idx = WEIGHT_GET_INDEX(weight);

	if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
		metaslab_passivate(msp, weight);
}
static void
metaslab_preload(void *arg)
{
	metaslab_t *msp = arg;
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));

	mutex_enter(&msp->ms_lock);
	(void) metaslab_load(msp);
	msp->ms_selected_txg = spa_syncing_txg(spa);
	mutex_exit(&msp->ms_lock);
	spl_fstrans_unmark(cookie);
}
static void
metaslab_group_preload(metaslab_group_t *mg)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	int m = 0;

	if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
		taskq_wait_outstanding(mg->mg_taskq, 0);
		return;
	}

	mutex_enter(&mg->mg_lock);

	/*
	 * Load the next potential metaslabs
	 */
	for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
		ASSERT3P(msp->ms_group, ==, mg);

		/*
		 * We preload only the maximum number of metaslabs specified
		 * by metaslab_preload_limit. If a metaslab is being forced
		 * to condense then we preload it too. This will ensure
		 * that force condensing happens in the next txg.
		 */
		if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
			continue;
		}

		VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
		    msp, TQ_SLEEP) != TASKQID_INVALID);
	}
	mutex_exit(&mg->mg_lock);
}
/*
 * Determine if the space map's on-disk footprint is past our tolerance
 * for inefficiency. We would like to use the following criteria to make
 * our decision:
 *
 * 1. The size of the space map object should not dramatically increase as a
 *    result of writing out the free space range tree.
 *
 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
 *    times the size of the free space range tree representation
 *    (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
 *
 * 3. The on-disk size of the space map should actually decrease.
 *
 * Unfortunately, we cannot compute the on-disk size of the space map in this
 * context because we cannot accurately compute the effects of compression, etc.
 * Instead, we apply the heuristic described in the block comment for
 * zfs_metaslab_condense_block_threshold - we only condense if the space used
 * is greater than a threshold number of blocks.
 */
static boolean_t
metaslab_should_condense(metaslab_t *msp)
{
	space_map_t *sm = msp->ms_sm;
	vdev_t *vd = msp->ms_group->mg_vd;
	uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
	uint64_t current_txg = spa_syncing_txg(vd->vdev_spa);

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(msp->ms_loaded);

	/*
	 * Allocations and frees in early passes are generally more space
	 * efficient (in terms of blocks described in space map entries)
	 * than the ones in later passes (e.g. we don't compress after
	 * sync pass 5) and condensing a metaslab multiple times in a txg
	 * could degrade performance.
	 *
	 * Thus we prefer condensing each metaslab at most once every txg at
	 * the earliest sync pass possible. If a metaslab is eligible for
	 * condensing again after being considered for condensing within the
	 * same txg, it will hopefully be dirty in the next txg where it will
	 * be condensed at an earlier pass.
	 */
	if (msp->ms_condense_checked_txg == current_txg)
		return (B_FALSE);
	msp->ms_condense_checked_txg = current_txg;

	/*
	 * We always condense metaslabs that are empty and metaslabs for
	 * which a condense request has been made.
	 */
	if (avl_is_empty(&msp->ms_allocatable_by_size) ||
	    msp->ms_condense_wanted)
		return (B_TRUE);

	uint64_t object_size = space_map_length(msp->ms_sm);
	uint64_t optimal_size = space_map_estimate_optimal_size(sm,
	    msp->ms_allocatable, SM_NO_VDEVID);

	dmu_object_info_t doi;
	dmu_object_info_from_db(sm->sm_dbuf, &doi);
	uint64_t record_size = MAX(doi.doi_data_block_size, vdev_blocksize);

	return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
	    object_size > zfs_metaslab_condense_block_threshold * record_size);
}
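/*
 * Worked example of the heuristic above (hypothetical sizes, assuming the
 * shipped defaults zfs_condense_pct = 200 and
 * zfs_metaslab_condense_block_threshold = 4): a space map whose on-disk
 * length is 1MB is condensed only if its estimated optimal size is at most
 * 512KB *and* 1MB exceeds four records (e.g. 4 * 128K = 512KB with 128K
 * data blocks).
 */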
/*
 * Condense the on-disk space map representation to its minimized form.
 * The minimized form consists of a small number of allocations followed by
 * the entries of the free range tree.
 */
static void
metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
{
	range_tree_t *condense_tree;
	space_map_t *sm = msp->ms_sm;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(msp->ms_loaded);

	zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
	    "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
	    msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
	    msp->ms_group->mg_vd->vdev_spa->spa_name,
	    space_map_length(msp->ms_sm),
	    avl_numnodes(&msp->ms_allocatable->rt_root),
	    msp->ms_condense_wanted ? "TRUE" : "FALSE");

	msp->ms_condense_wanted = B_FALSE;

	/*
	 * Create a range tree that is 100% allocated. We remove segments
	 * that have been freed in this txg, any deferred frees that exist,
	 * and any allocation in the future. Removing segments should be
	 * a relatively inexpensive operation since we expect these trees to
	 * have a small number of nodes.
	 */
	condense_tree = range_tree_create(NULL, NULL);
	range_tree_add(condense_tree, msp->ms_start, msp->ms_size);

	range_tree_walk(msp->ms_freeing, range_tree_remove, condense_tree);
	range_tree_walk(msp->ms_freed, range_tree_remove, condense_tree);

	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
		range_tree_walk(msp->ms_defer[t],
		    range_tree_remove, condense_tree);
	}

	for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
		range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
		    range_tree_remove, condense_tree);
	}

	/*
	 * We're about to drop the metaslab's lock thus allowing
	 * other consumers to change its content. Set the
	 * metaslab's ms_condensing flag to ensure that
	 * allocations on this metaslab do not occur while we're
	 * in the middle of committing it to disk. This is only critical
	 * for ms_allocatable as all other range trees use per txg
	 * views of their content.
	 */
	msp->ms_condensing = B_TRUE;

	mutex_exit(&msp->ms_lock);
	space_map_truncate(sm, zfs_metaslab_sm_blksz, tx);

	/*
	 * While we would ideally like to create a space map representation
	 * that consists only of allocation records, doing so can be
	 * prohibitively expensive because the in-core free tree can be
	 * large, and therefore computationally expensive to subtract
	 * from the condense_tree. Instead we sync out two trees, a cheap
	 * allocation only tree followed by the in-core free tree. While not
	 * optimal, this is typically close to optimal, and much cheaper to
	 * compute.
	 */
	space_map_write(sm, condense_tree, SM_ALLOC, SM_NO_VDEVID, tx);
	range_tree_vacate(condense_tree, NULL, NULL);
	range_tree_destroy(condense_tree);

	space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
	mutex_enter(&msp->ms_lock);
	msp->ms_condensing = B_FALSE;
}
/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa_meta_objset(spa);
	range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
	dmu_tx_t *tx;
	uint64_t object = space_map_object(msp->ms_sm);

	ASSERT(!vd->vdev_ishole);

	/*
	 * This metaslab has just been added so there's no work to do now.
	 */
	if (msp->ms_freeing == NULL) {
		ASSERT3P(alloctree, ==, NULL);
		return;
	}

	ASSERT3P(alloctree, !=, NULL);
	ASSERT3P(msp->ms_freeing, !=, NULL);
	ASSERT3P(msp->ms_freed, !=, NULL);
	ASSERT3P(msp->ms_checkpointing, !=, NULL);
	ASSERT3P(msp->ms_trim, !=, NULL);

	/*
	 * Normally, we don't want to process a metaslab if there are no
	 * allocations or frees to perform. However, if the metaslab is being
	 * forced to condense and it's loaded, we need to let it through.
	 */
	if (range_tree_is_empty(alloctree) &&
	    range_tree_is_empty(msp->ms_freeing) &&
	    range_tree_is_empty(msp->ms_checkpointing) &&
	    !(msp->ms_loaded && msp->ms_condense_wanted))
		return;

	VERIFY(txg <= spa_final_dirty_txg(spa));

	/*
	 * The only state that can actually be changing concurrently
	 * with metaslab_sync() is the metaslab's ms_allocatable. No
	 * other thread can be modifying this txg's alloc, freeing,
	 * freed, or space_map_phys_t. We drop ms_lock whenever we
	 * could call into the DMU, because the DMU can call down to
	 * us (e.g. via zio_free()) at any time.
	 *
	 * The spa_vdev_remove_thread() can be reading metaslab state
	 * concurrently, and it is locked out by the ms_sync_lock.
	 * Note that the ms_lock is insufficient for this, because it
	 * is dropped by space_map_write().
	 */
	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	if (msp->ms_sm == NULL) {
		uint64_t new_object;

		new_object = space_map_alloc(mos, zfs_metaslab_sm_blksz, tx);
		VERIFY3U(new_object, !=, 0);

		VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
		    msp->ms_start, msp->ms_size, vd->vdev_ashift));

		ASSERT(msp->ms_sm != NULL);
		ASSERT0(metaslab_allocated_space(msp));
	}

	if (!range_tree_is_empty(msp->ms_checkpointing) &&
	    vd->vdev_checkpoint_sm == NULL) {
		ASSERT(spa_has_checkpoint(spa));

		uint64_t new_object = space_map_alloc(mos,
		    vdev_standard_sm_blksz, tx);
		VERIFY3U(new_object, !=, 0);

		VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
		    mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);

		/*
		 * We save the space map object as an entry in vdev_top_zap
		 * so it can be retrieved when the pool is reopened after an
		 * export or through zdb.
		 */
		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
		    sizeof (new_object), 1, &new_object, tx));
	}

	mutex_enter(&msp->ms_sync_lock);
	mutex_enter(&msp->ms_lock);

	/*
	 * Note: metaslab_condense() clears the space map's histogram.
	 * Therefore we must verify and remove this histogram before
	 * condensing.
	 */
	metaslab_group_histogram_verify(mg);
	metaslab_class_histogram_verify(mg->mg_class);
	metaslab_group_histogram_remove(mg, msp);

	if (msp->ms_loaded && metaslab_should_condense(msp)) {
		metaslab_condense(msp, txg, tx);
	} else {
		mutex_exit(&msp->ms_lock);
		space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
		    SM_NO_VDEVID, tx);
		space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
		    SM_NO_VDEVID, tx);
		mutex_enter(&msp->ms_lock);
	}

	msp->ms_allocated_space += range_tree_space(alloctree);
	ASSERT3U(msp->ms_allocated_space, >=,
	    range_tree_space(msp->ms_freeing));
	msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);

	if (!range_tree_is_empty(msp->ms_checkpointing)) {
		ASSERT(spa_has_checkpoint(spa));
		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);

		/*
		 * Since we are doing writes to disk and the ms_checkpointing
		 * tree won't be changing during that time, we drop the
		 * ms_lock while writing to the checkpoint space map.
		 */
		mutex_exit(&msp->ms_lock);
		space_map_write(vd->vdev_checkpoint_sm,
		    msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
		mutex_enter(&msp->ms_lock);

		spa->spa_checkpoint_info.sci_dspace +=
		    range_tree_space(msp->ms_checkpointing);
		vd->vdev_stat.vs_checkpoint_space +=
		    range_tree_space(msp->ms_checkpointing);
		ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
		    -space_map_allocated(vd->vdev_checkpoint_sm));

		range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
	}

	if (msp->ms_loaded) {
		/*
		 * When the space map is loaded, we have an accurate
		 * histogram in the range tree. This gives us an opportunity
		 * to bring the space map's histogram up-to-date so we clear
		 * it first before updating it.
		 */
		space_map_histogram_clear(msp->ms_sm);
		space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);

		/*
		 * Since we've cleared the histogram we need to add back
		 * any free space that has already been processed, plus
		 * any deferred space. This allows the on-disk histogram
		 * to accurately reflect all free space even if some space
		 * is not yet available for allocation (i.e. deferred).
		 */
		space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);

		/*
		 * Add back any deferred free space that has not been
		 * added back into the in-core free tree yet. This will
		 * ensure that we don't end up with a space map histogram
		 * that is completely empty unless the metaslab is fully
		 * allocated.
		 */
		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			space_map_histogram_add(msp->ms_sm,
			    msp->ms_defer[t], tx);
		}
	}

	/*
	 * Always add the free space from this sync pass to the space
	 * map histogram. We want to make sure that the on-disk histogram
	 * accounts for all free space. If the space map is not loaded,
	 * then we will lose some accuracy but will correct it the next
	 * time we load the space map.
	 */
	space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
	metaslab_aux_histograms_update(msp);

	metaslab_group_histogram_add(mg, msp);
	metaslab_group_histogram_verify(mg);
	metaslab_class_histogram_verify(mg->mg_class);

	/*
	 * For sync pass 1, we avoid traversing this txg's free range tree
	 * and instead will just swap the pointers for freeing and freed.
	 * We can safely do this since the freed_tree is guaranteed to be
	 * empty on the initial pass.
	 */
	if (spa_sync_pass(spa) == 1) {
		range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
		ASSERT0(msp->ms_allocated_this_txg);
	} else {
		range_tree_vacate(msp->ms_freeing,
		    range_tree_add, msp->ms_freed);
	}
	msp->ms_allocated_this_txg += range_tree_space(alloctree);
	range_tree_vacate(alloctree, NULL, NULL);

	ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
	ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
	    & TXG_MASK]));
	ASSERT0(range_tree_space(msp->ms_freeing));
	ASSERT0(range_tree_space(msp->ms_checkpointing));

	mutex_exit(&msp->ms_lock);

	if (object != space_map_object(msp->ms_sm)) {
		object = space_map_object(msp->ms_sm);
		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
		    msp->ms_id, sizeof (uint64_t), &object, tx);
	}
	mutex_exit(&msp->ms_sync_lock);
	dmu_tx_commit(tx);
}
/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	spa_t *spa = vd->vdev_spa;
	range_tree_t **defer_tree;
	int64_t alloc_delta, defer_delta;
	boolean_t defer_allowed = B_TRUE;

	ASSERT(!vd->vdev_ishole);

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * range trees and add its capacity to the vdev.
	 */
	if (msp->ms_freed == NULL) {
		for (int t = 0; t < TXG_SIZE; t++) {
			ASSERT(msp->ms_allocating[t] == NULL);

			msp->ms_allocating[t] = range_tree_create(NULL, NULL);
		}

		ASSERT3P(msp->ms_freeing, ==, NULL);
		msp->ms_freeing = range_tree_create(NULL, NULL);

		ASSERT3P(msp->ms_freed, ==, NULL);
		msp->ms_freed = range_tree_create(NULL, NULL);

		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			ASSERT(msp->ms_defer[t] == NULL);

			msp->ms_defer[t] = range_tree_create(NULL, NULL);
		}

		ASSERT3P(msp->ms_checkpointing, ==, NULL);
		msp->ms_checkpointing = range_tree_create(NULL, NULL);

		metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
	}
	ASSERT0(range_tree_space(msp->ms_freeing));
	ASSERT0(range_tree_space(msp->ms_checkpointing));

	defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];

	uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
	    metaslab_class_get_alloc(spa_normal_class(spa));
	if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
		defer_allowed = B_FALSE;
	}

	defer_delta = 0;
	alloc_delta = msp->ms_allocated_this_txg -
	    range_tree_space(msp->ms_freed);
	if (defer_allowed) {
		defer_delta = range_tree_space(msp->ms_freed) -
		    range_tree_space(*defer_tree);
	} else {
		defer_delta -= range_tree_space(*defer_tree);
	}
	metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
	    defer_delta, 0);

	/*
	 * If there's a metaslab_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 */
	metaslab_load_wait(msp);

	/*
	 * When auto-trimming is enabled, free ranges which are added to
	 * ms_allocatable are also added to ms_trim. The ms_trim tree is
	 * periodically consumed by the vdev_autotrim_thread() which issues
	 * trims for all ranges and then vacates the tree. The ms_trim tree
	 * can be discarded at any time with the sole consequence of recent
	 * frees not being trimmed.
	 */
	if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
		range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
		if (!defer_allowed) {
			range_tree_walk(msp->ms_freed, range_tree_add,
			    msp->ms_trim);
		}
	} else {
		range_tree_vacate(msp->ms_trim, NULL, NULL);
	}

	/*
	 * Move the frees from the defer_tree back to the free
	 * range tree (if it's loaded). Swap the freed_tree and
	 * the defer_tree -- this is safe to do because we've
	 * just emptied out the defer_tree.
	 */
	range_tree_vacate(*defer_tree,
	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
	if (defer_allowed) {
		range_tree_swap(&msp->ms_freed, defer_tree);
	} else {
		range_tree_vacate(msp->ms_freed,
		    msp->ms_loaded ? range_tree_add : NULL,
		    msp->ms_allocatable);
	}
	msp->ms_synced_length = space_map_length(msp->ms_sm);

	msp->ms_deferspace += defer_delta;
	ASSERT3S(msp->ms_deferspace, >=, 0);
	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
	if (msp->ms_deferspace != 0) {
		/*
		 * Keep syncing this metaslab until all deferred frees
		 * are back in circulation.
		 */
		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
	}
	metaslab_aux_histograms_update_done(msp, defer_allowed);

	if (msp->ms_new) {
		msp->ms_new = B_FALSE;
		mutex_enter(&mg->mg_lock);
		mg->mg_ms_ready++;
		mutex_exit(&mg->mg_lock);
	}

	/*
	 * Re-sort metaslab within its group now that we've adjusted
	 * its allocatable space.
	 */
	metaslab_recalculate_weight_and_sort(msp);

	/*
	 * If the metaslab is loaded and we've not tried to load or allocate
	 * from it in 'metaslab_unload_delay' txgs, then unload it.
	 */
	if (msp->ms_loaded &&
	    msp->ms_disabled == 0 &&
	    msp->ms_selected_txg + metaslab_unload_delay < txg) {

		for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
			VERIFY0(range_tree_space(
			    msp->ms_allocating[(txg + t) & TXG_MASK]));
		}
		if (msp->ms_allocator != -1) {
			metaslab_passivate(msp, msp->ms_weight &
			    ~METASLAB_ACTIVE_MASK);
		}

		if (!metaslab_debug_unload)
			metaslab_unload(msp);
	}

	ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
	ASSERT0(range_tree_space(msp->ms_freeing));
	ASSERT0(range_tree_space(msp->ms_freed));
	ASSERT0(range_tree_space(msp->ms_checkpointing));

	msp->ms_allocated_this_txg = 0;
	mutex_exit(&msp->ms_lock);
}
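/*
 * Note on the defer cycle: frees from txg N land in ms_freed during
 * metaslab_sync(), are swapped into ms_defer[N % TXG_DEFER_SIZE] here,
 * and only re-enter ms_allocatable when this slot comes around again
 * TXG_DEFER_SIZE txgs later. This keeps recently freed space out of
 * circulation long enough to allow rollback to earlier uberblocks.
 */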
void
metaslab_sync_reassess(metaslab_group_t *mg)
{
	spa_t *spa = mg->mg_class->mc_spa;

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
	metaslab_group_alloc_update(mg);
	mg->mg_fragmentation = metaslab_group_fragmentation(mg);

	/*
	 * Preload the next potential metaslabs but only on active
	 * metaslab groups. We can get into a state where the metaslab
	 * is no longer active since we dirty metaslabs as we remove a
	 * device, thus potentially making the metaslab group eligible
	 * for preloading.
	 */
	if (mg->mg_activation_count > 0) {
		metaslab_group_preload(mg);
	}
	spa_config_exit(spa, SCL_ALLOC, FTAG);
}

/*
 * When writing a ditto block (i.e. more than one DVA for a given BP) on
 * the same vdev as an existing DVA of this BP, then try to allocate it
 * on a different metaslab than existing DVAs (i.e. a unique metaslab).
 */
static boolean_t
metaslab_is_unique(metaslab_t *msp, dva_t *dva)
{
	uint64_t dva_ms_id;

	if (DVA_GET_ASIZE(dva) == 0)
		return (B_TRUE);

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (B_TRUE);

	dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;

	return (msp->ms_id != dva_ms_id);
}
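/*
 * Example (hypothetical): on a vdev with vdev_ms_shift = 34 (16GB
 * metaslabs), a DVA at offset 18GB maps to dva_ms_id = 1, so a candidate
 * metaslab with ms_id == 1 on that vdev would be rejected for the ditto
 * copy.
 */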
/*
 * ==========================================================================
 * Metaslab allocation tracing facility
 * ==========================================================================
 */
#ifdef _METASLAB_TRACING
kstat_t *metaslab_trace_ksp;
kstat_named_t metaslab_trace_over_limit;

void
metaslab_alloc_trace_init(void)
{
	ASSERT(metaslab_alloc_trace_cache == NULL);
	metaslab_alloc_trace_cache = kmem_cache_create(
	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
	    "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
	if (metaslab_trace_ksp != NULL) {
		metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
		kstat_named_init(&metaslab_trace_over_limit,
		    "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
		kstat_install(metaslab_trace_ksp);
	}
}

void
metaslab_alloc_trace_fini(void)
{
	if (metaslab_trace_ksp != NULL) {
		kstat_delete(metaslab_trace_ksp);
		metaslab_trace_ksp = NULL;
	}
	kmem_cache_destroy(metaslab_alloc_trace_cache);
	metaslab_alloc_trace_cache = NULL;
}

/*
 * Add an allocation trace element to the allocation tracing list.
 */
static void
metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
    metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
    int allocator)
{
	metaslab_alloc_trace_t *mat;

	if (!metaslab_trace_enabled)
		return;

	/*
	 * When the tracing list reaches its maximum we remove
	 * the second element in the list before adding a new one.
	 * By removing the second element we preserve the original
	 * entry as a clue to what allocation steps have already been
	 * performed.
	 */
	if (zal->zal_size == metaslab_trace_max_entries) {
		metaslab_alloc_trace_t *mat_next;
#ifdef DEBUG
		panic("too many entries in allocation list");
#endif
		atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
		zal->zal_size--;
		mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
		list_remove(&zal->zal_list, mat_next);
		kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
	}

	mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
	list_link_init(&mat->mat_list_node);
	mat->mat_mg = mg;
	mat->mat_msp = msp;
	mat->mat_size = psize;
	mat->mat_dva_id = dva_id;
	mat->mat_offset = offset;
	mat->mat_weight = 0;
	mat->mat_allocator = allocator;

	if (msp != NULL)
		mat->mat_weight = msp->ms_weight;

	/*
	 * The list is part of the zio so locking is not required. Only
	 * a single thread will perform allocations for a given zio.
	 */
	list_insert_tail(&zal->zal_list, mat);
	zal->zal_size++;

	ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
}

void
metaslab_trace_init(zio_alloc_list_t *zal)
{
	list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
	    offsetof(metaslab_alloc_trace_t, mat_list_node));
	zal->zal_size = 0;
}

void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
	metaslab_alloc_trace_t *mat;

	while ((mat = list_remove_head(&zal->zal_list)) != NULL)
		kmem_cache_free(metaslab_alloc_trace_cache, mat);
	list_destroy(&zal->zal_list);
}

#else

#define	metaslab_trace_add(zal, mg, msp, psize, id, off, alloc)

void
metaslab_alloc_trace_init(void)
{
}

void
metaslab_alloc_trace_fini(void)
{
}

void
metaslab_trace_init(zio_alloc_list_t *zal)
{
}

void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
}

#endif /* _METASLAB_TRACING */
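/*
 * Typical usage of the tracing facility (sketch): callers keep a
 * zio_alloc_list_t with the zio, call metaslab_trace_init() before the
 * first DVA allocation, let metaslab_trace_add() record each group and
 * metaslab visited (with either the successful offset or a TRACE_* error
 * code), and call metaslab_trace_fini() when the zio is torn down.
 */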
/*
 * ==========================================================================
 * Metaslab block operations
 * ==========================================================================
 */
static void
metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
    int allocator)
{
	if (!(flags & METASLAB_ASYNC_ALLOC) ||
	    (flags & METASLAB_DONT_THROTTLE))
		return;

	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
	if (!mg->mg_class->mc_alloc_throttle_enabled)
		return;

	(void) zfs_refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
}

static void
metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
{
	uint64_t max = mg->mg_max_alloc_queue_depth;
	uint64_t cur = mg->mg_cur_max_alloc_queue_depth[allocator];
	while (cur < max) {
		if (atomic_cas_64(&mg->mg_cur_max_alloc_queue_depth[allocator],
		    cur, cur + 1) == cur) {
			atomic_inc_64(
			    &mg->mg_class->mc_alloc_max_slots[allocator]);
			return;
		}
		cur = mg->mg_cur_max_alloc_queue_depth[allocator];
	}
}

void
metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
    int allocator, boolean_t io_complete)
{
	if (!(flags & METASLAB_ASYNC_ALLOC) ||
	    (flags & METASLAB_DONT_THROTTLE))
		return;

	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
	if (!mg->mg_class->mc_alloc_throttle_enabled)
		return;

	(void) zfs_refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
	if (io_complete)
		metaslab_group_increment_qdepth(mg, allocator);
}

void
metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
    int allocator)
{
#ifdef ZFS_DEBUG
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	for (int d = 0; d < ndvas; d++) {
		uint64_t vdev = DVA_GET_VDEV(&dva[d]);
		metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
		VERIFY(zfs_refcount_not_held(
		    &mg->mg_alloc_queue_depth[allocator], tag));
	}
#endif
}
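/*
 * Rough behavior of the throttle bookkeeping above (sketch): each async
 * allocation adds a reference to mg_alloc_queue_depth[allocator]; when the
 * write completes, the reference is dropped and, via the CAS loop in
 * metaslab_group_increment_qdepth(), the group's dynamic queue-depth
 * ceiling is raised toward mg_max_alloc_queue_depth one successful I/O at
 * a time.
 */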
static uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
{
	uint64_t start;
	range_tree_t *rt = msp->ms_allocatable;
	metaslab_class_t *mc = msp->ms_group->mg_class;

	VERIFY(!msp->ms_condensing);
	VERIFY0(msp->ms_disabled);

	start = mc->mc_ops->msop_alloc(msp, size);
	if (start != -1ULL) {
		metaslab_group_t *mg = msp->ms_group;
		vdev_t *vd = mg->mg_vd;

		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
		range_tree_remove(rt, start, size);
		range_tree_clear(msp->ms_trim, start, size);

		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
			vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

		range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);

		/* Track the last successful allocation */
		msp->ms_alloc_txg = txg;
		metaslab_verify_space(msp, txg);
	}

	/*
	 * Now that we've attempted the allocation we need to update the
	 * metaslab's maximum block size since it may have changed.
	 */
	msp->ms_max_size = metaslab_block_maxsize(msp);
	return (start);
}
/*
 * Find the metaslab with the highest weight that is less than what we've
 * already tried. In the common case, this means that we will examine each
 * metaslab at most once. Note that concurrent callers could reorder metaslabs
 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
 * activated by another thread, and we fail to allocate from the metaslab we
 * have selected, we may not try the newly-activated metaslab, and instead
 * activate another metaslab. This is not optimal, but generally does not cause
 * any problems (a possible exception being if every metaslab is completely full
 * except for the newly-activated metaslab which we fail to examine).
 */
static metaslab_t *
find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
    dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
    zio_alloc_list_t *zal, metaslab_t *search, boolean_t *was_active)
{
	avl_index_t idx;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	metaslab_t *msp = avl_find(t, search, &idx);
	if (msp == NULL)
		msp = avl_nearest(t, idx, AVL_AFTER);

	for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
		int i;
		if (!metaslab_should_allocate(msp, asize)) {
			metaslab_trace_add(zal, mg, msp, asize, d,
			    TRACE_TOO_SMALL, allocator);
			continue;
		}

		/*
		 * If the selected metaslab is condensing or disabled,
		 * skip it.
		 */
		if (msp->ms_condensing || msp->ms_disabled > 0)
			continue;

		*was_active = msp->ms_allocator != -1;
		/*
		 * If we're activating as primary, this is our first allocation
		 * from this disk, so we don't need to check how close we are.
		 * If the metaslab under consideration was already active,
		 * we're getting desperate enough to steal another allocator's
		 * metaslab, so we still don't care about distances.
		 */
		if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
			break;

		for (i = 0; i < d; i++) {
			if (want_unique &&
			    !metaslab_is_unique(msp, &dva[i]))
				break;	/* try another metaslab */
		}
		if (i == d)
			break;
	}

	if (msp != NULL) {
		search->ms_weight = msp->ms_weight;
		search->ms_start = msp->ms_start + 1;
		search->ms_allocator = msp->ms_allocator;
		search->ms_primary = msp->ms_primary;
	}
	return (msp);
}
static uint64_t
metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
    uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva,
    int d, int allocator)
{
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	uint64_t activation_weight;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (int i = 0; i < d; i++) {
		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_SECONDARY;
		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_CLAIM;
			break;
		}
	}

	/*
	 * If we don't have enough metaslabs active to fill the entire array,
	 * we just use the 0th slot.
	 */
	if (mg->mg_ms_ready < mg->mg_allocators * 3)
		allocator = 0;

	ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);

	metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
	search->ms_weight = UINT64_MAX;
	search->ms_start = 0;
	/*
	 * At the end of the metaslab tree are the already-active metaslabs,
	 * first the primaries, then the secondaries. When we resume searching
	 * through the tree, we need to consider ms_allocator and ms_primary so
	 * we start in the location right after where we left off, and don't
	 * accidentally loop forever considering the same metaslabs.
	 */
	search->ms_allocator = -1;
	search->ms_primary = B_TRUE;
	for (;;) {
		boolean_t was_active = B_FALSE;

		mutex_enter(&mg->mg_lock);

		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
		    mg->mg_primaries[allocator] != NULL) {
			msp = mg->mg_primaries[allocator];
			was_active = B_TRUE;
		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
		    mg->mg_secondaries[allocator] != NULL) {
			msp = mg->mg_secondaries[allocator];
			was_active = B_TRUE;
		} else {
			msp = find_valid_metaslab(mg, activation_weight, dva, d,
			    want_unique, asize, allocator, zal, search,
			    &was_active);
		}

		mutex_exit(&mg->mg_lock);
		if (msp == NULL) {
			kmem_free(search, sizeof (*search));
			return (-1ULL);
		}

		mutex_enter(&msp->ms_lock);
		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request. It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock. We check the
		 * active status first to see if we need to reselect
		 * a new metaslab.
		 */
		if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If the metaslab is freshly activated for an allocator that
		 * isn't the one we're allocating from, or if it's a primary and
		 * we're seeking a secondary (or vice versa), we go back and
		 * select a new metaslab.
		 */
		if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
		    (msp->ms_allocator != -1) &&
		    (msp->ms_allocator != allocator || ((activation_weight ==
		    METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
		    activation_weight != METASLAB_WEIGHT_CLAIM) {
			metaslab_passivate(msp, msp->ms_weight &
			    ~METASLAB_WEIGHT_CLAIM);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (metaslab_activate(msp, allocator, activation_weight) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		msp->ms_selected_txg = txg;

		/*
		 * Now that we have the lock, recheck to see if we should
		 * continue to use this metaslab for this allocation. The
		 * metaslab is now loaded so metaslab_should_allocate() can
		 * accurately determine if the allocation attempt should
		 * proceed.
		 */
		if (!metaslab_should_allocate(msp, asize)) {
			/* Passivate this metaslab and select a new one. */
			metaslab_trace_add(zal, mg, msp, asize, d,
			    TRACE_TOO_SMALL, allocator);
			goto next;
		}

		/*
		 * If this metaslab is currently condensing then pick again as
		 * we can't manipulate this metaslab until it's committed
		 * to disk. If this metaslab is being initialized, we shouldn't
		 * allocate from it since the allocated region might be
		 * overwritten after allocation.
		 */
		if (msp->ms_condensing) {
			metaslab_trace_add(zal, mg, msp, asize, d,
			    TRACE_CONDENSING, allocator);
			metaslab_passivate(msp, msp->ms_weight &
			    ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		} else if (msp->ms_disabled > 0) {
			metaslab_trace_add(zal, mg, msp, asize, d,
			    TRACE_DISABLED, allocator);
			metaslab_passivate(msp, msp->ms_weight &
			    ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		offset = metaslab_block_alloc(msp, asize, txg);
		metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);

		if (offset != -1ULL) {
			/* Proactively passivate the metaslab, if needed */
			metaslab_segment_may_passivate(msp);
			break;
		}
next:
		ASSERT(msp->ms_loaded);

		/*
		 * We were unable to allocate from this metaslab so determine
		 * a new weight for this metaslab. Now that we have loaded
		 * the metaslab we can provide a better hint to the metaslab
		 * selector.
		 *
		 * For space-based metaslabs, we use the maximum block size.
		 * This information is only available when the metaslab
		 * is loaded and is more accurate than the generic free
		 * space weight that was calculated by metaslab_weight().
		 * This information allows us to quickly compare the maximum
		 * available allocation in the metaslab to the allocation
		 * size being requested.
		 *
		 * For segment-based metaslabs, determine the new weight
		 * based on the highest bucket in the range tree. We
		 * explicitly use the loaded segment weight (i.e. the range
		 * tree histogram) since it contains the space that is
		 * currently available for allocation and is accurate
		 * even within a sync pass.
		 */
		if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
			uint64_t weight = metaslab_block_maxsize(msp);
			WEIGHT_SET_SPACEBASED(weight);
			metaslab_passivate(msp, weight);
		} else {
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
		}

		/*
		 * We have just failed an allocation attempt, check
		 * that metaslab_should_allocate() agrees. Otherwise,
		 * we may end up in an infinite loop retrying the same
		 * metaslab.
		 */
		ASSERT(!metaslab_should_allocate(msp, asize));

		mutex_exit(&msp->ms_lock);
	}
	mutex_exit(&msp->ms_lock);
	kmem_free(search, sizeof (*search));
	return (offset);
}
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
    uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva,
    int d, int allocator)
{
	uint64_t offset;
	ASSERT(mg->mg_initialized);

	offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
	    dva, d, allocator);

	mutex_enter(&mg->mg_lock);
	if (offset == -1ULL) {
		mg->mg_failed_allocations++;
		metaslab_trace_add(zal, mg, NULL, asize, d,
		    TRACE_GROUP_FAILURE, allocator);
		if (asize == SPA_GANGBLOCKSIZE) {
			/*
			 * This metaslab group was unable to allocate
			 * the minimum gang block size so it must be out of
			 * space. We must notify the allocation throttle
			 * to start skipping allocation attempts to this
			 * metaslab group until more space becomes available.
			 * Note: this failure cannot be caused by the
			 * allocation throttle since the allocation throttle
			 * is only responsible for skipping devices and
			 * not failing block allocations.
			 */
			mg->mg_no_free_space = B_TRUE;
		}
	}
	mg->mg_allocations++;
	mutex_exit(&mg->mg_lock);
	return (offset);
}
/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
    zio_alloc_list_t *zal, int allocator)
{
	metaslab_group_t *mg, *fast_mg, *rotor;
	vdev_t *vd;
	boolean_t try_hard = B_FALSE;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 * This will result in more split blocks when using device removal,
	 * and a large number of split blocks coupled with ztest-induced
	 * damage can result in extremely long reconstruction times.  This
	 * will also test spilling from special to normal.
	 */
	if (psize >= metaslab_force_ganging && (spa_get_random(100) < 3)) {
		metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
		    allocator);
		return (SET_ERROR(ENOSPC));
	}

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_aliquot because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly.  It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible.  If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about.  Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data.  With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header.  That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

		/*
		 * It's possible the vdev we're using as the hint no
		 * longer exists or its mg has been closed (e.g. by
		 * device removal).  Consult the rotor when
		 * all else fails.
		 */
		if (vd != NULL && vd->vdev_mg != NULL) {
			mg = vd->vdev_mg;

			if (flags & METASLAB_HINTBP_AVOID &&
			    mg->mg_next != NULL)
				mg = mg->mg_next;
		} else {
			mg = mc->mc_rotor;
		}
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else if (flags & METASLAB_FASTWRITE) {
		mg = fast_mg = mc->mc_rotor;

		do {
			if (fast_mg->mg_vd->vdev_pending_fastwrite <
			    mg->mg_vd->vdev_pending_fastwrite)
				mg = fast_mg;
		} while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);

	} else {
		ASSERT(mc->mc_rotor != NULL);
		mg = mc->mc_rotor;
	}

	/*
	 * If the hint put us into the wrong metaslab class, or into a
	 * metaslab group that has been passivated, just follow the rotor.
	 */
	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
		mg = mc->mc_rotor;

	rotor = mg;
top:
	do {
		boolean_t allocatable;

		ASSERT(mg->mg_activation_count == 1);
		vd = mg->mg_vd;

		/*
		 * Don't allocate from faulted devices.
		 */
		if (try_hard) {
			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
			allocatable = vdev_allocatable(vd);
			spa_config_exit(spa, SCL_ZIO, FTAG);
		} else {
			allocatable = vdev_allocatable(vd);
		}

		/*
		 * Determine if the selected metaslab group is eligible
		 * for allocations. If we're ganging then don't allow
		 * this metaslab group to skip allocations since that would
		 * inadvertently return ENOSPC and suspend the pool
		 * even though space is still available.
		 */
		if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
			allocatable = metaslab_group_allocatable(mg, rotor,
			    psize, allocator, d);
		}

		if (!allocatable) {
			metaslab_trace_add(zal, mg, NULL, psize, d,
			    TRACE_NOT_ALLOCATABLE, allocator);
			goto next;
		}

		ASSERT(mg->mg_initialized);

		/*
		 * Avoid writing single-copy data to a failing,
		 * non-redundant vdev, unless we've already tried all
		 * other vdevs.
		 */
		if ((vd->vdev_stat.vs_write_errors > 0 ||
		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
		    d == 0 && !try_hard && vd->vdev_children == 0) {
			metaslab_trace_add(zal, mg, NULL, psize, d,
			    TRACE_VDEV_ERROR, allocator);
			goto next;
		}

		ASSERT(mg->mg_class == mc);

		uint64_t asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		/*
		 * If we don't need to try hard, then require that the
		 * block be on a different metaslab from any other DVAs
		 * in this BP (unique=true).  If we are trying hard, then
		 * allow any metaslab to be used (unique=false).
		 */
		uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
		    !try_hard, dva, d, allocator);

		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 *
			 * Bias is also used to compensate for unequally
			 * sized vdevs so that space is allocated fairly.
			 */
			if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
				vdev_stat_t *vs = &vd->vdev_stat;
				int64_t vs_free = vs->vs_space - vs->vs_alloc;
				int64_t mc_free = mc->mc_space - mc->mc_alloc;
				int64_t ratio;

				/*
				 * Calculate how much more or less we should
				 * try to allocate from this device during
				 * this iteration around the rotor.
				 *
				 * This basically introduces a zero-centered
				 * bias towards the devices with the most
				 * free space, while compensating for vdev
				 * size differences.
				 *
				 * Examples:
				 *  vdev V1 = 16M/128M
				 *  vdev V2 = 16M/128M
				 *  ratio(V1) = 100% ratio(V2) = 100%
				 *
				 *  vdev V1 = 16M/128M
				 *  vdev V2 = 64M/128M
				 *  ratio(V1) = 127% ratio(V2) = 72%
				 *
				 *  vdev V1 = 16M/128M
				 *  vdev V2 = 64M/512M
				 *  ratio(V1) = 40% ratio(V2) = 160%
				 */
				ratio = (vs_free * mc->mc_alloc_groups * 100) /
				    (mc_free + 1);
				mg->mg_bias = ((ratio - 100) *
				    (int64_t)mg->mg_aliquot) / 100;
			} else if (!metaslab_bias_enabled) {
				mg->mg_bias = 0;
			}

			if ((flags & METASLAB_FASTWRITE) ||
			    atomic_add_64_nv(&mc->mc_aliquot, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_aliquot = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d],
			    ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
			DVA_SET_ASIZE(&dva[d], asize);

			if (flags & METASLAB_FASTWRITE) {
				atomic_add_64(&vd->vdev_pending_fastwrite,
				    psize);
			}

			return (0);
		}
next:
		mc->mc_rotor = mg->mg_next;
		mc->mc_aliquot = 0;
	} while ((mg = mg->mg_next) != rotor);

	/*
	 * If we haven't tried hard, do so now.
	 */
	if (!try_hard) {
		try_hard = B_TRUE;
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
	return (SET_ERROR(ENOSPC));
}
void
metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
    boolean_t checkpoint)
{
	metaslab_t *msp;
	spa_t *spa = vd->vdev_spa;

	ASSERT(vdev_is_concrete(vd));
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	VERIFY(!msp->ms_condensing);
	VERIFY3U(offset, >=, msp->ms_start);
	VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));

	metaslab_check_free_impl(vd, offset, asize);

	mutex_enter(&msp->ms_lock);
	if (range_tree_is_empty(msp->ms_freeing) &&
	    range_tree_is_empty(msp->ms_checkpointing)) {
		vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
	}

	if (checkpoint) {
		ASSERT(spa_has_checkpoint(spa));
		range_tree_add(msp->ms_checkpointing, offset, asize);
	} else {
		range_tree_add(msp->ms_freeing, offset, asize);
	}
	mutex_exit(&msp->ms_lock);
}
static void
metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	boolean_t *checkpoint = arg;

	ASSERT3P(checkpoint, !=, NULL);

	if (vd->vdev_ops->vdev_op_remap != NULL)
		vdev_indirect_mark_obsolete(vd, offset, size);

	metaslab_free_impl(vd, offset, size, *checkpoint);
}
static void
metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
    boolean_t checkpoint)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
		return;

	if (spa->spa_vdev_removal != NULL &&
	    spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
	    vdev_is_concrete(vd)) {
		/*
		 * Note: we check if the vdev is concrete because when
		 * we complete the removal, we first change the vdev to be
		 * an indirect vdev (in open context), and then (in syncing
		 * context) clear spa_vdev_removal.
		 */
		free_from_removing_vdev(vd, offset, size);
	} else if (vd->vdev_ops->vdev_op_remap != NULL) {
		vdev_indirect_mark_obsolete(vd, offset, size);
		vd->vdev_ops->vdev_op_remap(vd, offset, size,
		    metaslab_free_impl_cb, &checkpoint);
	} else {
		metaslab_free_concrete(vd, offset, size, checkpoint);
	}
}
typedef struct remap_blkptr_cb_arg {
	blkptr_t *rbca_bp;
	spa_remap_cb_t rbca_cb;
	vdev_t *rbca_remap_vd;
	uint64_t rbca_remap_offset;
	void *rbca_cb_arg;
} remap_blkptr_cb_arg_t;
static void
remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	remap_blkptr_cb_arg_t *rbca = arg;
	blkptr_t *bp = rbca->rbca_bp;

	/* We can not remap split blocks. */
	if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
		return;
	ASSERT0(inner_offset);

	if (rbca->rbca_cb != NULL) {
		/*
		 * At this point we know that we are not handling split
		 * blocks and we invoke the callback on the previous
		 * vdev which must be indirect.
		 */
		ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);

		rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
		    rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);

		/* set up remap_blkptr_cb_arg for the next call */
		rbca->rbca_remap_vd = vd;
		rbca->rbca_remap_offset = offset;
	}

	/*
	 * The phys birth time is that of dva[0].  This ensures that we know
	 * when each dva was written, so that resilver can determine which
	 * blocks need to be scrubbed (i.e. those written during the time
	 * the vdev was offline).  It also ensures that the key used in
	 * the ARC hash table is unique (i.e. dva[0] + phys_birth).  If
	 * we didn't change the phys_birth, a lookup in the ARC for a
	 * remapped BP could find the data that was previously stored at
	 * this vdev + offset.
	 */
	vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
	    DVA_GET_VDEV(&bp->blk_dva[0]));
	vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
	bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
	    DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));

	DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
	DVA_SET_OFFSET(&bp->blk_dva[0], offset);
}
/*
 * If the block pointer contains any indirect DVAs, modify them to refer to
 * concrete DVAs.  Note that this will sometimes not be possible, leaving
 * the indirect DVA in place.  This happens if the indirect DVA spans multiple
 * segments in the mapping (i.e. it is a "split block").
 *
 * If the BP was remapped, calls the callback on the original dva (note the
 * callback can be called multiple times if the original indirect DVA refers
 * to another indirect DVA, etc).
 *
 * Returns TRUE if the BP was remapped.
 */
boolean_t
spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
{
	remap_blkptr_cb_arg_t rbca;

	if (!zfs_remap_blkptr_enable)
		return (B_FALSE);

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
		return (B_FALSE);

	/*
	 * Dedup BP's can not be remapped, because ddt_phys_select() depends
	 * on DVA[0] being the same in the BP as in the DDT (dedup table).
	 */
	if (BP_GET_DEDUP(bp))
		return (B_FALSE);

	/*
	 * Gang blocks can not be remapped, because
	 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
	 * the BP used to read the gang block header (GBH) being the same
	 * as the DVA[0] that we allocated for the GBH.
	 */
	if (BP_IS_GANG(bp))
		return (B_FALSE);

	/*
	 * Embedded BP's have no DVA to remap.
	 */
	if (BP_GET_NDVAS(bp) < 1)
		return (B_FALSE);

	/*
	 * Note: we only remap dva[0].  If we remapped other dvas, we
	 * would no longer know what their phys birth txg is.
	 */
	dva_t *dva = &bp->blk_dva[0];

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));

	if (vd->vdev_ops->vdev_op_remap == NULL)
		return (B_FALSE);

	rbca.rbca_bp = bp;
	rbca.rbca_cb = callback;
	rbca.rbca_remap_vd = vd;
	rbca.rbca_remap_offset = offset;
	rbca.rbca_cb_arg = arg;

	/*
	 * remap_blkptr_cb() will be called in order for each level of
	 * indirection, until a concrete vdev is reached or a split block is
	 * encountered. old_vd and old_offset are updated within the callback
	 * as we go from the one indirect vdev to the next one (either concrete
	 * or indirect again) in that order.
	 */
	vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);

	/* Check if the DVA wasn't remapped because it is a split block */
	if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
		return (B_FALSE);

	return (B_TRUE);
}
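
/*
 * Illustrative use (an assumption about a typical caller, not code from this
 * file): a reader that wants the concrete location of a block can remap a
 * private copy of the BP and fall back to the original on failure:
 *
 *	blkptr_t bp_copy = *bp;
 *	if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL))
 *		bp = &bp_copy;	/- dva[0] now refers to a concrete vdev -/
 *
 * Passing a NULL callback (and NULL arg) is allowed; remap_blkptr_cb() only
 * invokes the callback when it is non-NULL.
 */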
/*
 * Undo the allocation of a DVA which happened in the given transaction group.
 */
void
metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	metaslab_t *msp;
	vdev_t *vd;
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);

	ASSERT(DVA_IS_VALID(dva));
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset,
		    (u_longlong_t)size);
		return;
	}

	ASSERT(!vd->vdev_removing);
	ASSERT(vdev_is_concrete(vd));
	ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
	ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	mutex_enter(&msp->ms_lock);
	range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
	    offset, size);

	VERIFY(!msp->ms_condensing);
	VERIFY3U(offset, >=, msp->ms_start);
	VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
	VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
	    msp->ms_size);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	range_tree_add(msp->ms_allocatable, offset, size);
	mutex_exit(&msp->ms_lock);
}
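
/*
 * Note: metaslab_unalloc_dva() backs out an allocation from a txg that has
 * not synced yet, so the space is removed from ms_allocating and returned
 * straight to ms_allocatable.  Compare metaslab_free_dva() below, which frees
 * previously synced blocks and therefore routes them through ms_freeing /
 * ms_checkpointing so the free is committed in syncing context.
 */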
/*
 * Free the block represented by the given DVA.
 */
void
metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd = vdev_lookup_top(spa, vdev);

	ASSERT(DVA_IS_VALID(dva));
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	if (DVA_GET_GANG(dva)) {
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
	}

	metaslab_free_impl(vd, offset, size, checkpoint);
}
/*
 * Reserve some allocation slots. The reservation system must be called
 * before we call into the allocator. If there aren't any available slots
 * then the I/O will be throttled until an I/O completes and its slots are
 * freed up. The function returns true if it was successful in placing
 * the reservation.
 */
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
    zio_t *zio, int flags)
{
	uint64_t available_slots = 0;
	boolean_t slot_reserved = B_FALSE;
	uint64_t max = mc->mc_alloc_max_slots[allocator];

	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);

	uint64_t reserved_slots =
	    zfs_refcount_count(&mc->mc_alloc_slots[allocator]);
	if (reserved_slots < max)
		available_slots = max - reserved_slots;

	if (slots <= available_slots || GANG_ALLOCATION(flags) ||
	    flags & METASLAB_MUST_RESERVE) {
		/*
		 * We reserve the slots individually so that we can unreserve
		 * them individually when an I/O completes.
		 */
		for (int d = 0; d < slots; d++) {
			zfs_refcount_add(&mc->mc_alloc_slots[allocator],
			    zio);
		}
		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
		slot_reserved = B_TRUE;
	}

	mutex_exit(&mc->mc_lock);
	return (slot_reserved);
}
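
/*
 * Sketch of the expected pairing (an assumption about callers, not code in
 * this file): each successful reservation is matched by an unreserve when the
 * I/O completes, e.g.
 *
 *	if (metaslab_class_throttle_reserve(mc, ndvas, allocator, zio, 0)) {
 *		/- ... issue the allocation / I/O ... -/
 *		metaslab_class_throttle_unreserve(mc, ndvas, allocator, zio);
 *	}
 *
 * Because slots are reserved per-zio, the unreserve must pass the same zio
 * and the same slot count that was reserved.
 */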
void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
    int allocator, zio_t *zio)
{
	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);
	for (int d = 0; d < slots; d++) {
		(void) zfs_refcount_remove(&mc->mc_alloc_slots[allocator],
		    zio);
	}
	mutex_exit(&mc->mc_lock);
}
static int
metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
    uint64_t txg)
{
	metaslab_t *msp;
	spa_t *spa = vd->vdev_spa;
	int error = 0;

	if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
		return (SET_ERROR(ENXIO));

	ASSERT3P(vd->vdev_ms, !=, NULL);
	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	mutex_enter(&msp->ms_lock);

	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
		error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
		if (error == EBUSY) {
			ASSERT(msp->ms_loaded);
			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
			error = 0;
		}
	}

	if (error == 0 &&
	    !range_tree_contains(msp->ms_allocatable, offset, size))
		error = SET_ERROR(ENOENT);

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	VERIFY(!msp->ms_condensing);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
	    msp->ms_size);
	range_tree_remove(msp->ms_allocatable, offset, size);
	range_tree_clear(msp->ms_trim, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_allocating[txg & TXG_MASK],
		    offset, size);
	}

	mutex_exit(&msp->ms_lock);

	return (0);
}
typedef struct metaslab_claim_cb_arg_t {
	uint64_t	mcca_txg;
	int		mcca_error;
} metaslab_claim_cb_arg_t;
static void
metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	metaslab_claim_cb_arg_t *mcca_arg = arg;

	if (mcca_arg->mcca_error == 0) {
		mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
		    size, mcca_arg->mcca_txg);
	}
}
static int
metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
	if (vd->vdev_ops->vdev_op_remap != NULL) {
		metaslab_claim_cb_arg_t arg;

		/*
		 * Only zdb(1M) can claim on indirect vdevs.  This is used
		 * to detect leaks of mapped space (that are not accounted
		 * for in the obsolete counts, spacemap, or bpobj).
		 */
		ASSERT(!spa_writeable(vd->vdev_spa));
		arg.mcca_error = 0;
		arg.mcca_txg = txg;

		vd->vdev_ops->vdev_op_remap(vd, offset, size,
		    metaslab_claim_impl_cb, &arg);

		if (arg.mcca_error == 0) {
			arg.mcca_error = metaslab_claim_concrete(vd,
			    offset, size, txg);
		}
		return (arg.mcca_error);
	} else {
		return (metaslab_claim_concrete(vd, offset, size, txg));
	}
}
/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
		return (SET_ERROR(ENXIO));
	}

	ASSERT(DVA_IS_VALID(dva));

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	return (metaslab_claim_impl(vd, offset, size, txg));
}
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
    zio_alloc_list_t *zal, zio_t *zio, int allocator)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
	int error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
	ASSERT3P(zal, !=, NULL);

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags, zal, allocator);
		if (error != 0) {
			for (d--; d >= 0; d--) {
				metaslab_unalloc_dva(spa, &dva[d], txg);
				metaslab_group_alloc_decrement(spa,
				    DVA_GET_VDEV(&dva[d]), zio, flags,
				    allocator, B_FALSE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		} else {
			/*
			 * Update the metaslab group's queue depth
			 * based on the newly allocated dva.
			 */
			metaslab_group_alloc_increment(spa,
			    DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, 0);

	return (0);
}
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	/*
	 * If we have a checkpoint for the pool we need to make sure that
	 * the blocks that we free that are part of the checkpoint won't be
	 * reused until the checkpoint is discarded or we revert to it.
	 *
	 * The checkpoint flag is passed down the metaslab_free code path
	 * and is set whenever we want to add a block to the checkpoint's
	 * accounting. That is, we "checkpoint" blocks that existed at the
	 * time the checkpoint was created and are therefore referenced by
	 * the checkpointed uberblock.
	 *
	 * Note that, we don't checkpoint any blocks if the current
	 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
	 * normally as they will be referenced by the checkpointed uberblock.
	 */
	boolean_t checkpoint = B_FALSE;
	if (bp->blk_birth <= spa->spa_checkpoint_txg &&
	    spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
		/*
		 * At this point, if the block is part of the checkpoint
		 * there is no way it was created in the current txg.
		 */
		ASSERT(!now);
		ASSERT3U(spa_syncing_txg(spa), ==, txg);
		checkpoint = B_TRUE;
	}

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++) {
		if (now) {
			metaslab_unalloc_dva(spa, &dva[d], txg);
		} else {
			ASSERT3U(txg, ==, spa_syncing_txg(spa));
			metaslab_free_dva(spa, &dva[d], checkpoint);
		}
	}

	spa_config_exit(spa, SCL_FREE, FTAG);
}
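
/*
 * Illustration of the checkpoint test above (txg numbers are made up):
 * with spa_checkpoint_txg == 150 and spa_syncing_txg(spa) == 200, a block
 * born in txg 100 satisfies blk_birth <= 150 and 200 > 150, so its free is
 * recorded in ms_checkpointing and the space stays unavailable until the
 * checkpoint is discarded.  A block born in txg 180 fails the first test and
 * is freed normally, since the checkpointed uberblock cannot reference it.
 */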
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_claim_dva(spa, &dva[d], txg);
		if (error != 0)
			break;
	}

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}
void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		atomic_add_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}
void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
		atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}
static void
metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	if (vd->vdev_ops == &vdev_indirect_ops)
		return;

	metaslab_check_free_impl(vd, offset, size);
}
static void
metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
{
	metaslab_t *msp;
	ASSERTV(spa_t *spa = vd->vdev_spa);

	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	if (vd->vdev_ops->vdev_op_remap != NULL) {
		vd->vdev_ops->vdev_op_remap(vd, offset, size,
		    metaslab_check_free_impl_cb, NULL);
		return;
	}

	ASSERT(vdev_is_concrete(vd));
	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	mutex_enter(&msp->ms_lock);
	if (msp->ms_loaded) {
		range_tree_verify_not_present(msp->ms_allocatable,
		    offset, size);
	}

	range_tree_verify_not_present(msp->ms_trim, offset, size);
	range_tree_verify_not_present(msp->ms_freeing, offset, size);
	range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
	range_tree_verify_not_present(msp->ms_freed, offset, size);
	for (int j = 0; j < TXG_DEFER_SIZE; j++)
		range_tree_verify_not_present(msp->ms_defer[j], offset, size);
	mutex_exit(&msp->ms_lock);
}
void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);

		if (DVA_GET_GANG(&bp->blk_dva[i]))
			size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

		ASSERT3P(vd, !=, NULL);

		metaslab_check_free_impl(vd, offset, size);
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
}
static void
metaslab_group_disable_wait(metaslab_group_t *mg)
{
	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
	while (mg->mg_disabled_updating) {
		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
	}
}
static void
metaslab_group_disabled_increment(metaslab_group_t *mg)
{
	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
	ASSERT(mg->mg_disabled_updating);

	while (mg->mg_ms_disabled >= max_disabled_ms) {
		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
	}
	mg->mg_ms_disabled++;
	ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
}
/*
 * Mark the metaslab as disabled to prevent any allocations on this metaslab.
 * We must also track how many metaslabs are currently disabled within a
 * metaslab group and limit them to prevent allocation failures from
 * occurring because all metaslabs are disabled.
 */
void
metaslab_disable(metaslab_t *msp)
{
	ASSERT(!MUTEX_HELD(&msp->ms_lock));
	metaslab_group_t *mg = msp->ms_group;

	mutex_enter(&mg->mg_ms_disabled_lock);

	/*
	 * To keep an accurate count of how many threads have disabled
	 * a specific metaslab group, we only allow one thread to mark
	 * the metaslab group at a time. This ensures that the value of
	 * ms_disabled will be accurate when we decide to mark a metaslab
	 * group as disabled. To do this we force all other threads
	 * to wait till the metaslab's mg_disabled_updating flag is no
	 * longer set.
	 */
	metaslab_group_disable_wait(mg);
	mg->mg_disabled_updating = B_TRUE;
	if (msp->ms_disabled == 0) {
		metaslab_group_disabled_increment(mg);
	}
	mutex_enter(&msp->ms_lock);
	msp->ms_disabled++;
	mutex_exit(&msp->ms_lock);

	mg->mg_disabled_updating = B_FALSE;
	cv_broadcast(&mg->mg_ms_disabled_cv);
	mutex_exit(&mg->mg_ms_disabled_lock);
}
void
metaslab_enable(metaslab_t *msp, boolean_t sync)
{
	metaslab_group_t *mg = msp->ms_group;
	spa_t *spa = mg->mg_vd->vdev_spa;

	/*
	 * Wait for the outstanding IO to be synced to prevent newly
	 * allocated blocks from being overwritten.  This is used by
	 * initialize and TRIM which are modifying unallocated space.
	 */
	if (sync)
		txg_wait_synced(spa_get_dsl(spa), 0);

	mutex_enter(&mg->mg_ms_disabled_lock);
	mutex_enter(&msp->ms_lock);
	if (--msp->ms_disabled == 0) {
		mg->mg_ms_disabled--;
		cv_broadcast(&mg->mg_ms_disabled_cv);
	}
	mutex_exit(&msp->ms_lock);
	mutex_exit(&mg->mg_ms_disabled_lock);
}
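
/*
 * Typical usage sketch (an assumption about callers such as TRIM and
 * initialize, not code from this file): a metaslab is disabled around work
 * that writes to unallocated space, then re-enabled when that work is done:
 *
 *	metaslab_disable(msp);
 *	/- ... issue TRIM / initialize I/Os covering free ranges ... -/
 *	metaslab_enable(msp, B_TRUE);
 *
 * Passing sync == B_TRUE forces a txg_wait_synced() so that blocks newly
 * allocated from this metaslab cannot be overwritten by the just-completed
 * work.
 */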
#if defined(_KERNEL)
module_param(metaslab_aliquot, ulong, 0644);
MODULE_PARM_DESC(metaslab_aliquot,
	"allocation granularity (a.k.a. stripe size)");

module_param(metaslab_debug_load, int, 0644);
MODULE_PARM_DESC(metaslab_debug_load,
	"load all metaslabs when pool is first opened");

module_param(metaslab_debug_unload, int, 0644);
MODULE_PARM_DESC(metaslab_debug_unload,
	"prevent metaslabs from being unloaded");

module_param(metaslab_preload_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_preload_enabled,
	"preload potential metaslabs during reassessment");

module_param(zfs_mg_noalloc_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
	"percentage of free space for metaslab group to allow allocation");

module_param(zfs_mg_fragmentation_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
	"fragmentation for metaslab group to allow allocation");

module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
	"fragmentation for metaslab to allow allocation");

module_param(metaslab_fragmentation_factor_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
	"use the fragmentation metric to prefer less fragmented metaslabs");

module_param(metaslab_lba_weighting_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
	"prefer metaslabs with lower LBAs");

module_param(metaslab_bias_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_bias_enabled,
	"enable metaslab group biasing");

module_param(zfs_metaslab_segment_weight_enabled, int, 0644);
MODULE_PARM_DESC(zfs_metaslab_segment_weight_enabled,
	"enable segment-based metaslab selection");

module_param(zfs_metaslab_switch_threshold, int, 0644);
MODULE_PARM_DESC(zfs_metaslab_switch_threshold,
	"segment-based metaslab selection maximum buckets before switching");

module_param(metaslab_force_ganging, ulong, 0644);
MODULE_PARM_DESC(metaslab_force_ganging,
	"blocks larger than this size are forced to be gang blocks");