 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/zfs_context.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/btree.h>

#define	WITH_DF_BLOCK_ALLOCATOR

#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))

/*
 * Metaslab granularity, in bytes. This is roughly similar to what would be
 * referred to as the "stripe size" in traditional RAID arrays. In normal
 * operation, we will try to write this amount of data to a top-level vdev
 * before moving on to the next one.
 */
unsigned long metaslab_aliquot = 512 << 10;
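
/*
 * For example, with the default metaslab_aliquot of 512K, a top-level vdev
 * with four children is given a rotor aliquot of 512K * 4 = 2M before the
 * allocator moves on to the next top-level vdev (see
 * metaslab_group_activate() below, where mg_aliquot is computed as
 * metaslab_aliquot * MAX(1, vdev_children)).
 */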

/*
 * For testing, make some blocks above a certain size be gang blocks.
 */
unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;

/*
 * In pools where the log space map feature is not enabled we touch
 * multiple metaslabs (and their respective space maps) with each
 * transaction group. Thus, we benefit from having a small space map
 * block size since it allows us to issue more I/O operations scattered
 * around the disk. So a sane default for the space map block size
 * is 16K.
 */
int zfs_metaslab_sm_blksz_no_log = (1 << 14);

/*
 * When the log space map feature is enabled, we accumulate a lot of
 * changes per metaslab that are flushed once in a while so we benefit
 * from a bigger block size like 128K for the metaslab space maps.
 */
int zfs_metaslab_sm_blksz_with_log = (1 << 17);

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;
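
/*
 * For example, with the default zfs_condense_pct of 200, a loaded metaslab's
 * space map is only a candidate for on-disk condensing once its in-core
 * representation is at most half (100 / 200) the size of its on-disk
 * representation.
 */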

/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
int zfs_metaslab_condense_block_threshold = 4;
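
/*
 * For example, assuming a 16K space map block size, the default threshold of
 * 4 means condensing is only considered once the uncondensed space map is
 * consuming more than 64K on disk.
 */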

/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;
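
/*
 * For example, if this were set to 20 (the default of 0 disables the check),
 * a metaslab group with only 15% free space would be skipped as long as some
 * other group in the pool still had more than 20% free; once every group
 * dropped to 20% or below, all groups would accept allocations again.
 */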

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or
 * equal to zfs_mg_fragmentation_threshold. If a metaslab group
 * exceeds this threshold then it will be skipped unless all metaslab
 * groups within the metaslab class have also crossed this threshold.
 *
 * This tunable was introduced to avoid edge cases where we continue
 * allocating from very fragmented disks in our pool while other, less
 * fragmented disks exist. On the other hand, if all disks in the
 * pool are uniformly approaching the threshold, the threshold can
 * be a speed bump in performance, where we keep switching the disks
 * that we allocate from (e.g. we allocate some segments from disk A,
 * pushing it past the threshold, while freeing segments from disk B,
 * bringing its fragmentation below the threshold).
 *
 * Empirically, we've seen that our vdev selection for allocations is
 * good enough that fragmentation increases uniformly across all vdevs
 * the majority of the time. Thus we set the threshold percentage high
 * enough to avoid hitting the speed bump on pools that are being pushed
 * to the edge.
 */
int zfs_mg_fragmentation_threshold = 95;

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;

/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;

/*
 * Maximum distance to search forward from the last offset. Without this
 * limit, fragmented pools can see >100,000 iterations and
 * metaslab_block_picker() becomes the performance limiting factor on
 * high-performance storage.
 *
 * With the default setting of 16MB, we typically see less than 500
 * iterations, even with very fragmented, ashift=9 pools. The maximum number
 * of iterations possible is:
 *	metaslab_df_max_search / (2 * (1<<ashift))
 * With the default setting of 16MB this is 16*1024 (with ashift=9) or
 * 2048 (with ashift=12).
 */
int metaslab_df_max_search = 16 * 1024 * 1024;

/*
 * Forces the metaslab_block_picker function to search for at least this many
 * segments forwards until giving up on finding a segment that the allocation
 * will satisfy.
 */
uint32_t metaslab_min_search_count = 100;

/*
 * If we are not searching forward (due to metaslab_df_max_search,
 * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
 * controls what segment is used. If it is set, we will use the largest free
 * segment. If it is not set, we will use a segment of exactly the requested
 * size (or larger).
 */
int metaslab_df_use_largest_segment = B_FALSE;

/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;

/*
 * These tunables control how long a metaslab will remain loaded after the
 * last allocation from it. A metaslab can't be unloaded until at least
 * metaslab_unload_delay TXG's and metaslab_unload_delay_ms milliseconds
 * have elapsed. However, zfs_metaslab_mem_limit may cause it to be
 * unloaded sooner. These settings are intended to be generous -- to keep
 * metaslabs loaded for a long time, reducing the rate of metaslab loading.
 */
int metaslab_unload_delay = 32;
int metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
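
/*
 * For example, with the defaults above, a metaslab last selected in txg 1000
 * is not evicted until the pool has passed txg 1032 and at least ten minutes
 * of wall-clock time have elapsed since that selection (unless
 * zfs_metaslab_mem_limit forces it out sooner).
 */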

/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = 10;

/*
 * Enable/disable preloading of metaslabs.
 */
int metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
int metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
int metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
int metaslab_bias_enabled = B_TRUE;

/*
 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
 */
boolean_t zfs_remap_blkptr_enable = B_TRUE;

/*
 * Enable/disable segment-based metaslab selection.
 */
int zfs_metaslab_segment_weight_enabled = B_TRUE;

/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
int zfs_metaslab_switch_threshold = 2;

/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
#ifdef _METASLAB_TRACING
boolean_t metaslab_trace_enabled = B_TRUE;
#endif

/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached allowing for further investigation.
 */
#ifdef _METASLAB_TRACING
uint64_t metaslab_trace_max_entries = 5000;
#endif

/*
 * Maximum number of metaslabs per group that can be disabled
 * simultaneously.
 */
int max_disabled_ms = 3;

/*
 * Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
 * To avoid 64-bit overflow, don't set above UINT32_MAX.
 */
unsigned long zfs_metaslab_max_size_cache_sec = 3600; /* 1 hour */

/*
 * Maximum percentage of memory to use on storing loaded metaslabs. If loading
 * a metaslab would take it over this percentage, the oldest selected metaslab
 * is automatically unloaded.
 */
int zfs_metaslab_mem_limit = 75;

/*
 * Force the per-metaslab range trees to use 64-bit integers to store
 * segments. Used for debugging purposes.
 */
boolean_t zfs_metaslab_force_large_segs = B_FALSE;

/*
 * By default we only store segments over a certain size in the size-sorted
 * metaslab trees (ms_allocatable_by_size and
 * ms_unflushed_frees_by_size). This dramatically reduces memory usage and
 * improves load and unload times at the cost of causing us to use slightly
 * larger segments than we would otherwise in some cases.
 */
uint32_t metaslab_by_size_min_shift = 14;
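
/*
 * For example, with the default shift of 14, only free segments of at least
 * 16K (1 << 14) are tracked in the size-sorted B-trees; smaller segments
 * remain visible in the offset-sorted range trees but are skipped by
 * metaslab_rt_add() below.
 */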

static uint64_t metaslab_weight(metaslab_t *, boolean_t);
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);

static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
static unsigned int metaslab_idx_func(multilist_t *, void *);
static void metaslab_evict(metaslab_t *, uint64_t);
static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);

#ifdef _METASLAB_TRACING
kmem_cache_t *metaslab_alloc_trace_cache;

typedef struct metaslab_stats {
	kstat_named_t metaslabstat_trace_over_limit;
	kstat_named_t metaslabstat_df_find_under_floor;
	kstat_named_t metaslabstat_reload_tree;
} metaslab_stats_t;

static metaslab_stats_t metaslab_stats = {
	{ "trace_over_limit",		KSTAT_DATA_UINT64 },
	{ "df_find_under_floor",	KSTAT_DATA_UINT64 },
	{ "reload_tree",		KSTAT_DATA_UINT64 },
};

#define	METASLABSTAT_BUMP(stat) \
	atomic_inc_64(&metaslab_stats.stat.value.ui64);

kstat_t *metaslab_ksp;

void
metaslab_stat_init(void)
{
	ASSERT(metaslab_alloc_trace_cache == NULL);
	metaslab_alloc_trace_cache = kmem_cache_create(
	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
	    "misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (metaslab_ksp != NULL) {
		metaslab_ksp->ks_data = &metaslab_stats;
		kstat_install(metaslab_ksp);
	}
}

void
metaslab_stat_fini(void)
{
	if (metaslab_ksp != NULL) {
		kstat_delete(metaslab_ksp);
	}

	kmem_cache_destroy(metaslab_alloc_trace_cache);
	metaslab_alloc_trace_cache = NULL;
}
#else

void
metaslab_stat_init(void)
{
}

void
metaslab_stat_fini(void)
{
}
#endif

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
395 metaslab_class_create(spa_t
*spa
, metaslab_ops_t
*ops
)
397 metaslab_class_t
*mc
;
399 mc
= kmem_zalloc(offsetof(metaslab_class_t
,
400 mc_allocator
[spa
->spa_alloc_count
]), KM_SLEEP
);
404 mutex_init(&mc
->mc_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
405 mc
->mc_metaslab_txg_list
= multilist_create(sizeof (metaslab_t
),
406 offsetof(metaslab_t
, ms_class_txg_node
), metaslab_idx_func
);
407 for (int i
= 0; i
< spa
->spa_alloc_count
; i
++) {
408 metaslab_class_allocator_t
*mca
= &mc
->mc_allocator
[i
];
409 mca
->mca_rotor
= NULL
;
410 zfs_refcount_create_tracked(&mca
->mca_alloc_slots
);
417 metaslab_class_destroy(metaslab_class_t
*mc
)
419 spa_t
*spa
= mc
->mc_spa
;
421 ASSERT(mc
->mc_alloc
== 0);
422 ASSERT(mc
->mc_deferred
== 0);
423 ASSERT(mc
->mc_space
== 0);
424 ASSERT(mc
->mc_dspace
== 0);
426 for (int i
= 0; i
< spa
->spa_alloc_count
; i
++) {
427 metaslab_class_allocator_t
*mca
= &mc
->mc_allocator
[i
];
428 ASSERT(mca
->mca_rotor
== NULL
);
429 zfs_refcount_destroy(&mca
->mca_alloc_slots
);
431 mutex_destroy(&mc
->mc_lock
);
432 multilist_destroy(mc
->mc_metaslab_txg_list
);
433 kmem_free(mc
, offsetof(metaslab_class_t
,
434 mc_allocator
[spa
->spa_alloc_count
]));
438 metaslab_class_validate(metaslab_class_t
*mc
)
440 metaslab_group_t
*mg
;
444 * Must hold one of the spa_config locks.
446 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALL
, RW_READER
) ||
447 spa_config_held(mc
->mc_spa
, SCL_ALL
, RW_WRITER
));
449 if ((mg
= mc
->mc_allocator
[0].mca_rotor
) == NULL
)
454 ASSERT(vd
->vdev_mg
!= NULL
);
455 ASSERT3P(vd
->vdev_top
, ==, vd
);
456 ASSERT3P(mg
->mg_class
, ==, mc
);
457 ASSERT3P(vd
->vdev_ops
, !=, &vdev_hole_ops
);
458 } while ((mg
= mg
->mg_next
) != mc
->mc_allocator
[0].mca_rotor
);
464 metaslab_class_space_update(metaslab_class_t
*mc
, int64_t alloc_delta
,
465 int64_t defer_delta
, int64_t space_delta
, int64_t dspace_delta
)
467 atomic_add_64(&mc
->mc_alloc
, alloc_delta
);
468 atomic_add_64(&mc
->mc_deferred
, defer_delta
);
469 atomic_add_64(&mc
->mc_space
, space_delta
);
470 atomic_add_64(&mc
->mc_dspace
, dspace_delta
);
474 metaslab_class_get_alloc(metaslab_class_t
*mc
)
476 return (mc
->mc_alloc
);
480 metaslab_class_get_deferred(metaslab_class_t
*mc
)
482 return (mc
->mc_deferred
);
486 metaslab_class_get_space(metaslab_class_t
*mc
)
488 return (mc
->mc_space
);
492 metaslab_class_get_dspace(metaslab_class_t
*mc
)
494 return (spa_deflate(mc
->mc_spa
) ? mc
->mc_dspace
: mc
->mc_space
);
498 metaslab_class_histogram_verify(metaslab_class_t
*mc
)
500 spa_t
*spa
= mc
->mc_spa
;
501 vdev_t
*rvd
= spa
->spa_root_vdev
;
505 if ((zfs_flags
& ZFS_DEBUG_HISTOGRAM_VERIFY
) == 0)
508 mc_hist
= kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
,
511 for (int c
= 0; c
< rvd
->vdev_children
; c
++) {
512 vdev_t
*tvd
= rvd
->vdev_child
[c
];
513 metaslab_group_t
*mg
= tvd
->vdev_mg
;
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
519 if (!vdev_is_concrete(tvd
) || tvd
->vdev_ms_shift
== 0 ||
520 mg
->mg_class
!= mc
) {
524 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
525 mc_hist
[i
] += mg
->mg_histogram
[i
];
528 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
529 VERIFY3U(mc_hist
[i
], ==, mc
->mc_histogram
[i
]);
531 kmem_free(mc_hist
, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
);
535 * Calculate the metaslab class's fragmentation metric. The metric
536 * is weighted based on the space contribution of each metaslab group.
537 * The return value will be a number between 0 and 100 (inclusive), or
538 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
539 * zfs_frag_table for more information about the metric.
542 metaslab_class_fragmentation(metaslab_class_t
*mc
)
544 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
545 uint64_t fragmentation
= 0;
547 spa_config_enter(mc
->mc_spa
, SCL_VDEV
, FTAG
, RW_READER
);
549 for (int c
= 0; c
< rvd
->vdev_children
; c
++) {
550 vdev_t
*tvd
= rvd
->vdev_child
[c
];
551 metaslab_group_t
*mg
= tvd
->vdev_mg
;
		 * Skip any holes, uninitialized top-levels,
		 * or vdevs that are not in this metaslab class.
557 if (!vdev_is_concrete(tvd
) || tvd
->vdev_ms_shift
== 0 ||
558 mg
->mg_class
!= mc
) {
563 * If a metaslab group does not contain a fragmentation
564 * metric then just bail out.
566 if (mg
->mg_fragmentation
== ZFS_FRAG_INVALID
) {
567 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
568 return (ZFS_FRAG_INVALID
);
572 * Determine how much this metaslab_group is contributing
573 * to the overall pool fragmentation metric.
575 fragmentation
+= mg
->mg_fragmentation
*
576 metaslab_group_get_space(mg
);
578 fragmentation
/= metaslab_class_get_space(mc
);
580 ASSERT3U(fragmentation
, <=, 100);
581 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
582 return (fragmentation
);
586 * Calculate the amount of expandable space that is available in
587 * this metaslab class. If a device is expanded then its expandable
588 * space will be the amount of allocatable space that is currently not
589 * part of this metaslab class.
592 metaslab_class_expandable_space(metaslab_class_t
*mc
)
594 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
597 spa_config_enter(mc
->mc_spa
, SCL_VDEV
, FTAG
, RW_READER
);
598 for (int c
= 0; c
< rvd
->vdev_children
; c
++) {
599 vdev_t
*tvd
= rvd
->vdev_child
[c
];
600 metaslab_group_t
*mg
= tvd
->vdev_mg
;
602 if (!vdev_is_concrete(tvd
) || tvd
->vdev_ms_shift
== 0 ||
603 mg
->mg_class
!= mc
) {
608 * Calculate if we have enough space to add additional
609 * metaslabs. We report the expandable space in terms
610 * of the metaslab size since that's the unit of expansion.
612 space
+= P2ALIGN(tvd
->vdev_max_asize
- tvd
->vdev_asize
,
613 1ULL << tvd
->vdev_ms_shift
);
615 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
620 metaslab_class_evict_old(metaslab_class_t
*mc
, uint64_t txg
)
622 multilist_t
*ml
= mc
->mc_metaslab_txg_list
;
623 for (int i
= 0; i
< multilist_get_num_sublists(ml
); i
++) {
624 multilist_sublist_t
*mls
= multilist_sublist_lock(ml
, i
);
625 metaslab_t
*msp
= multilist_sublist_head(mls
);
626 multilist_sublist_unlock(mls
);
627 while (msp
!= NULL
) {
628 mutex_enter(&msp
->ms_lock
);
631 * If the metaslab has been removed from the list
632 * (which could happen if we were at the memory limit
633 * and it was evicted during this loop), then we can't
634 * proceed and we should restart the sublist.
636 if (!multilist_link_active(&msp
->ms_class_txg_node
)) {
637 mutex_exit(&msp
->ms_lock
);
641 mls
= multilist_sublist_lock(ml
, i
);
642 metaslab_t
*next_msp
= multilist_sublist_next(mls
, msp
);
643 multilist_sublist_unlock(mls
);
645 msp
->ms_selected_txg
+ metaslab_unload_delay
&&
646 gethrtime() > msp
->ms_selected_time
+
647 (uint64_t)MSEC2NSEC(metaslab_unload_delay_ms
)) {
648 metaslab_evict(msp
, txg
);
651 * Once we've hit a metaslab selected too
652 * recently to evict, we're done evicting for
655 mutex_exit(&msp
->ms_lock
);
658 mutex_exit(&msp
->ms_lock
);
665 metaslab_compare(const void *x1
, const void *x2
)
667 const metaslab_t
*m1
= (const metaslab_t
*)x1
;
668 const metaslab_t
*m2
= (const metaslab_t
*)x2
;
672 if (m1
->ms_allocator
!= -1 && m1
->ms_primary
)
674 else if (m1
->ms_allocator
!= -1 && !m1
->ms_primary
)
676 if (m2
->ms_allocator
!= -1 && m2
->ms_primary
)
678 else if (m2
->ms_allocator
!= -1 && !m2
->ms_primary
)
682 * Sort inactive metaslabs first, then primaries, then secondaries. When
683 * selecting a metaslab to allocate from, an allocator first tries its
684 * primary, then secondary active metaslab. If it doesn't have active
685 * metaslabs, or can't allocate from them, it searches for an inactive
686 * metaslab to activate. If it can't find a suitable one, it will steal
687 * a primary or secondary metaslab from another allocator.
694 int cmp
= TREE_CMP(m2
->ms_weight
, m1
->ms_weight
);
698 IMPLY(TREE_CMP(m1
->ms_start
, m2
->ms_start
) == 0, m1
== m2
);
700 return (TREE_CMP(m1
->ms_start
, m2
->ms_start
));
/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */

/*
709 * Update the allocatable flag and the metaslab group's capacity.
710 * The allocatable flag is set to true if the capacity is below
711 * the zfs_mg_noalloc_threshold or has a fragmentation value that is
712 * greater than zfs_mg_fragmentation_threshold. If a metaslab group
713 * transitions from allocatable to non-allocatable or vice versa then the
714 * metaslab group's class is updated to reflect the transition.
717 metaslab_group_alloc_update(metaslab_group_t
*mg
)
719 vdev_t
*vd
= mg
->mg_vd
;
720 metaslab_class_t
*mc
= mg
->mg_class
;
721 vdev_stat_t
*vs
= &vd
->vdev_stat
;
722 boolean_t was_allocatable
;
723 boolean_t was_initialized
;
725 ASSERT(vd
== vd
->vdev_top
);
726 ASSERT3U(spa_config_held(mc
->mc_spa
, SCL_ALLOC
, RW_READER
), ==,
729 mutex_enter(&mg
->mg_lock
);
730 was_allocatable
= mg
->mg_allocatable
;
731 was_initialized
= mg
->mg_initialized
;
733 mg
->mg_free_capacity
= ((vs
->vs_space
- vs
->vs_alloc
) * 100) /
736 mutex_enter(&mc
->mc_lock
);
739 * If the metaslab group was just added then it won't
740 * have any space until we finish syncing out this txg.
741 * At that point we will consider it initialized and available
742 * for allocations. We also don't consider non-activated
743 * metaslab groups (e.g. vdevs that are in the middle of being removed)
744 * to be initialized, because they can't be used for allocation.
746 mg
->mg_initialized
= metaslab_group_initialized(mg
);
747 if (!was_initialized
&& mg
->mg_initialized
) {
749 } else if (was_initialized
&& !mg
->mg_initialized
) {
750 ASSERT3U(mc
->mc_groups
, >, 0);
753 if (mg
->mg_initialized
)
754 mg
->mg_no_free_space
= B_FALSE
;
757 * A metaslab group is considered allocatable if it has plenty
758 * of free space or is not heavily fragmented. We only take
759 * fragmentation into account if the metaslab group has a valid
760 * fragmentation metric (i.e. a value between 0 and 100).
762 mg
->mg_allocatable
= (mg
->mg_activation_count
> 0 &&
763 mg
->mg_free_capacity
> zfs_mg_noalloc_threshold
&&
764 (mg
->mg_fragmentation
== ZFS_FRAG_INVALID
||
765 mg
->mg_fragmentation
<= zfs_mg_fragmentation_threshold
));
768 * The mc_alloc_groups maintains a count of the number of
769 * groups in this metaslab class that are still above the
770 * zfs_mg_noalloc_threshold. This is used by the allocating
771 * threads to determine if they should avoid allocations to
772 * a given group. The allocator will avoid allocations to a group
773 * if that group has reached or is below the zfs_mg_noalloc_threshold
774 * and there are still other groups that are above the threshold.
775 * When a group transitions from allocatable to non-allocatable or
776 * vice versa we update the metaslab class to reflect that change.
777 * When the mc_alloc_groups value drops to 0 that means that all
778 * groups have reached the zfs_mg_noalloc_threshold making all groups
779 * eligible for allocations. This effectively means that all devices
780 * are balanced again.
782 if (was_allocatable
&& !mg
->mg_allocatable
)
783 mc
->mc_alloc_groups
--;
784 else if (!was_allocatable
&& mg
->mg_allocatable
)
785 mc
->mc_alloc_groups
++;
786 mutex_exit(&mc
->mc_lock
);
788 mutex_exit(&mg
->mg_lock
);
792 metaslab_sort_by_flushed(const void *va
, const void *vb
)
794 const metaslab_t
*a
= va
;
795 const metaslab_t
*b
= vb
;
797 int cmp
= TREE_CMP(a
->ms_unflushed_txg
, b
->ms_unflushed_txg
);
801 uint64_t a_vdev_id
= a
->ms_group
->mg_vd
->vdev_id
;
802 uint64_t b_vdev_id
= b
->ms_group
->mg_vd
->vdev_id
;
803 cmp
= TREE_CMP(a_vdev_id
, b_vdev_id
);
807 return (TREE_CMP(a
->ms_id
, b
->ms_id
));
811 metaslab_group_create(metaslab_class_t
*mc
, vdev_t
*vd
, int allocators
)
813 metaslab_group_t
*mg
;
815 mg
= kmem_zalloc(offsetof(metaslab_group_t
,
816 mg_allocator
[allocators
]), KM_SLEEP
);
817 mutex_init(&mg
->mg_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
818 mutex_init(&mg
->mg_ms_disabled_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
819 cv_init(&mg
->mg_ms_disabled_cv
, NULL
, CV_DEFAULT
, NULL
);
820 avl_create(&mg
->mg_metaslab_tree
, metaslab_compare
,
821 sizeof (metaslab_t
), offsetof(metaslab_t
, ms_group_node
));
824 mg
->mg_activation_count
= 0;
825 mg
->mg_initialized
= B_FALSE
;
826 mg
->mg_no_free_space
= B_TRUE
;
827 mg
->mg_allocators
= allocators
;
829 for (int i
= 0; i
< allocators
; i
++) {
830 metaslab_group_allocator_t
*mga
= &mg
->mg_allocator
[i
];
831 zfs_refcount_create_tracked(&mga
->mga_alloc_queue_depth
);
834 mg
->mg_taskq
= taskq_create("metaslab_group_taskq", metaslab_load_pct
,
835 maxclsyspri
, 10, INT_MAX
, TASKQ_THREADS_CPU_PCT
| TASKQ_DYNAMIC
);
841 metaslab_group_destroy(metaslab_group_t
*mg
)
843 ASSERT(mg
->mg_prev
== NULL
);
844 ASSERT(mg
->mg_next
== NULL
);
846 * We may have gone below zero with the activation count
847 * either because we never activated in the first place or
848 * because we're done, and possibly removing the vdev.
850 ASSERT(mg
->mg_activation_count
<= 0);
852 taskq_destroy(mg
->mg_taskq
);
853 avl_destroy(&mg
->mg_metaslab_tree
);
854 mutex_destroy(&mg
->mg_lock
);
855 mutex_destroy(&mg
->mg_ms_disabled_lock
);
856 cv_destroy(&mg
->mg_ms_disabled_cv
);
858 for (int i
= 0; i
< mg
->mg_allocators
; i
++) {
859 metaslab_group_allocator_t
*mga
= &mg
->mg_allocator
[i
];
860 zfs_refcount_destroy(&mga
->mga_alloc_queue_depth
);
862 kmem_free(mg
, offsetof(metaslab_group_t
,
863 mg_allocator
[mg
->mg_allocators
]));
867 metaslab_group_activate(metaslab_group_t
*mg
)
869 metaslab_class_t
*mc
= mg
->mg_class
;
870 spa_t
*spa
= mc
->mc_spa
;
871 metaslab_group_t
*mgprev
, *mgnext
;
873 ASSERT3U(spa_config_held(spa
, SCL_ALLOC
, RW_WRITER
), !=, 0);
875 ASSERT(mg
->mg_prev
== NULL
);
876 ASSERT(mg
->mg_next
== NULL
);
877 ASSERT(mg
->mg_activation_count
<= 0);
879 if (++mg
->mg_activation_count
<= 0)
882 mg
->mg_aliquot
= metaslab_aliquot
* MAX(1, mg
->mg_vd
->vdev_children
);
883 metaslab_group_alloc_update(mg
);
885 if ((mgprev
= mc
->mc_allocator
[0].mca_rotor
) == NULL
) {
889 mgnext
= mgprev
->mg_next
;
890 mg
->mg_prev
= mgprev
;
891 mg
->mg_next
= mgnext
;
892 mgprev
->mg_next
= mg
;
893 mgnext
->mg_prev
= mg
;
895 for (int i
= 0; i
< spa
->spa_alloc_count
; i
++) {
896 mc
->mc_allocator
[i
].mca_rotor
= mg
;
902 * Passivate a metaslab group and remove it from the allocation rotor.
903 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
904 * a metaslab group. This function will momentarily drop spa_config_locks
905 * that are lower than the SCL_ALLOC lock (see comment below).
908 metaslab_group_passivate(metaslab_group_t
*mg
)
910 metaslab_class_t
*mc
= mg
->mg_class
;
911 spa_t
*spa
= mc
->mc_spa
;
912 metaslab_group_t
*mgprev
, *mgnext
;
913 int locks
= spa_config_held(spa
, SCL_ALL
, RW_WRITER
);
915 ASSERT3U(spa_config_held(spa
, SCL_ALLOC
| SCL_ZIO
, RW_WRITER
), ==,
916 (SCL_ALLOC
| SCL_ZIO
));
918 if (--mg
->mg_activation_count
!= 0) {
919 for (int i
= 0; i
< spa
->spa_alloc_count
; i
++)
920 ASSERT(mc
->mc_allocator
[i
].mca_rotor
!= mg
);
921 ASSERT(mg
->mg_prev
== NULL
);
922 ASSERT(mg
->mg_next
== NULL
);
923 ASSERT(mg
->mg_activation_count
< 0);
928 * The spa_config_lock is an array of rwlocks, ordered as
929 * follows (from highest to lowest):
930 * SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
931 * SCL_ZIO > SCL_FREE > SCL_VDEV
932 * (For more information about the spa_config_lock see spa_misc.c)
933 * The higher the lock, the broader its coverage. When we passivate
934 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
935 * config locks. However, the metaslab group's taskq might be trying
936 * to preload metaslabs so we must drop the SCL_ZIO lock and any
937 * lower locks to allow the I/O to complete. At a minimum,
938 * we continue to hold the SCL_ALLOC lock, which prevents any future
939 * allocations from taking place and any changes to the vdev tree.
941 spa_config_exit(spa
, locks
& ~(SCL_ZIO
- 1), spa
);
942 taskq_wait_outstanding(mg
->mg_taskq
, 0);
943 spa_config_enter(spa
, locks
& ~(SCL_ZIO
- 1), spa
, RW_WRITER
);
944 metaslab_group_alloc_update(mg
);
945 for (int i
= 0; i
< mg
->mg_allocators
; i
++) {
946 metaslab_group_allocator_t
*mga
= &mg
->mg_allocator
[i
];
947 metaslab_t
*msp
= mga
->mga_primary
;
949 mutex_enter(&msp
->ms_lock
);
950 metaslab_passivate(msp
,
951 metaslab_weight_from_range_tree(msp
));
952 mutex_exit(&msp
->ms_lock
);
954 msp
= mga
->mga_secondary
;
956 mutex_enter(&msp
->ms_lock
);
957 metaslab_passivate(msp
,
958 metaslab_weight_from_range_tree(msp
));
959 mutex_exit(&msp
->ms_lock
);
963 mgprev
= mg
->mg_prev
;
964 mgnext
= mg
->mg_next
;
969 mgprev
->mg_next
= mgnext
;
970 mgnext
->mg_prev
= mgprev
;
972 for (int i
= 0; i
< spa
->spa_alloc_count
; i
++) {
973 if (mc
->mc_allocator
[i
].mca_rotor
== mg
)
974 mc
->mc_allocator
[i
].mca_rotor
= mgnext
;
982 metaslab_group_initialized(metaslab_group_t
*mg
)
984 vdev_t
*vd
= mg
->mg_vd
;
985 vdev_stat_t
*vs
= &vd
->vdev_stat
;
987 return (vs
->vs_space
!= 0 && mg
->mg_activation_count
> 0);
991 metaslab_group_get_space(metaslab_group_t
*mg
)
993 return ((1ULL << mg
->mg_vd
->vdev_ms_shift
) * mg
->mg_vd
->vdev_ms_count
);
997 metaslab_group_histogram_verify(metaslab_group_t
*mg
)
1000 vdev_t
*vd
= mg
->mg_vd
;
1001 uint64_t ashift
= vd
->vdev_ashift
;
1004 if ((zfs_flags
& ZFS_DEBUG_HISTOGRAM_VERIFY
) == 0)
1007 mg_hist
= kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
,
1010 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE
, >=,
1011 SPACE_MAP_HISTOGRAM_SIZE
+ ashift
);
1013 for (int m
= 0; m
< vd
->vdev_ms_count
; m
++) {
1014 metaslab_t
*msp
= vd
->vdev_ms
[m
];
1016 /* skip if not active or not a member */
1017 if (msp
->ms_sm
== NULL
|| msp
->ms_group
!= mg
)
1020 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++)
1021 mg_hist
[i
+ ashift
] +=
1022 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
1025 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
1026 VERIFY3U(mg_hist
[i
], ==, mg
->mg_histogram
[i
]);
1028 kmem_free(mg_hist
, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
);
1032 metaslab_group_histogram_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
1034 metaslab_class_t
*mc
= mg
->mg_class
;
1035 uint64_t ashift
= mg
->mg_vd
->vdev_ashift
;
1037 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1038 if (msp
->ms_sm
== NULL
)
1041 mutex_enter(&mg
->mg_lock
);
1042 for (int i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
1043 mg
->mg_histogram
[i
+ ashift
] +=
1044 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
1045 mc
->mc_histogram
[i
+ ashift
] +=
1046 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
1048 mutex_exit(&mg
->mg_lock
);
1052 metaslab_group_histogram_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
1054 metaslab_class_t
*mc
= mg
->mg_class
;
1055 uint64_t ashift
= mg
->mg_vd
->vdev_ashift
;
1057 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1058 if (msp
->ms_sm
== NULL
)
1061 mutex_enter(&mg
->mg_lock
);
1062 for (int i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
1063 ASSERT3U(mg
->mg_histogram
[i
+ ashift
], >=,
1064 msp
->ms_sm
->sm_phys
->smp_histogram
[i
]);
1065 ASSERT3U(mc
->mc_histogram
[i
+ ashift
], >=,
1066 msp
->ms_sm
->sm_phys
->smp_histogram
[i
]);
1068 mg
->mg_histogram
[i
+ ashift
] -=
1069 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
1070 mc
->mc_histogram
[i
+ ashift
] -=
1071 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
1073 mutex_exit(&mg
->mg_lock
);
1077 metaslab_group_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
1079 ASSERT(msp
->ms_group
== NULL
);
1080 mutex_enter(&mg
->mg_lock
);
1083 avl_add(&mg
->mg_metaslab_tree
, msp
);
1084 mutex_exit(&mg
->mg_lock
);
1086 mutex_enter(&msp
->ms_lock
);
1087 metaslab_group_histogram_add(mg
, msp
);
1088 mutex_exit(&msp
->ms_lock
);
1092 metaslab_group_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
1094 mutex_enter(&msp
->ms_lock
);
1095 metaslab_group_histogram_remove(mg
, msp
);
1096 mutex_exit(&msp
->ms_lock
);
1098 mutex_enter(&mg
->mg_lock
);
1099 ASSERT(msp
->ms_group
== mg
);
1100 avl_remove(&mg
->mg_metaslab_tree
, msp
);
1102 metaslab_class_t
*mc
= msp
->ms_group
->mg_class
;
1103 multilist_sublist_t
*mls
=
1104 multilist_sublist_lock_obj(mc
->mc_metaslab_txg_list
, msp
);
1105 if (multilist_link_active(&msp
->ms_class_txg_node
))
1106 multilist_sublist_remove(mls
, msp
);
1107 multilist_sublist_unlock(mls
);
1109 msp
->ms_group
= NULL
;
1110 mutex_exit(&mg
->mg_lock
);
1114 metaslab_group_sort_impl(metaslab_group_t
*mg
, metaslab_t
*msp
, uint64_t weight
)
1116 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1117 ASSERT(MUTEX_HELD(&mg
->mg_lock
));
1118 ASSERT(msp
->ms_group
== mg
);
1120 avl_remove(&mg
->mg_metaslab_tree
, msp
);
1121 msp
->ms_weight
= weight
;
1122 avl_add(&mg
->mg_metaslab_tree
, msp
);
1127 metaslab_group_sort(metaslab_group_t
*mg
, metaslab_t
*msp
, uint64_t weight
)
1130 * Although in principle the weight can be any value, in
1131 * practice we do not use values in the range [1, 511].
1133 ASSERT(weight
>= SPA_MINBLOCKSIZE
|| weight
== 0);
1134 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1136 mutex_enter(&mg
->mg_lock
);
1137 metaslab_group_sort_impl(mg
, msp
, weight
);
1138 mutex_exit(&mg
->mg_lock
);
1142 * Calculate the fragmentation for a given metaslab group. We can use
1143 * a simple average here since all metaslabs within the group must have
1144 * the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
 * group have a fragmentation metric.
1149 metaslab_group_fragmentation(metaslab_group_t
*mg
)
1151 vdev_t
*vd
= mg
->mg_vd
;
1152 uint64_t fragmentation
= 0;
1153 uint64_t valid_ms
= 0;
1155 for (int m
= 0; m
< vd
->vdev_ms_count
; m
++) {
1156 metaslab_t
*msp
= vd
->vdev_ms
[m
];
1158 if (msp
->ms_fragmentation
== ZFS_FRAG_INVALID
)
1160 if (msp
->ms_group
!= mg
)
1164 fragmentation
+= msp
->ms_fragmentation
;
1167 if (valid_ms
<= mg
->mg_vd
->vdev_ms_count
/ 2)
1168 return (ZFS_FRAG_INVALID
);
1170 fragmentation
/= valid_ms
;
1171 ASSERT3U(fragmentation
, <=, 100);
1172 return (fragmentation
);
1176 * Determine if a given metaslab group should skip allocations. A metaslab
1177 * group should avoid allocations if its free capacity is less than the
1178 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
1179 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
1180 * that can still handle allocations. If the allocation throttle is enabled
1181 * then we skip allocations to devices that have reached their maximum
1182 * allocation queue depth unless the selected metaslab group is the only
1183 * eligible group remaining.
1186 metaslab_group_allocatable(metaslab_group_t
*mg
, metaslab_group_t
*rotor
,
1187 uint64_t psize
, int allocator
, int d
)
1189 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
1190 metaslab_class_t
*mc
= mg
->mg_class
;
1193 * We can only consider skipping this metaslab group if it's
1194 * in the normal metaslab class and there are other metaslab
	 * groups to select from. Otherwise, we always consider it eligible
	 * for allocations.
	 */
1198 if ((mc
!= spa_normal_class(spa
) &&
1199 mc
!= spa_special_class(spa
) &&
1200 mc
!= spa_dedup_class(spa
)) ||
1205 * If the metaslab group's mg_allocatable flag is set (see comments
1206 * in metaslab_group_alloc_update() for more information) and
1207 * the allocation throttle is disabled then allow allocations to this
1208 * device. However, if the allocation throttle is enabled then
1209 * check if we have reached our allocation limit (mga_alloc_queue_depth)
1210 * to determine if we should allow allocations to this metaslab group.
1211 * If all metaslab groups are no longer considered allocatable
1212 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
1213 * gang block size then we allow allocations on this metaslab group
1214 * regardless of the mg_allocatable or throttle settings.
1216 if (mg
->mg_allocatable
) {
1217 metaslab_group_allocator_t
*mga
= &mg
->mg_allocator
[allocator
];
1219 uint64_t qmax
= mga
->mga_cur_max_alloc_queue_depth
;
1221 if (!mc
->mc_alloc_throttle_enabled
)
1225 * If this metaslab group does not have any free space, then
1226 * there is no point in looking further.
1228 if (mg
->mg_no_free_space
)
1232 * Relax allocation throttling for ditto blocks. Due to
1233 * random imbalances in allocation it tends to push copies
		 * to one vdev that looks a bit better at the moment.
1236 qmax
= qmax
* (4 + d
) / 4;
1238 qdepth
= zfs_refcount_count(&mga
->mga_alloc_queue_depth
);
1241 * If this metaslab group is below its qmax or it's
		 * the only allocatable metaslab group, then attempt
1243 * to allocate from it.
1245 if (qdepth
< qmax
|| mc
->mc_alloc_groups
== 1)
1247 ASSERT3U(mc
->mc_alloc_groups
, >, 1);
1250 * Since this metaslab group is at or over its qmax, we
1251 * need to determine if there are metaslab groups after this
1252 * one that might be able to handle this allocation. This is
1253 * racy since we can't hold the locks for all metaslab
1254 * groups at the same time when we make this check.
1256 for (metaslab_group_t
*mgp
= mg
->mg_next
;
1257 mgp
!= rotor
; mgp
= mgp
->mg_next
) {
1258 metaslab_group_allocator_t
*mgap
=
1259 &mgp
->mg_allocator
[allocator
];
1260 qmax
= mgap
->mga_cur_max_alloc_queue_depth
;
1261 qmax
= qmax
* (4 + d
) / 4;
1263 zfs_refcount_count(&mgap
->mga_alloc_queue_depth
);
1266 * If there is another metaslab group that
1267 * might be able to handle the allocation, then
1268 * we return false so that we skip this group.
1270 if (qdepth
< qmax
&& !mgp
->mg_no_free_space
)
1275 * We didn't find another group to handle the allocation
1276 * so we can't skip this metaslab group even though
1277 * we are at or over our qmax.
1281 } else if (mc
->mc_alloc_groups
== 0 || psize
== SPA_MINBLOCKSIZE
) {
/*
 * ==========================================================================
 * Range tree callbacks
 * ==========================================================================
 */

/*
1294 * Comparison function for the private size-ordered tree using 32-bit
1295 * ranges. Tree is sorted by size, larger sizes at the end of the tree.
1298 metaslab_rangesize32_compare(const void *x1
, const void *x2
)
1300 const range_seg32_t
*r1
= x1
;
1301 const range_seg32_t
*r2
= x2
;
1303 uint64_t rs_size1
= r1
->rs_end
- r1
->rs_start
;
1304 uint64_t rs_size2
= r2
->rs_end
- r2
->rs_start
;
1306 int cmp
= TREE_CMP(rs_size1
, rs_size2
);
1310 return (TREE_CMP(r1
->rs_start
, r2
->rs_start
));
1314 * Comparison function for the private size-ordered tree using 64-bit
1315 * ranges. Tree is sorted by size, larger sizes at the end of the tree.
1318 metaslab_rangesize64_compare(const void *x1
, const void *x2
)
1320 const range_seg64_t
*r1
= x1
;
1321 const range_seg64_t
*r2
= x2
;
1323 uint64_t rs_size1
= r1
->rs_end
- r1
->rs_start
;
1324 uint64_t rs_size2
= r2
->rs_end
- r2
->rs_start
;
1326 int cmp
= TREE_CMP(rs_size1
, rs_size2
);
1330 return (TREE_CMP(r1
->rs_start
, r2
->rs_start
));
1332 typedef struct metaslab_rt_arg
{
1333 zfs_btree_t
*mra_bt
;
1334 uint32_t mra_floor_shift
;
1335 } metaslab_rt_arg_t
;
1339 metaslab_rt_arg_t
*mra
;
1343 metaslab_size_sorted_add(void *arg
, uint64_t start
, uint64_t size
)
1345 struct mssa_arg
*mssap
= arg
;
1346 range_tree_t
*rt
= mssap
->rt
;
1347 metaslab_rt_arg_t
*mrap
= mssap
->mra
;
1348 range_seg_max_t seg
= {0};
1349 rs_set_start(&seg
, rt
, start
);
1350 rs_set_end(&seg
, rt
, start
+ size
);
1351 metaslab_rt_add(rt
, &seg
, mrap
);
1355 metaslab_size_tree_full_load(range_tree_t
*rt
)
1357 metaslab_rt_arg_t
*mrap
= rt
->rt_arg
;
1358 #ifdef _METASLAB_TRACING
1359 METASLABSTAT_BUMP(metaslabstat_reload_tree
);
1361 ASSERT0(zfs_btree_numnodes(mrap
->mra_bt
));
1362 mrap
->mra_floor_shift
= 0;
1363 struct mssa_arg arg
= {0};
1366 range_tree_walk(rt
, metaslab_size_sorted_add
, &arg
);
1370 * Create any block allocator specific components. The current allocators
1371 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
1375 metaslab_rt_create(range_tree_t
*rt
, void *arg
)
1377 metaslab_rt_arg_t
*mrap
= arg
;
1378 zfs_btree_t
*size_tree
= mrap
->mra_bt
;
1381 int (*compare
) (const void *, const void *);
1382 switch (rt
->rt_type
) {
1384 size
= sizeof (range_seg32_t
);
1385 compare
= metaslab_rangesize32_compare
;
1388 size
= sizeof (range_seg64_t
);
1389 compare
= metaslab_rangesize64_compare
;
1392 panic("Invalid range seg type %d", rt
->rt_type
);
1394 zfs_btree_create(size_tree
, compare
, size
);
1395 mrap
->mra_floor_shift
= metaslab_by_size_min_shift
;
1400 metaslab_rt_destroy(range_tree_t
*rt
, void *arg
)
1402 metaslab_rt_arg_t
*mrap
= arg
;
1403 zfs_btree_t
*size_tree
= mrap
->mra_bt
;
1405 zfs_btree_destroy(size_tree
);
1406 kmem_free(mrap
, sizeof (*mrap
));
1411 metaslab_rt_add(range_tree_t
*rt
, range_seg_t
*rs
, void *arg
)
1413 metaslab_rt_arg_t
*mrap
= arg
;
1414 zfs_btree_t
*size_tree
= mrap
->mra_bt
;
1416 if (rs_get_end(rs
, rt
) - rs_get_start(rs
, rt
) <
1417 (1 << mrap
->mra_floor_shift
))
1420 zfs_btree_add(size_tree
, rs
);
1425 metaslab_rt_remove(range_tree_t
*rt
, range_seg_t
*rs
, void *arg
)
1427 metaslab_rt_arg_t
*mrap
= arg
;
1428 zfs_btree_t
*size_tree
= mrap
->mra_bt
;
1430 if (rs_get_end(rs
, rt
) - rs_get_start(rs
, rt
) < (1 <<
1431 mrap
->mra_floor_shift
))
1434 zfs_btree_remove(size_tree
, rs
);
1439 metaslab_rt_vacate(range_tree_t
*rt
, void *arg
)
1441 metaslab_rt_arg_t
*mrap
= arg
;
1442 zfs_btree_t
*size_tree
= mrap
->mra_bt
;
1443 zfs_btree_clear(size_tree
);
1444 zfs_btree_destroy(size_tree
);
1446 metaslab_rt_create(rt
, arg
);
1449 static range_tree_ops_t metaslab_rt_ops
= {
1450 .rtop_create
= metaslab_rt_create
,
1451 .rtop_destroy
= metaslab_rt_destroy
,
1452 .rtop_add
= metaslab_rt_add
,
1453 .rtop_remove
= metaslab_rt_remove
,
1454 .rtop_vacate
= metaslab_rt_vacate
};

/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */

/*
1464 * Return the maximum contiguous segment within the metaslab.
1467 metaslab_largest_allocatable(metaslab_t
*msp
)
1469 zfs_btree_t
*t
= &msp
->ms_allocatable_by_size
;
1474 if (zfs_btree_numnodes(t
) == 0)
1475 metaslab_size_tree_full_load(msp
->ms_allocatable
);
1477 rs
= zfs_btree_last(t
, NULL
);
1481 return (rs_get_end(rs
, msp
->ms_allocatable
) - rs_get_start(rs
,
1482 msp
->ms_allocatable
));
 * Return the maximum contiguous segment within the unflushed frees of this
 * metaslab.
 */
1490 metaslab_largest_unflushed_free(metaslab_t
*msp
)
1492 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1494 if (msp
->ms_unflushed_frees
== NULL
)
1497 if (zfs_btree_numnodes(&msp
->ms_unflushed_frees_by_size
) == 0)
1498 metaslab_size_tree_full_load(msp
->ms_unflushed_frees
);
1499 range_seg_t
*rs
= zfs_btree_last(&msp
->ms_unflushed_frees_by_size
,
1505 * When a range is freed from the metaslab, that range is added to
1506 * both the unflushed frees and the deferred frees. While the block
1507 * will eventually be usable, if the metaslab were loaded the range
1508 * would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
1509 * txgs had passed. As a result, when attempting to estimate an upper
1510 * bound for the largest currently-usable free segment in the
1511 * metaslab, we need to not consider any ranges currently in the defer
1512 * trees. This algorithm approximates the largest available chunk in
1513 * the largest range in the unflushed_frees tree by taking the first
1514 * chunk. While this may be a poor estimate, it should only remain so
1515 * briefly and should eventually self-correct as frees are no longer
1516 * deferred. Similar logic applies to the ms_freed tree. See
1517 * metaslab_load() for more details.
1519 * There are two primary sources of inaccuracy in this estimate. Both
1520 * are tolerated for performance reasons. The first source is that we
1521 * only check the largest segment for overlaps. Smaller segments may
1522 * have more favorable overlaps with the other trees, resulting in
1523 * larger usable chunks. Second, we only look at the first chunk in
1524 * the largest segment; there may be other usable chunks in the
1525 * largest segment, but we ignore them.
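	 *
	 * For example, if the largest unflushed-free segment is
	 * [0x1000, 0x9000) and a deferred free covers [0x5000, 0x6000), this
	 * estimate only keeps the leading chunk [0x1000, 0x5000), even though
	 * [0x6000, 0x9000) may also be usable.
	 */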
1527 uint64_t rstart
= rs_get_start(rs
, msp
->ms_unflushed_frees
);
1528 uint64_t rsize
= rs_get_end(rs
, msp
->ms_unflushed_frees
) - rstart
;
1529 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1532 boolean_t found
= range_tree_find_in(msp
->ms_defer
[t
], rstart
,
1533 rsize
, &start
, &size
);
1535 if (rstart
== start
)
1537 rsize
= start
- rstart
;
1543 boolean_t found
= range_tree_find_in(msp
->ms_freed
, rstart
,
1544 rsize
, &start
, &size
);
1546 rsize
= start
- rstart
;
1551 static range_seg_t
*
1552 metaslab_block_find(zfs_btree_t
*t
, range_tree_t
*rt
, uint64_t start
,
1553 uint64_t size
, zfs_btree_index_t
*where
)
1556 range_seg_max_t rsearch
;
1558 rs_set_start(&rsearch
, rt
, start
);
1559 rs_set_end(&rsearch
, rt
, start
+ size
);
1561 rs
= zfs_btree_find(t
, &rsearch
, where
);
1563 rs
= zfs_btree_next(t
, where
, where
);
1569 #if defined(WITH_DF_BLOCK_ALLOCATOR) || \
1570 defined(WITH_CF_BLOCK_ALLOCATOR)
1573 * This is a helper function that can be used by the allocator to find a
1574 * suitable block to allocate. This will search the specified B-tree looking
1575 * for a block that matches the specified criteria.
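 *
 * The search is a simple first fit: starting from the segment at or after
 * *cursor, we walk forward until we find a free segment large enough for
 * the request (advancing the cursor past it), or until we have searched
 * more than max_search bytes beyond the first candidate segment.
 */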
1578 metaslab_block_picker(range_tree_t
*rt
, uint64_t *cursor
, uint64_t size
,
1579 uint64_t max_search
)
1582 *cursor
= rt
->rt_start
;
1583 zfs_btree_t
*bt
= &rt
->rt_root
;
1584 zfs_btree_index_t where
;
1585 range_seg_t
*rs
= metaslab_block_find(bt
, rt
, *cursor
, size
, &where
);
1586 uint64_t first_found
;
1587 int count_searched
= 0;
1590 first_found
= rs_get_start(rs
, rt
);
1592 while (rs
!= NULL
&& (rs_get_start(rs
, rt
) - first_found
<=
1593 max_search
|| count_searched
< metaslab_min_search_count
)) {
1594 uint64_t offset
= rs_get_start(rs
, rt
);
1595 if (offset
+ size
<= rs_get_end(rs
, rt
)) {
1596 *cursor
= offset
+ size
;
1599 rs
= zfs_btree_next(bt
, &where
, &where
);
1606 #endif /* WITH_DF/CF_BLOCK_ALLOCATOR */
1608 #if defined(WITH_DF_BLOCK_ALLOCATOR)
1610 * ==========================================================================
1611 * Dynamic Fit (df) block allocator
1613 * Search for a free chunk of at least this size, starting from the last
1614 * offset (for this alignment of block) looking for up to
1615 * metaslab_df_max_search bytes (16MB). If a large enough free chunk is not
 * found within 16MB, then return a free chunk of exactly the requested size
 * (or larger).
 *
1619 * If it seems like searching from the last offset will be unproductive, skip
1620 * that and just return a free chunk of exactly the requested size (or larger).
1621 * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This
1622 * mechanism is probably not very useful and may be removed in the future.
1624 * The behavior when not searching can be changed to return the largest free
1625 * chunk, instead of a free chunk of exactly the requested size, by setting
1626 * metaslab_df_use_largest_segment.
1627 * ==========================================================================
1630 metaslab_df_alloc(metaslab_t
*msp
, uint64_t size
)
1633 * Find the largest power of 2 block size that evenly divides the
1634 * requested size. This is used to try to allocate blocks with similar
1635 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not guarantee that other allocation sizes
	 * will not exist in the same region.
	 */
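	/*
	 * For example, a 24K (0x6000) request yields align = 0x2000, so it
	 * shares the 8K-alignment cursor; a 128K request, being a power of
	 * two, uses the 128K cursor bucket.
	 */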
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	range_tree_t *rt = msp->ms_allocatable;
	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
1648 * If we're running low on space, find a segment based on size,
1649 * rather than iterating based on offset.
1651 if (metaslab_largest_allocatable(msp
) < metaslab_df_alloc_threshold
||
1652 free_pct
< metaslab_df_free_pct
) {
1655 offset
= metaslab_block_picker(rt
,
1656 cursor
, size
, metaslab_df_max_search
);
1661 if (zfs_btree_numnodes(&msp
->ms_allocatable_by_size
) == 0)
1662 metaslab_size_tree_full_load(msp
->ms_allocatable
);
1664 if (metaslab_df_use_largest_segment
) {
1665 /* use largest free segment */
1666 rs
= zfs_btree_last(&msp
->ms_allocatable_by_size
, NULL
);
1668 zfs_btree_index_t where
;
1669 /* use segment of this size, or next largest */
1670 #ifdef _METASLAB_TRACING
1671 metaslab_rt_arg_t
*mrap
= msp
->ms_allocatable
->rt_arg
;
1672 if (size
< (1 << mrap
->mra_floor_shift
)) {
1674 metaslabstat_df_find_under_floor
);
1677 rs
= metaslab_block_find(&msp
->ms_allocatable_by_size
,
1678 rt
, msp
->ms_start
, size
, &where
);
1680 if (rs
!= NULL
&& rs_get_start(rs
, rt
) + size
<= rs_get_end(rs
,
1682 offset
= rs_get_start(rs
, rt
);
1683 *cursor
= offset
+ size
;
1690 static metaslab_ops_t metaslab_df_ops
= {
1694 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_df_ops
;
1695 #endif /* WITH_DF_BLOCK_ALLOCATOR */
1697 #if defined(WITH_CF_BLOCK_ALLOCATOR)
1699 * ==========================================================================
1700 * Cursor fit block allocator -
1701 * Select the largest region in the metaslab, set the cursor to the beginning
1702 * of the range and the cursor_end to the end of the range. As allocations
1703 * are made advance the cursor. Continue allocating from the cursor until
1704 * the range is exhausted and then find a new range.
1705 * ==========================================================================
1708 metaslab_cf_alloc(metaslab_t
*msp
, uint64_t size
)
1710 range_tree_t
*rt
= msp
->ms_allocatable
;
1711 zfs_btree_t
*t
= &msp
->ms_allocatable_by_size
;
1712 uint64_t *cursor
= &msp
->ms_lbas
[0];
1713 uint64_t *cursor_end
= &msp
->ms_lbas
[1];
1714 uint64_t offset
= 0;
1716 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1718 ASSERT3U(*cursor_end
, >=, *cursor
);
1720 if ((*cursor
+ size
) > *cursor_end
) {
1723 if (zfs_btree_numnodes(t
) == 0)
1724 metaslab_size_tree_full_load(msp
->ms_allocatable
);
1725 rs
= zfs_btree_last(t
, NULL
);
1726 if (rs
== NULL
|| (rs_get_end(rs
, rt
) - rs_get_start(rs
, rt
)) <
1730 *cursor
= rs_get_start(rs
, rt
);
1731 *cursor_end
= rs_get_end(rs
, rt
);
1740 static metaslab_ops_t metaslab_cf_ops
= {
1744 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_cf_ops
;
1745 #endif /* WITH_CF_BLOCK_ALLOCATOR */
1747 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
1749 * ==========================================================================
1750 * New dynamic fit allocator -
1751 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
 * contiguous blocks. If no region is found then just use the largest segment
 * in the metaslab.
1754 * ==========================================================================
1758 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1759 * to request from the allocator.
1761 uint64_t metaslab_ndf_clump_shift
= 4;
1764 metaslab_ndf_alloc(metaslab_t
*msp
, uint64_t size
)
1766 zfs_btree_t
*t
= &msp
->ms_allocatable
->rt_root
;
1767 range_tree_t
*rt
= msp
->ms_allocatable
;
1768 zfs_btree_index_t where
;
1770 range_seg_max_t rsearch
;
1771 uint64_t hbit
= highbit64(size
);
1772 uint64_t *cursor
= &msp
->ms_lbas
[hbit
- 1];
1773 uint64_t max_size
= metaslab_largest_allocatable(msp
);
1775 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1777 if (max_size
< size
)
1780 rs_set_start(&rsearch
, rt
, *cursor
);
1781 rs_set_end(&rsearch
, rt
, *cursor
+ size
);
1783 rs
= zfs_btree_find(t
, &rsearch
, &where
);
1784 if (rs
== NULL
|| (rs_get_end(rs
, rt
) - rs_get_start(rs
, rt
)) < size
) {
1785 t
= &msp
->ms_allocatable_by_size
;
1787 rs_set_start(&rsearch
, rt
, 0);
1788 rs_set_end(&rsearch
, rt
, MIN(max_size
, 1ULL << (hbit
+
1789 metaslab_ndf_clump_shift
)));
1791 rs
= zfs_btree_find(t
, &rsearch
, &where
);
1793 rs
= zfs_btree_next(t
, &where
, &where
);
1797 if ((rs_get_end(rs
, rt
) - rs_get_start(rs
, rt
)) >= size
) {
1798 *cursor
= rs_get_start(rs
, rt
) + size
;
1799 return (rs_get_start(rs
, rt
));
1804 static metaslab_ops_t metaslab_ndf_ops
= {
1808 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_ndf_ops
;
1809 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */

/*
1819 * Wait for any in-progress metaslab loads to complete.
1822 metaslab_load_wait(metaslab_t
*msp
)
1824 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1826 while (msp
->ms_loading
) {
1827 ASSERT(!msp
->ms_loaded
);
1828 cv_wait(&msp
->ms_load_cv
, &msp
->ms_lock
);
1833 * Wait for any in-progress flushing to complete.
1836 metaslab_flush_wait(metaslab_t
*msp
)
1838 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1840 while (msp
->ms_flushing
)
1841 cv_wait(&msp
->ms_flush_cv
, &msp
->ms_lock
);
1845 metaslab_idx_func(multilist_t
*ml
, void *arg
)
1847 metaslab_t
*msp
= arg
;
1848 return (msp
->ms_id
% multilist_get_num_sublists(ml
));
1852 metaslab_allocated_space(metaslab_t
*msp
)
1854 return (msp
->ms_allocated_space
);
/*
 * Verify that the space accounting on disk matches the in-core range_trees.
 */
static void
metaslab_verify_space(metaslab_t *msp, uint64_t txg)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	uint64_t allocating = 0;
	uint64_t sm_free_space, msp_free_space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(!msp->ms_condensing);

	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
		return;

	/*
	 * We can only verify the metaslab space when we're called
	 * from syncing context with a loaded metaslab that has an
	 * allocated space map. Calling this in non-syncing context
	 * does not provide a consistent view of the metaslab since
	 * we're performing allocations in the future.
	 */
	if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
	    !msp->ms_loaded)
		return;

	/*
	 * Even though the smp_alloc field can get negative,
	 * when it comes to a metaslab's space map, that should
	 * never be the case.
	 */
	ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);

	ASSERT3U(space_map_allocated(msp->ms_sm), >=,
	    range_tree_space(msp->ms_unflushed_frees));

	ASSERT3U(metaslab_allocated_space(msp), ==,
	    space_map_allocated(msp->ms_sm) +
	    range_tree_space(msp->ms_unflushed_allocs) -
	    range_tree_space(msp->ms_unflushed_frees));

	sm_free_space = msp->ms_size - metaslab_allocated_space(msp);

	/*
	 * Account for future allocations since we would have
	 * already deducted that space from the ms_allocatable.
	 */
	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
		allocating +=
		    range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
	}
	ASSERT3U(allocating + msp->ms_allocated_this_txg, ==,
	    msp->ms_allocating_total);

	ASSERT3U(msp->ms_deferspace, ==,
	    range_tree_space(msp->ms_defer[0]) +
	    range_tree_space(msp->ms_defer[1]));

	msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
	    msp->ms_deferspace + range_tree_space(msp->ms_freed);

	VERIFY3U(sm_free_space, ==, msp_free_space);
}
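
/*
 * Illustrative sketch of the accounting identity verified above, using
 * purely hypothetical values for a 1GB metaslab:
 *
 *	allocated      = space_map_allocated(ms_sm)            600MB
 *	                 + unflushed_allocs                   +  40MB
 *	                 - unflushed_frees                    -  16MB
 *	                                                      = 624MB
 *	sm_free_space  = ms_size - allocated = 1024MB - 624MB  = 400MB
 *
 *	msp_free_space = ms_allocatable (344MB) + allocating (8MB)
 *	                 + ms_deferspace (32MB) + ms_freed (16MB) = 400MB
 *
 * and the final VERIFY requires the two totals to agree exactly.
 */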
static void
metaslab_aux_histograms_clear(metaslab_t *msp)
{
	/*
	 * Auxiliary histograms are only cleared when resetting them,
	 * which can only happen while the metaslab is loaded.
	 */
	ASSERT(msp->ms_loaded);

	bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
	for (int t = 0; t < TXG_DEFER_SIZE; t++)
		bzero(msp->ms_deferhist[t], sizeof (msp->ms_deferhist[t]));
}

static void
metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
    range_tree_t *rt)
{
	/*
	 * This is modeled after space_map_histogram_add(), so refer to that
	 * function for implementation details. We want this to work like
	 * the space map histogram, and not the range tree histogram, as we
	 * are essentially constructing a delta that will be later subtracted
	 * from the space map histogram.
	 */
	int idx = 0;
	for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
		ASSERT3U(i, >=, idx + shift);
		histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);

		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}
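
/*
 * A minimal sketch of the downshifting above, with hypothetical numbers:
 * with shift = 9 (512B sectors), range tree bucket i = 12 (4K segments)
 * lands in space map bucket idx = i - shift = 3, and range tree buckets
 * beyond the last space map bucket are folded into that last bucket,
 * scaled up by one doubling per extra bucket:
 *
 *	histogram[3] += rt->rt_histogram[12];
 *	histogram[SPACE_MAP_HISTOGRAM_SIZE - 1] +=
 *	    rt->rt_histogram[i] << (i - idx - shift);
 */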
/*
 * Called at every sync pass that the metaslab gets synced.
 *
 * The reason is that we want our auxiliary histograms to be updated
 * wherever the metaslab's space map histogram is updated. This way
 * we stay consistent on which parts of the metaslab space map's
 * histogram are currently not available for allocations (e.g because
 * they are in the defer, freed, and freeing trees).
 */
static void
metaslab_aux_histograms_update(metaslab_t *msp)
{
	space_map_t *sm = msp->ms_sm;
	ASSERT(sm != NULL);

	/*
	 * This is similar to the metaslab's space map histogram updates
	 * that take place in metaslab_sync(). The only difference is that
	 * we only care about segments that haven't made it into the
	 * ms_allocatable tree yet.
	 */
	if (msp->ms_loaded) {
		metaslab_aux_histograms_clear(msp);

		metaslab_aux_histogram_add(msp->ms_synchist,
		    sm->sm_shift, msp->ms_freed);

		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			metaslab_aux_histogram_add(msp->ms_deferhist[t],
			    sm->sm_shift, msp->ms_defer[t]);
		}
	}

	metaslab_aux_histogram_add(msp->ms_synchist,
	    sm->sm_shift, msp->ms_freeing);
}
/*
 * Called every time we are done syncing (writing to) the metaslab,
 * i.e. at the end of each sync pass.
 * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
 */
static void
metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	space_map_t *sm = msp->ms_sm;

	if (sm == NULL) {
		/*
		 * We came here from metaslab_init() when creating/opening a
		 * pool, looking at a metaslab that hasn't had any allocations
		 * yet.
		 */
		return;
	}

	/*
	 * This is similar to the actions that we take for the ms_freed
	 * and ms_defer trees in metaslab_sync_done().
	 */
	uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
	if (defer_allowed) {
		bcopy(msp->ms_synchist, msp->ms_deferhist[hist_index],
		    sizeof (msp->ms_synchist));
	} else {
		bzero(msp->ms_deferhist[hist_index],
		    sizeof (msp->ms_deferhist[hist_index]));
	}
	bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
}
2033 * Ensure that the metaslab's weight and fragmentation are consistent
2034 * with the contents of the histogram (either the range tree's histogram
2035 * or the space map's depending whether the metaslab is loaded).
2038 metaslab_verify_weight_and_frag(metaslab_t
*msp
)
2040 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
2042 if ((zfs_flags
& ZFS_DEBUG_METASLAB_VERIFY
) == 0)
2046 * We can end up here from vdev_remove_complete(), in which case we
2047 * cannot do these assertions because we hold spa config locks and
2048 * thus we are not allowed to read from the DMU.
2050 * We check if the metaslab group has been removed and if that's
2051 * the case we return immediately as that would mean that we are
2052 * here from the aforementioned code path.
2054 if (msp
->ms_group
== NULL
)
2058 * Devices being removed always return a weight of 0 and leave
2059 * fragmentation and ms_max_size as is - there is nothing for
2060 * us to verify here.
2062 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
2063 if (vd
->vdev_removing
)
2067 * If the metaslab is dirty it probably means that we've done
2068 * some allocations or frees that have changed our histograms
2069 * and thus the weight.
2071 for (int t
= 0; t
< TXG_SIZE
; t
++) {
2072 if (txg_list_member(&vd
->vdev_ms_list
, msp
, t
))
2077 * This verification checks that our in-memory state is consistent
2078 * with what's on disk. If the pool is read-only then there aren't
2079 * any changes and we just have the initially-loaded state.
2081 if (!spa_writeable(msp
->ms_group
->mg_vd
->vdev_spa
))
2084 /* some extra verification for in-core tree if you can */
2085 if (msp
->ms_loaded
) {
2086 range_tree_stat_verify(msp
->ms_allocatable
);
2087 VERIFY(space_map_histogram_verify(msp
->ms_sm
,
2088 msp
->ms_allocatable
));
2091 uint64_t weight
= msp
->ms_weight
;
2092 uint64_t was_active
= msp
->ms_weight
& METASLAB_ACTIVE_MASK
;
2093 boolean_t space_based
= WEIGHT_IS_SPACEBASED(msp
->ms_weight
);
2094 uint64_t frag
= msp
->ms_fragmentation
;
2095 uint64_t max_segsize
= msp
->ms_max_size
;
2098 msp
->ms_fragmentation
= 0;
2101 * This function is used for verification purposes and thus should
2102 * not introduce any side-effects/mutations on the system's state.
2104 * Regardless of whether metaslab_weight() thinks this metaslab
2105 * should be active or not, we want to ensure that the actual weight
2106 * (and therefore the value of ms_weight) would be the same if it
2107 * was to be recalculated at this point.
2109 * In addition we set the nodirty flag so metaslab_weight() does
2110 * not dirty the metaslab for future TXGs (e.g. when trying to
2111 * force condensing to upgrade the metaslab spacemaps).
2113 msp
->ms_weight
= metaslab_weight(msp
, B_TRUE
) | was_active
;
2115 VERIFY3U(max_segsize
, ==, msp
->ms_max_size
);
2118 * If the weight type changed then there is no point in doing
2119 * verification. Revert fields to their original values.
2121 if ((space_based
&& !WEIGHT_IS_SPACEBASED(msp
->ms_weight
)) ||
2122 (!space_based
&& WEIGHT_IS_SPACEBASED(msp
->ms_weight
))) {
2123 msp
->ms_fragmentation
= frag
;
2124 msp
->ms_weight
= weight
;
2128 VERIFY3U(msp
->ms_fragmentation
, ==, frag
);
2129 VERIFY3U(msp
->ms_weight
, ==, weight
);
/*
 * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
 * this class that was used longest ago, and attempt to unload it. We don't
 * want to spend too much time in this loop to prevent performance
 * degradation, and we expect that most of the time this operation will
 * succeed. Between that and the normal unloading processing during txg sync,
 * we expect this to keep the metaslab memory usage under control.
 */
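/*
 * A rough sketch of the trigger condition below, with hypothetical numbers:
 * on a system with 16GB of memory (allmem) and zfs_metaslab_mem_limit set
 * to 25, eviction is attempted while the btree leaf cache footprint
 * (inuse * size) exceeds the limit:
 *
 *	allmem * zfs_metaslab_mem_limit / 100 = 16GB * 25 / 100 = 4GB
 *	inuse * size = e.g. 5GB  ->  over the limit, try to evict
 */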
static void
metaslab_potentially_evict(metaslab_class_t *mc)
{
#ifdef _KERNEL
	uint64_t allmem = arc_all_memory();
	uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
	uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
	int tries = 0;
	for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
	    tries < multilist_get_num_sublists(mc->mc_metaslab_txg_list) * 2;
	    tries++) {
		unsigned int idx = multilist_get_random_index(
		    mc->mc_metaslab_txg_list);
		multilist_sublist_t *mls =
		    multilist_sublist_lock(mc->mc_metaslab_txg_list, idx);
		metaslab_t *msp = multilist_sublist_head(mls);
		multilist_sublist_unlock(mls);
		while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
		    inuse * size) {
			VERIFY3P(mls, ==, multilist_sublist_lock(
			    mc->mc_metaslab_txg_list, idx));
			ASSERT3U(idx, ==,
			    metaslab_idx_func(mc->mc_metaslab_txg_list, msp));

			if (!multilist_link_active(&msp->ms_class_txg_node)) {
				multilist_sublist_unlock(mls);
				break;
			}
			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
			multilist_sublist_unlock(mls);
			/*
			 * If the metaslab is currently loading there are two
			 * cases. If it's the metaslab we're evicting, we
			 * can't continue on or we'll panic when we attempt to
			 * recursively lock the mutex. If it's another
			 * metaslab that's loading, it can be safely skipped,
			 * since we know it's very new and therefore not a
			 * good eviction candidate. We check later once the
			 * lock is held that the metaslab is fully loaded
			 * before actually unloading it.
			 */
			if (msp->ms_loading) {
				msp = next_msp;
				inuse =
				    spl_kmem_cache_inuse(zfs_btree_leaf_cache);
				continue;
			}
			/*
			 * We can't unload metaslabs with no spacemap because
			 * they're not ready to be unloaded yet. We can't
			 * unload metaslabs with outstanding allocations
			 * because doing so could cause the metaslab's weight
			 * to decrease while it's unloaded, which violates an
			 * invariant that we use to prevent unnecessary
			 * loading. We also don't unload metaslabs that are
			 * currently active because they are high-weight
			 * metaslabs that are likely to be used in the near
			 * future.
			 */
			mutex_enter(&msp->ms_lock);
			if (msp->ms_allocator == -1 && msp->ms_sm != NULL &&
			    msp->ms_allocating_total == 0) {
				metaslab_unload(msp);
			}
			mutex_exit(&msp->ms_lock);
			msp = next_msp;
			inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
		}
	}
#endif
}
2213 metaslab_load_impl(metaslab_t
*msp
)
2217 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
2218 ASSERT(msp
->ms_loading
);
2219 ASSERT(!msp
->ms_condensing
);
2222 * We temporarily drop the lock to unblock other operations while we
2223 * are reading the space map. Therefore, metaslab_sync() and
2224 * metaslab_sync_done() can run at the same time as we do.
2226 * If we are using the log space maps, metaslab_sync() can't write to
2227 * the metaslab's space map while we are loading as we only write to
2228 * it when we are flushing the metaslab, and that can't happen while
2229 * we are loading it.
2231 * If we are not using log space maps though, metaslab_sync() can
2232 * append to the space map while we are loading. Therefore we load
2233 * only entries that existed when we started the load. Additionally,
2234 * metaslab_sync_done() has to wait for the load to complete because
2235 * there are potential races like metaslab_load() loading parts of the
2236 * space map that are currently being appended by metaslab_sync(). If
2237 * we didn't, the ms_allocatable would have entries that
2238 * metaslab_sync_done() would try to re-add later.
2240 * That's why before dropping the lock we remember the synced length
2241 * of the metaslab and read up to that point of the space map,
2242 * ignoring entries appended by metaslab_sync() that happen after we
2245 uint64_t length
= msp
->ms_synced_length
;
2246 mutex_exit(&msp
->ms_lock
);
2248 hrtime_t load_start
= gethrtime();
2249 metaslab_rt_arg_t
*mrap
;
2250 if (msp
->ms_allocatable
->rt_arg
== NULL
) {
2251 mrap
= kmem_zalloc(sizeof (*mrap
), KM_SLEEP
);
2253 mrap
= msp
->ms_allocatable
->rt_arg
;
2254 msp
->ms_allocatable
->rt_ops
= NULL
;
2255 msp
->ms_allocatable
->rt_arg
= NULL
;
2257 mrap
->mra_bt
= &msp
->ms_allocatable_by_size
;
2258 mrap
->mra_floor_shift
= metaslab_by_size_min_shift
;
2260 if (msp
->ms_sm
!= NULL
) {
2261 error
= space_map_load_length(msp
->ms_sm
, msp
->ms_allocatable
,
2264 /* Now, populate the size-sorted tree. */
2265 metaslab_rt_create(msp
->ms_allocatable
, mrap
);
2266 msp
->ms_allocatable
->rt_ops
= &metaslab_rt_ops
;
2267 msp
->ms_allocatable
->rt_arg
= mrap
;
2269 struct mssa_arg arg
= {0};
2270 arg
.rt
= msp
->ms_allocatable
;
2272 range_tree_walk(msp
->ms_allocatable
, metaslab_size_sorted_add
,
2276 * Add the size-sorted tree first, since we don't need to load
2277 * the metaslab from the spacemap.
2279 metaslab_rt_create(msp
->ms_allocatable
, mrap
);
2280 msp
->ms_allocatable
->rt_ops
= &metaslab_rt_ops
;
2281 msp
->ms_allocatable
->rt_arg
= mrap
;
2283 * The space map has not been allocated yet, so treat
2284 * all the space in the metaslab as free and add it to the
2285 * ms_allocatable tree.
2287 range_tree_add(msp
->ms_allocatable
,
2288 msp
->ms_start
, msp
->ms_size
);
2290 if (msp
->ms_freed
!= NULL
) {
2292 * If the ms_sm doesn't exist, this means that this
2293 * metaslab hasn't gone through metaslab_sync() and
2294 * thus has never been dirtied. So we shouldn't
2295 * expect any unflushed allocs or frees from previous
2298 * Note: ms_freed and all the other trees except for
2299 * the ms_allocatable, can be NULL at this point only
2300 * if this is a new metaslab of a vdev that just got
2303 ASSERT(range_tree_is_empty(msp
->ms_unflushed_allocs
));
2304 ASSERT(range_tree_is_empty(msp
->ms_unflushed_frees
));
2309 * We need to grab the ms_sync_lock to prevent metaslab_sync() from
2310 * changing the ms_sm (or log_sm) and the metaslab's range trees
2311 * while we are about to use them and populate the ms_allocatable.
2312 * The ms_lock is insufficient for this because metaslab_sync() doesn't
2313 * hold the ms_lock while writing the ms_checkpointing tree to disk.
2315 mutex_enter(&msp
->ms_sync_lock
);
2316 mutex_enter(&msp
->ms_lock
);
2318 ASSERT(!msp
->ms_condensing
);
2319 ASSERT(!msp
->ms_flushing
);
2322 mutex_exit(&msp
->ms_sync_lock
);
2326 ASSERT3P(msp
->ms_group
, !=, NULL
);
2327 msp
->ms_loaded
= B_TRUE
;
2330 * Apply all the unflushed changes to ms_allocatable right
2331 * away so any manipulations we do below have a clear view
2332 * of what is allocated and what is free.
2334 range_tree_walk(msp
->ms_unflushed_allocs
,
2335 range_tree_remove
, msp
->ms_allocatable
);
2336 range_tree_walk(msp
->ms_unflushed_frees
,
2337 range_tree_add
, msp
->ms_allocatable
);
2339 msp
->ms_loaded
= B_TRUE
;
2341 ASSERT3P(msp
->ms_group
, !=, NULL
);
2342 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
2343 if (spa_syncing_log_sm(spa
) != NULL
) {
2344 ASSERT(spa_feature_is_enabled(spa
,
2345 SPA_FEATURE_LOG_SPACEMAP
));
2348 * If we use a log space map we add all the segments
2349 * that are in ms_unflushed_frees so they are available
2352 * ms_allocatable needs to contain all free segments
2353 * that are ready for allocations (thus not segments
2354 * from ms_freeing, ms_freed, and the ms_defer trees).
2355 * But if we grab the lock in this code path at a sync
2356 * pass later that 1, then it also contains the
2357 * segments of ms_freed (they were added to it earlier
2358 * in this path through ms_unflushed_frees). So we
2359 * need to remove all the segments that exist in
2360 * ms_freed from ms_allocatable as they will be added
2361 * later in metaslab_sync_done().
2363 * When there's no log space map, the ms_allocatable
2364 * correctly doesn't contain any segments that exist
2365 * in ms_freed [see ms_synced_length].
2367 range_tree_walk(msp
->ms_freed
,
2368 range_tree_remove
, msp
->ms_allocatable
);
2372 * If we are not using the log space map, ms_allocatable
2373 * contains the segments that exist in the ms_defer trees
2374 * [see ms_synced_length]. Thus we need to remove them
2375 * from ms_allocatable as they will be added again in
2376 * metaslab_sync_done().
2378 * If we are using the log space map, ms_allocatable still
2379 * contains the segments that exist in the ms_defer trees.
2380 * Not because it read them through the ms_sm though. But
2381 * because these segments are part of ms_unflushed_frees
2382 * whose segments we add to ms_allocatable earlier in this
2385 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
2386 range_tree_walk(msp
->ms_defer
[t
],
2387 range_tree_remove
, msp
->ms_allocatable
);
2391 * Call metaslab_recalculate_weight_and_sort() now that the
2392 * metaslab is loaded so we get the metaslab's real weight.
2394 * Unless this metaslab was created with older software and
2395 * has not yet been converted to use segment-based weight, we
2396 * expect the new weight to be better or equal to the weight
2397 * that the metaslab had while it was not loaded. This is
2398 * because the old weight does not take into account the
2399 * consolidation of adjacent segments between TXGs. [see
2400 * comment for ms_synchist and ms_deferhist[] for more info]
2402 uint64_t weight
= msp
->ms_weight
;
2403 uint64_t max_size
= msp
->ms_max_size
;
2404 metaslab_recalculate_weight_and_sort(msp
);
2405 if (!WEIGHT_IS_SPACEBASED(weight
))
2406 ASSERT3U(weight
, <=, msp
->ms_weight
);
2407 msp
->ms_max_size
= metaslab_largest_allocatable(msp
);
2408 ASSERT3U(max_size
, <=, msp
->ms_max_size
);
2409 hrtime_t load_end
= gethrtime();
2410 msp
->ms_load_time
= load_end
;
2411 zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
2412 "ms_id %llu, smp_length %llu, "
2413 "unflushed_allocs %llu, unflushed_frees %llu, "
2414 "freed %llu, defer %llu + %llu, unloaded time %llu ms, "
2415 "loading_time %lld ms, ms_max_size %llu, "
2416 "max size error %lld, "
2417 "old_weight %llx, new_weight %llx",
2418 spa_syncing_txg(spa
), spa_name(spa
),
2419 msp
->ms_group
->mg_vd
->vdev_id
, msp
->ms_id
,
2420 space_map_length(msp
->ms_sm
),
2421 range_tree_space(msp
->ms_unflushed_allocs
),
2422 range_tree_space(msp
->ms_unflushed_frees
),
2423 range_tree_space(msp
->ms_freed
),
2424 range_tree_space(msp
->ms_defer
[0]),
2425 range_tree_space(msp
->ms_defer
[1]),
2426 (longlong_t
)((load_start
- msp
->ms_unload_time
) / 1000000),
2427 (longlong_t
)((load_end
- load_start
) / 1000000),
2428 msp
->ms_max_size
, msp
->ms_max_size
- max_size
,
2429 weight
, msp
->ms_weight
);
2431 metaslab_verify_space(msp
, spa_syncing_txg(spa
));
2432 mutex_exit(&msp
->ms_sync_lock
);
2437 metaslab_load(metaslab_t
*msp
)
2439 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
2442 * There may be another thread loading the same metaslab, if that's
2443 * the case just wait until the other thread is done and return.
2445 metaslab_load_wait(msp
);
2448 VERIFY(!msp
->ms_loading
);
2449 ASSERT(!msp
->ms_condensing
);
2452 * We set the loading flag BEFORE potentially dropping the lock to
2453 * wait for an ongoing flush (see ms_flushing below). This way other
2454 * threads know that there is already a thread that is loading this
2457 msp
->ms_loading
= B_TRUE
;
2460 * Wait for any in-progress flushing to finish as we drop the ms_lock
2461 * both here (during space_map_load()) and in metaslab_flush() (when
2462 * we flush our changes to the ms_sm).
2464 if (msp
->ms_flushing
)
2465 metaslab_flush_wait(msp
);
2468 * In the possibility that we were waiting for the metaslab to be
2469 * flushed (where we temporarily dropped the ms_lock), ensure that
2470 * no one else loaded the metaslab somehow.
2472 ASSERT(!msp
->ms_loaded
);
2475 * If we're loading a metaslab in the normal class, consider evicting
2476 * another one to keep our memory usage under the limit defined by the
2477 * zfs_metaslab_mem_limit tunable.
2479 if (spa_normal_class(msp
->ms_group
->mg_class
->mc_spa
) ==
2480 msp
->ms_group
->mg_class
) {
2481 metaslab_potentially_evict(msp
->ms_group
->mg_class
);
2484 int error
= metaslab_load_impl(msp
);
2486 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
2487 msp
->ms_loading
= B_FALSE
;
2488 cv_broadcast(&msp
->ms_load_cv
);
2494 metaslab_unload(metaslab_t
*msp
)
2496 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
2499 * This can happen if a metaslab is selected for eviction (in
2500 * metaslab_potentially_evict) and then unloaded during spa_sync (via
2501 * metaslab_class_evict_old).
2503 if (!msp
->ms_loaded
)
2506 range_tree_vacate(msp
->ms_allocatable
, NULL
, NULL
);
2507 msp
->ms_loaded
= B_FALSE
;
2508 msp
->ms_unload_time
= gethrtime();
2510 msp
->ms_activation_weight
= 0;
2511 msp
->ms_weight
&= ~METASLAB_ACTIVE_MASK
;
2513 if (msp
->ms_group
!= NULL
) {
2514 metaslab_class_t
*mc
= msp
->ms_group
->mg_class
;
2515 multilist_sublist_t
*mls
=
2516 multilist_sublist_lock_obj(mc
->mc_metaslab_txg_list
, msp
);
2517 if (multilist_link_active(&msp
->ms_class_txg_node
))
2518 multilist_sublist_remove(mls
, msp
);
2519 multilist_sublist_unlock(mls
);
2521 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
2522 zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
2523 "ms_id %llu, weight %llx, "
2524 "selected txg %llu (%llu ms ago), alloc_txg %llu, "
2525 "loaded %llu ms ago, max_size %llu",
2526 spa_syncing_txg(spa
), spa_name(spa
),
2527 msp
->ms_group
->mg_vd
->vdev_id
, msp
->ms_id
,
2529 msp
->ms_selected_txg
,
2530 (msp
->ms_unload_time
- msp
->ms_selected_time
) / 1000 / 1000,
2532 (msp
->ms_unload_time
- msp
->ms_load_time
) / 1000 / 1000,
2537 * We explicitly recalculate the metaslab's weight based on its space
2538 * map (as it is now not loaded). We want unload metaslabs to always
2539 * have their weights calculated from the space map histograms, while
2540 * loaded ones have it calculated from their in-core range tree
2541 * [see metaslab_load()]. This way, the weight reflects the information
2542 * available in-core, whether it is loaded or not.
2544 * If ms_group == NULL means that we came here from metaslab_fini(),
2545 * at which point it doesn't make sense for us to do the recalculation
2548 if (msp
->ms_group
!= NULL
)
2549 metaslab_recalculate_weight_and_sort(msp
);
/*
 * We want to optimize the memory use of the per-metaslab range
 * trees. To do this, we store the segments in the range trees in
 * units of sectors, zero-indexing from the start of the metaslab. If
 * the vdev_ms_shift - the vdev_ashift is less than 32, we can store
 * the ranges using two uint32_ts, rather than two uint64_ts.
 */
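/*
 * For illustration (hypothetical vdev geometry): with vdev_ms_shift = 34
 * (16GB metaslabs) and vdev_ashift = 12 (4K sectors), 34 - 12 = 22 < 32,
 * so any offset within the metaslab fits in 22 bits of 4K units and the
 * segments can be stored as RANGE_SEG32 (two uint32_ts), roughly halving
 * the per-segment memory compared to RANGE_SEG64.
 */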
static range_seg_type_t
metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp,
    uint64_t *start, uint64_t *shift)
{
	if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 &&
	    !zfs_metaslab_force_large_segs) {
		*shift = vdev->vdev_ashift;
		*start = msp->ms_start;
		return (RANGE_SEG32);
	} else {
		*shift = 0;
		*start = 0;
		return (RANGE_SEG64);
	}
}
void
metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	metaslab_class_t *mc = msp->ms_group->mg_class;
	multilist_sublist_t *mls =
	    multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp);
	if (multilist_link_active(&msp->ms_class_txg_node))
		multilist_sublist_remove(mls, msp);
	msp->ms_selected_txg = txg;
	msp->ms_selected_time = gethrtime();
	multilist_sublist_insert_tail(mls, msp);
	multilist_sublist_unlock(mls);
}
static void
metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta)
{
	vdev_space_update(vd, alloc_delta, defer_delta, space_delta);

	ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
	ASSERT(vd->vdev_ms_count != 0);

	metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
	    vdev_deflated_space(vd, space_delta));
}
2604 metaslab_init(metaslab_group_t
*mg
, uint64_t id
, uint64_t object
,
2605 uint64_t txg
, metaslab_t
**msp
)
2607 vdev_t
*vd
= mg
->mg_vd
;
2608 spa_t
*spa
= vd
->vdev_spa
;
2609 objset_t
*mos
= spa
->spa_meta_objset
;
2613 ms
= kmem_zalloc(sizeof (metaslab_t
), KM_SLEEP
);
2614 mutex_init(&ms
->ms_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
2615 mutex_init(&ms
->ms_sync_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
2616 cv_init(&ms
->ms_load_cv
, NULL
, CV_DEFAULT
, NULL
);
2617 cv_init(&ms
->ms_flush_cv
, NULL
, CV_DEFAULT
, NULL
);
2618 multilist_link_init(&ms
->ms_class_txg_node
);
2621 ms
->ms_start
= id
<< vd
->vdev_ms_shift
;
2622 ms
->ms_size
= 1ULL << vd
->vdev_ms_shift
;
2623 ms
->ms_allocator
= -1;
2624 ms
->ms_new
= B_TRUE
;
2626 vdev_ops_t
*ops
= vd
->vdev_ops
;
2627 if (ops
->vdev_op_metaslab_init
!= NULL
)
2628 ops
->vdev_op_metaslab_init(vd
, &ms
->ms_start
, &ms
->ms_size
);
2631 * We only open space map objects that already exist. All others
2632 * will be opened when we finally allocate an object for it.
2635 * When called from vdev_expand(), we can't call into the DMU as
2636 * we are holding the spa_config_lock as a writer and we would
2637 * deadlock [see relevant comment in vdev_metaslab_init()]. in
2638 * that case, the object parameter is zero though, so we won't
2639 * call into the DMU.
2642 error
= space_map_open(&ms
->ms_sm
, mos
, object
, ms
->ms_start
,
2643 ms
->ms_size
, vd
->vdev_ashift
);
2646 kmem_free(ms
, sizeof (metaslab_t
));
2650 ASSERT(ms
->ms_sm
!= NULL
);
2651 ms
->ms_allocated_space
= space_map_allocated(ms
->ms_sm
);
2654 range_seg_type_t type
;
2655 uint64_t shift
, start
;
2656 type
= metaslab_calculate_range_tree_type(vd
, ms
, &start
, &shift
);
2659 * We create the ms_allocatable here, but we don't create the
2660 * other range trees until metaslab_sync_done(). This serves
2661 * two purposes: it allows metaslab_sync_done() to detect the
2662 * addition of new space; and for debugging, it ensures that
2663 * we'd data fault on any attempt to use this metaslab before
2666 ms
->ms_allocatable
= range_tree_create(NULL
, type
, NULL
, start
, shift
);
2668 ms
->ms_trim
= range_tree_create(NULL
, type
, NULL
, start
, shift
);
2670 metaslab_group_add(mg
, ms
);
2671 metaslab_set_fragmentation(ms
, B_FALSE
);
2674 * If we're opening an existing pool (txg == 0) or creating
2675 * a new one (txg == TXG_INITIAL), all space is available now.
2676 * If we're adding space to an existing pool, the new space
2677 * does not become available until after this txg has synced.
2678 * The metaslab's weight will also be initialized when we sync
2679 * out this txg. This ensures that we don't attempt to allocate
2680 * from it before we have initialized it completely.
2682 if (txg
<= TXG_INITIAL
) {
2683 metaslab_sync_done(ms
, 0);
2684 metaslab_space_update(vd
, mg
->mg_class
,
2685 metaslab_allocated_space(ms
), 0, 0);
2689 vdev_dirty(vd
, 0, NULL
, txg
);
2690 vdev_dirty(vd
, VDD_METASLAB
, ms
, txg
);
2699 metaslab_fini_flush_data(metaslab_t
*msp
)
2701 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
2703 if (metaslab_unflushed_txg(msp
) == 0) {
2704 ASSERT3P(avl_find(&spa
->spa_metaslabs_by_flushed
, msp
, NULL
),
2708 ASSERT(spa_feature_is_active(spa
, SPA_FEATURE_LOG_SPACEMAP
));
2710 mutex_enter(&spa
->spa_flushed_ms_lock
);
2711 avl_remove(&spa
->spa_metaslabs_by_flushed
, msp
);
2712 mutex_exit(&spa
->spa_flushed_ms_lock
);
2714 spa_log_sm_decrement_mscount(spa
, metaslab_unflushed_txg(msp
));
2715 spa_log_summary_decrement_mscount(spa
, metaslab_unflushed_txg(msp
));
static uint64_t
metaslab_unflushed_changes_memused(metaslab_t *ms)
{
	return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
	    range_tree_numsegs(ms->ms_unflushed_frees)) *
	    ms->ms_unflushed_allocs->rt_root.bt_elem_size);
}
2727 metaslab_fini(metaslab_t
*msp
)
2729 metaslab_group_t
*mg
= msp
->ms_group
;
2730 vdev_t
*vd
= mg
->mg_vd
;
2731 spa_t
*spa
= vd
->vdev_spa
;
2733 metaslab_fini_flush_data(msp
);
2735 metaslab_group_remove(mg
, msp
);
2737 mutex_enter(&msp
->ms_lock
);
2738 VERIFY(msp
->ms_group
== NULL
);
2739 metaslab_space_update(vd
, mg
->mg_class
,
2740 -metaslab_allocated_space(msp
), 0, -msp
->ms_size
);
2742 space_map_close(msp
->ms_sm
);
2745 metaslab_unload(msp
);
2746 range_tree_destroy(msp
->ms_allocatable
);
2747 range_tree_destroy(msp
->ms_freeing
);
2748 range_tree_destroy(msp
->ms_freed
);
2750 ASSERT3U(spa
->spa_unflushed_stats
.sus_memused
, >=,
2751 metaslab_unflushed_changes_memused(msp
));
2752 spa
->spa_unflushed_stats
.sus_memused
-=
2753 metaslab_unflushed_changes_memused(msp
);
2754 range_tree_vacate(msp
->ms_unflushed_allocs
, NULL
, NULL
);
2755 range_tree_destroy(msp
->ms_unflushed_allocs
);
2756 range_tree_vacate(msp
->ms_unflushed_frees
, NULL
, NULL
);
2757 range_tree_destroy(msp
->ms_unflushed_frees
);
2759 for (int t
= 0; t
< TXG_SIZE
; t
++) {
2760 range_tree_destroy(msp
->ms_allocating
[t
]);
2763 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
2764 range_tree_destroy(msp
->ms_defer
[t
]);
2766 ASSERT0(msp
->ms_deferspace
);
2768 range_tree_destroy(msp
->ms_checkpointing
);
2770 for (int t
= 0; t
< TXG_SIZE
; t
++)
2771 ASSERT(!txg_list_member(&vd
->vdev_ms_list
, msp
, t
));
2773 range_tree_vacate(msp
->ms_trim
, NULL
, NULL
);
2774 range_tree_destroy(msp
->ms_trim
);
2776 mutex_exit(&msp
->ms_lock
);
2777 cv_destroy(&msp
->ms_load_cv
);
2778 cv_destroy(&msp
->ms_flush_cv
);
2779 mutex_destroy(&msp
->ms_lock
);
2780 mutex_destroy(&msp
->ms_sync_lock
);
2781 ASSERT3U(msp
->ms_allocator
, ==, -1);
2783 kmem_free(msp
, sizeof (metaslab_t
));
#define	FRAGMENTATION_TABLE_SIZE	17

/*
 * This table defines a segment size based fragmentation metric that will
 * allow each metaslab to derive its own fragmentation value. This is done
 * by calculating the space in each bucket of the spacemap histogram and
 * multiplying that by the fragmentation metric in this table. Doing
 * this for all buckets and dividing it by the total amount of free
 * space in this metaslab (i.e. the total free space in all buckets) gives
 * us the fragmentation metric. This means that a high fragmentation metric
 * equates to most of the free space being comprised of small segments.
 * Conversely, if the metric is low, then most of the free space is in
 * large segments. A 10% change in fragmentation equates to approximately
 * double the number of segments.
 *
 * This table defines 0% fragmented space using 16MB segments. Testing has
 * shown that segments that are greater than or equal to 16MB do not suffer
 * from drastic performance problems. Using this value, we derive the rest
 * of the table. Since the fragmentation value is never stored on disk, it
 * is possible to change these calculations in the future.
 */
int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
	100,	/* 512B	*/
	100,	/* 1K	*/
	98,	/* 2K	*/
	95,	/* 4K	*/
	90,	/* 8K	*/
	80,	/* 16K	*/
	70,	/* 32K	*/
	60,	/* 64K	*/
	50,	/* 128K	*/
	40,	/* 256K	*/
	30,	/* 512K	*/
	20,	/* 1M	*/
	15,	/* 2M	*/
	10,	/* 4M	*/
	5,	/* 8M	*/
	0,	/* 16M	*/
	0	/* 32M	*/
};
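
/*
 * Worked example (hypothetical histogram): a metaslab whose free space is
 * 60MB in 8K segments and 40MB in 16MB segments would score, per the table
 * above (8K -> 90, 16MB -> 0):
 *
 *	fragmentation = (60MB * 90 + 40MB * 0) / 100MB = 54
 *
 * i.e. just over half "fragmented", because most of its free space sits in
 * small segments.
 */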
2827 * Calculate the metaslab's fragmentation metric and set ms_fragmentation.
2828 * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
2829 * been upgraded and does not support this metric. Otherwise, the return
2830 * value should be in the range [0, 100].
2833 metaslab_set_fragmentation(metaslab_t
*msp
, boolean_t nodirty
)
2835 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
2836 uint64_t fragmentation
= 0;
2838 boolean_t feature_enabled
= spa_feature_is_enabled(spa
,
2839 SPA_FEATURE_SPACEMAP_HISTOGRAM
);
2841 if (!feature_enabled
) {
2842 msp
->ms_fragmentation
= ZFS_FRAG_INVALID
;
2847 * A null space map means that the entire metaslab is free
2848 * and thus is not fragmented.
2850 if (msp
->ms_sm
== NULL
) {
2851 msp
->ms_fragmentation
= 0;
2856 * If this metaslab's space map has not been upgraded, flag it
2857 * so that we upgrade next time we encounter it.
2859 if (msp
->ms_sm
->sm_dbuf
->db_size
!= sizeof (space_map_phys_t
)) {
2860 uint64_t txg
= spa_syncing_txg(spa
);
2861 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
2864 * If we've reached the final dirty txg, then we must
2865 * be shutting down the pool. We don't want to dirty
2866 * any data past this point so skip setting the condense
2867 * flag. We can retry this action the next time the pool
2868 * is imported. We also skip marking this metaslab for
2869 * condensing if the caller has explicitly set nodirty.
2872 spa_writeable(spa
) && txg
< spa_final_dirty_txg(spa
)) {
2873 msp
->ms_condense_wanted
= B_TRUE
;
2874 vdev_dirty(vd
, VDD_METASLAB
, msp
, txg
+ 1);
2875 zfs_dbgmsg("txg %llu, requesting force condense: "
2876 "ms_id %llu, vdev_id %llu", txg
, msp
->ms_id
,
2879 msp
->ms_fragmentation
= ZFS_FRAG_INVALID
;
2883 for (int i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
2885 uint8_t shift
= msp
->ms_sm
->sm_shift
;
2887 int idx
= MIN(shift
- SPA_MINBLOCKSHIFT
+ i
,
2888 FRAGMENTATION_TABLE_SIZE
- 1);
2890 if (msp
->ms_sm
->sm_phys
->smp_histogram
[i
] == 0)
2893 space
= msp
->ms_sm
->sm_phys
->smp_histogram
[i
] << (i
+ shift
);
2896 ASSERT3U(idx
, <, FRAGMENTATION_TABLE_SIZE
);
2897 fragmentation
+= space
* zfs_frag_table
[idx
];
2901 fragmentation
/= total
;
2902 ASSERT3U(fragmentation
, <=, 100);
2904 msp
->ms_fragmentation
= fragmentation
;
/*
 * Compute a weight -- a selection preference value -- for the given metaslab.
 * This is based on the amount of free space, the level of fragmentation,
 * the LBA range, and whether the metaslab is loaded.
 */
static uint64_t
metaslab_space_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = msp->ms_size - metaslab_allocated_space(msp);

	if (metaslab_fragmentation_factor_enabled &&
	    msp->ms_fragmentation != ZFS_FRAG_INVALID) {
		/*
		 * Use the fragmentation information to inversely scale
		 * down the baseline weight. We need to ensure that we
		 * don't exclude this metaslab completely when it's 100%
		 * fragmented. To avoid this we reduce the fragmented value
		 * by 1.
		 */
		space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;

		/*
		 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
		 * this metaslab again. The fragmentation metric may have
		 * decreased the space to something smaller than
		 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
		 * so that we can consume any remaining space.
		 */
		if (space > 0 && space < SPA_MINBLOCKSIZE)
			space = SPA_MINBLOCKSIZE;
	}
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1. We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
		weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
		ASSERT(weight >= space && weight <= 2 * space);
	}

	/*
	 * If this metaslab is one we're actively using, adjust its
	 * weight to make it preferable to any inactive metaslab so
	 * we'll polish it off. If the fragmentation on this metaslab
	 * has exceeded our threshold, then don't mark it active.
	 */
	if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
	    msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
	}

	WEIGHT_SET_SPACEBASED(weight);
	return (weight);
}
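
/*
 * A worked example of the space-based weight, using hypothetical values:
 * a 1GB metaslab with 512MB free and 20% fragmentation on a rotational
 * vdev with 100 metaslabs, for metaslab id 25:
 *
 *	space  = 512MB * (100 - (20 - 1)) / 100   ~= 414MB
 *	weight = 2 * space - (25 * space) / 100   ~= 725MB
 *
 * so the same amount of free space is worth more near the outer edge of
 * the disk (low ms_id) than near the spindle.
 */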
/*
 * Return the weight of the specified metaslab, according to the segment-based
 * weighting algorithm. The metaslab must be loaded. This function can
 * be called within a sync pass since it relies only on the metaslab's
 * range tree which is always accurate when the metaslab is loaded.
 */
static uint64_t
metaslab_weight_from_range_tree(metaslab_t *msp)
{
	uint64_t weight = 0;
	uint32_t segments = 0;

	ASSERT(msp->ms_loaded);

	for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
	    i--) {
		uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;

		segments <<= 1;
		segments += msp->ms_allocatable->rt_histogram[i];

		/*
		 * The range tree provides more precision than the space map
		 * and must be downgraded so that all values fit within the
		 * space map's histogram. This allows us to compare loaded
		 * vs. unloaded metaslabs to determine which metaslab is
		 * considered "best".
		 */
		if (i > max_idx)
			continue;

		if (segments != 0) {
			WEIGHT_SET_COUNT(weight, segments);
			WEIGHT_SET_INDEX(weight, i);
			WEIGHT_SET_ACTIVE(weight, 0);
			break;
		}
	}
	return (weight);
}
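
/*
 * Sketch of the resulting encoding, with hypothetical histogram contents:
 * if the largest non-empty (space-map-representable) bucket is index 20
 * (1MB segments) and, after the folding above, it accounts for 12 segments,
 * the weight encodes:
 *
 *	WEIGHT_SET_COUNT(weight, 12);	// segments in the bucket
 *	WEIGHT_SET_INDEX(weight, 20);	// log2 of the segment size (1MB)
 *	WEIGHT_SET_ACTIVE(weight, 0);	// activation bits added elsewhere
 *
 * which sorts above any metaslab whose largest free segments are smaller
 * than 1MB, regardless of how much total free space that metaslab has.
 */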
3021 * Calculate the weight based on the on-disk histogram. Should be applied
3022 * only to unloaded metaslabs (i.e no incoming allocations) in-order to
3023 * give results consistent with the on-disk state
3026 metaslab_weight_from_spacemap(metaslab_t
*msp
)
3028 space_map_t
*sm
= msp
->ms_sm
;
3029 ASSERT(!msp
->ms_loaded
);
3031 ASSERT3U(space_map_object(sm
), !=, 0);
3032 ASSERT3U(sm
->sm_dbuf
->db_size
, ==, sizeof (space_map_phys_t
));
3035 * Create a joint histogram from all the segments that have made
3036 * it to the metaslab's space map histogram, that are not yet
3037 * available for allocation because they are still in the freeing
3038 * pipeline (e.g. freeing, freed, and defer trees). Then subtract
3039 * these segments from the space map's histogram to get a more
3042 uint64_t deferspace_histogram
[SPACE_MAP_HISTOGRAM_SIZE
] = {0};
3043 for (int i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++)
3044 deferspace_histogram
[i
] += msp
->ms_synchist
[i
];
3045 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
3046 for (int i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
3047 deferspace_histogram
[i
] += msp
->ms_deferhist
[t
][i
];
3051 uint64_t weight
= 0;
3052 for (int i
= SPACE_MAP_HISTOGRAM_SIZE
- 1; i
>= 0; i
--) {
3053 ASSERT3U(sm
->sm_phys
->smp_histogram
[i
], >=,
3054 deferspace_histogram
[i
]);
3056 sm
->sm_phys
->smp_histogram
[i
] - deferspace_histogram
[i
];
3058 WEIGHT_SET_COUNT(weight
, count
);
3059 WEIGHT_SET_INDEX(weight
, i
+ sm
->sm_shift
);
3060 WEIGHT_SET_ACTIVE(weight
, 0);
3068 * Compute a segment-based weight for the specified metaslab. The weight
3069 * is determined by highest bucket in the histogram. The information
3070 * for the highest bucket is encoded into the weight value.
3073 metaslab_segment_weight(metaslab_t
*msp
)
3075 metaslab_group_t
*mg
= msp
->ms_group
;
3076 uint64_t weight
= 0;
3077 uint8_t shift
= mg
->mg_vd
->vdev_ashift
;
3079 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
3082 * The metaslab is completely free.
3084 if (metaslab_allocated_space(msp
) == 0) {
3085 int idx
= highbit64(msp
->ms_size
) - 1;
3086 int max_idx
= SPACE_MAP_HISTOGRAM_SIZE
+ shift
- 1;
3088 if (idx
< max_idx
) {
3089 WEIGHT_SET_COUNT(weight
, 1ULL);
3090 WEIGHT_SET_INDEX(weight
, idx
);
3092 WEIGHT_SET_COUNT(weight
, 1ULL << (idx
- max_idx
));
3093 WEIGHT_SET_INDEX(weight
, max_idx
);
3095 WEIGHT_SET_ACTIVE(weight
, 0);
3096 ASSERT(!WEIGHT_IS_SPACEBASED(weight
));
3100 ASSERT3U(msp
->ms_sm
->sm_dbuf
->db_size
, ==, sizeof (space_map_phys_t
));
3103 * If the metaslab is fully allocated then just make the weight 0.
3105 if (metaslab_allocated_space(msp
) == msp
->ms_size
)
3108 * If the metaslab is already loaded, then use the range tree to
3109 * determine the weight. Otherwise, we rely on the space map information
3110 * to generate the weight.
3112 if (msp
->ms_loaded
) {
3113 weight
= metaslab_weight_from_range_tree(msp
);
3115 weight
= metaslab_weight_from_spacemap(msp
);
3119 * If the metaslab was active the last time we calculated its weight
3120 * then keep it active. We want to consume the entire region that
3121 * is associated with this weight.
3123 if (msp
->ms_activation_weight
!= 0 && weight
!= 0)
3124 WEIGHT_SET_ACTIVE(weight
, WEIGHT_GET_ACTIVE(msp
->ms_weight
));
/*
 * Determine if we should attempt to allocate from this metaslab. If the
 * metaslab is loaded, then we can determine if the desired allocation
 * can be satisfied by looking at the size of the maximum free segment
 * on that metaslab. Otherwise, we make our decision based on the metaslab's
 * weight. For segment-based weighting we can determine the maximum
 * allocation based on the index encoded in its value. For space-based
 * weights we rely on the entire weight (excluding the weight-type bit).
 */
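/*
 * For example (hypothetical weight): an unloaded metaslab whose
 * segment-based weight has index i = 17 advertises free segments in
 * [128K, 256K), so a 200K allocation (asize < 2^18) is worth attempting
 * while a 300K allocation is not. For a space-based weight of 5MB, any
 * asize up to 5MB is attempted.
 */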
static boolean_t
metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
{
	/*
	 * If the metaslab is loaded, ms_max_size is definitive and we can use
	 * the fast check. If it's not, the ms_max_size is a lower bound (once
	 * set), and we should use the fast check as long as we're not in
	 * try_hard and it's been less than zfs_metaslab_max_size_cache_sec
	 * seconds since the metaslab was unloaded.
	 */
	if (msp->ms_loaded ||
	    (msp->ms_max_size != 0 && !try_hard && gethrtime() <
	    msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec)))
		return (msp->ms_max_size >= asize);

	boolean_t should_allocate;
	if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
		/*
		 * The metaslab segment weight indicates segments in the
		 * range [2^i, 2^(i+1)), where i is the index in the weight.
		 * Since the asize might be in the middle of the range, we
		 * should attempt the allocation if asize < 2^(i+1).
		 */
		should_allocate = (asize <
		    1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
	} else {
		should_allocate = (asize <=
		    (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
	}

	return (should_allocate);
}
static uint64_t
metaslab_weight(metaslab_t *msp, boolean_t nodirty)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	uint64_t weight;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	metaslab_set_fragmentation(msp, nodirty);

	/*
	 * Update the maximum size. If the metaslab is loaded, this will
	 * ensure that we get an accurate maximum size if newly freed space
	 * has been added back into the free tree. If the metaslab is
	 * unloaded, we check if there's a larger free segment in the
	 * unflushed frees. This is a lower bound on the largest allocatable
	 * segment size. Coalescing of adjacent entries may reveal larger
	 * allocatable segments, but we aren't aware of those until loading
	 * the space map into a range tree.
	 */
	if (msp->ms_loaded) {
		msp->ms_max_size = metaslab_largest_allocatable(msp);
	} else {
		msp->ms_max_size = MAX(msp->ms_max_size,
		    metaslab_largest_unflushed_free(msp));
	}

	/*
	 * Segment-based weighting requires space map histogram support.
	 */
	if (zfs_metaslab_segment_weight_enabled &&
	    spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
	    sizeof (space_map_phys_t))) {
		weight = metaslab_segment_weight(msp);
	} else {
		weight = metaslab_space_weight(msp);
	}
	return (weight);
}
void
metaslab_recalculate_weight_and_sort(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/* note: we preserve the mask (e.g. indication of primary, etc..) */
	uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
	metaslab_group_sort(msp->ms_group, msp,
	    metaslab_weight(msp, B_FALSE) | was_active);
}
3224 metaslab_activate_allocator(metaslab_group_t
*mg
, metaslab_t
*msp
,
3225 int allocator
, uint64_t activation_weight
)
3227 metaslab_group_allocator_t
*mga
= &mg
->mg_allocator
[allocator
];
3228 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
3231 * If we're activating for the claim code, we don't want to actually
3232 * set the metaslab up for a specific allocator.
3234 if (activation_weight
== METASLAB_WEIGHT_CLAIM
) {
3235 ASSERT0(msp
->ms_activation_weight
);
3236 msp
->ms_activation_weight
= msp
->ms_weight
;
3237 metaslab_group_sort(mg
, msp
, msp
->ms_weight
|
3242 metaslab_t
**mspp
= (activation_weight
== METASLAB_WEIGHT_PRIMARY
?
3243 &mga
->mga_primary
: &mga
->mga_secondary
);
3245 mutex_enter(&mg
->mg_lock
);
3246 if (*mspp
!= NULL
) {
3247 mutex_exit(&mg
->mg_lock
);
3252 ASSERT3S(msp
->ms_allocator
, ==, -1);
3253 msp
->ms_allocator
= allocator
;
3254 msp
->ms_primary
= (activation_weight
== METASLAB_WEIGHT_PRIMARY
);
3256 ASSERT0(msp
->ms_activation_weight
);
3257 msp
->ms_activation_weight
= msp
->ms_weight
;
3258 metaslab_group_sort_impl(mg
, msp
,
3259 msp
->ms_weight
| activation_weight
);
3260 mutex_exit(&mg
->mg_lock
);
3266 metaslab_activate(metaslab_t
*msp
, int allocator
, uint64_t activation_weight
)
3268 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
3271 * The current metaslab is already activated for us so there
3272 * is nothing to do. Already activated though, doesn't mean
3273 * that this metaslab is activated for our allocator nor our
3274 * requested activation weight. The metaslab could have started
3275 * as an active one for our allocator but changed allocators
3276 * while we were waiting to grab its ms_lock or we stole it
3277 * [see find_valid_metaslab()]. This means that there is a
3278 * possibility of passivating a metaslab of another allocator
3279 * or from a different activation mask, from this thread.
3281 if ((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) != 0) {
3282 ASSERT(msp
->ms_loaded
);
3286 int error
= metaslab_load(msp
);
3288 metaslab_group_sort(msp
->ms_group
, msp
, 0);
3293 * When entering metaslab_load() we may have dropped the
3294 * ms_lock because we were loading this metaslab, or we
3295 * were waiting for another thread to load it for us. In
3296 * that scenario, we recheck the weight of the metaslab
3297 * to see if it was activated by another thread.
3299 * If the metaslab was activated for another allocator or
3300 * it was activated with a different activation weight (e.g.
3301 * we wanted to make it a primary but it was activated as
3302 * secondary) we return error (EBUSY).
3304 * If the metaslab was activated for the same allocator
3305 * and requested activation mask, skip activating it.
3307 if ((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) != 0) {
3308 if (msp
->ms_allocator
!= allocator
)
3311 if ((msp
->ms_weight
& activation_weight
) == 0)
3312 return (SET_ERROR(EBUSY
));
3314 EQUIV((activation_weight
== METASLAB_WEIGHT_PRIMARY
),
3320 * If the metaslab has literally 0 space, it will have weight 0. In
3321 * that case, don't bother activating it. This can happen if the
3322 * metaslab had space during find_valid_metaslab, but another thread
3323 * loaded it and used all that space while we were waiting to grab the
3326 if (msp
->ms_weight
== 0) {
3327 ASSERT0(range_tree_space(msp
->ms_allocatable
));
3328 return (SET_ERROR(ENOSPC
));
3331 if ((error
= metaslab_activate_allocator(msp
->ms_group
, msp
,
3332 allocator
, activation_weight
)) != 0) {
3336 ASSERT(msp
->ms_loaded
);
3337 ASSERT(msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
3343 metaslab_passivate_allocator(metaslab_group_t
*mg
, metaslab_t
*msp
,
3346 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
3347 ASSERT(msp
->ms_loaded
);
3349 if (msp
->ms_weight
& METASLAB_WEIGHT_CLAIM
) {
3350 metaslab_group_sort(mg
, msp
, weight
);
3354 mutex_enter(&mg
->mg_lock
);
3355 ASSERT3P(msp
->ms_group
, ==, mg
);
3356 ASSERT3S(0, <=, msp
->ms_allocator
);
3357 ASSERT3U(msp
->ms_allocator
, <, mg
->mg_allocators
);
3359 metaslab_group_allocator_t
*mga
= &mg
->mg_allocator
[msp
->ms_allocator
];
3360 if (msp
->ms_primary
) {
3361 ASSERT3P(mga
->mga_primary
, ==, msp
);
3362 ASSERT(msp
->ms_weight
& METASLAB_WEIGHT_PRIMARY
);
3363 mga
->mga_primary
= NULL
;
3365 ASSERT3P(mga
->mga_secondary
, ==, msp
);
3366 ASSERT(msp
->ms_weight
& METASLAB_WEIGHT_SECONDARY
);
3367 mga
->mga_secondary
= NULL
;
3369 msp
->ms_allocator
= -1;
3370 metaslab_group_sort_impl(mg
, msp
, weight
);
3371 mutex_exit(&mg
->mg_lock
);
3375 metaslab_passivate(metaslab_t
*msp
, uint64_t weight
)
3377 uint64_t size __maybe_unused
= weight
& ~METASLAB_WEIGHT_TYPE
;
3380 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
3381 * this metaslab again. In that case, it had better be empty,
3382 * or we would be leaving space on the table.
3384 ASSERT(!WEIGHT_IS_SPACEBASED(msp
->ms_weight
) ||
3385 size
>= SPA_MINBLOCKSIZE
||
3386 range_tree_space(msp
->ms_allocatable
) == 0);
3387 ASSERT0(weight
& METASLAB_ACTIVE_MASK
);
3389 ASSERT(msp
->ms_activation_weight
!= 0);
3390 msp
->ms_activation_weight
= 0;
3391 metaslab_passivate_allocator(msp
->ms_group
, msp
, weight
);
3392 ASSERT0(msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
/*
 * Segment-based metaslabs are activated once and remain active until
 * we either fail an allocation attempt (similar to space-based metaslabs)
 * or have exhausted the free space in zfs_metaslab_switch_threshold
 * buckets since the metaslab was activated. This function checks to see
 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
 * metaslab and passivates it proactively. This will allow us to select a
 * metaslab with a larger contiguous region, if any, remaining within this
 * metaslab group. If we're in sync pass > 1, then we continue using this
 * metaslab so that we don't dirty more blocks and cause more sync passes.
 */
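/*
 * For illustration (hypothetical values): with zfs_metaslab_switch_threshold
 * set to 2, a metaslab activated at weight index 20 (1MB segments) is
 * passivated once its recalculated index drops to 18 or below, i.e. once
 * the largest remaining free segments have shrunk below 512K.
 */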
static void
metaslab_segment_may_passivate(metaslab_t *msp)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;

	if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
		return;

	/*
	 * Since we are in the middle of a sync pass, the most accurate
	 * information that is accessible to us is the in-core range tree
	 * histogram; calculate the new weight based on that information.
	 */
	uint64_t weight = metaslab_weight_from_range_tree(msp);
	int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
	int current_idx = WEIGHT_GET_INDEX(weight);

	if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
		metaslab_passivate(msp, weight);
}
3428 metaslab_preload(void *arg
)
3430 metaslab_t
*msp
= arg
;
3431 metaslab_class_t
*mc
= msp
->ms_group
->mg_class
;
3432 spa_t
*spa
= mc
->mc_spa
;
3433 fstrans_cookie_t cookie
= spl_fstrans_mark();
3435 ASSERT(!MUTEX_HELD(&msp
->ms_group
->mg_lock
));
3437 mutex_enter(&msp
->ms_lock
);
3438 (void) metaslab_load(msp
);
3439 metaslab_set_selected_txg(msp
, spa_syncing_txg(spa
));
3440 mutex_exit(&msp
->ms_lock
);
3441 spl_fstrans_unmark(cookie
);
3445 metaslab_group_preload(metaslab_group_t
*mg
)
3447 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
3449 avl_tree_t
*t
= &mg
->mg_metaslab_tree
;
3452 if (spa_shutting_down(spa
) || !metaslab_preload_enabled
) {
3453 taskq_wait_outstanding(mg
->mg_taskq
, 0);
3457 mutex_enter(&mg
->mg_lock
);
3460 * Load the next potential metaslabs
3462 for (msp
= avl_first(t
); msp
!= NULL
; msp
= AVL_NEXT(t
, msp
)) {
3463 ASSERT3P(msp
->ms_group
, ==, mg
);
3466 * We preload only the maximum number of metaslabs specified
3467 * by metaslab_preload_limit. If a metaslab is being forced
3468 * to condense then we preload it too. This will ensure
3469 * that force condensing happens in the next txg.
3471 if (++m
> metaslab_preload_limit
&& !msp
->ms_condense_wanted
) {
3475 VERIFY(taskq_dispatch(mg
->mg_taskq
, metaslab_preload
,
3476 msp
, TQ_SLEEP
) != TASKQID_INVALID
);
3478 mutex_exit(&mg
->mg_lock
);
/*
 * Determine if the space map's on-disk footprint is past our tolerance for
 * inefficiency. We would like to use the following criteria to make our
 * decision:
 *
 * 1. Do not condense if the size of the space map object would dramatically
 *    increase as a result of writing out the free space range tree.
 *
 * 2. Condense if the on-disk space map representation is at least
 *    zfs_condense_pct/100 times the size of the optimal representation
 *    (i.e. zfs_condense_pct = 110 and in-core = 1MB, optimal = 1.1MB).
 *
 * 3. Do not condense if the on-disk size of the space map does not actually
 *    decrease.
 *
 * Unfortunately, we cannot compute the on-disk size of the space map in this
 * context because we cannot accurately compute the effects of compression, etc.
 * Instead, we apply the heuristic described in the block comment for
 * zfs_metaslab_condense_block_threshold - we only condense if the space used
 * is greater than a threshold number of blocks.
 */
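/*
 * Worked example (hypothetical sizes): with zfs_condense_pct = 200, a 4K
 * vdev block size, sm_blksz = 16K and a zfs_metaslab_condense_block_threshold
 * of 4, a space map whose on-disk length is 1MB is condensed only if its
 * optimal representation would be <= 512K (so that 1MB >= 200% of optimal)
 * and 1MB > 4 * 16K = 64K, which it is.
 */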
static boolean_t
metaslab_should_condense(metaslab_t *msp)
{
	space_map_t *sm = msp->ms_sm;
	vdev_t *vd = msp->ms_group->mg_vd;
	uint64_t vdev_blocksize = 1 << vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(msp->ms_loaded);
	ASSERT(sm != NULL);
	ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);

	/*
	 * We always condense metaslabs that are empty and metaslabs for
	 * which a condense request has been made.
	 */
	if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
	    msp->ms_condense_wanted)
		return (B_TRUE);

	uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
	uint64_t object_size = space_map_length(sm);
	uint64_t optimal_size = space_map_estimate_optimal_size(sm,
	    msp->ms_allocatable, SM_NO_VDEVID);

	return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
	    object_size > zfs_metaslab_condense_block_threshold * record_size);
}
3532 * Condense the on-disk space map representation to its minimized form.
3533 * The minimized form consists of a small number of allocations followed
3534 * by the entries of the free range tree (ms_allocatable). The condensed
3535 * spacemap contains all the entries of previous TXGs (including those in
3536 * the pool-wide log spacemaps; thus this is effectively a superset of
3537 * metaslab_flush()), but this TXG's entries still need to be written.
3540 metaslab_condense(metaslab_t
*msp
, dmu_tx_t
*tx
)
3542 range_tree_t
*condense_tree
;
3543 space_map_t
*sm
= msp
->ms_sm
;
3544 uint64_t txg
= dmu_tx_get_txg(tx
);
3545 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
3547 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
3548 ASSERT(msp
->ms_loaded
);
3549 ASSERT(msp
->ms_sm
!= NULL
);
3552 * In order to condense the space map, we need to change it so it
3553 * only describes which segments are currently allocated and free.
3555 * All the current free space resides in the ms_allocatable, all
3556 * the ms_defer trees, and all the ms_allocating trees. We ignore
3557 * ms_freed because it is empty because we're in sync pass 1. We
3558 * ignore ms_freeing because these changes are not yet reflected
3559 * in the spacemap (they will be written later this txg).
3561 * So to truncate the space map to represent all the entries of
3562 * previous TXGs we do the following:
3564 * 1] We create a range tree (condense tree) that is 100% empty.
3565 * 2] We add to it all segments found in the ms_defer trees
3566 * as those segments are marked as free in the original space
3567 * map. We do the same with the ms_allocating trees for the same
3568 * reason. Adding these segments should be a relatively
3569 * inexpensive operation since we expect these trees to have a
3570 * small number of nodes.
3571 * 3] We vacate any unflushed allocs, since they are not frees we
3572 * need to add to the condense tree. Then we vacate any
3573 * unflushed frees as they should already be part of ms_allocatable.
3574 * 4] At this point, we would ideally like to add all segments
3575 * in the ms_allocatable tree from the condense tree. This way
3576 * we would write all the entries of the condense tree as the
3577 * condensed space map, which would only contain freed
3578 * segments with everything else assumed to be allocated.
3580 * Doing so can be prohibitively expensive as ms_allocatable can
3581 * be large, and therefore computationally expensive to add to
3582 * the condense_tree. Instead we first sync out an entry marking
3583 * everything as allocated, then the condense_tree and then the
3584 * ms_allocatable, in the condensed space map. While this is not
3585 * optimal, it is typically close to optimal and more importantly
3586 * much cheaper to compute.
3588 * 5] Finally, as both of the unflushed trees were written to our
3589 * new and condensed metaslab space map, we basically flushed
3590 * all the unflushed changes to disk, thus we call
3591 * metaslab_flush_update().
	ASSERT3U(spa_sync_pass(spa), ==, 1);
	ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */

	zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
	    "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
	    msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
	    spa->spa_name, space_map_length(msp->ms_sm),
	    range_tree_numsegs(msp->ms_allocatable),
	    msp->ms_condense_wanted ? "TRUE" : "FALSE");

	msp->ms_condense_wanted = B_FALSE;

	range_seg_type_t type;
	uint64_t shift, start;
	type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
	    &start, &shift);

	condense_tree = range_tree_create(NULL, type, NULL, start, shift);

	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
		range_tree_walk(msp->ms_defer[t],
		    range_tree_add, condense_tree);
	}

	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
		range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
		    range_tree_add, condense_tree);
	}

	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
	    metaslab_unflushed_changes_memused(msp));
	spa->spa_unflushed_stats.sus_memused -=
	    metaslab_unflushed_changes_memused(msp);
	range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
	range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);

	/*
	 * We're about to drop the metaslab's lock thus allowing other
	 * consumers to change its content. Set the metaslab's ms_condensing
	 * flag to ensure that allocations on this metaslab do not occur
	 * while we're in the middle of committing it to disk. This is only
	 * critical for ms_allocatable as all other range trees use per TXG
	 * views of their content.
	 */
	msp->ms_condensing = B_TRUE;

	mutex_exit(&msp->ms_lock);
	uint64_t object = space_map_object(msp->ms_sm);
	space_map_truncate(sm,
	    spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
	    zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);

	/*
	 * space_map_truncate() may have reallocated the spacemap object.
	 * If so, update the vdev_ms_array.
	 */
	if (space_map_object(msp->ms_sm) != object) {
		object = space_map_object(msp->ms_sm);
		dmu_write(spa->spa_meta_objset,
		    msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
		    msp->ms_id, sizeof (uint64_t), &object, tx);
	}

	/*
	 * When the log space map feature is enabled, each space map will
	 * always have ALLOCS followed by FREES for each sync pass. This is
	 * typically true even when the log space map feature is disabled,
	 * except from the case where a metaslab goes through metaslab_sync()
	 * and gets condensed. In that case the metaslab's space map will have
	 * ALLOCS followed by FREES (due to condensing) followed by ALLOCS
	 * followed by FREES (due to space_map_write() in metaslab_sync()) for
	 * the same TXG.
	 */
	range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start,
	    shift);
	range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
	space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
	space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
	space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);

	range_tree_vacate(condense_tree, NULL, NULL);
	range_tree_destroy(condense_tree);
	range_tree_vacate(tmp_tree, NULL, NULL);
	range_tree_destroy(tmp_tree);
	mutex_enter(&msp->ms_lock);

	msp->ms_condensing = B_FALSE;
	metaslab_flush_update(msp, tx);
}

/*
 * Called when the metaslab has been flushed (its own spacemap now reflects
 * all the contents of the pool-wide spacemap log). Updates the metaslab's
 * metadata and any pool-wide related log space map data (e.g. summary,
 * obsolete logs, etc.) to reflect that.
 */
static void
metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
{
	metaslab_group_t *mg = msp->ms_group;
	spa_t *spa = mg->mg_vd->vdev_spa;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	ASSERT3U(spa_sync_pass(spa), ==, 1);
	ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
	ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));

	/*
	 * Just because a metaslab got flushed, that doesn't mean that
	 * it will pass through metaslab_sync_done(). Thus, make sure to
	 * update ms_synced_length here in case it doesn't.
	 */
	msp->ms_synced_length = space_map_length(msp->ms_sm);

	/*
	 * We may end up here from metaslab_condense() without the
	 * feature being active. In that case this is a no-op.
	 */
	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
		return;

	ASSERT(spa_syncing_log_sm(spa) != NULL);
	ASSERT(msp->ms_sm != NULL);
	ASSERT(metaslab_unflushed_txg(msp) != 0);
	ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);

	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));

	/* update metaslab's position in our flushing tree */
	uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
	mutex_enter(&spa->spa_flushed_ms_lock);
	avl_remove(&spa->spa_metaslabs_by_flushed, msp);
	metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
	avl_add(&spa->spa_metaslabs_by_flushed, msp);
	mutex_exit(&spa->spa_flushed_ms_lock);

	/* update metaslab counts of spa_log_sm_t nodes */
	spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
	spa_log_sm_increment_current_mscount(spa);

	/* cleanup obsolete logs if any */
	uint64_t log_blocks_before = spa_log_sm_nblocks(spa);
	spa_cleanup_old_sm_logs(spa, tx);
	uint64_t log_blocks_after = spa_log_sm_nblocks(spa);
	VERIFY3U(log_blocks_after, <=, log_blocks_before);

	/* update log space map summary */
	uint64_t blocks_gone = log_blocks_before - log_blocks_after;
	spa_log_summary_add_flushed_metaslab(spa);
	spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg);
	spa_log_summary_decrement_blkcount(spa, blocks_gone);
}

boolean_t
metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(spa_sync_pass(spa), ==, 1);
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));

	ASSERT(msp->ms_sm != NULL);
	ASSERT(metaslab_unflushed_txg(msp) != 0);
	ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);

	/*
	 * There is nothing wrong with flushing the same metaslab twice, as
	 * this codepath should work on that case. However, the current
	 * flushing scheme makes sure to avoid this situation as we would be
	 * making all these calls without having anything meaningful to write
	 * to disk. We assert this behavior here.
	 */
	ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));

	/*
	 * We can not flush while loading, because then we would
	 * not load the ms_unflushed_{allocs,frees}.
	 */
	if (msp->ms_loading)
		return (B_FALSE);

	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
	metaslab_verify_weight_and_frag(msp);

	/*
	 * Metaslab condensing is effectively flushing. Therefore if the
	 * metaslab can be condensed we can just condense it instead of
	 * flushing it.
	 *
	 * Note that metaslab_condense() does call metaslab_flush_update()
	 * so we can just return immediately after condensing. We also
	 * don't need to care about setting ms_flushing or broadcasting
	 * ms_flush_cv, even if we temporarily drop the ms_lock in
	 * metaslab_condense(), as the metaslab is already loaded.
	 */
	if (msp->ms_loaded && metaslab_should_condense(msp)) {
		metaslab_group_t *mg = msp->ms_group;

		/*
		 * For all histogram operations below refer to the
		 * comments of metaslab_sync() where we follow a
		 * similar procedure.
		 */
		metaslab_group_histogram_verify(mg);
		metaslab_class_histogram_verify(mg->mg_class);
		metaslab_group_histogram_remove(mg, msp);

		metaslab_condense(msp, tx);

		space_map_histogram_clear(msp->ms_sm);
		space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
		ASSERT(range_tree_is_empty(msp->ms_freed));
		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			space_map_histogram_add(msp->ms_sm,
			    msp->ms_defer[t], tx);
		}
		metaslab_aux_histograms_update(msp);

		metaslab_group_histogram_add(mg, msp);
		metaslab_group_histogram_verify(mg);
		metaslab_class_histogram_verify(mg->mg_class);

		metaslab_verify_space(msp, dmu_tx_get_txg(tx));

		/*
		 * Since we recreated the histogram (and potentially
		 * the ms_sm too while condensing) ensure that the
		 * weight is updated too because we are not guaranteed
		 * that this metaslab is dirty and will go through
		 * metaslab_sync_done().
		 */
		metaslab_recalculate_weight_and_sort(msp);
		return (B_TRUE);
	}

	msp->ms_flushing = B_TRUE;
	uint64_t sm_len_before = space_map_length(msp->ms_sm);

	mutex_exit(&msp->ms_lock);
	space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
	    SM_NO_VDEVID, tx);
	space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
	    SM_NO_VDEVID, tx);
	mutex_enter(&msp->ms_lock);

	uint64_t sm_len_after = space_map_length(msp->ms_sm);
	if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
		zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
		    "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
		    "appended %llu bytes", dmu_tx_get_txg(tx), spa_name(spa),
		    msp->ms_group->mg_vd->vdev_id, msp->ms_id,
		    range_tree_space(msp->ms_unflushed_allocs),
		    range_tree_space(msp->ms_unflushed_frees),
		    (sm_len_after - sm_len_before));
	}

	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
	    metaslab_unflushed_changes_memused(msp));
	spa->spa_unflushed_stats.sus_memused -=
	    metaslab_unflushed_changes_memused(msp);
	range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
	range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);

	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
	metaslab_verify_weight_and_frag(msp);

	metaslab_flush_update(msp, tx);

	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
	metaslab_verify_weight_and_frag(msp);

	msp->ms_flushing = B_FALSE;
	cv_broadcast(&msp->ms_flush_cv);
	return (B_TRUE);
}
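
/*
 * Illustrative caller sketch (hypothetical code; the real driver lives in
 * the log space map machinery outside this file): metaslab_flush() is
 * expected to be called in sync pass 1 with the log spacemap feature
 * active, roughly as:
 *
 *	mutex_enter(&msp->ms_lock);
 *	boolean_t flushed = metaslab_flush(msp, tx);
 *	mutex_exit(&msp->ms_lock);
 *
 * where a B_FALSE return means the metaslab was still loading and the
 * flush has to be retried later.
 */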

/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
3876 metaslab_sync(metaslab_t
*msp
, uint64_t txg
)
3878 metaslab_group_t
*mg
= msp
->ms_group
;
3879 vdev_t
*vd
= mg
->mg_vd
;
3880 spa_t
*spa
= vd
->vdev_spa
;
3881 objset_t
*mos
= spa_meta_objset(spa
);
3882 range_tree_t
*alloctree
= msp
->ms_allocating
[txg
& TXG_MASK
];
3885 ASSERT(!vd
->vdev_ishole
);
3888 * This metaslab has just been added so there's no work to do now.
3890 if (msp
->ms_freeing
== NULL
) {
3891 ASSERT3P(alloctree
, ==, NULL
);
3895 ASSERT3P(alloctree
, !=, NULL
);
3896 ASSERT3P(msp
->ms_freeing
, !=, NULL
);
3897 ASSERT3P(msp
->ms_freed
, !=, NULL
);
3898 ASSERT3P(msp
->ms_checkpointing
, !=, NULL
);
3899 ASSERT3P(msp
->ms_trim
, !=, NULL
);
3902 * Normally, we don't want to process a metaslab if there are no
3903 * allocations or frees to perform. However, if the metaslab is being
3904 * forced to condense, it's loaded and we're not beyond the final
3905 * dirty txg, we need to let it through. Not condensing beyond the
3906 * final dirty txg prevents an issue where metaslabs that need to be
3907 * condensed but were loaded for other reasons could cause a panic
3908 * here. By only checking the txg in that branch of the conditional,
3909 * we preserve the utility of the VERIFY statements in all other
3912 if (range_tree_is_empty(alloctree
) &&
3913 range_tree_is_empty(msp
->ms_freeing
) &&
3914 range_tree_is_empty(msp
->ms_checkpointing
) &&
3915 !(msp
->ms_loaded
&& msp
->ms_condense_wanted
&&
3916 txg
<= spa_final_dirty_txg(spa
)))
3920 VERIFY3U(txg
, <=, spa_final_dirty_txg(spa
));
3923 * The only state that can actually be changing concurrently
3924 * with metaslab_sync() is the metaslab's ms_allocatable. No
3925 * other thread can be modifying this txg's alloc, freeing,
3926 * freed, or space_map_phys_t. We drop ms_lock whenever we
3927 * could call into the DMU, because the DMU can call down to
3928 * us (e.g. via zio_free()) at any time.
3930 * The spa_vdev_remove_thread() can be reading metaslab state
3931 * concurrently, and it is locked out by the ms_sync_lock.
3932 * Note that the ms_lock is insufficient for this, because it
3933 * is dropped by space_map_write().
3935 tx
= dmu_tx_create_assigned(spa_get_dsl(spa
), txg
);
3938 * Generate a log space map if one doesn't exist already.
3940 spa_generate_syncing_log_sm(spa
, tx
);
3942 if (msp
->ms_sm
== NULL
) {
3943 uint64_t new_object
= space_map_alloc(mos
,
3944 spa_feature_is_enabled(spa
, SPA_FEATURE_LOG_SPACEMAP
) ?
3945 zfs_metaslab_sm_blksz_with_log
:
3946 zfs_metaslab_sm_blksz_no_log
, tx
);
3947 VERIFY3U(new_object
, !=, 0);
3949 dmu_write(mos
, vd
->vdev_ms_array
, sizeof (uint64_t) *
3950 msp
->ms_id
, sizeof (uint64_t), &new_object
, tx
);
3952 VERIFY0(space_map_open(&msp
->ms_sm
, mos
, new_object
,
3953 msp
->ms_start
, msp
->ms_size
, vd
->vdev_ashift
));
3954 ASSERT(msp
->ms_sm
!= NULL
);
3956 ASSERT(range_tree_is_empty(msp
->ms_unflushed_allocs
));
3957 ASSERT(range_tree_is_empty(msp
->ms_unflushed_frees
));
3958 ASSERT0(metaslab_allocated_space(msp
));
3961 if (metaslab_unflushed_txg(msp
) == 0 &&
3962 spa_feature_is_active(spa
, SPA_FEATURE_LOG_SPACEMAP
)) {
3963 ASSERT(spa_syncing_log_sm(spa
) != NULL
);
3965 metaslab_set_unflushed_txg(msp
, spa_syncing_txg(spa
), tx
);
3966 spa_log_sm_increment_current_mscount(spa
);
3967 spa_log_summary_add_flushed_metaslab(spa
);
3969 ASSERT(msp
->ms_sm
!= NULL
);
3970 mutex_enter(&spa
->spa_flushed_ms_lock
);
3971 avl_add(&spa
->spa_metaslabs_by_flushed
, msp
);
3972 mutex_exit(&spa
->spa_flushed_ms_lock
);
3974 ASSERT(range_tree_is_empty(msp
->ms_unflushed_allocs
));
3975 ASSERT(range_tree_is_empty(msp
->ms_unflushed_frees
));
3978 if (!range_tree_is_empty(msp
->ms_checkpointing
) &&
3979 vd
->vdev_checkpoint_sm
== NULL
) {
3980 ASSERT(spa_has_checkpoint(spa
));
3982 uint64_t new_object
= space_map_alloc(mos
,
3983 zfs_vdev_standard_sm_blksz
, tx
);
3984 VERIFY3U(new_object
, !=, 0);
3986 VERIFY0(space_map_open(&vd
->vdev_checkpoint_sm
,
3987 mos
, new_object
, 0, vd
->vdev_asize
, vd
->vdev_ashift
));
3988 ASSERT3P(vd
->vdev_checkpoint_sm
, !=, NULL
);
3991 * We save the space map object as an entry in vdev_top_zap
3992 * so it can be retrieved when the pool is reopened after an
3993 * export or through zdb.
3995 VERIFY0(zap_add(vd
->vdev_spa
->spa_meta_objset
,
3996 vd
->vdev_top_zap
, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM
,
3997 sizeof (new_object
), 1, &new_object
, tx
));
4000 mutex_enter(&msp
->ms_sync_lock
);
4001 mutex_enter(&msp
->ms_lock
);
4004 * Note: metaslab_condense() clears the space map's histogram.
4005 * Therefore we must verify and remove this histogram before
4008 metaslab_group_histogram_verify(mg
);
4009 metaslab_class_histogram_verify(mg
->mg_class
);
4010 metaslab_group_histogram_remove(mg
, msp
);
4012 if (spa
->spa_sync_pass
== 1 && msp
->ms_loaded
&&
4013 metaslab_should_condense(msp
))
4014 metaslab_condense(msp
, tx
);
4017 * We'll be going to disk to sync our space accounting, thus we
4018 * drop the ms_lock during that time so allocations coming from
4019 * open-context (ZIL) for future TXGs do not block.
4021 mutex_exit(&msp
->ms_lock
);
4022 space_map_t
*log_sm
= spa_syncing_log_sm(spa
);
4023 if (log_sm
!= NULL
) {
4024 ASSERT(spa_feature_is_enabled(spa
, SPA_FEATURE_LOG_SPACEMAP
));
4026 space_map_write(log_sm
, alloctree
, SM_ALLOC
,
4028 space_map_write(log_sm
, msp
->ms_freeing
, SM_FREE
,
4030 mutex_enter(&msp
->ms_lock
);
4032 ASSERT3U(spa
->spa_unflushed_stats
.sus_memused
, >=,
4033 metaslab_unflushed_changes_memused(msp
));
4034 spa
->spa_unflushed_stats
.sus_memused
-=
4035 metaslab_unflushed_changes_memused(msp
);
4036 range_tree_remove_xor_add(alloctree
,
4037 msp
->ms_unflushed_frees
, msp
->ms_unflushed_allocs
);
4038 range_tree_remove_xor_add(msp
->ms_freeing
,
4039 msp
->ms_unflushed_allocs
, msp
->ms_unflushed_frees
);
4040 spa
->spa_unflushed_stats
.sus_memused
+=
4041 metaslab_unflushed_changes_memused(msp
);
4043 ASSERT(!spa_feature_is_enabled(spa
, SPA_FEATURE_LOG_SPACEMAP
));
4045 space_map_write(msp
->ms_sm
, alloctree
, SM_ALLOC
,
4047 space_map_write(msp
->ms_sm
, msp
->ms_freeing
, SM_FREE
,
4049 mutex_enter(&msp
->ms_lock
);
4052 msp
->ms_allocated_space
+= range_tree_space(alloctree
);
4053 ASSERT3U(msp
->ms_allocated_space
, >=,
4054 range_tree_space(msp
->ms_freeing
));
4055 msp
->ms_allocated_space
-= range_tree_space(msp
->ms_freeing
);
4057 if (!range_tree_is_empty(msp
->ms_checkpointing
)) {
4058 ASSERT(spa_has_checkpoint(spa
));
4059 ASSERT3P(vd
->vdev_checkpoint_sm
, !=, NULL
);
4062 * Since we are doing writes to disk and the ms_checkpointing
4063 * tree won't be changing during that time, we drop the
4064 * ms_lock while writing to the checkpoint space map, for the
4065 * same reason mentioned above.
4067 mutex_exit(&msp
->ms_lock
);
4068 space_map_write(vd
->vdev_checkpoint_sm
,
4069 msp
->ms_checkpointing
, SM_FREE
, SM_NO_VDEVID
, tx
);
4070 mutex_enter(&msp
->ms_lock
);
4072 spa
->spa_checkpoint_info
.sci_dspace
+=
4073 range_tree_space(msp
->ms_checkpointing
);
4074 vd
->vdev_stat
.vs_checkpoint_space
+=
4075 range_tree_space(msp
->ms_checkpointing
);
4076 ASSERT3U(vd
->vdev_stat
.vs_checkpoint_space
, ==,
4077 -space_map_allocated(vd
->vdev_checkpoint_sm
));
4079 range_tree_vacate(msp
->ms_checkpointing
, NULL
, NULL
);
4082 if (msp
->ms_loaded
) {
4084 * When the space map is loaded, we have an accurate
4085 * histogram in the range tree. This gives us an opportunity
4086 * to bring the space map's histogram up-to-date so we clear
4087 * it first before updating it.
4089 space_map_histogram_clear(msp
->ms_sm
);
4090 space_map_histogram_add(msp
->ms_sm
, msp
->ms_allocatable
, tx
);
4093 * Since we've cleared the histogram we need to add back
4094 * any free space that has already been processed, plus
4095 * any deferred space. This allows the on-disk histogram
4096 * to accurately reflect all free space even if some space
4097 * is not yet available for allocation (i.e. deferred).
4099 space_map_histogram_add(msp
->ms_sm
, msp
->ms_freed
, tx
);
4102 * Add back any deferred free space that has not been
4103 * added back into the in-core free tree yet. This will
4104 * ensure that we don't end up with a space map histogram
4105 * that is completely empty unless the metaslab is fully
4108 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
4109 space_map_histogram_add(msp
->ms_sm
,
4110 msp
->ms_defer
[t
], tx
);
4115 * Always add the free space from this sync pass to the space
4116 * map histogram. We want to make sure that the on-disk histogram
4117 * accounts for all free space. If the space map is not loaded,
4118 * then we will lose some accuracy but will correct it the next
4119 * time we load the space map.
4121 space_map_histogram_add(msp
->ms_sm
, msp
->ms_freeing
, tx
);
4122 metaslab_aux_histograms_update(msp
);
4124 metaslab_group_histogram_add(mg
, msp
);
4125 metaslab_group_histogram_verify(mg
);
4126 metaslab_class_histogram_verify(mg
->mg_class
);
4129 * For sync pass 1, we avoid traversing this txg's free range tree
4130 * and instead will just swap the pointers for freeing and freed.
4131 * We can safely do this since the freed_tree is guaranteed to be
4132 * empty on the initial pass.
4134 * Keep in mind that even if we are currently using a log spacemap
4135 * we want current frees to end up in the ms_allocatable (but not
4136 * get appended to the ms_sm) so their ranges can be reused as usual.
4138 if (spa_sync_pass(spa
) == 1) {
4139 range_tree_swap(&msp
->ms_freeing
, &msp
->ms_freed
);
4140 ASSERT0(msp
->ms_allocated_this_txg
);
4142 range_tree_vacate(msp
->ms_freeing
,
4143 range_tree_add
, msp
->ms_freed
);
4145 msp
->ms_allocated_this_txg
+= range_tree_space(alloctree
);
4146 range_tree_vacate(alloctree
, NULL
, NULL
);
4148 ASSERT0(range_tree_space(msp
->ms_allocating
[txg
& TXG_MASK
]));
4149 ASSERT0(range_tree_space(msp
->ms_allocating
[TXG_CLEAN(txg
)
4151 ASSERT0(range_tree_space(msp
->ms_freeing
));
4152 ASSERT0(range_tree_space(msp
->ms_checkpointing
));
4154 mutex_exit(&msp
->ms_lock
);
4157 * Verify that the space map object ID has been recorded in the
4161 VERIFY0(dmu_read(mos
, vd
->vdev_ms_array
,
4162 msp
->ms_id
* sizeof (uint64_t), sizeof (uint64_t), &object
, 0));
4163 VERIFY3U(object
, ==, space_map_object(msp
->ms_sm
));
	mutex_exit(&msp->ms_sync_lock);
	dmu_tx_commit(tx);
}

void
metaslab_evict(metaslab_t *msp, uint64_t txg)
{
	if (!msp->ms_loaded || msp->ms_disabled != 0)
		return;

	for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
		VERIFY0(range_tree_space(
		    msp->ms_allocating[(txg + t) & TXG_MASK]));
	}
	if (msp->ms_allocator != -1)
		metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);

	if (!metaslab_debug_unload)
		metaslab_unload(msp);
}
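
/*
 * Illustrative example (hypothetical numbers, assuming TXG_SIZE == 4 and
 * TXG_MASK == 3): ms_allocating[] is indexed by txg & TXG_MASK, so for
 * txg == 10 the syncing txg uses slot 10 & 3 == 2.  The loop in
 * metaslab_evict() above then verifies that slots (10 + 1) & 3 == 3 and
 * (10 + 2) & 3 == 0, i.e. the trees of the other concurrently open txgs,
 * are empty before the metaslab is unloaded.
 */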

/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
4191 metaslab_sync_done(metaslab_t
*msp
, uint64_t txg
)
4193 metaslab_group_t
*mg
= msp
->ms_group
;
4194 vdev_t
*vd
= mg
->mg_vd
;
4195 spa_t
*spa
= vd
->vdev_spa
;
4196 range_tree_t
**defer_tree
;
4197 int64_t alloc_delta
, defer_delta
;
4198 boolean_t defer_allowed
= B_TRUE
;
4200 ASSERT(!vd
->vdev_ishole
);
4202 mutex_enter(&msp
->ms_lock
);
4205 * If this metaslab is just becoming available, initialize its
4206 * range trees and add its capacity to the vdev.
4208 if (msp
->ms_freed
== NULL
) {
4209 range_seg_type_t type
;
4210 uint64_t shift
, start
;
4211 type
= metaslab_calculate_range_tree_type(vd
, msp
, &start
,
4214 for (int t
= 0; t
< TXG_SIZE
; t
++) {
4215 ASSERT(msp
->ms_allocating
[t
] == NULL
);
4217 msp
->ms_allocating
[t
] = range_tree_create(NULL
, type
,
4218 NULL
, start
, shift
);
4221 ASSERT3P(msp
->ms_freeing
, ==, NULL
);
4222 msp
->ms_freeing
= range_tree_create(NULL
, type
, NULL
, start
,
4225 ASSERT3P(msp
->ms_freed
, ==, NULL
);
4226 msp
->ms_freed
= range_tree_create(NULL
, type
, NULL
, start
,
4229 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
4230 ASSERT3P(msp
->ms_defer
[t
], ==, NULL
);
4231 msp
->ms_defer
[t
] = range_tree_create(NULL
, type
, NULL
,
4235 ASSERT3P(msp
->ms_checkpointing
, ==, NULL
);
4236 msp
->ms_checkpointing
= range_tree_create(NULL
, type
, NULL
,
4239 ASSERT3P(msp
->ms_unflushed_allocs
, ==, NULL
);
4240 msp
->ms_unflushed_allocs
= range_tree_create(NULL
, type
, NULL
,
4243 metaslab_rt_arg_t
*mrap
= kmem_zalloc(sizeof (*mrap
), KM_SLEEP
);
4244 mrap
->mra_bt
= &msp
->ms_unflushed_frees_by_size
;
4245 mrap
->mra_floor_shift
= metaslab_by_size_min_shift
;
4246 ASSERT3P(msp
->ms_unflushed_frees
, ==, NULL
);
4247 msp
->ms_unflushed_frees
= range_tree_create(&metaslab_rt_ops
,
4248 type
, mrap
, start
, shift
);
4250 metaslab_space_update(vd
, mg
->mg_class
, 0, 0, msp
->ms_size
);
4252 ASSERT0(range_tree_space(msp
->ms_freeing
));
4253 ASSERT0(range_tree_space(msp
->ms_checkpointing
));
4255 defer_tree
= &msp
->ms_defer
[txg
% TXG_DEFER_SIZE
];
4257 uint64_t free_space
= metaslab_class_get_space(spa_normal_class(spa
)) -
4258 metaslab_class_get_alloc(spa_normal_class(spa
));
4259 if (free_space
<= spa_get_slop_space(spa
) || vd
->vdev_removing
) {
4260 defer_allowed
= B_FALSE
;
4264 alloc_delta
= msp
->ms_allocated_this_txg
-
4265 range_tree_space(msp
->ms_freed
);
4267 if (defer_allowed
) {
4268 defer_delta
= range_tree_space(msp
->ms_freed
) -
4269 range_tree_space(*defer_tree
);
4271 defer_delta
-= range_tree_space(*defer_tree
);
4273 metaslab_space_update(vd
, mg
->mg_class
, alloc_delta
+ defer_delta
,
4276 if (spa_syncing_log_sm(spa
) == NULL
) {
4278 * If there's a metaslab_load() in progress and we don't have
4279 * a log space map, it means that we probably wrote to the
4280 * metaslab's space map. If this is the case, we need to
4281 * make sure that we wait for the load to complete so that we
4282 * have a consistent view at the in-core side of the metaslab.
4284 metaslab_load_wait(msp
);
4286 ASSERT(spa_feature_is_active(spa
, SPA_FEATURE_LOG_SPACEMAP
));
4290 * When auto-trimming is enabled, free ranges which are added to
4291 * ms_allocatable are also be added to ms_trim. The ms_trim tree is
4292 * periodically consumed by the vdev_autotrim_thread() which issues
4293 * trims for all ranges and then vacates the tree. The ms_trim tree
4294 * can be discarded at any time with the sole consequence of recent
4295 * frees not being trimmed.
4297 if (spa_get_autotrim(spa
) == SPA_AUTOTRIM_ON
) {
4298 range_tree_walk(*defer_tree
, range_tree_add
, msp
->ms_trim
);
4299 if (!defer_allowed
) {
4300 range_tree_walk(msp
->ms_freed
, range_tree_add
,
4304 range_tree_vacate(msp
->ms_trim
, NULL
, NULL
);
4308 * Move the frees from the defer_tree back to the free
4309 * range tree (if it's loaded). Swap the freed_tree and
4310 * the defer_tree -- this is safe to do because we've
4311 * just emptied out the defer_tree.
4313 range_tree_vacate(*defer_tree
,
4314 msp
->ms_loaded
? range_tree_add
: NULL
, msp
->ms_allocatable
);
4315 if (defer_allowed
) {
4316 range_tree_swap(&msp
->ms_freed
, defer_tree
);
4318 range_tree_vacate(msp
->ms_freed
,
4319 msp
->ms_loaded
? range_tree_add
: NULL
,
4320 msp
->ms_allocatable
);
4323 msp
->ms_synced_length
= space_map_length(msp
->ms_sm
);
4325 msp
->ms_deferspace
+= defer_delta
;
4326 ASSERT3S(msp
->ms_deferspace
, >=, 0);
4327 ASSERT3S(msp
->ms_deferspace
, <=, msp
->ms_size
);
4328 if (msp
->ms_deferspace
!= 0) {
4330 * Keep syncing this metaslab until all deferred frees
4331 * are back in circulation.
4333 vdev_dirty(vd
, VDD_METASLAB
, msp
, txg
+ 1);
4335 metaslab_aux_histograms_update_done(msp
, defer_allowed
);
4338 msp
->ms_new
= B_FALSE
;
4339 mutex_enter(&mg
->mg_lock
);
4341 mutex_exit(&mg
->mg_lock
);
4345 * Re-sort metaslab within its group now that we've adjusted
4346 * its allocatable space.
4348 metaslab_recalculate_weight_and_sort(msp
);
4350 ASSERT0(range_tree_space(msp
->ms_allocating
[txg
& TXG_MASK
]));
4351 ASSERT0(range_tree_space(msp
->ms_freeing
));
4352 ASSERT0(range_tree_space(msp
->ms_freed
));
4353 ASSERT0(range_tree_space(msp
->ms_checkpointing
));
4354 msp
->ms_allocating_total
-= msp
->ms_allocated_this_txg
;
4355 msp
->ms_allocated_this_txg
= 0;
	mutex_exit(&msp->ms_lock);
}

void
metaslab_sync_reassess(metaslab_group_t *mg)
{
	spa_t *spa = mg->mg_class->mc_spa;

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
	metaslab_group_alloc_update(mg);
	mg->mg_fragmentation = metaslab_group_fragmentation(mg);

	/*
	 * Preload the next potential metaslabs but only on active
	 * metaslab groups. We can get into a state where the metaslab
	 * is no longer active since we dirty metaslabs as we remove a
	 * device, thus potentially making the metaslab group eligible
	 * for preloading.
	 */
	if (mg->mg_activation_count > 0) {
		metaslab_group_preload(mg);
	}
	spa_config_exit(spa, SCL_ALLOC, FTAG);
}

/*
 * When writing a ditto block (i.e. more than one DVA for a given BP) on
 * the same vdev as an existing DVA of this BP, then try to allocate it
 * on a different metaslab than existing DVAs (i.e. a unique metaslab).
 */
static boolean_t
metaslab_is_unique(metaslab_t *msp, dva_t *dva)
{
	uint64_t dva_ms_id;

	if (DVA_GET_ASIZE(dva) == 0)
		return (B_TRUE);

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (B_TRUE);

	dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;

	return (msp->ms_id != dva_ms_id);
}
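
/*
 * Illustrative example (hypothetical numbers): with vdev_ms_shift == 29,
 * i.e. 512MB metaslabs, a DVA on the same vdev at offset 0x60000000
 * (1.5GB) maps to metaslab id 0x60000000 >> 29 == 3.  metaslab_is_unique()
 * therefore returns B_TRUE for any metaslab on that vdev whose ms_id is
 * not 3, and B_FALSE for metaslab 3 itself.
 */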

/*
 * ==========================================================================
 * Metaslab allocation tracing facility
 * ==========================================================================
 */
#ifdef _METASLAB_TRACING

/*
 * Add an allocation trace element to the allocation tracing list.
 */
static void
metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
    metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
    int allocator)
{
	metaslab_alloc_trace_t *mat;

	if (!metaslab_trace_enabled)
		return;

	/*
	 * When the tracing list reaches its maximum we remove
	 * the second element in the list before adding a new one.
	 * By removing the second element we preserve the original
	 * entry as a clue to what allocation steps have already been
	 * performed.
	 */
	if (zal->zal_size == metaslab_trace_max_entries) {
		metaslab_alloc_trace_t *mat_next;
#ifdef ZFS_DEBUG
		panic("too many entries in allocation list");
#endif
		METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
		zal->zal_size--;
		mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
		list_remove(&zal->zal_list, mat_next);
		kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
	}

	mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
	list_link_init(&mat->mat_list_node);
	mat->mat_mg = mg;
	mat->mat_msp = msp;
	mat->mat_size = psize;
	mat->mat_dva_id = dva_id;
	mat->mat_offset = offset;
	mat->mat_weight = 0;
	mat->mat_allocator = allocator;

	if (msp != NULL)
		mat->mat_weight = msp->ms_weight;

	/*
	 * The list is part of the zio so locking is not required. Only
	 * a single thread will perform allocations for a given zio.
	 */
	list_insert_tail(&zal->zal_list, mat);
	zal->zal_size++;

	ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
}

void
metaslab_trace_init(zio_alloc_list_t *zal)
{
	list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
	    offsetof(metaslab_alloc_trace_t, mat_list_node));
	zal->zal_size = 0;
}

void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
	metaslab_alloc_trace_t *mat;

	while ((mat = list_remove_head(&zal->zal_list)) != NULL)
		kmem_cache_free(metaslab_alloc_trace_cache, mat);
	list_destroy(&zal->zal_list);
}

#else

#define	metaslab_trace_add(zal, mg, msp, psize, id, off, alloc)

void
metaslab_trace_init(zio_alloc_list_t *zal)
{
}

void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
}

#endif /* _METASLAB_TRACING */
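
/*
 * Illustrative usage sketch (hypothetical caller code; in practice the
 * zio_alloc_list_t is owned by the zio issuing the allocation):
 *
 *	zio_alloc_list_t zal;
 *
 *	metaslab_trace_init(&zal);
 *	... each allocation attempt records its outcome with
 *	    metaslab_trace_add(&zal, mg, msp, psize, d, offset, allocator);
 *	metaslab_trace_fini(&zal);
 *
 * Each entry records the metaslab group, metaslab, weight and offset (or a
 * TRACE_* error code such as TRACE_TOO_SMALL or TRACE_ENOSPC), which is
 * what debugging of allocation failures relies on.
 */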

/*
 * ==========================================================================
 * Metaslab block operations
 * ==========================================================================
 */

static void
4505 metaslab_group_alloc_increment(spa_t
*spa
, uint64_t vdev
, void *tag
, int flags
,
4508 if (!(flags
& METASLAB_ASYNC_ALLOC
) ||
4509 (flags
& METASLAB_DONT_THROTTLE
))
4512 metaslab_group_t
*mg
= vdev_lookup_top(spa
, vdev
)->vdev_mg
;
4513 if (!mg
->mg_class
->mc_alloc_throttle_enabled
)
4516 metaslab_group_allocator_t
*mga
= &mg
->mg_allocator
[allocator
];
4517 (void) zfs_refcount_add(&mga
->mga_alloc_queue_depth
, tag
);
4521 metaslab_group_increment_qdepth(metaslab_group_t
*mg
, int allocator
)
4523 metaslab_group_allocator_t
*mga
= &mg
->mg_allocator
[allocator
];
4524 metaslab_class_allocator_t
*mca
=
4525 &mg
->mg_class
->mc_allocator
[allocator
];
4526 uint64_t max
= mg
->mg_max_alloc_queue_depth
;
4527 uint64_t cur
= mga
->mga_cur_max_alloc_queue_depth
;
4529 if (atomic_cas_64(&mga
->mga_cur_max_alloc_queue_depth
,
4530 cur
, cur
+ 1) == cur
) {
4531 atomic_inc_64(&mca
->mca_alloc_max_slots
);
4534 cur
= mga
->mga_cur_max_alloc_queue_depth
;
4539 metaslab_group_alloc_decrement(spa_t
*spa
, uint64_t vdev
, void *tag
, int flags
,
4540 int allocator
, boolean_t io_complete
)
4542 if (!(flags
& METASLAB_ASYNC_ALLOC
) ||
4543 (flags
& METASLAB_DONT_THROTTLE
))
4546 metaslab_group_t
*mg
= vdev_lookup_top(spa
, vdev
)->vdev_mg
;
4547 if (!mg
->mg_class
->mc_alloc_throttle_enabled
)
4550 metaslab_group_allocator_t
*mga
= &mg
->mg_allocator
[allocator
];
4551 (void) zfs_refcount_remove(&mga
->mga_alloc_queue_depth
, tag
);
4553 metaslab_group_increment_qdepth(mg
, allocator
);
4557 metaslab_group_alloc_verify(spa_t
*spa
, const blkptr_t
*bp
, void *tag
,
4561 const dva_t
*dva
= bp
->blk_dva
;
4562 int ndvas
= BP_GET_NDVAS(bp
);
4564 for (int d
= 0; d
< ndvas
; d
++) {
4565 uint64_t vdev
= DVA_GET_VDEV(&dva
[d
]);
4566 metaslab_group_t
*mg
= vdev_lookup_top(spa
, vdev
)->vdev_mg
;
4567 metaslab_group_allocator_t
*mga
= &mg
->mg_allocator
[allocator
];
4568 VERIFY(zfs_refcount_not_held(&mga
->mga_alloc_queue_depth
, tag
));
4574 metaslab_block_alloc(metaslab_t
*msp
, uint64_t size
, uint64_t txg
)
4577 range_tree_t
*rt
= msp
->ms_allocatable
;
4578 metaslab_class_t
*mc
= msp
->ms_group
->mg_class
;
4580 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
4581 VERIFY(!msp
->ms_condensing
);
4582 VERIFY0(msp
->ms_disabled
);
4584 start
= mc
->mc_ops
->msop_alloc(msp
, size
);
4585 if (start
!= -1ULL) {
4586 metaslab_group_t
*mg
= msp
->ms_group
;
4587 vdev_t
*vd
= mg
->mg_vd
;
4589 VERIFY0(P2PHASE(start
, 1ULL << vd
->vdev_ashift
));
4590 VERIFY0(P2PHASE(size
, 1ULL << vd
->vdev_ashift
));
4591 VERIFY3U(range_tree_space(rt
) - size
, <=, msp
->ms_size
);
4592 range_tree_remove(rt
, start
, size
);
4593 range_tree_clear(msp
->ms_trim
, start
, size
);
4595 if (range_tree_is_empty(msp
->ms_allocating
[txg
& TXG_MASK
]))
4596 vdev_dirty(mg
->mg_vd
, VDD_METASLAB
, msp
, txg
);
4598 range_tree_add(msp
->ms_allocating
[txg
& TXG_MASK
], start
, size
);
4599 msp
->ms_allocating_total
+= size
;
4601 /* Track the last successful allocation */
4602 msp
->ms_alloc_txg
= txg
;
4603 metaslab_verify_space(msp
, txg
);
4607 * Now that we've attempted the allocation we need to update the
4608 * metaslab's maximum block size since it may have changed.
4610 msp
->ms_max_size
= metaslab_largest_allocatable(msp
);
4615 * Find the metaslab with the highest weight that is less than what we've
4616 * already tried. In the common case, this means that we will examine each
4617 * metaslab at most once. Note that concurrent callers could reorder metaslabs
4618 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
4619 * activated by another thread, and we fail to allocate from the metaslab we
4620 * have selected, we may not try the newly-activated metaslab, and instead
4621 * activate another metaslab. This is not optimal, but generally does not cause
4622 * any problems (a possible exception being if every metaslab is completely full
4623 * except for the newly-activated metaslab which we fail to examine).
4626 find_valid_metaslab(metaslab_group_t
*mg
, uint64_t activation_weight
,
4627 dva_t
*dva
, int d
, boolean_t want_unique
, uint64_t asize
, int allocator
,
4628 boolean_t try_hard
, zio_alloc_list_t
*zal
, metaslab_t
*search
,
4629 boolean_t
*was_active
)
4632 avl_tree_t
*t
= &mg
->mg_metaslab_tree
;
4633 metaslab_t
*msp
= avl_find(t
, search
, &idx
);
4635 msp
= avl_nearest(t
, idx
, AVL_AFTER
);
4637 for (; msp
!= NULL
; msp
= AVL_NEXT(t
, msp
)) {
4639 if (!metaslab_should_allocate(msp
, asize
, try_hard
)) {
4640 metaslab_trace_add(zal
, mg
, msp
, asize
, d
,
4641 TRACE_TOO_SMALL
, allocator
);
4646 * If the selected metaslab is condensing or disabled,
4649 if (msp
->ms_condensing
|| msp
->ms_disabled
> 0)
4652 *was_active
= msp
->ms_allocator
!= -1;
4654 * If we're activating as primary, this is our first allocation
4655 * from this disk, so we don't need to check how close we are.
4656 * If the metaslab under consideration was already active,
4657 * we're getting desperate enough to steal another allocator's
4658 * metaslab, so we still don't care about distances.
4660 if (activation_weight
== METASLAB_WEIGHT_PRIMARY
|| *was_active
)
4663 for (i
= 0; i
< d
; i
++) {
4665 !metaslab_is_unique(msp
, &dva
[i
]))
4666 break; /* try another metaslab */
4673 search
->ms_weight
= msp
->ms_weight
;
4674 search
->ms_start
= msp
->ms_start
+ 1;
4675 search
->ms_allocator
= msp
->ms_allocator
;
4676 search
->ms_primary
= msp
->ms_primary
;
4682 metaslab_active_mask_verify(metaslab_t
*msp
)
4684 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
4686 if ((zfs_flags
& ZFS_DEBUG_METASLAB_VERIFY
) == 0)
4689 if ((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) == 0)
4692 if (msp
->ms_weight
& METASLAB_WEIGHT_PRIMARY
) {
4693 VERIFY0(msp
->ms_weight
& METASLAB_WEIGHT_SECONDARY
);
4694 VERIFY0(msp
->ms_weight
& METASLAB_WEIGHT_CLAIM
);
4695 VERIFY3S(msp
->ms_allocator
, !=, -1);
4696 VERIFY(msp
->ms_primary
);
4700 if (msp
->ms_weight
& METASLAB_WEIGHT_SECONDARY
) {
4701 VERIFY0(msp
->ms_weight
& METASLAB_WEIGHT_PRIMARY
);
4702 VERIFY0(msp
->ms_weight
& METASLAB_WEIGHT_CLAIM
);
4703 VERIFY3S(msp
->ms_allocator
, !=, -1);
4704 VERIFY(!msp
->ms_primary
);
4708 if (msp
->ms_weight
& METASLAB_WEIGHT_CLAIM
) {
4709 VERIFY0(msp
->ms_weight
& METASLAB_WEIGHT_PRIMARY
);
4710 VERIFY0(msp
->ms_weight
& METASLAB_WEIGHT_SECONDARY
);
4711 VERIFY3S(msp
->ms_allocator
, ==, -1);
4718 metaslab_group_alloc_normal(metaslab_group_t
*mg
, zio_alloc_list_t
*zal
,
4719 uint64_t asize
, uint64_t txg
, boolean_t want_unique
, dva_t
*dva
, int d
,
4720 int allocator
, boolean_t try_hard
)
4722 metaslab_t
*msp
= NULL
;
4723 uint64_t offset
= -1ULL;
4725 uint64_t activation_weight
= METASLAB_WEIGHT_PRIMARY
;
4726 for (int i
= 0; i
< d
; i
++) {
4727 if (activation_weight
== METASLAB_WEIGHT_PRIMARY
&&
4728 DVA_GET_VDEV(&dva
[i
]) == mg
->mg_vd
->vdev_id
) {
4729 activation_weight
= METASLAB_WEIGHT_SECONDARY
;
4730 } else if (activation_weight
== METASLAB_WEIGHT_SECONDARY
&&
4731 DVA_GET_VDEV(&dva
[i
]) == mg
->mg_vd
->vdev_id
) {
4732 activation_weight
= METASLAB_WEIGHT_CLAIM
;
4738 * If we don't have enough metaslabs active to fill the entire array, we
4739 * just use the 0th slot.
4741 if (mg
->mg_ms_ready
< mg
->mg_allocators
* 3)
4743 metaslab_group_allocator_t
*mga
= &mg
->mg_allocator
[allocator
];
4745 ASSERT3U(mg
->mg_vd
->vdev_ms_count
, >=, 2);
4747 metaslab_t
*search
= kmem_alloc(sizeof (*search
), KM_SLEEP
);
4748 search
->ms_weight
= UINT64_MAX
;
4749 search
->ms_start
= 0;
4751 * At the end of the metaslab tree are the already-active metaslabs,
4752 * first the primaries, then the secondaries. When we resume searching
4753 * through the tree, we need to consider ms_allocator and ms_primary so
4754 * we start in the location right after where we left off, and don't
4755 * accidentally loop forever considering the same metaslabs.
4757 search
->ms_allocator
= -1;
4758 search
->ms_primary
= B_TRUE
;
4760 boolean_t was_active
= B_FALSE
;
4762 mutex_enter(&mg
->mg_lock
);
4764 if (activation_weight
== METASLAB_WEIGHT_PRIMARY
&&
4765 mga
->mga_primary
!= NULL
) {
4766 msp
= mga
->mga_primary
;
4769 * Even though we don't hold the ms_lock for the
4770 * primary metaslab, those fields should not
4771 * change while we hold the mg_lock. Thus it is
4772 * safe to make assertions on them.
4774 ASSERT(msp
->ms_primary
);
4775 ASSERT3S(msp
->ms_allocator
, ==, allocator
);
4776 ASSERT(msp
->ms_loaded
);
4778 was_active
= B_TRUE
;
4779 ASSERT(msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
4780 } else if (activation_weight
== METASLAB_WEIGHT_SECONDARY
&&
4781 mga
->mga_secondary
!= NULL
) {
4782 msp
= mga
->mga_secondary
;
4785 * See comment above about the similar assertions
4786 * for the primary metaslab.
4788 ASSERT(!msp
->ms_primary
);
4789 ASSERT3S(msp
->ms_allocator
, ==, allocator
);
4790 ASSERT(msp
->ms_loaded
);
4792 was_active
= B_TRUE
;
4793 ASSERT(msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
4795 msp
= find_valid_metaslab(mg
, activation_weight
, dva
, d
,
4796 want_unique
, asize
, allocator
, try_hard
, zal
,
4797 search
, &was_active
);
4800 mutex_exit(&mg
->mg_lock
);
4802 kmem_free(search
, sizeof (*search
));
4805 mutex_enter(&msp
->ms_lock
);
4807 metaslab_active_mask_verify(msp
);
4810 * This code is disabled out because of issues with
4811 * tracepoints in non-gpl kernel modules.
4814 DTRACE_PROBE3(ms__activation__attempt
,
4815 metaslab_t
*, msp
, uint64_t, activation_weight
,
4816 boolean_t
, was_active
);
4820 * Ensure that the metaslab we have selected is still
4821 * capable of handling our request. It's possible that
4822 * another thread may have changed the weight while we
4823 * were blocked on the metaslab lock. We check the
4824 * active status first to see if we need to set_selected_txg
4827 if (was_active
&& !(msp
->ms_weight
& METASLAB_ACTIVE_MASK
)) {
4828 ASSERT3S(msp
->ms_allocator
, ==, -1);
4829 mutex_exit(&msp
->ms_lock
);
4834 * If the metaslab was activated for another allocator
4835 * while we were waiting in the ms_lock above, or it's
4836 * a primary and we're seeking a secondary (or vice versa),
4837 * we go back and select a new metaslab.
4839 if (!was_active
&& (msp
->ms_weight
& METASLAB_ACTIVE_MASK
) &&
4840 (msp
->ms_allocator
!= -1) &&
4841 (msp
->ms_allocator
!= allocator
|| ((activation_weight
==
4842 METASLAB_WEIGHT_PRIMARY
) != msp
->ms_primary
))) {
4843 ASSERT(msp
->ms_loaded
);
4844 ASSERT((msp
->ms_weight
& METASLAB_WEIGHT_CLAIM
) ||
4845 msp
->ms_allocator
!= -1);
4846 mutex_exit(&msp
->ms_lock
);
4851 * This metaslab was used for claiming regions allocated
4852 * by the ZIL during pool import. Once these regions are
4853 * claimed we don't need to keep the CLAIM bit set
4854 * anymore. Passivate this metaslab to zero its activation
4857 if (msp
->ms_weight
& METASLAB_WEIGHT_CLAIM
&&
4858 activation_weight
!= METASLAB_WEIGHT_CLAIM
) {
4859 ASSERT(msp
->ms_loaded
);
4860 ASSERT3S(msp
->ms_allocator
, ==, -1);
4861 metaslab_passivate(msp
, msp
->ms_weight
&
4862 ~METASLAB_WEIGHT_CLAIM
);
4863 mutex_exit(&msp
->ms_lock
);
4867 metaslab_set_selected_txg(msp
, txg
);
4869 int activation_error
=
4870 metaslab_activate(msp
, allocator
, activation_weight
);
4871 metaslab_active_mask_verify(msp
);
4874 * If the metaslab was activated by another thread for
4875 * another allocator or activation_weight (EBUSY), or it
4876 * failed because another metaslab was assigned as primary
4877 * for this allocator (EEXIST) we continue using this
4878 * metaslab for our allocation, rather than going on to a
4879 * worse metaslab (we waited for that metaslab to be loaded
4882 * If the activation failed due to an I/O error or ENOSPC we
4883 * skip to the next metaslab.
4885 boolean_t activated
;
4886 if (activation_error
== 0) {
4888 } else if (activation_error
== EBUSY
||
4889 activation_error
== EEXIST
) {
4890 activated
= B_FALSE
;
4892 mutex_exit(&msp
->ms_lock
);
4895 ASSERT(msp
->ms_loaded
);
4898 * Now that we have the lock, recheck to see if we should
4899 * continue to use this metaslab for this allocation. The
4900 * the metaslab is now loaded so metaslab_should_allocate()
4901 * can accurately determine if the allocation attempt should
4904 if (!metaslab_should_allocate(msp
, asize
, try_hard
)) {
4905 /* Passivate this metaslab and select a new one. */
4906 metaslab_trace_add(zal
, mg
, msp
, asize
, d
,
4907 TRACE_TOO_SMALL
, allocator
);
4912 * If this metaslab is currently condensing then pick again
4913 * as we can't manipulate this metaslab until it's committed
4914 * to disk. If this metaslab is being initialized, we shouldn't
4915 * allocate from it since the allocated region might be
4916 * overwritten after allocation.
4918 if (msp
->ms_condensing
) {
4919 metaslab_trace_add(zal
, mg
, msp
, asize
, d
,
4920 TRACE_CONDENSING
, allocator
);
4922 metaslab_passivate(msp
, msp
->ms_weight
&
4923 ~METASLAB_ACTIVE_MASK
);
4925 mutex_exit(&msp
->ms_lock
);
4927 } else if (msp
->ms_disabled
> 0) {
4928 metaslab_trace_add(zal
, mg
, msp
, asize
, d
,
4929 TRACE_DISABLED
, allocator
);
4931 metaslab_passivate(msp
, msp
->ms_weight
&
4932 ~METASLAB_ACTIVE_MASK
);
4934 mutex_exit(&msp
->ms_lock
);
4938 offset
= metaslab_block_alloc(msp
, asize
, txg
);
4939 metaslab_trace_add(zal
, mg
, msp
, asize
, d
, offset
, allocator
);
4941 if (offset
!= -1ULL) {
4942 /* Proactively passivate the metaslab, if needed */
4944 metaslab_segment_may_passivate(msp
);
4948 ASSERT(msp
->ms_loaded
);
4951 * This code is disabled out because of issues with
4952 * tracepoints in non-gpl kernel modules.
4955 DTRACE_PROBE2(ms__alloc__failure
, metaslab_t
*, msp
,
4960 * We were unable to allocate from this metaslab so determine
4961 * a new weight for this metaslab. Now that we have loaded
4962 * the metaslab we can provide a better hint to the metaslab
4965 * For space-based metaslabs, we use the maximum block size.
4966 * This information is only available when the metaslab
4967 * is loaded and is more accurate than the generic free
4968 * space weight that was calculated by metaslab_weight().
4969 * This information allows us to quickly compare the maximum
4970 * available allocation in the metaslab to the allocation
4971 * size being requested.
4973 * For segment-based metaslabs, determine the new weight
4974 * based on the highest bucket in the range tree. We
4975 * explicitly use the loaded segment weight (i.e. the range
4976 * tree histogram) since it contains the space that is
4977 * currently available for allocation and is accurate
4978 * even within a sync pass.
4981 if (WEIGHT_IS_SPACEBASED(msp
->ms_weight
)) {
4982 weight
= metaslab_largest_allocatable(msp
);
4983 WEIGHT_SET_SPACEBASED(weight
);
4985 weight
= metaslab_weight_from_range_tree(msp
);
4989 metaslab_passivate(msp
, weight
);
4992 * For the case where we use the metaslab that is
4993 * active for another allocator we want to make
4994 * sure that we retain the activation mask.
4996 * Note that we could attempt to use something like
4997 * metaslab_recalculate_weight_and_sort() that
4998 * retains the activation mask here. That function
4999 * uses metaslab_weight() to set the weight though
5000 * which is not as accurate as the calculations
5003 weight
|= msp
->ms_weight
& METASLAB_ACTIVE_MASK
;
5004 metaslab_group_sort(mg
, msp
, weight
);
5006 metaslab_active_mask_verify(msp
);
5009 * We have just failed an allocation attempt, check
5010 * that metaslab_should_allocate() agrees. Otherwise,
5011 * we may end up in an infinite loop retrying the same
5014 ASSERT(!metaslab_should_allocate(msp
, asize
, try_hard
));
5016 mutex_exit(&msp
->ms_lock
);
5018 mutex_exit(&msp
->ms_lock
);
5019 kmem_free(search
, sizeof (*search
));
5024 metaslab_group_alloc(metaslab_group_t
*mg
, zio_alloc_list_t
*zal
,
5025 uint64_t asize
, uint64_t txg
, boolean_t want_unique
, dva_t
*dva
, int d
,
5026 int allocator
, boolean_t try_hard
)
5029 ASSERT(mg
->mg_initialized
);
5031 offset
= metaslab_group_alloc_normal(mg
, zal
, asize
, txg
, want_unique
,
5032 dva
, d
, allocator
, try_hard
);
5034 mutex_enter(&mg
->mg_lock
);
5035 if (offset
== -1ULL) {
5036 mg
->mg_failed_allocations
++;
5037 metaslab_trace_add(zal
, mg
, NULL
, asize
, d
,
5038 TRACE_GROUP_FAILURE
, allocator
);
5039 if (asize
== SPA_GANGBLOCKSIZE
) {
5041 * This metaslab group was unable to allocate
5042 * the minimum gang block size so it must be out of
5043 * space. We must notify the allocation throttle
5044 * to start skipping allocation attempts to this
5045 * metaslab group until more space becomes available.
5046 * Note: this failure cannot be caused by the
5047 * allocation throttle since the allocation throttle
5048 * is only responsible for skipping devices and
5049 * not failing block allocations.
5051 mg
->mg_no_free_space
= B_TRUE
;
5054 mg
->mg_allocations
++;
5055 mutex_exit(&mg
->mg_lock
);
5060 * Allocate a block for the specified i/o.
5063 metaslab_alloc_dva(spa_t
*spa
, metaslab_class_t
*mc
, uint64_t psize
,
5064 dva_t
*dva
, int d
, dva_t
*hintdva
, uint64_t txg
, int flags
,
5065 zio_alloc_list_t
*zal
, int allocator
)
5067 metaslab_class_allocator_t
*mca
= &mc
->mc_allocator
[allocator
];
5068 metaslab_group_t
*mg
, *fast_mg
, *rotor
;
5070 boolean_t try_hard
= B_FALSE
;
5072 ASSERT(!DVA_IS_VALID(&dva
[d
]));
5075 * For testing, make some blocks above a certain size be gang blocks.
5076 * This will result in more split blocks when using device removal,
5077 * and a large number of split blocks coupled with ztest-induced
5078 * damage can result in extremely long reconstruction times. This
5079 * will also test spilling from special to normal.
5081 if (psize
>= metaslab_force_ganging
&& (spa_get_random(100) < 3)) {
5082 metaslab_trace_add(zal
, NULL
, NULL
, psize
, d
, TRACE_FORCE_GANG
,
5084 return (SET_ERROR(ENOSPC
));
5088 * Start at the rotor and loop through all mgs until we find something.
5089 * Note that there's no locking on mca_rotor or mca_aliquot because
5090 * nothing actually breaks if we miss a few updates -- we just won't
5091 * allocate quite as evenly. It all balances out over time.
5093 * If we are doing ditto or log blocks, try to spread them across
5094 * consecutive vdevs. If we're forced to reuse a vdev before we've
5095 * allocated all of our ditto blocks, then try and spread them out on
5096 * that vdev as much as possible. If it turns out to not be possible,
5097 * gradually lower our standards until anything becomes acceptable.
5098 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
5099 * gives us hope of containing our fault domains to something we're
5100 * able to reason about. Otherwise, any two top-level vdev failures
5101 * will guarantee the loss of data. With consecutive allocation,
5102 * only two adjacent top-level vdev failures will result in data loss.
5104 * If we are doing gang blocks (hintdva is non-NULL), try to keep
5105 * ourselves on the same vdev as our gang block header. That
5106 * way, we can hope for locality in vdev_cache, plus it makes our
5107 * fault domains something tractable.
5110 vd
= vdev_lookup_top(spa
, DVA_GET_VDEV(&hintdva
[d
]));
5113 * It's possible the vdev we're using as the hint no
5114 * longer exists or its mg has been closed (e.g. by
5115 * device removal). Consult the rotor when
5118 if (vd
!= NULL
&& vd
->vdev_mg
!= NULL
) {
5121 if (flags
& METASLAB_HINTBP_AVOID
&&
5122 mg
->mg_next
!= NULL
)
5125 mg
= mca
->mca_rotor
;
5127 } else if (d
!= 0) {
5128 vd
= vdev_lookup_top(spa
, DVA_GET_VDEV(&dva
[d
- 1]));
5129 mg
= vd
->vdev_mg
->mg_next
;
5130 } else if (flags
& METASLAB_FASTWRITE
) {
5131 mg
= fast_mg
= mca
->mca_rotor
;
5134 if (fast_mg
->mg_vd
->vdev_pending_fastwrite
<
5135 mg
->mg_vd
->vdev_pending_fastwrite
)
5137 } while ((fast_mg
= fast_mg
->mg_next
) != mca
->mca_rotor
);
5140 ASSERT(mca
->mca_rotor
!= NULL
);
5141 mg
= mca
->mca_rotor
;
5145 * If the hint put us into the wrong metaslab class, or into a
5146 * metaslab group that has been passivated, just follow the rotor.
5148 if (mg
->mg_class
!= mc
|| mg
->mg_activation_count
<= 0)
5149 mg
= mca
->mca_rotor
;
5154 boolean_t allocatable
;
5156 ASSERT(mg
->mg_activation_count
== 1);
5160 * Don't allocate from faulted devices.
5163 spa_config_enter(spa
, SCL_ZIO
, FTAG
, RW_READER
);
5164 allocatable
= vdev_allocatable(vd
);
5165 spa_config_exit(spa
, SCL_ZIO
, FTAG
);
5167 allocatable
= vdev_allocatable(vd
);
5171 * Determine if the selected metaslab group is eligible
5172 * for allocations. If we're ganging then don't allow
5173 * this metaslab group to skip allocations since that would
5174 * inadvertently return ENOSPC and suspend the pool
5175 * even though space is still available.
5177 if (allocatable
&& !GANG_ALLOCATION(flags
) && !try_hard
) {
5178 allocatable
= metaslab_group_allocatable(mg
, rotor
,
5179 psize
, allocator
, d
);
5183 metaslab_trace_add(zal
, mg
, NULL
, psize
, d
,
5184 TRACE_NOT_ALLOCATABLE
, allocator
);
5188 ASSERT(mg
->mg_initialized
);
5191 * Avoid writing single-copy data to a failing,
5192 * non-redundant vdev, unless we've already tried all
5195 if ((vd
->vdev_stat
.vs_write_errors
> 0 ||
5196 vd
->vdev_state
< VDEV_STATE_HEALTHY
) &&
5197 d
== 0 && !try_hard
&& vd
->vdev_children
== 0) {
5198 metaslab_trace_add(zal
, mg
, NULL
, psize
, d
,
5199 TRACE_VDEV_ERROR
, allocator
);
5203 ASSERT(mg
->mg_class
== mc
);
5205 uint64_t asize
= vdev_psize_to_asize(vd
, psize
);
5206 ASSERT(P2PHASE(asize
, 1ULL << vd
->vdev_ashift
) == 0);
5209 * If we don't need to try hard, then require that the
5210 * block be on a different metaslab from any other DVAs
5211 * in this BP (unique=true). If we are trying hard, then
5212 * allow any metaslab to be used (unique=false).
5214 uint64_t offset
= metaslab_group_alloc(mg
, zal
, asize
, txg
,
5215 !try_hard
, dva
, d
, allocator
, try_hard
);
5217 if (offset
!= -1ULL) {
5219 * If we've just selected this metaslab group,
5220 * figure out whether the corresponding vdev is
5221 * over- or under-used relative to the pool,
5222 * and set an allocation bias to even it out.
5224 * Bias is also used to compensate for unequally
5225 * sized vdevs so that space is allocated fairly.
5227 if (mca
->mca_aliquot
== 0 && metaslab_bias_enabled
) {
5228 vdev_stat_t
*vs
= &vd
->vdev_stat
;
5229 int64_t vs_free
= vs
->vs_space
- vs
->vs_alloc
;
5230 int64_t mc_free
= mc
->mc_space
- mc
->mc_alloc
;
5234 * Calculate how much more or less we should
5235 * try to allocate from this device during
5236 * this iteration around the rotor.
5238 * This basically introduces a zero-centered
5239 * bias towards the devices with the most
5240 * free space, while compensating for vdev
5244 * vdev V1 = 16M/128M
5245 * vdev V2 = 16M/128M
5246 * ratio(V1) = 100% ratio(V2) = 100%
5248 * vdev V1 = 16M/128M
5249 * vdev V2 = 64M/128M
5250 * ratio(V1) = 127% ratio(V2) = 72%
5252 * vdev V1 = 16M/128M
5253 * vdev V2 = 64M/512M
5254 * ratio(V1) = 40% ratio(V2) = 160%
5256 ratio
= (vs_free
* mc
->mc_alloc_groups
* 100) /
5258 mg
->mg_bias
= ((ratio
- 100) *
5259 (int64_t)mg
->mg_aliquot
) / 100;
5260 } else if (!metaslab_bias_enabled
) {
5264 if ((flags
& METASLAB_FASTWRITE
) ||
5265 atomic_add_64_nv(&mca
->mca_aliquot
, asize
) >=
5266 mg
->mg_aliquot
+ mg
->mg_bias
) {
5267 mca
->mca_rotor
= mg
->mg_next
;
5268 mca
->mca_aliquot
= 0;
5271 DVA_SET_VDEV(&dva
[d
], vd
->vdev_id
);
5272 DVA_SET_OFFSET(&dva
[d
], offset
);
5273 DVA_SET_GANG(&dva
[d
],
5274 ((flags
& METASLAB_GANG_HEADER
) ? 1 : 0));
5275 DVA_SET_ASIZE(&dva
[d
], asize
);
5277 if (flags
& METASLAB_FASTWRITE
) {
5278 atomic_add_64(&vd
->vdev_pending_fastwrite
,
5285 mca
->mca_rotor
= mg
->mg_next
;
5286 mca
->mca_aliquot
= 0;
5287 } while ((mg
= mg
->mg_next
) != rotor
);
5290 * If we haven't tried hard, do so now.
5297 bzero(&dva
[d
], sizeof (dva_t
));
5299 metaslab_trace_add(zal
, rotor
, NULL
, psize
, d
, TRACE_ENOSPC
, allocator
);
5300 return (SET_ERROR(ENOSPC
));
5304 metaslab_free_concrete(vdev_t
*vd
, uint64_t offset
, uint64_t asize
,
5305 boolean_t checkpoint
)
5308 spa_t
*spa
= vd
->vdev_spa
;
5310 ASSERT(vdev_is_concrete(vd
));
5311 ASSERT3U(spa_config_held(spa
, SCL_ALL
, RW_READER
), !=, 0);
5312 ASSERT3U(offset
>> vd
->vdev_ms_shift
, <, vd
->vdev_ms_count
);
5314 msp
= vd
->vdev_ms
[offset
>> vd
->vdev_ms_shift
];
5316 VERIFY(!msp
->ms_condensing
);
5317 VERIFY3U(offset
, >=, msp
->ms_start
);
5318 VERIFY3U(offset
+ asize
, <=, msp
->ms_start
+ msp
->ms_size
);
5319 VERIFY0(P2PHASE(offset
, 1ULL << vd
->vdev_ashift
));
5320 VERIFY0(P2PHASE(asize
, 1ULL << vd
->vdev_ashift
));
5322 metaslab_check_free_impl(vd
, offset
, asize
);
5324 mutex_enter(&msp
->ms_lock
);
5325 if (range_tree_is_empty(msp
->ms_freeing
) &&
5326 range_tree_is_empty(msp
->ms_checkpointing
)) {
5327 vdev_dirty(vd
, VDD_METASLAB
, msp
, spa_syncing_txg(spa
));
5331 ASSERT(spa_has_checkpoint(spa
));
5332 range_tree_add(msp
->ms_checkpointing
, offset
, asize
);
5334 range_tree_add(msp
->ms_freeing
, offset
, asize
);
5336 mutex_exit(&msp
->ms_lock
);
5341 metaslab_free_impl_cb(uint64_t inner_offset
, vdev_t
*vd
, uint64_t offset
,
5342 uint64_t size
, void *arg
)
5344 boolean_t
*checkpoint
= arg
;
5346 ASSERT3P(checkpoint
, !=, NULL
);
5348 if (vd
->vdev_ops
->vdev_op_remap
!= NULL
)
5349 vdev_indirect_mark_obsolete(vd
, offset
, size
);
5351 metaslab_free_impl(vd
, offset
, size
, *checkpoint
);
5355 metaslab_free_impl(vdev_t
*vd
, uint64_t offset
, uint64_t size
,
5356 boolean_t checkpoint
)
5358 spa_t
*spa
= vd
->vdev_spa
;
5360 ASSERT3U(spa_config_held(spa
, SCL_ALL
, RW_READER
), !=, 0);
5362 if (spa_syncing_txg(spa
) > spa_freeze_txg(spa
))
5365 if (spa
->spa_vdev_removal
!= NULL
&&
5366 spa
->spa_vdev_removal
->svr_vdev_id
== vd
->vdev_id
&&
5367 vdev_is_concrete(vd
)) {
5369 * Note: we check if the vdev is concrete because when
5370 * we complete the removal, we first change the vdev to be
5371 * an indirect vdev (in open context), and then (in syncing
5372 * context) clear spa_vdev_removal.
5374 free_from_removing_vdev(vd
, offset
, size
);
5375 } else if (vd
->vdev_ops
->vdev_op_remap
!= NULL
) {
5376 vdev_indirect_mark_obsolete(vd
, offset
, size
);
5377 vd
->vdev_ops
->vdev_op_remap(vd
, offset
, size
,
5378 metaslab_free_impl_cb
, &checkpoint
);
5380 metaslab_free_concrete(vd
, offset
, size
, checkpoint
);
5384 typedef struct remap_blkptr_cb_arg
{
5386 spa_remap_cb_t rbca_cb
;
5387 vdev_t
*rbca_remap_vd
;
5388 uint64_t rbca_remap_offset
;
5390 } remap_blkptr_cb_arg_t
;
5393 remap_blkptr_cb(uint64_t inner_offset
, vdev_t
*vd
, uint64_t offset
,
5394 uint64_t size
, void *arg
)
5396 remap_blkptr_cb_arg_t
*rbca
= arg
;
5397 blkptr_t
*bp
= rbca
->rbca_bp
;
5399 /* We can not remap split blocks. */
5400 if (size
!= DVA_GET_ASIZE(&bp
->blk_dva
[0]))
5402 ASSERT0(inner_offset
);
5404 if (rbca
->rbca_cb
!= NULL
) {
5406 * At this point we know that we are not handling split
5407 * blocks and we invoke the callback on the previous
5408 * vdev which must be indirect.
5410 ASSERT3P(rbca
->rbca_remap_vd
->vdev_ops
, ==, &vdev_indirect_ops
);
5412 rbca
->rbca_cb(rbca
->rbca_remap_vd
->vdev_id
,
5413 rbca
->rbca_remap_offset
, size
, rbca
->rbca_cb_arg
);
5415 /* set up remap_blkptr_cb_arg for the next call */
5416 rbca
->rbca_remap_vd
= vd
;
5417 rbca
->rbca_remap_offset
= offset
;
5421 * The phys birth time is that of dva[0]. This ensures that we know
5422 * when each dva was written, so that resilver can determine which
5423 * blocks need to be scrubbed (i.e. those written during the time
5424 * the vdev was offline). It also ensures that the key used in
5425 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If
5426 * we didn't change the phys_birth, a lookup in the ARC for a
5427 * remapped BP could find the data that was previously stored at
5428 * this vdev + offset.
5430 vdev_t
*oldvd
= vdev_lookup_top(vd
->vdev_spa
,
5431 DVA_GET_VDEV(&bp
->blk_dva
[0]));
5432 vdev_indirect_births_t
*vib
= oldvd
->vdev_indirect_births
;
5433 bp
->blk_phys_birth
= vdev_indirect_births_physbirth(vib
,
5434 DVA_GET_OFFSET(&bp
->blk_dva
[0]), DVA_GET_ASIZE(&bp
->blk_dva
[0]));
5436 DVA_SET_VDEV(&bp
->blk_dva
[0], vd
->vdev_id
);
5437 DVA_SET_OFFSET(&bp
->blk_dva
[0], offset
);
/*
 * If the block pointer contains any indirect DVAs, modify them to refer to
 * concrete DVAs.  Note that this will sometimes not be possible, leaving
 * the indirect DVA in place.  This happens if the indirect DVA spans multiple
 * segments in the mapping (i.e. it is a "split block").
 *
 * If the BP was remapped, calls the callback on the original dva (note the
 * callback can be called multiple times if the original indirect DVA refers
 * to another indirect DVA, etc).
 *
 * Returns TRUE if the BP was remapped.
 */
boolean_t
spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
{
        remap_blkptr_cb_arg_t rbca;

        if (!zfs_remap_blkptr_enable)
                return (B_FALSE);

        if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
                return (B_FALSE);

        /*
         * Dedup BP's can not be remapped, because ddt_phys_select() depends
         * on DVA[0] being the same in the BP as in the DDT (dedup table).
         */
        if (BP_GET_DEDUP(bp))
                return (B_FALSE);

        /*
         * Gang blocks can not be remapped, because
         * zio_checksum_gang_verifier() depends on the DVA[0] that's in
         * the BP used to read the gang block header (GBH) being the same
         * as the DVA[0] that we allocated for the GBH.
         */
        if (BP_IS_GANG(bp))
                return (B_FALSE);

        /*
         * Embedded BP's have no DVA to remap.
         */
        if (BP_GET_NDVAS(bp) < 1)
                return (B_FALSE);

        /*
         * Note: we only remap dva[0].  If we remapped other dvas, we
         * would no longer know what their phys birth txg is.
         */
        dva_t *dva = &bp->blk_dva[0];

        uint64_t offset = DVA_GET_OFFSET(dva);
        uint64_t size = DVA_GET_ASIZE(dva);
        vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));

        if (vd->vdev_ops->vdev_op_remap == NULL)
                return (B_FALSE);

        rbca.rbca_bp = bp;
        rbca.rbca_cb = callback;
        rbca.rbca_remap_vd = vd;
        rbca.rbca_remap_offset = offset;
        rbca.rbca_cb_arg = arg;

        /*
         * remap_blkptr_cb() will be called in order for each level of
         * indirection, until a concrete vdev is reached or a split block is
         * encountered.  rbca_remap_vd and rbca_remap_offset are updated
         * within the callback as we go from one indirect vdev to the next
         * (either concrete or indirect again), in that order.
         */
        vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);

        /* Check if the DVA wasn't remapped because it is a split block */
        if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
                return (B_FALSE);

        return (B_TRUE);
}
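
/*
 * Illustrative usage (hypothetical caller, not part of this file): remap a
 * BP in place and log each original indirect DVA that gets traversed.
 *
 *      static void
 *      log_remap(uint64_t vdev_id, uint64_t offset, uint64_t size, void *arg)
 *      {
 *              zfs_dbgmsg("remapped from vdev %llu offset %llu",
 *                  (u_longlong_t)vdev_id, (u_longlong_t)offset);
 *      }
 *
 *      if (spa_remap_blkptr(spa, bp, log_remap, NULL))
 *              ... bp->blk_dva[0] now refers to a concrete vdev ...
 */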
/*
 * Undo the allocation of a DVA which happened in the given transaction group.
 */
void
metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
        metaslab_t *msp;
        vdev_t *vd;
        uint64_t vdev = DVA_GET_VDEV(dva);
        uint64_t offset = DVA_GET_OFFSET(dva);
        uint64_t size = DVA_GET_ASIZE(dva);

        ASSERT(DVA_IS_VALID(dva));
        ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

        if (txg > spa_freeze_txg(spa))
                return;

        if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
            (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
                zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
                    (u_longlong_t)vdev, (u_longlong_t)offset,
                    (u_longlong_t)size);
                return;
        }

        ASSERT(!vd->vdev_removing);
        ASSERT(vdev_is_concrete(vd));
        ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
        ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);

        if (DVA_GET_GANG(dva))
                size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

        msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

        mutex_enter(&msp->ms_lock);
        range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
            offset, size);
        msp->ms_allocating_total -= size;

        VERIFY(!msp->ms_condensing);
        VERIFY3U(offset, >=, msp->ms_start);
        VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
        VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
            msp->ms_size);
        VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
        VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
        range_tree_add(msp->ms_allocatable, offset, size);
        mutex_exit(&msp->ms_lock);
}
/*
 * Free the block represented by the given DVA.
 */
void
metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
{
        uint64_t vdev = DVA_GET_VDEV(dva);
        uint64_t offset = DVA_GET_OFFSET(dva);
        uint64_t size = DVA_GET_ASIZE(dva);
        vdev_t *vd = vdev_lookup_top(spa, vdev);

        ASSERT(DVA_IS_VALID(dva));
        ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

        if (DVA_GET_GANG(dva)) {
                size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
        }

        metaslab_free_impl(vd, offset, size, checkpoint);
}
/*
 * Reserve some allocation slots.  The reservation system must be called
 * before we call into the allocator.  If there aren't any available slots
 * then the I/O will be throttled until an I/O completes and its slots are
 * freed up.  The function returns true if it was successful in placing
 * the reservation.
 */
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
    zio_t *zio, int flags)
{
        metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
        uint64_t available_slots = 0;
        boolean_t slot_reserved = B_FALSE;
        uint64_t max = mca->mca_alloc_max_slots;

        ASSERT(mc->mc_alloc_throttle_enabled);
        mutex_enter(&mc->mc_lock);

        uint64_t reserved_slots = zfs_refcount_count(&mca->mca_alloc_slots);
        if (reserved_slots < max)
                available_slots = max - reserved_slots;

        if (slots <= available_slots || GANG_ALLOCATION(flags) ||
            flags & METASLAB_MUST_RESERVE) {
                /*
                 * We reserve the slots individually so that we can unreserve
                 * them individually when an I/O completes.
                 */
                for (int d = 0; d < slots; d++)
                        zfs_refcount_add(&mca->mca_alloc_slots, zio);
                zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
                slot_reserved = B_TRUE;
        }

        mutex_exit(&mc->mc_lock);
        return (slot_reserved);
}
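
/*
 * Worked example (illustrative numbers): with mca_alloc_max_slots == 100
 * and 97 slots already reserved, a request for slots == 4 is refused for a
 * normal zio, but is still granted for a gang allocation or a zio with
 * METASLAB_MUST_RESERVE set, even though it pushes the count past the
 * maximum.
 */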
void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
    int allocator, zio_t *zio)
{
        metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];

        ASSERT(mc->mc_alloc_throttle_enabled);
        mutex_enter(&mc->mc_lock);
        for (int d = 0; d < slots; d++)
                zfs_refcount_remove(&mca->mca_alloc_slots, zio);
        mutex_exit(&mc->mc_lock);
}
static int
metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
    uint64_t txg)
{
        metaslab_t *msp;
        spa_t *spa = vd->vdev_spa;
        int error = 0;

        if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
                return (SET_ERROR(ENXIO));

        ASSERT3P(vd->vdev_ms, !=, NULL);
        msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

        mutex_enter(&msp->ms_lock);

        if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
                error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
                if (error == EBUSY) {
                        ASSERT(msp->ms_loaded);
                        ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
                        error = 0;
                }
        }

        if (error == 0 &&
            !range_tree_contains(msp->ms_allocatable, offset, size))
                error = SET_ERROR(ENOENT);

        if (error || txg == 0) {        /* txg == 0 indicates dry run */
                mutex_exit(&msp->ms_lock);
                return (error);
        }

        VERIFY(!msp->ms_condensing);
        VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
        VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
        VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
            msp->ms_size);
        range_tree_remove(msp->ms_allocatable, offset, size);
        range_tree_clear(msp->ms_trim, offset, size);

        if (spa_writeable(spa)) {       /* don't dirty if we're zdb(8) */
                metaslab_class_t *mc = msp->ms_group->mg_class;
                multilist_sublist_t *mls =
                    multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp);
                if (!multilist_link_active(&msp->ms_class_txg_node)) {
                        msp->ms_selected_txg = txg;
                        multilist_sublist_insert_head(mls, msp);
                }
                multilist_sublist_unlock(mls);

                if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
                        vdev_dirty(vd, VDD_METASLAB, msp, txg);
                range_tree_add(msp->ms_allocating[txg & TXG_MASK],
                    offset, size);
                msp->ms_allocating_total += size;
        }

        mutex_exit(&msp->ms_lock);

        return (0);
}
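
/*
 * Note (descriptive, added for clarity): a txg of 0 makes the claim a dry
 * run -- the extent is verified against ms_allocatable but nothing is
 * removed and the metaslab is not dirtied.  metaslab_claim() relies on this
 * to validate every DVA of a BP before committing to any of them.
 */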
typedef struct metaslab_claim_cb_arg_t {
        uint64_t        mcca_txg;
        int             mcca_error;
} metaslab_claim_cb_arg_t;
static void
metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
        metaslab_claim_cb_arg_t *mcca_arg = arg;

        if (mcca_arg->mcca_error == 0) {
                mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
                    size, mcca_arg->mcca_txg);
        }
}
static int
metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
        if (vd->vdev_ops->vdev_op_remap != NULL) {
                metaslab_claim_cb_arg_t arg;

                /*
                 * Only zdb(8) can claim on indirect vdevs.  This is used
                 * to detect leaks of mapped space (that are not accounted
                 * for in the obsolete counts, spacemap, or bpobj).
                 */
                ASSERT(!spa_writeable(vd->vdev_spa));
                arg.mcca_txg = txg;
                arg.mcca_error = 0;

                vd->vdev_ops->vdev_op_remap(vd, offset, size,
                    metaslab_claim_impl_cb, &arg);

                if (arg.mcca_error == 0) {
                        arg.mcca_error = metaslab_claim_concrete(vd,
                            offset, size, txg);
                }
                return (arg.mcca_error);
        } else {
                return (metaslab_claim_concrete(vd, offset, size, txg));
        }
}
/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
        uint64_t vdev = DVA_GET_VDEV(dva);
        uint64_t offset = DVA_GET_OFFSET(dva);
        uint64_t size = DVA_GET_ASIZE(dva);
        vdev_t *vd;

        if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
                return (SET_ERROR(ENXIO));
        }

        ASSERT(DVA_IS_VALID(dva));

        if (DVA_GET_GANG(dva))
                size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

        return (metaslab_claim_impl(vd, offset, size, txg));
}
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
    zio_alloc_list_t *zal, zio_t *zio, int allocator)
{
        dva_t *dva = bp->blk_dva;
        dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
        int error = 0;

        ASSERT(bp->blk_birth == 0);
        ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

        spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

        if (mc->mc_allocator[allocator].mca_rotor == NULL) {
                /* no vdevs in this class */
                spa_config_exit(spa, SCL_ALLOC, FTAG);
                return (SET_ERROR(ENOSPC));
        }

        ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
        ASSERT(BP_GET_NDVAS(bp) == 0);
        ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
        ASSERT3P(zal, !=, NULL);

        for (int d = 0; d < ndvas; d++) {
                error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
                    txg, flags, zal, allocator);
                if (error != 0) {
                        for (d--; d >= 0; d--) {
                                metaslab_unalloc_dva(spa, &dva[d], txg);
                                metaslab_group_alloc_decrement(spa,
                                    DVA_GET_VDEV(&dva[d]), zio, flags,
                                    allocator, B_FALSE);
                                bzero(&dva[d], sizeof (dva_t));
                        }
                        spa_config_exit(spa, SCL_ALLOC, FTAG);
                        return (error);
                } else {
                        /*
                         * Update the metaslab group's queue depth
                         * based on the newly allocated dva.
                         */
                        metaslab_group_alloc_increment(spa,
                            DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
                }
        }
        ASSERT(error == 0);
        ASSERT(BP_GET_NDVAS(bp) == ndvas);

        spa_config_exit(spa, SCL_ALLOC, FTAG);

        BP_SET_BIRTH(bp, txg, 0);

        return (0);
}
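
/*
 * Failure handling above (descriptive, added for clarity): if allocating
 * dva[d] fails, any DVAs already placed in the BP are unwound with
 * metaslab_unalloc_dva(), their allocator queue-depth counts are
 * decremented, and the slots are zeroed so the caller gets back an
 * unmodified BP along with the error.
 */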
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
        const dva_t *dva = bp->blk_dva;
        int ndvas = BP_GET_NDVAS(bp);

        ASSERT(!BP_IS_HOLE(bp));
        ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

        /*
         * If we have a checkpoint for the pool we need to make sure that
         * the blocks that we free that are part of the checkpoint won't be
         * reused until the checkpoint is discarded or we revert to it.
         *
         * The checkpoint flag is passed down the metaslab_free code path
         * and is set whenever we want to add a block to the checkpoint's
         * accounting.  That is, we "checkpoint" blocks that existed at the
         * time the checkpoint was created and are therefore referenced by
         * the checkpointed uberblock.
         *
         * Note that we don't checkpoint any blocks if the current
         * syncing txg <= spa_checkpoint_txg.  We want these frees to sync
         * normally as they will be referenced by the checkpointed uberblock.
         */
        boolean_t checkpoint = B_FALSE;
        if (bp->blk_birth <= spa->spa_checkpoint_txg &&
            spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
                /*
                 * At this point, if the block is part of the checkpoint
                 * there is no way it was created in the current txg.
                 */
                ASSERT(!now);
                ASSERT3U(spa_syncing_txg(spa), ==, txg);
                checkpoint = B_TRUE;
        }

        spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

        for (int d = 0; d < ndvas; d++) {
                if (now) {
                        metaslab_unalloc_dva(spa, &dva[d], txg);
                } else {
                        ASSERT3U(txg, ==, spa_syncing_txg(spa));
                        metaslab_free_dva(spa, &dva[d], checkpoint);
                }
        }

        spa_config_exit(spa, SCL_FREE, FTAG);
}
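
/*
 * Example (illustrative txgs): with spa_checkpoint_txg == 100, a block born
 * in txg 90 and freed while syncing txg 120 is accounted to the checkpoint
 * (checkpoint == B_TRUE), while a block born in txg 110 is freed normally
 * because the checkpointed uberblock cannot reference it.
 */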
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
        const dva_t *dva = bp->blk_dva;
        int ndvas = BP_GET_NDVAS(bp);
        int error = 0;

        ASSERT(!BP_IS_HOLE(bp));

        if (txg != 0) {
                /*
                 * First do a dry run to make sure all DVAs are claimable,
                 * so we don't have to unwind from partial failures below.
                 */
                if ((error = metaslab_claim(spa, bp, 0)) != 0)
                        return (error);
        }

        spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

        for (int d = 0; d < ndvas; d++) {
                error = metaslab_claim_dva(spa, &dva[d], txg);
                if (error != 0)
                        break;
        }

        spa_config_exit(spa, SCL_ALLOC, FTAG);

        ASSERT(error == 0 || txg == 0);

        return (error);
}
void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
        const dva_t *dva = bp->blk_dva;
        int ndvas = BP_GET_NDVAS(bp);
        uint64_t psize = BP_GET_PSIZE(bp);
        int d;
        vdev_t *vd;

        ASSERT(!BP_IS_HOLE(bp));
        ASSERT(!BP_IS_EMBEDDED(bp));
        ASSERT(psize > 0);

        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

        for (d = 0; d < ndvas; d++) {
                if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
                        continue;
                atomic_add_64(&vd->vdev_pending_fastwrite, psize);
        }

        spa_config_exit(spa, SCL_VDEV, FTAG);
}
void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
        const dva_t *dva = bp->blk_dva;
        int ndvas = BP_GET_NDVAS(bp);
        uint64_t psize = BP_GET_PSIZE(bp);
        int d;
        vdev_t *vd;

        ASSERT(!BP_IS_HOLE(bp));
        ASSERT(!BP_IS_EMBEDDED(bp));
        ASSERT(psize > 0);

        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

        for (d = 0; d < ndvas; d++) {
                if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
                        continue;
                ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
                atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
        }

        spa_config_exit(spa, SCL_VDEV, FTAG);
}
static void
metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
        if (vd->vdev_ops == &vdev_indirect_ops)
                return;

        metaslab_check_free_impl(vd, offset, size);
}
static void
metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
{
        metaslab_t *msp;
        spa_t *spa __maybe_unused = vd->vdev_spa;

        if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
                return;

        if (vd->vdev_ops->vdev_op_remap != NULL) {
                vd->vdev_ops->vdev_op_remap(vd, offset, size,
                    metaslab_check_free_impl_cb, NULL);
                return;
        }

        ASSERT(vdev_is_concrete(vd));
        ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
        ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

        msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

        mutex_enter(&msp->ms_lock);
        if (msp->ms_loaded) {
                range_tree_verify_not_present(msp->ms_allocatable,
                    offset, size);
        }

        /*
         * Check all segments that currently exist in the freeing pipeline.
         *
         * It would intuitively make sense to also check the current
         * allocating tree since metaslab_unalloc_dva() exists for extents
         * that are allocated and freed in the same sync pass within the
         * same txg.  Unfortunately there are places (e.g. the ZIL) where
         * we allocate a segment but then we free part of it within the
         * same txg [see zil_sync()].  Thus, we don't call
         * range_tree_verify_not_present() on the current allocating tree.
         */
        range_tree_verify_not_present(msp->ms_freeing, offset, size);
        range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
        range_tree_verify_not_present(msp->ms_freed, offset, size);
        for (int j = 0; j < TXG_DEFER_SIZE; j++)
                range_tree_verify_not_present(msp->ms_defer[j], offset, size);
        range_tree_verify_not_present(msp->ms_trim, offset, size);
        mutex_exit(&msp->ms_lock);
}
void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
        if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
                return;

        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
        for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
                uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
                vdev_t *vd = vdev_lookup_top(spa, vdev);
                uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
                uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);

                if (DVA_GET_GANG(&bp->blk_dva[i]))
                        size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

                ASSERT3P(vd, !=, NULL);

                metaslab_check_free_impl(vd, offset, size);
        }
        spa_config_exit(spa, SCL_VDEV, FTAG);
}
static void
metaslab_group_disable_wait(metaslab_group_t *mg)
{
        ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
        while (mg->mg_disabled_updating) {
                cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
        }
}
static void
metaslab_group_disabled_increment(metaslab_group_t *mg)
{
        ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
        ASSERT(mg->mg_disabled_updating);

        while (mg->mg_ms_disabled >= max_disabled_ms) {
                cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
        }
        mg->mg_ms_disabled++;
        ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
}
/*
 * Mark the metaslab as disabled to prevent any allocations on this metaslab.
 * We must also track how many metaslabs are currently disabled within a
 * metaslab group and limit them to prevent allocation failures from
 * occurring because all metaslabs are disabled.
 */
void
metaslab_disable(metaslab_t *msp)
{
        ASSERT(!MUTEX_HELD(&msp->ms_lock));
        metaslab_group_t *mg = msp->ms_group;

        mutex_enter(&mg->mg_ms_disabled_lock);

        /*
         * To keep an accurate count of how many threads have disabled
         * a specific metaslab group, we only allow one thread to mark
         * the metaslab group at a time.  This ensures that the value of
         * ms_disabled will be accurate when we decide to mark a metaslab
         * group as disabled.  To do this we force all other threads
         * to wait until the metaslab group's mg_disabled_updating flag is
         * no longer set.
         */
        metaslab_group_disable_wait(mg);
        mg->mg_disabled_updating = B_TRUE;
        if (msp->ms_disabled == 0) {
                metaslab_group_disabled_increment(mg);
        }
        mutex_enter(&msp->ms_lock);
        msp->ms_disabled++;
        mutex_exit(&msp->ms_lock);

        mg->mg_disabled_updating = B_FALSE;
        cv_broadcast(&mg->mg_ms_disabled_cv);
        mutex_exit(&mg->mg_ms_disabled_lock);
}
void
metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
{
        metaslab_group_t *mg = msp->ms_group;
        spa_t *spa = mg->mg_vd->vdev_spa;

        /*
         * Wait for the outstanding I/O to be synced to prevent newly
         * allocated blocks from being overwritten.  This is used by
         * initialize and TRIM, which are modifying unallocated space.
         */
        if (sync)
                txg_wait_synced(spa_get_dsl(spa), 0);

        mutex_enter(&mg->mg_ms_disabled_lock);
        mutex_enter(&msp->ms_lock);
        if (--msp->ms_disabled == 0) {
                mg->mg_ms_disabled--;
                cv_broadcast(&mg->mg_ms_disabled_cv);
                if (unload)
                        metaslab_unload(msp);
        }
        mutex_exit(&msp->ms_lock);
        mutex_exit(&mg->mg_ms_disabled_lock);
}
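
/*
 * Typical usage pattern (sketch; mirrors how initialize/TRIM-style callers
 * use these interfaces):
 *
 *      metaslab_disable(msp);
 *      ... write to unallocated space within msp ...
 *      metaslab_enable(msp, B_TRUE, B_FALSE);   (sync, keep loaded)
 */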
static void
metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
{
        vdev_t *vd = ms->ms_group->mg_vd;
        spa_t *spa = vd->vdev_spa;
        objset_t *mos = spa_meta_objset(spa);

        ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));

        metaslab_unflushed_phys_t entry = {
                .msp_unflushed_txg = metaslab_unflushed_txg(ms),
        };
        uint64_t entry_size = sizeof (entry);
        uint64_t entry_offset = ms->ms_id * entry_size;

        uint64_t object = 0;
        int err = zap_lookup(mos, vd->vdev_top_zap,
            VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
            &object);
        if (err == ENOENT) {
                object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
                    SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
                VERIFY0(zap_add(mos, vd->vdev_top_zap,
                    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
                    &object, tx));
        } else {
                VERIFY0(err);
        }

        dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
            &entry, tx);
}
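
/*
 * Layout note (descriptive, added for clarity): records in this object are
 * fixed-size and indexed by metaslab id, so e.g. the entry for metaslab 7
 * lives at byte offset 7 * sizeof (metaslab_unflushed_phys_t).
 */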
void
metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
{
        spa_t *spa = ms->ms_group->mg_vd->vdev_spa;

        if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
                return;

        ms->ms_unflushed_txg = txg;
        metaslab_update_ondisk_flush_data(ms, tx);
}
uint64_t
metaslab_unflushed_txg(metaslab_t *ms)
{
        return (ms->ms_unflushed_txg);
}
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, ULONG, ZMOD_RW,
	"Allocation granularity (a.k.a. stripe size)");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW,
	"Load all metaslabs when pool is first opened");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
	"Prevent metaslabs from being unloaded");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
	"Preload potential metaslabs during reassessment");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, INT, ZMOD_RW,
	"Delay in txgs after metaslab was last used before unloading");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, INT, ZMOD_RW,
	"Delay in milliseconds after metaslab was last used before unloading");

ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, INT, ZMOD_RW,
	"Percentage of metaslab group size that should be free to make it "
	"eligible for allocation");

ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, INT, ZMOD_RW,
	"Percentage of metaslab group size that should be considered eligible "
	"for allocations unless all metaslab groups within the metaslab class "
	"have also crossed this threshold");

ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, INT,
	ZMOD_RW, "Fragmentation for metaslab to allow allocation");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT,
	ZMOD_RW,
	"Use the fragmentation metric to prefer less fragmented metaslabs");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
	"Prefer metaslabs with lower LBAs");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW,
	"Enable metaslab group biasing");

ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT,
	ZMOD_RW, "Enable segment-based metaslab selection");

ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
	"Segment-based metaslab selection maximum buckets before switching");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, ULONG, ZMOD_RW,
	"Blocks larger than this size are forced to be gang blocks");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, INT, ZMOD_RW,
	"Max distance (bytes) to search forward before using size tree");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
	"When looking in size tree, use largest segment instead of exact fit");

ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, ULONG,
	ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");

ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, INT, ZMOD_RW,
	"Percentage of memory that can be used to store metaslab range trees");