/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */
#include <sys/zfs_context.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>

#define	WITH_DF_BLOCK_ALLOCATOR
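/*
 * GANG_ALLOCATION() is nonzero when the allocation being performed is on
 * behalf of a gang block, i.e. either a gang header or one of its child
 * (constituent) blocks.
 */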
#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
/*
 * Metaslab granularity, in bytes. This is roughly similar to what would be
 * referred to as the "stripe size" in traditional RAID arrays. In normal
 * operation, we will try to write this amount of data to a top-level vdev
 * before moving on to the next one.
 */
unsigned long metaslab_aliquot = 512 << 10;
/*
 * For testing, make some blocks above a certain size be gang blocks.
 */
unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
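/*
 * As an illustration only (not a recommended value): lowering this to
 * 128K + 1 would make writes larger than 128K candidates for gang
 * allocation, which can be handy when exercising the gang-block code
 * paths on a pool that is not actually short on space.
 */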
/*
 * In pools where the log space map feature is not enabled we touch
 * multiple metaslabs (and their respective space maps) with each
 * transaction group. Thus, we benefit from having a small space map
 * block size since it allows us to issue more I/O operations scattered
 * around the disk. So a sane default for the space map block size
 * is 16K.
 */
int zfs_metaslab_sm_blksz_no_log = (1 << 14);
/*
 * When the log space map feature is enabled, we accumulate a lot of
 * changes per metaslab that are flushed once in a while so we benefit
 * from a bigger block size like 128K for the metaslab space maps.
 */
int zfs_metaslab_sm_blksz_with_log = (1 << 17);
/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;
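/*
 * Put differently (assuming the condensing check compares the current
 * on-disk space map size against the size it would have if rewritten from
 * the in-core range tree): with the default of 200, a space map is only
 * considered for condensing once its on-disk representation is at least
 * twice as large as its optimal (condensed) size would be.
 */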
/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
int zfs_metaslab_condense_block_threshold = 4;
/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;
/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or
 * equal to zfs_mg_fragmentation_threshold. If a metaslab group
 * exceeds this threshold then it will be skipped unless all metaslab
 * groups within the metaslab class have also crossed this threshold.
 *
 * This tunable was introduced to avoid edge cases where we continue
 * allocating from very fragmented disks in our pool while other, less
 * fragmented disks, exist. On the other hand, if all disks in the
 * pool are uniformly approaching the threshold, the threshold can
 * be a speed bump in performance, where we keep switching the disks
 * that we allocate from (e.g. we allocate some segments from disk A
 * making it bypass the threshold while freeing segments from disk
 * B, getting its fragmentation below the threshold).
 *
 * Empirically, we've seen that our vdev selection for allocations is
 * good enough that fragmentation increases uniformly across all vdevs
 * the majority of the time. Thus we set the threshold percentage high
 * enough to avoid hitting the speed bump on pools that are being pushed
 * to the edge.
 */
int zfs_mg_fragmentation_threshold = 95;
/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;
/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = 0;
/*
 * When set will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;
/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;
/*
 * Maximum distance to search forward from the last offset. Without this
 * limit, fragmented pools can see >100,000 iterations and
 * metaslab_block_picker() becomes the performance limiting factor on
 * high-performance storage.
 *
 * With the default setting of 16MB, we typically see less than 500
 * iterations, even with very fragmented, ashift=9 pools. The maximum number
 * of iterations possible is:
 *	metaslab_df_max_search / (2 * (1<<ashift))
 * With the default setting of 16MB this is 16*1024 (with ashift=9) or
 * 2048 (with ashift=12).
 */
int metaslab_df_max_search = 16 * 1024 * 1024;
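/*
 * Sanity-checking the bound above with the default value:
 *	16MB / (2 * 512)  = 16777216 / 1024 = 16384 = 16*1024	(ashift=9)
 *	16MB / (2 * 4096) = 16777216 / 8192 = 2048		(ashift=12)
 */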
/*
 * If we are not searching forward (due to metaslab_df_max_search,
 * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
 * controls what segment is used. If it is set, we will use the largest free
 * segment. If it is not set, we will use a segment of exactly the requested
 * size (or larger).
 */
int metaslab_df_use_largest_segment = B_FALSE;
/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;
/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;
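/* With TXG_SIZE being 4, the default above works out to 8 txgs. */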
/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;
/*
 * Enable/disable preloading of metaslab.
 */
int metaslab_preload_enabled = B_TRUE;
/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
int metaslab_fragmentation_factor_enabled = B_TRUE;
/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
int metaslab_lba_weighting_enabled = B_TRUE;
/*
 * Enable/disable metaslab group biasing.
 */
int metaslab_bias_enabled = B_TRUE;
/*
 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
 */
boolean_t zfs_remap_blkptr_enable = B_TRUE;
/*
 * Enable/disable segment-based metaslab selection.
 */
int zfs_metaslab_segment_weight_enabled = B_TRUE;
/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
int zfs_metaslab_switch_threshold = 2;
/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
#ifdef _METASLAB_TRACING
boolean_t metaslab_trace_enabled = B_TRUE;
#endif
/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached allowing for further investigation.
 */
#ifdef _METASLAB_TRACING
uint64_t metaslab_trace_max_entries = 5000;
#endif
/*
 * Maximum number of metaslabs per group that can be disabled
 * simultaneously.
 */
int max_disabled_ms = 3;
static uint64_t metaslab_weight(metaslab_t *);
static void metaslab_set_fragmentation(metaslab_t *);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);

static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);

#ifdef _METASLAB_TRACING
kmem_cache_t *metaslab_alloc_trace_cache;
#endif
/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
	mc->mc_alloc_slots = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (zfs_refcount_t), KM_SLEEP);
	mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (uint64_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++)
		zfs_refcount_create_tracked(&mc->mc_alloc_slots[i]);

	return (mc);
}
void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
		zfs_refcount_destroy(&mc->mc_alloc_slots[i]);
	kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
	    sizeof (zfs_refcount_t));
	kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
	    sizeof (uint64_t));
	mutex_destroy(&mc->mc_lock);
	kmem_free(mc, sizeof (metaslab_class_t));
}
int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}
void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}
uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}
void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);

	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels,
		 * or vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}
/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * Calculate if we have enough space to add additional
		 * metaslabs. We report the expandable space in terms
		 * of the metaslab size since that's the unit of expansion.
		 */
		space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
		    1ULL << tvd->vdev_ms_shift);
	}
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (space);
}
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = (const metaslab_t *)x1;
	const metaslab_t *m2 = (const metaslab_t *)x2;

	int sort1 = 0;
	int sort2 = 0;
	if (m1->ms_allocator != -1 && m1->ms_primary)
		sort1 = 1;
	else if (m1->ms_allocator != -1 && !m1->ms_primary)
		sort1 = 2;
	if (m2->ms_allocator != -1 && m2->ms_primary)
		sort2 = 1;
	else if (m2->ms_allocator != -1 && !m2->ms_primary)
		sort2 = 2;

	/*
	 * Sort inactive metaslabs first, then primaries, then secondaries. When
	 * selecting a metaslab to allocate from, an allocator first tries its
	 * primary, then secondary active metaslab. If it doesn't have active
	 * metaslabs, or can't allocate from them, it searches for an inactive
	 * metaslab to activate. If it can't find a suitable one, it will steal
	 * a primary or secondary metaslab from another allocator.
	 */
	if (sort1 < sort2)
		return (-1);
	if (sort1 > sort2)
		return (1);

	int cmp = AVL_CMP(m2->ms_weight, m1->ms_weight);
	if (likely(cmp))
		return (cmp);

	IMPLY(AVL_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);

	return (AVL_CMP(m1->ms_start, m2->ms_start));
}
/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the capacity is below
 * the zfs_mg_noalloc_threshold or has a fragmentation value that is
 * greater than zfs_mg_fragmentation_threshold. If a metaslab group
 * transitions from allocatable to non-allocatable or vice versa then the
 * metaslab group's class is updated to reflect the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;
	boolean_t was_initialized;

	ASSERT(vd == vd->vdev_top);
	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
	    SCL_ALLOC);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;
	was_initialized = mg->mg_initialized;

	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);

	mutex_enter(&mc->mc_lock);

	/*
	 * If the metaslab group was just added then it won't
	 * have any space until we finish syncing out this txg.
	 * At that point we will consider it initialized and available
	 * for allocations. We also don't consider non-activated
	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
	 * to be initialized, because they can't be used for allocation.
	 */
	mg->mg_initialized = metaslab_group_initialized(mg);
	if (!was_initialized && mg->mg_initialized) {
		mc->mc_groups++;
	} else if (was_initialized && !mg->mg_initialized) {
		ASSERT3U(mc->mc_groups, >, 0);
		mc->mc_groups--;
	}
	if (mg->mg_initialized)
		mg->mg_no_free_space = B_FALSE;

	/*
	 * A metaslab group is considered allocatable if it has plenty
	 * of free space or is not heavily fragmented. We only take
	 * fragmentation into account if the metaslab group has a valid
	 * fragmentation metric (i.e. a value between 0 and 100).
	 */
	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

	/*
	 * The mc_alloc_groups maintains a count of the number of
	 * groups in this metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;
	mutex_exit(&mc->mc_lock);

	mutex_exit(&mg->mg_lock);
}
static int
metaslab_sort_by_flushed(const void *va, const void *vb)
{
	const metaslab_t *a = va;
	const metaslab_t *b = vb;

	int cmp = AVL_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
	if (likely(cmp))
		return (cmp);

	uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
	uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
	cmp = AVL_CMP(a_vdev_id, b_vdev_id);
	if (cmp)
		return (cmp);

	return (AVL_CMP(a->ms_id, b->ms_id));
}
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
	mg->mg_primaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
	    KM_SLEEP);
	mg->mg_secondaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
	    KM_SLEEP);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;
	mg->mg_initialized = B_FALSE;
	mg->mg_no_free_space = B_TRUE;
	mg->mg_allocators = allocators;

	mg->mg_alloc_queue_depth = kmem_zalloc(allocators *
	    sizeof (zfs_refcount_t), KM_SLEEP);
	mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
	    sizeof (uint64_t), KM_SLEEP);
	for (int i = 0; i < allocators; i++) {
		zfs_refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
		mg->mg_cur_max_alloc_queue_depth[i] = 0;
	}

	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
	    maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);

	return (mg);
}
void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	taskq_destroy(mg->mg_taskq);
	avl_destroy(&mg->mg_metaslab_tree);
	kmem_free(mg->mg_primaries, mg->mg_allocators * sizeof (metaslab_t *));
	kmem_free(mg->mg_secondaries, mg->mg_allocators *
	    sizeof (metaslab_t *));
	mutex_destroy(&mg->mg_lock);
	mutex_destroy(&mg->mg_ms_disabled_lock);
	cv_destroy(&mg->mg_ms_disabled_cv);

	for (int i = 0; i < mg->mg_allocators; i++) {
		zfs_refcount_destroy(&mg->mg_alloc_queue_depth[i]);
		mg->mg_cur_max_alloc_queue_depth[i] = 0;
	}
	kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
	    sizeof (zfs_refcount_t));
	kmem_free(mg->mg_cur_max_alloc_queue_depth, mg->mg_allocators *
	    sizeof (uint64_t));

	kmem_free(mg, sizeof (metaslab_group_t));
}
void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER), !=, 0);

	ASSERT(mc->mc_rotor != mg);
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
}
763 * Passivate a metaslab group and remove it from the allocation rotor.
764 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
765 * a metaslab group. This function will momentarily drop spa_config_locks
766 * that are lower than the SCL_ALLOC lock (see comment below).
769 metaslab_group_passivate(metaslab_group_t
*mg
)
771 metaslab_class_t
*mc
= mg
->mg_class
;
772 spa_t
*spa
= mc
->mc_spa
;
773 metaslab_group_t
*mgprev
, *mgnext
;
774 int locks
= spa_config_held(spa
, SCL_ALL
, RW_WRITER
);
776 ASSERT3U(spa_config_held(spa
, SCL_ALLOC
| SCL_ZIO
, RW_WRITER
), ==,
777 (SCL_ALLOC
| SCL_ZIO
));
779 if (--mg
->mg_activation_count
!= 0) {
780 ASSERT(mc
->mc_rotor
!= mg
);
781 ASSERT(mg
->mg_prev
== NULL
);
782 ASSERT(mg
->mg_next
== NULL
);
783 ASSERT(mg
->mg_activation_count
< 0);
788 * The spa_config_lock is an array of rwlocks, ordered as
789 * follows (from highest to lowest):
790 * SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
791 * SCL_ZIO > SCL_FREE > SCL_VDEV
792 * (For more information about the spa_config_lock see spa_misc.c)
793 * The higher the lock, the broader its coverage. When we passivate
794 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
795 * config locks. However, the metaslab group's taskq might be trying
796 * to preload metaslabs so we must drop the SCL_ZIO lock and any
797 * lower locks to allow the I/O to complete. At a minimum,
798 * we continue to hold the SCL_ALLOC lock, which prevents any future
799 * allocations from taking place and any changes to the vdev tree.
801 spa_config_exit(spa
, locks
& ~(SCL_ZIO
- 1), spa
);
802 taskq_wait_outstanding(mg
->mg_taskq
, 0);
803 spa_config_enter(spa
, locks
& ~(SCL_ZIO
- 1), spa
, RW_WRITER
);
804 metaslab_group_alloc_update(mg
);
805 for (int i
= 0; i
< mg
->mg_allocators
; i
++) {
806 metaslab_t
*msp
= mg
->mg_primaries
[i
];
808 mutex_enter(&msp
->ms_lock
);
809 metaslab_passivate(msp
,
810 metaslab_weight_from_range_tree(msp
));
811 mutex_exit(&msp
->ms_lock
);
813 msp
= mg
->mg_secondaries
[i
];
815 mutex_enter(&msp
->ms_lock
);
816 metaslab_passivate(msp
,
817 metaslab_weight_from_range_tree(msp
));
818 mutex_exit(&msp
->ms_lock
);
822 mgprev
= mg
->mg_prev
;
823 mgnext
= mg
->mg_next
;
828 mc
->mc_rotor
= mgnext
;
829 mgprev
->mg_next
= mgnext
;
830 mgnext
->mg_prev
= mgprev
;
838 metaslab_group_initialized(metaslab_group_t
*mg
)
840 vdev_t
*vd
= mg
->mg_vd
;
841 vdev_stat_t
*vs
= &vd
->vdev_stat
;
843 return (vs
->vs_space
!= 0 && mg
->mg_activation_count
> 0);
847 metaslab_group_get_space(metaslab_group_t
*mg
)
849 return ((1ULL << mg
->mg_vd
->vdev_ms_shift
) * mg
->mg_vd
->vdev_ms_count
);
853 metaslab_group_histogram_verify(metaslab_group_t
*mg
)
856 vdev_t
*vd
= mg
->mg_vd
;
857 uint64_t ashift
= vd
->vdev_ashift
;
860 if ((zfs_flags
& ZFS_DEBUG_HISTOGRAM_VERIFY
) == 0)
863 mg_hist
= kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
,
866 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE
, >=,
867 SPACE_MAP_HISTOGRAM_SIZE
+ ashift
);
869 for (int m
= 0; m
< vd
->vdev_ms_count
; m
++) {
870 metaslab_t
*msp
= vd
->vdev_ms
[m
];
872 /* skip if not active or not a member */
873 if (msp
->ms_sm
== NULL
|| msp
->ms_group
!= mg
)
876 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++)
877 mg_hist
[i
+ ashift
] +=
878 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
881 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
882 VERIFY3U(mg_hist
[i
], ==, mg
->mg_histogram
[i
]);
884 kmem_free(mg_hist
, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
);
888 metaslab_group_histogram_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
890 metaslab_class_t
*mc
= mg
->mg_class
;
891 uint64_t ashift
= mg
->mg_vd
->vdev_ashift
;
893 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
894 if (msp
->ms_sm
== NULL
)
897 mutex_enter(&mg
->mg_lock
);
898 for (int i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
899 mg
->mg_histogram
[i
+ ashift
] +=
900 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
901 mc
->mc_histogram
[i
+ ashift
] +=
902 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
904 mutex_exit(&mg
->mg_lock
);
908 metaslab_group_histogram_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
910 metaslab_class_t
*mc
= mg
->mg_class
;
911 uint64_t ashift
= mg
->mg_vd
->vdev_ashift
;
913 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
914 if (msp
->ms_sm
== NULL
)
917 mutex_enter(&mg
->mg_lock
);
918 for (int i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
919 ASSERT3U(mg
->mg_histogram
[i
+ ashift
], >=,
920 msp
->ms_sm
->sm_phys
->smp_histogram
[i
]);
921 ASSERT3U(mc
->mc_histogram
[i
+ ashift
], >=,
922 msp
->ms_sm
->sm_phys
->smp_histogram
[i
]);
924 mg
->mg_histogram
[i
+ ashift
] -=
925 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
926 mc
->mc_histogram
[i
+ ashift
] -=
927 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
929 mutex_exit(&mg
->mg_lock
);
933 metaslab_group_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
935 ASSERT(msp
->ms_group
== NULL
);
936 mutex_enter(&mg
->mg_lock
);
939 avl_add(&mg
->mg_metaslab_tree
, msp
);
940 mutex_exit(&mg
->mg_lock
);
942 mutex_enter(&msp
->ms_lock
);
943 metaslab_group_histogram_add(mg
, msp
);
944 mutex_exit(&msp
->ms_lock
);
948 metaslab_group_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
950 mutex_enter(&msp
->ms_lock
);
951 metaslab_group_histogram_remove(mg
, msp
);
952 mutex_exit(&msp
->ms_lock
);
954 mutex_enter(&mg
->mg_lock
);
955 ASSERT(msp
->ms_group
== mg
);
956 avl_remove(&mg
->mg_metaslab_tree
, msp
);
957 msp
->ms_group
= NULL
;
958 mutex_exit(&mg
->mg_lock
);
962 metaslab_group_sort_impl(metaslab_group_t
*mg
, metaslab_t
*msp
, uint64_t weight
)
964 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
965 ASSERT(MUTEX_HELD(&mg
->mg_lock
));
966 ASSERT(msp
->ms_group
== mg
);
968 avl_remove(&mg
->mg_metaslab_tree
, msp
);
969 msp
->ms_weight
= weight
;
970 avl_add(&mg
->mg_metaslab_tree
, msp
);
975 metaslab_group_sort(metaslab_group_t
*mg
, metaslab_t
*msp
, uint64_t weight
)
978 * Although in principle the weight can be any value, in
979 * practice we do not use values in the range [1, 511].
981 ASSERT(weight
>= SPA_MINBLOCKSIZE
|| weight
== 0);
982 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
984 mutex_enter(&mg
->mg_lock
);
985 metaslab_group_sort_impl(mg
, msp
, weight
);
986 mutex_exit(&mg
->mg_lock
);
 * Calculate the fragmentation for a given metaslab group. We can use
 * a simple average here since all metaslabs within the group must have
 * the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
 * group have a fragmentation metric.
997 metaslab_group_fragmentation(metaslab_group_t
*mg
)
999 vdev_t
*vd
= mg
->mg_vd
;
1000 uint64_t fragmentation
= 0;
1001 uint64_t valid_ms
= 0;
1003 for (int m
= 0; m
< vd
->vdev_ms_count
; m
++) {
1004 metaslab_t
*msp
= vd
->vdev_ms
[m
];
1006 if (msp
->ms_fragmentation
== ZFS_FRAG_INVALID
)
1008 if (msp
->ms_group
!= mg
)
1012 fragmentation
+= msp
->ms_fragmentation
;
1015 if (valid_ms
<= mg
->mg_vd
->vdev_ms_count
/ 2)
1016 return (ZFS_FRAG_INVALID
);
1018 fragmentation
/= valid_ms
;
1019 ASSERT3U(fragmentation
, <=, 100);
1020 return (fragmentation
);
1024 * Determine if a given metaslab group should skip allocations. A metaslab
1025 * group should avoid allocations if its free capacity is less than the
1026 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
1027 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
1028 * that can still handle allocations. If the allocation throttle is enabled
1029 * then we skip allocations to devices that have reached their maximum
1030 * allocation queue depth unless the selected metaslab group is the only
1031 * eligible group remaining.
1034 metaslab_group_allocatable(metaslab_group_t
*mg
, metaslab_group_t
*rotor
,
1035 uint64_t psize
, int allocator
, int d
)
1037 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
1038 metaslab_class_t
*mc
= mg
->mg_class
;
 * We can only consider skipping this metaslab group if it's
 * in the normal metaslab class and there are other metaslab
 * groups to select from. Otherwise, we always consider it eligible
 * for allocations.
1046 if ((mc
!= spa_normal_class(spa
) &&
1047 mc
!= spa_special_class(spa
) &&
1048 mc
!= spa_dedup_class(spa
)) ||
1053 * If the metaslab group's mg_allocatable flag is set (see comments
1054 * in metaslab_group_alloc_update() for more information) and
1055 * the allocation throttle is disabled then allow allocations to this
1056 * device. However, if the allocation throttle is enabled then
1057 * check if we have reached our allocation limit (mg_alloc_queue_depth)
1058 * to determine if we should allow allocations to this metaslab group.
1059 * If all metaslab groups are no longer considered allocatable
1060 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
1061 * gang block size then we allow allocations on this metaslab group
1062 * regardless of the mg_allocatable or throttle settings.
1064 if (mg
->mg_allocatable
) {
1065 metaslab_group_t
*mgp
;
1067 uint64_t qmax
= mg
->mg_cur_max_alloc_queue_depth
[allocator
];
1069 if (!mc
->mc_alloc_throttle_enabled
)
1073 * If this metaslab group does not have any free space, then
1074 * there is no point in looking further.
1076 if (mg
->mg_no_free_space
)
 * Relax allocation throttling for ditto blocks. Due to
 * random imbalances in allocation, the copies all tend to get
 * pushed to the one vdev that looks a bit better at the moment.
1084 qmax
= qmax
* (4 + d
) / 4;
1086 qdepth
= zfs_refcount_count(
1087 &mg
->mg_alloc_queue_depth
[allocator
]);
 * If this metaslab group is below its qmax or it's
 * the only allocatable metaslab group, then attempt
 * to allocate from it.
1094 if (qdepth
< qmax
|| mc
->mc_alloc_groups
== 1)
1096 ASSERT3U(mc
->mc_alloc_groups
, >, 1);
1099 * Since this metaslab group is at or over its qmax, we
1100 * need to determine if there are metaslab groups after this
1101 * one that might be able to handle this allocation. This is
1102 * racy since we can't hold the locks for all metaslab
1103 * groups at the same time when we make this check.
1105 for (mgp
= mg
->mg_next
; mgp
!= rotor
; mgp
= mgp
->mg_next
) {
1106 qmax
= mgp
->mg_cur_max_alloc_queue_depth
[allocator
];
1107 qmax
= qmax
* (4 + d
) / 4;
1108 qdepth
= zfs_refcount_count(
1109 &mgp
->mg_alloc_queue_depth
[allocator
]);
1112 * If there is another metaslab group that
1113 * might be able to handle the allocation, then
1114 * we return false so that we skip this group.
1116 if (qdepth
< qmax
&& !mgp
->mg_no_free_space
)
1121 * We didn't find another group to handle the allocation
1122 * so we can't skip this metaslab group even though
1123 * we are at or over our qmax.
1127 } else if (mc
->mc_alloc_groups
== 0 || psize
== SPA_MINBLOCKSIZE
) {
1134 * ==========================================================================
1135 * Range tree callbacks
1136 * ==========================================================================
1140 * Comparison function for the private size-ordered tree. Tree is sorted
1141 * by size, larger sizes at the end of the tree.
1144 metaslab_rangesize_compare(const void *x1
, const void *x2
)
1146 const range_seg_t
*r1
= x1
;
1147 const range_seg_t
*r2
= x2
;
1148 uint64_t rs_size1
= r1
->rs_end
- r1
->rs_start
;
1149 uint64_t rs_size2
= r2
->rs_end
- r2
->rs_start
;
1151 int cmp
= AVL_CMP(rs_size1
, rs_size2
);
1155 return (AVL_CMP(r1
->rs_start
, r2
->rs_start
));
1159 * ==========================================================================
1160 * Common allocator routines
1161 * ==========================================================================
1165 * Return the maximum contiguous segment within the metaslab.
1168 metaslab_block_maxsize(metaslab_t
*msp
)
1170 avl_tree_t
*t
= &msp
->ms_allocatable_by_size
;
1173 if (t
== NULL
|| (rs
= avl_last(t
)) == NULL
)
1176 return (rs
->rs_end
- rs
->rs_start
);
1179 static range_seg_t
*
1180 metaslab_block_find(avl_tree_t
*t
, uint64_t start
, uint64_t size
)
1182 range_seg_t
*rs
, rsearch
;
1185 rsearch
.rs_start
= start
;
1186 rsearch
.rs_end
= start
+ size
;
1188 rs
= avl_find(t
, &rsearch
, &where
);
1190 rs
= avl_nearest(t
, where
, AVL_AFTER
);
1196 #if defined(WITH_DF_BLOCK_ALLOCATOR) || \
1197 defined(WITH_CF_BLOCK_ALLOCATOR)
1199 * This is a helper function that can be used by the allocator to find
1200 * a suitable block to allocate. This will search the specified AVL
1201 * tree looking for a block that matches the specified criteria.
1204 metaslab_block_picker(avl_tree_t
*t
, uint64_t *cursor
, uint64_t size
,
1205 uint64_t max_search
)
1207 range_seg_t
*rs
= metaslab_block_find(t
, *cursor
, size
);
1208 uint64_t first_found
;
1211 first_found
= rs
->rs_start
;
1213 while (rs
!= NULL
&& rs
->rs_start
- first_found
<= max_search
) {
1214 uint64_t offset
= rs
->rs_start
;
1215 if (offset
+ size
<= rs
->rs_end
) {
1216 *cursor
= offset
+ size
;
1219 rs
= AVL_NEXT(t
, rs
);
1225 #endif /* WITH_DF/CF_BLOCK_ALLOCATOR */
#if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Dynamic Fit (df) block allocator
 *
 * Search for a free chunk of at least this size, starting from the last
 * offset (for this alignment of block) looking for up to
 * metaslab_df_max_search bytes (16MB). If a large enough free chunk is not
 * found within 16MB, then return a free chunk of exactly the requested size
 * (or larger).
 *
 * If it seems like searching from the last offset will be unproductive, skip
 * that and just return a free chunk of exactly the requested size (or larger).
 * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This
 * mechanism is probably not very useful and may be removed in the future.
 *
 * The behavior when not searching can be changed to return the largest free
 * chunk, instead of a free chunk of exactly the requested size, by setting
 * metaslab_df_use_largest_segment.
 * ==========================================================================
 */
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not guarantee that other allocation sizes
	 * may exist in the same region.
	 */
	uint64_t align = size & -size;
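	/*
	 * For example, a request of size 0x6000 (24K) yields
	 * align = 0x6000 & -0x6000 = 0x2000 (8K), so the 8K-aligned
	 * cursor bucket is used below.
	 */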
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	range_tree_t *rt = msp->ms_allocatable;
	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
	uint64_t offset;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(&rt->rt_root), ==,
	    avl_numnodes(&msp->ms_allocatable_by_size));

	/*
	 * If we're running low on space, find a segment based on size,
	 * rather than iterating based on offset.
	 */
	if (metaslab_block_maxsize(msp) < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		offset = -1;
	} else {
		offset = metaslab_block_picker(&rt->rt_root,
		    cursor, size, metaslab_df_max_search);
	}

	if (offset == -1) {
		range_seg_t *rs;
		if (metaslab_df_use_largest_segment) {
			/* use largest free segment */
			rs = avl_last(&msp->ms_allocatable_by_size);
		} else {
			/* use segment of this size, or next largest */
			rs = metaslab_block_find(&msp->ms_allocatable_by_size,
			    0, size);
		}
		if (rs != NULL && rs->rs_start + size <= rs->rs_end) {
			offset = rs->rs_start;
			*cursor = offset + size;
		}
	}

	return (offset);
}

static metaslab_ops_t metaslab_df_ops = {
	metaslab_df_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
#endif /* WITH_DF_BLOCK_ALLOCATOR */
1306 #if defined(WITH_CF_BLOCK_ALLOCATOR)
1308 * ==========================================================================
1309 * Cursor fit block allocator -
1310 * Select the largest region in the metaslab, set the cursor to the beginning
1311 * of the range and the cursor_end to the end of the range. As allocations
1312 * are made advance the cursor. Continue allocating from the cursor until
1313 * the range is exhausted and then find a new range.
1314 * ==========================================================================
1317 metaslab_cf_alloc(metaslab_t
*msp
, uint64_t size
)
1319 range_tree_t
*rt
= msp
->ms_allocatable
;
1320 avl_tree_t
*t
= &msp
->ms_allocatable_by_size
;
1321 uint64_t *cursor
= &msp
->ms_lbas
[0];
1322 uint64_t *cursor_end
= &msp
->ms_lbas
[1];
1323 uint64_t offset
= 0;
1325 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1326 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&rt
->rt_root
));
1328 ASSERT3U(*cursor_end
, >=, *cursor
);
1330 if ((*cursor
+ size
) > *cursor_end
) {
1333 rs
= avl_last(&msp
->ms_allocatable_by_size
);
1334 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
)
1337 *cursor
= rs
->rs_start
;
1338 *cursor_end
= rs
->rs_end
;
1347 static metaslab_ops_t metaslab_cf_ops
= {
1351 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_cf_ops
;
1352 #endif /* WITH_CF_BLOCK_ALLOCATOR */
1354 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
1356 * ==========================================================================
1357 * New dynamic fit allocator -
1358 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1359 * contiguous blocks. If no region is found then just use the largest segment
1361 * ==========================================================================
1365 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1366 * to request from the allocator.
1368 uint64_t metaslab_ndf_clump_shift
= 4;
1371 metaslab_ndf_alloc(metaslab_t
*msp
, uint64_t size
)
1373 avl_tree_t
*t
= &msp
->ms_allocatable
->rt_root
;
1375 range_seg_t
*rs
, rsearch
;
1376 uint64_t hbit
= highbit64(size
);
1377 uint64_t *cursor
= &msp
->ms_lbas
[hbit
- 1];
1378 uint64_t max_size
= metaslab_block_maxsize(msp
);
1380 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1381 ASSERT3U(avl_numnodes(t
), ==,
1382 avl_numnodes(&msp
->ms_allocatable_by_size
));
1384 if (max_size
< size
)
1387 rsearch
.rs_start
= *cursor
;
1388 rsearch
.rs_end
= *cursor
+ size
;
1390 rs
= avl_find(t
, &rsearch
, &where
);
1391 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
) {
1392 t
= &msp
->ms_allocatable_by_size
;
1394 rsearch
.rs_start
= 0;
1395 rsearch
.rs_end
= MIN(max_size
,
1396 1ULL << (hbit
+ metaslab_ndf_clump_shift
));
1397 rs
= avl_find(t
, &rsearch
, &where
);
1399 rs
= avl_nearest(t
, where
, AVL_AFTER
);
1403 if ((rs
->rs_end
- rs
->rs_start
) >= size
) {
1404 *cursor
= rs
->rs_start
+ size
;
1405 return (rs
->rs_start
);
1410 static metaslab_ops_t metaslab_ndf_ops
= {
1414 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_ndf_ops
;
1415 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
1419 * ==========================================================================
1421 * ==========================================================================
1425 * Wait for any in-progress metaslab loads to complete.
1428 metaslab_load_wait(metaslab_t
*msp
)
1430 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1432 while (msp
->ms_loading
) {
1433 ASSERT(!msp
->ms_loaded
);
1434 cv_wait(&msp
->ms_load_cv
, &msp
->ms_lock
);
1439 * Wait for any in-progress flushing to complete.
1442 metaslab_flush_wait(metaslab_t
*msp
)
1444 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1446 while (msp
->ms_flushing
)
1447 cv_wait(&msp
->ms_flush_cv
, &msp
->ms_lock
);
1451 metaslab_allocated_space(metaslab_t
*msp
)
1453 return (msp
->ms_allocated_space
);
1457 * Verify that the space accounting on disk matches the in-core range_trees.
1460 metaslab_verify_space(metaslab_t
*msp
, uint64_t txg
)
1462 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1463 uint64_t allocating
= 0;
1464 uint64_t sm_free_space
, msp_free_space
;
1466 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1467 ASSERT(!msp
->ms_condensing
);
1469 if ((zfs_flags
& ZFS_DEBUG_METASLAB_VERIFY
) == 0)
1473 * We can only verify the metaslab space when we're called
1474 * from syncing context with a loaded metaslab that has an
1475 * allocated space map. Calling this in non-syncing context
1476 * does not provide a consistent view of the metaslab since
1477 * we're performing allocations in the future.
1479 if (txg
!= spa_syncing_txg(spa
) || msp
->ms_sm
== NULL
||
1484 * Even though the smp_alloc field can get negative,
1485 * when it comes to a metaslab's space map, that should
1486 * never be the case.
1488 ASSERT3S(space_map_allocated(msp
->ms_sm
), >=, 0);
1490 ASSERT3U(space_map_allocated(msp
->ms_sm
), >=,
1491 range_tree_space(msp
->ms_unflushed_frees
));
1493 ASSERT3U(metaslab_allocated_space(msp
), ==,
1494 space_map_allocated(msp
->ms_sm
) +
1495 range_tree_space(msp
->ms_unflushed_allocs
) -
1496 range_tree_space(msp
->ms_unflushed_frees
));
1498 sm_free_space
= msp
->ms_size
- metaslab_allocated_space(msp
);
1501 * Account for future allocations since we would have
1502 * already deducted that space from the ms_allocatable.
1504 for (int t
= 0; t
< TXG_CONCURRENT_STATES
; t
++) {
1506 range_tree_space(msp
->ms_allocating
[(txg
+ t
) & TXG_MASK
]);
1509 ASSERT3U(msp
->ms_deferspace
, ==,
1510 range_tree_space(msp
->ms_defer
[0]) +
1511 range_tree_space(msp
->ms_defer
[1]));
1513 msp_free_space
= range_tree_space(msp
->ms_allocatable
) + allocating
+
1514 msp
->ms_deferspace
+ range_tree_space(msp
->ms_freed
);
1516 VERIFY3U(sm_free_space
, ==, msp_free_space
);
1520 metaslab_aux_histograms_clear(metaslab_t
*msp
)
1523 * Auxiliary histograms are only cleared when resetting them,
1524 * which can only happen while the metaslab is loaded.
1526 ASSERT(msp
->ms_loaded
);
1528 bzero(msp
->ms_synchist
, sizeof (msp
->ms_synchist
));
1529 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++)
1530 bzero(msp
->ms_deferhist
[t
], sizeof (msp
->ms_deferhist
[t
]));
1534 metaslab_aux_histogram_add(uint64_t *histogram
, uint64_t shift
,
1538 * This is modeled after space_map_histogram_add(), so refer to that
1539 * function for implementation details. We want this to work like
1540 * the space map histogram, and not the range tree histogram, as we
1541 * are essentially constructing a delta that will be later subtracted
1542 * from the space map histogram.
1545 for (int i
= shift
; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++) {
1546 ASSERT3U(i
, >=, idx
+ shift
);
1547 histogram
[idx
] += rt
->rt_histogram
[i
] << (i
- idx
- shift
);
1549 if (idx
< SPACE_MAP_HISTOGRAM_SIZE
- 1) {
1550 ASSERT3U(idx
+ shift
, ==, i
);
1552 ASSERT3U(idx
, <, SPACE_MAP_HISTOGRAM_SIZE
);
1558 * Called at every sync pass that the metaslab gets synced.
1560 * The reason is that we want our auxiliary histograms to be updated
1561 * wherever the metaslab's space map histogram is updated. This way
1562 * we stay consistent on which parts of the metaslab space map's
1563 * histogram are currently not available for allocations (e.g because
1564 * they are in the defer, freed, and freeing trees).
1567 metaslab_aux_histograms_update(metaslab_t
*msp
)
1569 space_map_t
*sm
= msp
->ms_sm
;
1573 * This is similar to the metaslab's space map histogram updates
1574 * that take place in metaslab_sync(). The only difference is that
1575 * we only care about segments that haven't made it into the
1576 * ms_allocatable tree yet.
1578 if (msp
->ms_loaded
) {
1579 metaslab_aux_histograms_clear(msp
);
1581 metaslab_aux_histogram_add(msp
->ms_synchist
,
1582 sm
->sm_shift
, msp
->ms_freed
);
1584 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1585 metaslab_aux_histogram_add(msp
->ms_deferhist
[t
],
1586 sm
->sm_shift
, msp
->ms_defer
[t
]);
1590 metaslab_aux_histogram_add(msp
->ms_synchist
,
1591 sm
->sm_shift
, msp
->ms_freeing
);
1595 * Called every time we are done syncing (writing to) the metaslab,
1596 * i.e. at the end of each sync pass.
1597 * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
1600 metaslab_aux_histograms_update_done(metaslab_t
*msp
, boolean_t defer_allowed
)
1602 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1603 space_map_t
*sm
= msp
->ms_sm
;
1607 * We came here from metaslab_init() when creating/opening a
1608 * pool, looking at a metaslab that hasn't had any allocations
1615 * This is similar to the actions that we take for the ms_freed
1616 * and ms_defer trees in metaslab_sync_done().
1618 uint64_t hist_index
= spa_syncing_txg(spa
) % TXG_DEFER_SIZE
;
1619 if (defer_allowed
) {
1620 bcopy(msp
->ms_synchist
, msp
->ms_deferhist
[hist_index
],
1621 sizeof (msp
->ms_synchist
));
1623 bzero(msp
->ms_deferhist
[hist_index
],
1624 sizeof (msp
->ms_deferhist
[hist_index
]));
1626 bzero(msp
->ms_synchist
, sizeof (msp
->ms_synchist
));
1630 * Ensure that the metaslab's weight and fragmentation are consistent
1631 * with the contents of the histogram (either the range tree's histogram
1632 * or the space map's depending whether the metaslab is loaded).
1635 metaslab_verify_weight_and_frag(metaslab_t
*msp
)
1637 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1639 if ((zfs_flags
& ZFS_DEBUG_METASLAB_VERIFY
) == 0)
1642 /* see comment in metaslab_verify_unflushed_changes() */
1643 if (msp
->ms_group
== NULL
)
1647 * Devices being removed always return a weight of 0 and leave
1648 * fragmentation and ms_max_size as is - there is nothing for
1649 * us to verify here.
1651 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
1652 if (vd
->vdev_removing
)
1656 * If the metaslab is dirty it probably means that we've done
1657 * some allocations or frees that have changed our histograms
1658 * and thus the weight.
1660 for (int t
= 0; t
< TXG_SIZE
; t
++) {
1661 if (txg_list_member(&vd
->vdev_ms_list
, msp
, t
))
1666 * This verification checks that our in-memory state is consistent
1667 * with what's on disk. If the pool is read-only then there aren't
1668 * any changes and we just have the initially-loaded state.
1670 if (!spa_writeable(msp
->ms_group
->mg_vd
->vdev_spa
))
1673 /* some extra verification for in-core tree if you can */
1674 if (msp
->ms_loaded
) {
1675 range_tree_stat_verify(msp
->ms_allocatable
);
1676 VERIFY(space_map_histogram_verify(msp
->ms_sm
,
1677 msp
->ms_allocatable
));
1680 uint64_t weight
= msp
->ms_weight
;
1681 uint64_t was_active
= msp
->ms_weight
& METASLAB_ACTIVE_MASK
;
1682 boolean_t space_based
= WEIGHT_IS_SPACEBASED(msp
->ms_weight
);
1683 uint64_t frag
= msp
->ms_fragmentation
;
1684 uint64_t max_segsize
= msp
->ms_max_size
;
1687 msp
->ms_fragmentation
= 0;
1688 msp
->ms_max_size
= 0;
1691 * This function is used for verification purposes. Regardless of
1692 * whether metaslab_weight() thinks this metaslab should be active or
1693 * not, we want to ensure that the actual weight (and therefore the
1694 * value of ms_weight) would be the same if it was to be recalculated
1697 msp
->ms_weight
= metaslab_weight(msp
) | was_active
;
1699 VERIFY3U(max_segsize
, ==, msp
->ms_max_size
);
1702 * If the weight type changed then there is no point in doing
1703 * verification. Revert fields to their original values.
1705 if ((space_based
&& !WEIGHT_IS_SPACEBASED(msp
->ms_weight
)) ||
1706 (!space_based
&& WEIGHT_IS_SPACEBASED(msp
->ms_weight
))) {
1707 msp
->ms_fragmentation
= frag
;
1708 msp
->ms_weight
= weight
;
1712 VERIFY3U(msp
->ms_fragmentation
, ==, frag
);
1713 VERIFY3U(msp
->ms_weight
, ==, weight
);
1717 metaslab_load_impl(metaslab_t
*msp
)
1721 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1722 ASSERT(msp
->ms_loading
);
1723 ASSERT(!msp
->ms_condensing
);
1726 * We temporarily drop the lock to unblock other operations while we
1727 * are reading the space map. Therefore, metaslab_sync() and
1728 * metaslab_sync_done() can run at the same time as we do.
1730 * If we are using the log space maps, metaslab_sync() can't write to
1731 * the metaslab's space map while we are loading as we only write to
1732 * it when we are flushing the metaslab, and that can't happen while
1733 * we are loading it.
1735 * If we are not using log space maps though, metaslab_sync() can
1736 * append to the space map while we are loading. Therefore we load
1737 * only entries that existed when we started the load. Additionally,
1738 * metaslab_sync_done() has to wait for the load to complete because
1739 * there are potential races like metaslab_load() loading parts of the
1740 * space map that are currently being appended by metaslab_sync(). If
1741 * we didn't, the ms_allocatable would have entries that
1742 * metaslab_sync_done() would try to re-add later.
1744 * That's why before dropping the lock we remember the synced length
1745 * of the metaslab and read up to that point of the space map,
1746 * ignoring entries appended by metaslab_sync() that happen after we
1749 uint64_t length
= msp
->ms_synced_length
;
1750 mutex_exit(&msp
->ms_lock
);
1752 hrtime_t load_start
= gethrtime();
1753 if (msp
->ms_sm
!= NULL
) {
1754 error
= space_map_load_length(msp
->ms_sm
, msp
->ms_allocatable
,
1758 * The space map has not been allocated yet, so treat
1759 * all the space in the metaslab as free and add it to the
1760 * ms_allocatable tree.
1762 range_tree_add(msp
->ms_allocatable
,
1763 msp
->ms_start
, msp
->ms_size
);
1765 if (msp
->ms_freed
!= NULL
) {
1767 * If the ms_sm doesn't exist, this means that this
1768 * metaslab hasn't gone through metaslab_sync() and
1769 * thus has never been dirtied. So we shouldn't
1770 * expect any unflushed allocs or frees from previous
1773 * Note: ms_freed and all the other trees except for
1774 * the ms_allocatable, can be NULL at this point only
1775 * if this is a new metaslab of a vdev that just got
1778 ASSERT(range_tree_is_empty(msp
->ms_unflushed_allocs
));
1779 ASSERT(range_tree_is_empty(msp
->ms_unflushed_frees
));
1784 * We need to grab the ms_sync_lock to prevent metaslab_sync() from
1785 * changing the ms_sm (or log_sm) and the metaslab's range trees
1786 * while we are about to use them and populate the ms_allocatable.
1787 * The ms_lock is insufficient for this because metaslab_sync() doesn't
1788 * hold the ms_lock while writing the ms_checkpointing tree to disk.
1790 mutex_enter(&msp
->ms_sync_lock
);
1791 mutex_enter(&msp
->ms_lock
);
1793 ASSERT(!msp
->ms_condensing
);
1794 ASSERT(!msp
->ms_flushing
);
1797 mutex_exit(&msp
->ms_sync_lock
);
1801 ASSERT3P(msp
->ms_group
, !=, NULL
);
1802 msp
->ms_loaded
= B_TRUE
;
1805 * Apply all the unflushed changes to ms_allocatable right
1806 * away so any manipulations we do below have a clear view
1807 * of what is allocated and what is free.
1809 range_tree_walk(msp
->ms_unflushed_allocs
,
1810 range_tree_remove
, msp
->ms_allocatable
);
1811 range_tree_walk(msp
->ms_unflushed_frees
,
1812 range_tree_add
, msp
->ms_allocatable
);
1814 msp
->ms_loaded
= B_TRUE
;
1816 ASSERT3P(msp
->ms_group
, !=, NULL
);
1817 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1818 if (spa_syncing_log_sm(spa
) != NULL
) {
1819 ASSERT(spa_feature_is_enabled(spa
,
1820 SPA_FEATURE_LOG_SPACEMAP
));
1823 * If we use a log space map we add all the segments
1824 * that are in ms_unflushed_frees so they are available
1827 * ms_allocatable needs to contain all free segments
1828 * that are ready for allocations (thus not segments
1829 * from ms_freeing, ms_freed, and the ms_defer trees).
1830 * But if we grab the lock in this code path at a sync
 * pass later than 1, then it also contains the
1832 * segments of ms_freed (they were added to it earlier
1833 * in this path through ms_unflushed_frees). So we
1834 * need to remove all the segments that exist in
1835 * ms_freed from ms_allocatable as they will be added
1836 * later in metaslab_sync_done().
1838 * When there's no log space map, the ms_allocatable
1839 * correctly doesn't contain any segments that exist
1840 * in ms_freed [see ms_synced_length].
1842 range_tree_walk(msp
->ms_freed
,
1843 range_tree_remove
, msp
->ms_allocatable
);
1847 * If we are not using the log space map, ms_allocatable
1848 * contains the segments that exist in the ms_defer trees
1849 * [see ms_synced_length]. Thus we need to remove them
1850 * from ms_allocatable as they will be added again in
1851 * metaslab_sync_done().
1853 * If we are using the log space map, ms_allocatable still
1854 * contains the segments that exist in the ms_defer trees.
1855 * Not because it read them through the ms_sm though. But
1856 * because these segments are part of ms_unflushed_frees
1857 * whose segments we add to ms_allocatable earlier in this
1860 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1861 range_tree_walk(msp
->ms_defer
[t
],
1862 range_tree_remove
, msp
->ms_allocatable
);
1866 * Call metaslab_recalculate_weight_and_sort() now that the
1867 * metaslab is loaded so we get the metaslab's real weight.
1869 * Unless this metaslab was created with older software and
1870 * has not yet been converted to use segment-based weight, we
1871 * expect the new weight to be better or equal to the weight
1872 * that the metaslab had while it was not loaded. This is
1873 * because the old weight does not take into account the
1874 * consolidation of adjacent segments between TXGs. [see
1875 * comment for ms_synchist and ms_deferhist[] for more info]
1877 uint64_t weight
= msp
->ms_weight
;
1878 metaslab_recalculate_weight_and_sort(msp
);
1879 if (!WEIGHT_IS_SPACEBASED(weight
))
1880 ASSERT3U(weight
, <=, msp
->ms_weight
);
1881 msp
->ms_max_size
= metaslab_block_maxsize(msp
);
1883 hrtime_t load_end
= gethrtime();
1884 if (zfs_flags
& ZFS_DEBUG_LOG_SPACEMAP
) {
1885 zfs_dbgmsg("loading: txg %llu, spa %s, vdev_id %llu, "
1886 "ms_id %llu, smp_length %llu, "
1887 "unflushed_allocs %llu, unflushed_frees %llu, "
1888 "freed %llu, defer %llu + %llu, "
1889 "loading_time %lld ms",
1890 spa_syncing_txg(spa
), spa_name(spa
),
1891 msp
->ms_group
->mg_vd
->vdev_id
, msp
->ms_id
,
1892 space_map_length(msp
->ms_sm
),
1893 range_tree_space(msp
->ms_unflushed_allocs
),
1894 range_tree_space(msp
->ms_unflushed_frees
),
1895 range_tree_space(msp
->ms_freed
),
1896 range_tree_space(msp
->ms_defer
[0]),
1897 range_tree_space(msp
->ms_defer
[1]),
1898 (longlong_t
)((load_end
- load_start
) / 1000000));
1901 metaslab_verify_space(msp
, spa_syncing_txg(spa
));
1902 mutex_exit(&msp
->ms_sync_lock
);
1907 metaslab_load(metaslab_t
*msp
)
1909 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1912 * There may be another thread loading the same metaslab, if that's
1913 * the case just wait until the other thread is done and return.
1915 metaslab_load_wait(msp
);
1918 VERIFY(!msp
->ms_loading
);
1919 ASSERT(!msp
->ms_condensing
);
1922 * We set the loading flag BEFORE potentially dropping the lock to
1923 * wait for an ongoing flush (see ms_flushing below). This way other
1924 * threads know that there is already a thread that is loading this
1927 msp
->ms_loading
= B_TRUE
;
1930 * Wait for any in-progress flushing to finish as we drop the ms_lock
1931 * both here (during space_map_load()) and in metaslab_flush() (when
1932 * we flush our changes to the ms_sm).
1934 if (msp
->ms_flushing
)
1935 metaslab_flush_wait(msp
);
1938 * In the possibility that we were waiting for the metaslab to be
1939 * flushed (where we temporarily dropped the ms_lock), ensure that
1940 * no one else loaded the metaslab somehow.
1942 ASSERT(!msp
->ms_loaded
);
1944 int error
= metaslab_load_impl(msp
);
1946 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1947 msp
->ms_loading
= B_FALSE
;
1948 cv_broadcast(&msp
->ms_load_cv
);
1954 metaslab_unload(metaslab_t
*msp
)
1956 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1958 metaslab_verify_weight_and_frag(msp
);
1960 range_tree_vacate(msp
->ms_allocatable
, NULL
, NULL
);
1961 msp
->ms_loaded
= B_FALSE
;
1963 msp
->ms_activation_weight
= 0;
1964 msp
->ms_weight
&= ~METASLAB_ACTIVE_MASK
;
1965 msp
->ms_max_size
= 0;
1968 * We explicitly recalculate the metaslab's weight based on its space
1969 * map (as it is now not loaded). We want unload metaslabs to always
1970 * have their weights calculated from the space map histograms, while
1971 * loaded ones have it calculated from their in-core range tree
1972 * [see metaslab_load()]. This way, the weight reflects the information
1973 * available in-core, whether it is loaded or not.
1975 * If ms_group == NULL means that we came here from metaslab_fini(),
1976 * at which point it doesn't make sense for us to do the recalculation
1979 if (msp
->ms_group
!= NULL
)
1980 metaslab_recalculate_weight_and_sort(msp
);
1984 metaslab_space_update(vdev_t
*vd
, metaslab_class_t
*mc
, int64_t alloc_delta
,
1985 int64_t defer_delta
, int64_t space_delta
)
1987 vdev_space_update(vd
, alloc_delta
, defer_delta
, space_delta
);
1989 ASSERT3P(vd
->vdev_spa
->spa_root_vdev
, ==, vd
->vdev_parent
);
1990 ASSERT(vd
->vdev_ms_count
!= 0);
1992 metaslab_class_space_update(mc
, alloc_delta
, defer_delta
, space_delta
,
1993 vdev_deflated_space(vd
, space_delta
));
1997 metaslab_init(metaslab_group_t
*mg
, uint64_t id
, uint64_t object
,
1998 uint64_t txg
, metaslab_t
**msp
)
2000 vdev_t
*vd
= mg
->mg_vd
;
2001 spa_t
*spa
= vd
->vdev_spa
;
2002 objset_t
*mos
= spa
->spa_meta_objset
;
2006 ms
= kmem_zalloc(sizeof (metaslab_t
), KM_SLEEP
);
2007 mutex_init(&ms
->ms_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
2008 mutex_init(&ms
->ms_sync_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
2009 cv_init(&ms
->ms_load_cv
, NULL
, CV_DEFAULT
, NULL
);
2010 cv_init(&ms
->ms_flush_cv
, NULL
, CV_DEFAULT
, NULL
);
2013 ms
->ms_start
= id
<< vd
->vdev_ms_shift
;
2014 ms
->ms_size
= 1ULL << vd
->vdev_ms_shift
;
2015 ms
->ms_allocator
= -1;
2016 ms
->ms_new
= B_TRUE
;
2019 * We only open space map objects that already exist. All others
2020 * will be opened when we finally allocate an object for it.
2023 * When called from vdev_expand(), we can't call into the DMU as
2024 * we are holding the spa_config_lock as a writer and we would
2025 * deadlock [see relevant comment in vdev_metaslab_init()]. in
2026 * that case, the object parameter is zero though, so we won't
2027 * call into the DMU.
2030 error
= space_map_open(&ms
->ms_sm
, mos
, object
, ms
->ms_start
,
2031 ms
->ms_size
, vd
->vdev_ashift
);
2034 kmem_free(ms
, sizeof (metaslab_t
));
2038 ASSERT(ms
->ms_sm
!= NULL
);
2039 ms
->ms_allocated_space
= space_map_allocated(ms
->ms_sm
);
2043 * We create the ms_allocatable here, but we don't create the
2044 * other range trees until metaslab_sync_done(). This serves
2045 * two purposes: it allows metaslab_sync_done() to detect the
2046 * addition of new space; and for debugging, it ensures that
2047 * we'd data fault on any attempt to use this metaslab before
2050 ms
->ms_allocatable
= range_tree_create_impl(&rt_avl_ops
,
2051 &ms
->ms_allocatable_by_size
, metaslab_rangesize_compare
, 0);
2053 ms
->ms_trim
= range_tree_create(NULL
, NULL
);
2055 metaslab_group_add(mg
, ms
);
2056 metaslab_set_fragmentation(ms
);
2059 * If we're opening an existing pool (txg == 0) or creating
2060 * a new one (txg == TXG_INITIAL), all space is available now.
2061 * If we're adding space to an existing pool, the new space
2062 * does not become available until after this txg has synced.
2063 * The metaslab's weight will also be initialized when we sync
2064 * out this txg. This ensures that we don't attempt to allocate
2065 * from it before we have initialized it completely.
2067 if (txg
<= TXG_INITIAL
) {
2068 metaslab_sync_done(ms
, 0);
2069 metaslab_space_update(vd
, mg
->mg_class
,
2070 metaslab_allocated_space(ms
), 0, 0);
2074 vdev_dirty(vd
, 0, NULL
, txg
);
2075 vdev_dirty(vd
, VDD_METASLAB
, ms
, txg
);
2084 metaslab_fini_flush_data(metaslab_t
*msp
)
2086 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
2088 if (metaslab_unflushed_txg(msp
) == 0) {
2089 ASSERT3P(avl_find(&spa
->spa_metaslabs_by_flushed
, msp
, NULL
),
2093 ASSERT(spa_feature_is_active(spa
, SPA_FEATURE_LOG_SPACEMAP
));
2095 mutex_enter(&spa
->spa_flushed_ms_lock
);
2096 avl_remove(&spa
->spa_metaslabs_by_flushed
, msp
);
2097 mutex_exit(&spa
->spa_flushed_ms_lock
);
2099 spa_log_sm_decrement_mscount(spa
, metaslab_unflushed_txg(msp
));
2100 spa_log_summary_decrement_mscount(spa
, metaslab_unflushed_txg(msp
));
2104 metaslab_unflushed_changes_memused(metaslab_t
*ms
)
2106 return ((range_tree_numsegs(ms
->ms_unflushed_allocs
) +
2107 range_tree_numsegs(ms
->ms_unflushed_frees
)) *
2108 sizeof (range_seg_t
));
2112 metaslab_fini(metaslab_t
*msp
)
2114 metaslab_group_t
*mg
= msp
->ms_group
;
2115 vdev_t
*vd
= mg
->mg_vd
;
2116 spa_t
*spa
= vd
->vdev_spa
;
2118 metaslab_fini_flush_data(msp
);
2120 metaslab_group_remove(mg
, msp
);
2122 mutex_enter(&msp
->ms_lock
);
2123 VERIFY(msp
->ms_group
== NULL
);
2124 metaslab_space_update(vd
, mg
->mg_class
,
2125 -metaslab_allocated_space(msp
), 0, -msp
->ms_size
);
2127 space_map_close(msp
->ms_sm
);
2130 metaslab_unload(msp
);
2131 range_tree_destroy(msp
->ms_allocatable
);
2132 range_tree_destroy(msp
->ms_freeing
);
2133 range_tree_destroy(msp
->ms_freed
);
2135 ASSERT3U(spa
->spa_unflushed_stats
.sus_memused
, >=,
2136 metaslab_unflushed_changes_memused(msp
));
2137 spa
->spa_unflushed_stats
.sus_memused
-=
2138 metaslab_unflushed_changes_memused(msp
);
2139 range_tree_vacate(msp
->ms_unflushed_allocs
, NULL
, NULL
);
2140 range_tree_destroy(msp
->ms_unflushed_allocs
);
2141 range_tree_vacate(msp
->ms_unflushed_frees
, NULL
, NULL
);
2142 range_tree_destroy(msp
->ms_unflushed_frees
);
2144 for (int t
= 0; t
< TXG_SIZE
; t
++) {
2145 range_tree_destroy(msp
->ms_allocating
[t
]);
2148 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
2149 range_tree_destroy(msp
->ms_defer
[t
]);
2151 ASSERT0(msp
->ms_deferspace
);
2153 range_tree_destroy(msp
->ms_checkpointing
);
2155 for (int t
= 0; t
< TXG_SIZE
; t
++)
2156 ASSERT(!txg_list_member(&vd
->vdev_ms_list
, msp
, t
));
2158 range_tree_vacate(msp
->ms_trim
, NULL
, NULL
);
2159 range_tree_destroy(msp
->ms_trim
);
2161 mutex_exit(&msp
->ms_lock
);
2162 cv_destroy(&msp
->ms_load_cv
);
2163 cv_destroy(&msp
->ms_flush_cv
);
2164 mutex_destroy(&msp
->ms_lock
);
2165 mutex_destroy(&msp
->ms_sync_lock
);
2166 ASSERT3U(msp
->ms_allocator
, ==, -1);
2168 kmem_free(msp
, sizeof (metaslab_t
));
2171 #define FRAGMENTATION_TABLE_SIZE 17
2174 * This table defines a segment size based fragmentation metric that will
2175 * allow each metaslab to derive its own fragmentation value. This is done
2176 * by calculating the space in each bucket of the spacemap histogram and
2177 * multiplying that by the fragmentation metric in this table. Doing
2178 * this for all buckets and dividing it by the total amount of free
2179 * space in this metaslab (i.e. the total free space in all buckets) gives
2180 * us the fragmentation metric. This means that a high fragmentation metric
2181 * equates to most of the free space being comprised of small segments.
2182 * Conversely, if the metric is low, then most of the free space is in
2183 * large segments. A 10% change in fragmentation equates to approximately
2184 * double the number of segments.
2186 * This table defines 0% fragmented space using 16MB segments. Testing has
2187 * shown that segments that are greater than or equal to 16MB do not suffer
2188 * from drastic performance problems. Using this value, we derive the rest
2189 * of the table. Since the fragmentation value is never stored on disk, it
2190 * is possible to change these calculations in the future.
2192 int zfs_frag_table
[FRAGMENTATION_TABLE_SIZE
] = {
2212 * Calculate the metaslab's fragmentation metric and set ms_fragmentation.
2213 * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
2214 * been upgraded and does not support this metric. Otherwise, the return
2215 * value should be in the range [0, 100].
2218 metaslab_set_fragmentation(metaslab_t
*msp
)
2220 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
2221 uint64_t fragmentation
= 0;
2223 boolean_t feature_enabled
= spa_feature_is_enabled(spa
,
2224 SPA_FEATURE_SPACEMAP_HISTOGRAM
);
2226 if (!feature_enabled
) {
2227 msp
->ms_fragmentation
= ZFS_FRAG_INVALID
;
2232 * A null space map means that the entire metaslab is free
2233 * and thus is not fragmented.
2235 if (msp
->ms_sm
== NULL
) {
2236 msp
->ms_fragmentation
= 0;
2241 * If this metaslab's space map has not been upgraded, flag it
2242 * so that we upgrade next time we encounter it.
2244 if (msp
->ms_sm
->sm_dbuf
->db_size
!= sizeof (space_map_phys_t
)) {
2245 uint64_t txg
= spa_syncing_txg(spa
);
2246 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
2249 * If we've reached the final dirty txg, then we must
2250 * be shutting down the pool. We don't want to dirty
2251 * any data past this point so skip setting the condense
2252 * flag. We can retry this action the next time the pool
2255 if (spa_writeable(spa
) && txg
< spa_final_dirty_txg(spa
)) {
2256 msp
->ms_condense_wanted
= B_TRUE
;
2257 vdev_dirty(vd
, VDD_METASLAB
, msp
, txg
+ 1);
2258 zfs_dbgmsg("txg %llu, requesting force condense: "
2259 "ms_id %llu, vdev_id %llu", txg
, msp
->ms_id
,
2262 msp
->ms_fragmentation
= ZFS_FRAG_INVALID
;
2266 for (int i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
2268 uint8_t shift
= msp
->ms_sm
->sm_shift
;
2270 int idx
= MIN(shift
- SPA_MINBLOCKSHIFT
+ i
,
2271 FRAGMENTATION_TABLE_SIZE
- 1);
2273 if (msp
->ms_sm
->sm_phys
->smp_histogram
[i
] == 0)
2276 space
= msp
->ms_sm
->sm_phys
->smp_histogram
[i
] << (i
+ shift
);
2279 ASSERT3U(idx
, <, FRAGMENTATION_TABLE_SIZE
);
2280 fragmentation
+= space
* zfs_frag_table
[idx
];
2284 fragmentation
/= total
;
2285 ASSERT3U(fragmentation
, <=, 100);
2287 msp
->ms_fragmentation
= fragmentation
;
2291 * Compute a weight -- a selection preference value -- for the given metaslab.
2292 * This is based on the amount of free space, the level of fragmentation,
2293 * the LBA range, and whether the metaslab is loaded.
2296 metaslab_space_weight(metaslab_t
*msp
)
2298 metaslab_group_t
*mg
= msp
->ms_group
;
2299 vdev_t
*vd
= mg
->mg_vd
;
2300 uint64_t weight
, space
;
2302 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
2303 ASSERT(!vd
->vdev_removing
);
2306 * The baseline weight is the metaslab's free space.
2308 space
= msp
->ms_size
- metaslab_allocated_space(msp
);
2310 if (metaslab_fragmentation_factor_enabled
&&
2311 msp
->ms_fragmentation
!= ZFS_FRAG_INVALID
) {
2313 * Use the fragmentation information to inversely scale
2314 * down the baseline weight. We need to ensure that we
2315 * don't exclude this metaslab completely when it's 100%
2316 * fragmented. To avoid this we reduce the fragmented value
2319 space
= (space
* (100 - (msp
->ms_fragmentation
- 1))) / 100;
2322 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
2323 * this metaslab again. The fragmentation metric may have
2324 * decreased the space to something smaller than
2325 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
2326 * so that we can consume any remaining space.
2328 if (space
> 0 && space
< SPA_MINBLOCKSIZE
)
2329 space
= SPA_MINBLOCKSIZE
;
2334 * Modern disks have uniform bit density and constant angular velocity.
2335 * Therefore, the outer recording zones are faster (higher bandwidth)
2336 * than the inner zones by the ratio of outer to inner track diameter,
2337 * which is typically around 2:1. We account for this by assigning
2338 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
2339 * In effect, this means that we'll select the metaslab with the most
2340 * free bandwidth rather than simply the one with the most free space.
2342 if (!vd
->vdev_nonrot
&& metaslab_lba_weighting_enabled
) {
2343 weight
= 2 * weight
- (msp
->ms_id
* weight
) / vd
->vdev_ms_count
;
2344 ASSERT(weight
>= space
&& weight
<= 2 * space
);
2348 * If this metaslab is one we're actively using, adjust its
2349 * weight to make it preferable to any inactive metaslab so
2350 * we'll polish it off. If the fragmentation on this metaslab
2351 * has exceed our threshold, then don't mark it active.
2353 if (msp
->ms_loaded
&& msp
->ms_fragmentation
!= ZFS_FRAG_INVALID
&&
2354 msp
->ms_fragmentation
<= zfs_metaslab_fragmentation_threshold
) {
2355 weight
|= (msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
2358 WEIGHT_SET_SPACEBASED(weight
);
2363 * Return the weight of the specified metaslab, according to the segment-based
2364 * weighting algorithm. The metaslab must be loaded. This function can
2365 * be called within a sync pass since it relies only on the metaslab's
2366 * range tree which is always accurate when the metaslab is loaded.
2369 metaslab_weight_from_range_tree(metaslab_t
*msp
)
2371 uint64_t weight
= 0;
2372 uint32_t segments
= 0;
2374 ASSERT(msp
->ms_loaded
);
2376 for (int i
= RANGE_TREE_HISTOGRAM_SIZE
- 1; i
>= SPA_MINBLOCKSHIFT
;
2378 uint8_t shift
= msp
->ms_group
->mg_vd
->vdev_ashift
;
2379 int max_idx
= SPACE_MAP_HISTOGRAM_SIZE
+ shift
- 1;
2382 segments
+= msp
->ms_allocatable
->rt_histogram
[i
];
2385 * The range tree provides more precision than the space map
2386 * and must be downgraded so that all values fit within the
2387 * space map's histogram. This allows us to compare loaded
2388 * vs. unloaded metaslabs to determine which metaslab is
2389 * considered "best".
2394 if (segments
!= 0) {
2395 WEIGHT_SET_COUNT(weight
, segments
);
2396 WEIGHT_SET_INDEX(weight
, i
);
2397 WEIGHT_SET_ACTIVE(weight
, 0);
2405 * Calculate the weight based on the on-disk histogram. Should be applied
2406 * only to unloaded metaslabs (i.e no incoming allocations) in-order to
2407 * give results consistent with the on-disk state
2410 metaslab_weight_from_spacemap(metaslab_t
*msp
)
2412 space_map_t
*sm
= msp
->ms_sm
;
2413 ASSERT(!msp
->ms_loaded
);
2415 ASSERT3U(space_map_object(sm
), !=, 0);
2416 ASSERT3U(sm
->sm_dbuf
->db_size
, ==, sizeof (space_map_phys_t
));
2419 * Create a joint histogram from all the segments that have made
2420 * it to the metaslab's space map histogram, that are not yet
2421 * available for allocation because they are still in the freeing
2422 * pipeline (e.g. freeing, freed, and defer trees). Then subtract
2423 * these segments from the space map's histogram to get a more
2426 uint64_t deferspace_histogram
[SPACE_MAP_HISTOGRAM_SIZE
] = {0};
2427 for (int i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++)
2428 deferspace_histogram
[i
] += msp
->ms_synchist
[i
];
2429 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
2430 for (int i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
2431 deferspace_histogram
[i
] += msp
->ms_deferhist
[t
][i
];
2435 uint64_t weight
= 0;
2436 for (int i
= SPACE_MAP_HISTOGRAM_SIZE
- 1; i
>= 0; i
--) {
2437 ASSERT3U(sm
->sm_phys
->smp_histogram
[i
], >=,
2438 deferspace_histogram
[i
]);
2440 sm
->sm_phys
->smp_histogram
[i
] - deferspace_histogram
[i
];
2442 WEIGHT_SET_COUNT(weight
, count
);
2443 WEIGHT_SET_INDEX(weight
, i
+ sm
->sm_shift
);
2444 WEIGHT_SET_ACTIVE(weight
, 0);
2452 * Compute a segment-based weight for the specified metaslab. The weight
2453 * is determined by highest bucket in the histogram. The information
2454 * for the highest bucket is encoded into the weight value.
2457 metaslab_segment_weight(metaslab_t
*msp
)
2459 metaslab_group_t
*mg
= msp
->ms_group
;
2460 uint64_t weight
= 0;
2461 uint8_t shift
= mg
->mg_vd
->vdev_ashift
;
2463 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
2466 * The metaslab is completely free.
2468 if (metaslab_allocated_space(msp
) == 0) {
2469 int idx
= highbit64(msp
->ms_size
) - 1;
2470 int max_idx
= SPACE_MAP_HISTOGRAM_SIZE
+ shift
- 1;
2472 if (idx
< max_idx
) {
2473 WEIGHT_SET_COUNT(weight
, 1ULL);
2474 WEIGHT_SET_INDEX(weight
, idx
);
2476 WEIGHT_SET_COUNT(weight
, 1ULL << (idx
- max_idx
));
2477 WEIGHT_SET_INDEX(weight
, max_idx
);
2479 WEIGHT_SET_ACTIVE(weight
, 0);
2480 ASSERT(!WEIGHT_IS_SPACEBASED(weight
));
2484 ASSERT3U(msp
->ms_sm
->sm_dbuf
->db_size
, ==, sizeof (space_map_phys_t
));
2487 * If the metaslab is fully allocated then just make the weight 0.
2489 if (metaslab_allocated_space(msp
) == msp
->ms_size
)
2492 * If the metaslab is already loaded, then use the range tree to
2493 * determine the weight. Otherwise, we rely on the space map information
2494 * to generate the weight.
2496 if (msp
->ms_loaded
) {
2497 weight
= metaslab_weight_from_range_tree(msp
);
2499 weight
= metaslab_weight_from_spacemap(msp
);
2503 * If the metaslab was active the last time we calculated its weight
2504 * then keep it active. We want to consume the entire region that
2505 * is associated with this weight.
2507 if (msp
->ms_activation_weight
!= 0 && weight
!= 0)
2508 WEIGHT_SET_ACTIVE(weight
, WEIGHT_GET_ACTIVE(msp
->ms_weight
));
2513 * Determine if we should attempt to allocate from this metaslab. If the
2514 * metaslab is loaded, then we can determine if the desired allocation
2515 * can be satisfied by looking at the size of the maximum free segment
2516 * on that metaslab. Otherwise, we make our decision based on the metaslab's
2517 * weight. For segment-based weighting we can determine the maximum
2518 * allocation based on the index encoded in its value. For space-based
2519 * weights we rely on the entire weight (excluding the weight-type bit).
2522 metaslab_should_allocate(metaslab_t
*msp
, uint64_t asize
)
2524 if (msp
->ms_loaded
) {
2525 return (msp
->ms_max_size
>= asize
);
2527 ASSERT0(msp
->ms_max_size
);
2530 boolean_t should_allocate
;
2531 if (!WEIGHT_IS_SPACEBASED(msp
->ms_weight
)) {
2533 * The metaslab segment weight indicates segments in the
2534 * range [2^i, 2^(i+1)), where i is the index in the weight.
2535 * Since the asize might be in the middle of the range, we
2536 * should attempt the allocation if asize < 2^(i+1).
2538 should_allocate
= (asize
<
2539 1ULL << (WEIGHT_GET_INDEX(msp
->ms_weight
) + 1));
2541 should_allocate
= (asize
<=
2542 (msp
->ms_weight
& ~METASLAB_WEIGHT_TYPE
));
2545 return (should_allocate
);
2548 metaslab_weight(metaslab_t
*msp
)
2550 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
2551 spa_t
*spa
= vd
->vdev_spa
;
2554 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
2557 * If this vdev is in the process of being removed, there is nothing
2558 * for us to do here.
2560 if (vd
->vdev_removing
)
2563 metaslab_set_fragmentation(msp
);
2566 * Update the maximum size if the metaslab is loaded. This will
2567 * ensure that we get an accurate maximum size if newly freed space
2568 * has been added back into the free tree.
2571 msp
->ms_max_size
= metaslab_block_maxsize(msp
);
2573 ASSERT0(msp
->ms_max_size
);
2576 * Segment-based weighting requires space map histogram support.
2578 if (zfs_metaslab_segment_weight_enabled
&&
2579 spa_feature_is_enabled(spa
, SPA_FEATURE_SPACEMAP_HISTOGRAM
) &&
2580 (msp
->ms_sm
== NULL
|| msp
->ms_sm
->sm_dbuf
->db_size
==
2581 sizeof (space_map_phys_t
))) {
2582 weight
= metaslab_segment_weight(msp
);
2584 weight
= metaslab_space_weight(msp
);
2590 metaslab_recalculate_weight_and_sort(metaslab_t
*msp
)
2592 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
2594 /* note: we preserve the mask (e.g. indication of primary, etc..) */
2595 uint64_t was_active
= msp
->ms_weight
& METASLAB_ACTIVE_MASK
;
2596 metaslab_group_sort(msp
->ms_group
, msp
,
2597 metaslab_weight(msp
) | was_active
);
2601 metaslab_activate_allocator(metaslab_group_t
*mg
, metaslab_t
*msp
,
2602 int allocator
, uint64_t activation_weight
)
2604 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
2607 * If we're activating for the claim code, we don't want to actually
2608 * set the metaslab up for a specific allocator.
2610 if (activation_weight
== METASLAB_WEIGHT_CLAIM
)
2613 metaslab_t
**arr
= (activation_weight
== METASLAB_WEIGHT_PRIMARY
?
2614 mg
->mg_primaries
: mg
->mg_secondaries
);
2616 mutex_enter(&mg
->mg_lock
);
2617 if (arr
[allocator
] != NULL
) {
2618 mutex_exit(&mg
->mg_lock
);
2622 arr
[allocator
] = msp
;
2623 ASSERT3S(msp
->ms_allocator
, ==, -1);
2624 msp
->ms_allocator
= allocator
;
2625 msp
->ms_primary
= (activation_weight
== METASLAB_WEIGHT_PRIMARY
);
2626 mutex_exit(&mg
->mg_lock
);
2632 metaslab_activate(metaslab_t
*msp
, int allocator
, uint64_t activation_weight
)
2634 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
2637 * The current metaslab is already activated for us so there
2638 * is nothing to do. Already activated though, doesn't mean
2639 * that this metaslab is activated for our allocator nor our
2640 * requested activation weight. The metaslab could have started
2641 * as an active one for our allocator but changed allocators
2642 * while we were waiting to grab its ms_lock or we stole it
2643 * [see find_valid_metaslab()]. This means that there is a
2644 * possibility of passivating a metaslab of another allocator
2645 * or from a different activation mask, from this thread.
2647 if ((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) != 0) {
2648 ASSERT(msp
->ms_loaded
);
2652 int error
= metaslab_load(msp
);
2654 metaslab_group_sort(msp
->ms_group
, msp
, 0);
2659 * When entering metaslab_load() we may have dropped the
2660 * ms_lock because we were loading this metaslab, or we
2661 * were waiting for another thread to load it for us. In
2662 * that scenario, we recheck the weight of the metaslab
2663 * to see if it was activated by another thread.
2665 * If the metaslab was activated for another allocator or
2666 * it was activated with a different activation weight (e.g.
2667 * we wanted to make it a primary but it was activated as
2668 * secondary) we return error (EBUSY).
2670 * If the metaslab was activated for the same allocator
2671 * and requested activation mask, skip activating it.
2673 if ((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) != 0) {
2674 if (msp
->ms_allocator
!= allocator
)
2677 if ((msp
->ms_weight
& activation_weight
) == 0)
2678 return (SET_ERROR(EBUSY
));
2680 EQUIV((activation_weight
== METASLAB_WEIGHT_PRIMARY
),
2686 * If the metaslab has literally 0 space, it will have weight 0. In
2687 * that case, don't bother activating it. This can happen if the
2688 * metaslab had space during find_valid_metaslab, but another thread
2689 * loaded it and used all that space while we were waiting to grab the
2692 if (msp
->ms_weight
== 0) {
2693 ASSERT0(range_tree_space(msp
->ms_allocatable
));
2694 return (SET_ERROR(ENOSPC
));
2697 if ((error
= metaslab_activate_allocator(msp
->ms_group
, msp
,
2698 allocator
, activation_weight
)) != 0) {
2702 ASSERT0(msp
->ms_activation_weight
);
2703 msp
->ms_activation_weight
= msp
->ms_weight
;
2704 metaslab_group_sort(msp
->ms_group
, msp
,
2705 msp
->ms_weight
| activation_weight
);
2707 ASSERT(msp
->ms_loaded
);
2708 ASSERT(msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
2714 metaslab_passivate_allocator(metaslab_group_t
*mg
, metaslab_t
*msp
,
2717 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
2718 ASSERT(msp
->ms_loaded
);
2720 if (msp
->ms_weight
& METASLAB_WEIGHT_CLAIM
) {
2721 metaslab_group_sort(mg
, msp
, weight
);
2725 mutex_enter(&mg
->mg_lock
);
2726 ASSERT3P(msp
->ms_group
, ==, mg
);
2727 ASSERT3S(0, <=, msp
->ms_allocator
);
2728 ASSERT3U(msp
->ms_allocator
, <, mg
->mg_allocators
);
2730 if (msp
->ms_primary
) {
2731 ASSERT3P(mg
->mg_primaries
[msp
->ms_allocator
], ==, msp
);
2732 ASSERT(msp
->ms_weight
& METASLAB_WEIGHT_PRIMARY
);
2733 mg
->mg_primaries
[msp
->ms_allocator
] = NULL
;
2735 ASSERT3P(mg
->mg_secondaries
[msp
->ms_allocator
], ==, msp
);
2736 ASSERT(msp
->ms_weight
& METASLAB_WEIGHT_SECONDARY
);
2737 mg
->mg_secondaries
[msp
->ms_allocator
] = NULL
;
2739 msp
->ms_allocator
= -1;
2740 metaslab_group_sort_impl(mg
, msp
, weight
);
2741 mutex_exit(&mg
->mg_lock
);
2745 metaslab_passivate(metaslab_t
*msp
, uint64_t weight
)
2747 ASSERTV(uint64_t size
= weight
& ~METASLAB_WEIGHT_TYPE
);
2750 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
2751 * this metaslab again. In that case, it had better be empty,
2752 * or we would be leaving space on the table.
2754 ASSERT(!WEIGHT_IS_SPACEBASED(msp
->ms_weight
) ||
2755 size
>= SPA_MINBLOCKSIZE
||
2756 range_tree_space(msp
->ms_allocatable
) == 0);
2757 ASSERT0(weight
& METASLAB_ACTIVE_MASK
);
2759 ASSERT(msp
->ms_activation_weight
!= 0);
2760 msp
->ms_activation_weight
= 0;
2761 metaslab_passivate_allocator(msp
->ms_group
, msp
, weight
);
2762 ASSERT0(msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
2766 * Segment-based metaslabs are activated once and remain active until
2767 * we either fail an allocation attempt (similar to space-based metaslabs)
2768 * or have exhausted the free space in zfs_metaslab_switch_threshold
2769 * buckets since the metaslab was activated. This function checks to see
2770 * if we've exhaused the zfs_metaslab_switch_threshold buckets in the
2771 * metaslab and passivates it proactively. This will allow us to select a
2772 * metaslab with a larger contiguous region, if any, remaining within this
2773 * metaslab group. If we're in sync pass > 1, then we continue using this
2774 * metaslab so that we don't dirty more block and cause more sync passes.
2777 metaslab_segment_may_passivate(metaslab_t
*msp
)
2779 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
2781 if (WEIGHT_IS_SPACEBASED(msp
->ms_weight
) || spa_sync_pass(spa
) > 1)
2785 * Since we are in the middle of a sync pass, the most accurate
2786 * information that is accessible to us is the in-core range tree
2787 * histogram; calculate the new weight based on that information.
2789 uint64_t weight
= metaslab_weight_from_range_tree(msp
);
2790 int activation_idx
= WEIGHT_GET_INDEX(msp
->ms_activation_weight
);
2791 int current_idx
= WEIGHT_GET_INDEX(weight
);
2793 if (current_idx
<= activation_idx
- zfs_metaslab_switch_threshold
)
2794 metaslab_passivate(msp
, weight
);
2798 metaslab_preload(void *arg
)
2800 metaslab_t
*msp
= arg
;
2801 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
2802 fstrans_cookie_t cookie
= spl_fstrans_mark();
2804 ASSERT(!MUTEX_HELD(&msp
->ms_group
->mg_lock
));
2806 mutex_enter(&msp
->ms_lock
);
2807 (void) metaslab_load(msp
);
2808 msp
->ms_selected_txg
= spa_syncing_txg(spa
);
2809 mutex_exit(&msp
->ms_lock
);
2810 spl_fstrans_unmark(cookie
);
2814 metaslab_group_preload(metaslab_group_t
*mg
)
2816 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
2818 avl_tree_t
*t
= &mg
->mg_metaslab_tree
;
2821 if (spa_shutting_down(spa
) || !metaslab_preload_enabled
) {
2822 taskq_wait_outstanding(mg
->mg_taskq
, 0);
2826 mutex_enter(&mg
->mg_lock
);
2829 * Load the next potential metaslabs
2831 for (msp
= avl_first(t
); msp
!= NULL
; msp
= AVL_NEXT(t
, msp
)) {
2832 ASSERT3P(msp
->ms_group
, ==, mg
);
2835 * We preload only the maximum number of metaslabs specified
2836 * by metaslab_preload_limit. If a metaslab is being forced
2837 * to condense then we preload it too. This will ensure
2838 * that force condensing happens in the next txg.
2840 if (++m
> metaslab_preload_limit
&& !msp
->ms_condense_wanted
) {
2844 VERIFY(taskq_dispatch(mg
->mg_taskq
, metaslab_preload
,
2845 msp
, TQ_SLEEP
) != TASKQID_INVALID
);
2847 mutex_exit(&mg
->mg_lock
);
2851 * Determine if the space map's on-disk footprint is past our tolerance for
2852 * inefficiency. We would like to use the following criteria to make our
2855 * 1. Do not condense if the size of the space map object would dramatically
2856 * increase as a result of writing out the free space range tree.
2858 * 2. Condense if the on on-disk space map representation is at least
2859 * zfs_condense_pct/100 times the size of the optimal representation
2860 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, optimal = 1.1MB).
2862 * 3. Do not condense if the on-disk size of the space map does not actually
2865 * Unfortunately, we cannot compute the on-disk size of the space map in this
2866 * context because we cannot accurately compute the effects of compression, etc.
2867 * Instead, we apply the heuristic described in the block comment for
2868 * zfs_metaslab_condense_block_threshold - we only condense if the space used
2869 * is greater than a threshold number of blocks.
2872 metaslab_should_condense(metaslab_t
*msp
)
2874 space_map_t
*sm
= msp
->ms_sm
;
2875 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
2876 uint64_t vdev_blocksize
= 1 << vd
->vdev_ashift
;
2878 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
2879 ASSERT(msp
->ms_loaded
);
2881 ASSERT3U(spa_sync_pass(vd
->vdev_spa
), ==, 1);
2884 * We always condense metaslabs that are empty and metaslabs for
2885 * which a condense request has been made.
2887 if (avl_is_empty(&msp
->ms_allocatable_by_size
) ||
2888 msp
->ms_condense_wanted
)
2891 uint64_t record_size
= MAX(sm
->sm_blksz
, vdev_blocksize
);
2892 uint64_t object_size
= space_map_length(sm
);
2893 uint64_t optimal_size
= space_map_estimate_optimal_size(sm
,
2894 msp
->ms_allocatable
, SM_NO_VDEVID
);
2896 return (object_size
>= (optimal_size
* zfs_condense_pct
/ 100) &&
2897 object_size
> zfs_metaslab_condense_block_threshold
* record_size
);
2901 * Condense the on-disk space map representation to its minimized form.
2902 * The minimized form consists of a small number of allocations followed
2903 * by the entries of the free range tree (ms_allocatable). The condensed
2904 * spacemap contains all the entries of previous TXGs (including those in
2905 * the pool-wide log spacemaps; thus this is effectively a superset of
2906 * metaslab_flush()), but this TXG's entries still need to be written.
2909 metaslab_condense(metaslab_t
*msp
, dmu_tx_t
*tx
)
2911 range_tree_t
*condense_tree
;
2912 space_map_t
*sm
= msp
->ms_sm
;
2913 uint64_t txg
= dmu_tx_get_txg(tx
);
2914 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
2916 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
2917 ASSERT(msp
->ms_loaded
);
2918 ASSERT(msp
->ms_sm
!= NULL
);
2921 * In order to condense the space map, we need to change it so it
2922 * only describes which segments are currently allocated and free.
2924 * All the current free space resides in the ms_allocatable, all
2925 * the ms_defer trees, and all the ms_allocating trees. We ignore
2926 * ms_freed because it is empty because we're in sync pass 1. We
2927 * ignore ms_freeing because these changes are not yet reflected
2928 * in the spacemap (they will be written later this txg).
2930 * So to truncate the space map to represent all the entries of
2931 * previous TXGs we do the following:
2933 * 1] We create a range tree (condense tree) that is 100% allocated.
2934 * 2] We remove from it all segments found in the ms_defer trees
2935 * as those segments are marked as free in the original space
2936 * map. We do the same with the ms_allocating trees for the same
2937 * reason. Removing these segments should be a relatively
2938 * inexpensive operation since we expect these trees to have a
2939 * small number of nodes.
2940 * 3] We vacate any unflushed allocs as they should already exist
2941 * in the condense tree. Then we vacate any unflushed frees as
2942 * they should already be part of ms_allocatable.
2943 * 4] At this point, we would ideally like to remove all segments
2944 * in the ms_allocatable tree from the condense tree. This way
2945 * we would write all the entries of the condense tree as the
2946 * condensed space map, which would only contain allocated
2947 * segments with everything else assumed to be freed.
2949 * Doing so can be prohibitively expensive as ms_allocatable can
2950 * be large, and therefore computationally expensive to subtract
2951 * from the condense_tree. Instead we first sync out the
2952 * condense_tree and then the ms_allocatable, in the condensed
2953 * space map. While this is not optimal, it is typically close to
2954 * optimal and more importantly much cheaper to compute.
2956 * 5] Finally, as both of the unflushed trees were written to our
2957 * new and condensed metaslab space map, we basically flushed
2958 * all the unflushed changes to disk, thus we call
2959 * metaslab_flush_update().
2961 ASSERT3U(spa_sync_pass(spa
), ==, 1);
2962 ASSERT(range_tree_is_empty(msp
->ms_freed
)); /* since it is pass 1 */
2964 zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
2965 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg
,
2966 msp
->ms_id
, msp
, msp
->ms_group
->mg_vd
->vdev_id
,
2967 spa
->spa_name
, space_map_length(msp
->ms_sm
),
2968 avl_numnodes(&msp
->ms_allocatable
->rt_root
),
2969 msp
->ms_condense_wanted
? "TRUE" : "FALSE");
2971 msp
->ms_condense_wanted
= B_FALSE
;
2973 condense_tree
= range_tree_create(NULL
, NULL
);
2974 range_tree_add(condense_tree
, msp
->ms_start
, msp
->ms_size
);
2976 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
2977 range_tree_walk(msp
->ms_defer
[t
],
2978 range_tree_remove
, condense_tree
);
2981 for (int t
= 0; t
< TXG_CONCURRENT_STATES
; t
++) {
2982 range_tree_walk(msp
->ms_allocating
[(txg
+ t
) & TXG_MASK
],
2983 range_tree_remove
, condense_tree
);
2986 ASSERT3U(spa
->spa_unflushed_stats
.sus_memused
, >=,
2987 metaslab_unflushed_changes_memused(msp
));
2988 spa
->spa_unflushed_stats
.sus_memused
-=
2989 metaslab_unflushed_changes_memused(msp
);
2990 range_tree_vacate(msp
->ms_unflushed_allocs
, NULL
, NULL
);
2991 range_tree_vacate(msp
->ms_unflushed_frees
, NULL
, NULL
);
2994 * We're about to drop the metaslab's lock thus allowing other
2995 * consumers to change it's content. Set the metaslab's ms_condensing
2996 * flag to ensure that allocations on this metaslab do not occur
2997 * while we're in the middle of committing it to disk. This is only
2998 * critical for ms_allocatable as all other range trees use per TXG
2999 * views of their content.
3001 msp
->ms_condensing
= B_TRUE
;
3003 mutex_exit(&msp
->ms_lock
);
3004 uint64_t object
= space_map_object(msp
->ms_sm
);
3005 space_map_truncate(sm
,
3006 spa_feature_is_enabled(spa
, SPA_FEATURE_LOG_SPACEMAP
) ?
3007 zfs_metaslab_sm_blksz_with_log
: zfs_metaslab_sm_blksz_no_log
, tx
);
3010 * space_map_truncate() may have reallocated the spacemap object.
3011 * If so, update the vdev_ms_array.
3013 if (space_map_object(msp
->ms_sm
) != object
) {
3014 object
= space_map_object(msp
->ms_sm
);
3015 dmu_write(spa
->spa_meta_objset
,
3016 msp
->ms_group
->mg_vd
->vdev_ms_array
, sizeof (uint64_t) *
3017 msp
->ms_id
, sizeof (uint64_t), &object
, tx
);
3022 * When the log space map feature is enabled, each space map will
3023 * always have ALLOCS followed by FREES for each sync pass. This is
3024 * typically true even when the log space map feature is disabled,
3025 * except from the case where a metaslab goes through metaslab_sync()
3026 * and gets condensed. In that case the metaslab's space map will have
3027 * ALLOCS followed by FREES (due to condensing) followed by ALLOCS
3028 * followed by FREES (due to space_map_write() in metaslab_sync()) for
3031 space_map_write(sm
, condense_tree
, SM_ALLOC
, SM_NO_VDEVID
, tx
);
3032 space_map_write(sm
, msp
->ms_allocatable
, SM_FREE
, SM_NO_VDEVID
, tx
);
3034 range_tree_vacate(condense_tree
, NULL
, NULL
);
3035 range_tree_destroy(condense_tree
);
3036 mutex_enter(&msp
->ms_lock
);
3038 msp
->ms_condensing
= B_FALSE
;
3039 metaslab_flush_update(msp
, tx
);
3043 * Called when the metaslab has been flushed (its own spacemap now reflects
3044 * all the contents of the pool-wide spacemap log). Updates the metaslab's
3045 * metadata and any pool-wide related log space map data (e.g. summary,
3046 * obsolete logs, etc..) to reflect that.
3049 metaslab_flush_update(metaslab_t
*msp
, dmu_tx_t
*tx
)
3051 metaslab_group_t
*mg
= msp
->ms_group
;
3052 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
3054 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
3056 ASSERT3U(spa_sync_pass(spa
), ==, 1);
3057 ASSERT(range_tree_is_empty(msp
->ms_unflushed_allocs
));
3058 ASSERT(range_tree_is_empty(msp
->ms_unflushed_frees
));
3061 * Just because a metaslab got flushed, that doesn't mean that
3062 * it will pass through metaslab_sync_done(). Thus, make sure to
3063 * update ms_synced_length here in case it doesn't.
3065 msp
->ms_synced_length
= space_map_length(msp
->ms_sm
);
3068 * We may end up here from metaslab_condense() without the
3069 * feature being active. In that case this is a no-op.
3071 if (!spa_feature_is_active(spa
, SPA_FEATURE_LOG_SPACEMAP
))
3074 ASSERT(spa_syncing_log_sm(spa
) != NULL
);
3075 ASSERT(msp
->ms_sm
!= NULL
);
3076 ASSERT(metaslab_unflushed_txg(msp
) != 0);
3077 ASSERT3P(avl_find(&spa
->spa_metaslabs_by_flushed
, msp
, NULL
), ==, msp
);
3079 VERIFY3U(tx
->tx_txg
, <=, spa_final_dirty_txg(spa
));
3081 /* update metaslab's position in our flushing tree */
3082 uint64_t ms_prev_flushed_txg
= metaslab_unflushed_txg(msp
);
3083 mutex_enter(&spa
->spa_flushed_ms_lock
);
3084 avl_remove(&spa
->spa_metaslabs_by_flushed
, msp
);
3085 metaslab_set_unflushed_txg(msp
, spa_syncing_txg(spa
), tx
);
3086 avl_add(&spa
->spa_metaslabs_by_flushed
, msp
);
3087 mutex_exit(&spa
->spa_flushed_ms_lock
);
3089 /* update metaslab counts of spa_log_sm_t nodes */
3090 spa_log_sm_decrement_mscount(spa
, ms_prev_flushed_txg
);
3091 spa_log_sm_increment_current_mscount(spa
);
3093 /* cleanup obsolete logs if any */
3094 uint64_t log_blocks_before
= spa_log_sm_nblocks(spa
);
3095 spa_cleanup_old_sm_logs(spa
, tx
);
3096 uint64_t log_blocks_after
= spa_log_sm_nblocks(spa
);
3097 VERIFY3U(log_blocks_after
, <=, log_blocks_before
);
3099 /* update log space map summary */
3100 uint64_t blocks_gone
= log_blocks_before
- log_blocks_after
;
3101 spa_log_summary_add_flushed_metaslab(spa
);
3102 spa_log_summary_decrement_mscount(spa
, ms_prev_flushed_txg
);
3103 spa_log_summary_decrement_blkcount(spa
, blocks_gone
);
3107 metaslab_flush(metaslab_t
*msp
, dmu_tx_t
*tx
)
3109 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
3111 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
3112 ASSERT3U(spa_sync_pass(spa
), ==, 1);
3113 ASSERT(spa_feature_is_active(spa
, SPA_FEATURE_LOG_SPACEMAP
));
3115 ASSERT(msp
->ms_sm
!= NULL
);
3116 ASSERT(metaslab_unflushed_txg(msp
) != 0);
3117 ASSERT(avl_find(&spa
->spa_metaslabs_by_flushed
, msp
, NULL
) != NULL
);
3120 * There is nothing wrong with flushing the same metaslab twice, as
3121 * this codepath should work on that case. However, the current
3122 * flushing scheme makes sure to avoid this situation as we would be
3123 * making all these calls without having anything meaningful to write
3124 * to disk. We assert this behavior here.
3126 ASSERT3U(metaslab_unflushed_txg(msp
), <, dmu_tx_get_txg(tx
));
3129 * We can not flush while loading, because then we would
3130 * not load the ms_unflushed_{allocs,frees}.
3132 if (msp
->ms_loading
)
3135 metaslab_verify_space(msp
, dmu_tx_get_txg(tx
));
3136 metaslab_verify_weight_and_frag(msp
);
3139 * Metaslab condensing is effectively flushing. Therefore if the
3140 * metaslab can be condensed we can just condense it instead of
3143 * Note that metaslab_condense() does call metaslab_flush_update()
3144 * so we can just return immediately after condensing. We also
3145 * don't need to care about setting ms_flushing or broadcasting
3146 * ms_flush_cv, even if we temporarily drop the ms_lock in
3147 * metaslab_condense(), as the metaslab is already loaded.
3149 if (msp
->ms_loaded
&& metaslab_should_condense(msp
)) {
3150 metaslab_group_t
*mg
= msp
->ms_group
;
3153 * For all histogram operations below refer to the
3154 * comments of metaslab_sync() where we follow a
3155 * similar procedure.
3157 metaslab_group_histogram_verify(mg
);
3158 metaslab_class_histogram_verify(mg
->mg_class
);
3159 metaslab_group_histogram_remove(mg
, msp
);
3161 metaslab_condense(msp
, tx
);
3163 space_map_histogram_clear(msp
->ms_sm
);
3164 space_map_histogram_add(msp
->ms_sm
, msp
->ms_allocatable
, tx
);
3165 ASSERT(range_tree_is_empty(msp
->ms_freed
));
3166 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
3167 space_map_histogram_add(msp
->ms_sm
,
3168 msp
->ms_defer
[t
], tx
);
3170 metaslab_aux_histograms_update(msp
);
3172 metaslab_group_histogram_add(mg
, msp
);
3173 metaslab_group_histogram_verify(mg
);
3174 metaslab_class_histogram_verify(mg
->mg_class
);
3176 metaslab_verify_space(msp
, dmu_tx_get_txg(tx
));
3179 * Since we recreated the histogram (and potentially
3180 * the ms_sm too while condensing) ensure that the
3181 * weight is updated too because we are not guaranteed
3182 * that this metaslab is dirty and will go through
3183 * metaslab_sync_done().
3185 metaslab_recalculate_weight_and_sort(msp
);
3189 msp
->ms_flushing
= B_TRUE
;
3190 uint64_t sm_len_before
= space_map_length(msp
->ms_sm
);
3192 mutex_exit(&msp
->ms_lock
);
3193 space_map_write(msp
->ms_sm
, msp
->ms_unflushed_allocs
, SM_ALLOC
,
3195 space_map_write(msp
->ms_sm
, msp
->ms_unflushed_frees
, SM_FREE
,
3197 mutex_enter(&msp
->ms_lock
);
3199 uint64_t sm_len_after
= space_map_length(msp
->ms_sm
);
3200 if (zfs_flags
& ZFS_DEBUG_LOG_SPACEMAP
) {
3201 zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
3202 "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
3203 "appended %llu bytes", dmu_tx_get_txg(tx
), spa_name(spa
),
3204 msp
->ms_group
->mg_vd
->vdev_id
, msp
->ms_id
,
3205 range_tree_space(msp
->ms_unflushed_allocs
),
3206 range_tree_space(msp
->ms_unflushed_frees
),
3207 (sm_len_after
- sm_len_before
));
3210 ASSERT3U(spa
->spa_unflushed_stats
.sus_memused
, >=,
3211 metaslab_unflushed_changes_memused(msp
));
3212 spa
->spa_unflushed_stats
.sus_memused
-=
3213 metaslab_unflushed_changes_memused(msp
);
3214 range_tree_vacate(msp
->ms_unflushed_allocs
, NULL
, NULL
);
3215 range_tree_vacate(msp
->ms_unflushed_frees
, NULL
, NULL
);
3217 metaslab_verify_space(msp
, dmu_tx_get_txg(tx
));
3218 metaslab_verify_weight_and_frag(msp
);
3220 metaslab_flush_update(msp
, tx
);
3222 metaslab_verify_space(msp
, dmu_tx_get_txg(tx
));
3223 metaslab_verify_weight_and_frag(msp
);
3225 msp
->ms_flushing
= B_FALSE
;
3226 cv_broadcast(&msp
->ms_flush_cv
);
3231 * Write a metaslab to disk in the context of the specified transaction group.
3234 metaslab_sync(metaslab_t
*msp
, uint64_t txg
)
3236 metaslab_group_t
*mg
= msp
->ms_group
;
3237 vdev_t
*vd
= mg
->mg_vd
;
3238 spa_t
*spa
= vd
->vdev_spa
;
3239 objset_t
*mos
= spa_meta_objset(spa
);
3240 range_tree_t
*alloctree
= msp
->ms_allocating
[txg
& TXG_MASK
];
3243 ASSERT(!vd
->vdev_ishole
);
3246 * This metaslab has just been added so there's no work to do now.
3248 if (msp
->ms_freeing
== NULL
) {
3249 ASSERT3P(alloctree
, ==, NULL
);
3253 ASSERT3P(alloctree
, !=, NULL
);
3254 ASSERT3P(msp
->ms_freeing
, !=, NULL
);
3255 ASSERT3P(msp
->ms_freed
, !=, NULL
);
3256 ASSERT3P(msp
->ms_checkpointing
, !=, NULL
);
3257 ASSERT3P(msp
->ms_trim
, !=, NULL
);
3260 * Normally, we don't want to process a metaslab if there are no
3261 * allocations or frees to perform. However, if the metaslab is being
3262 * forced to condense and it's loaded, we need to let it through.
3264 if (range_tree_is_empty(alloctree
) &&
3265 range_tree_is_empty(msp
->ms_freeing
) &&
3266 range_tree_is_empty(msp
->ms_checkpointing
) &&
3267 !(msp
->ms_loaded
&& msp
->ms_condense_wanted
))
3271 VERIFY(txg
<= spa_final_dirty_txg(spa
));
3274 * The only state that can actually be changing concurrently
3275 * with metaslab_sync() is the metaslab's ms_allocatable. No
3276 * other thread can be modifying this txg's alloc, freeing,
3277 * freed, or space_map_phys_t. We drop ms_lock whenever we
3278 * could call into the DMU, because the DMU can call down to
3279 * us (e.g. via zio_free()) at any time.
3281 * The spa_vdev_remove_thread() can be reading metaslab state
3282 * concurrently, and it is locked out by the ms_sync_lock.
3283 * Note that the ms_lock is insufficient for this, because it
3284 * is dropped by space_map_write().
3286 tx
= dmu_tx_create_assigned(spa_get_dsl(spa
), txg
);
3289 * Generate a log space map if one doesn't exist already.
3291 spa_generate_syncing_log_sm(spa
, tx
);
3293 if (msp
->ms_sm
== NULL
) {
3294 uint64_t new_object
= space_map_alloc(mos
,
3295 spa_feature_is_enabled(spa
, SPA_FEATURE_LOG_SPACEMAP
) ?
3296 zfs_metaslab_sm_blksz_with_log
:
3297 zfs_metaslab_sm_blksz_no_log
, tx
);
3298 VERIFY3U(new_object
, !=, 0);
3300 dmu_write(mos
, vd
->vdev_ms_array
, sizeof (uint64_t) *
3301 msp
->ms_id
, sizeof (uint64_t), &new_object
, tx
);
3303 VERIFY0(space_map_open(&msp
->ms_sm
, mos
, new_object
,
3304 msp
->ms_start
, msp
->ms_size
, vd
->vdev_ashift
));
3305 ASSERT(msp
->ms_sm
!= NULL
);
3307 ASSERT(range_tree_is_empty(msp
->ms_unflushed_allocs
));
3308 ASSERT(range_tree_is_empty(msp
->ms_unflushed_frees
));
3309 ASSERT0(metaslab_allocated_space(msp
));
3312 if (metaslab_unflushed_txg(msp
) == 0 &&
3313 spa_feature_is_active(spa
, SPA_FEATURE_LOG_SPACEMAP
)) {
3314 ASSERT(spa_syncing_log_sm(spa
) != NULL
);
3316 metaslab_set_unflushed_txg(msp
, spa_syncing_txg(spa
), tx
);
3317 spa_log_sm_increment_current_mscount(spa
);
3318 spa_log_summary_add_flushed_metaslab(spa
);
3320 ASSERT(msp
->ms_sm
!= NULL
);
3321 mutex_enter(&spa
->spa_flushed_ms_lock
);
3322 avl_add(&spa
->spa_metaslabs_by_flushed
, msp
);
3323 mutex_exit(&spa
->spa_flushed_ms_lock
);
3325 ASSERT(range_tree_is_empty(msp
->ms_unflushed_allocs
));
3326 ASSERT(range_tree_is_empty(msp
->ms_unflushed_frees
));
3329 if (!range_tree_is_empty(msp
->ms_checkpointing
) &&
3330 vd
->vdev_checkpoint_sm
== NULL
) {
3331 ASSERT(spa_has_checkpoint(spa
));
3333 uint64_t new_object
= space_map_alloc(mos
,
3334 zfs_vdev_standard_sm_blksz
, tx
);
3335 VERIFY3U(new_object
, !=, 0);
3337 VERIFY0(space_map_open(&vd
->vdev_checkpoint_sm
,
3338 mos
, new_object
, 0, vd
->vdev_asize
, vd
->vdev_ashift
));
3339 ASSERT3P(vd
->vdev_checkpoint_sm
, !=, NULL
);
3342 * We save the space map object as an entry in vdev_top_zap
3343 * so it can be retrieved when the pool is reopened after an
3344 * export or through zdb.
3346 VERIFY0(zap_add(vd
->vdev_spa
->spa_meta_objset
,
3347 vd
->vdev_top_zap
, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM
,
3348 sizeof (new_object
), 1, &new_object
, tx
));
3351 mutex_enter(&msp
->ms_sync_lock
);
3352 mutex_enter(&msp
->ms_lock
);
3355 * Note: metaslab_condense() clears the space map's histogram.
3356 * Therefore we must verify and remove this histogram before
3359 metaslab_group_histogram_verify(mg
);
3360 metaslab_class_histogram_verify(mg
->mg_class
);
3361 metaslab_group_histogram_remove(mg
, msp
);
3363 if (spa
->spa_sync_pass
== 1 && msp
->ms_loaded
&&
3364 metaslab_should_condense(msp
))
3365 metaslab_condense(msp
, tx
);
3368 * We'll be going to disk to sync our space accounting, thus we
3369 * drop the ms_lock during that time so allocations coming from
3370 * open-context (ZIL) for future TXGs do not block.
3372 mutex_exit(&msp
->ms_lock
);
3373 space_map_t
*log_sm
= spa_syncing_log_sm(spa
);
3374 if (log_sm
!= NULL
) {
3375 ASSERT(spa_feature_is_enabled(spa
, SPA_FEATURE_LOG_SPACEMAP
));
3377 space_map_write(log_sm
, alloctree
, SM_ALLOC
,
3379 space_map_write(log_sm
, msp
->ms_freeing
, SM_FREE
,
3381 mutex_enter(&msp
->ms_lock
);
3383 ASSERT3U(spa
->spa_unflushed_stats
.sus_memused
, >=,
3384 metaslab_unflushed_changes_memused(msp
));
3385 spa
->spa_unflushed_stats
.sus_memused
-=
3386 metaslab_unflushed_changes_memused(msp
);
3387 range_tree_remove_xor_add(alloctree
,
3388 msp
->ms_unflushed_frees
, msp
->ms_unflushed_allocs
);
3389 range_tree_remove_xor_add(msp
->ms_freeing
,
3390 msp
->ms_unflushed_allocs
, msp
->ms_unflushed_frees
);
3391 spa
->spa_unflushed_stats
.sus_memused
+=
3392 metaslab_unflushed_changes_memused(msp
);
3394 ASSERT(!spa_feature_is_enabled(spa
, SPA_FEATURE_LOG_SPACEMAP
));
3396 space_map_write(msp
->ms_sm
, alloctree
, SM_ALLOC
,
3398 space_map_write(msp
->ms_sm
, msp
->ms_freeing
, SM_FREE
,
3400 mutex_enter(&msp
->ms_lock
);
3403 msp
->ms_allocated_space
+= range_tree_space(alloctree
);
3404 ASSERT3U(msp
->ms_allocated_space
, >=,
3405 range_tree_space(msp
->ms_freeing
));
3406 msp
->ms_allocated_space
-= range_tree_space(msp
->ms_freeing
);
3408 if (!range_tree_is_empty(msp
->ms_checkpointing
)) {
3409 ASSERT(spa_has_checkpoint(spa
));
3410 ASSERT3P(vd
->vdev_checkpoint_sm
, !=, NULL
);
3413 * Since we are doing writes to disk and the ms_checkpointing
3414 * tree won't be changing during that time, we drop the
3415 * ms_lock while writing to the checkpoint space map, for the
3416 * same reason mentioned above.
3418 mutex_exit(&msp
->ms_lock
);
3419 space_map_write(vd
->vdev_checkpoint_sm
,
3420 msp
->ms_checkpointing
, SM_FREE
, SM_NO_VDEVID
, tx
);
3421 mutex_enter(&msp
->ms_lock
);
3423 spa
->spa_checkpoint_info
.sci_dspace
+=
3424 range_tree_space(msp
->ms_checkpointing
);
3425 vd
->vdev_stat
.vs_checkpoint_space
+=
3426 range_tree_space(msp
->ms_checkpointing
);
3427 ASSERT3U(vd
->vdev_stat
.vs_checkpoint_space
, ==,
3428 -space_map_allocated(vd
->vdev_checkpoint_sm
));
3430 range_tree_vacate(msp
->ms_checkpointing
, NULL
, NULL
);
3433 if (msp
->ms_loaded
) {
3435 * When the space map is loaded, we have an accurate
3436 * histogram in the range tree. This gives us an opportunity
3437 * to bring the space map's histogram up-to-date so we clear
3438 * it first before updating it.
3440 space_map_histogram_clear(msp
->ms_sm
);
3441 space_map_histogram_add(msp
->ms_sm
, msp
->ms_allocatable
, tx
);
3444 * Since we've cleared the histogram we need to add back
3445 * any free space that has already been processed, plus
3446 * any deferred space. This allows the on-disk histogram
3447 * to accurately reflect all free space even if some space
3448 * is not yet available for allocation (i.e. deferred).
3450 space_map_histogram_add(msp
->ms_sm
, msp
->ms_freed
, tx
);
3453 * Add back any deferred free space that has not been
3454 * added back into the in-core free tree yet. This will
3455 * ensure that we don't end up with a space map histogram
3456 * that is completely empty unless the metaslab is fully
3459 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
3460 space_map_histogram_add(msp
->ms_sm
,
3461 msp
->ms_defer
[t
], tx
);
3466 * Always add the free space from this sync pass to the space
3467 * map histogram. We want to make sure that the on-disk histogram
3468 * accounts for all free space. If the space map is not loaded,
3469 * then we will lose some accuracy but will correct it the next
3470 * time we load the space map.
3472 space_map_histogram_add(msp
->ms_sm
, msp
->ms_freeing
, tx
);
3473 metaslab_aux_histograms_update(msp
);
3475 metaslab_group_histogram_add(mg
, msp
);
3476 metaslab_group_histogram_verify(mg
);
3477 metaslab_class_histogram_verify(mg
->mg_class
);
3480 * For sync pass 1, we avoid traversing this txg's free range tree
3481 * and instead will just swap the pointers for freeing and freed.
3482 * We can safely do this since the freed_tree is guaranteed to be
3483 * empty on the initial pass.
3485 * Keep in mind that even if we are currently using a log spacemap
3486 * we want current frees to end up in the ms_allocatable (but not
3487 * get appended to the ms_sm) so their ranges can be reused as usual.
3489 if (spa_sync_pass(spa
) == 1) {
3490 range_tree_swap(&msp
->ms_freeing
, &msp
->ms_freed
);
3491 ASSERT0(msp
->ms_allocated_this_txg
);
3493 range_tree_vacate(msp
->ms_freeing
,
3494 range_tree_add
, msp
->ms_freed
);
3496 msp
->ms_allocated_this_txg
+= range_tree_space(alloctree
);
3497 range_tree_vacate(alloctree
, NULL
, NULL
);
3499 ASSERT0(range_tree_space(msp
->ms_allocating
[txg
& TXG_MASK
]));
3500 ASSERT0(range_tree_space(msp
->ms_allocating
[TXG_CLEAN(txg
)
3502 ASSERT0(range_tree_space(msp
->ms_freeing
));
3503 ASSERT0(range_tree_space(msp
->ms_checkpointing
));
3505 mutex_exit(&msp
->ms_lock
);
3508 * Verify that the space map object ID has been recorded in the
3512 VERIFY0(dmu_read(mos
, vd
->vdev_ms_array
,
3513 msp
->ms_id
* sizeof (uint64_t), sizeof (uint64_t), &object
, 0));
3514 VERIFY3U(object
, ==, space_map_object(msp
->ms_sm
));
3516 mutex_exit(&msp
->ms_sync_lock
);
3521 metaslab_potentially_unload(metaslab_t
*msp
, uint64_t txg
)
3524 * If the metaslab is loaded and we've not tried to load or allocate
3525 * from it in 'metaslab_unload_delay' txgs, then unload it.
3527 if (msp
->ms_loaded
&&
3528 msp
->ms_disabled
== 0 &&
3529 msp
->ms_selected_txg
+ metaslab_unload_delay
< txg
) {
3530 for (int t
= 1; t
< TXG_CONCURRENT_STATES
; t
++) {
3531 VERIFY0(range_tree_space(
3532 msp
->ms_allocating
[(txg
+ t
) & TXG_MASK
]));
3534 if (msp
->ms_allocator
!= -1) {
3535 metaslab_passivate(msp
, msp
->ms_weight
&
3536 ~METASLAB_ACTIVE_MASK
);
3539 if (!metaslab_debug_unload
)
3540 metaslab_unload(msp
);
/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	spa_t *spa = vd->vdev_spa;
	range_tree_t **defer_tree;
	int64_t alloc_delta, defer_delta;
	boolean_t defer_allowed = B_TRUE;

	ASSERT(!vd->vdev_ishole);

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * range trees and add its capacity to the vdev.
	 */
	if (msp->ms_freed == NULL) {
		for (int t = 0; t < TXG_SIZE; t++) {
			ASSERT(msp->ms_allocating[t] == NULL);

			msp->ms_allocating[t] = range_tree_create(NULL, NULL);
		}

		ASSERT3P(msp->ms_freeing, ==, NULL);
		msp->ms_freeing = range_tree_create(NULL, NULL);

		ASSERT3P(msp->ms_freed, ==, NULL);
		msp->ms_freed = range_tree_create(NULL, NULL);

		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			ASSERT3P(msp->ms_defer[t], ==, NULL);
			msp->ms_defer[t] = range_tree_create(NULL, NULL);
		}

		ASSERT3P(msp->ms_checkpointing, ==, NULL);
		msp->ms_checkpointing = range_tree_create(NULL, NULL);

		ASSERT3P(msp->ms_unflushed_allocs, ==, NULL);
		msp->ms_unflushed_allocs = range_tree_create(NULL, NULL);
		ASSERT3P(msp->ms_unflushed_frees, ==, NULL);
		msp->ms_unflushed_frees = range_tree_create(NULL, NULL);

		metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
	}
	ASSERT0(range_tree_space(msp->ms_freeing));
	ASSERT0(range_tree_space(msp->ms_checkpointing));

	defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];

	uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
	    metaslab_class_get_alloc(spa_normal_class(spa));
	if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
		defer_allowed = B_FALSE;
	}

	defer_delta = 0;
	alloc_delta = msp->ms_allocated_this_txg -
	    range_tree_space(msp->ms_freed);

	if (defer_allowed) {
		defer_delta = range_tree_space(msp->ms_freed) -
		    range_tree_space(*defer_tree);
	} else {
		defer_delta -= range_tree_space(*defer_tree);
	}
	metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
	    defer_delta, 0);

	if (spa_syncing_log_sm(spa) == NULL) {
		/*
		 * If there's a metaslab_load() in progress and we don't have
		 * a log space map, it means that we probably wrote to the
		 * metaslab's space map. If this is the case, we need to
		 * make sure that we wait for the load to complete so that we
		 * have a consistent view at the in-core side of the metaslab.
		 */
		metaslab_load_wait(msp);
	} else {
		ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
	}

	/*
	 * When auto-trimming is enabled, free ranges which are added to
	 * ms_allocatable are also added to ms_trim. The ms_trim tree is
	 * periodically consumed by the vdev_autotrim_thread() which issues
	 * trims for all ranges and then vacates the tree. The ms_trim tree
	 * can be discarded at any time with the sole consequence of recent
	 * frees not being trimmed.
	 */
	if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
		range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
		if (!defer_allowed) {
			range_tree_walk(msp->ms_freed, range_tree_add,
			    msp->ms_trim);
		}
	} else {
		range_tree_vacate(msp->ms_trim, NULL, NULL);
	}

	/*
	 * Move the frees from the defer_tree back to the free
	 * range tree (if it's loaded). Swap the freed_tree and
	 * the defer_tree -- this is safe to do because we've
	 * just emptied out the defer_tree.
	 */
	range_tree_vacate(*defer_tree,
	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
	if (defer_allowed) {
		range_tree_swap(&msp->ms_freed, defer_tree);
	} else {
		range_tree_vacate(msp->ms_freed,
		    msp->ms_loaded ? range_tree_add : NULL,
		    msp->ms_allocatable);
	}

	msp->ms_synced_length = space_map_length(msp->ms_sm);

	msp->ms_deferspace += defer_delta;
	ASSERT3S(msp->ms_deferspace, >=, 0);
	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
	if (msp->ms_deferspace != 0) {
		/*
		 * Keep syncing this metaslab until all deferred frees
		 * are back in circulation.
		 */
		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
	}
	metaslab_aux_histograms_update_done(msp, defer_allowed);

	if (msp->ms_new) {
		msp->ms_new = B_FALSE;
		mutex_enter(&mg->mg_lock);
		mg->mg_ms_ready++;
		mutex_exit(&mg->mg_lock);
	}

	/*
	 * Re-sort metaslab within its group now that we've adjusted
	 * its allocatable space.
	 */
	metaslab_recalculate_weight_and_sort(msp);

	ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
	ASSERT0(range_tree_space(msp->ms_freeing));
	ASSERT0(range_tree_space(msp->ms_freed));
	ASSERT0(range_tree_space(msp->ms_checkpointing));

	msp->ms_allocated_this_txg = 0;
	mutex_exit(&msp->ms_lock);
}
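/*
 * Worked example of the defer pipeline above: space freed in txg N collects
 * in ms_freed while N syncs; metaslab_sync_done() for txg N then swaps
 * ms_freed into ms_defer[N % TXG_DEFER_SIZE] (when deferral is allowed).
 * That defer slot is not vacated back into ms_allocatable until
 * metaslab_sync_done() runs for txg N + TXG_DEFER_SIZE, so a segment freed
 * in txg N cannot be handed out again before that later txg completes.
 */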
void
metaslab_sync_reassess(metaslab_group_t *mg)
{
	spa_t *spa = mg->mg_class->mc_spa;

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
	metaslab_group_alloc_update(mg);
	mg->mg_fragmentation = metaslab_group_fragmentation(mg);

	/*
	 * Preload the next potential metaslabs but only on active
	 * metaslab groups. We can get into a state where the metaslab
	 * is no longer active since we dirty metaslabs as we remove a
	 * device, thus potentially making the metaslab group eligible
	 * for preloading.
	 */
	if (mg->mg_activation_count > 0) {
		metaslab_group_preload(mg);
	}
	spa_config_exit(spa, SCL_ALLOC, FTAG);
}
/*
 * When writing a ditto block (i.e. more than one DVA for a given BP) on
 * the same vdev as an existing DVA of this BP, then try to allocate it
 * on a different metaslab than existing DVAs (i.e. a unique metaslab).
 */
static boolean_t
metaslab_is_unique(metaslab_t *msp, dva_t *dva)
{
	uint64_t dva_ms_id;

	if (DVA_GET_ASIZE(dva) == 0)
		return (B_TRUE);

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (B_TRUE);

	dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;

	return (msp->ms_id != dva_ms_id);
}
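/*
 * Example (hypothetical numbers): with a vdev_ms_shift of 34 (16 GiB
 * metaslabs), a DVA at offset 0x900000000 on the same vdev maps to
 * metaslab 2 (0x900000000 >> 34), so a candidate with ms_id == 2 would not
 * be considered unique and a different metaslab would be preferred.
 */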
/*
 * ==========================================================================
 * Metaslab allocation tracing facility
 * ==========================================================================
 */
#ifdef _METASLAB_TRACING
kstat_t *metaslab_trace_ksp;
kstat_named_t metaslab_trace_over_limit;

void
metaslab_alloc_trace_init(void)
{
	ASSERT(metaslab_alloc_trace_cache == NULL);
	metaslab_alloc_trace_cache = kmem_cache_create(
	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
	    "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
	if (metaslab_trace_ksp != NULL) {
		metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
		kstat_named_init(&metaslab_trace_over_limit,
		    "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
		kstat_install(metaslab_trace_ksp);
	}
}

void
metaslab_alloc_trace_fini(void)
{
	if (metaslab_trace_ksp != NULL) {
		kstat_delete(metaslab_trace_ksp);
		metaslab_trace_ksp = NULL;
	}
	kmem_cache_destroy(metaslab_alloc_trace_cache);
	metaslab_alloc_trace_cache = NULL;
}

/*
 * Add an allocation trace element to the allocation tracing list.
 */
static void
metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
    metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
    int allocator)
{
	metaslab_alloc_trace_t *mat;

	if (!metaslab_trace_enabled)
		return;

	/*
	 * When the tracing list reaches its maximum we remove
	 * the second element in the list before adding a new one.
	 * By removing the second element we preserve the original
	 * entry as a clue to what allocation steps have already been
	 * performed.
	 */
	if (zal->zal_size == metaslab_trace_max_entries) {
		metaslab_alloc_trace_t *mat_next;
#ifdef DEBUG
		panic("too many entries in allocation list");
#endif
		atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
		zal->zal_size--;
		mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
		list_remove(&zal->zal_list, mat_next);
		kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
	}

	mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
	list_link_init(&mat->mat_list_node);
	mat->mat_mg = mg;
	mat->mat_msp = msp;
	mat->mat_size = psize;
	mat->mat_dva_id = dva_id;
	mat->mat_offset = offset;
	mat->mat_weight = 0;
	mat->mat_allocator = allocator;

	if (msp != NULL)
		mat->mat_weight = msp->ms_weight;

	/*
	 * The list is part of the zio so locking is not required. Only
	 * a single thread will perform allocations for a given zio.
	 */
	list_insert_tail(&zal->zal_list, mat);
	zal->zal_size++;

	ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
}

void
metaslab_trace_init(zio_alloc_list_t *zal)
{
	list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
	    offsetof(metaslab_alloc_trace_t, mat_list_node));
	zal->zal_size = 0;
}

void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
	metaslab_alloc_trace_t *mat;

	while ((mat = list_remove_head(&zal->zal_list)) != NULL)
		kmem_cache_free(metaslab_alloc_trace_cache, mat);
	list_destroy(&zal->zal_list);
}
#else

#define	metaslab_trace_add(zal, mg, msp, psize, id, off, alloc)

void
metaslab_alloc_trace_init(void)
{
}

void
metaslab_alloc_trace_fini(void)
{
}

void
metaslab_trace_init(zio_alloc_list_t *zal)
{
}

void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
}

#endif /* _METASLAB_TRACING */
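/*
 * Typical use of the tracing facility (sketch): a caller embeds a
 * zio_alloc_list_t in its allocation state, calls metaslab_trace_init()
 * before attempting DVA allocations, lets the allocator record each attempt
 * through metaslab_trace_add(), and calls metaslab_trace_fini() once the
 * allocation has either succeeded or definitively failed.
 */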
/*
 * ==========================================================================
 * Metaslab block operations
 * ==========================================================================
 */
static void
metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
    int allocator)
{
	if (!(flags & METASLAB_ASYNC_ALLOC) ||
	    (flags & METASLAB_DONT_THROTTLE))
		return;

	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
	if (!mg->mg_class->mc_alloc_throttle_enabled)
		return;

	(void) zfs_refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
}

static void
metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
{
	uint64_t max = mg->mg_max_alloc_queue_depth;
	uint64_t cur = mg->mg_cur_max_alloc_queue_depth[allocator];
	while (cur < max) {
		if (atomic_cas_64(&mg->mg_cur_max_alloc_queue_depth[allocator],
		    cur, cur + 1) == cur) {
			atomic_inc_64(
			    &mg->mg_class->mc_alloc_max_slots[allocator]);
			break;
		}
		cur = mg->mg_cur_max_alloc_queue_depth[allocator];
	}
}

void
metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
    int allocator, boolean_t io_complete)
{
	if (!(flags & METASLAB_ASYNC_ALLOC) ||
	    (flags & METASLAB_DONT_THROTTLE))
		return;

	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
	if (!mg->mg_class->mc_alloc_throttle_enabled)
		return;

	(void) zfs_refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
	if (io_complete)
		metaslab_group_increment_qdepth(mg, allocator);
}
void
metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
    int allocator)
{
#ifdef ZFS_DEBUG
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	for (int d = 0; d < ndvas; d++) {
		uint64_t vdev = DVA_GET_VDEV(&dva[d]);
		metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
		VERIFY(zfs_refcount_not_held(
		    &mg->mg_alloc_queue_depth[allocator], tag));
	}
#endif
}
static uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
{
	uint64_t start;
	range_tree_t *rt = msp->ms_allocatable;
	metaslab_class_t *mc = msp->ms_group->mg_class;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	VERIFY(!msp->ms_condensing);
	VERIFY0(msp->ms_disabled);

	start = mc->mc_ops->msop_alloc(msp, size);
	if (start != -1ULL) {
		metaslab_group_t *mg = msp->ms_group;
		vdev_t *vd = mg->mg_vd;

		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
		range_tree_remove(rt, start, size);
		range_tree_clear(msp->ms_trim, start, size);

		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
			vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

		range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);

		/* Track the last successful allocation */
		msp->ms_alloc_txg = txg;
		metaslab_verify_space(msp, txg);
	}

	/*
	 * Now that we've attempted the allocation we need to update the
	 * metaslab's maximum block size since it may have changed.
	 */
	msp->ms_max_size = metaslab_block_maxsize(msp);
	return (start);
}
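/*
 * Note that a successful return from metaslab_block_alloc() only moves the
 * range from ms_allocatable into ms_allocating[txg & TXG_MASK] in memory;
 * the allocation is not reflected in the on-disk space map until
 * metaslab_sync() processes that txg.
 */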
/*
 * Find the metaslab with the highest weight that is less than what we've
 * already tried. In the common case, this means that we will examine each
 * metaslab at most once. Note that concurrent callers could reorder metaslabs
 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
 * activated by another thread, and we fail to allocate from the metaslab we
 * have selected, we may not try the newly-activated metaslab, and instead
 * activate another metaslab. This is not optimal, but generally does not cause
 * any problems (a possible exception being if every metaslab is completely
 * full except for the newly-activated metaslab which we fail to examine).
 */
static metaslab_t *
find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
    dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
    zio_alloc_list_t *zal, metaslab_t *search, boolean_t *was_active)
{
	avl_index_t idx;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	metaslab_t *msp = avl_find(t, search, &idx);
	if (msp == NULL)
		msp = avl_nearest(t, idx, AVL_AFTER);

	for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
		int i;
		if (!metaslab_should_allocate(msp, asize)) {
			metaslab_trace_add(zal, mg, msp, asize, d,
			    TRACE_TOO_SMALL, allocator);
			continue;
		}

		/*
		 * If the selected metaslab is condensing or disabled,
		 * skip it.
		 */
		if (msp->ms_condensing || msp->ms_disabled > 0)
			continue;

		*was_active = msp->ms_allocator != -1;
		/*
		 * If we're activating as primary, this is our first allocation
		 * from this disk, so we don't need to check how close we are.
		 * If the metaslab under consideration was already active,
		 * we're getting desperate enough to steal another allocator's
		 * metaslab, so we still don't care about distances.
		 */
		if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
			break;

		for (i = 0; i < d; i++) {
			if (want_unique &&
			    !metaslab_is_unique(msp, &dva[i]))
				break;	/* try another metaslab */
		}

		if (i == d)
			break;
	}

	if (msp != NULL) {
		search->ms_weight = msp->ms_weight;
		search->ms_start = msp->ms_start + 1;
		search->ms_allocator = msp->ms_allocator;
		search->ms_primary = msp->ms_primary;
	}
	return (msp);
}
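/*
 * The search key above is what makes the scan resumable: by seeding
 * ms_weight/ms_start (and ms_allocator/ms_primary) from the last metaslab
 * examined, the next avl_find()/avl_nearest() call on a retry starts right
 * after that metaslab in the tree ordering instead of rescanning from the
 * heaviest metaslab.
 */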
static void
metaslab_active_mask_verify(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
		return;

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
		return;

	if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
		VERIFY3S(msp->ms_allocator, !=, -1);
		VERIFY(msp->ms_primary);
		return;
	}

	if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
		VERIFY3S(msp->ms_allocator, !=, -1);
		VERIFY(!msp->ms_primary);
		return;
	}

	if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
		VERIFY3S(msp->ms_allocator, ==, -1);
	}
}
static uint64_t
metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
    uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva,
    int d, int allocator)
{
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;

	uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (int i = 0; i < d; i++) {
		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_SECONDARY;
		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_CLAIM;
			break;
		}
	}

	/*
	 * If we don't have enough metaslabs active to fill the entire array,
	 * we just use the 0th slot.
	 */
	if (mg->mg_ms_ready < mg->mg_allocators * 3)
		allocator = 0;

	ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);

	metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
	search->ms_weight = UINT64_MAX;
	search->ms_start = 0;
	/*
	 * At the end of the metaslab tree are the already-active metaslabs,
	 * first the primaries, then the secondaries. When we resume searching
	 * through the tree, we need to consider ms_allocator and ms_primary so
	 * we start in the location right after where we left off, and don't
	 * accidentally loop forever considering the same metaslabs.
	 */
	search->ms_allocator = -1;
	search->ms_primary = B_TRUE;
	for (;;) {
		boolean_t was_active = B_FALSE;

		mutex_enter(&mg->mg_lock);

		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
		    mg->mg_primaries[allocator] != NULL) {
			msp = mg->mg_primaries[allocator];

			/*
			 * Even though we don't hold the ms_lock for the
			 * primary metaslab, those fields should not
			 * change while we hold the mg_lock. Thus it is
			 * safe to make assertions on them.
			 */
			ASSERT(msp->ms_primary);
			ASSERT3S(msp->ms_allocator, ==, allocator);
			ASSERT(msp->ms_loaded);

			was_active = B_TRUE;
		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
		    mg->mg_secondaries[allocator] != NULL) {
			msp = mg->mg_secondaries[allocator];

			/*
			 * See comment above about the similar assertions
			 * for the primary metaslab.
			 */
			ASSERT(!msp->ms_primary);
			ASSERT3S(msp->ms_allocator, ==, allocator);
			ASSERT(msp->ms_loaded);

			was_active = B_TRUE;
		} else {
			msp = find_valid_metaslab(mg, activation_weight, dva, d,
			    want_unique, asize, allocator, zal, search,
			    &was_active);
		}

		mutex_exit(&mg->mg_lock);
		if (msp == NULL) {
			kmem_free(search, sizeof (*search));
			return (-1ULL);
		}
		mutex_enter(&msp->ms_lock);

		metaslab_active_mask_verify(msp);

		/*
		 * This code is disabled because of issues with
		 * tracepoints in non-gpl kernel modules.
		 */
#if 0
		DTRACE_PROBE3(ms__activation__attempt,
		    metaslab_t *, msp, uint64_t, activation_weight,
		    boolean_t, was_active);
#endif

		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request. It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock. We check the
		 * active status first to see if we need to reselect
		 * a new metaslab.
		 */
		if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
			ASSERT3S(msp->ms_allocator, ==, -1);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If the metaslab was activated for another allocator
		 * while we were waiting in the ms_lock above, or it's
		 * a primary and we're seeking a secondary (or vice versa),
		 * we go back and select a new metaslab.
		 */
		if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
		    (msp->ms_allocator != -1) &&
		    (msp->ms_allocator != allocator || ((activation_weight ==
		    METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
			ASSERT(msp->ms_loaded);
			ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
			    msp->ms_allocator != -1);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * This metaslab was used for claiming regions allocated
		 * by the ZIL during pool import. Once these regions are
		 * claimed we don't need to keep the CLAIM bit set
		 * anymore. Passivate this metaslab to zero its activation
		 * mask.
		 */
		if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
		    activation_weight != METASLAB_WEIGHT_CLAIM) {
			ASSERT(msp->ms_loaded);
			ASSERT3S(msp->ms_allocator, ==, -1);
			metaslab_passivate(msp, msp->ms_weight &
			    ~METASLAB_WEIGHT_CLAIM);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		msp->ms_selected_txg = txg;

		int activation_error =
		    metaslab_activate(msp, allocator, activation_weight);
		metaslab_active_mask_verify(msp);

		/*
		 * If the metaslab was activated by another thread for
		 * another allocator or activation_weight (EBUSY), or it
		 * failed because another metaslab was assigned as primary
		 * for this allocator (EEXIST) we continue using this
		 * metaslab for our allocation, rather than going on to a
		 * worse metaslab (we waited for that metaslab to be loaded
		 * after all).
		 *
		 * If the activation failed due to an I/O error or ENOSPC we
		 * skip to the next metaslab.
		 */
		boolean_t activated;
		if (activation_error == 0) {
			activated = B_TRUE;
		} else if (activation_error == EBUSY ||
		    activation_error == EEXIST) {
			activated = B_FALSE;
		} else {
			mutex_exit(&msp->ms_lock);
			continue;
		}
		ASSERT(msp->ms_loaded);

		/*
		 * Now that we have the lock, recheck to see if we should
		 * continue to use this metaslab for this allocation. The
		 * metaslab is now loaded so metaslab_should_allocate()
		 * can accurately determine if the allocation attempt should
		 * proceed.
		 */
		if (!metaslab_should_allocate(msp, asize)) {
			/* Passivate this metaslab and select a new one. */
			metaslab_trace_add(zal, mg, msp, asize, d,
			    TRACE_TOO_SMALL, allocator);
			goto next;
		}

		/*
		 * If this metaslab is currently condensing then pick again
		 * as we can't manipulate this metaslab until it's committed
		 * to disk. If this metaslab is being initialized, we shouldn't
		 * allocate from it since the allocated region might be
		 * overwritten after allocation.
		 */
		if (msp->ms_condensing) {
			metaslab_trace_add(zal, mg, msp, asize, d,
			    TRACE_CONDENSING, allocator);
			if (activated) {
				metaslab_passivate(msp, msp->ms_weight &
				    ~METASLAB_ACTIVE_MASK);
			}
			mutex_exit(&msp->ms_lock);
			continue;
		} else if (msp->ms_disabled > 0) {
			metaslab_trace_add(zal, mg, msp, asize, d,
			    TRACE_DISABLED, allocator);
			if (activated) {
				metaslab_passivate(msp, msp->ms_weight &
				    ~METASLAB_ACTIVE_MASK);
			}
			mutex_exit(&msp->ms_lock);
			continue;
		}

		offset = metaslab_block_alloc(msp, asize, txg);
		metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);

		if (offset != -1ULL) {
			/* Proactively passivate the metaslab, if needed */
			if (activated)
				metaslab_segment_may_passivate(msp);
			break;
		}
next:
		ASSERT(msp->ms_loaded);

		/*
		 * This code is disabled because of issues with
		 * tracepoints in non-gpl kernel modules.
		 */
#if 0
		DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
		    uint64_t, asize);
#endif

		/*
		 * We were unable to allocate from this metaslab so determine
		 * a new weight for this metaslab. Now that we have loaded
		 * the metaslab we can provide a better hint to the metaslab
		 * group.
		 *
		 * For space-based metaslabs, we use the maximum block size.
		 * This information is only available when the metaslab
		 * is loaded and is more accurate than the generic free
		 * space weight that was calculated by metaslab_weight().
		 * This information allows us to quickly compare the maximum
		 * available allocation in the metaslab to the allocation
		 * size being requested.
		 *
		 * For segment-based metaslabs, determine the new weight
		 * based on the highest bucket in the range tree. We
		 * explicitly use the loaded segment weight (i.e. the range
		 * tree histogram) since it contains the space that is
		 * currently available for allocation and is accurate
		 * even within a sync pass.
		 */
		uint64_t weight;
		if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
			weight = metaslab_block_maxsize(msp);
			WEIGHT_SET_SPACEBASED(weight);
		} else {
			weight = metaslab_weight_from_range_tree(msp);
		}

		if (activated) {
			metaslab_passivate(msp, weight);
		} else {
			/*
			 * For the case where we use the metaslab that is
			 * active for another allocator we want to make
			 * sure that we retain the activation mask.
			 *
			 * Note that we could attempt to use something like
			 * metaslab_recalculate_weight_and_sort() that
			 * retains the activation mask here. That function
			 * uses metaslab_weight() to set the weight though
			 * which is not as accurate as the calculations
			 * above.
			 */
			weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
			metaslab_group_sort(mg, msp, weight);
		}
		metaslab_active_mask_verify(msp);

		/*
		 * We have just failed an allocation attempt, check
		 * that metaslab_should_allocate() agrees. Otherwise,
		 * we may end up in an infinite loop retrying the same
		 * metaslab.
		 */
		ASSERT(!metaslab_should_allocate(msp, asize));

		mutex_exit(&msp->ms_lock);
	}
	mutex_exit(&msp->ms_lock);
	kmem_free(search, sizeof (*search));
	return (offset);
}
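/*
 * In summary, metaslab_group_alloc_normal() loops: pick a candidate (the
 * cached primary/secondary for this allocator, or find_valid_metaslab()),
 * take ms_lock, re-validate the activation state, activate if needed, and
 * try metaslab_block_alloc(). On failure it reweights and passivates the
 * metaslab before retrying with the next candidate; on success it may
 * proactively passivate a segment-weighted metaslab that has grown too full.
 */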
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
    uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva,
    int d, int allocator)
{
	uint64_t offset;
	ASSERT(mg->mg_initialized);

	offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
	    dva, d, allocator);

	mutex_enter(&mg->mg_lock);
	if (offset == -1ULL) {
		mg->mg_failed_allocations++;
		metaslab_trace_add(zal, mg, NULL, asize, d,
		    TRACE_GROUP_FAILURE, allocator);
		if (asize == SPA_GANGBLOCKSIZE) {
			/*
			 * This metaslab group was unable to allocate
			 * the minimum gang block size so it must be out of
			 * space. We must notify the allocation throttle
			 * to start skipping allocation attempts to this
			 * metaslab group until more space becomes available.
			 * Note: this failure cannot be caused by the
			 * allocation throttle since the allocation throttle
			 * is only responsible for skipping devices and
			 * not failing block allocations.
			 */
			mg->mg_no_free_space = B_TRUE;
		}
	}
	mg->mg_allocations++;
	mutex_exit(&mg->mg_lock);
	return (offset);
}
/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
    zio_alloc_list_t *zal, int allocator)
{
	metaslab_group_t *mg, *fast_mg, *rotor;
	vdev_t *vd;
	boolean_t try_hard = B_FALSE;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 * This will result in more split blocks when using device removal,
	 * and a large number of split blocks coupled with ztest-induced
	 * damage can result in extremely long reconstruction times. This
	 * will also test spilling from special to normal.
	 */
	if (psize >= metaslab_force_ganging && (spa_get_random(100) < 3)) {
		metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
		    allocator);
		return (SET_ERROR(ENOSPC));
	}

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_aliquot because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly. It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs. If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible. If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about. Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data. With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header. That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

		/*
		 * It's possible the vdev we're using as the hint no
		 * longer exists or its mg has been closed (e.g. by
		 * device removal). Consult the rotor when
		 * all else fails.
		 */
		if (vd != NULL && vd->vdev_mg != NULL) {
			mg = vd->vdev_mg;

			if (flags & METASLAB_HINTBP_AVOID &&
			    mg->mg_next != NULL)
				mg = mg->mg_next;
		} else {
			mg = mc->mc_rotor;
		}
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else if (flags & METASLAB_FASTWRITE) {
		mg = fast_mg = mc->mc_rotor;

		do {
			if (fast_mg->mg_vd->vdev_pending_fastwrite <
			    mg->mg_vd->vdev_pending_fastwrite)
				mg = fast_mg;
		} while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);

	} else {
		ASSERT(mc->mc_rotor != NULL);
		mg = mc->mc_rotor;
	}

	/*
	 * If the hint put us into the wrong metaslab class, or into a
	 * metaslab group that has been passivated, just follow the rotor.
	 */
	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
		mg = mc->mc_rotor;

	rotor = mg;
top:
	do {
		boolean_t allocatable;

		ASSERT(mg->mg_activation_count == 1);
		vd = mg->mg_vd;

		/*
		 * Don't allocate from faulted devices.
		 */
		if (try_hard) {
			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
			allocatable = vdev_allocatable(vd);
			spa_config_exit(spa, SCL_ZIO, FTAG);
		} else {
			allocatable = vdev_allocatable(vd);
		}

		/*
		 * Determine if the selected metaslab group is eligible
		 * for allocations. If we're ganging then don't allow
		 * this metaslab group to skip allocations since that would
		 * inadvertently return ENOSPC and suspend the pool
		 * even though space is still available.
		 */
		if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
			allocatable = metaslab_group_allocatable(mg, rotor,
			    psize, allocator, d);
		}

		if (!allocatable) {
			metaslab_trace_add(zal, mg, NULL, psize, d,
			    TRACE_NOT_ALLOCATABLE, allocator);
			goto next;
		}

		ASSERT(mg->mg_initialized);

		/*
		 * Avoid writing single-copy data to a failing,
		 * non-redundant vdev, unless we've already tried all
		 * other vdevs.
		 */
		if ((vd->vdev_stat.vs_write_errors > 0 ||
		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
		    d == 0 && !try_hard && vd->vdev_children == 0) {
			metaslab_trace_add(zal, mg, NULL, psize, d,
			    TRACE_VDEV_ERROR, allocator);
			goto next;
		}

		ASSERT(mg->mg_class == mc);

		uint64_t asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		/*
		 * If we don't need to try hard, then require that the
		 * block be on a different metaslab from any other DVAs
		 * in this BP (unique=true). If we are trying hard, then
		 * allow any metaslab to be used (unique=false).
		 */
		uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
		    !try_hard, dva, d, allocator);

		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 *
			 * Bias is also used to compensate for unequally
			 * sized vdevs so that space is allocated fairly.
			 */
			if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
				vdev_stat_t *vs = &vd->vdev_stat;
				int64_t vs_free = vs->vs_space - vs->vs_alloc;
				int64_t mc_free = mc->mc_space - mc->mc_alloc;
				int64_t ratio;

				/*
				 * Calculate how much more or less we should
				 * try to allocate from this device during
				 * this iteration around the rotor.
				 *
				 * This basically introduces a zero-centered
				 * bias towards the devices with the most
				 * free space, while compensating for vdev
				 * size differences.
				 *
				 * Examples:
				 *  vdev V1 = 16M/128M
				 *  vdev V2 = 16M/128M
				 *  ratio(V1) = 100% ratio(V2) = 100%
				 *
				 *  vdev V1 = 16M/128M
				 *  vdev V2 = 64M/128M
				 *  ratio(V1) = 127% ratio(V2) =  72%
				 *
				 *  vdev V1 = 16M/128M
				 *  vdev V2 = 64M/512M
				 *  ratio(V1) =  40% ratio(V2) = 160%
				 */
				ratio = (vs_free * mc->mc_alloc_groups * 100) /
				    (mc_free + 1);
				mg->mg_bias = ((ratio - 100) *
				    (int64_t)mg->mg_aliquot) / 100;
			} else if (!metaslab_bias_enabled) {
				mg->mg_bias = 0;
			}

			if ((flags & METASLAB_FASTWRITE) ||
			    atomic_add_64_nv(&mc->mc_aliquot, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_aliquot = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d],
			    ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
			DVA_SET_ASIZE(&dva[d], asize);

			if (flags & METASLAB_FASTWRITE) {
				atomic_add_64(&vd->vdev_pending_fastwrite,
				    psize);
			}

			return (0);
		}
next:
		mc->mc_rotor = mg->mg_next;
		mc->mc_aliquot = 0;
	} while ((mg = mg->mg_next) != rotor);

	/*
	 * If we haven't tried hard, do so now.
	 */
	if (!try_hard) {
		try_hard = B_TRUE;
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
	return (SET_ERROR(ENOSPC));
}
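/*
 * Plugging numbers into the bias example above: with two allocation groups
 * (mc_alloc_groups == 2), the emptier vdev with 112M free out of 128M while
 * the class has 176M free overall computes ratio = (112M * 2 * 100) / 176M
 * ~= 127, so mg_bias is roughly +27% of mg_aliquot and the rotor lingers on
 * that vdev; the fuller vdev computes ~72 and gives back a corresponding
 * share of its aliquot, which is what evens out the allocations over time.
 */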
void
metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
    boolean_t checkpoint)
{
	metaslab_t *msp;
	spa_t *spa = vd->vdev_spa;

	ASSERT(vdev_is_concrete(vd));
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	VERIFY(!msp->ms_condensing);
	VERIFY3U(offset, >=, msp->ms_start);
	VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));

	metaslab_check_free_impl(vd, offset, asize);

	mutex_enter(&msp->ms_lock);
	if (range_tree_is_empty(msp->ms_freeing) &&
	    range_tree_is_empty(msp->ms_checkpointing)) {
		vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
	}

	if (checkpoint) {
		ASSERT(spa_has_checkpoint(spa));
		range_tree_add(msp->ms_checkpointing, offset, asize);
	} else {
		range_tree_add(msp->ms_freeing, offset, asize);
	}
	mutex_exit(&msp->ms_lock);
}
static void
metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	boolean_t *checkpoint = arg;

	ASSERT3P(checkpoint, !=, NULL);

	if (vd->vdev_ops->vdev_op_remap != NULL)
		vdev_indirect_mark_obsolete(vd, offset, size);
	else
		metaslab_free_impl(vd, offset, size, *checkpoint);
}
static void
metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
    boolean_t checkpoint)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
		return;

	if (spa->spa_vdev_removal != NULL &&
	    spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
	    vdev_is_concrete(vd)) {
		/*
		 * Note: we check if the vdev is concrete because when
		 * we complete the removal, we first change the vdev to be
		 * an indirect vdev (in open context), and then (in syncing
		 * context) clear spa_vdev_removal.
		 */
		free_from_removing_vdev(vd, offset, size);
	} else if (vd->vdev_ops->vdev_op_remap != NULL) {
		vdev_indirect_mark_obsolete(vd, offset, size);
		vd->vdev_ops->vdev_op_remap(vd, offset, size,
		    metaslab_free_impl_cb, &checkpoint);
	} else {
		metaslab_free_concrete(vd, offset, size, checkpoint);
	}
}
typedef struct remap_blkptr_cb_arg {
	blkptr_t *rbca_bp;
	spa_remap_cb_t rbca_cb;
	vdev_t *rbca_remap_vd;
	uint64_t rbca_remap_offset;
	void *rbca_cb_arg;
} remap_blkptr_cb_arg_t;

static void
remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	remap_blkptr_cb_arg_t *rbca = arg;
	blkptr_t *bp = rbca->rbca_bp;

	/* We can not remap split blocks. */
	if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
		return;
	ASSERT0(inner_offset);

	if (rbca->rbca_cb != NULL) {
		/*
		 * At this point we know that we are not handling split
		 * blocks and we invoke the callback on the previous
		 * vdev which must be indirect.
		 */
		ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);

		rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
		    rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);

		/* set up remap_blkptr_cb_arg for the next call */
		rbca->rbca_remap_vd = vd;
		rbca->rbca_remap_offset = offset;
	}

	/*
	 * The phys birth time is that of dva[0]. This ensures that we know
	 * when each dva was written, so that resilver can determine which
	 * blocks need to be scrubbed (i.e. those written during the time
	 * the vdev was offline). It also ensures that the key used in
	 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If
	 * we didn't change the phys_birth, a lookup in the ARC for a
	 * remapped BP could find the data that was previously stored at
	 * this vdev + offset.
	 */
	vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
	    DVA_GET_VDEV(&bp->blk_dva[0]));
	vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
	bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
	    DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));

	DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
	DVA_SET_OFFSET(&bp->blk_dva[0], offset);
}
/*
 * If the block pointer contains any indirect DVAs, modify them to refer to
 * concrete DVAs. Note that this will sometimes not be possible, leaving
 * the indirect DVA in place. This happens if the indirect DVA spans multiple
 * segments in the mapping (i.e. it is a "split block").
 *
 * If the BP was remapped, calls the callback on the original dva (note the
 * callback can be called multiple times if the original indirect DVA refers
 * to another indirect DVA, etc).
 *
 * Returns TRUE if the BP was remapped.
 */
boolean_t
spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
{
	remap_blkptr_cb_arg_t rbca;

	if (!zfs_remap_blkptr_enable)
		return (B_FALSE);

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
		return (B_FALSE);

	/*
	 * Dedup BP's can not be remapped, because ddt_phys_select() depends
	 * on DVA[0] being the same in the BP as in the DDT (dedup table).
	 */
	if (BP_GET_DEDUP(bp))
		return (B_FALSE);

	/*
	 * Gang blocks can not be remapped, because
	 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
	 * the BP used to read the gang block header (GBH) being the same
	 * as the DVA[0] that we allocated for the GBH.
	 */
	if (BP_IS_GANG(bp))
		return (B_FALSE);

	/*
	 * Embedded BP's have no DVA to remap.
	 */
	if (BP_GET_NDVAS(bp) < 1)
		return (B_FALSE);

	/*
	 * Note: we only remap dva[0]. If we remapped other dvas, we
	 * would no longer know what their phys birth txg is.
	 */
	dva_t *dva = &bp->blk_dva[0];

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));

	if (vd->vdev_ops->vdev_op_remap == NULL)
		return (B_FALSE);

	rbca.rbca_bp = bp;
	rbca.rbca_cb = callback;
	rbca.rbca_remap_vd = vd;
	rbca.rbca_remap_offset = offset;
	rbca.rbca_cb_arg = arg;

	/*
	 * remap_blkptr_cb() will be called in order for each level of
	 * indirection, until a concrete vdev is reached or a split block is
	 * encountered. old_vd and old_offset are updated within the callback
	 * as we go from the one indirect vdev to the next one (either concrete
	 * or indirect again) in that order.
	 */
	vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);

	/* Check if the DVA wasn't remapped because it is a split block */
	if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
		return (B_FALSE);

	return (B_TRUE);
}
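/*
 * Callers may pass a NULL callback to spa_remap_blkptr() when they only care
 * whether dva[0] can be rewritten to a concrete location; when a callback is
 * supplied it fires once per level of indirection traversed, always on the
 * vdev/offset pair that is being replaced at that level.
 */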
/*
 * Undo the allocation of a DVA which happened in the given transaction group.
 */
void
metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	metaslab_t *msp;
	vdev_t *vd;
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);

	ASSERT(DVA_IS_VALID(dva));
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset,
		    (u_longlong_t)size);
		return;
	}

	ASSERT(!vd->vdev_removing);
	ASSERT(vdev_is_concrete(vd));
	ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
	ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	mutex_enter(&msp->ms_lock);
	range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
	    offset, size);

	VERIFY(!msp->ms_condensing);
	VERIFY3U(offset, >=, msp->ms_start);
	VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
	VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
	    msp->ms_size);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	range_tree_add(msp->ms_allocatable, offset, size);
	mutex_exit(&msp->ms_lock);
}
/*
 * Free the block represented by the given DVA.
 */
void
metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd = vdev_lookup_top(spa, vdev);

	ASSERT(DVA_IS_VALID(dva));
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	if (DVA_GET_GANG(dva)) {
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
	}

	metaslab_free_impl(vd, offset, size, checkpoint);
}
/*
 * Reserve some allocation slots. The reservation system must be called
 * before we call into the allocator. If there aren't any available slots
 * then the I/O will be throttled until an I/O completes and its slots are
 * freed up. The function returns true if it was successful in placing
 * the reservation.
 */
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
    zio_t *zio, int flags)
{
	uint64_t available_slots = 0;
	boolean_t slot_reserved = B_FALSE;
	uint64_t max = mc->mc_alloc_max_slots[allocator];

	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);

	uint64_t reserved_slots =
	    zfs_refcount_count(&mc->mc_alloc_slots[allocator]);
	if (reserved_slots < max)
		available_slots = max - reserved_slots;

	if (slots <= available_slots || GANG_ALLOCATION(flags) ||
	    flags & METASLAB_MUST_RESERVE) {
		/*
		 * We reserve the slots individually so that we can unreserve
		 * them individually when an I/O completes.
		 */
		for (int d = 0; d < slots; d++) {
			reserved_slots =
			    zfs_refcount_add(&mc->mc_alloc_slots[allocator],
			    zio);
		}
		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
		slot_reserved = B_TRUE;
	}

	mutex_exit(&mc->mc_lock);
	return (slot_reserved);
}

void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
    int allocator, zio_t *zio)
{
	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);
	for (int d = 0; d < slots; d++) {
		(void) zfs_refcount_remove(&mc->mc_alloc_slots[allocator],
		    zio);
	}
	mutex_exit(&mc->mc_lock);
}
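/*
 * Usage sketch: a throttled async writer calls
 * metaslab_class_throttle_reserve(mc, ndvas, allocator, zio, flags) before
 * attempting metaslab_alloc(), backs off and retries later if it returns
 * B_FALSE, and calls metaslab_class_throttle_unreserve() with the same slot
 * count and allocator once the I/O completes.
 */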
static int
metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
    uint64_t txg)
{
	metaslab_t *msp;
	spa_t *spa = vd->vdev_spa;
	int error = 0;

	if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
		return (SET_ERROR(ENXIO));

	ASSERT3P(vd->vdev_ms, !=, NULL);
	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	mutex_enter(&msp->ms_lock);

	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
		error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
		if (error == EBUSY) {
			ASSERT(msp->ms_loaded);
			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
			error = 0;
		}
	}

	if (error == 0 &&
	    !range_tree_contains(msp->ms_allocatable, offset, size))
		error = SET_ERROR(ENOENT);

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	VERIFY(!msp->ms_condensing);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
	    msp->ms_size);
	range_tree_remove(msp->ms_allocatable, offset, size);
	range_tree_clear(msp->ms_trim, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_allocating[txg & TXG_MASK],
		    offset, size);
	}

	mutex_exit(&msp->ms_lock);
	return (0);
}
typedef struct metaslab_claim_cb_arg_t {
	uint64_t	mcca_txg;
	int		mcca_error;
} metaslab_claim_cb_arg_t;

static void
metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	metaslab_claim_cb_arg_t *mcca_arg = arg;

	if (mcca_arg->mcca_error == 0) {
		mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
		    size, mcca_arg->mcca_txg);
	}
}

int
metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
	if (vd->vdev_ops->vdev_op_remap != NULL) {
		metaslab_claim_cb_arg_t arg;

		/*
		 * Only zdb(1M) can claim on indirect vdevs. This is used
		 * to detect leaks of mapped space (that are not accounted
		 * for in the obsolete counts, spacemap, or bpobj).
		 */
		ASSERT(!spa_writeable(vd->vdev_spa));
		arg.mcca_error = 0;
		arg.mcca_txg = txg;

		vd->vdev_ops->vdev_op_remap(vd, offset, size,
		    metaslab_claim_impl_cb, &arg);

		if (arg.mcca_error == 0) {
			arg.mcca_error = metaslab_claim_concrete(vd,
			    offset, size, txg);
		}
		return (arg.mcca_error);
	} else {
		return (metaslab_claim_concrete(vd, offset, size, txg));
	}
}
/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
		return (SET_ERROR(ENXIO));
	}

	ASSERT(DVA_IS_VALID(dva));

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	return (metaslab_claim_impl(vd, offset, size, txg));
}
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
    zio_alloc_list_t *zal, zio_t *zio, int allocator)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
	int error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
	ASSERT3P(zal, !=, NULL);

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags, zal, allocator);
		if (error != 0) {
			for (d--; d >= 0; d--) {
				metaslab_unalloc_dva(spa, &dva[d], txg);
				metaslab_group_alloc_decrement(spa,
				    DVA_GET_VDEV(&dva[d]), zio, flags,
				    allocator, B_FALSE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		} else {
			/*
			 * Update the metaslab group's queue depth
			 * based on the newly allocated dva.
			 */
			metaslab_group_alloc_increment(spa,
			    DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, 0);

	return (0);
}
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	/*
	 * If we have a checkpoint for the pool we need to make sure that
	 * the blocks that we free that are part of the checkpoint won't be
	 * reused until the checkpoint is discarded or we revert to it.
	 *
	 * The checkpoint flag is passed down the metaslab_free code path
	 * and is set whenever we want to add a block to the checkpoint's
	 * accounting. That is, we "checkpoint" blocks that existed at the
	 * time the checkpoint was created and are therefore referenced by
	 * the checkpointed uberblock.
	 *
	 * Note that we don't checkpoint any blocks if the current
	 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
	 * normally as they will be referenced by the checkpointed uberblock.
	 */
	boolean_t checkpoint = B_FALSE;
	if (bp->blk_birth <= spa->spa_checkpoint_txg &&
	    spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
		/*
		 * At this point, if the block is part of the checkpoint
		 * there is no way it was created in the current txg.
		 */
		ASSERT(!now);
		ASSERT3U(spa_syncing_txg(spa), ==, txg);
		checkpoint = B_TRUE;
	}

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++) {
		if (now) {
			metaslab_unalloc_dva(spa, &dva[d], txg);
		} else {
			ASSERT3U(txg, ==, spa_syncing_txg(spa));
			metaslab_free_dva(spa, &dva[d], checkpoint);
		}
	}

	spa_config_exit(spa, SCL_FREE, FTAG);
}
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_claim_dva(spa, &dva[d], txg);
		if (error != 0)
			break;
	}

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}
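/*
 * Note the two-phase structure above: a claim with txg == 0 is a dry run
 * that only verifies every DVA is still present in ms_allocatable, so the
 * real claim that follows (txg != 0) should not fail partway through and
 * never needs to unwind.
 */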
void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		atomic_add_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}

void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
		atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}
static void
metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	if (vd->vdev_ops == &vdev_indirect_ops)
		return;

	metaslab_check_free_impl(vd, offset, size);
}

static void
metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
{
	metaslab_t *msp;
	ASSERTV(spa_t *spa = vd->vdev_spa);

	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	if (vd->vdev_ops->vdev_op_remap != NULL) {
		vd->vdev_ops->vdev_op_remap(vd, offset, size,
		    metaslab_check_free_impl_cb, NULL);
		return;
	}

	ASSERT(vdev_is_concrete(vd));
	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	mutex_enter(&msp->ms_lock);
	if (msp->ms_loaded) {
		range_tree_verify_not_present(msp->ms_allocatable,
		    offset, size);
	}

	/*
	 * Check all segments that currently exist in the freeing pipeline.
	 *
	 * It would intuitively make sense to also check the current allocating
	 * tree since metaslab_unalloc_dva() exists for extents that are
	 * allocated and freed in the same sync pass within the same txg.
	 * Unfortunately there are places (e.g. the ZIL) where we allocate a
	 * segment but then we free part of it within the same txg
	 * [see zil_sync()]. Thus, we don't call range_tree_verify() in the
	 * current allocating tree.
	 */
	range_tree_verify_not_present(msp->ms_freeing, offset, size);
	range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
	range_tree_verify_not_present(msp->ms_freed, offset, size);
	for (int j = 0; j < TXG_DEFER_SIZE; j++)
		range_tree_verify_not_present(msp->ms_defer[j], offset, size);
	range_tree_verify_not_present(msp->ms_trim, offset, size);
	mutex_exit(&msp->ms_lock);
}

void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);

		if (DVA_GET_GANG(&bp->blk_dva[i]))
			size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

		ASSERT3P(vd, !=, NULL);

		metaslab_check_free_impl(vd, offset, size);
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
}
static void
metaslab_group_disable_wait(metaslab_group_t *mg)
{
	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
	while (mg->mg_disabled_updating) {
		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
	}
}

static void
metaslab_group_disabled_increment(metaslab_group_t *mg)
{
	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
	ASSERT(mg->mg_disabled_updating);

	while (mg->mg_ms_disabled >= max_disabled_ms) {
		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
	}
	mg->mg_ms_disabled++;
	ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
}

/*
 * Mark the metaslab as disabled to prevent any allocations on this metaslab.
 * We must also track how many metaslabs are currently disabled within a
 * metaslab group and limit them to prevent allocation failures from
 * occurring because all metaslabs are disabled.
 */
void
metaslab_disable(metaslab_t *msp)
{
	ASSERT(!MUTEX_HELD(&msp->ms_lock));
	metaslab_group_t *mg = msp->ms_group;

	mutex_enter(&mg->mg_ms_disabled_lock);

	/*
	 * To keep an accurate count of how many threads have disabled
	 * a specific metaslab group, we only allow one thread to mark
	 * the metaslab group at a time. This ensures that the value of
	 * ms_disabled will be accurate when we decide to mark a metaslab
	 * group as disabled. To do this we force all other threads
	 * to wait till the metaslab's mg_disabled_updating flag is no
	 * longer set.
	 */
	metaslab_group_disable_wait(mg);
	mg->mg_disabled_updating = B_TRUE;
	if (msp->ms_disabled == 0) {
		metaslab_group_disabled_increment(mg);
	}
	mutex_enter(&msp->ms_lock);
	msp->ms_disabled++;
	mutex_exit(&msp->ms_lock);

	mg->mg_disabled_updating = B_FALSE;
	cv_broadcast(&mg->mg_ms_disabled_cv);
	mutex_exit(&mg->mg_ms_disabled_lock);
}

void
metaslab_enable(metaslab_t *msp, boolean_t sync)
{
	metaslab_group_t *mg = msp->ms_group;
	spa_t *spa = mg->mg_vd->vdev_spa;

	/*
	 * Wait for the outstanding IO to be synced to prevent newly
	 * allocated blocks from being overwritten. This is used by
	 * initialize and TRIM which are modifying unallocated space.
	 */
	if (sync)
		txg_wait_synced(spa_get_dsl(spa), 0);

	mutex_enter(&mg->mg_ms_disabled_lock);
	mutex_enter(&msp->ms_lock);
	if (--msp->ms_disabled == 0) {
		mg->mg_ms_disabled--;
		cv_broadcast(&mg->mg_ms_disabled_cv);
	}
	mutex_exit(&msp->ms_lock);
	mutex_exit(&mg->mg_ms_disabled_lock);
}
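/*
 * Usage sketch: code that rewrites unallocated space (e.g. TRIM or
 * initialize, as noted above) brackets its work with metaslab_disable(msp)
 * and metaslab_enable(msp, sync); passing sync == B_TRUE forces a
 * txg_wait_synced() so allocations issued before the disable cannot land
 * under the region being modified.
 */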
static void
metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
{
	vdev_t *vd = ms->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa_meta_objset(spa);

	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));

	metaslab_unflushed_phys_t entry = {
		.msp_unflushed_txg = metaslab_unflushed_txg(ms),
	};
	uint64_t entry_size = sizeof (entry);
	uint64_t entry_offset = ms->ms_id * entry_size;

	uint64_t object = 0;
	int err = zap_lookup(mos, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
	    &object);
	if (err == ENOENT) {
		object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
		    SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
		VERIFY0(zap_add(mos, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
		    &object, tx));
	} else {
		VERIFY0(err);
	}

	dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
	    &entry, tx);
}

void
metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
{
	spa_t *spa = ms->ms_group->mg_vd->vdev_spa;

	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
		return;

	ms->ms_unflushed_txg = txg;
	metaslab_update_ondisk_flush_data(ms, tx);
}

uint64_t
metaslab_unflushed_txg(metaslab_t *ms)
{
	return (ms->ms_unflushed_txg);
}
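/*
 * On-disk layout implied above: the per-vdev object named by
 * VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS is a flat array of
 * metaslab_unflushed_phys_t entries indexed by ms_id, so metaslab N's
 * record lives at byte offset N * sizeof (metaslab_unflushed_phys_t).
 */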
#if defined(_KERNEL)
module_param(metaslab_aliquot, ulong, 0644);
MODULE_PARM_DESC(metaslab_aliquot,
	"allocation granularity (a.k.a. stripe size)");

module_param(metaslab_debug_load, int, 0644);
MODULE_PARM_DESC(metaslab_debug_load,
	"load all metaslabs when pool is first opened");

module_param(metaslab_debug_unload, int, 0644);
MODULE_PARM_DESC(metaslab_debug_unload,
	"prevent metaslabs from being unloaded");

module_param(metaslab_preload_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_preload_enabled,
	"preload potential metaslabs during reassessment");

module_param(zfs_mg_noalloc_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
	"percentage of free space for metaslab group to allow allocation");

module_param(zfs_mg_fragmentation_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
	"fragmentation for metaslab group to allow allocation");

module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
	"fragmentation for metaslab to allow allocation");

module_param(metaslab_fragmentation_factor_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
	"use the fragmentation metric to prefer less fragmented metaslabs");

module_param(metaslab_lba_weighting_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
	"prefer metaslabs with lower LBAs");

module_param(metaslab_bias_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_bias_enabled,
	"enable metaslab group biasing");

module_param(zfs_metaslab_segment_weight_enabled, int, 0644);
MODULE_PARM_DESC(zfs_metaslab_segment_weight_enabled,
	"enable segment-based metaslab selection");

module_param(zfs_metaslab_switch_threshold, int, 0644);
MODULE_PARM_DESC(zfs_metaslab_switch_threshold,
	"segment-based metaslab selection maximum buckets before switching");

module_param(metaslab_force_ganging, ulong, 0644);
MODULE_PARM_DESC(metaslab_force_ganging,
	"blocks larger than this size are forced to be gang blocks");

module_param(metaslab_df_max_search, int, 0644);
MODULE_PARM_DESC(metaslab_df_max_search,
	"max distance (bytes) to search forward before using size tree");

module_param(metaslab_df_use_largest_segment, int, 0644);
MODULE_PARM_DESC(metaslab_df_use_largest_segment,
	"when looking in size tree, use largest segment instead of exact fit");