/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#define	WITH_DF_BLOCK_ALLOCATOR
/*
 * Allow allocations to switch to gang blocks quickly. We do this to
 * avoid having to load lots of space_maps in a given txg. There are,
 * however, some cases where we want to avoid "fast" ganging and instead
 * we want to do an exhaustive search of all metaslabs on this device.
 * Currently we don't allow any gang, slog, or dump device related allocations
 * to "fast" gang.
 */
#define	CAN_FASTGANG(flags) \
	(!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
	METASLAB_GANG_AVOID)))
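/*
 * For example, CAN_FASTGANG(METASLAB_GANG_CHILD) and
 * CAN_FASTGANG(METASLAB_GANG_AVOID) both evaluate to 0, so those
 * allocations must search metaslabs exhaustively, while CAN_FASTGANG(0)
 * evaluates to 1 for an ordinary allocation.
 */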
#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)

/*
 * Metaslab granularity, in bytes. This is roughly similar to what would be
 * referred to as the "stripe size" in traditional RAID arrays. In normal
 * operation, we will try to write this amount of data to a top-level vdev
 * before moving on to the next one.
 */
unsigned long metaslab_aliquot = 512 << 10;

uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space_map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;
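/*
 * Example with the default of 200: if the free range tree could be written
 * out in its minimal form in 1MB, the existing on-disk space map is only
 * considered for condensing once it has grown to at least 2MB (see
 * metaslab_should_condense() below).
 */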
/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
int zfs_metaslab_condense_block_threshold = 4;
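/*
 * Example with the default of 4: if both the vdev and the space map object
 * use 4K blocks, a space map only becomes a condense candidate once its
 * on-disk size exceeds 4 * 4K = 16K.
 */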
/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;
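/*
 * Example (non-default setting): with zfs_mg_noalloc_threshold = 10, a
 * metaslab group drops out of the normal allocation rotation once its free
 * space falls to 10% or less, and is only used again when every group in
 * the pool is at or below 10% free.
 */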
/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or equal to
 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
 * then it will be skipped unless all metaslab groups within the metaslab
 * class have also crossed this threshold.
 */
int zfs_mg_fragmentation_threshold = 85;

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;
/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy.  Once the space map cannot satisfy
 * an allocation of this size then it switches to using more
 * aggressive strategy (i.e search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;

/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;
/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;
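/*
 * With TXG_SIZE of 4 this defaults to 8 txgs; a loaded metaslab that sees
 * no allocations for 8 consecutive txgs becomes eligible for unloading.
 */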
/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;

/*
 * Enable/disable preloading of metaslab.
 */
int metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
int metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
int metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
int metaslab_bias_enabled = B_TRUE;

static uint64_t metaslab_fragmentation(metaslab_t *);
/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_fastwrite_lock, NULL, MUTEX_DEFAULT, NULL);

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	mutex_destroy(&mc->mc_fastwrite_lock);
	kmem_free(mc, sizeof (metaslab_class_t));
}
int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}
void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}
uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}
280 metaslab_class_histogram_verify(metaslab_class_t
*mc
)
282 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
286 if ((zfs_flags
& ZFS_DEBUG_HISTOGRAM_VERIFY
) == 0)
289 mc_hist
= kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
,
292 for (c
= 0; c
< rvd
->vdev_children
; c
++) {
293 vdev_t
*tvd
= rvd
->vdev_child
[c
];
294 metaslab_group_t
*mg
= tvd
->vdev_mg
;
		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
300 if (tvd
->vdev_ishole
|| tvd
->vdev_ms_shift
== 0 ||
301 mg
->mg_class
!= mc
) {
305 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
306 mc_hist
[i
] += mg
->mg_histogram
[i
];
309 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
310 VERIFY3U(mc_hist
[i
], ==, mc
->mc_histogram
[i
]);
312 kmem_free(mc_hist
, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
);
/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
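/*
 * Illustrative example (hypothetical numbers): a class with two metaslab
 * groups, one contributing 60% of the class's space at 20% fragmentation
 * and the other 40% at 50% fragmentation, reports
 * (20 * 0.6) + (50 * 0.4) = 32% fragmentation. If any group lacks a valid
 * metric, the class-wide value is ZFS_FRAG_INVALID instead.
 */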
323 metaslab_class_fragmentation(metaslab_class_t
*mc
)
325 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
326 uint64_t fragmentation
= 0;
329 spa_config_enter(mc
->mc_spa
, SCL_VDEV
, FTAG
, RW_READER
);
331 for (c
= 0; c
< rvd
->vdev_children
; c
++) {
332 vdev_t
*tvd
= rvd
->vdev_child
[c
];
333 metaslab_group_t
*mg
= tvd
->vdev_mg
;
		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
339 if (tvd
->vdev_ishole
|| tvd
->vdev_ms_shift
== 0 ||
340 mg
->mg_class
!= mc
) {
345 * If a metaslab group does not contain a fragmentation
346 * metric then just bail out.
348 if (mg
->mg_fragmentation
== ZFS_FRAG_INVALID
) {
349 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
350 return (ZFS_FRAG_INVALID
);
354 * Determine how much this metaslab_group is contributing
355 * to the overall pool fragmentation metric.
357 fragmentation
+= mg
->mg_fragmentation
*
358 metaslab_group_get_space(mg
);
360 fragmentation
/= metaslab_class_get_space(mc
);
362 ASSERT3U(fragmentation
, <=, 100);
363 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
364 return (fragmentation
);
368 * Calculate the amount of expandable space that is available in
369 * this metaslab class. If a device is expanded then its expandable
370 * space will be the amount of allocatable space that is currently not
371 * part of this metaslab class.
374 metaslab_class_expandable_space(metaslab_class_t
*mc
)
376 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
380 spa_config_enter(mc
->mc_spa
, SCL_VDEV
, FTAG
, RW_READER
);
381 for (c
= 0; c
< rvd
->vdev_children
; c
++) {
382 vdev_t
*tvd
= rvd
->vdev_child
[c
];
383 metaslab_group_t
*mg
= tvd
->vdev_mg
;
385 if (tvd
->vdev_ishole
|| tvd
->vdev_ms_shift
== 0 ||
386 mg
->mg_class
!= mc
) {
390 space
+= tvd
->vdev_max_asize
- tvd
->vdev_asize
;
392 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
397 * ==========================================================================
399 * ==========================================================================
402 metaslab_compare(const void *x1
, const void *x2
)
404 const metaslab_t
*m1
= x1
;
405 const metaslab_t
*m2
= x2
;
407 if (m1
->ms_weight
< m2
->ms_weight
)
409 if (m1
->ms_weight
> m2
->ms_weight
)
413 * If the weights are identical, use the offset to force uniqueness.
415 if (m1
->ms_start
< m2
->ms_start
)
417 if (m1
->ms_start
> m2
->ms_start
)
420 ASSERT3P(m1
, ==, m2
);
426 * Update the allocatable flag and the metaslab group's capacity.
427 * The allocatable flag is set to true if the capacity is below
428 * the zfs_mg_noalloc_threshold. If a metaslab group transitions
429 * from allocatable to non-allocatable or vice versa then the metaslab
430 * group's class is updated to reflect the transition.
433 metaslab_group_alloc_update(metaslab_group_t
*mg
)
435 vdev_t
*vd
= mg
->mg_vd
;
436 metaslab_class_t
*mc
= mg
->mg_class
;
437 vdev_stat_t
*vs
= &vd
->vdev_stat
;
438 boolean_t was_allocatable
;
440 ASSERT(vd
== vd
->vdev_top
);
442 mutex_enter(&mg
->mg_lock
);
443 was_allocatable
= mg
->mg_allocatable
;
445 mg
->mg_free_capacity
= ((vs
->vs_space
- vs
->vs_alloc
) * 100) /
449 * A metaslab group is considered allocatable if it has plenty
450 * of free space or is not heavily fragmented. We only take
451 * fragmentation into account if the metaslab group has a valid
452 * fragmentation metric (i.e. a value between 0 and 100).
454 mg
->mg_allocatable
= (mg
->mg_free_capacity
> zfs_mg_noalloc_threshold
&&
455 (mg
->mg_fragmentation
== ZFS_FRAG_INVALID
||
456 mg
->mg_fragmentation
<= zfs_mg_fragmentation_threshold
));
459 * The mc_alloc_groups maintains a count of the number of
460 * groups in this metaslab class that are still above the
461 * zfs_mg_noalloc_threshold. This is used by the allocating
462 * threads to determine if they should avoid allocations to
463 * a given group. The allocator will avoid allocations to a group
464 * if that group has reached or is below the zfs_mg_noalloc_threshold
465 * and there are still other groups that are above the threshold.
466 * When a group transitions from allocatable to non-allocatable or
467 * vice versa we update the metaslab class to reflect that change.
468 * When the mc_alloc_groups value drops to 0 that means that all
469 * groups have reached the zfs_mg_noalloc_threshold making all groups
470 * eligible for allocations. This effectively means that all devices
471 * are balanced again.
473 if (was_allocatable
&& !mg
->mg_allocatable
)
474 mc
->mc_alloc_groups
--;
475 else if (!was_allocatable
&& mg
->mg_allocatable
)
476 mc
->mc_alloc_groups
++;
478 mutex_exit(&mg
->mg_lock
);
482 metaslab_group_create(metaslab_class_t
*mc
, vdev_t
*vd
)
484 metaslab_group_t
*mg
;
486 mg
= kmem_zalloc(sizeof (metaslab_group_t
), KM_SLEEP
);
487 mutex_init(&mg
->mg_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
488 avl_create(&mg
->mg_metaslab_tree
, metaslab_compare
,
489 sizeof (metaslab_t
), offsetof(struct metaslab
, ms_group_node
));
492 mg
->mg_activation_count
= 0;
494 mg
->mg_taskq
= taskq_create("metaslab_group_taskq", metaslab_load_pct
,
495 maxclsyspri
, 10, INT_MAX
, TASKQ_THREADS_CPU_PCT
| TASKQ_DYNAMIC
);
501 metaslab_group_destroy(metaslab_group_t
*mg
)
503 ASSERT(mg
->mg_prev
== NULL
);
504 ASSERT(mg
->mg_next
== NULL
);
506 * We may have gone below zero with the activation count
507 * either because we never activated in the first place or
508 * because we're done, and possibly removing the vdev.
510 ASSERT(mg
->mg_activation_count
<= 0);
512 taskq_destroy(mg
->mg_taskq
);
513 avl_destroy(&mg
->mg_metaslab_tree
);
514 mutex_destroy(&mg
->mg_lock
);
515 kmem_free(mg
, sizeof (metaslab_group_t
));
519 metaslab_group_activate(metaslab_group_t
*mg
)
521 metaslab_class_t
*mc
= mg
->mg_class
;
522 metaslab_group_t
*mgprev
, *mgnext
;
524 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALLOC
, RW_WRITER
));
526 ASSERT(mc
->mc_rotor
!= mg
);
527 ASSERT(mg
->mg_prev
== NULL
);
528 ASSERT(mg
->mg_next
== NULL
);
529 ASSERT(mg
->mg_activation_count
<= 0);
531 if (++mg
->mg_activation_count
<= 0)
534 mg
->mg_aliquot
= metaslab_aliquot
* MAX(1, mg
->mg_vd
->vdev_children
);
535 metaslab_group_alloc_update(mg
);
537 if ((mgprev
= mc
->mc_rotor
) == NULL
) {
541 mgnext
= mgprev
->mg_next
;
542 mg
->mg_prev
= mgprev
;
543 mg
->mg_next
= mgnext
;
544 mgprev
->mg_next
= mg
;
545 mgnext
->mg_prev
= mg
;
551 metaslab_group_passivate(metaslab_group_t
*mg
)
553 metaslab_class_t
*mc
= mg
->mg_class
;
554 metaslab_group_t
*mgprev
, *mgnext
;
556 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALLOC
, RW_WRITER
));
558 if (--mg
->mg_activation_count
!= 0) {
559 ASSERT(mc
->mc_rotor
!= mg
);
560 ASSERT(mg
->mg_prev
== NULL
);
561 ASSERT(mg
->mg_next
== NULL
);
562 ASSERT(mg
->mg_activation_count
< 0);
566 taskq_wait_outstanding(mg
->mg_taskq
, 0);
567 metaslab_group_alloc_update(mg
);
569 mgprev
= mg
->mg_prev
;
570 mgnext
= mg
->mg_next
;
575 mc
->mc_rotor
= mgnext
;
576 mgprev
->mg_next
= mgnext
;
577 mgnext
->mg_prev
= mgprev
;
585 metaslab_group_get_space(metaslab_group_t
*mg
)
587 return ((1ULL << mg
->mg_vd
->vdev_ms_shift
) * mg
->mg_vd
->vdev_ms_count
);
591 metaslab_group_histogram_verify(metaslab_group_t
*mg
)
594 vdev_t
*vd
= mg
->mg_vd
;
595 uint64_t ashift
= vd
->vdev_ashift
;
598 if ((zfs_flags
& ZFS_DEBUG_HISTOGRAM_VERIFY
) == 0)
601 mg_hist
= kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
,
604 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE
, >=,
605 SPACE_MAP_HISTOGRAM_SIZE
+ ashift
);
607 for (m
= 0; m
< vd
->vdev_ms_count
; m
++) {
608 metaslab_t
*msp
= vd
->vdev_ms
[m
];
610 if (msp
->ms_sm
== NULL
)
613 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++)
614 mg_hist
[i
+ ashift
] +=
615 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
618 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
619 VERIFY3U(mg_hist
[i
], ==, mg
->mg_histogram
[i
]);
621 kmem_free(mg_hist
, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
);
625 metaslab_group_histogram_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
627 metaslab_class_t
*mc
= mg
->mg_class
;
628 uint64_t ashift
= mg
->mg_vd
->vdev_ashift
;
631 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
632 if (msp
->ms_sm
== NULL
)
635 mutex_enter(&mg
->mg_lock
);
636 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
637 mg
->mg_histogram
[i
+ ashift
] +=
638 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
639 mc
->mc_histogram
[i
+ ashift
] +=
640 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
642 mutex_exit(&mg
->mg_lock
);
646 metaslab_group_histogram_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
648 metaslab_class_t
*mc
= mg
->mg_class
;
649 uint64_t ashift
= mg
->mg_vd
->vdev_ashift
;
652 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
653 if (msp
->ms_sm
== NULL
)
656 mutex_enter(&mg
->mg_lock
);
657 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
658 ASSERT3U(mg
->mg_histogram
[i
+ ashift
], >=,
659 msp
->ms_sm
->sm_phys
->smp_histogram
[i
]);
660 ASSERT3U(mc
->mc_histogram
[i
+ ashift
], >=,
661 msp
->ms_sm
->sm_phys
->smp_histogram
[i
]);
663 mg
->mg_histogram
[i
+ ashift
] -=
664 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
665 mc
->mc_histogram
[i
+ ashift
] -=
666 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
668 mutex_exit(&mg
->mg_lock
);
672 metaslab_group_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
674 ASSERT(msp
->ms_group
== NULL
);
675 mutex_enter(&mg
->mg_lock
);
678 avl_add(&mg
->mg_metaslab_tree
, msp
);
679 mutex_exit(&mg
->mg_lock
);
681 mutex_enter(&msp
->ms_lock
);
682 metaslab_group_histogram_add(mg
, msp
);
683 mutex_exit(&msp
->ms_lock
);
687 metaslab_group_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
689 mutex_enter(&msp
->ms_lock
);
690 metaslab_group_histogram_remove(mg
, msp
);
691 mutex_exit(&msp
->ms_lock
);
693 mutex_enter(&mg
->mg_lock
);
694 ASSERT(msp
->ms_group
== mg
);
695 avl_remove(&mg
->mg_metaslab_tree
, msp
);
696 msp
->ms_group
= NULL
;
697 mutex_exit(&mg
->mg_lock
);
701 metaslab_group_sort(metaslab_group_t
*mg
, metaslab_t
*msp
, uint64_t weight
)
704 * Although in principle the weight can be any value, in
705 * practice we do not use values in the range [1, 511].
707 ASSERT(weight
>= SPA_MINBLOCKSIZE
|| weight
== 0);
708 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
710 mutex_enter(&mg
->mg_lock
);
711 ASSERT(msp
->ms_group
== mg
);
712 avl_remove(&mg
->mg_metaslab_tree
, msp
);
713 msp
->ms_weight
= weight
;
714 avl_add(&mg
->mg_metaslab_tree
, msp
);
715 mutex_exit(&mg
->mg_lock
);
/*
 * Calculate the fragmentation for a given metaslab group. We can use
 * a simple average here since all metaslabs within the group must have
 * the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
 * group have a fragmentation metric.
 */
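/*
 * Example of the rule above (hypothetical numbers): a group with 100
 * metaslabs where only 40 have a valid metric returns ZFS_FRAG_INVALID;
 * with 60 valid metaslabs averaging 35%, the group reports 35%.
 */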
726 metaslab_group_fragmentation(metaslab_group_t
*mg
)
728 vdev_t
*vd
= mg
->mg_vd
;
729 uint64_t fragmentation
= 0;
730 uint64_t valid_ms
= 0;
733 for (m
= 0; m
< vd
->vdev_ms_count
; m
++) {
734 metaslab_t
*msp
= vd
->vdev_ms
[m
];
736 if (msp
->ms_fragmentation
== ZFS_FRAG_INVALID
)
740 fragmentation
+= msp
->ms_fragmentation
;
743 if (valid_ms
<= vd
->vdev_ms_count
/ 2)
744 return (ZFS_FRAG_INVALID
);
746 fragmentation
/= valid_ms
;
747 ASSERT3U(fragmentation
, <=, 100);
748 return (fragmentation
);
752 * Determine if a given metaslab group should skip allocations. A metaslab
753 * group should avoid allocations if its free capacity is less than the
754 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
755 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
756 * that can still handle allocations.
759 metaslab_group_allocatable(metaslab_group_t
*mg
)
761 vdev_t
*vd
= mg
->mg_vd
;
762 spa_t
*spa
= vd
->vdev_spa
;
763 metaslab_class_t
*mc
= mg
->mg_class
;
	/*
	 * We use two key metrics to determine if a metaslab group is
	 * considered allocatable -- free space and fragmentation. If
	 * the free space is greater than the free space threshold and
	 * the fragmentation is less than the fragmentation threshold then
	 * consider the group allocatable. There are two cases when we will
	 * not consider these key metrics. The first is if the group is
	 * associated with a slog device and the second is if all groups
	 * in this metaslab class have already been considered ineligible
	 * for allocations.
	 */
776 return ((mg
->mg_free_capacity
> zfs_mg_noalloc_threshold
&&
777 (mg
->mg_fragmentation
== ZFS_FRAG_INVALID
||
778 mg
->mg_fragmentation
<= zfs_mg_fragmentation_threshold
)) ||
779 mc
!= spa_normal_class(spa
) || mc
->mc_alloc_groups
== 0);
783 * ==========================================================================
784 * Range tree callbacks
785 * ==========================================================================
789 * Comparison function for the private size-ordered tree. Tree is sorted
790 * by size, larger sizes at the end of the tree.
793 metaslab_rangesize_compare(const void *x1
, const void *x2
)
795 const range_seg_t
*r1
= x1
;
796 const range_seg_t
*r2
= x2
;
797 uint64_t rs_size1
= r1
->rs_end
- r1
->rs_start
;
798 uint64_t rs_size2
= r2
->rs_end
- r2
->rs_start
;
800 if (rs_size1
< rs_size2
)
802 if (rs_size1
> rs_size2
)
805 if (r1
->rs_start
< r2
->rs_start
)
808 if (r1
->rs_start
> r2
->rs_start
)
815 * Create any block allocator specific components. The current allocators
816 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
819 metaslab_rt_create(range_tree_t
*rt
, void *arg
)
821 metaslab_t
*msp
= arg
;
823 ASSERT3P(rt
->rt_arg
, ==, msp
);
824 ASSERT(msp
->ms_tree
== NULL
);
826 avl_create(&msp
->ms_size_tree
, metaslab_rangesize_compare
,
827 sizeof (range_seg_t
), offsetof(range_seg_t
, rs_pp_node
));
831 * Destroy the block allocator specific components.
834 metaslab_rt_destroy(range_tree_t
*rt
, void *arg
)
836 metaslab_t
*msp
= arg
;
838 ASSERT3P(rt
->rt_arg
, ==, msp
);
839 ASSERT3P(msp
->ms_tree
, ==, rt
);
840 ASSERT0(avl_numnodes(&msp
->ms_size_tree
));
842 avl_destroy(&msp
->ms_size_tree
);
846 metaslab_rt_add(range_tree_t
*rt
, range_seg_t
*rs
, void *arg
)
848 metaslab_t
*msp
= arg
;
850 ASSERT3P(rt
->rt_arg
, ==, msp
);
851 ASSERT3P(msp
->ms_tree
, ==, rt
);
852 VERIFY(!msp
->ms_condensing
);
853 avl_add(&msp
->ms_size_tree
, rs
);
857 metaslab_rt_remove(range_tree_t
*rt
, range_seg_t
*rs
, void *arg
)
859 metaslab_t
*msp
= arg
;
861 ASSERT3P(rt
->rt_arg
, ==, msp
);
862 ASSERT3P(msp
->ms_tree
, ==, rt
);
863 VERIFY(!msp
->ms_condensing
);
864 avl_remove(&msp
->ms_size_tree
, rs
);
868 metaslab_rt_vacate(range_tree_t
*rt
, void *arg
)
870 metaslab_t
*msp
= arg
;
872 ASSERT3P(rt
->rt_arg
, ==, msp
);
873 ASSERT3P(msp
->ms_tree
, ==, rt
);
876 * Normally one would walk the tree freeing nodes along the way.
877 * Since the nodes are shared with the range trees we can avoid
878 * walking all nodes and just reinitialize the avl tree. The nodes
879 * will be freed by the range tree, so we don't want to free them here.
881 avl_create(&msp
->ms_size_tree
, metaslab_rangesize_compare
,
882 sizeof (range_seg_t
), offsetof(range_seg_t
, rs_pp_node
));
885 static range_tree_ops_t metaslab_rt_ops
= {
894 * ==========================================================================
895 * Metaslab block operations
896 * ==========================================================================
900 * Return the maximum contiguous segment within the metaslab.
903 metaslab_block_maxsize(metaslab_t
*msp
)
905 avl_tree_t
*t
= &msp
->ms_size_tree
;
908 if (t
== NULL
|| (rs
= avl_last(t
)) == NULL
)
911 return (rs
->rs_end
- rs
->rs_start
);
915 metaslab_block_alloc(metaslab_t
*msp
, uint64_t size
)
918 range_tree_t
*rt
= msp
->ms_tree
;
920 VERIFY(!msp
->ms_condensing
);
922 start
= msp
->ms_ops
->msop_alloc(msp
, size
);
923 if (start
!= -1ULL) {
924 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
926 VERIFY0(P2PHASE(start
, 1ULL << vd
->vdev_ashift
));
927 VERIFY0(P2PHASE(size
, 1ULL << vd
->vdev_ashift
));
928 VERIFY3U(range_tree_space(rt
) - size
, <=, msp
->ms_size
);
929 range_tree_remove(rt
, start
, size
);
935 * ==========================================================================
936 * Common allocator routines
937 * ==========================================================================
940 #if defined(WITH_FF_BLOCK_ALLOCATOR) || \
941 defined(WITH_DF_BLOCK_ALLOCATOR) || \
942 defined(WITH_CF_BLOCK_ALLOCATOR)
944 * This is a helper function that can be used by the allocator to find
945 * a suitable block to allocate. This will search the specified AVL
946 * tree looking for a block that matches the specified criteria.
949 metaslab_block_picker(avl_tree_t
*t
, uint64_t *cursor
, uint64_t size
,
952 range_seg_t
*rs
, rsearch
;
955 rsearch
.rs_start
= *cursor
;
956 rsearch
.rs_end
= *cursor
+ size
;
958 rs
= avl_find(t
, &rsearch
, &where
);
960 rs
= avl_nearest(t
, where
, AVL_AFTER
);
963 uint64_t offset
= P2ROUNDUP(rs
->rs_start
, align
);
965 if (offset
+ size
<= rs
->rs_end
) {
966 *cursor
= offset
+ size
;
969 rs
= AVL_NEXT(t
, rs
);
973 * If we know we've searched the whole map (*cursor == 0), give up.
974 * Otherwise, reset the cursor to the beginning and try again.
980 return (metaslab_block_picker(t
, cursor
, size
, align
));
982 #endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */
984 #if defined(WITH_FF_BLOCK_ALLOCATOR)
986 * ==========================================================================
987 * The first-fit block allocator
988 * ==========================================================================
991 metaslab_ff_alloc(metaslab_t
*msp
, uint64_t size
)
994 * Find the largest power of 2 block size that evenly divides the
995 * requested size. This is used to try to allocate blocks with similar
996 * alignment from the same area of the metaslab (i.e. same cursor
997 * bucket) but it does not guarantee that other allocations sizes
998 * may exist in the same region.
1000 uint64_t align
= size
& -size
;
1001 uint64_t *cursor
= &msp
->ms_lbas
[highbit64(align
) - 1];
1002 avl_tree_t
*t
= &msp
->ms_tree
->rt_root
;
1004 return (metaslab_block_picker(t
, cursor
, size
, align
));
1007 static metaslab_ops_t metaslab_ff_ops
= {
1011 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_ff_ops
;
#endif /* WITH_FF_BLOCK_ALLOCATOR */
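/*
 * Illustration of the cursor-bucket alignment trick shared by the first-fit
 * and dynamic allocators (example values only): for a 24K request,
 * size & -size = 0x6000 & -0x6000 = 0x2000, i.e. 8K, the largest power of
 * two that evenly divides the request, so the allocation is attempted from
 * the 8K-aligned cursor bucket (ms_lbas[highbit64(0x2000) - 1]).
 */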
1014 #if defined(WITH_DF_BLOCK_ALLOCATOR)
1016 * ==========================================================================
1017 * Dynamic block allocator -
1018 * Uses the first fit allocation scheme until space get low and then
1019 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
1020 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1021 * ==========================================================================
1024 metaslab_df_alloc(metaslab_t
*msp
, uint64_t size
)
1027 * Find the largest power of 2 block size that evenly divides the
1028 * requested size. This is used to try to allocate blocks with similar
1029 * alignment from the same area of the metaslab (i.e. same cursor
1030 * bucket) but it does not guarantee that other allocations sizes
1031 * may exist in the same region.
1033 uint64_t align
= size
& -size
;
1034 uint64_t *cursor
= &msp
->ms_lbas
[highbit64(align
) - 1];
1035 range_tree_t
*rt
= msp
->ms_tree
;
1036 avl_tree_t
*t
= &rt
->rt_root
;
1037 uint64_t max_size
= metaslab_block_maxsize(msp
);
1038 int free_pct
= range_tree_space(rt
) * 100 / msp
->ms_size
;
1040 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1041 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&msp
->ms_size_tree
));
1043 if (max_size
< size
)
1047 * If we're running low on space switch to using the size
1048 * sorted AVL tree (best-fit).
1050 if (max_size
< metaslab_df_alloc_threshold
||
1051 free_pct
< metaslab_df_free_pct
) {
1052 t
= &msp
->ms_size_tree
;
1056 return (metaslab_block_picker(t
, cursor
, size
, 1ULL));
1059 static metaslab_ops_t metaslab_df_ops
= {
1063 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_df_ops
;
1064 #endif /* WITH_DF_BLOCK_ALLOCATOR */
1066 #if defined(WITH_CF_BLOCK_ALLOCATOR)
1068 * ==========================================================================
1069 * Cursor fit block allocator -
1070 * Select the largest region in the metaslab, set the cursor to the beginning
1071 * of the range and the cursor_end to the end of the range. As allocations
1072 * are made advance the cursor. Continue allocating from the cursor until
1073 * the range is exhausted and then find a new range.
1074 * ==========================================================================
1077 metaslab_cf_alloc(metaslab_t
*msp
, uint64_t size
)
1079 range_tree_t
*rt
= msp
->ms_tree
;
1080 avl_tree_t
*t
= &msp
->ms_size_tree
;
1081 uint64_t *cursor
= &msp
->ms_lbas
[0];
1082 uint64_t *cursor_end
= &msp
->ms_lbas
[1];
1083 uint64_t offset
= 0;
1085 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1086 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&rt
->rt_root
));
1088 ASSERT3U(*cursor_end
, >=, *cursor
);
1090 if ((*cursor
+ size
) > *cursor_end
) {
1093 rs
= avl_last(&msp
->ms_size_tree
);
1094 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
)
1097 *cursor
= rs
->rs_start
;
1098 *cursor_end
= rs
->rs_end
;
1107 static metaslab_ops_t metaslab_cf_ops
= {
1111 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_cf_ops
;
1112 #endif /* WITH_CF_BLOCK_ALLOCATOR */
1114 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
1116 * ==========================================================================
1117 * New dynamic fit allocator -
1118 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1119 * contiguous blocks. If no region is found then just use the largest segment
1121 * ==========================================================================
1125 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1126 * to request from the allocator.
1128 uint64_t metaslab_ndf_clump_shift
= 4;
1131 metaslab_ndf_alloc(metaslab_t
*msp
, uint64_t size
)
1133 avl_tree_t
*t
= &msp
->ms_tree
->rt_root
;
1135 range_seg_t
*rs
, rsearch
;
1136 uint64_t hbit
= highbit64(size
);
1137 uint64_t *cursor
= &msp
->ms_lbas
[hbit
- 1];
1138 uint64_t max_size
= metaslab_block_maxsize(msp
);
1140 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1141 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&msp
->ms_size_tree
));
1143 if (max_size
< size
)
1146 rsearch
.rs_start
= *cursor
;
1147 rsearch
.rs_end
= *cursor
+ size
;
1149 rs
= avl_find(t
, &rsearch
, &where
);
1150 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
) {
1151 t
= &msp
->ms_size_tree
;
1153 rsearch
.rs_start
= 0;
1154 rsearch
.rs_end
= MIN(max_size
,
1155 1ULL << (hbit
+ metaslab_ndf_clump_shift
));
1156 rs
= avl_find(t
, &rsearch
, &where
);
1158 rs
= avl_nearest(t
, where
, AVL_AFTER
);
1162 if ((rs
->rs_end
- rs
->rs_start
) >= size
) {
1163 *cursor
= rs
->rs_start
+ size
;
1164 return (rs
->rs_start
);
1169 static metaslab_ops_t metaslab_ndf_ops
= {
1173 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_ndf_ops
;
1174 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
1178 * ==========================================================================
1180 * ==========================================================================
1184 * Wait for any in-progress metaslab loads to complete.
1187 metaslab_load_wait(metaslab_t
*msp
)
1189 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1191 while (msp
->ms_loading
) {
1192 ASSERT(!msp
->ms_loaded
);
1193 cv_wait(&msp
->ms_load_cv
, &msp
->ms_lock
);
1198 metaslab_load(metaslab_t
*msp
)
1203 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1204 ASSERT(!msp
->ms_loaded
);
1205 ASSERT(!msp
->ms_loading
);
1207 msp
->ms_loading
= B_TRUE
;
1210 * If the space map has not been allocated yet, then treat
1211 * all the space in the metaslab as free and add it to the
1214 if (msp
->ms_sm
!= NULL
)
1215 error
= space_map_load(msp
->ms_sm
, msp
->ms_tree
, SM_FREE
);
1217 range_tree_add(msp
->ms_tree
, msp
->ms_start
, msp
->ms_size
);
1219 msp
->ms_loaded
= (error
== 0);
1220 msp
->ms_loading
= B_FALSE
;
1222 if (msp
->ms_loaded
) {
1223 for (t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1224 range_tree_walk(msp
->ms_defertree
[t
],
1225 range_tree_remove
, msp
->ms_tree
);
1228 cv_broadcast(&msp
->ms_load_cv
);
1233 metaslab_unload(metaslab_t
*msp
)
1235 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1236 range_tree_vacate(msp
->ms_tree
, NULL
, NULL
);
1237 msp
->ms_loaded
= B_FALSE
;
1238 msp
->ms_weight
&= ~METASLAB_ACTIVE_MASK
;
1242 metaslab_init(metaslab_group_t
*mg
, uint64_t id
, uint64_t object
, uint64_t txg
,
1245 vdev_t
*vd
= mg
->mg_vd
;
1246 objset_t
*mos
= vd
->vdev_spa
->spa_meta_objset
;
1250 ms
= kmem_zalloc(sizeof (metaslab_t
), KM_SLEEP
);
1251 mutex_init(&ms
->ms_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1252 cv_init(&ms
->ms_load_cv
, NULL
, CV_DEFAULT
, NULL
);
1254 ms
->ms_start
= id
<< vd
->vdev_ms_shift
;
1255 ms
->ms_size
= 1ULL << vd
->vdev_ms_shift
;
1258 * We only open space map objects that already exist. All others
1259 * will be opened when we finally allocate an object for it.
1262 error
= space_map_open(&ms
->ms_sm
, mos
, object
, ms
->ms_start
,
1263 ms
->ms_size
, vd
->vdev_ashift
, &ms
->ms_lock
);
1266 kmem_free(ms
, sizeof (metaslab_t
));
1270 ASSERT(ms
->ms_sm
!= NULL
);
1274 * We create the main range tree here, but we don't create the
1275 * alloctree and freetree until metaslab_sync_done(). This serves
1276 * two purposes: it allows metaslab_sync_done() to detect the
1277 * addition of new space; and for debugging, it ensures that we'd
1278 * data fault on any attempt to use this metaslab before it's ready.
1280 ms
->ms_tree
= range_tree_create(&metaslab_rt_ops
, ms
, &ms
->ms_lock
);
1281 metaslab_group_add(mg
, ms
);
1283 ms
->ms_fragmentation
= metaslab_fragmentation(ms
);
1284 ms
->ms_ops
= mg
->mg_class
->mc_ops
;
1287 * If we're opening an existing pool (txg == 0) or creating
1288 * a new one (txg == TXG_INITIAL), all space is available now.
1289 * If we're adding space to an existing pool, the new space
1290 * does not become available until after this txg has synced.
1292 if (txg
<= TXG_INITIAL
)
1293 metaslab_sync_done(ms
, 0);
	/*
	 * If metaslab_debug_load is set and we're initializing a metaslab
	 * that has an allocated space_map object then load its space
	 * map so that we can verify frees.
	 */
1300 if (metaslab_debug_load
&& ms
->ms_sm
!= NULL
) {
1301 mutex_enter(&ms
->ms_lock
);
1302 VERIFY0(metaslab_load(ms
));
1303 mutex_exit(&ms
->ms_lock
);
1307 vdev_dirty(vd
, 0, NULL
, txg
);
1308 vdev_dirty(vd
, VDD_METASLAB
, ms
, txg
);
1317 metaslab_fini(metaslab_t
*msp
)
1321 metaslab_group_t
*mg
= msp
->ms_group
;
1323 metaslab_group_remove(mg
, msp
);
1325 mutex_enter(&msp
->ms_lock
);
1327 VERIFY(msp
->ms_group
== NULL
);
1328 vdev_space_update(mg
->mg_vd
, -space_map_allocated(msp
->ms_sm
),
1330 space_map_close(msp
->ms_sm
);
1332 metaslab_unload(msp
);
1333 range_tree_destroy(msp
->ms_tree
);
1335 for (t
= 0; t
< TXG_SIZE
; t
++) {
1336 range_tree_destroy(msp
->ms_alloctree
[t
]);
1337 range_tree_destroy(msp
->ms_freetree
[t
]);
1340 for (t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1341 range_tree_destroy(msp
->ms_defertree
[t
]);
1344 ASSERT0(msp
->ms_deferspace
);
1346 mutex_exit(&msp
->ms_lock
);
1347 cv_destroy(&msp
->ms_load_cv
);
1348 mutex_destroy(&msp
->ms_lock
);
1350 kmem_free(msp
, sizeof (metaslab_t
));
1353 #define FRAGMENTATION_TABLE_SIZE 17
/*
 * This table defines a segment size based fragmentation metric that will
 * allow each metaslab to derive its own fragmentation value. This is done
 * by calculating the space in each bucket of the spacemap histogram and
 * multiplying that by the fragmentation metric in this table. Doing
 * this for all buckets and dividing it by the total amount of free
 * space in this metaslab (i.e. the total free space in all buckets) gives
 * us the fragmentation metric. This means that a high fragmentation metric
 * equates to most of the free space being comprised of small segments.
 * Conversely, if the metric is low, then most of the free space is in
 * large segments. A 10% change in fragmentation equates to approximately
 * double the number of segments.
 *
 * This table defines 0% fragmented space using 16MB segments. Testing has
 * shown that segments that are greater than or equal to 16MB do not suffer
 * from drastic performance problems. Using this value, we derive the rest
 * of the table. Since the fragmentation value is never stored on disk, it
 * is possible to change these calculations in the future.
 */
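/*
 * Worked example (hypothetical table values): if a metaslab has 1GB of free
 * space, with 256MB of it in 512K segments (table value of, say, 30) and
 * 768MB in 16MB segments (table value 0), its fragmentation metric is
 * (256MB * 30 + 768MB * 0) / 1GB = ~7%.
 */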
int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
/*
 * Calculate the metaslab's fragmentation metric. A return value
 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
 * not support this metric. Otherwise, the return value should be in the
 * range [0, 100].
 */
1400 metaslab_fragmentation(metaslab_t
*msp
)
1402 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1403 uint64_t fragmentation
= 0;
1405 boolean_t feature_enabled
= spa_feature_is_enabled(spa
,
1406 SPA_FEATURE_SPACEMAP_HISTOGRAM
);
1409 if (!feature_enabled
)
1410 return (ZFS_FRAG_INVALID
);
1413 * A null space map means that the entire metaslab is free
1414 * and thus is not fragmented.
1416 if (msp
->ms_sm
== NULL
)
1420 * If this metaslab's space_map has not been upgraded, flag it
1421 * so that we upgrade next time we encounter it.
1423 if (msp
->ms_sm
->sm_dbuf
->db_size
!= sizeof (space_map_phys_t
)) {
1424 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
1426 if (spa_writeable(vd
->vdev_spa
)) {
1427 uint64_t txg
= spa_syncing_txg(spa
);
1429 msp
->ms_condense_wanted
= B_TRUE
;
1430 vdev_dirty(vd
, VDD_METASLAB
, msp
, txg
+ 1);
1431 spa_dbgmsg(spa
, "txg %llu, requesting force condense: "
1432 "msp %p, vd %p", txg
, msp
, vd
);
1434 return (ZFS_FRAG_INVALID
);
1437 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
1439 uint8_t shift
= msp
->ms_sm
->sm_shift
;
1440 int idx
= MIN(shift
- SPA_MINBLOCKSHIFT
+ i
,
1441 FRAGMENTATION_TABLE_SIZE
- 1);
1443 if (msp
->ms_sm
->sm_phys
->smp_histogram
[i
] == 0)
1446 space
= msp
->ms_sm
->sm_phys
->smp_histogram
[i
] << (i
+ shift
);
1449 ASSERT3U(idx
, <, FRAGMENTATION_TABLE_SIZE
);
1450 fragmentation
+= space
* zfs_frag_table
[idx
];
1454 fragmentation
/= total
;
1455 ASSERT3U(fragmentation
, <=, 100);
1456 return (fragmentation
);
/*
 * Compute a weight -- a selection preference value -- for the given metaslab.
 * This is based on the amount of free space, the level of fragmentation,
 * the LBA range, and whether the metaslab is loaded.
 */
1465 metaslab_weight(metaslab_t
*msp
)
1467 metaslab_group_t
*mg
= msp
->ms_group
;
1468 vdev_t
*vd
= mg
->mg_vd
;
1469 uint64_t weight
, space
;
1471 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1474 * This vdev is in the process of being removed so there is nothing
1475 * for us to do here.
1477 if (vd
->vdev_removing
) {
1478 ASSERT0(space_map_allocated(msp
->ms_sm
));
1479 ASSERT0(vd
->vdev_ms_shift
);
1484 * The baseline weight is the metaslab's free space.
1486 space
= msp
->ms_size
- space_map_allocated(msp
->ms_sm
);
1488 msp
->ms_fragmentation
= metaslab_fragmentation(msp
);
1489 if (metaslab_fragmentation_factor_enabled
&&
1490 msp
->ms_fragmentation
!= ZFS_FRAG_INVALID
) {
1492 * Use the fragmentation information to inversely scale
1493 * down the baseline weight. We need to ensure that we
1494 * don't exclude this metaslab completely when it's 100%
1495 * fragmented. To avoid this we reduce the fragmented value
1498 space
= (space
* (100 - (msp
->ms_fragmentation
- 1))) / 100;
1501 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1502 * this metaslab again. The fragmentation metric may have
1503 * decreased the space to something smaller than
1504 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1505 * so that we can consume any remaining space.
1507 if (space
> 0 && space
< SPA_MINBLOCKSIZE
)
1508 space
= SPA_MINBLOCKSIZE
;
	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1. We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
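	/*
	 * Worked example of the 2x..1x multiplier below (example values
	 * only): with vdev_ms_count = 200, metaslab 0 gets weight * 2,
	 * metaslab 100 gets roughly weight * 1.5, and metaslab 199 gets
	 * just over weight * 1.
	 */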
1521 if (!vd
->vdev_nonrot
&& metaslab_lba_weighting_enabled
) {
1522 weight
= 2 * weight
- (msp
->ms_id
* weight
) / vd
->vdev_ms_count
;
1523 ASSERT(weight
>= space
&& weight
<= 2 * space
);
	/*
	 * If this metaslab is one we're actively using, adjust its
	 * weight to make it preferable to any inactive metaslab so
	 * we'll polish it off. If the fragmentation on this metaslab
	 * has exceeded our threshold, then don't mark it active.
	 */
1532 if (msp
->ms_loaded
&& msp
->ms_fragmentation
!= ZFS_FRAG_INVALID
&&
1533 msp
->ms_fragmentation
<= zfs_metaslab_fragmentation_threshold
) {
1534 weight
|= (msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
1541 metaslab_activate(metaslab_t
*msp
, uint64_t activation_weight
)
1543 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1545 if ((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) == 0) {
1546 metaslab_load_wait(msp
);
1547 if (!msp
->ms_loaded
) {
1548 int error
= metaslab_load(msp
);
1550 metaslab_group_sort(msp
->ms_group
, msp
, 0);
1555 metaslab_group_sort(msp
->ms_group
, msp
,
1556 msp
->ms_weight
| activation_weight
);
1558 ASSERT(msp
->ms_loaded
);
1559 ASSERT(msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
1565 metaslab_passivate(metaslab_t
*msp
, uint64_t size
)
1568 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1569 * this metaslab again. In that case, it had better be empty,
1570 * or we would be leaving space on the table.
1572 ASSERT(size
>= SPA_MINBLOCKSIZE
|| range_tree_space(msp
->ms_tree
) == 0);
1573 metaslab_group_sort(msp
->ms_group
, msp
, MIN(msp
->ms_weight
, size
));
1574 ASSERT((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) == 0);
1578 metaslab_preload(void *arg
)
1580 metaslab_t
*msp
= arg
;
1581 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1582 fstrans_cookie_t cookie
= spl_fstrans_mark();
1584 ASSERT(!MUTEX_HELD(&msp
->ms_group
->mg_lock
));
1586 mutex_enter(&msp
->ms_lock
);
1587 metaslab_load_wait(msp
);
1588 if (!msp
->ms_loaded
)
1589 (void) metaslab_load(msp
);
1592 * Set the ms_access_txg value so that we don't unload it right away.
1594 msp
->ms_access_txg
= spa_syncing_txg(spa
) + metaslab_unload_delay
+ 1;
1595 mutex_exit(&msp
->ms_lock
);
1596 spl_fstrans_unmark(cookie
);
1600 metaslab_group_preload(metaslab_group_t
*mg
)
1602 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
1604 avl_tree_t
*t
= &mg
->mg_metaslab_tree
;
1607 if (spa_shutting_down(spa
) || !metaslab_preload_enabled
) {
1608 taskq_wait_outstanding(mg
->mg_taskq
, 0);
1612 mutex_enter(&mg
->mg_lock
);
1614 * Load the next potential metaslabs
1617 while (msp
!= NULL
) {
1618 metaslab_t
*msp_next
= AVL_NEXT(t
, msp
);
1621 * We preload only the maximum number of metaslabs specified
1622 * by metaslab_preload_limit. If a metaslab is being forced
1623 * to condense then we preload it too. This will ensure
1624 * that force condensing happens in the next txg.
1626 if (++m
> metaslab_preload_limit
&& !msp
->ms_condense_wanted
) {
1632 * We must drop the metaslab group lock here to preserve
1633 * lock ordering with the ms_lock (when grabbing both
1634 * the mg_lock and the ms_lock, the ms_lock must be taken
1635 * first). As a result, it is possible that the ordering
1636 * of the metaslabs within the avl tree may change before
1637 * we reacquire the lock. The metaslab cannot be removed from
1638 * the tree while we're in syncing context so it is safe to
1639 * drop the mg_lock here. If the metaslabs are reordered
1640 * nothing will break -- we just may end up loading a
1641 * less than optimal one.
1643 mutex_exit(&mg
->mg_lock
);
1644 VERIFY(taskq_dispatch(mg
->mg_taskq
, metaslab_preload
,
1645 msp
, TQ_SLEEP
) != 0);
1646 mutex_enter(&mg
->mg_lock
);
1649 mutex_exit(&mg
->mg_lock
);
1653 * Determine if the space map's on-disk footprint is past our tolerance
1654 * for inefficiency. We would like to use the following criteria to make
1657 * 1. The size of the space map object should not dramatically increase as a
1658 * result of writing out the free space range tree.
1660 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
1661 * times the size than the free space range tree representation
1662 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1.MB).
1664 * 3. The on-disk size of the space map should actually decrease.
1666 * Checking the first condition is tricky since we don't want to walk
1667 * the entire AVL tree calculating the estimated on-disk size. Instead we
1668 * use the size-ordered range tree in the metaslab and calculate the
1669 * size required to write out the largest segment in our free tree. If the
1670 * size required to represent that segment on disk is larger than the space
1671 * map object then we avoid condensing this map.
1673 * To determine the second criterion we use a best-case estimate and assume
1674 * each segment can be represented on-disk as a single 64-bit entry. We refer
1675 * to this best-case estimate as the space map's minimal form.
1677 * Unfortunately, we cannot compute the on-disk size of the space map in this
1678 * context because we cannot accurately compute the effects of compression, etc.
1679 * Instead, we apply the heuristic described in the block comment for
1680 * zfs_metaslab_condense_block_threshold - we only condense if the space used
1681 * is greater than a threshold number of blocks.
1684 metaslab_should_condense(metaslab_t
*msp
)
1686 space_map_t
*sm
= msp
->ms_sm
;
1688 uint64_t size
, entries
, segsz
, object_size
, optimal_size
, record_size
;
1689 dmu_object_info_t doi
;
1690 uint64_t vdev_blocksize
= 1 << msp
->ms_group
->mg_vd
->vdev_ashift
;
1692 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1693 ASSERT(msp
->ms_loaded
);
1696 * Use the ms_size_tree range tree, which is ordered by size, to
1697 * obtain the largest segment in the free tree. We always condense
1698 * metaslabs that are empty and metaslabs for which a condense
1699 * request has been made.
1701 rs
= avl_last(&msp
->ms_size_tree
);
1702 if (rs
== NULL
|| msp
->ms_condense_wanted
)
1706 * Calculate the number of 64-bit entries this segment would
1707 * require when written to disk. If this single segment would be
1708 * larger on-disk than the entire current on-disk structure, then
1709 * clearly condensing will increase the on-disk structure size.
1711 size
= (rs
->rs_end
- rs
->rs_start
) >> sm
->sm_shift
;
1712 entries
= size
/ (MIN(size
, SM_RUN_MAX
));
1713 segsz
= entries
* sizeof (uint64_t);
1715 optimal_size
= sizeof (uint64_t) * avl_numnodes(&msp
->ms_tree
->rt_root
);
1716 object_size
= space_map_length(msp
->ms_sm
);
1718 dmu_object_info_from_db(sm
->sm_dbuf
, &doi
);
1719 record_size
= MAX(doi
.doi_data_block_size
, vdev_blocksize
);
1721 return (segsz
<= object_size
&&
1722 object_size
>= (optimal_size
* zfs_condense_pct
/ 100) &&
1723 object_size
> zfs_metaslab_condense_block_threshold
* record_size
);
1727 * Condense the on-disk space map representation to its minimized form.
1728 * The minimized form consists of a small number of allocations followed by
1729 * the entries of the free range tree.
1732 metaslab_condense(metaslab_t
*msp
, uint64_t txg
, dmu_tx_t
*tx
)
1734 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1735 range_tree_t
*freetree
= msp
->ms_freetree
[txg
& TXG_MASK
];
1736 range_tree_t
*condense_tree
;
1737 space_map_t
*sm
= msp
->ms_sm
;
1740 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1741 ASSERT3U(spa_sync_pass(spa
), ==, 1);
1742 ASSERT(msp
->ms_loaded
);
1745 spa_dbgmsg(spa
, "condensing: txg %llu, msp[%llu] %p, "
1746 "smp size %llu, segments %lu, forcing condense=%s", txg
,
1747 msp
->ms_id
, msp
, space_map_length(msp
->ms_sm
),
1748 avl_numnodes(&msp
->ms_tree
->rt_root
),
1749 msp
->ms_condense_wanted
? "TRUE" : "FALSE");
1751 msp
->ms_condense_wanted
= B_FALSE
;
	/*
	 * Create a range tree that is 100% allocated. We remove segments
	 * that have been freed in this txg, any deferred frees that exist,
	 * and any allocations in the future. Removing segments should be
	 * a relatively inexpensive operation since we expect these trees to
	 * have a small number of nodes.
	 */
1760 condense_tree
= range_tree_create(NULL
, NULL
, &msp
->ms_lock
);
1761 range_tree_add(condense_tree
, msp
->ms_start
, msp
->ms_size
);
1764 * Remove what's been freed in this txg from the condense_tree.
1765 * Since we're in sync_pass 1, we know that all the frees from
1766 * this txg are in the freetree.
1768 range_tree_walk(freetree
, range_tree_remove
, condense_tree
);
1770 for (t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1771 range_tree_walk(msp
->ms_defertree
[t
],
1772 range_tree_remove
, condense_tree
);
1775 for (t
= 1; t
< TXG_CONCURRENT_STATES
; t
++) {
1776 range_tree_walk(msp
->ms_alloctree
[(txg
+ t
) & TXG_MASK
],
1777 range_tree_remove
, condense_tree
);
	/*
	 * We're about to drop the metaslab's lock thus allowing
	 * other consumers to change its content. Set the
	 * metaslab's ms_condensing flag to ensure that
	 * allocations on this metaslab do not occur while we're
	 * in the middle of committing it to disk. This is only critical
	 * for the ms_tree as all other range trees use per txg
	 * views of their content.
	 */
1789 msp
->ms_condensing
= B_TRUE
;
1791 mutex_exit(&msp
->ms_lock
);
1792 space_map_truncate(sm
, tx
);
1793 mutex_enter(&msp
->ms_lock
);
1796 * While we would ideally like to create a space_map representation
1797 * that consists only of allocation records, doing so can be
1798 * prohibitively expensive because the in-core free tree can be
1799 * large, and therefore computationally expensive to subtract
1800 * from the condense_tree. Instead we sync out two trees, a cheap
1801 * allocation only tree followed by the in-core free tree. While not
1802 * optimal, this is typically close to optimal, and much cheaper to
1805 space_map_write(sm
, condense_tree
, SM_ALLOC
, tx
);
1806 range_tree_vacate(condense_tree
, NULL
, NULL
);
1807 range_tree_destroy(condense_tree
);
1809 space_map_write(sm
, msp
->ms_tree
, SM_FREE
, tx
);
1810 msp
->ms_condensing
= B_FALSE
;
1814 * Write a metaslab to disk in the context of the specified transaction group.
1817 metaslab_sync(metaslab_t
*msp
, uint64_t txg
)
1819 metaslab_group_t
*mg
= msp
->ms_group
;
1820 vdev_t
*vd
= mg
->mg_vd
;
1821 spa_t
*spa
= vd
->vdev_spa
;
1822 objset_t
*mos
= spa_meta_objset(spa
);
1823 range_tree_t
*alloctree
= msp
->ms_alloctree
[txg
& TXG_MASK
];
1824 range_tree_t
**freetree
= &msp
->ms_freetree
[txg
& TXG_MASK
];
1825 range_tree_t
**freed_tree
=
1826 &msp
->ms_freetree
[TXG_CLEAN(txg
) & TXG_MASK
];
1828 uint64_t object
= space_map_object(msp
->ms_sm
);
1830 ASSERT(!vd
->vdev_ishole
);
1833 * This metaslab has just been added so there's no work to do now.
1835 if (*freetree
== NULL
) {
1836 ASSERT3P(alloctree
, ==, NULL
);
1840 ASSERT3P(alloctree
, !=, NULL
);
1841 ASSERT3P(*freetree
, !=, NULL
);
1842 ASSERT3P(*freed_tree
, !=, NULL
);
1845 * Normally, we don't want to process a metaslab if there
1846 * are no allocations or frees to perform. However, if the metaslab
1847 * is being forced to condense we need to let it through.
1849 if (range_tree_space(alloctree
) == 0 &&
1850 range_tree_space(*freetree
) == 0 &&
1851 !msp
->ms_condense_wanted
)
	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_tree. No other thread can
	 * be modifying this txg's alloctree, freetree, freed_tree, or
	 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
	 * space_map ASSERTs. We drop it whenever we call into the DMU,
	 * because the DMU can call down to us (e.g. via zio_free()) at
	 * any time.
	 */
1864 tx
= dmu_tx_create_assigned(spa_get_dsl(spa
), txg
);
1866 if (msp
->ms_sm
== NULL
) {
1867 uint64_t new_object
;
1869 new_object
= space_map_alloc(mos
, tx
);
1870 VERIFY3U(new_object
, !=, 0);
1872 VERIFY0(space_map_open(&msp
->ms_sm
, mos
, new_object
,
1873 msp
->ms_start
, msp
->ms_size
, vd
->vdev_ashift
,
1875 ASSERT(msp
->ms_sm
!= NULL
);
1878 mutex_enter(&msp
->ms_lock
);
	/*
	 * Note: metaslab_condense() clears the space_map's histogram.
	 * Therefore we must verify and remove this histogram before
	 * condensing.
	 */
1885 metaslab_group_histogram_verify(mg
);
1886 metaslab_class_histogram_verify(mg
->mg_class
);
1887 metaslab_group_histogram_remove(mg
, msp
);
1889 if (msp
->ms_loaded
&& spa_sync_pass(spa
) == 1 &&
1890 metaslab_should_condense(msp
)) {
1891 metaslab_condense(msp
, txg
, tx
);
1893 space_map_write(msp
->ms_sm
, alloctree
, SM_ALLOC
, tx
);
1894 space_map_write(msp
->ms_sm
, *freetree
, SM_FREE
, tx
);
1897 if (msp
->ms_loaded
) {
		/*
		 * When the space map is loaded, we have an accurate
		 * histogram in the range tree. This gives us an opportunity
		 * to bring the space map's histogram up-to-date so we clear
		 * it first before updating it.
		 */
1904 space_map_histogram_clear(msp
->ms_sm
);
1905 space_map_histogram_add(msp
->ms_sm
, msp
->ms_tree
, tx
);
		/*
		 * Since the space map is not loaded we simply update the
		 * existing histogram with what was freed in this txg. This
		 * means that the on-disk histogram may not have an accurate
		 * view of the free space but it's close enough to allow
		 * us to make allocation decisions.
		 */
1914 space_map_histogram_add(msp
->ms_sm
, *freetree
, tx
);
1916 metaslab_group_histogram_add(mg
, msp
);
1917 metaslab_group_histogram_verify(mg
);
1918 metaslab_class_histogram_verify(mg
->mg_class
);
1921 * For sync pass 1, we avoid traversing this txg's free range tree
1922 * and instead will just swap the pointers for freetree and
1923 * freed_tree. We can safely do this since the freed_tree is
1924 * guaranteed to be empty on the initial pass.
1926 if (spa_sync_pass(spa
) == 1) {
1927 range_tree_swap(freetree
, freed_tree
);
1929 range_tree_vacate(*freetree
, range_tree_add
, *freed_tree
);
1931 range_tree_vacate(alloctree
, NULL
, NULL
);
1933 ASSERT0(range_tree_space(msp
->ms_alloctree
[txg
& TXG_MASK
]));
1934 ASSERT0(range_tree_space(msp
->ms_freetree
[txg
& TXG_MASK
]));
1936 mutex_exit(&msp
->ms_lock
);
1938 if (object
!= space_map_object(msp
->ms_sm
)) {
1939 object
= space_map_object(msp
->ms_sm
);
1940 dmu_write(mos
, vd
->vdev_ms_array
, sizeof (uint64_t) *
1941 msp
->ms_id
, sizeof (uint64_t), &object
, tx
);
1947 * Called after a transaction group has completely synced to mark
1948 * all of the metaslab's free space as usable.
1951 metaslab_sync_done(metaslab_t
*msp
, uint64_t txg
)
1953 metaslab_group_t
*mg
= msp
->ms_group
;
1954 vdev_t
*vd
= mg
->mg_vd
;
1955 range_tree_t
**freed_tree
;
1956 range_tree_t
**defer_tree
;
1957 int64_t alloc_delta
, defer_delta
;
1960 ASSERT(!vd
->vdev_ishole
);
1962 mutex_enter(&msp
->ms_lock
);
	/*
	 * If this metaslab is just becoming available, initialize its
	 * alloctrees, freetrees, and defertree and add its capacity to
	 * the vdev.
	 */
	if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
		for (t = 0; t < TXG_SIZE; t++) {
			ASSERT(msp->ms_alloctree[t] == NULL);
			ASSERT(msp->ms_freetree[t] == NULL);

			msp->ms_alloctree[t] = range_tree_create(NULL, msp,
			    &msp->ms_lock);
			msp->ms_freetree[t] = range_tree_create(NULL, msp,
			    &msp->ms_lock);
		}

		for (t = 0; t < TXG_DEFER_SIZE; t++) {
			ASSERT(msp->ms_defertree[t] == NULL);

			msp->ms_defertree[t] = range_tree_create(NULL, msp,
			    &msp->ms_lock);
		}

		vdev_space_update(vd, 0, 0, msp->ms_size);
	}

	freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
	defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];

	alloc_delta = space_map_alloc_delta(msp->ms_sm);
	defer_delta = range_tree_space(*freed_tree) -
	    range_tree_space(*defer_tree);

	vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);

	ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
	ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));

	/*
	 * If there's a metaslab_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 */
	metaslab_load_wait(msp);

	/*
	 * Move the frees from the defer_tree back to the free
	 * range tree (if it's loaded). Swap the freed_tree and the
	 * defer_tree -- this is safe to do because we've just emptied out
	 * the defer_tree.
	 */
	range_tree_vacate(*defer_tree,
	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
	range_tree_swap(freed_tree, defer_tree);

	space_map_update(msp->ms_sm);
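
	/*
	 * Note (added for clarity, not part of the original source): with
	 * TXG_DEFER_SIZE == 2, a block freed in txg N is parked in
	 * ms_defertree[N % 2] once txg N finishes syncing, and is only
	 * vacated back into ms_tree here two txgs later, so its space
	 * re-enters circulation after txg N + 2 completes.
	 */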

	msp->ms_deferspace += defer_delta;
	ASSERT3S(msp->ms_deferspace, >=, 0);
	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
	if (msp->ms_deferspace != 0) {
		/*
		 * Keep syncing this metaslab until all deferred frees
		 * are back in circulation.
		 */
		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
	}

	if (msp->ms_loaded && msp->ms_access_txg < txg) {
		for (t = 1; t < TXG_CONCURRENT_STATES; t++) {
			VERIFY0(range_tree_space(
			    msp->ms_alloctree[(txg + t) & TXG_MASK]));
		}

		if (!metaslab_debug_unload)
			metaslab_unload(msp);
	}

	metaslab_group_sort(mg, msp, metaslab_weight(msp));
	mutex_exit(&msp->ms_lock);
}

void
metaslab_sync_reassess(metaslab_group_t *mg)
{
	metaslab_group_alloc_update(mg);
	mg->mg_fragmentation = metaslab_group_fragmentation(mg);

	/*
	 * Preload the next potential metaslabs.
	 */
	metaslab_group_preload(mg);
}

static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
	uint64_t start = msp->ms_id;

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (1ULL << 63);

	if (offset < start)
		return ((start - offset) << ms_shift);
	if (start < offset)
		return ((offset - start) << ms_shift);
	return (0);
}
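
/*
 * Illustrative example (added for clarity, not part of the original source):
 * with vdev_ms_shift == 34 (16 GB metaslabs), a metaslab with ms_id == 10
 * and a DVA that lands in metaslab 7 of the same vdev are considered
 * (10 - 7) << 34 bytes apart; a DVA on any other vdev is treated as
 * maximally distant (1ULL << 63).
 */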

static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
    uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t activation_weight;
	uint64_t target_distance;
	int i;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (i = 0; i < d; i++) {
		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_SECONDARY;
			break;
		}
	}

	for (;;) {
		boolean_t was_active;

		mutex_enter(&mg->mg_lock);
		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
			if (msp->ms_weight < asize) {
				spa_dbgmsg(spa, "%s: failed to meet weight "
				    "requirement: vdev %llu, txg %llu, mg %p, "
				    "msp %p, psize %llu, asize %llu, "
				    "weight %llu", spa_name(spa),
				    mg->mg_vd->vdev_id, txg,
				    mg, msp, psize, asize, msp->ms_weight);
				mutex_exit(&mg->mg_lock);
				return (-1ULL);
			}

			/*
			 * If the selected metaslab is condensing, skip it.
			 */
			if (msp->ms_condensing)
				continue;

			was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
				break;

			target_distance = min_distance +
			    (space_map_allocated(msp->ms_sm) != 0 ? 0 :
			    min_distance >> 1);

			for (i = 0; i < d; i++)
				if (metaslab_distance(msp, &dva[i]) <
				    target_distance)
					break;
			if (i == d)
				break;
		}
		mutex_exit(&mg->mg_lock);
		if (msp == NULL)
			return (-1ULL);

		mutex_enter(&msp->ms_lock);

		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request. It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock.
		 */
		if (msp->ms_weight < asize || (was_active &&
		    !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY)) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			metaslab_passivate(msp,
			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (metaslab_activate(msp, activation_weight) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If this metaslab is currently condensing then pick again as
		 * we can't manipulate this metaslab until it's committed
		 * to disk.
		 */
		if (msp->ms_condensing) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL)
			break;

		metaslab_passivate(msp, metaslab_block_maxsize(msp));
		mutex_exit(&msp->ms_lock);
	}

	if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

	range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize);
	msp->ms_access_txg = txg + metaslab_unload_delay;

	mutex_exit(&msp->ms_lock);

	return (offset);
}

/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
{
	metaslab_group_t *mg, *fast_mg, *rotor;
	vdev_t *vd;
	int dshift = 3;
	int all_zero;
	int zio_lock = B_FALSE;
	boolean_t allocatable;
	uint64_t offset = -1ULL;
	uint64_t asize;
	uint64_t distance;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 */
	if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
		return (SET_ERROR(ENOSPC));

	if (flags & METASLAB_FASTWRITE)
		mutex_enter(&mc->mc_fastwrite_lock);

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_aliquot because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly.  It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible.  If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about.  Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data.  With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header.  That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
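	/*
	 * Illustrative note (added for clarity, not part of the original
	 * source): "gradually lower our standards" is driven by dshift
	 * below.  On the first pass each additional copy must land at least
	 * vdev_asize >> 3 (one eighth of the vdev) away from the previous
	 * copies; each time the whole rotor fails, dshift is incremented and
	 * that minimum distance halves, until it falls below one metaslab
	 * and is ignored entirely.
	 */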
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

		/*
		 * It's possible the vdev we're using as the hint no
		 * longer exists (i.e. removed). Consult the rotor when
		 * all else fails.
		 */
		if (vd != NULL) {
			mg = vd->vdev_mg;

			if (flags & METASLAB_HINTBP_AVOID &&
			    mg->mg_next != NULL)
				mg = mg->mg_next;
		} else {
			mg = mc->mc_rotor;
		}
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else if (flags & METASLAB_FASTWRITE) {
		mg = fast_mg = mc->mc_rotor;

		do {
			if (fast_mg->mg_vd->vdev_pending_fastwrite <
			    mg->mg_vd->vdev_pending_fastwrite)
				mg = fast_mg;
		} while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);
	} else {
		mg = mc->mc_rotor;
	}

	/*
	 * If the hint put us into the wrong metaslab class, or into a
	 * metaslab group that has been passivated, just follow the rotor.
	 */
	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
		mg = mc->mc_rotor;

	rotor = mg;
top:
	all_zero = B_TRUE;
	do {
		ASSERT(mg->mg_activation_count == 1);

		vd = mg->mg_vd;

		/*
		 * Don't allocate from faulted devices.
		 */
		if (zio_lock) {
			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
			allocatable = vdev_allocatable(vd);
			spa_config_exit(spa, SCL_ZIO, FTAG);
		} else {
			allocatable = vdev_allocatable(vd);
		}

		/*
		 * Determine if the selected metaslab group is eligible
		 * for allocations. If we're ganging or have requested
		 * an allocation for the smallest gang block size
		 * then we don't want to avoid allocating to this
		 * metaslab group. If we're in this condition we should
		 * try to allocate from any device possible so that we
		 * don't inadvertently return ENOSPC and suspend the pool
		 * even though space is still available.
		 */
		if (allocatable && CAN_FASTGANG(flags) &&
		    psize > SPA_GANGBLOCKSIZE)
			allocatable = metaslab_group_allocatable(mg);

		if (!allocatable)
			goto next;

		/*
		 * Avoid writing single-copy data to a failing vdev
		 * unless the user instructs us that it is okay.
		 */
		if ((vd->vdev_stat.vs_write_errors > 0 ||
		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
		    d == 0 && dshift == 3 && vd->vdev_children == 0) {
			all_zero = B_FALSE;
			goto next;
		}

		ASSERT(mg->mg_class == mc);

		distance = vd->vdev_asize >> dshift;
		if (distance <= (1ULL << vd->vdev_ms_shift))
			distance = 0;
		else
			all_zero = B_FALSE;

		asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
		    dva, d);
		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 *
			 * Bias is also used to compensate for unequally
			 * sized vdevs so that space is allocated fairly.
			 */
			if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
				vdev_stat_t *vs = &vd->vdev_stat;
				int64_t vs_free = vs->vs_space - vs->vs_alloc;
				int64_t mc_free = mc->mc_space - mc->mc_alloc;
				int64_t ratio;

				/*
				 * Calculate how much more or less we should
				 * try to allocate from this device during
				 * this iteration around the rotor.
				 *
				 * This basically introduces a zero-centered
				 * bias towards the devices with the most
				 * free space, while compensating for vdev
				 * size differences.
				 *
				 * Examples:
				 *  vdev V1 = 16M/128M
				 *  vdev V2 = 16M/128M
				 *  ratio(V1) = 100% ratio(V2) = 100%
				 *
				 *  vdev V1 = 16M/128M
				 *  vdev V2 = 64M/128M
				 *  ratio(V1) = 127% ratio(V2) = 72%
				 *
				 *  vdev V1 = 16M/128M
				 *  vdev V2 = 64M/512M
				 *  ratio(V1) = 40% ratio(V2) = 160%
				 */
				ratio = (vs_free * mc->mc_alloc_groups * 100) /
				    (mc_free + 1);
				mg->mg_bias = ((ratio - 100) *
				    (int64_t)mg->mg_aliquot) / 100;
			} else if (!metaslab_bias_enabled) {
				mg->mg_bias = 0;
			}
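
			/*
			 * Illustrative arithmetic (added for clarity, not
			 * part of the original source): continuing the last
			 * example above and assuming mg_aliquot is the 512K
			 * default, ratio(V2) = 160% gives mg_bias =
			 * (160 - 100) * 512K / 100, about +307K, so V2 takes
			 * roughly 819K per rotor pass, while ratio(V1) = 40%
			 * gives about -307K, or roughly 205K per pass for V1.
			 */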

			if ((flags & METASLAB_FASTWRITE) ||
			    atomic_add_64_nv(&mc->mc_aliquot, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_aliquot = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
			DVA_SET_ASIZE(&dva[d], asize);

			if (flags & METASLAB_FASTWRITE) {
				atomic_add_64(&vd->vdev_pending_fastwrite,
				    psize);
				mutex_exit(&mc->mc_fastwrite_lock);
			}

			return (0);
		}
next:
		mc->mc_rotor = mg->mg_next;
		mc->mc_aliquot = 0;
	} while ((mg = mg->mg_next) != rotor);

	if (!all_zero) {
		dshift++;
		ASSERT(dshift < 64);
		goto top;
	}

	if (!allocatable && !zio_lock) {
		dshift = 3;
		zio_lock = B_TRUE;
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	if (flags & METASLAB_FASTWRITE)
		mutex_exit(&mc->mc_fastwrite_lock);

	return (SET_ERROR(ENOSPC));
}

/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset,
		    (u_longlong_t)size);
		return;
	}

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if (now) {
		range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
		    offset, size);

		VERIFY(!msp->ms_condensing);
		VERIFY3U(offset, >=, msp->ms_start);
		VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
		VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
		    msp->ms_size);
		VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
		range_tree_add(msp->ms_tree, offset, size);
	} else {
		if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_freetree[txg & TXG_MASK],
		    offset, size);
	}

	mutex_exit(&msp->ms_lock);
}
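
/*
 * Note (added for clarity, not part of the original source): when "now" is
 * B_TRUE the segment is removed from this txg's alloctree and returned
 * directly to ms_tree, making it immediately allocatable again (this is how
 * metaslab_alloc() unwinds a partially failed allocation).  Otherwise the
 * segment is queued in ms_freetree[txg & TXG_MASK] and only re-enters
 * circulation through the deferred-free cycle in metaslab_sync_done().
 */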

/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;
	int error = 0;

	ASSERT(DVA_IS_VALID(dva));

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
		return (SET_ERROR(ENXIO));

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
		error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);

	if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
		error = SET_ERROR(ENOENT);

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	VERIFY(!msp->ms_condensing);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
	range_tree_remove(msp->ms_tree, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);

	return (0);
}

int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int d, error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

	for (d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags);
		if (error != 0) {
			for (d--; d >= 0; d--) {
				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		}
	}

	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, txg);

	return (0);
}

void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int d, ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++)
		metaslab_free_dva(spa, &dva[d], txg, now);

	spa_config_exit(spa, SCL_FREE, FTAG);
}

int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int d, error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			break;

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}

void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!BP_IS_EMBEDDED(bp));

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		atomic_add_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}

void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!BP_IS_EMBEDDED(bp));

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
		atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}

void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
	int i, j;

	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
		metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

		if (msp->ms_loaded)
			range_tree_verify(msp->ms_tree, offset, size);

		for (j = 0; j < TXG_SIZE; j++)
			range_tree_verify(msp->ms_freetree[j], offset, size);
		for (j = 0; j < TXG_DEFER_SIZE; j++)
			range_tree_verify(msp->ms_defertree[j], offset, size);
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(metaslab_aliquot, ulong, 0644);
module_param(metaslab_debug_load, int, 0644);
module_param(metaslab_debug_unload, int, 0644);
module_param(metaslab_preload_enabled, int, 0644);
module_param(zfs_mg_noalloc_threshold, int, 0644);
module_param(zfs_mg_fragmentation_threshold, int, 0644);
module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
module_param(metaslab_fragmentation_factor_enabled, int, 0644);
module_param(metaslab_lba_weighting_enabled, int, 0644);
module_param(metaslab_bias_enabled, int, 0644);

MODULE_PARM_DESC(metaslab_aliquot,
	"allocation granularity (a.k.a. stripe size)");
MODULE_PARM_DESC(metaslab_debug_load,
	"load all metaslabs when pool is first opened");
MODULE_PARM_DESC(metaslab_debug_unload,
	"prevent metaslabs from being unloaded");
MODULE_PARM_DESC(metaslab_preload_enabled,
	"preload potential metaslabs during reassessment");

MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
	"percentage of free space for metaslab group to allow allocation");
MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
	"fragmentation for metaslab group to allow allocation");

MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
	"fragmentation for metaslab to allow allocation");
MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
	"use the fragmentation metric to prefer less fragmented metaslabs");
MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
	"prefer metaslabs with lower LBAs");
MODULE_PARM_DESC(metaslab_bias_enabled,
	"enable metaslab group biasing");
#endif /* _KERNEL && HAVE_SPL */
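
/*
 * Usage sketch (added for clarity, not part of the original source): when
 * built as the Linux "zfs" module, these parameters can typically be set at
 * load time, e.g. "modprobe zfs metaslab_debug_unload=1", or adjusted at
 * runtime through /sys/module/zfs/parameters/<name> for the writable (0644)
 * entries above.
 */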