/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#define	WITH_DF_BLOCK_ALLOCATOR

#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
/*
 * Metaslab granularity, in bytes. This is roughly similar to what would be
 * referred to as the "stripe size" in traditional RAID arrays. In normal
 * operation, we will try to write this amount of data to a top-level vdev
 * before moving on to the next one.
 */
unsigned long metaslab_aliquot = 512 << 10;
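/*
 * Illustrative sketch, not part of the original source: the per-group
 * aliquot is scaled by the number of children in the top-level vdev when
 * the group is activated (see metaslab_group_activate() below), so a
 * 10-child top-level vdev would be handed roughly 512K * 10 = 5M before
 * the rotor advances. The helper name below is hypothetical.
 */
#if 0	/* example only */
static uint64_t
example_group_aliquot(metaslab_group_t *mg)
{
	return (metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children));
}
#endif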
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space_map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;
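/*
 * Illustrative sketch, not part of the original source: with the default
 * zfs_condense_pct of 200, an on-disk space map only satisfies this
 * particular criterion once it is at least twice the size of its estimated
 * minimal in-core form (see metaslab_should_condense() below). The helper
 * name is hypothetical.
 */
#if 0	/* example only */
static boolean_t
example_condense_pct_met(uint64_t object_size, uint64_t optimal_size)
{
	return (object_size >= optimal_size * zfs_condense_pct / 100);
}
#endif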
/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
int zfs_metaslab_condense_block_threshold = 4;
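/*
 * Worked example, not part of the original source: with ashift=12, one
 * space map block is MAX(4K, space_map_blksz) bytes, so with the default
 * threshold of 4 the on-disk space map must occupy more than four such
 * blocks before metaslab_should_condense() will consider condensing it.
 */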
/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;
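/*
 * Illustrative sketch, not part of the original source: a group's free
 * capacity is tracked as a percentage and compared against the threshold
 * above in metaslab_group_alloc_update() below. The helper name is
 * hypothetical, and the divisor (the group's total space) is an assumption
 * based on the surrounding code.
 */
#if 0	/* example only */
static boolean_t
example_group_above_noalloc(vdev_stat_t *vs)
{
	uint64_t free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    vs->vs_space;

	return (free_capacity > zfs_mg_noalloc_threshold);
}
#endif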
/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or equal to
 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
 * then it will be skipped unless all metaslab groups within the metaslab
 * class have also crossed this threshold.
 */
int zfs_mg_fragmentation_threshold = 85;
/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;
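/*
 * Illustrative sketch, not part of the original source: the free-capacity
 * and fragmentation thresholds above combine into the mg_allocatable flag
 * exactly as recomputed in metaslab_group_alloc_update() below. The helper
 * name is hypothetical.
 */
#if 0	/* example only */
static boolean_t
example_group_allocatable(metaslab_group_t *mg)
{
	return (mg->mg_activation_count > 0 &&
	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
}
#endif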
/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = 0;
/*
 * When set will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;
/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;
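/*
 * Illustrative sketch, not part of the original source: the dynamic
 * allocator falls back from the offset-ordered (first-fit) tree to the
 * size-ordered (best-fit) tree when either tunable above trips, mirroring
 * the test in metaslab_df_alloc() below. The helper name is hypothetical.
 */
#if 0	/* example only */
static boolean_t
example_df_use_best_fit(uint64_t max_size, int free_pct)
{
	return (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct);
}
#endif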
/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;
/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;
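/*
 * Worked example, not part of the original source: with the usual TXG_SIZE
 * of 4 the default delay is 8 txgs. Each use pushes ms_access_txg out to
 * spa_syncing_txg(spa) + metaslab_unload_delay + 1 (see metaslab_preload()),
 * so an idle metaslab becomes an unload candidate roughly 8 txgs after its
 * last allocation.
 */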
/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;
/*
 * Enable/disable preloading of metaslabs.
 */
int metaslab_preload_enabled = B_TRUE;
/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
int metaslab_fragmentation_factor_enabled = B_TRUE;
/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
int metaslab_lba_weighting_enabled = B_TRUE;
/*
 * Enable/disable metaslab group biasing.
 */
int metaslab_bias_enabled = B_TRUE;
static uint64_t metaslab_fragmentation(metaslab_t *);
/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
183 metaslab_class_create(spa_t
*spa
, metaslab_ops_t
*ops
)
185 metaslab_class_t
*mc
;
187 mc
= kmem_zalloc(sizeof (metaslab_class_t
), KM_SLEEP
);
192 mutex_init(&mc
->mc_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
193 refcount_create_tracked(&mc
->mc_alloc_slots
);
199 metaslab_class_destroy(metaslab_class_t
*mc
)
201 ASSERT(mc
->mc_rotor
== NULL
);
202 ASSERT(mc
->mc_alloc
== 0);
203 ASSERT(mc
->mc_deferred
== 0);
204 ASSERT(mc
->mc_space
== 0);
205 ASSERT(mc
->mc_dspace
== 0);
207 refcount_destroy(&mc
->mc_alloc_slots
);
208 mutex_destroy(&mc
->mc_lock
);
209 kmem_free(mc
, sizeof (metaslab_class_t
));
213 metaslab_class_validate(metaslab_class_t
*mc
)
215 metaslab_group_t
*mg
;
219 * Must hold one of the spa_config locks.
221 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALL
, RW_READER
) ||
222 spa_config_held(mc
->mc_spa
, SCL_ALL
, RW_WRITER
));
224 if ((mg
= mc
->mc_rotor
) == NULL
)
229 ASSERT(vd
->vdev_mg
!= NULL
);
230 ASSERT3P(vd
->vdev_top
, ==, vd
);
231 ASSERT3P(mg
->mg_class
, ==, mc
);
232 ASSERT3P(vd
->vdev_ops
, !=, &vdev_hole_ops
);
233 } while ((mg
= mg
->mg_next
) != mc
->mc_rotor
);
239 metaslab_class_space_update(metaslab_class_t
*mc
, int64_t alloc_delta
,
240 int64_t defer_delta
, int64_t space_delta
, int64_t dspace_delta
)
242 atomic_add_64(&mc
->mc_alloc
, alloc_delta
);
243 atomic_add_64(&mc
->mc_deferred
, defer_delta
);
244 atomic_add_64(&mc
->mc_space
, space_delta
);
245 atomic_add_64(&mc
->mc_dspace
, dspace_delta
);
249 metaslab_class_get_alloc(metaslab_class_t
*mc
)
251 return (mc
->mc_alloc
);
255 metaslab_class_get_deferred(metaslab_class_t
*mc
)
257 return (mc
->mc_deferred
);
261 metaslab_class_get_space(metaslab_class_t
*mc
)
263 return (mc
->mc_space
);
267 metaslab_class_get_dspace(metaslab_class_t
*mc
)
269 return (spa_deflate(mc
->mc_spa
) ? mc
->mc_dspace
: mc
->mc_space
);
273 metaslab_class_histogram_verify(metaslab_class_t
*mc
)
275 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
279 if ((zfs_flags
& ZFS_DEBUG_HISTOGRAM_VERIFY
) == 0)
282 mc_hist
= kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
,
285 for (c
= 0; c
< rvd
->vdev_children
; c
++) {
286 vdev_t
*tvd
= rvd
->vdev_child
[c
];
287 metaslab_group_t
*mg
= tvd
->vdev_mg
;
290 * Skip any holes, uninitialized top-levels, or
 * vdevs that are not in this metaslab class.
293 if (tvd
->vdev_ishole
|| tvd
->vdev_ms_shift
== 0 ||
294 mg
->mg_class
!= mc
) {
298 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
299 mc_hist
[i
] += mg
->mg_histogram
[i
];
302 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
303 VERIFY3U(mc_hist
[i
], ==, mc
->mc_histogram
[i
]);
305 kmem_free(mc_hist
, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
);
309 * Calculate the metaslab class's fragmentation metric. The metric
310 * is weighted based on the space contribution of each metaslab group.
311 * The return value will be a number between 0 and 100 (inclusive), or
312 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
313 * zfs_frag_table for more information about the metric.
316 metaslab_class_fragmentation(metaslab_class_t
*mc
)
318 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
319 uint64_t fragmentation
= 0;
322 spa_config_enter(mc
->mc_spa
, SCL_VDEV
, FTAG
, RW_READER
);
324 for (c
= 0; c
< rvd
->vdev_children
; c
++) {
325 vdev_t
*tvd
= rvd
->vdev_child
[c
];
326 metaslab_group_t
*mg
= tvd
->vdev_mg
;
329 * Skip any holes, uninitialized top-levels, or
 * vdevs that are not in this metaslab class.
332 if (tvd
->vdev_ishole
|| tvd
->vdev_ms_shift
== 0 ||
333 mg
->mg_class
!= mc
) {
338 * If a metaslab group does not contain a fragmentation
339 * metric then just bail out.
341 if (mg
->mg_fragmentation
== ZFS_FRAG_INVALID
) {
342 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
343 return (ZFS_FRAG_INVALID
);
347 * Determine how much this metaslab_group is contributing
348 * to the overall pool fragmentation metric.
350 fragmentation
+= mg
->mg_fragmentation
*
351 metaslab_group_get_space(mg
);
353 fragmentation
/= metaslab_class_get_space(mc
);
355 ASSERT3U(fragmentation
, <=, 100);
356 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
357 return (fragmentation
);
361 * Calculate the amount of expandable space that is available in
362 * this metaslab class. If a device is expanded then its expandable
363 * space will be the amount of allocatable space that is currently not
364 * part of this metaslab class.
367 metaslab_class_expandable_space(metaslab_class_t
*mc
)
369 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
373 spa_config_enter(mc
->mc_spa
, SCL_VDEV
, FTAG
, RW_READER
);
374 for (c
= 0; c
< rvd
->vdev_children
; c
++) {
375 vdev_t
*tvd
= rvd
->vdev_child
[c
];
376 metaslab_group_t
*mg
= tvd
->vdev_mg
;
378 if (tvd
->vdev_ishole
|| tvd
->vdev_ms_shift
== 0 ||
379 mg
->mg_class
!= mc
) {
383 space
+= tvd
->vdev_max_asize
- tvd
->vdev_asize
;
385 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
395 metaslab_compare(const void *x1
, const void *x2
)
397 const metaslab_t
*m1
= (const metaslab_t
*)x1
;
398 const metaslab_t
*m2
= (const metaslab_t
*)x2
;
400 int cmp
= AVL_CMP(m2
->ms_weight
, m1
->ms_weight
);
404 IMPLY(AVL_CMP(m1
->ms_start
, m2
->ms_start
) == 0, m1
== m2
);
406 return (AVL_CMP(m1
->ms_start
, m2
->ms_start
));
410 * Update the allocatable flag and the metaslab group's capacity.
411 * The allocatable flag is set to true if the capacity is below
412 * the zfs_mg_noalloc_threshold or has a fragmentation value that is
413 * greater than zfs_mg_fragmentation_threshold. If a metaslab group
414 * transitions from allocatable to non-allocatable or vice versa then the
415 * metaslab group's class is updated to reflect the transition.
418 metaslab_group_alloc_update(metaslab_group_t
*mg
)
420 vdev_t
*vd
= mg
->mg_vd
;
421 metaslab_class_t
*mc
= mg
->mg_class
;
422 vdev_stat_t
*vs
= &vd
->vdev_stat
;
423 boolean_t was_allocatable
;
424 boolean_t was_initialized
;
426 ASSERT(vd
== vd
->vdev_top
);
428 mutex_enter(&mg
->mg_lock
);
429 was_allocatable
= mg
->mg_allocatable
;
430 was_initialized
= mg
->mg_initialized
;
432 mg
->mg_free_capacity
= ((vs
->vs_space
- vs
->vs_alloc
) * 100) /
435 mutex_enter(&mc
->mc_lock
);
438 * If the metaslab group was just added then it won't
439 * have any space until we finish syncing out this txg.
440 * At that point we will consider it initialized and available
441 * for allocations. We also don't consider non-activated
442 * metaslab groups (e.g. vdevs that are in the middle of being removed)
443 * to be initialized, because they can't be used for allocation.
445 mg
->mg_initialized
= metaslab_group_initialized(mg
);
446 if (!was_initialized
&& mg
->mg_initialized
) {
448 } else if (was_initialized
&& !mg
->mg_initialized
) {
449 ASSERT3U(mc
->mc_groups
, >, 0);
452 if (mg
->mg_initialized
)
453 mg
->mg_no_free_space
= B_FALSE
;
456 * A metaslab group is considered allocatable if it has plenty
457 * of free space or is not heavily fragmented. We only take
458 * fragmentation into account if the metaslab group has a valid
459 * fragmentation metric (i.e. a value between 0 and 100).
461 mg
->mg_allocatable
= (mg
->mg_activation_count
> 0 &&
462 mg
->mg_free_capacity
> zfs_mg_noalloc_threshold
&&
463 (mg
->mg_fragmentation
== ZFS_FRAG_INVALID
||
464 mg
->mg_fragmentation
<= zfs_mg_fragmentation_threshold
));
467 * The mc_alloc_groups maintains a count of the number of
468 * groups in this metaslab class that are still above the
469 * zfs_mg_noalloc_threshold. This is used by the allocating
470 * threads to determine if they should avoid allocations to
471 * a given group. The allocator will avoid allocations to a group
472 * if that group has reached or is below the zfs_mg_noalloc_threshold
473 * and there are still other groups that are above the threshold.
474 * When a group transitions from allocatable to non-allocatable or
475 * vice versa we update the metaslab class to reflect that change.
476 * When the mc_alloc_groups value drops to 0 that means that all
477 * groups have reached the zfs_mg_noalloc_threshold making all groups
478 * eligible for allocations. This effectively means that all devices
479 * are balanced again.
481 if (was_allocatable
&& !mg
->mg_allocatable
)
482 mc
->mc_alloc_groups
--;
483 else if (!was_allocatable
&& mg
->mg_allocatable
)
484 mc
->mc_alloc_groups
++;
485 mutex_exit(&mc
->mc_lock
);
487 mutex_exit(&mg
->mg_lock
);
491 metaslab_group_create(metaslab_class_t
*mc
, vdev_t
*vd
)
493 metaslab_group_t
*mg
;
495 mg
= kmem_zalloc(sizeof (metaslab_group_t
), KM_SLEEP
);
496 mutex_init(&mg
->mg_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
497 avl_create(&mg
->mg_metaslab_tree
, metaslab_compare
,
498 sizeof (metaslab_t
), offsetof(struct metaslab
, ms_group_node
));
501 mg
->mg_activation_count
= 0;
502 mg
->mg_initialized
= B_FALSE
;
503 mg
->mg_no_free_space
= B_TRUE
;
504 refcount_create_tracked(&mg
->mg_alloc_queue_depth
);
506 mg
->mg_taskq
= taskq_create("metaslab_group_taskq", metaslab_load_pct
,
507 maxclsyspri
, 10, INT_MAX
, TASKQ_THREADS_CPU_PCT
| TASKQ_DYNAMIC
);
513 metaslab_group_destroy(metaslab_group_t
*mg
)
515 ASSERT(mg
->mg_prev
== NULL
);
516 ASSERT(mg
->mg_next
== NULL
);
518 * We may have gone below zero with the activation count
519 * either because we never activated in the first place or
520 * because we're done, and possibly removing the vdev.
522 ASSERT(mg
->mg_activation_count
<= 0);
524 taskq_destroy(mg
->mg_taskq
);
525 avl_destroy(&mg
->mg_metaslab_tree
);
526 mutex_destroy(&mg
->mg_lock
);
527 refcount_destroy(&mg
->mg_alloc_queue_depth
);
528 kmem_free(mg
, sizeof (metaslab_group_t
));
532 metaslab_group_activate(metaslab_group_t
*mg
)
534 metaslab_class_t
*mc
= mg
->mg_class
;
535 metaslab_group_t
*mgprev
, *mgnext
;
537 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALLOC
, RW_WRITER
));
539 ASSERT(mc
->mc_rotor
!= mg
);
540 ASSERT(mg
->mg_prev
== NULL
);
541 ASSERT(mg
->mg_next
== NULL
);
542 ASSERT(mg
->mg_activation_count
<= 0);
544 if (++mg
->mg_activation_count
<= 0)
547 mg
->mg_aliquot
= metaslab_aliquot
* MAX(1, mg
->mg_vd
->vdev_children
);
548 metaslab_group_alloc_update(mg
);
550 if ((mgprev
= mc
->mc_rotor
) == NULL
) {
554 mgnext
= mgprev
->mg_next
;
555 mg
->mg_prev
= mgprev
;
556 mg
->mg_next
= mgnext
;
557 mgprev
->mg_next
= mg
;
558 mgnext
->mg_prev
= mg
;
564 metaslab_group_passivate(metaslab_group_t
*mg
)
566 metaslab_class_t
*mc
= mg
->mg_class
;
567 metaslab_group_t
*mgprev
, *mgnext
;
569 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALLOC
, RW_WRITER
));
571 if (--mg
->mg_activation_count
!= 0) {
572 ASSERT(mc
->mc_rotor
!= mg
);
573 ASSERT(mg
->mg_prev
== NULL
);
574 ASSERT(mg
->mg_next
== NULL
);
575 ASSERT(mg
->mg_activation_count
< 0);
579 taskq_wait_outstanding(mg
->mg_taskq
, 0);
580 metaslab_group_alloc_update(mg
);
582 mgprev
= mg
->mg_prev
;
583 mgnext
= mg
->mg_next
;
588 mc
->mc_rotor
= mgnext
;
589 mgprev
->mg_next
= mgnext
;
590 mgnext
->mg_prev
= mgprev
;
598 metaslab_group_initialized(metaslab_group_t
*mg
)
600 vdev_t
*vd
= mg
->mg_vd
;
601 vdev_stat_t
*vs
= &vd
->vdev_stat
;
603 return (vs
->vs_space
!= 0 && mg
->mg_activation_count
> 0);
607 metaslab_group_get_space(metaslab_group_t
*mg
)
609 return ((1ULL << mg
->mg_vd
->vdev_ms_shift
) * mg
->mg_vd
->vdev_ms_count
);
613 metaslab_group_histogram_verify(metaslab_group_t
*mg
)
616 vdev_t
*vd
= mg
->mg_vd
;
617 uint64_t ashift
= vd
->vdev_ashift
;
620 if ((zfs_flags
& ZFS_DEBUG_HISTOGRAM_VERIFY
) == 0)
623 mg_hist
= kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
,
626 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE
, >=,
627 SPACE_MAP_HISTOGRAM_SIZE
+ ashift
);
629 for (m
= 0; m
< vd
->vdev_ms_count
; m
++) {
630 metaslab_t
*msp
= vd
->vdev_ms
[m
];
632 if (msp
->ms_sm
== NULL
)
635 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++)
636 mg_hist
[i
+ ashift
] +=
637 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
640 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
641 VERIFY3U(mg_hist
[i
], ==, mg
->mg_histogram
[i
]);
643 kmem_free(mg_hist
, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
);
647 metaslab_group_histogram_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
649 metaslab_class_t
*mc
= mg
->mg_class
;
650 uint64_t ashift
= mg
->mg_vd
->vdev_ashift
;
653 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
654 if (msp
->ms_sm
== NULL
)
657 mutex_enter(&mg
->mg_lock
);
658 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
659 mg
->mg_histogram
[i
+ ashift
] +=
660 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
661 mc
->mc_histogram
[i
+ ashift
] +=
662 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
664 mutex_exit(&mg
->mg_lock
);
668 metaslab_group_histogram_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
670 metaslab_class_t
*mc
= mg
->mg_class
;
671 uint64_t ashift
= mg
->mg_vd
->vdev_ashift
;
674 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
675 if (msp
->ms_sm
== NULL
)
678 mutex_enter(&mg
->mg_lock
);
679 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
680 ASSERT3U(mg
->mg_histogram
[i
+ ashift
], >=,
681 msp
->ms_sm
->sm_phys
->smp_histogram
[i
]);
682 ASSERT3U(mc
->mc_histogram
[i
+ ashift
], >=,
683 msp
->ms_sm
->sm_phys
->smp_histogram
[i
]);
685 mg
->mg_histogram
[i
+ ashift
] -=
686 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
687 mc
->mc_histogram
[i
+ ashift
] -=
688 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
690 mutex_exit(&mg
->mg_lock
);
694 metaslab_group_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
696 ASSERT(msp
->ms_group
== NULL
);
697 mutex_enter(&mg
->mg_lock
);
700 avl_add(&mg
->mg_metaslab_tree
, msp
);
701 mutex_exit(&mg
->mg_lock
);
703 mutex_enter(&msp
->ms_lock
);
704 metaslab_group_histogram_add(mg
, msp
);
705 mutex_exit(&msp
->ms_lock
);
709 metaslab_group_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
711 mutex_enter(&msp
->ms_lock
);
712 metaslab_group_histogram_remove(mg
, msp
);
713 mutex_exit(&msp
->ms_lock
);
715 mutex_enter(&mg
->mg_lock
);
716 ASSERT(msp
->ms_group
== mg
);
717 avl_remove(&mg
->mg_metaslab_tree
, msp
);
718 msp
->ms_group
= NULL
;
719 mutex_exit(&mg
->mg_lock
);
723 metaslab_group_sort(metaslab_group_t
*mg
, metaslab_t
*msp
, uint64_t weight
)
726 * Although in principle the weight can be any value, in
727 * practice we do not use values in the range [1, 511].
729 ASSERT(weight
>= SPA_MINBLOCKSIZE
|| weight
== 0);
730 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
732 mutex_enter(&mg
->mg_lock
);
733 ASSERT(msp
->ms_group
== mg
);
734 avl_remove(&mg
->mg_metaslab_tree
, msp
);
735 msp
->ms_weight
= weight
;
736 avl_add(&mg
->mg_metaslab_tree
, msp
);
737 mutex_exit(&mg
->mg_lock
);
741 * Calculate the fragmentation for a given metaslab group. We can use
742 * a simple average here since all metaslabs within the group must have
743 * the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
745 * group have a fragmentation metric.
748 metaslab_group_fragmentation(metaslab_group_t
*mg
)
750 vdev_t
*vd
= mg
->mg_vd
;
751 uint64_t fragmentation
= 0;
752 uint64_t valid_ms
= 0;
755 for (m
= 0; m
< vd
->vdev_ms_count
; m
++) {
756 metaslab_t
*msp
= vd
->vdev_ms
[m
];
758 if (msp
->ms_fragmentation
== ZFS_FRAG_INVALID
)
762 fragmentation
+= msp
->ms_fragmentation
;
765 if (valid_ms
<= vd
->vdev_ms_count
/ 2)
766 return (ZFS_FRAG_INVALID
);
768 fragmentation
/= valid_ms
;
769 ASSERT3U(fragmentation
, <=, 100);
770 return (fragmentation
);
774 * Determine if a given metaslab group should skip allocations. A metaslab
775 * group should avoid allocations if its free capacity is less than the
776 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
777 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
778 * that can still handle allocations. If the allocation throttle is enabled
779 * then we skip allocations to devices that have reached their maximum
780 * allocation queue depth unless the selected metaslab group is the only
781 * eligible group remaining.
784 metaslab_group_allocatable(metaslab_group_t
*mg
, metaslab_group_t
*rotor
,
787 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
788 metaslab_class_t
*mc
= mg
->mg_class
;
791 * We can only consider skipping this metaslab group if it's
792 * in the normal metaslab class and there are other metaslab
 * groups to select from. Otherwise, we always consider it eligible for
 * allocations.
796 if (mc
!= spa_normal_class(spa
) || mc
->mc_groups
<= 1)
800 * If the metaslab group's mg_allocatable flag is set (see comments
801 * in metaslab_group_alloc_update() for more information) and
802 * the allocation throttle is disabled then allow allocations to this
803 * device. However, if the allocation throttle is enabled then
804 * check if we have reached our allocation limit (mg_alloc_queue_depth)
805 * to determine if we should allow allocations to this metaslab group.
806 * If all metaslab groups are no longer considered allocatable
807 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
808 * gang block size then we allow allocations on this metaslab group
809 * regardless of the mg_allocatable or throttle settings.
811 if (mg
->mg_allocatable
) {
812 metaslab_group_t
*mgp
;
814 uint64_t qmax
= mg
->mg_max_alloc_queue_depth
;
816 if (!mc
->mc_alloc_throttle_enabled
)
820 * If this metaslab group does not have any free space, then
821 * there is no point in looking further.
823 if (mg
->mg_no_free_space
)
826 qdepth
= refcount_count(&mg
->mg_alloc_queue_depth
);
829 * If this metaslab group is below its qmax or it's
 * the only allocatable metaslab group, then attempt
831 * to allocate from it.
833 if (qdepth
< qmax
|| mc
->mc_alloc_groups
== 1)
835 ASSERT3U(mc
->mc_alloc_groups
, >, 1);
838 * Since this metaslab group is at or over its qmax, we
839 * need to determine if there are metaslab groups after this
840 * one that might be able to handle this allocation. This is
841 * racy since we can't hold the locks for all metaslab
842 * groups at the same time when we make this check.
844 for (mgp
= mg
->mg_next
; mgp
!= rotor
; mgp
= mgp
->mg_next
) {
845 qmax
= mgp
->mg_max_alloc_queue_depth
;
847 qdepth
= refcount_count(&mgp
->mg_alloc_queue_depth
);
850 * If there is another metaslab group that
851 * might be able to handle the allocation, then
852 * we return false so that we skip this group.
854 if (qdepth
< qmax
&& !mgp
->mg_no_free_space
)
859 * We didn't find another group to handle the allocation
860 * so we can't skip this metaslab group even though
861 * we are at or over our qmax.
865 } else if (mc
->mc_alloc_groups
== 0 || psize
== SPA_MINBLOCKSIZE
) {
872 * ==========================================================================
873 * Range tree callbacks
874 * ==========================================================================
878 * Comparison function for the private size-ordered tree. Tree is sorted
879 * by size, larger sizes at the end of the tree.
882 metaslab_rangesize_compare(const void *x1
, const void *x2
)
884 const range_seg_t
*r1
= x1
;
885 const range_seg_t
*r2
= x2
;
886 uint64_t rs_size1
= r1
->rs_end
- r1
->rs_start
;
887 uint64_t rs_size2
= r2
->rs_end
- r2
->rs_start
;
889 int cmp
= AVL_CMP(rs_size1
, rs_size2
);
893 return (AVL_CMP(r1
->rs_start
, r2
->rs_start
));
897 * Create any block allocator specific components. The current allocators
898 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
901 metaslab_rt_create(range_tree_t
*rt
, void *arg
)
903 metaslab_t
*msp
= arg
;
905 ASSERT3P(rt
->rt_arg
, ==, msp
);
906 ASSERT(msp
->ms_tree
== NULL
);
908 avl_create(&msp
->ms_size_tree
, metaslab_rangesize_compare
,
909 sizeof (range_seg_t
), offsetof(range_seg_t
, rs_pp_node
));
913 * Destroy the block allocator specific components.
916 metaslab_rt_destroy(range_tree_t
*rt
, void *arg
)
918 metaslab_t
*msp
= arg
;
920 ASSERT3P(rt
->rt_arg
, ==, msp
);
921 ASSERT3P(msp
->ms_tree
, ==, rt
);
922 ASSERT0(avl_numnodes(&msp
->ms_size_tree
));
924 avl_destroy(&msp
->ms_size_tree
);
928 metaslab_rt_add(range_tree_t
*rt
, range_seg_t
*rs
, void *arg
)
930 metaslab_t
*msp
= arg
;
932 ASSERT3P(rt
->rt_arg
, ==, msp
);
933 ASSERT3P(msp
->ms_tree
, ==, rt
);
934 VERIFY(!msp
->ms_condensing
);
935 avl_add(&msp
->ms_size_tree
, rs
);
939 metaslab_rt_remove(range_tree_t
*rt
, range_seg_t
*rs
, void *arg
)
941 metaslab_t
*msp
= arg
;
943 ASSERT3P(rt
->rt_arg
, ==, msp
);
944 ASSERT3P(msp
->ms_tree
, ==, rt
);
945 VERIFY(!msp
->ms_condensing
);
946 avl_remove(&msp
->ms_size_tree
, rs
);
950 metaslab_rt_vacate(range_tree_t
*rt
, void *arg
)
952 metaslab_t
*msp
= arg
;
954 ASSERT3P(rt
->rt_arg
, ==, msp
);
955 ASSERT3P(msp
->ms_tree
, ==, rt
);
958 * Normally one would walk the tree freeing nodes along the way.
959 * Since the nodes are shared with the range trees we can avoid
960 * walking all nodes and just reinitialize the avl tree. The nodes
961 * will be freed by the range tree, so we don't want to free them here.
963 avl_create(&msp
->ms_size_tree
, metaslab_rangesize_compare
,
964 sizeof (range_seg_t
), offsetof(range_seg_t
, rs_pp_node
));
967 static range_tree_ops_t metaslab_rt_ops
= {
976 * ==========================================================================
977 * Metaslab block operations
978 * ==========================================================================
982 * Return the maximum contiguous segment within the metaslab.
985 metaslab_block_maxsize(metaslab_t
*msp
)
987 avl_tree_t
*t
= &msp
->ms_size_tree
;
990 if (t
== NULL
|| (rs
= avl_last(t
)) == NULL
)
993 return (rs
->rs_end
- rs
->rs_start
);
997 metaslab_block_alloc(metaslab_t
*msp
, uint64_t size
)
1000 range_tree_t
*rt
= msp
->ms_tree
;
1002 VERIFY(!msp
->ms_condensing
);
1004 start
= msp
->ms_ops
->msop_alloc(msp
, size
);
1005 if (start
!= -1ULL) {
1006 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
1008 VERIFY0(P2PHASE(start
, 1ULL << vd
->vdev_ashift
));
1009 VERIFY0(P2PHASE(size
, 1ULL << vd
->vdev_ashift
));
1010 VERIFY3U(range_tree_space(rt
) - size
, <=, msp
->ms_size
);
1011 range_tree_remove(rt
, start
, size
);
1017 * ==========================================================================
1018 * Common allocator routines
1019 * ==========================================================================
1022 #if defined(WITH_FF_BLOCK_ALLOCATOR) || \
1023 defined(WITH_DF_BLOCK_ALLOCATOR) || \
1024 defined(WITH_CF_BLOCK_ALLOCATOR)
1026 * This is a helper function that can be used by the allocator to find
1027 * a suitable block to allocate. This will search the specified AVL
1028 * tree looking for a block that matches the specified criteria.
1031 metaslab_block_picker(avl_tree_t
*t
, uint64_t *cursor
, uint64_t size
,
1034 range_seg_t
*rs
, rsearch
;
1037 rsearch
.rs_start
= *cursor
;
1038 rsearch
.rs_end
= *cursor
+ size
;
1040 rs
= avl_find(t
, &rsearch
, &where
);
1042 rs
= avl_nearest(t
, where
, AVL_AFTER
);
1044 while (rs
!= NULL
) {
1045 uint64_t offset
= P2ROUNDUP(rs
->rs_start
, align
);
1047 if (offset
+ size
<= rs
->rs_end
) {
1048 *cursor
= offset
+ size
;
1051 rs
= AVL_NEXT(t
, rs
);
1055 * If we know we've searched the whole map (*cursor == 0), give up.
1056 * Otherwise, reset the cursor to the beginning and try again.
1062 return (metaslab_block_picker(t
, cursor
, size
, align
));
1064 #endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */
1066 #if defined(WITH_FF_BLOCK_ALLOCATOR)
1068 * ==========================================================================
1069 * The first-fit block allocator
1070 * ==========================================================================
1073 metaslab_ff_alloc(metaslab_t
*msp
, uint64_t size
)
1076 * Find the largest power of 2 block size that evenly divides the
1077 * requested size. This is used to try to allocate blocks with similar
1078 * alignment from the same area of the metaslab (i.e. same cursor
 * bucket) but it does not guarantee that other allocation sizes
1080 * may exist in the same region.
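	/*
	 * Worked example, not part of the original source: size & -size
	 * isolates the lowest set bit of the request. For a 24K (0x6000)
	 * request, align = 0x2000 (8K), and the cursor used below is
	 * ms_lbas[highbit64(0x2000) - 1] = ms_lbas[13].
	 */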
1082 uint64_t align
= size
& -size
;
1083 uint64_t *cursor
= &msp
->ms_lbas
[highbit64(align
) - 1];
1084 avl_tree_t
*t
= &msp
->ms_tree
->rt_root
;
1086 return (metaslab_block_picker(t
, cursor
, size
, align
));
1089 static metaslab_ops_t metaslab_ff_ops
= {
1093 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_ff_ops
;
1094 #endif /* WITH_FF_BLOCK_ALLOCATOR */
1096 #if defined(WITH_DF_BLOCK_ALLOCATOR)
1098 * ==========================================================================
1099 * Dynamic block allocator -
 * Uses the first fit allocation scheme until space gets low and then
1101 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
1102 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1103 * ==========================================================================
1106 metaslab_df_alloc(metaslab_t
*msp
, uint64_t size
)
1109 * Find the largest power of 2 block size that evenly divides the
1110 * requested size. This is used to try to allocate blocks with similar
1111 * alignment from the same area of the metaslab (i.e. same cursor
 * bucket) but it does not guarantee that other allocation sizes
1113 * may exist in the same region.
1115 uint64_t align
= size
& -size
;
1116 uint64_t *cursor
= &msp
->ms_lbas
[highbit64(align
) - 1];
1117 range_tree_t
*rt
= msp
->ms_tree
;
1118 avl_tree_t
*t
= &rt
->rt_root
;
1119 uint64_t max_size
= metaslab_block_maxsize(msp
);
1120 int free_pct
= range_tree_space(rt
) * 100 / msp
->ms_size
;
1122 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1123 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&msp
->ms_size_tree
));
1125 if (max_size
< size
)
1129 * If we're running low on space switch to using the size
1130 * sorted AVL tree (best-fit).
1132 if (max_size
< metaslab_df_alloc_threshold
||
1133 free_pct
< metaslab_df_free_pct
) {
1134 t
= &msp
->ms_size_tree
;
1138 return (metaslab_block_picker(t
, cursor
, size
, 1ULL));
1141 static metaslab_ops_t metaslab_df_ops
= {
1145 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_df_ops
;
1146 #endif /* WITH_DF_BLOCK_ALLOCATOR */
1148 #if defined(WITH_CF_BLOCK_ALLOCATOR)
1150 * ==========================================================================
1151 * Cursor fit block allocator -
1152 * Select the largest region in the metaslab, set the cursor to the beginning
1153 * of the range and the cursor_end to the end of the range. As allocations
1154 * are made advance the cursor. Continue allocating from the cursor until
1155 * the range is exhausted and then find a new range.
1156 * ==========================================================================
1159 metaslab_cf_alloc(metaslab_t
*msp
, uint64_t size
)
1161 range_tree_t
*rt
= msp
->ms_tree
;
1162 avl_tree_t
*t
= &msp
->ms_size_tree
;
1163 uint64_t *cursor
= &msp
->ms_lbas
[0];
1164 uint64_t *cursor_end
= &msp
->ms_lbas
[1];
1165 uint64_t offset
= 0;
1167 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1168 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&rt
->rt_root
));
1170 ASSERT3U(*cursor_end
, >=, *cursor
);
1172 if ((*cursor
+ size
) > *cursor_end
) {
1175 rs
= avl_last(&msp
->ms_size_tree
);
1176 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
)
1179 *cursor
= rs
->rs_start
;
1180 *cursor_end
= rs
->rs_end
;
1189 static metaslab_ops_t metaslab_cf_ops
= {
1193 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_cf_ops
;
1194 #endif /* WITH_CF_BLOCK_ALLOCATOR */
1196 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
1198 * ==========================================================================
1199 * New dynamic fit allocator -
1200 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
 * contiguous blocks. If no region is found then just use the largest segment
 * that remains.
1203 * ==========================================================================
1207 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1208 * to request from the allocator.
uint64_t metaslab_ndf_clump_shift = 4;
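/*
 * Worked example, not part of the original source: for a 128K request,
 * hbit = highbit64(131072) = 18, so with the default clump shift of 4 the
 * size-ordered tree is searched for a segment of at least
 * MIN(max_size, 1ULL << (18 + 4)) = 4M, i.e. room for a clump of blocks of
 * the requested size rather than a single block.
 */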
1213 metaslab_ndf_alloc(metaslab_t
*msp
, uint64_t size
)
1215 avl_tree_t
*t
= &msp
->ms_tree
->rt_root
;
1217 range_seg_t
*rs
, rsearch
;
1218 uint64_t hbit
= highbit64(size
);
1219 uint64_t *cursor
= &msp
->ms_lbas
[hbit
- 1];
1220 uint64_t max_size
= metaslab_block_maxsize(msp
);
1222 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1223 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&msp
->ms_size_tree
));
1225 if (max_size
< size
)
1228 rsearch
.rs_start
= *cursor
;
1229 rsearch
.rs_end
= *cursor
+ size
;
1231 rs
= avl_find(t
, &rsearch
, &where
);
1232 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
) {
1233 t
= &msp
->ms_size_tree
;
1235 rsearch
.rs_start
= 0;
1236 rsearch
.rs_end
= MIN(max_size
,
1237 1ULL << (hbit
+ metaslab_ndf_clump_shift
));
1238 rs
= avl_find(t
, &rsearch
, &where
);
1240 rs
= avl_nearest(t
, where
, AVL_AFTER
);
1244 if ((rs
->rs_end
- rs
->rs_start
) >= size
) {
1245 *cursor
= rs
->rs_start
+ size
;
1246 return (rs
->rs_start
);
1251 static metaslab_ops_t metaslab_ndf_ops
= {
1255 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_ndf_ops
;
1256 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
1266 * Wait for any in-progress metaslab loads to complete.
1269 metaslab_load_wait(metaslab_t
*msp
)
1271 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1273 while (msp
->ms_loading
) {
1274 ASSERT(!msp
->ms_loaded
);
1275 cv_wait(&msp
->ms_load_cv
, &msp
->ms_lock
);
1280 metaslab_load(metaslab_t
*msp
)
1285 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1286 ASSERT(!msp
->ms_loaded
);
1287 ASSERT(!msp
->ms_loading
);
1289 msp
->ms_loading
= B_TRUE
;
1292 * If the space map has not been allocated yet, then treat
 * all the space in the metaslab as free and add it to the ms_tree.
1296 if (msp
->ms_sm
!= NULL
)
1297 error
= space_map_load(msp
->ms_sm
, msp
->ms_tree
, SM_FREE
);
1299 range_tree_add(msp
->ms_tree
, msp
->ms_start
, msp
->ms_size
);
1301 msp
->ms_loaded
= (error
== 0);
1302 msp
->ms_loading
= B_FALSE
;
1304 if (msp
->ms_loaded
) {
1305 for (t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1306 range_tree_walk(msp
->ms_defertree
[t
],
1307 range_tree_remove
, msp
->ms_tree
);
1310 cv_broadcast(&msp
->ms_load_cv
);
1315 metaslab_unload(metaslab_t
*msp
)
1317 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1318 range_tree_vacate(msp
->ms_tree
, NULL
, NULL
);
1319 msp
->ms_loaded
= B_FALSE
;
1320 msp
->ms_weight
&= ~METASLAB_ACTIVE_MASK
;
1324 metaslab_init(metaslab_group_t
*mg
, uint64_t id
, uint64_t object
, uint64_t txg
,
1327 vdev_t
*vd
= mg
->mg_vd
;
1328 objset_t
*mos
= vd
->vdev_spa
->spa_meta_objset
;
1332 ms
= kmem_zalloc(sizeof (metaslab_t
), KM_SLEEP
);
1333 mutex_init(&ms
->ms_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1334 cv_init(&ms
->ms_load_cv
, NULL
, CV_DEFAULT
, NULL
);
1336 ms
->ms_start
= id
<< vd
->vdev_ms_shift
;
1337 ms
->ms_size
= 1ULL << vd
->vdev_ms_shift
;
1340 * We only open space map objects that already exist. All others
1341 * will be opened when we finally allocate an object for it.
1344 error
= space_map_open(&ms
->ms_sm
, mos
, object
, ms
->ms_start
,
1345 ms
->ms_size
, vd
->vdev_ashift
, &ms
->ms_lock
);
1348 kmem_free(ms
, sizeof (metaslab_t
));
1352 ASSERT(ms
->ms_sm
!= NULL
);
1356 * We create the main range tree here, but we don't create the
1357 * alloctree and freetree until metaslab_sync_done(). This serves
1358 * two purposes: it allows metaslab_sync_done() to detect the
1359 * addition of new space; and for debugging, it ensures that we'd
1360 * data fault on any attempt to use this metaslab before it's ready.
1362 ms
->ms_tree
= range_tree_create(&metaslab_rt_ops
, ms
, &ms
->ms_lock
);
1363 metaslab_group_add(mg
, ms
);
1365 ms
->ms_fragmentation
= metaslab_fragmentation(ms
);
1366 ms
->ms_ops
= mg
->mg_class
->mc_ops
;
1369 * If we're opening an existing pool (txg == 0) or creating
1370 * a new one (txg == TXG_INITIAL), all space is available now.
1371 * If we're adding space to an existing pool, the new space
1372 * does not become available until after this txg has synced.
1374 if (txg
<= TXG_INITIAL
)
1375 metaslab_sync_done(ms
, 0);
1378 * If metaslab_debug_load is set and we're initializing a metaslab
 * that has an allocated space_map object then load its space
 * map so that we can verify frees.
1382 if (metaslab_debug_load
&& ms
->ms_sm
!= NULL
) {
1383 mutex_enter(&ms
->ms_lock
);
1384 VERIFY0(metaslab_load(ms
));
1385 mutex_exit(&ms
->ms_lock
);
1389 vdev_dirty(vd
, 0, NULL
, txg
);
1390 vdev_dirty(vd
, VDD_METASLAB
, ms
, txg
);
1399 metaslab_fini(metaslab_t
*msp
)
1403 metaslab_group_t
*mg
= msp
->ms_group
;
1405 metaslab_group_remove(mg
, msp
);
1407 mutex_enter(&msp
->ms_lock
);
1409 VERIFY(msp
->ms_group
== NULL
);
1410 vdev_space_update(mg
->mg_vd
, -space_map_allocated(msp
->ms_sm
),
1412 space_map_close(msp
->ms_sm
);
1414 metaslab_unload(msp
);
1415 range_tree_destroy(msp
->ms_tree
);
1417 for (t
= 0; t
< TXG_SIZE
; t
++) {
1418 range_tree_destroy(msp
->ms_alloctree
[t
]);
1419 range_tree_destroy(msp
->ms_freetree
[t
]);
1422 for (t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1423 range_tree_destroy(msp
->ms_defertree
[t
]);
1426 ASSERT0(msp
->ms_deferspace
);
1428 mutex_exit(&msp
->ms_lock
);
1429 cv_destroy(&msp
->ms_load_cv
);
1430 mutex_destroy(&msp
->ms_lock
);
1432 kmem_free(msp
, sizeof (metaslab_t
));
1435 #define FRAGMENTATION_TABLE_SIZE 17
1438 * This table defines a segment size based fragmentation metric that will
1439 * allow each metaslab to derive its own fragmentation value. This is done
1440 * by calculating the space in each bucket of the spacemap histogram and
 * multiplying that by the fragmentation metric in this table. Doing
1442 * this for all buckets and dividing it by the total amount of free
1443 * space in this metaslab (i.e. the total free space in all buckets) gives
1444 * us the fragmentation metric. This means that a high fragmentation metric
1445 * equates to most of the free space being comprised of small segments.
1446 * Conversely, if the metric is low, then most of the free space is in
1447 * large segments. A 10% change in fragmentation equates to approximately
1448 * double the number of segments.
1450 * This table defines 0% fragmented space using 16MB segments. Testing has
1451 * shown that segments that are greater than or equal to 16MB do not suffer
1452 * from drastic performance problems. Using this value, we derive the rest
1453 * of the table. Since the fragmentation value is never stored on disk, it
1454 * is possible to change these calculations in the future.
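/*
 * Illustrative sketch, not part of the original source: the metric is a
 * weighted average, with each histogram bucket contributing its free space
 * multiplied by the table entry for that segment size, divided by the total
 * free space (see metaslab_fragmentation() below). The helper and parameter
 * names are hypothetical.
 */
#if 0	/* example only */
static uint64_t
example_weighted_fragmentation(const uint64_t *bucket_space,
    const int *bucket_frag_pct, int nbuckets)
{
	uint64_t total = 0, weighted = 0;
	int i;

	for (i = 0; i < nbuckets; i++) {
		weighted += bucket_space[i] * bucket_frag_pct[i];
		total += bucket_space[i];
	}

	return (total == 0 ? 0 : weighted / total);
}
#endif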
int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
 * Calculate the metaslab's fragmentation metric. A return value
 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
 * not support this metric. Otherwise, the return value should be in the
 * range [0, 100].
1482 metaslab_fragmentation(metaslab_t
*msp
)
1484 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1485 uint64_t fragmentation
= 0;
1487 boolean_t feature_enabled
= spa_feature_is_enabled(spa
,
1488 SPA_FEATURE_SPACEMAP_HISTOGRAM
);
1491 if (!feature_enabled
)
1492 return (ZFS_FRAG_INVALID
);
1495 * A null space map means that the entire metaslab is free
1496 * and thus is not fragmented.
1498 if (msp
->ms_sm
== NULL
)
1502 * If this metaslab's space_map has not been upgraded, flag it
1503 * so that we upgrade next time we encounter it.
1505 if (msp
->ms_sm
->sm_dbuf
->db_size
!= sizeof (space_map_phys_t
)) {
1506 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
1508 if (spa_writeable(vd
->vdev_spa
)) {
1509 uint64_t txg
= spa_syncing_txg(spa
);
1511 msp
->ms_condense_wanted
= B_TRUE
;
1512 vdev_dirty(vd
, VDD_METASLAB
, msp
, txg
+ 1);
1513 spa_dbgmsg(spa
, "txg %llu, requesting force condense: "
1514 "msp %p, vd %p", txg
, msp
, vd
);
1516 return (ZFS_FRAG_INVALID
);
1519 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
1521 uint8_t shift
= msp
->ms_sm
->sm_shift
;
1522 int idx
= MIN(shift
- SPA_MINBLOCKSHIFT
+ i
,
1523 FRAGMENTATION_TABLE_SIZE
- 1);
1525 if (msp
->ms_sm
->sm_phys
->smp_histogram
[i
] == 0)
1528 space
= msp
->ms_sm
->sm_phys
->smp_histogram
[i
] << (i
+ shift
);
1531 ASSERT3U(idx
, <, FRAGMENTATION_TABLE_SIZE
);
1532 fragmentation
+= space
* zfs_frag_table
[idx
];
1536 fragmentation
/= total
;
1537 ASSERT3U(fragmentation
, <=, 100);
1538 return (fragmentation
);
1542 * Compute a weight -- a selection preference value -- for the given metaslab.
1543 * This is based on the amount of free space, the level of fragmentation,
1544 * the LBA range, and whether the metaslab is loaded.
1547 metaslab_weight(metaslab_t
*msp
)
1549 metaslab_group_t
*mg
= msp
->ms_group
;
1550 vdev_t
*vd
= mg
->mg_vd
;
1551 uint64_t weight
, space
;
1553 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1556 * This vdev is in the process of being removed so there is nothing
1557 * for us to do here.
1559 if (vd
->vdev_removing
) {
1560 ASSERT0(space_map_allocated(msp
->ms_sm
));
1561 ASSERT0(vd
->vdev_ms_shift
);
1566 * The baseline weight is the metaslab's free space.
1568 space
= msp
->ms_size
- space_map_allocated(msp
->ms_sm
);
1570 msp
->ms_fragmentation
= metaslab_fragmentation(msp
);
1571 if (metaslab_fragmentation_factor_enabled
&&
1572 msp
->ms_fragmentation
!= ZFS_FRAG_INVALID
) {
1574 * Use the fragmentation information to inversely scale
1575 * down the baseline weight. We need to ensure that we
1576 * don't exclude this metaslab completely when it's 100%
 * fragmented. To avoid this we reduce the fragmented value by 1.
1580 space
= (space
* (100 - (msp
->ms_fragmentation
- 1))) / 100;
1583 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1584 * this metaslab again. The fragmentation metric may have
1585 * decreased the space to something smaller than
1586 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1587 * so that we can consume any remaining space.
1589 if (space
> 0 && space
< SPA_MINBLOCKSIZE
)
1590 space
= SPA_MINBLOCKSIZE
;
1595 * Modern disks have uniform bit density and constant angular velocity.
1596 * Therefore, the outer recording zones are faster (higher bandwidth)
1597 * than the inner zones by the ratio of outer to inner track diameter,
1598 * which is typically around 2:1. We account for this by assigning
1599 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1600 * In effect, this means that we'll select the metaslab with the most
1601 * free bandwidth rather than simply the one with the most free space.
	if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
		weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
		ASSERT(weight >= space && weight <= 2 * space);
	}
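	/*
	 * Worked example, not part of the original source: with
	 * vdev_ms_count = 200, metaslab 0 keeps weight = 2 * space,
	 * metaslab 100 gets 1.5 * space, and metaslab 199 gets just over
	 * 1 * space, so lower-LBA metaslabs win ties on free space.
	 */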
1609 * If this metaslab is one we're actively using, adjust its
1610 * weight to make it preferable to any inactive metaslab so
1611 * we'll polish it off. If the fragmentation on this metaslab
 * has exceeded our threshold, then don't mark it active.
1614 if (msp
->ms_loaded
&& msp
->ms_fragmentation
!= ZFS_FRAG_INVALID
&&
1615 msp
->ms_fragmentation
<= zfs_metaslab_fragmentation_threshold
) {
1616 weight
|= (msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
1623 metaslab_activate(metaslab_t
*msp
, uint64_t activation_weight
)
1625 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1627 if ((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) == 0) {
1628 metaslab_load_wait(msp
);
1629 if (!msp
->ms_loaded
) {
1630 int error
= metaslab_load(msp
);
1632 metaslab_group_sort(msp
->ms_group
, msp
, 0);
1637 metaslab_group_sort(msp
->ms_group
, msp
,
1638 msp
->ms_weight
| activation_weight
);
1640 ASSERT(msp
->ms_loaded
);
1641 ASSERT(msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
1647 metaslab_passivate(metaslab_t
*msp
, uint64_t size
)
1650 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1651 * this metaslab again. In that case, it had better be empty,
1652 * or we would be leaving space on the table.
1654 ASSERT(size
>= SPA_MINBLOCKSIZE
|| range_tree_space(msp
->ms_tree
) == 0);
1655 metaslab_group_sort(msp
->ms_group
, msp
, MIN(msp
->ms_weight
, size
));
1656 ASSERT((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) == 0);
1660 metaslab_preload(void *arg
)
1662 metaslab_t
*msp
= arg
;
1663 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1664 fstrans_cookie_t cookie
= spl_fstrans_mark();
1666 ASSERT(!MUTEX_HELD(&msp
->ms_group
->mg_lock
));
1668 mutex_enter(&msp
->ms_lock
);
1669 metaslab_load_wait(msp
);
1670 if (!msp
->ms_loaded
)
1671 (void) metaslab_load(msp
);
1674 * Set the ms_access_txg value so that we don't unload it right away.
1676 msp
->ms_access_txg
= spa_syncing_txg(spa
) + metaslab_unload_delay
+ 1;
1677 mutex_exit(&msp
->ms_lock
);
1678 spl_fstrans_unmark(cookie
);
1682 metaslab_group_preload(metaslab_group_t
*mg
)
1684 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
1686 avl_tree_t
*t
= &mg
->mg_metaslab_tree
;
1689 if (spa_shutting_down(spa
) || !metaslab_preload_enabled
) {
1690 taskq_wait_outstanding(mg
->mg_taskq
, 0);
1694 mutex_enter(&mg
->mg_lock
);
1696 * Load the next potential metaslabs
1699 while (msp
!= NULL
) {
1700 metaslab_t
*msp_next
= AVL_NEXT(t
, msp
);
1703 * We preload only the maximum number of metaslabs specified
1704 * by metaslab_preload_limit. If a metaslab is being forced
1705 * to condense then we preload it too. This will ensure
1706 * that force condensing happens in the next txg.
1708 if (++m
> metaslab_preload_limit
&& !msp
->ms_condense_wanted
) {
1714 * We must drop the metaslab group lock here to preserve
1715 * lock ordering with the ms_lock (when grabbing both
1716 * the mg_lock and the ms_lock, the ms_lock must be taken
1717 * first). As a result, it is possible that the ordering
1718 * of the metaslabs within the avl tree may change before
1719 * we reacquire the lock. The metaslab cannot be removed from
1720 * the tree while we're in syncing context so it is safe to
1721 * drop the mg_lock here. If the metaslabs are reordered
1722 * nothing will break -- we just may end up loading a
1723 * less than optimal one.
1725 mutex_exit(&mg
->mg_lock
);
1726 VERIFY(taskq_dispatch(mg
->mg_taskq
, metaslab_preload
,
1727 msp
, TQ_SLEEP
) != TASKQID_INVALID
);
1728 mutex_enter(&mg
->mg_lock
);
1731 mutex_exit(&mg
->mg_lock
);
1735 * Determine if the space map's on-disk footprint is past our tolerance
 * for inefficiency. We would like to use the following criteria to make
 * our decision:
1739 * 1. The size of the space map object should not dramatically increase as a
1740 * result of writing out the free space range tree.
1742 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
1743 * times the size than the free space range tree representation
1744 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1.MB).
1746 * 3. The on-disk size of the space map should actually decrease.
1748 * Checking the first condition is tricky since we don't want to walk
1749 * the entire AVL tree calculating the estimated on-disk size. Instead we
1750 * use the size-ordered range tree in the metaslab and calculate the
1751 * size required to write out the largest segment in our free tree. If the
1752 * size required to represent that segment on disk is larger than the space
1753 * map object then we avoid condensing this map.
1755 * To determine the second criterion we use a best-case estimate and assume
1756 * each segment can be represented on-disk as a single 64-bit entry. We refer
1757 * to this best-case estimate as the space map's minimal form.
1759 * Unfortunately, we cannot compute the on-disk size of the space map in this
1760 * context because we cannot accurately compute the effects of compression, etc.
1761 * Instead, we apply the heuristic described in the block comment for
1762 * zfs_metaslab_condense_block_threshold - we only condense if the space used
1763 * is greater than a threshold number of blocks.
1766 metaslab_should_condense(metaslab_t
*msp
)
1768 space_map_t
*sm
= msp
->ms_sm
;
1770 uint64_t size
, entries
, segsz
, object_size
, optimal_size
, record_size
;
1771 dmu_object_info_t doi
;
1772 uint64_t vdev_blocksize
= 1ULL << msp
->ms_group
->mg_vd
->vdev_ashift
;
1774 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1775 ASSERT(msp
->ms_loaded
);
1778 * Use the ms_size_tree range tree, which is ordered by size, to
1779 * obtain the largest segment in the free tree. We always condense
1780 * metaslabs that are empty and metaslabs for which a condense
1781 * request has been made.
1783 rs
= avl_last(&msp
->ms_size_tree
);
1784 if (rs
== NULL
|| msp
->ms_condense_wanted
)
1788 * Calculate the number of 64-bit entries this segment would
1789 * require when written to disk. If this single segment would be
1790 * larger on-disk than the entire current on-disk structure, then
1791 * clearly condensing will increase the on-disk structure size.
1793 size
= (rs
->rs_end
- rs
->rs_start
) >> sm
->sm_shift
;
1794 entries
= size
/ (MIN(size
, SM_RUN_MAX
));
1795 segsz
= entries
* sizeof (uint64_t);
1797 optimal_size
= sizeof (uint64_t) * avl_numnodes(&msp
->ms_tree
->rt_root
);
1798 object_size
= space_map_length(msp
->ms_sm
);
1800 dmu_object_info_from_db(sm
->sm_dbuf
, &doi
);
1801 record_size
= MAX(doi
.doi_data_block_size
, vdev_blocksize
);
1803 return (segsz
<= object_size
&&
1804 object_size
>= (optimal_size
* zfs_condense_pct
/ 100) &&
1805 object_size
> zfs_metaslab_condense_block_threshold
* record_size
);
1809 * Condense the on-disk space map representation to its minimized form.
1810 * The minimized form consists of a small number of allocations followed by
1811 * the entries of the free range tree.
1814 metaslab_condense(metaslab_t
*msp
, uint64_t txg
, dmu_tx_t
*tx
)
1816 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1817 range_tree_t
*freetree
= msp
->ms_freetree
[txg
& TXG_MASK
];
1818 range_tree_t
*condense_tree
;
1819 space_map_t
*sm
= msp
->ms_sm
;
1822 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1823 ASSERT3U(spa_sync_pass(spa
), ==, 1);
1824 ASSERT(msp
->ms_loaded
);
1827 spa_dbgmsg(spa
, "condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
1828 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg
,
1829 msp
->ms_id
, msp
, msp
->ms_group
->mg_vd
->vdev_id
,
1830 msp
->ms_group
->mg_vd
->vdev_spa
->spa_name
,
1831 space_map_length(msp
->ms_sm
), avl_numnodes(&msp
->ms_tree
->rt_root
),
1832 msp
->ms_condense_wanted
? "TRUE" : "FALSE");
1834 msp
->ms_condense_wanted
= B_FALSE
;
 * Create a range tree that is 100% allocated. We remove segments
1838 * that have been freed in this txg, any deferred frees that exist,
1839 * and any allocation in the future. Removing segments should be
1840 * a relatively inexpensive operation since we expect these trees to
1841 * have a small number of nodes.
1843 condense_tree
= range_tree_create(NULL
, NULL
, &msp
->ms_lock
);
1844 range_tree_add(condense_tree
, msp
->ms_start
, msp
->ms_size
);
1847 * Remove what's been freed in this txg from the condense_tree.
1848 * Since we're in sync_pass 1, we know that all the frees from
1849 * this txg are in the freetree.
1851 range_tree_walk(freetree
, range_tree_remove
, condense_tree
);
1853 for (t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1854 range_tree_walk(msp
->ms_defertree
[t
],
1855 range_tree_remove
, condense_tree
);
1858 for (t
= 1; t
< TXG_CONCURRENT_STATES
; t
++) {
1859 range_tree_walk(msp
->ms_alloctree
[(txg
+ t
) & TXG_MASK
],
1860 range_tree_remove
, condense_tree
);
1864 * We're about to drop the metaslab's lock thus allowing
 * other consumers to change its content. Set the
1866 * metaslab's ms_condensing flag to ensure that
1867 * allocations on this metaslab do not occur while we're
1868 * in the middle of committing it to disk. This is only critical
1869 * for the ms_tree as all other range trees use per txg
1870 * views of their content.
1872 msp
->ms_condensing
= B_TRUE
;
1874 mutex_exit(&msp
->ms_lock
);
1875 space_map_truncate(sm
, tx
);
1876 mutex_enter(&msp
->ms_lock
);
1879 * While we would ideally like to create a space_map representation
1880 * that consists only of allocation records, doing so can be
1881 * prohibitively expensive because the in-core free tree can be
1882 * large, and therefore computationally expensive to subtract
1883 * from the condense_tree. Instead we sync out two trees, a cheap
1884 * allocation only tree followed by the in-core free tree. While not
 * optimal, this is typically close to optimal, and much cheaper to compute.
1888 space_map_write(sm
, condense_tree
, SM_ALLOC
, tx
);
1889 range_tree_vacate(condense_tree
, NULL
, NULL
);
1890 range_tree_destroy(condense_tree
);
1892 space_map_write(sm
, msp
->ms_tree
, SM_FREE
, tx
);
1893 msp
->ms_condensing
= B_FALSE
;
1897 * Write a metaslab to disk in the context of the specified transaction group.
1900 metaslab_sync(metaslab_t
*msp
, uint64_t txg
)
1902 metaslab_group_t
*mg
= msp
->ms_group
;
1903 vdev_t
*vd
= mg
->mg_vd
;
1904 spa_t
*spa
= vd
->vdev_spa
;
1905 objset_t
*mos
= spa_meta_objset(spa
);
1906 range_tree_t
*alloctree
= msp
->ms_alloctree
[txg
& TXG_MASK
];
1907 range_tree_t
**freetree
= &msp
->ms_freetree
[txg
& TXG_MASK
];
1908 range_tree_t
**freed_tree
=
1909 &msp
->ms_freetree
[TXG_CLEAN(txg
) & TXG_MASK
];
1911 uint64_t object
= space_map_object(msp
->ms_sm
);
1913 ASSERT(!vd
->vdev_ishole
);
1916 * This metaslab has just been added so there's no work to do now.
1918 if (*freetree
== NULL
) {
1919 ASSERT3P(alloctree
, ==, NULL
);
1923 ASSERT3P(alloctree
, !=, NULL
);
1924 ASSERT3P(*freetree
, !=, NULL
);
1925 ASSERT3P(*freed_tree
, !=, NULL
);
1928 * Normally, we don't want to process a metaslab if there
1929 * are no allocations or frees to perform. However, if the metaslab
1930 * is being forced to condense we need to let it through.
1932 if (range_tree_space(alloctree
) == 0 &&
1933 range_tree_space(*freetree
) == 0 &&
1934 !msp
->ms_condense_wanted
)
1938 * The only state that can actually be changing concurrently with
1939 * metaslab_sync() is the metaslab's ms_tree. No other thread can
1940 * be modifying this txg's alloctree, freetree, freed_tree, or
 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
1942 * space_map ASSERTs. We drop it whenever we call into the DMU,
 * because the DMU can call down to us (e.g. via zio_free()) at any time.
1947 tx
= dmu_tx_create_assigned(spa_get_dsl(spa
), txg
);
1949 if (msp
->ms_sm
== NULL
) {
1950 uint64_t new_object
;
1952 new_object
= space_map_alloc(mos
, tx
);
1953 VERIFY3U(new_object
, !=, 0);
1955 VERIFY0(space_map_open(&msp
->ms_sm
, mos
, new_object
,
1956 msp
->ms_start
, msp
->ms_size
, vd
->vdev_ashift
,
1958 ASSERT(msp
->ms_sm
!= NULL
);
1961 mutex_enter(&msp
->ms_lock
);
1964 * Note: metaslab_condense() clears the space_map's histogram.
1965 * Therefore we muse verify and remove this histogram before
1968 metaslab_group_histogram_verify(mg
);
1969 metaslab_class_histogram_verify(mg
->mg_class
);
1970 metaslab_group_histogram_remove(mg
, msp
);
1972 if (msp
->ms_loaded
&& spa_sync_pass(spa
) == 1 &&
1973 metaslab_should_condense(msp
)) {
1974 metaslab_condense(msp
, txg
, tx
);
1976 space_map_write(msp
->ms_sm
, alloctree
, SM_ALLOC
, tx
);
1977 space_map_write(msp
->ms_sm
, *freetree
, SM_FREE
, tx
);
1980 if (msp
->ms_loaded
) {
1982 * When the space map is loaded, we have an accruate
1983 * histogram in the range tree. This gives us an opportunity
1984 * to bring the space map's histogram up-to-date so we clear
1985 * it first before updating it.
1987 space_map_histogram_clear(msp
->ms_sm
);
1988 space_map_histogram_add(msp
->ms_sm
, msp
->ms_tree
, tx
);
1991 * Since the space map is not loaded we simply update the
1992 * exisiting histogram with what was freed in this txg. This
1993 * means that the on-disk histogram may not have an accurate
1994 * view of the free space but it's close enough to allow
1995 * us to make allocation decisions.
1997 space_map_histogram_add(msp
->ms_sm
, *freetree
, tx
);
1999 metaslab_group_histogram_add(mg
, msp
);
2000 metaslab_group_histogram_verify(mg
);
2001 metaslab_class_histogram_verify(mg
->mg_class
);
2004 * For sync pass 1, we avoid traversing this txg's free range tree
2005 * and instead will just swap the pointers for freetree and
2006 * freed_tree. We can safely do this since the freed_tree is
2007 * guaranteed to be empty on the initial pass.
2009 if (spa_sync_pass(spa
) == 1) {
2010 range_tree_swap(freetree
, freed_tree
);
2012 range_tree_vacate(*freetree
, range_tree_add
, *freed_tree
);
2014 range_tree_vacate(alloctree
, NULL
, NULL
);
2016 ASSERT0(range_tree_space(msp
->ms_alloctree
[txg
& TXG_MASK
]));
2017 ASSERT0(range_tree_space(msp
->ms_freetree
[txg
& TXG_MASK
]));
2019 mutex_exit(&msp
->ms_lock
);
2021 if (object
!= space_map_object(msp
->ms_sm
)) {
2022 object
= space_map_object(msp
->ms_sm
);
2023 dmu_write(mos
, vd
->vdev_ms_array
, sizeof (uint64_t) *
2024 msp
->ms_id
, sizeof (uint64_t), &object
, tx
);
/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	range_tree_t **freed_tree;
	range_tree_t **defer_tree;
	int64_t alloc_delta, defer_delta;
	int t;

	ASSERT(!vd->vdev_ishole);

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * alloctrees, freetrees, and defertree and add its capacity to
	 * the vdev.
	 */
	if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
		for (t = 0; t < TXG_SIZE; t++) {
			ASSERT(msp->ms_alloctree[t] == NULL);
			ASSERT(msp->ms_freetree[t] == NULL);

			msp->ms_alloctree[t] = range_tree_create(NULL, msp,
			    &msp->ms_lock);
			msp->ms_freetree[t] = range_tree_create(NULL, msp,
			    &msp->ms_lock);
		}

		for (t = 0; t < TXG_DEFER_SIZE; t++) {
			ASSERT(msp->ms_defertree[t] == NULL);

			msp->ms_defertree[t] = range_tree_create(NULL, msp,
			    &msp->ms_lock);
		}

		vdev_space_update(vd, 0, 0, msp->ms_size);
	}

	freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
	defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];

	alloc_delta = space_map_alloc_delta(msp->ms_sm);
	defer_delta = range_tree_space(*freed_tree) -
	    range_tree_space(*defer_tree);

	vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);

	ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
	ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));

	/*
	 * If there's a metaslab_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 */
	metaslab_load_wait(msp);

	/*
	 * Move the frees from the defer_tree back to the free
	 * range tree (if it's loaded). Swap the freed_tree and the
	 * defer_tree -- this is safe to do because we've just emptied out
	 * the defer_tree.
	 */
	range_tree_vacate(*defer_tree,
	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
	range_tree_swap(freed_tree, defer_tree);

	space_map_update(msp->ms_sm);

	msp->ms_deferspace += defer_delta;
	ASSERT3S(msp->ms_deferspace, >=, 0);
	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
	if (msp->ms_deferspace != 0) {
		/*
		 * Keep syncing this metaslab until all deferred frees
		 * are back in circulation.
		 */
		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
	}

	if (msp->ms_loaded && msp->ms_access_txg < txg) {
		for (t = 1; t < TXG_CONCURRENT_STATES; t++) {
			VERIFY0(range_tree_space(
			    msp->ms_alloctree[(txg + t) & TXG_MASK]));
		}

		if (!metaslab_debug_unload)
			metaslab_unload(msp);
	}

	metaslab_group_sort(mg, msp, metaslab_weight(msp));
	mutex_exit(&msp->ms_lock);
}
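
/*
 * Update the metaslab group's allocatable status and fragmentation metric,
 * then preload the metaslabs that are most likely to be allocated from next.
 */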
void
metaslab_sync_reassess(metaslab_group_t *mg)
{
	metaslab_group_alloc_update(mg);
	mg->mg_fragmentation = metaslab_group_fragmentation(mg);

	/*
	 * Preload the next potential metaslabs
	 */
	metaslab_group_preload(mg);
}
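
/*
 * Return the distance, in bytes, between this metaslab and the metaslab
 * containing the given DVA's offset. DVAs on a different top-level vdev are
 * treated as maximally distant. Used to spread ditto copies of a block
 * apart when they land on the same vdev.
 */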
static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
	uint64_t start = msp->ms_id;

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (1ULL << 63);

	if (offset < start)
		return ((start - offset) << ms_shift);
	if (offset > start)
		return ((offset - start) << ms_shift);
	return (0);
}
/*
 * ==========================================================================
 * Metaslab block operations
 * ==========================================================================
 */
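
/*
 * Allocation-throttle bookkeeping: each asynchronous, throttled allocation
 * takes a reference on the metaslab group's mg_alloc_queue_depth for every
 * DVA it allocates, and drops that reference through the matching decrement
 * below. Synchronous or unthrottled allocations are ignored.
 */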
static void
metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags)
{
	metaslab_group_t *mg;

	if (!(flags & METASLAB_ASYNC_ALLOC) ||
	    flags & METASLAB_DONT_THROTTLE)
		return;

	mg = vdev_lookup_top(spa, vdev)->vdev_mg;
	if (!mg->mg_class->mc_alloc_throttle_enabled)
		return;

	(void) refcount_add(&mg->mg_alloc_queue_depth, tag);
}
void
metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags)
{
	metaslab_group_t *mg;

	if (!(flags & METASLAB_ASYNC_ALLOC) ||
	    flags & METASLAB_DONT_THROTTLE)
		return;

	mg = vdev_lookup_top(spa, vdev)->vdev_mg;
	if (!mg->mg_class->mc_alloc_throttle_enabled)
		return;

	(void) refcount_remove(&mg->mg_alloc_queue_depth, tag);
}
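
/*
 * Verify that the given tag no longer holds a queue-depth reference on any
 * of the metaslab groups backing this block pointer's DVAs.
 */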
void
metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag)
{
#ifdef ZFS_DEBUG
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int d;

	for (d = 0; d < ndvas; d++) {
		uint64_t vdev = DVA_GET_VDEV(&dva[d]);
		metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
		VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag));
	}
#endif
}
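
/*
 * Attempt to allocate asize bytes from this metaslab group. Walk the group's
 * metaslabs in weight order, activate the first suitable candidate, and
 * allocate from it. Returns the offset of the allocation, or -1ULL if the
 * group cannot satisfy the request.
 */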
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, uint64_t asize,
    uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t activation_weight;
	uint64_t target_distance;
	int i;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (i = 0; i < d; i++) {
		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_SECONDARY;
			break;
		}
	}

	for (;;) {
		boolean_t was_active;

		mutex_enter(&mg->mg_lock);
		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
			if (msp->ms_weight < asize) {
				spa_dbgmsg(spa, "%s: failed to meet weight "
				    "requirement: vdev %llu, txg %llu, mg %p, "
				    "msp %p, asize %llu, "
				    "weight %llu", spa_name(spa),
				    mg->mg_vd->vdev_id, txg,
				    mg, msp, asize, msp->ms_weight);
				mutex_exit(&mg->mg_lock);
				return (-1ULL);
			}

			/*
			 * If the selected metaslab is condensing, skip it.
			 */
			if (msp->ms_condensing)
				continue;

			was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
				break;

			target_distance = min_distance +
			    (space_map_allocated(msp->ms_sm) != 0 ? 0 :
			    min_distance >> 1);

			for (i = 0; i < d; i++)
				if (metaslab_distance(msp, &dva[i]) <
				    target_distance)
					break;
			if (i == d)
				break;
		}
		mutex_exit(&mg->mg_lock);
		if (msp == NULL)
			return (-1ULL);

		mutex_enter(&msp->ms_lock);

		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request. It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock.
		 */
		if (msp->ms_weight < asize || (was_active &&
		    !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY)) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			metaslab_passivate(msp,
			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (metaslab_activate(msp, activation_weight) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If this metaslab is currently condensing then pick again as
		 * we can't manipulate this metaslab until it's committed
		 * to disk.
		 */
		if (msp->ms_condensing) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL)
			break;

		metaslab_passivate(msp, metaslab_block_maxsize(msp));
		mutex_exit(&msp->ms_lock);
	}

	if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

	range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize);
	msp->ms_access_txg = txg + metaslab_unload_delay;

	mutex_exit(&msp->ms_lock);

	return (offset);
}
/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
{
	metaslab_group_t *mg, *fast_mg, *rotor;
	vdev_t *vd;
	int dshift = 3;
	int all_zero;
	int zio_lock = B_FALSE;
	boolean_t allocatable;
	uint64_t offset = -1ULL;
	uint64_t asize;
	uint64_t distance;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 */
	if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
		return (SET_ERROR(ENOSPC));

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_aliquot because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly. It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs. If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible. If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about. Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data. With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header. That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

		/*
		 * It's possible the vdev we're using as the hint no
		 * longer exists (i.e. removed). Consult the rotor when
		 * all else fails.
		 */
		if (vd != NULL) {
			mg = vd->vdev_mg;

			if (flags & METASLAB_HINTBP_AVOID &&
			    mg->mg_next != NULL)
				mg = mg->mg_next;
		} else {
			mg = mc->mc_rotor;
		}
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else if (flags & METASLAB_FASTWRITE) {
		mg = fast_mg = mc->mc_rotor;

		do {
			if (fast_mg->mg_vd->vdev_pending_fastwrite <
			    mg->mg_vd->vdev_pending_fastwrite)
				mg = fast_mg;
		} while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);
	} else {
		mg = mc->mc_rotor;
	}

	/*
	 * If the hint put us into the wrong metaslab class, or into a
	 * metaslab group that has been passivated, just follow the rotor.
	 */
	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
		mg = mc->mc_rotor;

	rotor = mg;
top:
	all_zero = B_TRUE;
	do {
		ASSERT(mg->mg_activation_count == 1);
		vd = mg->mg_vd;

		/*
		 * Don't allocate from faulted devices.
		 */
		if (zio_lock) {
			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
			allocatable = vdev_allocatable(vd);
			spa_config_exit(spa, SCL_ZIO, FTAG);
		} else {
			allocatable = vdev_allocatable(vd);
		}

		/*
		 * Determine if the selected metaslab group is eligible
		 * for allocations. If we're ganging then don't allow
		 * this metaslab group to skip allocations since that would
		 * inadvertently return ENOSPC and suspend the pool
		 * even though space is still available.
		 */
		if (allocatable && !GANG_ALLOCATION(flags) && !zio_lock) {
			allocatable = metaslab_group_allocatable(mg, rotor,
			    psize);
		}

		if (!allocatable)
			goto next;

		ASSERT(mg->mg_initialized);

		/*
		 * Avoid writing single-copy data to a failing vdev.
		 */
		if ((vd->vdev_stat.vs_write_errors > 0 ||
		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
		    d == 0 && dshift == 3 && vd->vdev_children == 0) {
			all_zero = B_FALSE;
			goto next;
		}

		ASSERT(mg->mg_class == mc);

		distance = vd->vdev_asize >> dshift;
		if (distance <= (1ULL << vd->vdev_ms_shift))
			distance = 0;
		else
			all_zero = B_FALSE;

		asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		offset = metaslab_group_alloc(mg, asize, txg, distance,
		    dva, d);

		mutex_enter(&mg->mg_lock);
		if (offset == -1ULL) {
			mg->mg_failed_allocations++;
			if (asize == SPA_GANGBLOCKSIZE) {
				/*
				 * This metaslab group was unable to allocate
				 * the minimum gang block size so it must be
				 * out of space. We must notify the allocation
				 * throttle to start skipping allocation
				 * attempts to this metaslab group until more
				 * space becomes available.
				 *
				 * Note: this failure cannot be caused by the
				 * allocation throttle since the allocation
				 * throttle is only responsible for skipping
				 * devices and not failing block allocations.
				 */
				mg->mg_no_free_space = B_TRUE;
			}
		}
		mg->mg_allocations++;
		mutex_exit(&mg->mg_lock);

		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 *
			 * Bias is also used to compensate for unequally
			 * sized vdevs so that space is allocated fairly.
			 */
			if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
				vdev_stat_t *vs = &vd->vdev_stat;
				int64_t vs_free = vs->vs_space - vs->vs_alloc;
				int64_t mc_free = mc->mc_space - mc->mc_alloc;
				int64_t ratio;

				/*
				 * Calculate how much more or less we should
				 * try to allocate from this device during
				 * this iteration around the rotor.
				 *
				 * This basically introduces a zero-centered
				 * bias towards the devices with the most
				 * free space, while compensating for vdev
				 * size differences.
				 *
				 * Examples:
				 *  vdev V1 = 16M/128M
				 *  vdev V2 = 16M/128M
				 *  ratio(V1) = 100% ratio(V2) = 100%
				 *
				 *  vdev V1 = 16M/128M
				 *  vdev V2 = 64M/128M
				 *  ratio(V1) = 127% ratio(V2) = 72%
				 *
				 *  vdev V1 = 16M/128M
				 *  vdev V2 = 64M/512M
				 *  ratio(V1) = 40% ratio(V2) = 160%
				 */
				ratio = (vs_free * mc->mc_alloc_groups * 100) /
				    (mc_free == 0 ? 1 : mc_free);
				mg->mg_bias = ((ratio - 100) *
				    (int64_t)mg->mg_aliquot) / 100;
			} else if (!metaslab_bias_enabled) {
				mg->mg_bias = 0;
			}

			if ((flags & METASLAB_FASTWRITE) ||
			    atomic_add_64_nv(&mc->mc_aliquot, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_aliquot = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d],
			    ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
			DVA_SET_ASIZE(&dva[d], asize);

			if (flags & METASLAB_FASTWRITE) {
				atomic_add_64(&vd->vdev_pending_fastwrite,
				    psize);
			}

			return (0);
		}
next:
		mc->mc_rotor = mg->mg_next;
		mc->mc_aliquot = 0;
	} while ((mg = mg->mg_next) != rotor);

	if (!all_zero) {
		dshift++;
		ASSERT(dshift < 64);
		goto top;
	}

	if (!allocatable && !zio_lock) {
		dshift = 3;
		zio_lock = B_TRUE;
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	return (SET_ERROR(ENOSPC));
}
/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset,
		    (u_longlong_t)size);
		return;
	}

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if (now) {
		range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
		    offset, size);

		VERIFY(!msp->ms_condensing);
		VERIFY3U(offset, >=, msp->ms_start);
		VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
		VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
		    msp->ms_size);
		VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
		range_tree_add(msp->ms_tree, offset, size);
	} else {
		if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_freetree[txg & TXG_MASK],
		    offset, size);
	}

	mutex_exit(&msp->ms_lock);
}
/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;
	int error = 0;

	ASSERT(DVA_IS_VALID(dva));

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
		return (SET_ERROR(ENXIO));

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
		error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);

	if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
		error = SET_ERROR(ENOENT);

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	VERIFY(!msp->ms_condensing);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
	range_tree_remove(msp->ms_tree, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);

	return (0);
}
/*
 * Reserve some allocation slots. The reservation system must be called
 * before we call into the allocator. If there aren't any available slots
 * then the I/O will be throttled until an I/O completes and its slots are
 * freed up. The function returns true if it was successful in placing
 * the reservation.
 */
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
    int flags)
{
	uint64_t available_slots = 0;
	uint64_t reserved_slots;
	boolean_t slot_reserved = B_FALSE;

	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);

	reserved_slots = refcount_count(&mc->mc_alloc_slots);
	if (reserved_slots < mc->mc_alloc_max_slots)
		available_slots = mc->mc_alloc_max_slots - reserved_slots;

	if (slots <= available_slots || GANG_ALLOCATION(flags)) {
		int d;

		/*
		 * We reserve the slots individually so that we can unreserve
		 * them individually when an I/O completes.
		 */
		for (d = 0; d < slots; d++) {
			reserved_slots = refcount_add(&mc->mc_alloc_slots, zio);
		}
		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
		slot_reserved = B_TRUE;
	}

	mutex_exit(&mc->mc_lock);
	return (slot_reserved);
}
void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio)
{
	int d;

	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);
	for (d = 0; d < slots; d++) {
		(void) refcount_remove(&mc->mc_alloc_slots, zio);
	}
	mutex_exit(&mc->mc_lock);
}
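
/*
 * Allocate ndvas DVAs for the given block pointer, charging each successful
 * DVA against its metaslab group's allocation queue depth. On failure, any
 * DVAs already allocated are freed and their queue-depth references dropped
 * before the error is returned.
 */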
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags, zio_t *zio)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int d, error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

	for (d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags);
		if (error != 0) {
			for (d--; d >= 0; d--) {
				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
				metaslab_group_alloc_decrement(spa,
				    DVA_GET_VDEV(&dva[d]), zio, flags);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		} else {
			/*
			 * Update the metaslab group's queue depth
			 * based on the newly allocated dva.
			 */
			metaslab_group_alloc_increment(spa,
			    DVA_GET_VDEV(&dva[d]), zio, flags);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, 0);

	return (0);
}
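
/*
 * Free every DVA in the block pointer. If 'now' is set the space goes
 * straight back into the metaslab's free tree; otherwise it is recorded in
 * this txg's freetree so the free can be deferred.
 */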
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int d, ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++)
		metaslab_free_dva(spa, &dva[d], txg, now);

	spa_config_exit(spa, SCL_FREE, FTAG);
}
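
/*
 * Claim every DVA in the block pointer, performing a dry run first (txg of
 * zero) so that a partial failure never needs to be unwound.
 */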
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int d, error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			break;

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}
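
/*
 * Fastwrite accounting: record the physical size of this block pointer
 * against each backing vdev's vdev_pending_fastwrite so that future
 * METASLAB_FASTWRITE allocations prefer the least loaded vdev, and unwind
 * that accounting when the write completes.
 */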
void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	vdev_t *vd;
	int d;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		atomic_add_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}
void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	vdev_t *vd;
	int d;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
		atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}
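
/*
 * Debug check: when ZFS_DEBUG_ZIO_FREE is enabled, verify that the block
 * being freed does not already appear in the metaslab's free tree,
 * freetrees, or defertrees (i.e. catch double frees).
 */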
void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
	int i, j;

	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
		metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

		if (msp->ms_loaded)
			range_tree_verify(msp->ms_tree, offset, size);

		for (j = 0; j < TXG_SIZE; j++)
			range_tree_verify(msp->ms_freetree[j], offset, size);
		for (j = 0; j < TXG_DEFER_SIZE; j++)
			range_tree_verify(msp->ms_defertree[j], offset, size);
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(metaslab_aliquot, ulong, 0644);
MODULE_PARM_DESC(metaslab_aliquot,
	"allocation granularity (a.k.a. stripe size)");

module_param(metaslab_debug_load, int, 0644);
MODULE_PARM_DESC(metaslab_debug_load,
	"load all metaslabs when pool is first opened");

module_param(metaslab_debug_unload, int, 0644);
MODULE_PARM_DESC(metaslab_debug_unload,
	"prevent metaslabs from being unloaded");

module_param(metaslab_preload_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_preload_enabled,
	"preload potential metaslabs during reassessment");

module_param(zfs_mg_noalloc_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
	"percentage of free space for metaslab group to allow allocation");

module_param(zfs_mg_fragmentation_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
	"fragmentation for metaslab group to allow allocation");

module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
	"fragmentation for metaslab to allow allocation");

module_param(metaslab_fragmentation_factor_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
	"use the fragmentation metric to prefer less fragmented metaslabs");

module_param(metaslab_lba_weighting_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
	"prefer metaslabs with lower LBAs");

module_param(metaslab_bias_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_bias_enabled,
	"enable metaslab group biasing");
#endif /* _KERNEL && HAVE_SPL */