4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
27 #include <sys/zfs_context.h>
29 #include <sys/dmu_tx.h>
30 #include <sys/space_map.h>
31 #include <sys/metaslab_impl.h>
32 #include <sys/vdev_impl.h>
34 #include <sys/spa_impl.h>
35 #include <sys/zfeature.h>
37 #define WITH_DF_BLOCK_ALLOCATOR
40 * Allow allocations to switch to gang blocks quickly. We do this to
41 * avoid having to load lots of space_maps in a given txg. There are,
42 * however, some cases where we want to avoid "fast" ganging and instead
43 * we want to do an exhaustive search of all metaslabs on this device.
44 * Currently we don't allow any gang, slog, or dump device related allocations
47 #define CAN_FASTGANG(flags) \
48 (!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
49 METASLAB_GANG_AVOID)))
51 #define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
52 #define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
53 #define METASLAB_ACTIVE_MASK \
54 (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
56 uint64_t metaslab_aliquot
= 512ULL << 10;
57 uint64_t metaslab_gang_bang
= SPA_MAXBLOCKSIZE
+ 1; /* force gang blocks */
60 * The in-core space map representation is more compact than its on-disk form.
61 * The zfs_condense_pct determines how much more compact the in-core
62 * space_map representation must be before we compact it on-disk.
63 * Values should be greater than or equal to 100.
65 int zfs_condense_pct
= 200;
68 * Condensing a metaslab is not guaranteed to actually reduce the amount of
69 * space used on disk. In particular, a space map uses data in increments of
70 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
71 * same number of blocks after condensing. Since the goal of condensing is to
72 * reduce the number of IOPs required to read the space map, we only want to
73 * condense when we can be sure we will reduce the number of blocks used by the
74 * space map. Unfortunately, we cannot precisely compute whether or not this is
75 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
76 * we apply the following heuristic: do not condense a spacemap unless the
77 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
80 int zfs_metaslab_condense_block_threshold
= 4;
83 * The zfs_mg_noalloc_threshold defines which metaslab groups should
84 * be eligible for allocation. The value is defined as a percentage of
85 * free space. Metaslab groups that have more free space than
86 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
87 * a metaslab group's free space is less than or equal to the
88 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
89 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
90 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
91 * groups are allowed to accept allocations. Gang blocks are always
92 * eligible to allocate on any metaslab group. The default value of 0 means
93 * no metaslab group will be excluded based on this criterion.
95 int zfs_mg_noalloc_threshold
= 0;
98 * Metaslab groups are considered eligible for allocations if their
99 * fragmenation metric (measured as a percentage) is less than or equal to
100 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
101 * then it will be skipped unless all metaslab groups within the metaslab
102 * class have also crossed this threshold.
104 int zfs_mg_fragmentation_threshold
= 85;
107 * Allow metaslabs to keep their active state as long as their fragmentation
108 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
109 * active metaslab that exceeds this threshold will no longer keep its active
110 * status allowing better metaslabs to be selected.
112 int zfs_metaslab_fragmentation_threshold
= 70;
115 * When set will load all metaslabs when pool is first opened.
117 int metaslab_debug_load
= 0;
120 * When set will prevent metaslabs from being unloaded.
122 int metaslab_debug_unload
= 0;
125 * Minimum size which forces the dynamic allocator to change
126 * it's allocation strategy. Once the space map cannot satisfy
127 * an allocation of this size then it switches to using more
128 * aggressive strategy (i.e search by size rather than offset).
130 uint64_t metaslab_df_alloc_threshold
= SPA_MAXBLOCKSIZE
;
133 * The minimum free space, in percent, which must be available
134 * in a space map to continue allocations in a first-fit fashion.
135 * Once the space_map's free space drops below this level we dynamically
136 * switch to using best-fit allocations.
138 int metaslab_df_free_pct
= 4;
141 * A metaslab is considered "free" if it contains a contiguous
142 * segment which is greater than metaslab_min_alloc_size.
144 uint64_t metaslab_min_alloc_size
= DMU_MAX_ACCESS
;
147 * Percentage of all cpus that can be used by the metaslab taskq.
149 int metaslab_load_pct
= 50;
152 * Determines how many txgs a metaslab may remain loaded without having any
153 * allocations from it. As long as a metaslab continues to be used we will
156 int metaslab_unload_delay
= TXG_SIZE
* 2;
159 * Max number of metaslabs per group to preload.
161 int metaslab_preload_limit
= SPA_DVAS_PER_BP
;
164 * Enable/disable preloading of metaslab.
166 int metaslab_preload_enabled
= B_TRUE
;
169 * Enable/disable fragmentation weighting on metaslabs.
171 int metaslab_fragmentation_factor_enabled
= B_TRUE
;
174 * Enable/disable lba weighting (i.e. outer tracks are given preference).
176 int metaslab_lba_weighting_enabled
= B_TRUE
;
179 * Enable/disable metaslab group biasing.
181 int metaslab_bias_enabled
= B_TRUE
;
183 static uint64_t metaslab_fragmentation(metaslab_t
*);
186 * ==========================================================================
188 * ==========================================================================
191 metaslab_class_create(spa_t
*spa
, metaslab_ops_t
*ops
)
193 metaslab_class_t
*mc
;
195 mc
= kmem_zalloc(sizeof (metaslab_class_t
), KM_SLEEP
);
200 mutex_init(&mc
->mc_fastwrite_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
206 metaslab_class_destroy(metaslab_class_t
*mc
)
208 ASSERT(mc
->mc_rotor
== NULL
);
209 ASSERT(mc
->mc_alloc
== 0);
210 ASSERT(mc
->mc_deferred
== 0);
211 ASSERT(mc
->mc_space
== 0);
212 ASSERT(mc
->mc_dspace
== 0);
214 mutex_destroy(&mc
->mc_fastwrite_lock
);
215 kmem_free(mc
, sizeof (metaslab_class_t
));
219 metaslab_class_validate(metaslab_class_t
*mc
)
221 metaslab_group_t
*mg
;
225 * Must hold one of the spa_config locks.
227 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALL
, RW_READER
) ||
228 spa_config_held(mc
->mc_spa
, SCL_ALL
, RW_WRITER
));
230 if ((mg
= mc
->mc_rotor
) == NULL
)
235 ASSERT(vd
->vdev_mg
!= NULL
);
236 ASSERT3P(vd
->vdev_top
, ==, vd
);
237 ASSERT3P(mg
->mg_class
, ==, mc
);
238 ASSERT3P(vd
->vdev_ops
, !=, &vdev_hole_ops
);
239 } while ((mg
= mg
->mg_next
) != mc
->mc_rotor
);
245 metaslab_class_space_update(metaslab_class_t
*mc
, int64_t alloc_delta
,
246 int64_t defer_delta
, int64_t space_delta
, int64_t dspace_delta
)
248 atomic_add_64(&mc
->mc_alloc
, alloc_delta
);
249 atomic_add_64(&mc
->mc_deferred
, defer_delta
);
250 atomic_add_64(&mc
->mc_space
, space_delta
);
251 atomic_add_64(&mc
->mc_dspace
, dspace_delta
);
255 metaslab_class_get_alloc(metaslab_class_t
*mc
)
257 return (mc
->mc_alloc
);
261 metaslab_class_get_deferred(metaslab_class_t
*mc
)
263 return (mc
->mc_deferred
);
267 metaslab_class_get_space(metaslab_class_t
*mc
)
269 return (mc
->mc_space
);
273 metaslab_class_get_dspace(metaslab_class_t
*mc
)
275 return (spa_deflate(mc
->mc_spa
) ? mc
->mc_dspace
: mc
->mc_space
);
279 metaslab_class_histogram_verify(metaslab_class_t
*mc
)
281 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
285 if ((zfs_flags
& ZFS_DEBUG_HISTOGRAM_VERIFY
) == 0)
288 mc_hist
= kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
,
291 for (c
= 0; c
< rvd
->vdev_children
; c
++) {
292 vdev_t
*tvd
= rvd
->vdev_child
[c
];
293 metaslab_group_t
*mg
= tvd
->vdev_mg
;
296 * Skip any holes, uninitialized top-levels, or
297 * vdevs that are not in this metalab class.
299 if (tvd
->vdev_ishole
|| tvd
->vdev_ms_shift
== 0 ||
300 mg
->mg_class
!= mc
) {
304 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
305 mc_hist
[i
] += mg
->mg_histogram
[i
];
308 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
309 VERIFY3U(mc_hist
[i
], ==, mc
->mc_histogram
[i
]);
311 kmem_free(mc_hist
, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
);
315 * Calculate the metaslab class's fragmentation metric. The metric
316 * is weighted based on the space contribution of each metaslab group.
317 * The return value will be a number between 0 and 100 (inclusive), or
318 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
319 * zfs_frag_table for more information about the metric.
322 metaslab_class_fragmentation(metaslab_class_t
*mc
)
324 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
325 uint64_t fragmentation
= 0;
328 spa_config_enter(mc
->mc_spa
, SCL_VDEV
, FTAG
, RW_READER
);
330 for (c
= 0; c
< rvd
->vdev_children
; c
++) {
331 vdev_t
*tvd
= rvd
->vdev_child
[c
];
332 metaslab_group_t
*mg
= tvd
->vdev_mg
;
335 * Skip any holes, uninitialized top-levels, or
336 * vdevs that are not in this metalab class.
338 if (tvd
->vdev_ishole
|| tvd
->vdev_ms_shift
== 0 ||
339 mg
->mg_class
!= mc
) {
344 * If a metaslab group does not contain a fragmentation
345 * metric then just bail out.
347 if (mg
->mg_fragmentation
== ZFS_FRAG_INVALID
) {
348 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
349 return (ZFS_FRAG_INVALID
);
353 * Determine how much this metaslab_group is contributing
354 * to the overall pool fragmentation metric.
356 fragmentation
+= mg
->mg_fragmentation
*
357 metaslab_group_get_space(mg
);
359 fragmentation
/= metaslab_class_get_space(mc
);
361 ASSERT3U(fragmentation
, <=, 100);
362 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
363 return (fragmentation
);
367 * Calculate the amount of expandable space that is available in
368 * this metaslab class. If a device is expanded then its expandable
369 * space will be the amount of allocatable space that is currently not
370 * part of this metaslab class.
373 metaslab_class_expandable_space(metaslab_class_t
*mc
)
375 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
379 spa_config_enter(mc
->mc_spa
, SCL_VDEV
, FTAG
, RW_READER
);
380 for (c
= 0; c
< rvd
->vdev_children
; c
++) {
381 vdev_t
*tvd
= rvd
->vdev_child
[c
];
382 metaslab_group_t
*mg
= tvd
->vdev_mg
;
384 if (tvd
->vdev_ishole
|| tvd
->vdev_ms_shift
== 0 ||
385 mg
->mg_class
!= mc
) {
389 space
+= tvd
->vdev_max_asize
- tvd
->vdev_asize
;
391 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
396 * ==========================================================================
398 * ==========================================================================
401 metaslab_compare(const void *x1
, const void *x2
)
403 const metaslab_t
*m1
= x1
;
404 const metaslab_t
*m2
= x2
;
406 if (m1
->ms_weight
< m2
->ms_weight
)
408 if (m1
->ms_weight
> m2
->ms_weight
)
412 * If the weights are identical, use the offset to force uniqueness.
414 if (m1
->ms_start
< m2
->ms_start
)
416 if (m1
->ms_start
> m2
->ms_start
)
419 ASSERT3P(m1
, ==, m2
);
425 * Update the allocatable flag and the metaslab group's capacity.
426 * The allocatable flag is set to true if the capacity is below
427 * the zfs_mg_noalloc_threshold. If a metaslab group transitions
428 * from allocatable to non-allocatable or vice versa then the metaslab
429 * group's class is updated to reflect the transition.
432 metaslab_group_alloc_update(metaslab_group_t
*mg
)
434 vdev_t
*vd
= mg
->mg_vd
;
435 metaslab_class_t
*mc
= mg
->mg_class
;
436 vdev_stat_t
*vs
= &vd
->vdev_stat
;
437 boolean_t was_allocatable
;
439 ASSERT(vd
== vd
->vdev_top
);
441 mutex_enter(&mg
->mg_lock
);
442 was_allocatable
= mg
->mg_allocatable
;
444 mg
->mg_free_capacity
= ((vs
->vs_space
- vs
->vs_alloc
) * 100) /
448 * A metaslab group is considered allocatable if it has plenty
449 * of free space or is not heavily fragmented. We only take
450 * fragmentation into account if the metaslab group has a valid
451 * fragmentation metric (i.e. a value between 0 and 100).
453 mg
->mg_allocatable
= (mg
->mg_free_capacity
> zfs_mg_noalloc_threshold
&&
454 (mg
->mg_fragmentation
== ZFS_FRAG_INVALID
||
455 mg
->mg_fragmentation
<= zfs_mg_fragmentation_threshold
));
458 * The mc_alloc_groups maintains a count of the number of
459 * groups in this metaslab class that are still above the
460 * zfs_mg_noalloc_threshold. This is used by the allocating
461 * threads to determine if they should avoid allocations to
462 * a given group. The allocator will avoid allocations to a group
463 * if that group has reached or is below the zfs_mg_noalloc_threshold
464 * and there are still other groups that are above the threshold.
465 * When a group transitions from allocatable to non-allocatable or
466 * vice versa we update the metaslab class to reflect that change.
467 * When the mc_alloc_groups value drops to 0 that means that all
468 * groups have reached the zfs_mg_noalloc_threshold making all groups
469 * eligible for allocations. This effectively means that all devices
470 * are balanced again.
472 if (was_allocatable
&& !mg
->mg_allocatable
)
473 mc
->mc_alloc_groups
--;
474 else if (!was_allocatable
&& mg
->mg_allocatable
)
475 mc
->mc_alloc_groups
++;
477 mutex_exit(&mg
->mg_lock
);
481 metaslab_group_create(metaslab_class_t
*mc
, vdev_t
*vd
)
483 metaslab_group_t
*mg
;
485 mg
= kmem_zalloc(sizeof (metaslab_group_t
), KM_SLEEP
);
486 mutex_init(&mg
->mg_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
487 avl_create(&mg
->mg_metaslab_tree
, metaslab_compare
,
488 sizeof (metaslab_t
), offsetof(struct metaslab
, ms_group_node
));
491 mg
->mg_activation_count
= 0;
493 mg
->mg_taskq
= taskq_create("metaslab_group_taskq", metaslab_load_pct
,
494 minclsyspri
, 10, INT_MAX
, TASKQ_THREADS_CPU_PCT
);
500 metaslab_group_destroy(metaslab_group_t
*mg
)
502 ASSERT(mg
->mg_prev
== NULL
);
503 ASSERT(mg
->mg_next
== NULL
);
505 * We may have gone below zero with the activation count
506 * either because we never activated in the first place or
507 * because we're done, and possibly removing the vdev.
509 ASSERT(mg
->mg_activation_count
<= 0);
511 taskq_destroy(mg
->mg_taskq
);
512 avl_destroy(&mg
->mg_metaslab_tree
);
513 mutex_destroy(&mg
->mg_lock
);
514 kmem_free(mg
, sizeof (metaslab_group_t
));
518 metaslab_group_activate(metaslab_group_t
*mg
)
520 metaslab_class_t
*mc
= mg
->mg_class
;
521 metaslab_group_t
*mgprev
, *mgnext
;
523 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALLOC
, RW_WRITER
));
525 ASSERT(mc
->mc_rotor
!= mg
);
526 ASSERT(mg
->mg_prev
== NULL
);
527 ASSERT(mg
->mg_next
== NULL
);
528 ASSERT(mg
->mg_activation_count
<= 0);
530 if (++mg
->mg_activation_count
<= 0)
533 mg
->mg_aliquot
= metaslab_aliquot
* MAX(1, mg
->mg_vd
->vdev_children
);
534 metaslab_group_alloc_update(mg
);
536 if ((mgprev
= mc
->mc_rotor
) == NULL
) {
540 mgnext
= mgprev
->mg_next
;
541 mg
->mg_prev
= mgprev
;
542 mg
->mg_next
= mgnext
;
543 mgprev
->mg_next
= mg
;
544 mgnext
->mg_prev
= mg
;
550 metaslab_group_passivate(metaslab_group_t
*mg
)
552 metaslab_class_t
*mc
= mg
->mg_class
;
553 metaslab_group_t
*mgprev
, *mgnext
;
555 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALLOC
, RW_WRITER
));
557 if (--mg
->mg_activation_count
!= 0) {
558 ASSERT(mc
->mc_rotor
!= mg
);
559 ASSERT(mg
->mg_prev
== NULL
);
560 ASSERT(mg
->mg_next
== NULL
);
561 ASSERT(mg
->mg_activation_count
< 0);
565 taskq_wait(mg
->mg_taskq
);
566 metaslab_group_alloc_update(mg
);
568 mgprev
= mg
->mg_prev
;
569 mgnext
= mg
->mg_next
;
574 mc
->mc_rotor
= mgnext
;
575 mgprev
->mg_next
= mgnext
;
576 mgnext
->mg_prev
= mgprev
;
584 metaslab_group_get_space(metaslab_group_t
*mg
)
586 return ((1ULL << mg
->mg_vd
->vdev_ms_shift
) * mg
->mg_vd
->vdev_ms_count
);
590 metaslab_group_histogram_verify(metaslab_group_t
*mg
)
593 vdev_t
*vd
= mg
->mg_vd
;
594 uint64_t ashift
= vd
->vdev_ashift
;
597 if ((zfs_flags
& ZFS_DEBUG_HISTOGRAM_VERIFY
) == 0)
600 mg_hist
= kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
,
603 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE
, >=,
604 SPACE_MAP_HISTOGRAM_SIZE
+ ashift
);
606 for (m
= 0; m
< vd
->vdev_ms_count
; m
++) {
607 metaslab_t
*msp
= vd
->vdev_ms
[m
];
609 if (msp
->ms_sm
== NULL
)
612 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++)
613 mg_hist
[i
+ ashift
] +=
614 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
617 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
618 VERIFY3U(mg_hist
[i
], ==, mg
->mg_histogram
[i
]);
620 kmem_free(mg_hist
, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
);
624 metaslab_group_histogram_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
626 metaslab_class_t
*mc
= mg
->mg_class
;
627 uint64_t ashift
= mg
->mg_vd
->vdev_ashift
;
630 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
631 if (msp
->ms_sm
== NULL
)
634 mutex_enter(&mg
->mg_lock
);
635 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
636 mg
->mg_histogram
[i
+ ashift
] +=
637 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
638 mc
->mc_histogram
[i
+ ashift
] +=
639 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
641 mutex_exit(&mg
->mg_lock
);
645 metaslab_group_histogram_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
647 metaslab_class_t
*mc
= mg
->mg_class
;
648 uint64_t ashift
= mg
->mg_vd
->vdev_ashift
;
651 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
652 if (msp
->ms_sm
== NULL
)
655 mutex_enter(&mg
->mg_lock
);
656 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
657 ASSERT3U(mg
->mg_histogram
[i
+ ashift
], >=,
658 msp
->ms_sm
->sm_phys
->smp_histogram
[i
]);
659 ASSERT3U(mc
->mc_histogram
[i
+ ashift
], >=,
660 msp
->ms_sm
->sm_phys
->smp_histogram
[i
]);
662 mg
->mg_histogram
[i
+ ashift
] -=
663 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
664 mc
->mc_histogram
[i
+ ashift
] -=
665 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
667 mutex_exit(&mg
->mg_lock
);
671 metaslab_group_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
673 ASSERT(msp
->ms_group
== NULL
);
674 mutex_enter(&mg
->mg_lock
);
677 avl_add(&mg
->mg_metaslab_tree
, msp
);
678 mutex_exit(&mg
->mg_lock
);
680 mutex_enter(&msp
->ms_lock
);
681 metaslab_group_histogram_add(mg
, msp
);
682 mutex_exit(&msp
->ms_lock
);
686 metaslab_group_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
688 mutex_enter(&msp
->ms_lock
);
689 metaslab_group_histogram_remove(mg
, msp
);
690 mutex_exit(&msp
->ms_lock
);
692 mutex_enter(&mg
->mg_lock
);
693 ASSERT(msp
->ms_group
== mg
);
694 avl_remove(&mg
->mg_metaslab_tree
, msp
);
695 msp
->ms_group
= NULL
;
696 mutex_exit(&mg
->mg_lock
);
700 metaslab_group_sort(metaslab_group_t
*mg
, metaslab_t
*msp
, uint64_t weight
)
703 * Although in principle the weight can be any value, in
704 * practice we do not use values in the range [1, 511].
706 ASSERT(weight
>= SPA_MINBLOCKSIZE
|| weight
== 0);
707 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
709 mutex_enter(&mg
->mg_lock
);
710 ASSERT(msp
->ms_group
== mg
);
711 avl_remove(&mg
->mg_metaslab_tree
, msp
);
712 msp
->ms_weight
= weight
;
713 avl_add(&mg
->mg_metaslab_tree
, msp
);
714 mutex_exit(&mg
->mg_lock
);
718 * Calculate the fragmentation for a given metaslab group. We can use
719 * a simple average here since all metaslabs within the group must have
720 * the same size. The return value will be a value between 0 and 100
721 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslab in this
722 * group have a fragmentation metric.
725 metaslab_group_fragmentation(metaslab_group_t
*mg
)
727 vdev_t
*vd
= mg
->mg_vd
;
728 uint64_t fragmentation
= 0;
729 uint64_t valid_ms
= 0;
732 for (m
= 0; m
< vd
->vdev_ms_count
; m
++) {
733 metaslab_t
*msp
= vd
->vdev_ms
[m
];
735 if (msp
->ms_fragmentation
== ZFS_FRAG_INVALID
)
739 fragmentation
+= msp
->ms_fragmentation
;
742 if (valid_ms
<= vd
->vdev_ms_count
/ 2)
743 return (ZFS_FRAG_INVALID
);
745 fragmentation
/= valid_ms
;
746 ASSERT3U(fragmentation
, <=, 100);
747 return (fragmentation
);
751 * Determine if a given metaslab group should skip allocations. A metaslab
752 * group should avoid allocations if its free capacity is less than the
753 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
754 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
755 * that can still handle allocations.
758 metaslab_group_allocatable(metaslab_group_t
*mg
)
760 vdev_t
*vd
= mg
->mg_vd
;
761 spa_t
*spa
= vd
->vdev_spa
;
762 metaslab_class_t
*mc
= mg
->mg_class
;
765 * We use two key metrics to determine if a metaslab group is
766 * considered allocatable -- free space and fragmentation. If
767 * the free space is greater than the free space threshold and
768 * the fragmentation is less than the fragmentation threshold then
769 * consider the group allocatable. There are two case when we will
770 * not consider these key metrics. The first is if the group is
771 * associated with a slog device and the second is if all groups
772 * in this metaslab class have already been consider ineligible
775 return ((mg
->mg_free_capacity
> zfs_mg_noalloc_threshold
&&
776 (mg
->mg_fragmentation
== ZFS_FRAG_INVALID
||
777 mg
->mg_fragmentation
<= zfs_mg_fragmentation_threshold
)) ||
778 mc
!= spa_normal_class(spa
) || mc
->mc_alloc_groups
== 0);
782 * ==========================================================================
783 * Range tree callbacks
784 * ==========================================================================
788 * Comparison function for the private size-ordered tree. Tree is sorted
789 * by size, larger sizes at the end of the tree.
792 metaslab_rangesize_compare(const void *x1
, const void *x2
)
794 const range_seg_t
*r1
= x1
;
795 const range_seg_t
*r2
= x2
;
796 uint64_t rs_size1
= r1
->rs_end
- r1
->rs_start
;
797 uint64_t rs_size2
= r2
->rs_end
- r2
->rs_start
;
799 if (rs_size1
< rs_size2
)
801 if (rs_size1
> rs_size2
)
804 if (r1
->rs_start
< r2
->rs_start
)
807 if (r1
->rs_start
> r2
->rs_start
)
814 * Create any block allocator specific components. The current allocators
815 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
818 metaslab_rt_create(range_tree_t
*rt
, void *arg
)
820 metaslab_t
*msp
= arg
;
822 ASSERT3P(rt
->rt_arg
, ==, msp
);
823 ASSERT(msp
->ms_tree
== NULL
);
825 avl_create(&msp
->ms_size_tree
, metaslab_rangesize_compare
,
826 sizeof (range_seg_t
), offsetof(range_seg_t
, rs_pp_node
));
830 * Destroy the block allocator specific components.
833 metaslab_rt_destroy(range_tree_t
*rt
, void *arg
)
835 metaslab_t
*msp
= arg
;
837 ASSERT3P(rt
->rt_arg
, ==, msp
);
838 ASSERT3P(msp
->ms_tree
, ==, rt
);
839 ASSERT0(avl_numnodes(&msp
->ms_size_tree
));
841 avl_destroy(&msp
->ms_size_tree
);
845 metaslab_rt_add(range_tree_t
*rt
, range_seg_t
*rs
, void *arg
)
847 metaslab_t
*msp
= arg
;
849 ASSERT3P(rt
->rt_arg
, ==, msp
);
850 ASSERT3P(msp
->ms_tree
, ==, rt
);
851 VERIFY(!msp
->ms_condensing
);
852 avl_add(&msp
->ms_size_tree
, rs
);
856 metaslab_rt_remove(range_tree_t
*rt
, range_seg_t
*rs
, void *arg
)
858 metaslab_t
*msp
= arg
;
860 ASSERT3P(rt
->rt_arg
, ==, msp
);
861 ASSERT3P(msp
->ms_tree
, ==, rt
);
862 VERIFY(!msp
->ms_condensing
);
863 avl_remove(&msp
->ms_size_tree
, rs
);
867 metaslab_rt_vacate(range_tree_t
*rt
, void *arg
)
869 metaslab_t
*msp
= arg
;
871 ASSERT3P(rt
->rt_arg
, ==, msp
);
872 ASSERT3P(msp
->ms_tree
, ==, rt
);
875 * Normally one would walk the tree freeing nodes along the way.
876 * Since the nodes are shared with the range trees we can avoid
877 * walking all nodes and just reinitialize the avl tree. The nodes
878 * will be freed by the range tree, so we don't want to free them here.
880 avl_create(&msp
->ms_size_tree
, metaslab_rangesize_compare
,
881 sizeof (range_seg_t
), offsetof(range_seg_t
, rs_pp_node
));
884 static range_tree_ops_t metaslab_rt_ops
= {
893 * ==========================================================================
894 * Metaslab block operations
895 * ==========================================================================
899 * Return the maximum contiguous segment within the metaslab.
902 metaslab_block_maxsize(metaslab_t
*msp
)
904 avl_tree_t
*t
= &msp
->ms_size_tree
;
907 if (t
== NULL
|| (rs
= avl_last(t
)) == NULL
)
910 return (rs
->rs_end
- rs
->rs_start
);
914 metaslab_block_alloc(metaslab_t
*msp
, uint64_t size
)
917 range_tree_t
*rt
= msp
->ms_tree
;
919 VERIFY(!msp
->ms_condensing
);
921 start
= msp
->ms_ops
->msop_alloc(msp
, size
);
922 if (start
!= -1ULL) {
923 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
925 VERIFY0(P2PHASE(start
, 1ULL << vd
->vdev_ashift
));
926 VERIFY0(P2PHASE(size
, 1ULL << vd
->vdev_ashift
));
927 VERIFY3U(range_tree_space(rt
) - size
, <=, msp
->ms_size
);
928 range_tree_remove(rt
, start
, size
);
934 * ==========================================================================
935 * Common allocator routines
936 * ==========================================================================
939 #if defined(WITH_FF_BLOCK_ALLOCATOR) || \
940 defined(WITH_DF_BLOCK_ALLOCATOR) || \
941 defined(WITH_CF_BLOCK_ALLOCATOR)
943 * This is a helper function that can be used by the allocator to find
944 * a suitable block to allocate. This will search the specified AVL
945 * tree looking for a block that matches the specified criteria.
948 metaslab_block_picker(avl_tree_t
*t
, uint64_t *cursor
, uint64_t size
,
951 range_seg_t
*rs
, rsearch
;
954 rsearch
.rs_start
= *cursor
;
955 rsearch
.rs_end
= *cursor
+ size
;
957 rs
= avl_find(t
, &rsearch
, &where
);
959 rs
= avl_nearest(t
, where
, AVL_AFTER
);
962 uint64_t offset
= P2ROUNDUP(rs
->rs_start
, align
);
964 if (offset
+ size
<= rs
->rs_end
) {
965 *cursor
= offset
+ size
;
968 rs
= AVL_NEXT(t
, rs
);
972 * If we know we've searched the whole map (*cursor == 0), give up.
973 * Otherwise, reset the cursor to the beginning and try again.
979 return (metaslab_block_picker(t
, cursor
, size
, align
));
981 #endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */
983 #if defined(WITH_FF_BLOCK_ALLOCATOR)
985 * ==========================================================================
986 * The first-fit block allocator
987 * ==========================================================================
990 metaslab_ff_alloc(metaslab_t
*msp
, uint64_t size
)
993 * Find the largest power of 2 block size that evenly divides the
994 * requested size. This is used to try to allocate blocks with similar
995 * alignment from the same area of the metaslab (i.e. same cursor
996 * bucket) but it does not guarantee that other allocations sizes
997 * may exist in the same region.
999 uint64_t align
= size
& -size
;
1000 uint64_t *cursor
= &msp
->ms_lbas
[highbit64(align
) - 1];
1001 avl_tree_t
*t
= &msp
->ms_tree
->rt_root
;
1003 return (metaslab_block_picker(t
, cursor
, size
, align
));
1006 static metaslab_ops_t metaslab_ff_ops
= {
1010 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_ff_ops
;
1011 #endif /* WITH_FF_BLOCK_ALLOCATOR */
1013 #if defined(WITH_DF_BLOCK_ALLOCATOR)
1015 * ==========================================================================
1016 * Dynamic block allocator -
1017 * Uses the first fit allocation scheme until space get low and then
1018 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
1019 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1020 * ==========================================================================
1023 metaslab_df_alloc(metaslab_t
*msp
, uint64_t size
)
1026 * Find the largest power of 2 block size that evenly divides the
1027 * requested size. This is used to try to allocate blocks with similar
1028 * alignment from the same area of the metaslab (i.e. same cursor
1029 * bucket) but it does not guarantee that other allocations sizes
1030 * may exist in the same region.
1032 uint64_t align
= size
& -size
;
1033 uint64_t *cursor
= &msp
->ms_lbas
[highbit64(align
) - 1];
1034 range_tree_t
*rt
= msp
->ms_tree
;
1035 avl_tree_t
*t
= &rt
->rt_root
;
1036 uint64_t max_size
= metaslab_block_maxsize(msp
);
1037 int free_pct
= range_tree_space(rt
) * 100 / msp
->ms_size
;
1039 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1040 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&msp
->ms_size_tree
));
1042 if (max_size
< size
)
1046 * If we're running low on space switch to using the size
1047 * sorted AVL tree (best-fit).
1049 if (max_size
< metaslab_df_alloc_threshold
||
1050 free_pct
< metaslab_df_free_pct
) {
1051 t
= &msp
->ms_size_tree
;
1055 return (metaslab_block_picker(t
, cursor
, size
, 1ULL));
1058 static metaslab_ops_t metaslab_df_ops
= {
1062 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_df_ops
;
1063 #endif /* WITH_DF_BLOCK_ALLOCATOR */
1065 #if defined(WITH_CF_BLOCK_ALLOCATOR)
1067 * ==========================================================================
1068 * Cursor fit block allocator -
1069 * Select the largest region in the metaslab, set the cursor to the beginning
1070 * of the range and the cursor_end to the end of the range. As allocations
1071 * are made advance the cursor. Continue allocating from the cursor until
1072 * the range is exhausted and then find a new range.
1073 * ==========================================================================
1076 metaslab_cf_alloc(metaslab_t
*msp
, uint64_t size
)
1078 range_tree_t
*rt
= msp
->ms_tree
;
1079 avl_tree_t
*t
= &msp
->ms_size_tree
;
1080 uint64_t *cursor
= &msp
->ms_lbas
[0];
1081 uint64_t *cursor_end
= &msp
->ms_lbas
[1];
1082 uint64_t offset
= 0;
1084 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1085 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&rt
->rt_root
));
1087 ASSERT3U(*cursor_end
, >=, *cursor
);
1089 if ((*cursor
+ size
) > *cursor_end
) {
1092 rs
= avl_last(&msp
->ms_size_tree
);
1093 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
)
1096 *cursor
= rs
->rs_start
;
1097 *cursor_end
= rs
->rs_end
;
1106 static metaslab_ops_t metaslab_cf_ops
= {
1110 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_cf_ops
;
1111 #endif /* WITH_CF_BLOCK_ALLOCATOR */
1113 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
1115 * ==========================================================================
1116 * New dynamic fit allocator -
1117 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1118 * contiguous blocks. If no region is found then just use the largest segment
1120 * ==========================================================================
1124 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1125 * to request from the allocator.
1127 uint64_t metaslab_ndf_clump_shift
= 4;
1130 metaslab_ndf_alloc(metaslab_t
*msp
, uint64_t size
)
1132 avl_tree_t
*t
= &msp
->ms_tree
->rt_root
;
1134 range_seg_t
*rs
, rsearch
;
1135 uint64_t hbit
= highbit64(size
);
1136 uint64_t *cursor
= &msp
->ms_lbas
[hbit
- 1];
1137 uint64_t max_size
= metaslab_block_maxsize(msp
);
1139 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1140 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&msp
->ms_size_tree
));
1142 if (max_size
< size
)
1145 rsearch
.rs_start
= *cursor
;
1146 rsearch
.rs_end
= *cursor
+ size
;
1148 rs
= avl_find(t
, &rsearch
, &where
);
1149 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
) {
1150 t
= &msp
->ms_size_tree
;
1152 rsearch
.rs_start
= 0;
1153 rsearch
.rs_end
= MIN(max_size
,
1154 1ULL << (hbit
+ metaslab_ndf_clump_shift
));
1155 rs
= avl_find(t
, &rsearch
, &where
);
1157 rs
= avl_nearest(t
, where
, AVL_AFTER
);
1161 if ((rs
->rs_end
- rs
->rs_start
) >= size
) {
1162 *cursor
= rs
->rs_start
+ size
;
1163 return (rs
->rs_start
);
1168 static metaslab_ops_t metaslab_ndf_ops
= {
1172 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_ndf_ops
;
1173 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
1177 * ==========================================================================
1179 * ==========================================================================
1183 * Wait for any in-progress metaslab loads to complete.
1186 metaslab_load_wait(metaslab_t
*msp
)
1188 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1190 while (msp
->ms_loading
) {
1191 ASSERT(!msp
->ms_loaded
);
1192 cv_wait(&msp
->ms_load_cv
, &msp
->ms_lock
);
1197 metaslab_load(metaslab_t
*msp
)
1202 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1203 ASSERT(!msp
->ms_loaded
);
1204 ASSERT(!msp
->ms_loading
);
1206 msp
->ms_loading
= B_TRUE
;
1209 * If the space map has not been allocated yet, then treat
1210 * all the space in the metaslab as free and add it to the
1213 if (msp
->ms_sm
!= NULL
)
1214 error
= space_map_load(msp
->ms_sm
, msp
->ms_tree
, SM_FREE
);
1216 range_tree_add(msp
->ms_tree
, msp
->ms_start
, msp
->ms_size
);
1218 msp
->ms_loaded
= (error
== 0);
1219 msp
->ms_loading
= B_FALSE
;
1221 if (msp
->ms_loaded
) {
1222 for (t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1223 range_tree_walk(msp
->ms_defertree
[t
],
1224 range_tree_remove
, msp
->ms_tree
);
1227 cv_broadcast(&msp
->ms_load_cv
);
1232 metaslab_unload(metaslab_t
*msp
)
1234 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1235 range_tree_vacate(msp
->ms_tree
, NULL
, NULL
);
1236 msp
->ms_loaded
= B_FALSE
;
1237 msp
->ms_weight
&= ~METASLAB_ACTIVE_MASK
;
1241 metaslab_init(metaslab_group_t
*mg
, uint64_t id
, uint64_t object
, uint64_t txg
,
1244 vdev_t
*vd
= mg
->mg_vd
;
1245 objset_t
*mos
= vd
->vdev_spa
->spa_meta_objset
;
1249 ms
= kmem_zalloc(sizeof (metaslab_t
), KM_SLEEP
);
1250 mutex_init(&ms
->ms_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1251 cv_init(&ms
->ms_load_cv
, NULL
, CV_DEFAULT
, NULL
);
1253 ms
->ms_start
= id
<< vd
->vdev_ms_shift
;
1254 ms
->ms_size
= 1ULL << vd
->vdev_ms_shift
;
1257 * We only open space map objects that already exist. All others
1258 * will be opened when we finally allocate an object for it.
1261 error
= space_map_open(&ms
->ms_sm
, mos
, object
, ms
->ms_start
,
1262 ms
->ms_size
, vd
->vdev_ashift
, &ms
->ms_lock
);
1265 kmem_free(ms
, sizeof (metaslab_t
));
1269 ASSERT(ms
->ms_sm
!= NULL
);
1273 * We create the main range tree here, but we don't create the
1274 * alloctree and freetree until metaslab_sync_done(). This serves
1275 * two purposes: it allows metaslab_sync_done() to detect the
1276 * addition of new space; and for debugging, it ensures that we'd
1277 * data fault on any attempt to use this metaslab before it's ready.
1279 ms
->ms_tree
= range_tree_create(&metaslab_rt_ops
, ms
, &ms
->ms_lock
);
1280 metaslab_group_add(mg
, ms
);
1282 ms
->ms_fragmentation
= metaslab_fragmentation(ms
);
1283 ms
->ms_ops
= mg
->mg_class
->mc_ops
;
1286 * If we're opening an existing pool (txg == 0) or creating
1287 * a new one (txg == TXG_INITIAL), all space is available now.
1288 * If we're adding space to an existing pool, the new space
1289 * does not become available until after this txg has synced.
1291 if (txg
<= TXG_INITIAL
)
1292 metaslab_sync_done(ms
, 0);
1295 * If metaslab_debug_load is set and we're initializing a metaslab
1296 * that has an allocated space_map object then load the its space
1297 * map so that can verify frees.
1299 if (metaslab_debug_load
&& ms
->ms_sm
!= NULL
) {
1300 mutex_enter(&ms
->ms_lock
);
1301 VERIFY0(metaslab_load(ms
));
1302 mutex_exit(&ms
->ms_lock
);
1306 vdev_dirty(vd
, 0, NULL
, txg
);
1307 vdev_dirty(vd
, VDD_METASLAB
, ms
, txg
);
1316 metaslab_fini(metaslab_t
*msp
)
1320 metaslab_group_t
*mg
= msp
->ms_group
;
1322 metaslab_group_remove(mg
, msp
);
1324 mutex_enter(&msp
->ms_lock
);
1326 VERIFY(msp
->ms_group
== NULL
);
1327 vdev_space_update(mg
->mg_vd
, -space_map_allocated(msp
->ms_sm
),
1329 space_map_close(msp
->ms_sm
);
1331 metaslab_unload(msp
);
1332 range_tree_destroy(msp
->ms_tree
);
1334 for (t
= 0; t
< TXG_SIZE
; t
++) {
1335 range_tree_destroy(msp
->ms_alloctree
[t
]);
1336 range_tree_destroy(msp
->ms_freetree
[t
]);
1339 for (t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1340 range_tree_destroy(msp
->ms_defertree
[t
]);
1343 ASSERT0(msp
->ms_deferspace
);
1345 mutex_exit(&msp
->ms_lock
);
1346 cv_destroy(&msp
->ms_load_cv
);
1347 mutex_destroy(&msp
->ms_lock
);
1349 kmem_free(msp
, sizeof (metaslab_t
));
1352 #define FRAGMENTATION_TABLE_SIZE 17
1355 * This table defines a segment size based fragmentation metric that will
1356 * allow each metaslab to derive its own fragmentation value. This is done
1357 * by calculating the space in each bucket of the spacemap histogram and
1358 * multiplying that by the fragmetation metric in this table. Doing
1359 * this for all buckets and dividing it by the total amount of free
1360 * space in this metaslab (i.e. the total free space in all buckets) gives
1361 * us the fragmentation metric. This means that a high fragmentation metric
1362 * equates to most of the free space being comprised of small segments.
1363 * Conversely, if the metric is low, then most of the free space is in
1364 * large segments. A 10% change in fragmentation equates to approximately
1365 * double the number of segments.
1367 * This table defines 0% fragmented space using 16MB segments. Testing has
1368 * shown that segments that are greater than or equal to 16MB do not suffer
1369 * from drastic performance problems. Using this value, we derive the rest
1370 * of the table. Since the fragmentation value is never stored on disk, it
1371 * is possible to change these calculations in the future.
1373 int zfs_frag_table
[FRAGMENTATION_TABLE_SIZE
] = {
1393 * Calclate the metaslab's fragmentation metric. A return value
1394 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
1395 * not support this metric. Otherwise, the return value should be in the
1399 metaslab_fragmentation(metaslab_t
*msp
)
1401 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1402 uint64_t fragmentation
= 0;
1404 boolean_t feature_enabled
= spa_feature_is_enabled(spa
,
1405 SPA_FEATURE_SPACEMAP_HISTOGRAM
);
1408 if (!feature_enabled
)
1409 return (ZFS_FRAG_INVALID
);
1412 * A null space map means that the entire metaslab is free
1413 * and thus is not fragmented.
1415 if (msp
->ms_sm
== NULL
)
1419 * If this metaslab's space_map has not been upgraded, flag it
1420 * so that we upgrade next time we encounter it.
1422 if (msp
->ms_sm
->sm_dbuf
->db_size
!= sizeof (space_map_phys_t
)) {
1423 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
1425 if (spa_writeable(vd
->vdev_spa
)) {
1426 uint64_t txg
= spa_syncing_txg(spa
);
1428 msp
->ms_condense_wanted
= B_TRUE
;
1429 vdev_dirty(vd
, VDD_METASLAB
, msp
, txg
+ 1);
1430 spa_dbgmsg(spa
, "txg %llu, requesting force condense: "
1431 "msp %p, vd %p", txg
, msp
, vd
);
1433 return (ZFS_FRAG_INVALID
);
1436 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
1438 uint8_t shift
= msp
->ms_sm
->sm_shift
;
1439 int idx
= MIN(shift
- SPA_MINBLOCKSHIFT
+ i
,
1440 FRAGMENTATION_TABLE_SIZE
- 1);
1442 if (msp
->ms_sm
->sm_phys
->smp_histogram
[i
] == 0)
1445 space
= msp
->ms_sm
->sm_phys
->smp_histogram
[i
] << (i
+ shift
);
1448 ASSERT3U(idx
, <, FRAGMENTATION_TABLE_SIZE
);
1449 fragmentation
+= space
* zfs_frag_table
[idx
];
1453 fragmentation
/= total
;
1454 ASSERT3U(fragmentation
, <=, 100);
1455 return (fragmentation
);
1459 * Compute a weight -- a selection preference value -- for the given metaslab.
1460 * This is based on the amount of free space, the level of fragmentation,
1461 * the LBA range, and whether the metaslab is loaded.
1464 metaslab_weight(metaslab_t
*msp
)
1466 metaslab_group_t
*mg
= msp
->ms_group
;
1467 vdev_t
*vd
= mg
->mg_vd
;
1468 uint64_t weight
, space
;
1470 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1473 * This vdev is in the process of being removed so there is nothing
1474 * for us to do here.
1476 if (vd
->vdev_removing
) {
1477 ASSERT0(space_map_allocated(msp
->ms_sm
));
1478 ASSERT0(vd
->vdev_ms_shift
);
1483 * The baseline weight is the metaslab's free space.
1485 space
= msp
->ms_size
- space_map_allocated(msp
->ms_sm
);
1487 msp
->ms_fragmentation
= metaslab_fragmentation(msp
);
1488 if (metaslab_fragmentation_factor_enabled
&&
1489 msp
->ms_fragmentation
!= ZFS_FRAG_INVALID
) {
1491 * Use the fragmentation information to inversely scale
1492 * down the baseline weight. We need to ensure that we
1493 * don't exclude this metaslab completely when it's 100%
1494 * fragmented. To avoid this we reduce the fragmented value
1497 space
= (space
* (100 - (msp
->ms_fragmentation
- 1))) / 100;
1500 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1501 * this metaslab again. The fragmentation metric may have
1502 * decreased the space to something smaller than
1503 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1504 * so that we can consume any remaining space.
1506 if (space
> 0 && space
< SPA_MINBLOCKSIZE
)
1507 space
= SPA_MINBLOCKSIZE
;
1512 * Modern disks have uniform bit density and constant angular velocity.
1513 * Therefore, the outer recording zones are faster (higher bandwidth)
1514 * than the inner zones by the ratio of outer to inner track diameter,
1515 * which is typically around 2:1. We account for this by assigning
1516 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1517 * In effect, this means that we'll select the metaslab with the most
1518 * free bandwidth rather than simply the one with the most free space.
1520 if (metaslab_lba_weighting_enabled
) {
1521 weight
= 2 * weight
- (msp
->ms_id
* weight
) / vd
->vdev_ms_count
;
1522 ASSERT(weight
>= space
&& weight
<= 2 * space
);
1526 * If this metaslab is one we're actively using, adjust its
1527 * weight to make it preferable to any inactive metaslab so
1528 * we'll polish it off. If the fragmentation on this metaslab
1529 * has exceed our threshold, then don't mark it active.
1531 if (msp
->ms_loaded
&& msp
->ms_fragmentation
!= ZFS_FRAG_INVALID
&&
1532 msp
->ms_fragmentation
<= zfs_metaslab_fragmentation_threshold
) {
1533 weight
|= (msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
1540 metaslab_activate(metaslab_t
*msp
, uint64_t activation_weight
)
1542 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1544 if ((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) == 0) {
1545 metaslab_load_wait(msp
);
1546 if (!msp
->ms_loaded
) {
1547 int error
= metaslab_load(msp
);
1549 metaslab_group_sort(msp
->ms_group
, msp
, 0);
1554 metaslab_group_sort(msp
->ms_group
, msp
,
1555 msp
->ms_weight
| activation_weight
);
1557 ASSERT(msp
->ms_loaded
);
1558 ASSERT(msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
1564 metaslab_passivate(metaslab_t
*msp
, uint64_t size
)
1567 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1568 * this metaslab again. In that case, it had better be empty,
1569 * or we would be leaving space on the table.
1571 ASSERT(size
>= SPA_MINBLOCKSIZE
|| range_tree_space(msp
->ms_tree
) == 0);
1572 metaslab_group_sort(msp
->ms_group
, msp
, MIN(msp
->ms_weight
, size
));
1573 ASSERT((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) == 0);
1577 metaslab_preload(void *arg
)
1579 metaslab_t
*msp
= arg
;
1580 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1582 ASSERT(!MUTEX_HELD(&msp
->ms_group
->mg_lock
));
1584 mutex_enter(&msp
->ms_lock
);
1585 metaslab_load_wait(msp
);
1586 if (!msp
->ms_loaded
)
1587 (void) metaslab_load(msp
);
1590 * Set the ms_access_txg value so that we don't unload it right away.
1592 msp
->ms_access_txg
= spa_syncing_txg(spa
) + metaslab_unload_delay
+ 1;
1593 mutex_exit(&msp
->ms_lock
);
1597 metaslab_group_preload(metaslab_group_t
*mg
)
1599 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
1601 avl_tree_t
*t
= &mg
->mg_metaslab_tree
;
1604 if (spa_shutting_down(spa
) || !metaslab_preload_enabled
) {
1605 taskq_wait(mg
->mg_taskq
);
1609 mutex_enter(&mg
->mg_lock
);
1611 * Load the next potential metaslabs
1614 while (msp
!= NULL
) {
1615 metaslab_t
*msp_next
= AVL_NEXT(t
, msp
);
1618 * We preload only the maximum number of metaslabs specified
1619 * by metaslab_preload_limit. If a metaslab is being forced
1620 * to condense then we preload it too. This will ensure
1621 * that force condensing happens in the next txg.
1623 if (++m
> metaslab_preload_limit
&& !msp
->ms_condense_wanted
) {
1629 * We must drop the metaslab group lock here to preserve
1630 * lock ordering with the ms_lock (when grabbing both
1631 * the mg_lock and the ms_lock, the ms_lock must be taken
1632 * first). As a result, it is possible that the ordering
1633 * of the metaslabs within the avl tree may change before
1634 * we reacquire the lock. The metaslab cannot be removed from
1635 * the tree while we're in syncing context so it is safe to
1636 * drop the mg_lock here. If the metaslabs are reordered
1637 * nothing will break -- we just may end up loading a
1638 * less than optimal one.
1640 mutex_exit(&mg
->mg_lock
);
1641 VERIFY(taskq_dispatch(mg
->mg_taskq
, metaslab_preload
,
1642 msp
, TQ_SLEEP
) != 0);
1643 mutex_enter(&mg
->mg_lock
);
1646 mutex_exit(&mg
->mg_lock
);
1650 * Determine if the space map's on-disk footprint is past our tolerance
1651 * for inefficiency. We would like to use the following criteria to make
1654 * 1. The size of the space map object should not dramatically increase as a
1655 * result of writing out the free space range tree.
1657 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
1658 * times the size than the free space range tree representation
1659 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1.MB).
1661 * 3. The on-disk size of the space map should actually decrease.
1663 * Checking the first condition is tricky since we don't want to walk
1664 * the entire AVL tree calculating the estimated on-disk size. Instead we
1665 * use the size-ordered range tree in the metaslab and calculate the
1666 * size required to write out the largest segment in our free tree. If the
1667 * size required to represent that segment on disk is larger than the space
1668 * map object then we avoid condensing this map.
1670 * To determine the second criterion we use a best-case estimate and assume
1671 * each segment can be represented on-disk as a single 64-bit entry. We refer
1672 * to this best-case estimate as the space map's minimal form.
1674 * Unfortunately, we cannot compute the on-disk size of the space map in this
1675 * context because we cannot accurately compute the effects of compression, etc.
1676 * Instead, we apply the heuristic described in the block comment for
1677 * zfs_metaslab_condense_block_threshold - we only condense if the space used
1678 * is greater than a threshold number of blocks.
1681 metaslab_should_condense(metaslab_t
*msp
)
1683 space_map_t
*sm
= msp
->ms_sm
;
1685 uint64_t size
, entries
, segsz
, object_size
, optimal_size
, record_size
;
1686 dmu_object_info_t doi
;
1687 uint64_t vdev_blocksize
= 1 << msp
->ms_group
->mg_vd
->vdev_ashift
;
1689 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1690 ASSERT(msp
->ms_loaded
);
1693 * Use the ms_size_tree range tree, which is ordered by size, to
1694 * obtain the largest segment in the free tree. We always condense
1695 * metaslabs that are empty and metaslabs for which a condense
1696 * request has been made.
1698 rs
= avl_last(&msp
->ms_size_tree
);
1699 if (rs
== NULL
|| msp
->ms_condense_wanted
)
1703 * Calculate the number of 64-bit entries this segment would
1704 * require when written to disk. If this single segment would be
1705 * larger on-disk than the entire current on-disk structure, then
1706 * clearly condensing will increase the on-disk structure size.
1708 size
= (rs
->rs_end
- rs
->rs_start
) >> sm
->sm_shift
;
1709 entries
= size
/ (MIN(size
, SM_RUN_MAX
));
1710 segsz
= entries
* sizeof (uint64_t);
1712 optimal_size
= sizeof (uint64_t) * avl_numnodes(&msp
->ms_tree
->rt_root
);
1713 object_size
= space_map_length(msp
->ms_sm
);
1715 dmu_object_info_from_db(sm
->sm_dbuf
, &doi
);
1716 record_size
= MAX(doi
.doi_data_block_size
, vdev_blocksize
);
1718 return (segsz
<= object_size
&&
1719 object_size
>= (optimal_size
* zfs_condense_pct
/ 100) &&
1720 object_size
> zfs_metaslab_condense_block_threshold
* record_size
);
1724 * Condense the on-disk space map representation to its minimized form.
1725 * The minimized form consists of a small number of allocations followed by
1726 * the entries of the free range tree.
1729 metaslab_condense(metaslab_t
*msp
, uint64_t txg
, dmu_tx_t
*tx
)
1731 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1732 range_tree_t
*freetree
= msp
->ms_freetree
[txg
& TXG_MASK
];
1733 range_tree_t
*condense_tree
;
1734 space_map_t
*sm
= msp
->ms_sm
;
1737 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1738 ASSERT3U(spa_sync_pass(spa
), ==, 1);
1739 ASSERT(msp
->ms_loaded
);
1742 spa_dbgmsg(spa
, "condensing: txg %llu, msp[%llu] %p, "
1743 "smp size %llu, segments %lu, forcing condense=%s", txg
,
1744 msp
->ms_id
, msp
, space_map_length(msp
->ms_sm
),
1745 avl_numnodes(&msp
->ms_tree
->rt_root
),
1746 msp
->ms_condense_wanted
? "TRUE" : "FALSE");
1748 msp
->ms_condense_wanted
= B_FALSE
;
1751 * Create an range tree that is 100% allocated. We remove segments
1752 * that have been freed in this txg, any deferred frees that exist,
1753 * and any allocation in the future. Removing segments should be
1754 * a relatively inexpensive operation since we expect these trees to
1755 * have a small number of nodes.
1757 condense_tree
= range_tree_create(NULL
, NULL
, &msp
->ms_lock
);
1758 range_tree_add(condense_tree
, msp
->ms_start
, msp
->ms_size
);
1761 * Remove what's been freed in this txg from the condense_tree.
1762 * Since we're in sync_pass 1, we know that all the frees from
1763 * this txg are in the freetree.
1765 range_tree_walk(freetree
, range_tree_remove
, condense_tree
);
1767 for (t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1768 range_tree_walk(msp
->ms_defertree
[t
],
1769 range_tree_remove
, condense_tree
);
1772 for (t
= 1; t
< TXG_CONCURRENT_STATES
; t
++) {
1773 range_tree_walk(msp
->ms_alloctree
[(txg
+ t
) & TXG_MASK
],
1774 range_tree_remove
, condense_tree
);
1778 * We're about to drop the metaslab's lock thus allowing
1779 * other consumers to change it's content. Set the
1780 * metaslab's ms_condensing flag to ensure that
1781 * allocations on this metaslab do not occur while we're
1782 * in the middle of committing it to disk. This is only critical
1783 * for the ms_tree as all other range trees use per txg
1784 * views of their content.
1786 msp
->ms_condensing
= B_TRUE
;
1788 mutex_exit(&msp
->ms_lock
);
1789 space_map_truncate(sm
, tx
);
1790 mutex_enter(&msp
->ms_lock
);
1793 * While we would ideally like to create a space_map representation
1794 * that consists only of allocation records, doing so can be
1795 * prohibitively expensive because the in-core free tree can be
1796 * large, and therefore computationally expensive to subtract
1797 * from the condense_tree. Instead we sync out two trees, a cheap
1798 * allocation only tree followed by the in-core free tree. While not
1799 * optimal, this is typically close to optimal, and much cheaper to
1802 space_map_write(sm
, condense_tree
, SM_ALLOC
, tx
);
1803 range_tree_vacate(condense_tree
, NULL
, NULL
);
1804 range_tree_destroy(condense_tree
);
1806 space_map_write(sm
, msp
->ms_tree
, SM_FREE
, tx
);
1807 msp
->ms_condensing
= B_FALSE
;
1811 * Write a metaslab to disk in the context of the specified transaction group.
1814 metaslab_sync(metaslab_t
*msp
, uint64_t txg
)
1816 metaslab_group_t
*mg
= msp
->ms_group
;
1817 vdev_t
*vd
= mg
->mg_vd
;
1818 spa_t
*spa
= vd
->vdev_spa
;
1819 objset_t
*mos
= spa_meta_objset(spa
);
1820 range_tree_t
*alloctree
= msp
->ms_alloctree
[txg
& TXG_MASK
];
1821 range_tree_t
**freetree
= &msp
->ms_freetree
[txg
& TXG_MASK
];
1822 range_tree_t
**freed_tree
=
1823 &msp
->ms_freetree
[TXG_CLEAN(txg
) & TXG_MASK
];
1825 uint64_t object
= space_map_object(msp
->ms_sm
);
1827 ASSERT(!vd
->vdev_ishole
);
1830 * This metaslab has just been added so there's no work to do now.
1832 if (*freetree
== NULL
) {
1833 ASSERT3P(alloctree
, ==, NULL
);
1837 ASSERT3P(alloctree
, !=, NULL
);
1838 ASSERT3P(*freetree
, !=, NULL
);
1839 ASSERT3P(*freed_tree
, !=, NULL
);
1842 * Normally, we don't want to process a metaslab if there
1843 * are no allocations or frees to perform. However, if the metaslab
1844 * is being forced to condense we need to let it through.
1846 if (range_tree_space(alloctree
) == 0 &&
1847 range_tree_space(*freetree
) == 0 &&
1848 !msp
->ms_condense_wanted
)
1852 * The only state that can actually be changing concurrently with
1853 * metaslab_sync() is the metaslab's ms_tree. No other thread can
1854 * be modifying this txg's alloctree, freetree, freed_tree, or
1855 * space_map_phys_t. Therefore, we only hold ms_lock to satify
1856 * space_map ASSERTs. We drop it whenever we call into the DMU,
1857 * because the DMU can call down to us (e.g. via zio_free()) at
1861 tx
= dmu_tx_create_assigned(spa_get_dsl(spa
), txg
);
1863 if (msp
->ms_sm
== NULL
) {
1864 uint64_t new_object
;
1866 new_object
= space_map_alloc(mos
, tx
);
1867 VERIFY3U(new_object
, !=, 0);
1869 VERIFY0(space_map_open(&msp
->ms_sm
, mos
, new_object
,
1870 msp
->ms_start
, msp
->ms_size
, vd
->vdev_ashift
,
1872 ASSERT(msp
->ms_sm
!= NULL
);
1875 mutex_enter(&msp
->ms_lock
);
1878 * Note: metaslab_condense() clears the space_map's histogram.
1879 * Therefore we muse verify and remove this histogram before
1882 metaslab_group_histogram_verify(mg
);
1883 metaslab_class_histogram_verify(mg
->mg_class
);
1884 metaslab_group_histogram_remove(mg
, msp
);
1886 if (msp
->ms_loaded
&& spa_sync_pass(spa
) == 1 &&
1887 metaslab_should_condense(msp
)) {
1888 metaslab_condense(msp
, txg
, tx
);
1890 space_map_write(msp
->ms_sm
, alloctree
, SM_ALLOC
, tx
);
1891 space_map_write(msp
->ms_sm
, *freetree
, SM_FREE
, tx
);
1894 if (msp
->ms_loaded
) {
1896 * When the space map is loaded, we have an accruate
1897 * histogram in the range tree. This gives us an opportunity
1898 * to bring the space map's histogram up-to-date so we clear
1899 * it first before updating it.
1901 space_map_histogram_clear(msp
->ms_sm
);
1902 space_map_histogram_add(msp
->ms_sm
, msp
->ms_tree
, tx
);
1905 * Since the space map is not loaded we simply update the
1906 * exisiting histogram with what was freed in this txg. This
1907 * means that the on-disk histogram may not have an accurate
1908 * view of the free space but it's close enough to allow
1909 * us to make allocation decisions.
1911 space_map_histogram_add(msp
->ms_sm
, *freetree
, tx
);
1913 metaslab_group_histogram_add(mg
, msp
);
1914 metaslab_group_histogram_verify(mg
);
1915 metaslab_class_histogram_verify(mg
->mg_class
);
1918 * For sync pass 1, we avoid traversing this txg's free range tree
1919 * and instead will just swap the pointers for freetree and
1920 * freed_tree. We can safely do this since the freed_tree is
1921 * guaranteed to be empty on the initial pass.
1923 if (spa_sync_pass(spa
) == 1) {
1924 range_tree_swap(freetree
, freed_tree
);
1926 range_tree_vacate(*freetree
, range_tree_add
, *freed_tree
);
1928 range_tree_vacate(alloctree
, NULL
, NULL
);
1930 ASSERT0(range_tree_space(msp
->ms_alloctree
[txg
& TXG_MASK
]));
1931 ASSERT0(range_tree_space(msp
->ms_freetree
[txg
& TXG_MASK
]));
1933 mutex_exit(&msp
->ms_lock
);
1935 if (object
!= space_map_object(msp
->ms_sm
)) {
1936 object
= space_map_object(msp
->ms_sm
);
1937 dmu_write(mos
, vd
->vdev_ms_array
, sizeof (uint64_t) *
1938 msp
->ms_id
, sizeof (uint64_t), &object
, tx
);
/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	range_tree_t **freed_tree;
	range_tree_t **defer_tree;
	int64_t alloc_delta, defer_delta;
	int t;

	ASSERT(!vd->vdev_ishole);

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * alloctrees, freetrees, and defertree and add its capacity to
	 * the vdev.
	 */
	if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
		for (t = 0; t < TXG_SIZE; t++) {
			ASSERT(msp->ms_alloctree[t] == NULL);
			ASSERT(msp->ms_freetree[t] == NULL);

			msp->ms_alloctree[t] = range_tree_create(NULL, msp,
			    &msp->ms_lock);
			msp->ms_freetree[t] = range_tree_create(NULL, msp,
			    &msp->ms_lock);
		}

		for (t = 0; t < TXG_DEFER_SIZE; t++) {
			ASSERT(msp->ms_defertree[t] == NULL);

			msp->ms_defertree[t] = range_tree_create(NULL, msp,
			    &msp->ms_lock);
		}

		vdev_space_update(vd, 0, 0, msp->ms_size);
	}

	freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
	defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];

	alloc_delta = space_map_alloc_delta(msp->ms_sm);
	defer_delta = range_tree_space(*freed_tree) -
	    range_tree_space(*defer_tree);

	vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);

	ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
	ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));

	/*
	 * If there's a metaslab_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 */
	metaslab_load_wait(msp);

	/*
	 * Move the frees from the defer_tree back to the free
	 * range tree (if it's loaded). Swap the freed_tree and the
	 * defer_tree -- this is safe to do because we've just emptied out
	 * the defer_tree.
	 */
	range_tree_vacate(*defer_tree,
	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
	range_tree_swap(freed_tree, defer_tree);

	space_map_update(msp->ms_sm);

	msp->ms_deferspace += defer_delta;
	ASSERT3S(msp->ms_deferspace, >=, 0);
	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
	if (msp->ms_deferspace != 0) {
		/*
		 * Keep syncing this metaslab until all deferred frees
		 * are back in circulation.
		 */
		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
	}

	if (msp->ms_loaded && msp->ms_access_txg < txg) {
		for (t = 1; t < TXG_CONCURRENT_STATES; t++) {
			VERIFY0(range_tree_space(
			    msp->ms_alloctree[(txg + t) & TXG_MASK]));
		}

		if (!metaslab_debug_unload)
			metaslab_unload(msp);
	}

	metaslab_group_sort(mg, msp, metaslab_weight(msp));
	mutex_exit(&msp->ms_lock);
}
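
/*
 * An example of the deferred-free accounting above, with hypothetical
 * sizes: if this txg freed 10M (*freed_tree) and the defer slot being
 * recycled held 4M (*defer_tree), then defer_delta = 10M - 4M = 6M.
 * The 4M from the old defer tree is returned to circulation now, while
 * ms_deferspace grows by 6M to account for the newly deferred frees;
 * the metaslab keeps getting dirtied until ms_deferspace drains back
 * to zero.
 */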
void
metaslab_sync_reassess(metaslab_group_t *mg)
{
	metaslab_group_alloc_update(mg);
	mg->mg_fragmentation = metaslab_group_fragmentation(mg);

	/*
	 * Preload the next potential metaslabs.
	 */
	metaslab_group_preload(mg);
}
static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
	uint64_t start = msp->ms_id;

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (1ULL << 63);

	if (offset < start)
		return ((start - offset) << ms_shift);
	if (offset > start)
		return ((offset - start) << ms_shift);
	return (0);
}
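
/*
 * A worked example of the distance calculation, with hypothetical
 * geometry: if vdev_ms_shift == 30 (1GB metaslabs), a DVA at offset
 * 5GB on the same vdev maps to metaslab index 5, so for a metaslab
 * with ms_id == 2 the distance is (5 - 2) << 30 == 3GB. A DVA on any
 * other vdev reports 1ULL << 63, i.e. effectively infinite.
 */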
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
    uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t activation_weight;
	uint64_t target_distance;
	int i;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (i = 0; i < d; i++) {
		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_SECONDARY;
			break;
		}
	}

	for (;;) {
		boolean_t was_active;

		mutex_enter(&mg->mg_lock);
		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
			if (msp->ms_weight < asize) {
				spa_dbgmsg(spa, "%s: failed to meet weight "
				    "requirement: vdev %llu, txg %llu, mg %p, "
				    "msp %p, psize %llu, asize %llu, "
				    "weight %llu", spa_name(spa),
				    mg->mg_vd->vdev_id, txg,
				    mg, msp, psize, asize, msp->ms_weight);
				mutex_exit(&mg->mg_lock);
				return (-1ULL);
			}

			/*
			 * If the selected metaslab is condensing, skip it.
			 */
			if (msp->ms_condensing)
				continue;

			was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
				break;

			target_distance = min_distance +
			    (space_map_allocated(msp->ms_sm) != 0 ? 0 :
			    min_distance >> 1);

			for (i = 0; i < d; i++)
				if (metaslab_distance(msp, &dva[i]) <
				    target_distance)
					break;
			if (i == d)
				break;
		}
		mutex_exit(&mg->mg_lock);
		if (msp == NULL)
			return (-1ULL);

		mutex_enter(&msp->ms_lock);

		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request. It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock.
		 */
		if (msp->ms_weight < asize || (was_active &&
		    !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY)) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			metaslab_passivate(msp,
			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (metaslab_activate(msp, activation_weight) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If this metaslab is currently condensing then pick again as
		 * we can't manipulate this metaslab until it's committed
		 * to disk.
		 */
		if (msp->ms_condensing) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL)
			break;

		metaslab_passivate(msp, metaslab_block_maxsize(msp));
		mutex_exit(&msp->ms_lock);
	}

	if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

	range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize);
	msp->ms_access_txg = txg + metaslab_unload_delay;

	mutex_exit(&msp->ms_lock);

	return (offset);
}
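
/*
 * An example of the secondary-weight distance check above, with
 * hypothetical values: if min_distance is 128GB and the candidate
 * metaslab has never been allocated from (space_map_allocated() == 0),
 * then target_distance = 128GB + 64GB = 192GB, making untouched
 * metaslabs harder to select for ditto copies. A metaslab already in
 * use only needs to be 128GB away from every previously written DVA.
 */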
/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
{
	metaslab_group_t *mg, *fast_mg, *rotor;
	vdev_t *vd;
	int dshift = 3;
	int all_zero;
	int zio_lock = B_FALSE;
	boolean_t allocatable;
	uint64_t offset = -1ULL;
	uint64_t asize;
	uint64_t distance;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 */
	if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
		return (SET_ERROR(ENOSPC));

	if (flags & METASLAB_FASTWRITE)
		mutex_enter(&mc->mc_fastwrite_lock);

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_aliquot because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly. It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs. If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible. If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about. Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data. With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header. That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

		/*
		 * It's possible the vdev we're using as the hint no
		 * longer exists (i.e. removed). Consult the rotor when
		 * all else fails.
		 */
		if (vd != NULL) {
			mg = vd->vdev_mg;

			if (flags & METASLAB_HINTBP_AVOID &&
			    mg->mg_next != NULL)
				mg = mg->mg_next;
		} else {
			mg = mc->mc_rotor;
		}
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else if (flags & METASLAB_FASTWRITE) {
		mg = fast_mg = mc->mc_rotor;

		do {
			if (fast_mg->mg_vd->vdev_pending_fastwrite <
			    mg->mg_vd->vdev_pending_fastwrite)
				mg = fast_mg;
		} while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);

	} else {
		mg = mc->mc_rotor;
	}

	/*
	 * If the hint put us into the wrong metaslab class, or into a
	 * metaslab group that has been passivated, just follow the rotor.
	 */
	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
		mg = mc->mc_rotor;

	rotor = mg;
top:
	all_zero = B_TRUE;
	do {
		ASSERT(mg->mg_activation_count == 1);

		vd = mg->mg_vd;

		/*
		 * Don't allocate from faulted devices.
		 */
		if (zio_lock) {
			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
			allocatable = vdev_allocatable(vd);
			spa_config_exit(spa, SCL_ZIO, FTAG);
		} else {
			allocatable = vdev_allocatable(vd);
		}

		/*
		 * Determine if the selected metaslab group is eligible
		 * for allocations. If we're ganging or have requested
		 * an allocation for the smallest gang block size
		 * then we don't want to avoid allocating to this
		 * metaslab group. If we're in this condition we should
		 * try to allocate from any device possible so that we
		 * don't inadvertently return ENOSPC and suspend the pool
		 * even though space is still available.
		 */
		if (allocatable && CAN_FASTGANG(flags) &&
		    psize > SPA_GANGBLOCKSIZE)
			allocatable = metaslab_group_allocatable(mg);

		if (!allocatable)
			goto next;

		/*
		 * Avoid writing single-copy data to a failing vdev
		 * unless the user instructs us that it is okay.
		 */
		if ((vd->vdev_stat.vs_write_errors > 0 ||
		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
		    d == 0 && dshift == 3 && vd->vdev_children == 0) {
			all_zero = B_FALSE;
			goto next;
		}

		ASSERT(mg->mg_class == mc);

		distance = vd->vdev_asize >> dshift;
		if (distance <= (1ULL << vd->vdev_ms_shift))
			distance = 0;
		else
			all_zero = B_FALSE;

		asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
		    dva, d);
		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 */
			if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
				vdev_stat_t *vs = &vd->vdev_stat;
				int64_t vu, cu;

				vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
				cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);

				/*
				 * Calculate how much more or less we should
				 * try to allocate from this device during
				 * this iteration around the rotor.
				 * For example, if a device is 80% full
				 * and the pool is 20% full then we should
				 * reduce allocations by 60% on this device.
				 *
				 * mg_bias = (20 - 80) * 512K / 100 = -307K
				 *
				 * This reduces allocations by 307K for this
				 * iteration.
				 */
				mg->mg_bias = ((cu - vu) *
				    (int64_t)mg->mg_aliquot) / 100;
			} else if (!metaslab_bias_enabled) {
				mg->mg_bias = 0;
			}

			if ((flags & METASLAB_FASTWRITE) ||
			    atomic_add_64_nv(&mc->mc_aliquot, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_aliquot = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
			DVA_SET_ASIZE(&dva[d], asize);

			if (flags & METASLAB_FASTWRITE) {
				atomic_add_64(&vd->vdev_pending_fastwrite,
				    psize);
				mutex_exit(&mc->mc_fastwrite_lock);
			}

			return (0);
		}
next:
		mc->mc_rotor = mg->mg_next;
		mc->mc_aliquot = 0;
	} while ((mg = mg->mg_next) != rotor);

	if (!all_zero) {
		dshift++;
		ASSERT(dshift < 64);
		goto top;
	}

	if (!allocatable && !zio_lock) {
		dshift = 3;
		zio_lock = B_TRUE;
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	if (flags & METASLAB_FASTWRITE)
		mutex_exit(&mc->mc_fastwrite_lock);

	return (SET_ERROR(ENOSPC));
}
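
/*
 * An example of the standards-lowering loop above, assuming a
 * hypothetical 1TB top-level vdev: the first pass asks for a ditto
 * separation of vdev_asize >> 3 == 128GB. Each time the rotor completes
 * without an allocation while some group still had a nonzero distance,
 * dshift++ halves the requirement to 64GB, 32GB, and so on, until the
 * distance falls to at most one metaslab (1ULL << vdev_ms_shift) and is
 * clamped to zero, at which point any allocatable vdev is acceptable.
 */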
/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;

	ASSERT(DVA_IS_VALID(dva));

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset);
		ASSERT(0);
		return;
	}

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if (now) {
		range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
		    offset, size);

		VERIFY(!msp->ms_condensing);
		VERIFY3U(offset, >=, msp->ms_start);
		VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
		VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
		    msp->ms_size);
		VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
		range_tree_add(msp->ms_tree, offset, size);
	} else {
		if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_freetree[txg & TXG_MASK],
		    offset, size);
	}

	mutex_exit(&msp->ms_lock);
}
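
/*
 * The two branches above implement the two flavors of free. For
 * example, when metaslab_alloc() unwinds a failed multi-DVA allocation
 * it frees with now == B_TRUE: the range is pulled out of this txg's
 * alloctree and returned directly to ms_tree. A normal free
 * (now == B_FALSE) only lands in this txg's freetree and does not
 * become allocatable again until metaslab_sync_done() has cycled it
 * through the defer trees TXG_DEFER_SIZE txgs later.
 */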
/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;
	int error = 0;

	ASSERT(DVA_IS_VALID(dva));

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
		return (SET_ERROR(ENXIO));

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
		error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);

	if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
		error = SET_ERROR(ENOENT);

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	VERIFY(!msp->ms_condensing);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
	range_tree_remove(msp->ms_tree, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);

	return (0);
}
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int d, error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

	for (d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags);
		if (error != 0) {
			for (d--; d >= 0; d--) {
				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, txg);

	return (0);
}
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int d, ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++)
		metaslab_free_dva(spa, &dva[d], txg, now);

	spa_config_exit(spa, SCL_FREE, FTAG);
}
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int d, error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			break;

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}
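
/*
 * A sketch of the intended calling pattern (hypothetical caller): ZIL
 * replay claims each log block it intends to reuse, relying on the
 * txg == 0 dry run above to validate every DVA before any state is
 * changed:
 *
 *	if (metaslab_claim(spa, bp, spa_first_txg(spa)) == 0) {
 *		... every DVA is now allocated; safe to replay ...
 *	}
 *
 * Because the dry run checks all DVAs first, the real pass cannot fail
 * part-way and leave some DVAs claimed while others are not.
 */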
void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		atomic_add_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}
void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
		atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}
void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
	int i, j;

	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
		metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

		if (msp->ms_loaded)
			range_tree_verify(msp->ms_tree, offset, size);

		for (j = 0; j < TXG_SIZE; j++)
			range_tree_verify(msp->ms_freetree[j], offset, size);
		for (j = 0; j < TXG_DEFER_SIZE; j++)
			range_tree_verify(msp->ms_defertree[j], offset, size);
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(metaslab_debug_load, int, 0644);
module_param(metaslab_debug_unload, int, 0644);
module_param(metaslab_preload_enabled, int, 0644);
module_param(zfs_mg_noalloc_threshold, int, 0644);
module_param(zfs_mg_fragmentation_threshold, int, 0644);
module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
module_param(metaslab_fragmentation_factor_enabled, int, 0644);
module_param(metaslab_lba_weighting_enabled, int, 0644);
module_param(metaslab_bias_enabled, int, 0644);

MODULE_PARM_DESC(metaslab_debug_load,
	"load all metaslabs when pool is first opened");
MODULE_PARM_DESC(metaslab_debug_unload,
	"prevent metaslabs from being unloaded");
MODULE_PARM_DESC(metaslab_preload_enabled,
	"preload potential metaslabs during reassessment");

MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
	"percentage of free space for metaslab group to allow allocation");
MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
	"fragmentation for metaslab group to allow allocation");

MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
	"fragmentation for metaslab to allow allocation");
MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
	"use the fragmentation metric to prefer less fragmented metaslabs");
MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
	"prefer metaslabs with lower LBAs");
MODULE_PARM_DESC(metaslab_bias_enabled,
	"enable metaslab group biasing");
#endif /* _KERNEL && HAVE_SPL */