]> git.proxmox.com Git - mirror_zfs.git/blame - module/zfs/metaslab.c
Fix ztest deadman panic with indirect vdev damage
[mirror_zfs.git] / module / zfs / metaslab.c
CommitLineData
34dc7c2f
BB
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
428870ff 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
cc99f275 23 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
2e528b49 24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
cc99f275 25 * Copyright (c) 2017, Intel Corporation.
34dc7c2f
BB
26 */
27
34dc7c2f 28#include <sys/zfs_context.h>
34dc7c2f
BB
29#include <sys/dmu.h>
30#include <sys/dmu_tx.h>
31#include <sys/space_map.h>
32#include <sys/metaslab_impl.h>
33#include <sys/vdev_impl.h>
34#include <sys/zio.h>
93cf2076 35#include <sys/spa_impl.h>
f3a7f661 36#include <sys/zfeature.h>
a1d477c2 37#include <sys/vdev_indirect_mapping.h>
d2734cce 38#include <sys/zap.h>
34dc7c2f 39
d1d7e268 40#define WITH_DF_BLOCK_ALLOCATOR
6d974228 41
3dfb57a3
DB
42#define GANG_ALLOCATION(flags) \
43 ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
22c81dd8 44
e8fe6684
ED
45/*
46 * Metaslab granularity, in bytes. This is roughly similar to what would be
47 * referred to as the "stripe size" in traditional RAID arrays. In normal
48 * operation, we will try to write this amount of data to a top-level vdev
49 * before moving on to the next one.
50 */
99b14de4 51unsigned long metaslab_aliquot = 512 << 10;
e8fe6684 52
d830d479
MA
53/*
54 * For testing, make some blocks above a certain size be gang blocks.
55 */
56unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
34dc7c2f 57
d2734cce
SD
58/*
59 * Since we can touch multiple metaslabs (and their respective space maps)
60 * with each transaction group, we benefit from having a smaller space map
61 * block size since it allows us to issue more I/O operations scattered
62 * around the disk.
63 */
64int zfs_metaslab_sm_blksz = (1 << 12);
65
e51be066
GW
66/*
67 * The in-core space map representation is more compact than its on-disk form.
68 * The zfs_condense_pct determines how much more compact the in-core
4e21fd06 69 * space map representation must be before we compact it on-disk.
e51be066
GW
70 * Values should be greater than or equal to 100.
71 */
72int zfs_condense_pct = 200;
73
b02fe35d
AR
74/*
75 * Condensing a metaslab is not guaranteed to actually reduce the amount of
76 * space used on disk. In particular, a space map uses data in increments of
96358617 77 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
b02fe35d
AR
78 * same number of blocks after condensing. Since the goal of condensing is to
79 * reduce the number of IOPs required to read the space map, we only want to
80 * condense when we can be sure we will reduce the number of blocks used by the
81 * space map. Unfortunately, we cannot precisely compute whether or not this is
82 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
83 * we apply the following heuristic: do not condense a spacemap unless the
84 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
85 * blocks.
86 */
87int zfs_metaslab_condense_block_threshold = 4;
88
ac72fac3
GW
89/*
90 * The zfs_mg_noalloc_threshold defines which metaslab groups should
91 * be eligible for allocation. The value is defined as a percentage of
f3a7f661 92 * free space. Metaslab groups that have more free space than
ac72fac3
GW
93 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
94 * a metaslab group's free space is less than or equal to the
95 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
96 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
97 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
98 * groups are allowed to accept allocations. Gang blocks are always
99 * eligible to allocate on any metaslab group. The default value of 0 means
100 * no metaslab group will be excluded based on this criterion.
101 */
102int zfs_mg_noalloc_threshold = 0;
6d974228 103
f3a7f661
GW
104/*
105 * Metaslab groups are considered eligible for allocations if their
106 * fragmenation metric (measured as a percentage) is less than or equal to
107 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
108 * then it will be skipped unless all metaslab groups within the metaslab
109 * class have also crossed this threshold.
110 */
111int zfs_mg_fragmentation_threshold = 85;
112
113/*
114 * Allow metaslabs to keep their active state as long as their fragmentation
115 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
116 * active metaslab that exceeds this threshold will no longer keep its active
117 * status allowing better metaslabs to be selected.
118 */
119int zfs_metaslab_fragmentation_threshold = 70;
120
428870ff 121/*
aa7d06a9 122 * When set will load all metaslabs when pool is first opened.
428870ff 123 */
aa7d06a9
GW
124int metaslab_debug_load = 0;
125
126/*
127 * When set will prevent metaslabs from being unloaded.
128 */
129int metaslab_debug_unload = 0;
428870ff 130
9babb374
BB
131/*
132 * Minimum size which forces the dynamic allocator to change
428870ff 133 * it's allocation strategy. Once the space map cannot satisfy
9babb374
BB
134 * an allocation of this size then it switches to using more
135 * aggressive strategy (i.e search by size rather than offset).
136 */
4e21fd06 137uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
9babb374
BB
138
139/*
140 * The minimum free space, in percent, which must be available
141 * in a space map to continue allocations in a first-fit fashion.
4e21fd06 142 * Once the space map's free space drops below this level we dynamically
9babb374
BB
143 * switch to using best-fit allocations.
144 */
428870ff
BB
145int metaslab_df_free_pct = 4;
146
428870ff 147/*
93cf2076 148 * Percentage of all cpus that can be used by the metaslab taskq.
428870ff 149 */
93cf2076 150int metaslab_load_pct = 50;
428870ff
BB
151
152/*
93cf2076
GW
153 * Determines how many txgs a metaslab may remain loaded without having any
154 * allocations from it. As long as a metaslab continues to be used we will
155 * keep it loaded.
428870ff 156 */
93cf2076 157int metaslab_unload_delay = TXG_SIZE * 2;
9babb374 158
93cf2076
GW
159/*
160 * Max number of metaslabs per group to preload.
161 */
162int metaslab_preload_limit = SPA_DVAS_PER_BP;
163
164/*
165 * Enable/disable preloading of metaslab.
166 */
f3a7f661 167int metaslab_preload_enabled = B_TRUE;
93cf2076
GW
168
169/*
f3a7f661 170 * Enable/disable fragmentation weighting on metaslabs.
93cf2076 171 */
f3a7f661 172int metaslab_fragmentation_factor_enabled = B_TRUE;
93cf2076 173
f3a7f661
GW
174/*
175 * Enable/disable lba weighting (i.e. outer tracks are given preference).
176 */
177int metaslab_lba_weighting_enabled = B_TRUE;
178
179/*
180 * Enable/disable metaslab group biasing.
181 */
182int metaslab_bias_enabled = B_TRUE;
183
4e21fd06 184
a1d477c2
MA
185/*
186 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
187 */
188boolean_t zfs_remap_blkptr_enable = B_TRUE;
189
4e21fd06
DB
190/*
191 * Enable/disable segment-based metaslab selection.
192 */
193int zfs_metaslab_segment_weight_enabled = B_TRUE;
194
195/*
196 * When using segment-based metaslab selection, we will continue
197 * allocating from the active metaslab until we have exhausted
198 * zfs_metaslab_switch_threshold of its buckets.
199 */
200int zfs_metaslab_switch_threshold = 2;
201
202/*
203 * Internal switch to enable/disable the metaslab allocation tracing
204 * facility.
205 */
206#ifdef _METASLAB_TRACING
207boolean_t metaslab_trace_enabled = B_TRUE;
208#endif
209
210/*
211 * Maximum entries that the metaslab allocation tracing facility will keep
212 * in a given list when running in non-debug mode. We limit the number
213 * of entries in non-debug mode to prevent us from using up too much memory.
214 * The limit should be sufficiently large that we don't expect any allocation
215 * to every exceed this value. In debug mode, the system will panic if this
216 * limit is ever reached allowing for further investigation.
217 */
218#ifdef _METASLAB_TRACING
219uint64_t metaslab_trace_max_entries = 5000;
220#endif
221
222static uint64_t metaslab_weight(metaslab_t *);
223static void metaslab_set_fragmentation(metaslab_t *);
d2734cce 224static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
a1d477c2 225static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
4e21fd06 226
492f64e9
PD
227static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
228static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
4e21fd06
DB
229#ifdef _METASLAB_TRACING
230kmem_cache_t *metaslab_alloc_trace_cache;
231#endif
93cf2076 232
34dc7c2f
BB
233/*
234 * ==========================================================================
235 * Metaslab classes
236 * ==========================================================================
237 */
238metaslab_class_t *
93cf2076 239metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
34dc7c2f
BB
240{
241 metaslab_class_t *mc;
242
79c76d5b 243 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
34dc7c2f 244
428870ff 245 mc->mc_spa = spa;
34dc7c2f 246 mc->mc_rotor = NULL;
9babb374 247 mc->mc_ops = ops;
3dfb57a3 248 mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
492f64e9 249 mc->mc_alloc_slots = kmem_zalloc(spa->spa_alloc_count *
c13060e4 250 sizeof (zfs_refcount_t), KM_SLEEP);
492f64e9
PD
251 mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
252 sizeof (uint64_t), KM_SLEEP);
253 for (int i = 0; i < spa->spa_alloc_count; i++)
424fd7c3 254 zfs_refcount_create_tracked(&mc->mc_alloc_slots[i]);
34dc7c2f
BB
255
256 return (mc);
257}
258
259void
260metaslab_class_destroy(metaslab_class_t *mc)
261{
428870ff
BB
262 ASSERT(mc->mc_rotor == NULL);
263 ASSERT(mc->mc_alloc == 0);
264 ASSERT(mc->mc_deferred == 0);
265 ASSERT(mc->mc_space == 0);
266 ASSERT(mc->mc_dspace == 0);
34dc7c2f 267
492f64e9 268 for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
424fd7c3 269 zfs_refcount_destroy(&mc->mc_alloc_slots[i]);
492f64e9 270 kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
c13060e4 271 sizeof (zfs_refcount_t));
492f64e9
PD
272 kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
273 sizeof (uint64_t));
3dfb57a3 274 mutex_destroy(&mc->mc_lock);
34dc7c2f
BB
275 kmem_free(mc, sizeof (metaslab_class_t));
276}
277
428870ff
BB
278int
279metaslab_class_validate(metaslab_class_t *mc)
34dc7c2f 280{
428870ff
BB
281 metaslab_group_t *mg;
282 vdev_t *vd;
34dc7c2f 283
428870ff
BB
284 /*
285 * Must hold one of the spa_config locks.
286 */
287 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
288 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
34dc7c2f 289
428870ff
BB
290 if ((mg = mc->mc_rotor) == NULL)
291 return (0);
292
293 do {
294 vd = mg->mg_vd;
295 ASSERT(vd->vdev_mg != NULL);
296 ASSERT3P(vd->vdev_top, ==, vd);
297 ASSERT3P(mg->mg_class, ==, mc);
298 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
299 } while ((mg = mg->mg_next) != mc->mc_rotor);
300
301 return (0);
34dc7c2f
BB
302}
303
cc99f275 304static void
428870ff
BB
305metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
306 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
34dc7c2f 307{
428870ff
BB
308 atomic_add_64(&mc->mc_alloc, alloc_delta);
309 atomic_add_64(&mc->mc_deferred, defer_delta);
310 atomic_add_64(&mc->mc_space, space_delta);
311 atomic_add_64(&mc->mc_dspace, dspace_delta);
312}
34dc7c2f 313
428870ff
BB
314uint64_t
315metaslab_class_get_alloc(metaslab_class_t *mc)
316{
317 return (mc->mc_alloc);
318}
34dc7c2f 319
428870ff
BB
320uint64_t
321metaslab_class_get_deferred(metaslab_class_t *mc)
322{
323 return (mc->mc_deferred);
324}
34dc7c2f 325
428870ff
BB
326uint64_t
327metaslab_class_get_space(metaslab_class_t *mc)
328{
329 return (mc->mc_space);
330}
34dc7c2f 331
428870ff
BB
332uint64_t
333metaslab_class_get_dspace(metaslab_class_t *mc)
334{
335 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
34dc7c2f
BB
336}
337
f3a7f661
GW
338void
339metaslab_class_histogram_verify(metaslab_class_t *mc)
340{
cc99f275
DB
341 spa_t *spa = mc->mc_spa;
342 vdev_t *rvd = spa->spa_root_vdev;
f3a7f661 343 uint64_t *mc_hist;
1c27024e 344 int i;
f3a7f661
GW
345
346 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
347 return;
348
349 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
79c76d5b 350 KM_SLEEP);
f3a7f661 351
1c27024e 352 for (int c = 0; c < rvd->vdev_children; c++) {
f3a7f661
GW
353 vdev_t *tvd = rvd->vdev_child[c];
354 metaslab_group_t *mg = tvd->vdev_mg;
355
356 /*
357 * Skip any holes, uninitialized top-levels, or
358 * vdevs that are not in this metalab class.
359 */
a1d477c2 360 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
f3a7f661
GW
361 mg->mg_class != mc) {
362 continue;
363 }
364
365 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
366 mc_hist[i] += mg->mg_histogram[i];
367 }
368
369 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
370 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
371
372 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
373}
374
375/*
376 * Calculate the metaslab class's fragmentation metric. The metric
377 * is weighted based on the space contribution of each metaslab group.
378 * The return value will be a number between 0 and 100 (inclusive), or
379 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
380 * zfs_frag_table for more information about the metric.
381 */
382uint64_t
383metaslab_class_fragmentation(metaslab_class_t *mc)
384{
385 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
386 uint64_t fragmentation = 0;
f3a7f661
GW
387
388 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
389
1c27024e 390 for (int c = 0; c < rvd->vdev_children; c++) {
f3a7f661
GW
391 vdev_t *tvd = rvd->vdev_child[c];
392 metaslab_group_t *mg = tvd->vdev_mg;
393
394 /*
a1d477c2
MA
395 * Skip any holes, uninitialized top-levels,
396 * or vdevs that are not in this metalab class.
f3a7f661 397 */
a1d477c2 398 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
f3a7f661
GW
399 mg->mg_class != mc) {
400 continue;
401 }
402
403 /*
404 * If a metaslab group does not contain a fragmentation
405 * metric then just bail out.
406 */
407 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
408 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
409 return (ZFS_FRAG_INVALID);
410 }
411
412 /*
413 * Determine how much this metaslab_group is contributing
414 * to the overall pool fragmentation metric.
415 */
416 fragmentation += mg->mg_fragmentation *
417 metaslab_group_get_space(mg);
418 }
419 fragmentation /= metaslab_class_get_space(mc);
420
421 ASSERT3U(fragmentation, <=, 100);
422 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
423 return (fragmentation);
424}
425
426/*
427 * Calculate the amount of expandable space that is available in
428 * this metaslab class. If a device is expanded then its expandable
429 * space will be the amount of allocatable space that is currently not
430 * part of this metaslab class.
431 */
432uint64_t
433metaslab_class_expandable_space(metaslab_class_t *mc)
434{
435 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
436 uint64_t space = 0;
f3a7f661
GW
437
438 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
1c27024e 439 for (int c = 0; c < rvd->vdev_children; c++) {
f3a7f661
GW
440 vdev_t *tvd = rvd->vdev_child[c];
441 metaslab_group_t *mg = tvd->vdev_mg;
442
a1d477c2 443 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
f3a7f661
GW
444 mg->mg_class != mc) {
445 continue;
446 }
447
0f676dc2
GM
448 /*
449 * Calculate if we have enough space to add additional
450 * metaslabs. We report the expandable space in terms
451 * of the metaslab size since that's the unit of expansion.
452 */
453 space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
454 1ULL << tvd->vdev_ms_shift);
f3a7f661
GW
455 }
456 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
457 return (space);
458}
459
34dc7c2f
BB
460static int
461metaslab_compare(const void *x1, const void *x2)
462{
ee36c709
GN
463 const metaslab_t *m1 = (const metaslab_t *)x1;
464 const metaslab_t *m2 = (const metaslab_t *)x2;
34dc7c2f 465
492f64e9
PD
466 int sort1 = 0;
467 int sort2 = 0;
468 if (m1->ms_allocator != -1 && m1->ms_primary)
469 sort1 = 1;
470 else if (m1->ms_allocator != -1 && !m1->ms_primary)
471 sort1 = 2;
472 if (m2->ms_allocator != -1 && m2->ms_primary)
473 sort2 = 1;
474 else if (m2->ms_allocator != -1 && !m2->ms_primary)
475 sort2 = 2;
476
477 /*
478 * Sort inactive metaslabs first, then primaries, then secondaries. When
479 * selecting a metaslab to allocate from, an allocator first tries its
480 * primary, then secondary active metaslab. If it doesn't have active
481 * metaslabs, or can't allocate from them, it searches for an inactive
482 * metaslab to activate. If it can't find a suitable one, it will steal
483 * a primary or secondary metaslab from another allocator.
484 */
485 if (sort1 < sort2)
486 return (-1);
487 if (sort1 > sort2)
488 return (1);
489
ee36c709
GN
490 int cmp = AVL_CMP(m2->ms_weight, m1->ms_weight);
491 if (likely(cmp))
492 return (cmp);
34dc7c2f 493
ee36c709 494 IMPLY(AVL_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
34dc7c2f 495
ee36c709 496 return (AVL_CMP(m1->ms_start, m2->ms_start));
34dc7c2f
BB
497}
498
4e21fd06
DB
499/*
500 * Verify that the space accounting on disk matches the in-core range_trees.
501 */
502void
503metaslab_verify_space(metaslab_t *msp, uint64_t txg)
504{
505 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
506 uint64_t allocated = 0;
4e21fd06 507 uint64_t sm_free_space, msp_free_space;
4e21fd06
DB
508
509 ASSERT(MUTEX_HELD(&msp->ms_lock));
510
511 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
512 return;
513
514 /*
515 * We can only verify the metaslab space when we're called
516 * from syncing context with a loaded metaslab that has an allocated
517 * space map. Calling this in non-syncing context does not
518 * provide a consistent view of the metaslab since we're performing
519 * allocations in the future.
520 */
521 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
522 !msp->ms_loaded)
523 return;
524
525 sm_free_space = msp->ms_size - space_map_allocated(msp->ms_sm) -
526 space_map_alloc_delta(msp->ms_sm);
527
528 /*
529 * Account for future allocations since we would have already
530 * deducted that space from the ms_freetree.
531 */
1c27024e 532 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
4e21fd06 533 allocated +=
d2734cce 534 range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
4e21fd06 535 }
4e21fd06 536
d2734cce
SD
537 msp_free_space = range_tree_space(msp->ms_allocatable) + allocated +
538 msp->ms_deferspace + range_tree_space(msp->ms_freed);
4e21fd06
DB
539
540 VERIFY3U(sm_free_space, ==, msp_free_space);
541}
542
543/*
544 * ==========================================================================
545 * Metaslab groups
546 * ==========================================================================
547 */
ac72fac3
GW
548/*
549 * Update the allocatable flag and the metaslab group's capacity.
550 * The allocatable flag is set to true if the capacity is below
3dfb57a3
DB
551 * the zfs_mg_noalloc_threshold or has a fragmentation value that is
552 * greater than zfs_mg_fragmentation_threshold. If a metaslab group
553 * transitions from allocatable to non-allocatable or vice versa then the
554 * metaslab group's class is updated to reflect the transition.
ac72fac3
GW
555 */
556static void
557metaslab_group_alloc_update(metaslab_group_t *mg)
558{
559 vdev_t *vd = mg->mg_vd;
560 metaslab_class_t *mc = mg->mg_class;
561 vdev_stat_t *vs = &vd->vdev_stat;
562 boolean_t was_allocatable;
3dfb57a3 563 boolean_t was_initialized;
ac72fac3
GW
564
565 ASSERT(vd == vd->vdev_top);
a1d477c2
MA
566 ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
567 SCL_ALLOC);
ac72fac3
GW
568
569 mutex_enter(&mg->mg_lock);
570 was_allocatable = mg->mg_allocatable;
3dfb57a3 571 was_initialized = mg->mg_initialized;
ac72fac3
GW
572
573 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
574 (vs->vs_space + 1);
575
3dfb57a3
DB
576 mutex_enter(&mc->mc_lock);
577
578 /*
579 * If the metaslab group was just added then it won't
580 * have any space until we finish syncing out this txg.
581 * At that point we will consider it initialized and available
582 * for allocations. We also don't consider non-activated
583 * metaslab groups (e.g. vdevs that are in the middle of being removed)
584 * to be initialized, because they can't be used for allocation.
585 */
586 mg->mg_initialized = metaslab_group_initialized(mg);
587 if (!was_initialized && mg->mg_initialized) {
588 mc->mc_groups++;
589 } else if (was_initialized && !mg->mg_initialized) {
590 ASSERT3U(mc->mc_groups, >, 0);
591 mc->mc_groups--;
592 }
593 if (mg->mg_initialized)
594 mg->mg_no_free_space = B_FALSE;
595
f3a7f661
GW
596 /*
597 * A metaslab group is considered allocatable if it has plenty
598 * of free space or is not heavily fragmented. We only take
599 * fragmentation into account if the metaslab group has a valid
600 * fragmentation metric (i.e. a value between 0 and 100).
601 */
3dfb57a3
DB
602 mg->mg_allocatable = (mg->mg_activation_count > 0 &&
603 mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
f3a7f661
GW
604 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
605 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
ac72fac3
GW
606
607 /*
608 * The mc_alloc_groups maintains a count of the number of
609 * groups in this metaslab class that are still above the
610 * zfs_mg_noalloc_threshold. This is used by the allocating
611 * threads to determine if they should avoid allocations to
612 * a given group. The allocator will avoid allocations to a group
613 * if that group has reached or is below the zfs_mg_noalloc_threshold
614 * and there are still other groups that are above the threshold.
615 * When a group transitions from allocatable to non-allocatable or
616 * vice versa we update the metaslab class to reflect that change.
617 * When the mc_alloc_groups value drops to 0 that means that all
618 * groups have reached the zfs_mg_noalloc_threshold making all groups
619 * eligible for allocations. This effectively means that all devices
620 * are balanced again.
621 */
622 if (was_allocatable && !mg->mg_allocatable)
623 mc->mc_alloc_groups--;
624 else if (!was_allocatable && mg->mg_allocatable)
625 mc->mc_alloc_groups++;
3dfb57a3 626 mutex_exit(&mc->mc_lock);
f3a7f661 627
ac72fac3
GW
628 mutex_exit(&mg->mg_lock);
629}
630
34dc7c2f 631metaslab_group_t *
492f64e9 632metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
34dc7c2f
BB
633{
634 metaslab_group_t *mg;
635
79c76d5b 636 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
34dc7c2f 637 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
492f64e9
PD
638 mg->mg_primaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
639 KM_SLEEP);
640 mg->mg_secondaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
641 KM_SLEEP);
34dc7c2f
BB
642 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
643 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
34dc7c2f 644 mg->mg_vd = vd;
428870ff
BB
645 mg->mg_class = mc;
646 mg->mg_activation_count = 0;
3dfb57a3
DB
647 mg->mg_initialized = B_FALSE;
648 mg->mg_no_free_space = B_TRUE;
492f64e9
PD
649 mg->mg_allocators = allocators;
650
c13060e4
TS
651 mg->mg_alloc_queue_depth = kmem_zalloc(allocators *
652 sizeof (zfs_refcount_t), KM_SLEEP);
492f64e9
PD
653 mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
654 sizeof (uint64_t), KM_SLEEP);
655 for (int i = 0; i < allocators; i++) {
424fd7c3 656 zfs_refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
492f64e9
PD
657 mg->mg_cur_max_alloc_queue_depth[i] = 0;
658 }
34dc7c2f 659
3c51c5cb 660 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
1229323d 661 maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
93cf2076 662
34dc7c2f
BB
663 return (mg);
664}
665
666void
667metaslab_group_destroy(metaslab_group_t *mg)
668{
428870ff
BB
669 ASSERT(mg->mg_prev == NULL);
670 ASSERT(mg->mg_next == NULL);
671 /*
672 * We may have gone below zero with the activation count
673 * either because we never activated in the first place or
674 * because we're done, and possibly removing the vdev.
675 */
676 ASSERT(mg->mg_activation_count <= 0);
677
3c51c5cb 678 taskq_destroy(mg->mg_taskq);
34dc7c2f 679 avl_destroy(&mg->mg_metaslab_tree);
492f64e9
PD
680 kmem_free(mg->mg_primaries, mg->mg_allocators * sizeof (metaslab_t *));
681 kmem_free(mg->mg_secondaries, mg->mg_allocators *
682 sizeof (metaslab_t *));
34dc7c2f 683 mutex_destroy(&mg->mg_lock);
492f64e9
PD
684
685 for (int i = 0; i < mg->mg_allocators; i++) {
424fd7c3 686 zfs_refcount_destroy(&mg->mg_alloc_queue_depth[i]);
492f64e9
PD
687 mg->mg_cur_max_alloc_queue_depth[i] = 0;
688 }
689 kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
c13060e4 690 sizeof (zfs_refcount_t));
492f64e9
PD
691 kmem_free(mg->mg_cur_max_alloc_queue_depth, mg->mg_allocators *
692 sizeof (uint64_t));
693
34dc7c2f
BB
694 kmem_free(mg, sizeof (metaslab_group_t));
695}
696
428870ff
BB
697void
698metaslab_group_activate(metaslab_group_t *mg)
699{
700 metaslab_class_t *mc = mg->mg_class;
701 metaslab_group_t *mgprev, *mgnext;
702
a1d477c2 703 ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER), !=, 0);
428870ff
BB
704
705 ASSERT(mc->mc_rotor != mg);
706 ASSERT(mg->mg_prev == NULL);
707 ASSERT(mg->mg_next == NULL);
708 ASSERT(mg->mg_activation_count <= 0);
709
710 if (++mg->mg_activation_count <= 0)
711 return;
712
713 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
ac72fac3 714 metaslab_group_alloc_update(mg);
428870ff
BB
715
716 if ((mgprev = mc->mc_rotor) == NULL) {
717 mg->mg_prev = mg;
718 mg->mg_next = mg;
719 } else {
720 mgnext = mgprev->mg_next;
721 mg->mg_prev = mgprev;
722 mg->mg_next = mgnext;
723 mgprev->mg_next = mg;
724 mgnext->mg_prev = mg;
725 }
726 mc->mc_rotor = mg;
727}
728
a1d477c2
MA
729/*
730 * Passivate a metaslab group and remove it from the allocation rotor.
731 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
732 * a metaslab group. This function will momentarily drop spa_config_locks
733 * that are lower than the SCL_ALLOC lock (see comment below).
734 */
428870ff
BB
735void
736metaslab_group_passivate(metaslab_group_t *mg)
737{
738 metaslab_class_t *mc = mg->mg_class;
a1d477c2 739 spa_t *spa = mc->mc_spa;
428870ff 740 metaslab_group_t *mgprev, *mgnext;
a1d477c2 741 int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
428870ff 742
a1d477c2
MA
743 ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
744 (SCL_ALLOC | SCL_ZIO));
428870ff
BB
745
746 if (--mg->mg_activation_count != 0) {
747 ASSERT(mc->mc_rotor != mg);
748 ASSERT(mg->mg_prev == NULL);
749 ASSERT(mg->mg_next == NULL);
750 ASSERT(mg->mg_activation_count < 0);
751 return;
752 }
753
a1d477c2
MA
754 /*
755 * The spa_config_lock is an array of rwlocks, ordered as
756 * follows (from highest to lowest):
757 * SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
758 * SCL_ZIO > SCL_FREE > SCL_VDEV
759 * (For more information about the spa_config_lock see spa_misc.c)
760 * The higher the lock, the broader its coverage. When we passivate
761 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
762 * config locks. However, the metaslab group's taskq might be trying
763 * to preload metaslabs so we must drop the SCL_ZIO lock and any
764 * lower locks to allow the I/O to complete. At a minimum,
765 * we continue to hold the SCL_ALLOC lock, which prevents any future
766 * allocations from taking place and any changes to the vdev tree.
767 */
768 spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
c5528b9b 769 taskq_wait_outstanding(mg->mg_taskq, 0);
a1d477c2 770 spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
f3a7f661 771 metaslab_group_alloc_update(mg);
492f64e9
PD
772 for (int i = 0; i < mg->mg_allocators; i++) {
773 metaslab_t *msp = mg->mg_primaries[i];
774 if (msp != NULL) {
775 mutex_enter(&msp->ms_lock);
776 metaslab_passivate(msp,
777 metaslab_weight_from_range_tree(msp));
778 mutex_exit(&msp->ms_lock);
779 }
780 msp = mg->mg_secondaries[i];
781 if (msp != NULL) {
782 mutex_enter(&msp->ms_lock);
783 metaslab_passivate(msp,
784 metaslab_weight_from_range_tree(msp));
785 mutex_exit(&msp->ms_lock);
786 }
787 }
93cf2076 788
428870ff
BB
789 mgprev = mg->mg_prev;
790 mgnext = mg->mg_next;
791
792 if (mg == mgnext) {
793 mc->mc_rotor = NULL;
794 } else {
795 mc->mc_rotor = mgnext;
796 mgprev->mg_next = mgnext;
797 mgnext->mg_prev = mgprev;
798 }
799
800 mg->mg_prev = NULL;
801 mg->mg_next = NULL;
802}
803
3dfb57a3
DB
804boolean_t
805metaslab_group_initialized(metaslab_group_t *mg)
806{
807 vdev_t *vd = mg->mg_vd;
808 vdev_stat_t *vs = &vd->vdev_stat;
809
810 return (vs->vs_space != 0 && mg->mg_activation_count > 0);
811}
812
f3a7f661
GW
813uint64_t
814metaslab_group_get_space(metaslab_group_t *mg)
815{
816 return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
817}
818
819void
820metaslab_group_histogram_verify(metaslab_group_t *mg)
821{
822 uint64_t *mg_hist;
823 vdev_t *vd = mg->mg_vd;
824 uint64_t ashift = vd->vdev_ashift;
1c27024e 825 int i;
f3a7f661
GW
826
827 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
828 return;
829
830 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
79c76d5b 831 KM_SLEEP);
f3a7f661
GW
832
833 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
834 SPACE_MAP_HISTOGRAM_SIZE + ashift);
835
1c27024e 836 for (int m = 0; m < vd->vdev_ms_count; m++) {
f3a7f661
GW
837 metaslab_t *msp = vd->vdev_ms[m];
838
cc99f275
DB
839 /* skip if not active or not a member */
840 if (msp->ms_sm == NULL || msp->ms_group != mg)
f3a7f661
GW
841 continue;
842
843 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
844 mg_hist[i + ashift] +=
845 msp->ms_sm->sm_phys->smp_histogram[i];
846 }
847
848 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
849 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
850
851 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
852}
853
34dc7c2f 854static void
f3a7f661 855metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
34dc7c2f 856{
f3a7f661
GW
857 metaslab_class_t *mc = mg->mg_class;
858 uint64_t ashift = mg->mg_vd->vdev_ashift;
f3a7f661
GW
859
860 ASSERT(MUTEX_HELD(&msp->ms_lock));
861 if (msp->ms_sm == NULL)
862 return;
863
34dc7c2f 864 mutex_enter(&mg->mg_lock);
1c27024e 865 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
f3a7f661
GW
866 mg->mg_histogram[i + ashift] +=
867 msp->ms_sm->sm_phys->smp_histogram[i];
868 mc->mc_histogram[i + ashift] +=
869 msp->ms_sm->sm_phys->smp_histogram[i];
870 }
871 mutex_exit(&mg->mg_lock);
872}
873
874void
875metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
876{
877 metaslab_class_t *mc = mg->mg_class;
878 uint64_t ashift = mg->mg_vd->vdev_ashift;
f3a7f661
GW
879
880 ASSERT(MUTEX_HELD(&msp->ms_lock));
881 if (msp->ms_sm == NULL)
882 return;
883
884 mutex_enter(&mg->mg_lock);
1c27024e 885 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
f3a7f661
GW
886 ASSERT3U(mg->mg_histogram[i + ashift], >=,
887 msp->ms_sm->sm_phys->smp_histogram[i]);
888 ASSERT3U(mc->mc_histogram[i + ashift], >=,
889 msp->ms_sm->sm_phys->smp_histogram[i]);
890
891 mg->mg_histogram[i + ashift] -=
892 msp->ms_sm->sm_phys->smp_histogram[i];
893 mc->mc_histogram[i + ashift] -=
894 msp->ms_sm->sm_phys->smp_histogram[i];
895 }
896 mutex_exit(&mg->mg_lock);
897}
898
899static void
900metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
901{
34dc7c2f 902 ASSERT(msp->ms_group == NULL);
f3a7f661 903 mutex_enter(&mg->mg_lock);
34dc7c2f
BB
904 msp->ms_group = mg;
905 msp->ms_weight = 0;
906 avl_add(&mg->mg_metaslab_tree, msp);
907 mutex_exit(&mg->mg_lock);
f3a7f661
GW
908
909 mutex_enter(&msp->ms_lock);
910 metaslab_group_histogram_add(mg, msp);
911 mutex_exit(&msp->ms_lock);
34dc7c2f
BB
912}
913
914static void
915metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
916{
f3a7f661
GW
917 mutex_enter(&msp->ms_lock);
918 metaslab_group_histogram_remove(mg, msp);
919 mutex_exit(&msp->ms_lock);
920
34dc7c2f
BB
921 mutex_enter(&mg->mg_lock);
922 ASSERT(msp->ms_group == mg);
923 avl_remove(&mg->mg_metaslab_tree, msp);
924 msp->ms_group = NULL;
925 mutex_exit(&mg->mg_lock);
926}
927
492f64e9
PD
928static void
929metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
930{
931 ASSERT(MUTEX_HELD(&mg->mg_lock));
932 ASSERT(msp->ms_group == mg);
933 avl_remove(&mg->mg_metaslab_tree, msp);
934 msp->ms_weight = weight;
935 avl_add(&mg->mg_metaslab_tree, msp);
936
937}
938
34dc7c2f
BB
939static void
940metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
941{
942 /*
943 * Although in principle the weight can be any value, in
f3a7f661 944 * practice we do not use values in the range [1, 511].
34dc7c2f 945 */
f3a7f661 946 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
34dc7c2f
BB
947 ASSERT(MUTEX_HELD(&msp->ms_lock));
948
949 mutex_enter(&mg->mg_lock);
492f64e9 950 metaslab_group_sort_impl(mg, msp, weight);
34dc7c2f
BB
951 mutex_exit(&mg->mg_lock);
952}
953
f3a7f661
GW
954/*
955 * Calculate the fragmentation for a given metaslab group. We can use
956 * a simple average here since all metaslabs within the group must have
957 * the same size. The return value will be a value between 0 and 100
958 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslab in this
959 * group have a fragmentation metric.
960 */
961uint64_t
962metaslab_group_fragmentation(metaslab_group_t *mg)
963{
964 vdev_t *vd = mg->mg_vd;
965 uint64_t fragmentation = 0;
966 uint64_t valid_ms = 0;
f3a7f661 967
1c27024e 968 for (int m = 0; m < vd->vdev_ms_count; m++) {
f3a7f661
GW
969 metaslab_t *msp = vd->vdev_ms[m];
970
971 if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
972 continue;
cc99f275
DB
973 if (msp->ms_group != mg)
974 continue;
f3a7f661
GW
975
976 valid_ms++;
977 fragmentation += msp->ms_fragmentation;
978 }
979
cc99f275 980 if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
f3a7f661
GW
981 return (ZFS_FRAG_INVALID);
982
983 fragmentation /= valid_ms;
984 ASSERT3U(fragmentation, <=, 100);
985 return (fragmentation);
986}
987
ac72fac3
GW
988/*
989 * Determine if a given metaslab group should skip allocations. A metaslab
f3a7f661
GW
990 * group should avoid allocations if its free capacity is less than the
991 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
992 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
3dfb57a3
DB
993 * that can still handle allocations. If the allocation throttle is enabled
994 * then we skip allocations to devices that have reached their maximum
995 * allocation queue depth unless the selected metaslab group is the only
996 * eligible group remaining.
ac72fac3
GW
997 */
998static boolean_t
3dfb57a3 999metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
c197a77c 1000 uint64_t psize, int allocator, int d)
ac72fac3 1001{
3dfb57a3 1002 spa_t *spa = mg->mg_vd->vdev_spa;
ac72fac3
GW
1003 metaslab_class_t *mc = mg->mg_class;
1004
1005 /*
3dfb57a3
DB
1006 * We can only consider skipping this metaslab group if it's
1007 * in the normal metaslab class and there are other metaslab
1008 * groups to select from. Otherwise, we always consider it eligible
f3a7f661 1009 * for allocations.
ac72fac3 1010 */
cc99f275
DB
1011 if ((mc != spa_normal_class(spa) &&
1012 mc != spa_special_class(spa) &&
1013 mc != spa_dedup_class(spa)) ||
1014 mc->mc_groups <= 1)
3dfb57a3
DB
1015 return (B_TRUE);
1016
1017 /*
1018 * If the metaslab group's mg_allocatable flag is set (see comments
1019 * in metaslab_group_alloc_update() for more information) and
1020 * the allocation throttle is disabled then allow allocations to this
1021 * device. However, if the allocation throttle is enabled then
1022 * check if we have reached our allocation limit (mg_alloc_queue_depth)
1023 * to determine if we should allow allocations to this metaslab group.
1024 * If all metaslab groups are no longer considered allocatable
1025 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
1026 * gang block size then we allow allocations on this metaslab group
1027 * regardless of the mg_allocatable or throttle settings.
1028 */
1029 if (mg->mg_allocatable) {
1030 metaslab_group_t *mgp;
1031 int64_t qdepth;
492f64e9 1032 uint64_t qmax = mg->mg_cur_max_alloc_queue_depth[allocator];
3dfb57a3
DB
1033
1034 if (!mc->mc_alloc_throttle_enabled)
1035 return (B_TRUE);
1036
1037 /*
1038 * If this metaslab group does not have any free space, then
1039 * there is no point in looking further.
1040 */
1041 if (mg->mg_no_free_space)
1042 return (B_FALSE);
1043
c197a77c 1044 /*
1045 * Relax allocation throttling for ditto blocks. Due to
1046 * random imbalances in allocation it tends to push copies
1047 * to one vdev, that looks a bit better at the moment.
1048 */
1049 qmax = qmax * (4 + d) / 4;
1050
424fd7c3
TS
1051 qdepth = zfs_refcount_count(
1052 &mg->mg_alloc_queue_depth[allocator]);
3dfb57a3
DB
1053
1054 /*
1055 * If this metaslab group is below its qmax or it's
1056 * the only allocatable metasable group, then attempt
1057 * to allocate from it.
1058 */
1059 if (qdepth < qmax || mc->mc_alloc_groups == 1)
1060 return (B_TRUE);
1061 ASSERT3U(mc->mc_alloc_groups, >, 1);
1062
1063 /*
1064 * Since this metaslab group is at or over its qmax, we
1065 * need to determine if there are metaslab groups after this
1066 * one that might be able to handle this allocation. This is
1067 * racy since we can't hold the locks for all metaslab
1068 * groups at the same time when we make this check.
1069 */
1070 for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
492f64e9 1071 qmax = mgp->mg_cur_max_alloc_queue_depth[allocator];
c197a77c 1072 qmax = qmax * (4 + d) / 4;
424fd7c3 1073 qdepth = zfs_refcount_count(
492f64e9 1074 &mgp->mg_alloc_queue_depth[allocator]);
3dfb57a3
DB
1075
1076 /*
1077 * If there is another metaslab group that
1078 * might be able to handle the allocation, then
1079 * we return false so that we skip this group.
1080 */
1081 if (qdepth < qmax && !mgp->mg_no_free_space)
1082 return (B_FALSE);
1083 }
1084
1085 /*
1086 * We didn't find another group to handle the allocation
1087 * so we can't skip this metaslab group even though
1088 * we are at or over our qmax.
1089 */
1090 return (B_TRUE);
1091
1092 } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
1093 return (B_TRUE);
1094 }
1095 return (B_FALSE);
ac72fac3
GW
1096}
1097
428870ff
BB
1098/*
1099 * ==========================================================================
93cf2076 1100 * Range tree callbacks
428870ff
BB
1101 * ==========================================================================
1102 */
93cf2076
GW
1103
1104/*
1105 * Comparison function for the private size-ordered tree. Tree is sorted
1106 * by size, larger sizes at the end of the tree.
1107 */
428870ff 1108static int
93cf2076 1109metaslab_rangesize_compare(const void *x1, const void *x2)
428870ff 1110{
93cf2076
GW
1111 const range_seg_t *r1 = x1;
1112 const range_seg_t *r2 = x2;
1113 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1114 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
428870ff 1115
ee36c709
GN
1116 int cmp = AVL_CMP(rs_size1, rs_size2);
1117 if (likely(cmp))
1118 return (cmp);
428870ff 1119
ee36c709 1120 return (AVL_CMP(r1->rs_start, r2->rs_start));
428870ff
BB
1121}
1122
93cf2076
GW
1123/*
1124 * ==========================================================================
4e21fd06 1125 * Common allocator routines
93cf2076
GW
1126 * ==========================================================================
1127 */
1128
9babb374 1129/*
428870ff 1130 * Return the maximum contiguous segment within the metaslab.
9babb374 1131 */
9babb374 1132uint64_t
93cf2076 1133metaslab_block_maxsize(metaslab_t *msp)
9babb374 1134{
d2734cce 1135 avl_tree_t *t = &msp->ms_allocatable_by_size;
93cf2076 1136 range_seg_t *rs;
9babb374 1137
93cf2076 1138 if (t == NULL || (rs = avl_last(t)) == NULL)
9babb374
BB
1139 return (0ULL);
1140
93cf2076
GW
1141 return (rs->rs_end - rs->rs_start);
1142}
1143
4e21fd06
DB
1144static range_seg_t *
1145metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
93cf2076 1146{
4e21fd06
DB
1147 range_seg_t *rs, rsearch;
1148 avl_index_t where;
93cf2076 1149
4e21fd06
DB
1150 rsearch.rs_start = start;
1151 rsearch.rs_end = start + size;
93cf2076 1152
4e21fd06
DB
1153 rs = avl_find(t, &rsearch, &where);
1154 if (rs == NULL) {
1155 rs = avl_nearest(t, where, AVL_AFTER);
93cf2076 1156 }
93cf2076 1157
4e21fd06
DB
1158 return (rs);
1159}
93cf2076
GW
1160
1161#if defined(WITH_FF_BLOCK_ALLOCATOR) || \
1162 defined(WITH_DF_BLOCK_ALLOCATOR) || \
1163 defined(WITH_CF_BLOCK_ALLOCATOR)
1164/*
1165 * This is a helper function that can be used by the allocator to find
1166 * a suitable block to allocate. This will search the specified AVL
1167 * tree looking for a block that matches the specified criteria.
1168 */
1169static uint64_t
1170metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
1171 uint64_t align)
1172{
4e21fd06 1173 range_seg_t *rs = metaslab_block_find(t, *cursor, size);
93cf2076
GW
1174
1175 while (rs != NULL) {
1176 uint64_t offset = P2ROUNDUP(rs->rs_start, align);
1177
1178 if (offset + size <= rs->rs_end) {
1179 *cursor = offset + size;
1180 return (offset);
1181 }
1182 rs = AVL_NEXT(t, rs);
1183 }
1184
1185 /*
1186 * If we know we've searched the whole map (*cursor == 0), give up.
1187 * Otherwise, reset the cursor to the beginning and try again.
1188 */
1189 if (*cursor == 0)
1190 return (-1ULL);
1191
1192 *cursor = 0;
1193 return (metaslab_block_picker(t, cursor, size, align));
9babb374 1194}
93cf2076 1195#endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */
9babb374 1196
22c81dd8 1197#if defined(WITH_FF_BLOCK_ALLOCATOR)
428870ff
BB
1198/*
1199 * ==========================================================================
1200 * The first-fit block allocator
1201 * ==========================================================================
1202 */
1203static uint64_t
93cf2076 1204metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
9babb374 1205{
93cf2076
GW
1206 /*
1207 * Find the largest power of 2 block size that evenly divides the
1208 * requested size. This is used to try to allocate blocks with similar
1209 * alignment from the same area of the metaslab (i.e. same cursor
1210 * bucket) but it does not guarantee that other allocations sizes
1211 * may exist in the same region.
1212 */
428870ff 1213 uint64_t align = size & -size;
9bd274dd 1214 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
d2734cce 1215 avl_tree_t *t = &msp->ms_allocatable->rt_root;
9babb374 1216
428870ff 1217 return (metaslab_block_picker(t, cursor, size, align));
9babb374
BB
1218}
1219
93cf2076 1220static metaslab_ops_t metaslab_ff_ops = {
f3a7f661 1221 metaslab_ff_alloc
428870ff 1222};
9babb374 1223
93cf2076 1224metaslab_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
22c81dd8
BB
1225#endif /* WITH_FF_BLOCK_ALLOCATOR */
1226
1227#if defined(WITH_DF_BLOCK_ALLOCATOR)
428870ff
BB
1228/*
1229 * ==========================================================================
1230 * Dynamic block allocator -
1231 * Uses the first fit allocation scheme until space get low and then
1232 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
1233 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1234 * ==========================================================================
1235 */
9babb374 1236static uint64_t
93cf2076 1237metaslab_df_alloc(metaslab_t *msp, uint64_t size)
9babb374 1238{
93cf2076
GW
1239 /*
1240 * Find the largest power of 2 block size that evenly divides the
1241 * requested size. This is used to try to allocate blocks with similar
1242 * alignment from the same area of the metaslab (i.e. same cursor
1243 * bucket) but it does not guarantee that other allocations sizes
1244 * may exist in the same region.
1245 */
9babb374 1246 uint64_t align = size & -size;
9bd274dd 1247 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
d2734cce 1248 range_tree_t *rt = msp->ms_allocatable;
93cf2076
GW
1249 avl_tree_t *t = &rt->rt_root;
1250 uint64_t max_size = metaslab_block_maxsize(msp);
1251 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
9babb374 1252
93cf2076 1253 ASSERT(MUTEX_HELD(&msp->ms_lock));
d2734cce
SD
1254 ASSERT3U(avl_numnodes(t), ==,
1255 avl_numnodes(&msp->ms_allocatable_by_size));
9babb374
BB
1256
1257 if (max_size < size)
1258 return (-1ULL);
1259
1260 /*
1261 * If we're running low on space switch to using the size
1262 * sorted AVL tree (best-fit).
1263 */
1264 if (max_size < metaslab_df_alloc_threshold ||
1265 free_pct < metaslab_df_free_pct) {
d2734cce 1266 t = &msp->ms_allocatable_by_size;
9babb374
BB
1267 *cursor = 0;
1268 }
1269
1270 return (metaslab_block_picker(t, cursor, size, 1ULL));
1271}
1272
93cf2076 1273static metaslab_ops_t metaslab_df_ops = {
f3a7f661 1274 metaslab_df_alloc
34dc7c2f
BB
1275};
1276
93cf2076 1277metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
22c81dd8
BB
1278#endif /* WITH_DF_BLOCK_ALLOCATOR */
1279
93cf2076 1280#if defined(WITH_CF_BLOCK_ALLOCATOR)
428870ff
BB
1281/*
1282 * ==========================================================================
93cf2076
GW
1283 * Cursor fit block allocator -
1284 * Select the largest region in the metaslab, set the cursor to the beginning
1285 * of the range and the cursor_end to the end of the range. As allocations
1286 * are made advance the cursor. Continue allocating from the cursor until
1287 * the range is exhausted and then find a new range.
428870ff
BB
1288 * ==========================================================================
1289 */
1290static uint64_t
93cf2076 1291metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
428870ff 1292{
d2734cce
SD
1293 range_tree_t *rt = msp->ms_allocatable;
1294 avl_tree_t *t = &msp->ms_allocatable_by_size;
93cf2076
GW
1295 uint64_t *cursor = &msp->ms_lbas[0];
1296 uint64_t *cursor_end = &msp->ms_lbas[1];
428870ff
BB
1297 uint64_t offset = 0;
1298
93cf2076
GW
1299 ASSERT(MUTEX_HELD(&msp->ms_lock));
1300 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
428870ff 1301
93cf2076 1302 ASSERT3U(*cursor_end, >=, *cursor);
428870ff 1303
93cf2076
GW
1304 if ((*cursor + size) > *cursor_end) {
1305 range_seg_t *rs;
428870ff 1306
d2734cce 1307 rs = avl_last(&msp->ms_allocatable_by_size);
93cf2076
GW
1308 if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1309 return (-1ULL);
428870ff 1310
93cf2076
GW
1311 *cursor = rs->rs_start;
1312 *cursor_end = rs->rs_end;
428870ff 1313 }
93cf2076
GW
1314
1315 offset = *cursor;
1316 *cursor += size;
1317
428870ff
BB
1318 return (offset);
1319}
1320
93cf2076 1321static metaslab_ops_t metaslab_cf_ops = {
f3a7f661 1322 metaslab_cf_alloc
428870ff
BB
1323};
1324
93cf2076
GW
1325metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
1326#endif /* WITH_CF_BLOCK_ALLOCATOR */
22c81dd8
BB
1327
1328#if defined(WITH_NDF_BLOCK_ALLOCATOR)
93cf2076
GW
1329/*
1330 * ==========================================================================
1331 * New dynamic fit allocator -
1332 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1333 * contiguous blocks. If no region is found then just use the largest segment
1334 * that remains.
1335 * ==========================================================================
1336 */
1337
1338/*
1339 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1340 * to request from the allocator.
1341 */
428870ff
BB
1342uint64_t metaslab_ndf_clump_shift = 4;
1343
1344static uint64_t
93cf2076 1345metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
428870ff 1346{
d2734cce 1347 avl_tree_t *t = &msp->ms_allocatable->rt_root;
428870ff 1348 avl_index_t where;
93cf2076 1349 range_seg_t *rs, rsearch;
9bd274dd 1350 uint64_t hbit = highbit64(size);
93cf2076
GW
1351 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1352 uint64_t max_size = metaslab_block_maxsize(msp);
428870ff 1353
93cf2076 1354 ASSERT(MUTEX_HELD(&msp->ms_lock));
d2734cce
SD
1355 ASSERT3U(avl_numnodes(t), ==,
1356 avl_numnodes(&msp->ms_allocatable_by_size));
428870ff
BB
1357
1358 if (max_size < size)
1359 return (-1ULL);
1360
93cf2076
GW
1361 rsearch.rs_start = *cursor;
1362 rsearch.rs_end = *cursor + size;
428870ff 1363
93cf2076
GW
1364 rs = avl_find(t, &rsearch, &where);
1365 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
d2734cce 1366 t = &msp->ms_allocatable_by_size;
428870ff 1367
93cf2076
GW
1368 rsearch.rs_start = 0;
1369 rsearch.rs_end = MIN(max_size,
428870ff 1370 1ULL << (hbit + metaslab_ndf_clump_shift));
93cf2076
GW
1371 rs = avl_find(t, &rsearch, &where);
1372 if (rs == NULL)
1373 rs = avl_nearest(t, where, AVL_AFTER);
1374 ASSERT(rs != NULL);
428870ff
BB
1375 }
1376
93cf2076
GW
1377 if ((rs->rs_end - rs->rs_start) >= size) {
1378 *cursor = rs->rs_start + size;
1379 return (rs->rs_start);
428870ff
BB
1380 }
1381 return (-1ULL);
1382}
1383
93cf2076 1384static metaslab_ops_t metaslab_ndf_ops = {
f3a7f661 1385 metaslab_ndf_alloc
428870ff
BB
1386};
1387
93cf2076 1388metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
22c81dd8 1389#endif /* WITH_NDF_BLOCK_ALLOCATOR */
9babb374 1390
93cf2076 1391
34dc7c2f
BB
1392/*
1393 * ==========================================================================
1394 * Metaslabs
1395 * ==========================================================================
1396 */
93cf2076
GW
1397
1398/*
1399 * Wait for any in-progress metaslab loads to complete.
1400 */
1401void
1402metaslab_load_wait(metaslab_t *msp)
1403{
1404 ASSERT(MUTEX_HELD(&msp->ms_lock));
1405
1406 while (msp->ms_loading) {
1407 ASSERT(!msp->ms_loaded);
1408 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1409 }
1410}
1411
1412int
1413metaslab_load(metaslab_t *msp)
1414{
1415 int error = 0;
4e21fd06 1416 boolean_t success = B_FALSE;
93cf2076
GW
1417
1418 ASSERT(MUTEX_HELD(&msp->ms_lock));
1419 ASSERT(!msp->ms_loaded);
1420 ASSERT(!msp->ms_loading);
1421
1422 msp->ms_loading = B_TRUE;
a1d477c2
MA
1423 /*
1424 * Nobody else can manipulate a loading metaslab, so it's now safe
1425 * to drop the lock. This way we don't have to hold the lock while
1426 * reading the spacemap from disk.
1427 */
1428 mutex_exit(&msp->ms_lock);
93cf2076
GW
1429
1430 /*
1431 * If the space map has not been allocated yet, then treat
d2734cce 1432 * all the space in the metaslab as free and add it to ms_allocatable.
93cf2076 1433 */
d2734cce
SD
1434 if (msp->ms_sm != NULL) {
1435 error = space_map_load(msp->ms_sm, msp->ms_allocatable,
1436 SM_FREE);
1437 } else {
1438 range_tree_add(msp->ms_allocatable,
1439 msp->ms_start, msp->ms_size);
1440 }
93cf2076 1441
4e21fd06 1442 success = (error == 0);
a1d477c2
MA
1443
1444 mutex_enter(&msp->ms_lock);
93cf2076
GW
1445 msp->ms_loading = B_FALSE;
1446
4e21fd06
DB
1447 if (success) {
1448 ASSERT3P(msp->ms_group, !=, NULL);
1449 msp->ms_loaded = B_TRUE;
1450
d2734cce
SD
1451 /*
1452 * If the metaslab already has a spacemap, then we need to
1453 * remove all segments from the defer tree; otherwise, the
1454 * metaslab is completely empty and we can skip this.
1455 */
1456 if (msp->ms_sm != NULL) {
1457 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1458 range_tree_walk(msp->ms_defer[t],
1459 range_tree_remove, msp->ms_allocatable);
1460 }
93cf2076 1461 }
4e21fd06 1462 msp->ms_max_size = metaslab_block_maxsize(msp);
93cf2076
GW
1463 }
1464 cv_broadcast(&msp->ms_load_cv);
1465 return (error);
1466}
1467
1468void
1469metaslab_unload(metaslab_t *msp)
1470{
1471 ASSERT(MUTEX_HELD(&msp->ms_lock));
d2734cce 1472 range_tree_vacate(msp->ms_allocatable, NULL, NULL);
93cf2076
GW
1473 msp->ms_loaded = B_FALSE;
1474 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
4e21fd06 1475 msp->ms_max_size = 0;
93cf2076
GW
1476}
1477
cc99f275
DB
1478static void
1479metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
1480 int64_t defer_delta, int64_t space_delta)
1481{
1482 vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
1483
1484 ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
1485 ASSERT(vd->vdev_ms_count != 0);
1486
1487 metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
1488 vdev_deflated_space(vd, space_delta));
1489}
1490
fb42a493
PS
1491int
1492metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
1493 metaslab_t **msp)
34dc7c2f
BB
1494{
1495 vdev_t *vd = mg->mg_vd;
cc99f275
DB
1496 spa_t *spa = vd->vdev_spa;
1497 objset_t *mos = spa->spa_meta_objset;
fb42a493
PS
1498 metaslab_t *ms;
1499 int error;
34dc7c2f 1500
79c76d5b 1501 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
fb42a493 1502 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
a1d477c2 1503 mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
fb42a493
PS
1504 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
1505 ms->ms_id = id;
1506 ms->ms_start = id << vd->vdev_ms_shift;
1507 ms->ms_size = 1ULL << vd->vdev_ms_shift;
492f64e9
PD
1508 ms->ms_allocator = -1;
1509 ms->ms_new = B_TRUE;
34dc7c2f 1510
93cf2076
GW
1511 /*
1512 * We only open space map objects that already exist. All others
afe37326 1513 * will be opened when we finally allocate an object for it.
93cf2076 1514 */
afe37326 1515 if (object != 0) {
fb42a493 1516 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
a1d477c2 1517 ms->ms_size, vd->vdev_ashift);
fb42a493
PS
1518
1519 if (error != 0) {
1520 kmem_free(ms, sizeof (metaslab_t));
1521 return (error);
1522 }
1523
1524 ASSERT(ms->ms_sm != NULL);
93cf2076 1525 }
34dc7c2f
BB
1526
1527 /*
93cf2076 1528 * We create the main range tree here, but we don't create the
258553d3 1529 * other range trees until metaslab_sync_done(). This serves
34dc7c2f
BB
1530 * two purposes: it allows metaslab_sync_done() to detect the
1531 * addition of new space; and for debugging, it ensures that we'd
1532 * data fault on any attempt to use this metaslab before it's ready.
1533 */
d2734cce
SD
1534 ms->ms_allocatable = range_tree_create_impl(&rt_avl_ops,
1535 &ms->ms_allocatable_by_size, metaslab_rangesize_compare, 0);
fb42a493 1536 metaslab_group_add(mg, ms);
34dc7c2f 1537
4e21fd06 1538 metaslab_set_fragmentation(ms);
428870ff 1539
34dc7c2f
BB
1540 /*
1541 * If we're opening an existing pool (txg == 0) or creating
1542 * a new one (txg == TXG_INITIAL), all space is available now.
1543 * If we're adding space to an existing pool, the new space
1544 * does not become available until after this txg has synced.
4e21fd06
DB
1545 * The metaslab's weight will also be initialized when we sync
1546 * out this txg. This ensures that we don't attempt to allocate
1547 * from it before we have initialized it completely.
34dc7c2f
BB
1548 */
1549 if (txg <= TXG_INITIAL)
fb42a493 1550 metaslab_sync_done(ms, 0);
34dc7c2f 1551
93cf2076
GW
1552 /*
1553 * If metaslab_debug_load is set and we're initializing a metaslab
cc99f275
DB
1554 * that has an allocated space map object then load the space map
1555 * so that we can verify frees.
93cf2076 1556 */
fb42a493
PS
1557 if (metaslab_debug_load && ms->ms_sm != NULL) {
1558 mutex_enter(&ms->ms_lock);
1559 VERIFY0(metaslab_load(ms));
1560 mutex_exit(&ms->ms_lock);
93cf2076
GW
1561 }
1562
34dc7c2f 1563 if (txg != 0) {
34dc7c2f 1564 vdev_dirty(vd, 0, NULL, txg);
fb42a493 1565 vdev_dirty(vd, VDD_METASLAB, ms, txg);
34dc7c2f
BB
1566 }
1567
fb42a493
PS
1568 *msp = ms;
1569
1570 return (0);
34dc7c2f
BB
1571}
1572
1573void
1574metaslab_fini(metaslab_t *msp)
1575{
93cf2076 1576 metaslab_group_t *mg = msp->ms_group;
cc99f275 1577 vdev_t *vd = mg->mg_vd;
34dc7c2f
BB
1578
1579 metaslab_group_remove(mg, msp);
1580
1581 mutex_enter(&msp->ms_lock);
93cf2076 1582 VERIFY(msp->ms_group == NULL);
cc99f275
DB
1583 metaslab_space_update(vd, mg->mg_class,
1584 -space_map_allocated(msp->ms_sm), 0, -msp->ms_size);
1585
93cf2076
GW
1586 space_map_close(msp->ms_sm);
1587
1588 metaslab_unload(msp);
cc99f275 1589
d2734cce
SD
1590 range_tree_destroy(msp->ms_allocatable);
1591 range_tree_destroy(msp->ms_freeing);
1592 range_tree_destroy(msp->ms_freed);
34dc7c2f 1593
1c27024e 1594 for (int t = 0; t < TXG_SIZE; t++) {
d2734cce 1595 range_tree_destroy(msp->ms_allocating[t]);
34dc7c2f
BB
1596 }
1597
1c27024e 1598 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
d2734cce 1599 range_tree_destroy(msp->ms_defer[t]);
e51be066 1600 }
c99c9001 1601 ASSERT0(msp->ms_deferspace);
428870ff 1602
d2734cce
SD
1603 range_tree_destroy(msp->ms_checkpointing);
1604
34dc7c2f 1605 mutex_exit(&msp->ms_lock);
93cf2076 1606 cv_destroy(&msp->ms_load_cv);
34dc7c2f 1607 mutex_destroy(&msp->ms_lock);
a1d477c2 1608 mutex_destroy(&msp->ms_sync_lock);
492f64e9 1609 ASSERT3U(msp->ms_allocator, ==, -1);
34dc7c2f
BB
1610
1611 kmem_free(msp, sizeof (metaslab_t));
1612}
1613
f3a7f661
GW
1614#define FRAGMENTATION_TABLE_SIZE 17
1615
93cf2076 1616/*
f3a7f661
GW
1617 * This table defines a segment size based fragmentation metric that will
1618 * allow each metaslab to derive its own fragmentation value. This is done
1619 * by calculating the space in each bucket of the spacemap histogram and
1620 * multiplying that by the fragmetation metric in this table. Doing
1621 * this for all buckets and dividing it by the total amount of free
1622 * space in this metaslab (i.e. the total free space in all buckets) gives
1623 * us the fragmentation metric. This means that a high fragmentation metric
1624 * equates to most of the free space being comprised of small segments.
1625 * Conversely, if the metric is low, then most of the free space is in
1626 * large segments. A 10% change in fragmentation equates to approximately
1627 * double the number of segments.
93cf2076 1628 *
f3a7f661
GW
1629 * This table defines 0% fragmented space using 16MB segments. Testing has
1630 * shown that segments that are greater than or equal to 16MB do not suffer
1631 * from drastic performance problems. Using this value, we derive the rest
1632 * of the table. Since the fragmentation value is never stored on disk, it
1633 * is possible to change these calculations in the future.
1634 */
1635int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
1636 100, /* 512B */
1637 100, /* 1K */
1638 98, /* 2K */
1639 95, /* 4K */
1640 90, /* 8K */
1641 80, /* 16K */
1642 70, /* 32K */
1643 60, /* 64K */
1644 50, /* 128K */
1645 40, /* 256K */
1646 30, /* 512K */
1647 20, /* 1M */
1648 15, /* 2M */
1649 10, /* 4M */
1650 5, /* 8M */
1651 0 /* 16M */
1652};
1653
1654/*
1655 * Calclate the metaslab's fragmentation metric. A return value
1656 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
1657 * not support this metric. Otherwise, the return value should be in the
1658 * range [0, 100].
93cf2076 1659 */
4e21fd06
DB
1660static void
1661metaslab_set_fragmentation(metaslab_t *msp)
93cf2076 1662{
f3a7f661
GW
1663 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1664 uint64_t fragmentation = 0;
1665 uint64_t total = 0;
1666 boolean_t feature_enabled = spa_feature_is_enabled(spa,
1667 SPA_FEATURE_SPACEMAP_HISTOGRAM);
93cf2076 1668
4e21fd06
DB
1669 if (!feature_enabled) {
1670 msp->ms_fragmentation = ZFS_FRAG_INVALID;
1671 return;
1672 }
f3a7f661 1673
93cf2076 1674 /*
f3a7f661
GW
1675 * A null space map means that the entire metaslab is free
1676 * and thus is not fragmented.
93cf2076 1677 */
4e21fd06
DB
1678 if (msp->ms_sm == NULL) {
1679 msp->ms_fragmentation = 0;
1680 return;
1681 }
f3a7f661
GW
1682
1683 /*
4e21fd06 1684 * If this metaslab's space map has not been upgraded, flag it
f3a7f661
GW
1685 * so that we upgrade next time we encounter it.
1686 */
1687 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
3b7f360c 1688 uint64_t txg = spa_syncing_txg(spa);
93cf2076
GW
1689 vdev_t *vd = msp->ms_group->mg_vd;
1690
3b7f360c
GW
1691 /*
1692 * If we've reached the final dirty txg, then we must
1693 * be shutting down the pool. We don't want to dirty
1694 * any data past this point so skip setting the condense
1695 * flag. We can retry this action the next time the pool
1696 * is imported.
1697 */
1698 if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
8b0a0840
TC
1699 msp->ms_condense_wanted = B_TRUE;
1700 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
964c2d69 1701 zfs_dbgmsg("txg %llu, requesting force condense: "
3b7f360c
GW
1702 "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
1703 vd->vdev_id);
8b0a0840 1704 }
4e21fd06
DB
1705 msp->ms_fragmentation = ZFS_FRAG_INVALID;
1706 return;
93cf2076
GW
1707 }
1708
1c27024e 1709 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
f3a7f661
GW
1710 uint64_t space = 0;
1711 uint8_t shift = msp->ms_sm->sm_shift;
4e21fd06 1712
f3a7f661
GW
1713 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
1714 FRAGMENTATION_TABLE_SIZE - 1);
93cf2076 1715
93cf2076
GW
1716 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1717 continue;
1718
f3a7f661
GW
1719 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
1720 total += space;
1721
1722 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
1723 fragmentation += space * zfs_frag_table[idx];
93cf2076 1724 }
f3a7f661
GW
1725
1726 if (total > 0)
1727 fragmentation /= total;
1728 ASSERT3U(fragmentation, <=, 100);
4e21fd06
DB
1729
1730 msp->ms_fragmentation = fragmentation;
93cf2076 1731}
34dc7c2f 1732
f3a7f661
GW
1733/*
1734 * Compute a weight -- a selection preference value -- for the given metaslab.
1735 * This is based on the amount of free space, the level of fragmentation,
1736 * the LBA range, and whether the metaslab is loaded.
1737 */
34dc7c2f 1738static uint64_t
4e21fd06 1739metaslab_space_weight(metaslab_t *msp)
34dc7c2f
BB
1740{
1741 metaslab_group_t *mg = msp->ms_group;
34dc7c2f
BB
1742 vdev_t *vd = mg->mg_vd;
1743 uint64_t weight, space;
1744
1745 ASSERT(MUTEX_HELD(&msp->ms_lock));
4e21fd06 1746 ASSERT(!vd->vdev_removing);
c2e42f9d 1747
34dc7c2f
BB
1748 /*
1749 * The baseline weight is the metaslab's free space.
1750 */
93cf2076 1751 space = msp->ms_size - space_map_allocated(msp->ms_sm);
f3a7f661 1752
f3a7f661
GW
1753 if (metaslab_fragmentation_factor_enabled &&
1754 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
1755 /*
1756 * Use the fragmentation information to inversely scale
1757 * down the baseline weight. We need to ensure that we
1758 * don't exclude this metaslab completely when it's 100%
1759 * fragmented. To avoid this we reduce the fragmented value
1760 * by 1.
1761 */
1762 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
1763
1764 /*
1765 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1766 * this metaslab again. The fragmentation metric may have
1767 * decreased the space to something smaller than
1768 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1769 * so that we can consume any remaining space.
1770 */
1771 if (space > 0 && space < SPA_MINBLOCKSIZE)
1772 space = SPA_MINBLOCKSIZE;
1773 }
34dc7c2f
BB
1774 weight = space;
1775
1776 /*
1777 * Modern disks have uniform bit density and constant angular velocity.
1778 * Therefore, the outer recording zones are faster (higher bandwidth)
1779 * than the inner zones by the ratio of outer to inner track diameter,
1780 * which is typically around 2:1. We account for this by assigning
1781 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1782 * In effect, this means that we'll select the metaslab with the most
1783 * free bandwidth rather than simply the one with the most free space.
1784 */
fb40095f 1785 if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
f3a7f661
GW
1786 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1787 ASSERT(weight >= space && weight <= 2 * space);
1788 }
428870ff 1789
f3a7f661
GW
1790 /*
1791 * If this metaslab is one we're actively using, adjust its
1792 * weight to make it preferable to any inactive metaslab so
1793 * we'll polish it off. If the fragmentation on this metaslab
1794 * has exceed our threshold, then don't mark it active.
1795 */
1796 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
1797 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
428870ff
BB
1798 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1799 }
34dc7c2f 1800
4e21fd06
DB
1801 WEIGHT_SET_SPACEBASED(weight);
1802 return (weight);
1803}
1804
1805/*
1806 * Return the weight of the specified metaslab, according to the segment-based
1807 * weighting algorithm. The metaslab must be loaded. This function can
1808 * be called within a sync pass since it relies only on the metaslab's
1809 * range tree which is always accurate when the metaslab is loaded.
1810 */
1811static uint64_t
1812metaslab_weight_from_range_tree(metaslab_t *msp)
1813{
1814 uint64_t weight = 0;
1815 uint32_t segments = 0;
4e21fd06
DB
1816
1817 ASSERT(msp->ms_loaded);
1818
1c27024e
DB
1819 for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
1820 i--) {
4e21fd06
DB
1821 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
1822 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1823
1824 segments <<= 1;
d2734cce 1825 segments += msp->ms_allocatable->rt_histogram[i];
4e21fd06
DB
1826
1827 /*
1828 * The range tree provides more precision than the space map
1829 * and must be downgraded so that all values fit within the
1830 * space map's histogram. This allows us to compare loaded
1831 * vs. unloaded metaslabs to determine which metaslab is
1832 * considered "best".
1833 */
1834 if (i > max_idx)
1835 continue;
1836
1837 if (segments != 0) {
1838 WEIGHT_SET_COUNT(weight, segments);
1839 WEIGHT_SET_INDEX(weight, i);
1840 WEIGHT_SET_ACTIVE(weight, 0);
1841 break;
1842 }
1843 }
1844 return (weight);
1845}
1846
1847/*
1848 * Calculate the weight based on the on-disk histogram. This should only
1849 * be called after a sync pass has completely finished since the on-disk
1850 * information is updated in metaslab_sync().
1851 */
1852static uint64_t
1853metaslab_weight_from_spacemap(metaslab_t *msp)
1854{
1855 uint64_t weight = 0;
4e21fd06 1856
1c27024e 1857 for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
4e21fd06
DB
1858 if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) {
1859 WEIGHT_SET_COUNT(weight,
1860 msp->ms_sm->sm_phys->smp_histogram[i]);
1861 WEIGHT_SET_INDEX(weight, i +
1862 msp->ms_sm->sm_shift);
1863 WEIGHT_SET_ACTIVE(weight, 0);
1864 break;
1865 }
1866 }
1867 return (weight);
1868}
1869
1870/*
1871 * Compute a segment-based weight for the specified metaslab. The weight
1872 * is determined by highest bucket in the histogram. The information
1873 * for the highest bucket is encoded into the weight value.
1874 */
1875static uint64_t
1876metaslab_segment_weight(metaslab_t *msp)
1877{
1878 metaslab_group_t *mg = msp->ms_group;
1879 uint64_t weight = 0;
1880 uint8_t shift = mg->mg_vd->vdev_ashift;
1881
1882 ASSERT(MUTEX_HELD(&msp->ms_lock));
1883
1884 /*
1885 * The metaslab is completely free.
1886 */
1887 if (space_map_allocated(msp->ms_sm) == 0) {
1888 int idx = highbit64(msp->ms_size) - 1;
1889 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1890
1891 if (idx < max_idx) {
1892 WEIGHT_SET_COUNT(weight, 1ULL);
1893 WEIGHT_SET_INDEX(weight, idx);
1894 } else {
1895 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
1896 WEIGHT_SET_INDEX(weight, max_idx);
1897 }
1898 WEIGHT_SET_ACTIVE(weight, 0);
1899 ASSERT(!WEIGHT_IS_SPACEBASED(weight));
1900
1901 return (weight);
1902 }
1903
1904 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
1905
1906 /*
1907 * If the metaslab is fully allocated then just make the weight 0.
1908 */
1909 if (space_map_allocated(msp->ms_sm) == msp->ms_size)
1910 return (0);
1911 /*
1912 * If the metaslab is already loaded, then use the range tree to
1913 * determine the weight. Otherwise, we rely on the space map information
1914 * to generate the weight.
1915 */
1916 if (msp->ms_loaded) {
1917 weight = metaslab_weight_from_range_tree(msp);
1918 } else {
1919 weight = metaslab_weight_from_spacemap(msp);
1920 }
1921
1922 /*
1923 * If the metaslab was active the last time we calculated its weight
1924 * then keep it active. We want to consume the entire region that
1925 * is associated with this weight.
1926 */
1927 if (msp->ms_activation_weight != 0 && weight != 0)
1928 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
1929 return (weight);
1930}
1931
1932/*
1933 * Determine if we should attempt to allocate from this metaslab. If the
1934 * metaslab has a maximum size then we can quickly determine if the desired
1935 * allocation size can be satisfied. Otherwise, if we're using segment-based
1936 * weighting then we can determine the maximum allocation that this metaslab
1937 * can accommodate based on the index encoded in the weight. If we're using
1938 * space-based weights then rely on the entire weight (excluding the weight
1939 * type bit).
1940 */
1941boolean_t
1942metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
1943{
1944 boolean_t should_allocate;
1945
1946 if (msp->ms_max_size != 0)
1947 return (msp->ms_max_size >= asize);
1948
1949 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
1950 /*
1951 * The metaslab segment weight indicates segments in the
1952 * range [2^i, 2^(i+1)), where i is the index in the weight.
1953 * Since the asize might be in the middle of the range, we
1954 * should attempt the allocation if asize < 2^(i+1).
1955 */
1956 should_allocate = (asize <
1957 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
1958 } else {
1959 should_allocate = (asize <=
1960 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
1961 }
1962 return (should_allocate);
1963}
1964static uint64_t
1965metaslab_weight(metaslab_t *msp)
1966{
1967 vdev_t *vd = msp->ms_group->mg_vd;
1968 spa_t *spa = vd->vdev_spa;
1969 uint64_t weight;
1970
1971 ASSERT(MUTEX_HELD(&msp->ms_lock));
1972
1973 /*
a1d477c2 1974 * If this vdev is in the process of being removed, there is nothing
4e21fd06
DB
1975 * for us to do here.
1976 */
a1d477c2 1977 if (vd->vdev_removing)
4e21fd06 1978 return (0);
4e21fd06
DB
1979
1980 metaslab_set_fragmentation(msp);
1981
1982 /*
1983 * Update the maximum size if the metaslab is loaded. This will
1984 * ensure that we get an accurate maximum size if newly freed space
1985 * has been added back into the free tree.
1986 */
1987 if (msp->ms_loaded)
1988 msp->ms_max_size = metaslab_block_maxsize(msp);
1989
1990 /*
1991 * Segment-based weighting requires space map histogram support.
1992 */
1993 if (zfs_metaslab_segment_weight_enabled &&
1994 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
1995 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
1996 sizeof (space_map_phys_t))) {
1997 weight = metaslab_segment_weight(msp);
1998 } else {
1999 weight = metaslab_space_weight(msp);
2000 }
93cf2076 2001 return (weight);
34dc7c2f
BB
2002}
2003
2004static int
492f64e9
PD
2005metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
2006 int allocator, uint64_t activation_weight)
2007{
2008 /*
2009 * If we're activating for the claim code, we don't want to actually
2010 * set the metaslab up for a specific allocator.
2011 */
2012 if (activation_weight == METASLAB_WEIGHT_CLAIM)
2013 return (0);
2014 metaslab_t **arr = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
2015 mg->mg_primaries : mg->mg_secondaries);
2016
2017 ASSERT(MUTEX_HELD(&msp->ms_lock));
2018 mutex_enter(&mg->mg_lock);
2019 if (arr[allocator] != NULL) {
2020 mutex_exit(&mg->mg_lock);
2021 return (EEXIST);
2022 }
2023
2024 arr[allocator] = msp;
2025 ASSERT3S(msp->ms_allocator, ==, -1);
2026 msp->ms_allocator = allocator;
2027 msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
2028 mutex_exit(&mg->mg_lock);
2029
2030 return (0);
2031}
2032
2033static int
2034metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
34dc7c2f 2035{
34dc7c2f
BB
2036 ASSERT(MUTEX_HELD(&msp->ms_lock));
2037
2038 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
492f64e9 2039 int error = 0;
93cf2076
GW
2040 metaslab_load_wait(msp);
2041 if (!msp->ms_loaded) {
492f64e9 2042 if ((error = metaslab_load(msp)) != 0) {
428870ff
BB
2043 metaslab_group_sort(msp->ms_group, msp, 0);
2044 return (error);
2045 }
34dc7c2f 2046 }
492f64e9
PD
2047 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
2048 /*
2049 * The metaslab was activated for another allocator
2050 * while we were waiting, we should reselect.
2051 */
2052 return (EBUSY);
2053 }
2054 if ((error = metaslab_activate_allocator(msp->ms_group, msp,
2055 allocator, activation_weight)) != 0) {
2056 return (error);
2057 }
9babb374 2058
4e21fd06 2059 msp->ms_activation_weight = msp->ms_weight;
34dc7c2f
BB
2060 metaslab_group_sort(msp->ms_group, msp,
2061 msp->ms_weight | activation_weight);
2062 }
93cf2076 2063 ASSERT(msp->ms_loaded);
34dc7c2f
BB
2064 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
2065
2066 return (0);
2067}
2068
492f64e9
PD
2069static void
2070metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
2071 uint64_t weight)
2072{
2073 ASSERT(MUTEX_HELD(&msp->ms_lock));
2074 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
2075 metaslab_group_sort(mg, msp, weight);
2076 return;
2077 }
2078
2079 mutex_enter(&mg->mg_lock);
2080 ASSERT3P(msp->ms_group, ==, mg);
2081 if (msp->ms_primary) {
2082 ASSERT3U(0, <=, msp->ms_allocator);
2083 ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
2084 ASSERT3P(mg->mg_primaries[msp->ms_allocator], ==, msp);
2085 ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
2086 mg->mg_primaries[msp->ms_allocator] = NULL;
2087 } else {
2088 ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
2089 ASSERT3P(mg->mg_secondaries[msp->ms_allocator], ==, msp);
2090 mg->mg_secondaries[msp->ms_allocator] = NULL;
2091 }
2092 msp->ms_allocator = -1;
2093 metaslab_group_sort_impl(mg, msp, weight);
2094 mutex_exit(&mg->mg_lock);
2095}
2096
34dc7c2f 2097static void
4e21fd06 2098metaslab_passivate(metaslab_t *msp, uint64_t weight)
34dc7c2f 2099{
4e21fd06
DB
2100 ASSERTV(uint64_t size = weight & ~METASLAB_WEIGHT_TYPE);
2101
34dc7c2f
BB
2102 /*
2103 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
2104 * this metaslab again. In that case, it had better be empty,
2105 * or we would be leaving space on the table.
2106 */
94d49e8f
TC
2107 ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
2108 size >= SPA_MINBLOCKSIZE ||
d2734cce 2109 range_tree_space(msp->ms_allocatable) == 0);
4e21fd06
DB
2110 ASSERT0(weight & METASLAB_ACTIVE_MASK);
2111
2112 msp->ms_activation_weight = 0;
492f64e9 2113 metaslab_passivate_allocator(msp->ms_group, msp, weight);
34dc7c2f
BB
2114 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
2115}
2116
4e21fd06
DB
2117/*
2118 * Segment-based metaslabs are activated once and remain active until
2119 * we either fail an allocation attempt (similar to space-based metaslabs)
2120 * or have exhausted the free space in zfs_metaslab_switch_threshold
2121 * buckets since the metaslab was activated. This function checks to see
2122 * if we've exhaused the zfs_metaslab_switch_threshold buckets in the
2123 * metaslab and passivates it proactively. This will allow us to select a
2124 * metaslab with a larger contiguous region, if any, remaining within this
2125 * metaslab group. If we're in sync pass > 1, then we continue using this
2126 * metaslab so that we don't dirty more block and cause more sync passes.
2127 */
2128void
2129metaslab_segment_may_passivate(metaslab_t *msp)
2130{
2131 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
4e21fd06
DB
2132
2133 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
2134 return;
2135
2136 /*
2137 * Since we are in the middle of a sync pass, the most accurate
2138 * information that is accessible to us is the in-core range tree
2139 * histogram; calculate the new weight based on that information.
2140 */
1c27024e
DB
2141 uint64_t weight = metaslab_weight_from_range_tree(msp);
2142 int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
2143 int current_idx = WEIGHT_GET_INDEX(weight);
4e21fd06
DB
2144
2145 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
2146 metaslab_passivate(msp, weight);
2147}
2148
93cf2076
GW
2149static void
2150metaslab_preload(void *arg)
2151{
2152 metaslab_t *msp = arg;
2153 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1cd77734 2154 fstrans_cookie_t cookie = spl_fstrans_mark();
93cf2076 2155
080b3100
GW
2156 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
2157
93cf2076
GW
2158 mutex_enter(&msp->ms_lock);
2159 metaslab_load_wait(msp);
2160 if (!msp->ms_loaded)
2161 (void) metaslab_load(msp);
4e21fd06 2162 msp->ms_selected_txg = spa_syncing_txg(spa);
93cf2076 2163 mutex_exit(&msp->ms_lock);
1cd77734 2164 spl_fstrans_unmark(cookie);
93cf2076
GW
2165}
2166
2167static void
2168metaslab_group_preload(metaslab_group_t *mg)
2169{
2170 spa_t *spa = mg->mg_vd->vdev_spa;
2171 metaslab_t *msp;
2172 avl_tree_t *t = &mg->mg_metaslab_tree;
2173 int m = 0;
2174
2175 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
c5528b9b 2176 taskq_wait_outstanding(mg->mg_taskq, 0);
93cf2076
GW
2177 return;
2178 }
93cf2076 2179
080b3100 2180 mutex_enter(&mg->mg_lock);
a1d477c2 2181
93cf2076 2182 /*
080b3100 2183 * Load the next potential metaslabs
93cf2076 2184 */
4e21fd06 2185 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
a1d477c2
MA
2186 ASSERT3P(msp->ms_group, ==, mg);
2187
f3a7f661
GW
2188 /*
2189 * We preload only the maximum number of metaslabs specified
2190 * by metaslab_preload_limit. If a metaslab is being forced
2191 * to condense then we preload it too. This will ensure
2192 * that force condensing happens in the next txg.
2193 */
2194 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
f3a7f661
GW
2195 continue;
2196 }
93cf2076
GW
2197
2198 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
48d3eb40 2199 msp, TQ_SLEEP) != TASKQID_INVALID);
93cf2076
GW
2200 }
2201 mutex_exit(&mg->mg_lock);
2202}
2203
e51be066 2204/*
93cf2076
GW
2205 * Determine if the space map's on-disk footprint is past our tolerance
2206 * for inefficiency. We would like to use the following criteria to make
2207 * our decision:
e51be066
GW
2208 *
2209 * 1. The size of the space map object should not dramatically increase as a
93cf2076 2210 * result of writing out the free space range tree.
e51be066
GW
2211 *
2212 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
93cf2076 2213 * times the size than the free space range tree representation
a1d477c2 2214 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
e51be066 2215 *
b02fe35d
AR
2216 * 3. The on-disk size of the space map should actually decrease.
2217 *
b02fe35d
AR
2218 * Unfortunately, we cannot compute the on-disk size of the space map in this
2219 * context because we cannot accurately compute the effects of compression, etc.
2220 * Instead, we apply the heuristic described in the block comment for
2221 * zfs_metaslab_condense_block_threshold - we only condense if the space used
2222 * is greater than a threshold number of blocks.
e51be066
GW
2223 */
2224static boolean_t
2225metaslab_should_condense(metaslab_t *msp)
2226{
93cf2076 2227 space_map_t *sm = msp->ms_sm;
d2734cce
SD
2228 vdev_t *vd = msp->ms_group->mg_vd;
2229 uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
2230 uint64_t current_txg = spa_syncing_txg(vd->vdev_spa);
e51be066
GW
2231
2232 ASSERT(MUTEX_HELD(&msp->ms_lock));
93cf2076 2233 ASSERT(msp->ms_loaded);
e51be066
GW
2234
2235 /*
d2734cce
SD
2236 * Allocations and frees in early passes are generally more space
2237 * efficient (in terms of blocks described in space map entries)
2238 * than the ones in later passes (e.g. we don't compress after
2239 * sync pass 5) and condensing a metaslab multiple times in a txg
2240 * could degrade performance.
2241 *
2242 * Thus we prefer condensing each metaslab at most once every txg at
2243 * the earliest sync pass possible. If a metaslab is eligible for
2244 * condensing again after being considered for condensing within the
2245 * same txg, it will hopefully be dirty in the next txg where it will
2246 * be condensed at an earlier pass.
2247 */
2248 if (msp->ms_condense_checked_txg == current_txg)
2249 return (B_FALSE);
2250 msp->ms_condense_checked_txg = current_txg;
2251
2252 /*
4d044c4c
SD
2253 * We always condense metaslabs that are empty and metaslabs for
2254 * which a condense request has been made.
e51be066 2255 */
4d044c4c
SD
2256 if (avl_is_empty(&msp->ms_allocatable_by_size) ||
2257 msp->ms_condense_wanted)
e51be066
GW
2258 return (B_TRUE);
2259
4d044c4c
SD
2260 uint64_t object_size = space_map_length(msp->ms_sm);
2261 uint64_t optimal_size = space_map_estimate_optimal_size(sm,
2262 msp->ms_allocatable, SM_NO_VDEVID);
b02fe35d 2263
4d044c4c 2264 dmu_object_info_t doi;
b02fe35d 2265 dmu_object_info_from_db(sm->sm_dbuf, &doi);
4d044c4c 2266 uint64_t record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
b02fe35d 2267
4d044c4c 2268 return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
b02fe35d 2269 object_size > zfs_metaslab_condense_block_threshold * record_size);
e51be066
GW
2270}
2271
2272/*
2273 * Condense the on-disk space map representation to its minimized form.
2274 * The minimized form consists of a small number of allocations followed by
93cf2076 2275 * the entries of the free range tree.
e51be066
GW
2276 */
2277static void
2278metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
2279{
93cf2076
GW
2280 range_tree_t *condense_tree;
2281 space_map_t *sm = msp->ms_sm;
e51be066
GW
2282
2283 ASSERT(MUTEX_HELD(&msp->ms_lock));
93cf2076 2284 ASSERT(msp->ms_loaded);
e51be066 2285
f3a7f661 2286
964c2d69 2287 zfs_dbgmsg("condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
5f3d9c69
JS
2288 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
2289 msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
2290 msp->ms_group->mg_vd->vdev_spa->spa_name,
d2734cce
SD
2291 space_map_length(msp->ms_sm),
2292 avl_numnodes(&msp->ms_allocatable->rt_root),
f3a7f661
GW
2293 msp->ms_condense_wanted ? "TRUE" : "FALSE");
2294
2295 msp->ms_condense_wanted = B_FALSE;
e51be066
GW
2296
2297 /*
93cf2076 2298 * Create an range tree that is 100% allocated. We remove segments
e51be066
GW
2299 * that have been freed in this txg, any deferred frees that exist,
2300 * and any allocation in the future. Removing segments should be
93cf2076
GW
2301 * a relatively inexpensive operation since we expect these trees to
2302 * have a small number of nodes.
e51be066 2303 */
a1d477c2 2304 condense_tree = range_tree_create(NULL, NULL);
93cf2076 2305 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
e51be066 2306
d2734cce
SD
2307 range_tree_walk(msp->ms_freeing, range_tree_remove, condense_tree);
2308 range_tree_walk(msp->ms_freed, range_tree_remove, condense_tree);
e51be066 2309
1c27024e 2310 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
d2734cce 2311 range_tree_walk(msp->ms_defer[t],
93cf2076
GW
2312 range_tree_remove, condense_tree);
2313 }
e51be066 2314
1c27024e 2315 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
d2734cce 2316 range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
93cf2076
GW
2317 range_tree_remove, condense_tree);
2318 }
e51be066
GW
2319
2320 /*
2321 * We're about to drop the metaslab's lock thus allowing
2322 * other consumers to change it's content. Set the
93cf2076 2323 * metaslab's ms_condensing flag to ensure that
e51be066
GW
2324 * allocations on this metaslab do not occur while we're
2325 * in the middle of committing it to disk. This is only critical
d2734cce 2326 * for ms_allocatable as all other range trees use per txg
e51be066
GW
2327 * views of their content.
2328 */
93cf2076 2329 msp->ms_condensing = B_TRUE;
e51be066
GW
2330
2331 mutex_exit(&msp->ms_lock);
d2734cce 2332 space_map_truncate(sm, zfs_metaslab_sm_blksz, tx);
e51be066
GW
2333
2334 /*
4e21fd06 2335 * While we would ideally like to create a space map representation
e51be066 2336 * that consists only of allocation records, doing so can be
93cf2076 2337 * prohibitively expensive because the in-core free tree can be
e51be066 2338 * large, and therefore computationally expensive to subtract
93cf2076
GW
2339 * from the condense_tree. Instead we sync out two trees, a cheap
2340 * allocation only tree followed by the in-core free tree. While not
e51be066
GW
2341 * optimal, this is typically close to optimal, and much cheaper to
2342 * compute.
2343 */
4d044c4c 2344 space_map_write(sm, condense_tree, SM_ALLOC, SM_NO_VDEVID, tx);
93cf2076
GW
2345 range_tree_vacate(condense_tree, NULL, NULL);
2346 range_tree_destroy(condense_tree);
e51be066 2347
4d044c4c 2348 space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
a1d477c2 2349 mutex_enter(&msp->ms_lock);
93cf2076 2350 msp->ms_condensing = B_FALSE;
e51be066
GW
2351}
2352
34dc7c2f
BB
2353/*
2354 * Write a metaslab to disk in the context of the specified transaction group.
2355 */
2356void
2357metaslab_sync(metaslab_t *msp, uint64_t txg)
2358{
93cf2076
GW
2359 metaslab_group_t *mg = msp->ms_group;
2360 vdev_t *vd = mg->mg_vd;
34dc7c2f 2361 spa_t *spa = vd->vdev_spa;
428870ff 2362 objset_t *mos = spa_meta_objset(spa);
d2734cce 2363 range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
34dc7c2f 2364 dmu_tx_t *tx;
93cf2076 2365 uint64_t object = space_map_object(msp->ms_sm);
34dc7c2f 2366
428870ff
BB
2367 ASSERT(!vd->vdev_ishole);
2368
e51be066
GW
2369 /*
2370 * This metaslab has just been added so there's no work to do now.
2371 */
d2734cce 2372 if (msp->ms_freeing == NULL) {
93cf2076 2373 ASSERT3P(alloctree, ==, NULL);
e51be066
GW
2374 return;
2375 }
2376
93cf2076 2377 ASSERT3P(alloctree, !=, NULL);
d2734cce
SD
2378 ASSERT3P(msp->ms_freeing, !=, NULL);
2379 ASSERT3P(msp->ms_freed, !=, NULL);
2380 ASSERT3P(msp->ms_checkpointing, !=, NULL);
e51be066 2381
f3a7f661 2382 /*
d2734cce
SD
2383 * Normally, we don't want to process a metaslab if there are no
2384 * allocations or frees to perform. However, if the metaslab is being
2385 * forced to condense and it's loaded, we need to let it through.
f3a7f661 2386 */
d2734cce
SD
2387 if (range_tree_is_empty(alloctree) &&
2388 range_tree_is_empty(msp->ms_freeing) &&
2389 range_tree_is_empty(msp->ms_checkpointing) &&
3b7f360c 2390 !(msp->ms_loaded && msp->ms_condense_wanted))
428870ff 2391 return;
34dc7c2f 2392
3b7f360c
GW
2393
2394 VERIFY(txg <= spa_final_dirty_txg(spa));
2395
34dc7c2f
BB
2396 /*
2397 * The only state that can actually be changing concurrently with
d2734cce
SD
2398 * metaslab_sync() is the metaslab's ms_allocatable. No other
2399 * thread can be modifying this txg's alloc, freeing,
2400 * freed, or space_map_phys_t. We drop ms_lock whenever we
2401 * could call into the DMU, because the DMU can call down to us
a1d477c2
MA
2402 * (e.g. via zio_free()) at any time.
2403 *
2404 * The spa_vdev_remove_thread() can be reading metaslab state
2405 * concurrently, and it is locked out by the ms_sync_lock. Note
2406 * that the ms_lock is insufficient for this, because it is dropped
2407 * by space_map_write().
34dc7c2f 2408 */
428870ff 2409 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
34dc7c2f 2410
93cf2076
GW
2411 if (msp->ms_sm == NULL) {
2412 uint64_t new_object;
2413
d2734cce 2414 new_object = space_map_alloc(mos, zfs_metaslab_sm_blksz, tx);
93cf2076
GW
2415 VERIFY3U(new_object, !=, 0);
2416
2417 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
a1d477c2 2418 msp->ms_start, msp->ms_size, vd->vdev_ashift));
93cf2076 2419 ASSERT(msp->ms_sm != NULL);
34dc7c2f
BB
2420 }
2421
d2734cce
SD
2422 if (!range_tree_is_empty(msp->ms_checkpointing) &&
2423 vd->vdev_checkpoint_sm == NULL) {
2424 ASSERT(spa_has_checkpoint(spa));
2425
2426 uint64_t new_object = space_map_alloc(mos,
2427 vdev_standard_sm_blksz, tx);
2428 VERIFY3U(new_object, !=, 0);
2429
2430 VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
2431 mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
2432 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2433
2434 /*
2435 * We save the space map object as an entry in vdev_top_zap
2436 * so it can be retrieved when the pool is reopened after an
2437 * export or through zdb.
2438 */
2439 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
2440 vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
2441 sizeof (new_object), 1, &new_object, tx));
2442 }
2443
a1d477c2 2444 mutex_enter(&msp->ms_sync_lock);
428870ff
BB
2445 mutex_enter(&msp->ms_lock);
2446
96358617 2447 /*
4e21fd06
DB
2448 * Note: metaslab_condense() clears the space map's histogram.
2449 * Therefore we must verify and remove this histogram before
96358617
MA
2450 * condensing.
2451 */
2452 metaslab_group_histogram_verify(mg);
2453 metaslab_class_histogram_verify(mg->mg_class);
2454 metaslab_group_histogram_remove(mg, msp);
2455
d2734cce 2456 if (msp->ms_loaded && metaslab_should_condense(msp)) {
e51be066
GW
2457 metaslab_condense(msp, txg, tx);
2458 } else {
a1d477c2 2459 mutex_exit(&msp->ms_lock);
4d044c4c
SD
2460 space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
2461 SM_NO_VDEVID, tx);
2462 space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
2463 SM_NO_VDEVID, tx);
a1d477c2 2464 mutex_enter(&msp->ms_lock);
e51be066 2465 }
428870ff 2466
d2734cce
SD
2467 if (!range_tree_is_empty(msp->ms_checkpointing)) {
2468 ASSERT(spa_has_checkpoint(spa));
2469 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2470
2471 /*
2472 * Since we are doing writes to disk and the ms_checkpointing
2473 * tree won't be changing during that time, we drop the
2474 * ms_lock while writing to the checkpoint space map.
2475 */
2476 mutex_exit(&msp->ms_lock);
2477 space_map_write(vd->vdev_checkpoint_sm,
4d044c4c 2478 msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
d2734cce
SD
2479 mutex_enter(&msp->ms_lock);
2480 space_map_update(vd->vdev_checkpoint_sm);
2481
2482 spa->spa_checkpoint_info.sci_dspace +=
2483 range_tree_space(msp->ms_checkpointing);
2484 vd->vdev_stat.vs_checkpoint_space +=
2485 range_tree_space(msp->ms_checkpointing);
2486 ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
2487 -vd->vdev_checkpoint_sm->sm_alloc);
2488
2489 range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
2490 }
2491
93cf2076
GW
2492 if (msp->ms_loaded) {
2493 /*
a1d477c2 2494 * When the space map is loaded, we have an accurate
93cf2076
GW
2495 * histogram in the range tree. This gives us an opportunity
2496 * to bring the space map's histogram up-to-date so we clear
2497 * it first before updating it.
2498 */
2499 space_map_histogram_clear(msp->ms_sm);
d2734cce 2500 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
4e21fd06
DB
2501
2502 /*
2503 * Since we've cleared the histogram we need to add back
2504 * any free space that has already been processed, plus
2505 * any deferred space. This allows the on-disk histogram
2506 * to accurately reflect all free space even if some space
2507 * is not yet available for allocation (i.e. deferred).
2508 */
d2734cce 2509 space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
4e21fd06 2510
93cf2076 2511 /*
4e21fd06
DB
2512 * Add back any deferred free space that has not been
2513 * added back into the in-core free tree yet. This will
2514 * ensure that we don't end up with a space map histogram
2515 * that is completely empty unless the metaslab is fully
2516 * allocated.
93cf2076 2517 */
1c27024e 2518 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
4e21fd06 2519 space_map_histogram_add(msp->ms_sm,
d2734cce 2520 msp->ms_defer[t], tx);
4e21fd06 2521 }
93cf2076 2522 }
4e21fd06
DB
2523
2524 /*
2525 * Always add the free space from this sync pass to the space
2526 * map histogram. We want to make sure that the on-disk histogram
2527 * accounts for all free space. If the space map is not loaded,
2528 * then we will lose some accuracy but will correct it the next
2529 * time we load the space map.
2530 */
d2734cce 2531 space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
4e21fd06 2532
f3a7f661
GW
2533 metaslab_group_histogram_add(mg, msp);
2534 metaslab_group_histogram_verify(mg);
2535 metaslab_class_histogram_verify(mg->mg_class);
34dc7c2f 2536
e51be066 2537 /*
93cf2076 2538 * For sync pass 1, we avoid traversing this txg's free range tree
d2734cce
SD
2539 * and instead will just swap the pointers for freeing and
2540 * freed. We can safely do this since the freed_tree is
e51be066
GW
2541 * guaranteed to be empty on the initial pass.
2542 */
2543 if (spa_sync_pass(spa) == 1) {
d2734cce 2544 range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
e51be066 2545 } else {
d2734cce
SD
2546 range_tree_vacate(msp->ms_freeing,
2547 range_tree_add, msp->ms_freed);
34dc7c2f 2548 }
f3a7f661 2549 range_tree_vacate(alloctree, NULL, NULL);
34dc7c2f 2550
d2734cce
SD
2551 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
2552 ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
2553 & TXG_MASK]));
2554 ASSERT0(range_tree_space(msp->ms_freeing));
2555 ASSERT0(range_tree_space(msp->ms_checkpointing));
34dc7c2f
BB
2556
2557 mutex_exit(&msp->ms_lock);
2558
93cf2076
GW
2559 if (object != space_map_object(msp->ms_sm)) {
2560 object = space_map_object(msp->ms_sm);
2561 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
2562 msp->ms_id, sizeof (uint64_t), &object, tx);
2563 }
a1d477c2 2564 mutex_exit(&msp->ms_sync_lock);
34dc7c2f
BB
2565 dmu_tx_commit(tx);
2566}
2567
2568/*
2569 * Called after a transaction group has completely synced to mark
2570 * all of the metaslab's free space as usable.
2571 */
2572void
2573metaslab_sync_done(metaslab_t *msp, uint64_t txg)
2574{
34dc7c2f
BB
2575 metaslab_group_t *mg = msp->ms_group;
2576 vdev_t *vd = mg->mg_vd;
4e21fd06 2577 spa_t *spa = vd->vdev_spa;
93cf2076 2578 range_tree_t **defer_tree;
428870ff 2579 int64_t alloc_delta, defer_delta;
4e21fd06 2580 boolean_t defer_allowed = B_TRUE;
428870ff
BB
2581
2582 ASSERT(!vd->vdev_ishole);
34dc7c2f
BB
2583
2584 mutex_enter(&msp->ms_lock);
2585
2586 /*
2587 * If this metaslab is just becoming available, initialize its
258553d3 2588 * range trees and add its capacity to the vdev.
34dc7c2f 2589 */
d2734cce 2590 if (msp->ms_freed == NULL) {
1c27024e 2591 for (int t = 0; t < TXG_SIZE; t++) {
d2734cce 2592 ASSERT(msp->ms_allocating[t] == NULL);
93cf2076 2593
d2734cce 2594 msp->ms_allocating[t] = range_tree_create(NULL, NULL);
34dc7c2f 2595 }
428870ff 2596
d2734cce
SD
2597 ASSERT3P(msp->ms_freeing, ==, NULL);
2598 msp->ms_freeing = range_tree_create(NULL, NULL);
258553d3 2599
d2734cce
SD
2600 ASSERT3P(msp->ms_freed, ==, NULL);
2601 msp->ms_freed = range_tree_create(NULL, NULL);
258553d3 2602
1c27024e 2603 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
d2734cce 2604 ASSERT(msp->ms_defer[t] == NULL);
e51be066 2605
d2734cce 2606 msp->ms_defer[t] = range_tree_create(NULL, NULL);
93cf2076 2607 }
428870ff 2608
d2734cce
SD
2609 ASSERT3P(msp->ms_checkpointing, ==, NULL);
2610 msp->ms_checkpointing = range_tree_create(NULL, NULL);
2611
cc99f275 2612 metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
34dc7c2f 2613 }
d2734cce
SD
2614 ASSERT0(range_tree_space(msp->ms_freeing));
2615 ASSERT0(range_tree_space(msp->ms_checkpointing));
34dc7c2f 2616
d2734cce 2617 defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
93cf2076 2618
1c27024e 2619 uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
4e21fd06 2620 metaslab_class_get_alloc(spa_normal_class(spa));
a1d477c2 2621 if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
4e21fd06
DB
2622 defer_allowed = B_FALSE;
2623 }
2624
2625 defer_delta = 0;
93cf2076 2626 alloc_delta = space_map_alloc_delta(msp->ms_sm);
4e21fd06 2627 if (defer_allowed) {
d2734cce 2628 defer_delta = range_tree_space(msp->ms_freed) -
4e21fd06
DB
2629 range_tree_space(*defer_tree);
2630 } else {
2631 defer_delta -= range_tree_space(*defer_tree);
2632 }
428870ff 2633
cc99f275
DB
2634 metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
2635 defer_delta, 0);
34dc7c2f 2636
34dc7c2f 2637 /*
93cf2076 2638 * If there's a metaslab_load() in progress, wait for it to complete
34dc7c2f 2639 * so that we have a consistent view of the in-core space map.
34dc7c2f 2640 */
93cf2076 2641 metaslab_load_wait(msp);
c2e42f9d
GW
2642
2643 /*
93cf2076 2644 * Move the frees from the defer_tree back to the free
d2734cce
SD
2645 * range tree (if it's loaded). Swap the freed_tree and
2646 * the defer_tree -- this is safe to do because we've
2647 * just emptied out the defer_tree.
c2e42f9d 2648 */
93cf2076 2649 range_tree_vacate(*defer_tree,
d2734cce 2650 msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
4e21fd06 2651 if (defer_allowed) {
d2734cce 2652 range_tree_swap(&msp->ms_freed, defer_tree);
4e21fd06 2653 } else {
d2734cce
SD
2654 range_tree_vacate(msp->ms_freed,
2655 msp->ms_loaded ? range_tree_add : NULL,
2656 msp->ms_allocatable);
4e21fd06 2657 }
93cf2076 2658 space_map_update(msp->ms_sm);
34dc7c2f 2659
428870ff
BB
2660 msp->ms_deferspace += defer_delta;
2661 ASSERT3S(msp->ms_deferspace, >=, 0);
93cf2076 2662 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
428870ff
BB
2663 if (msp->ms_deferspace != 0) {
2664 /*
2665 * Keep syncing this metaslab until all deferred frees
2666 * are back in circulation.
2667 */
2668 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2669 }
2670
492f64e9
PD
2671 if (msp->ms_new) {
2672 msp->ms_new = B_FALSE;
2673 mutex_enter(&mg->mg_lock);
2674 mg->mg_ms_ready++;
2675 mutex_exit(&mg->mg_lock);
2676 }
4e21fd06
DB
2677 /*
2678 * Calculate the new weights before unloading any metaslabs.
2679 * This will give us the most accurate weighting.
2680 */
492f64e9
PD
2681 metaslab_group_sort(mg, msp, metaslab_weight(msp) |
2682 (msp->ms_weight & METASLAB_ACTIVE_MASK));
4e21fd06
DB
2683
2684 /*
2685 * If the metaslab is loaded and we've not tried to load or allocate
2686 * from it in 'metaslab_unload_delay' txgs, then unload it.
2687 */
2688 if (msp->ms_loaded &&
2689 msp->ms_selected_txg + metaslab_unload_delay < txg) {
2690
1c27024e 2691 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
93cf2076 2692 VERIFY0(range_tree_space(
d2734cce 2693 msp->ms_allocating[(txg + t) & TXG_MASK]));
93cf2076 2694 }
492f64e9
PD
2695 if (msp->ms_allocator != -1) {
2696 metaslab_passivate(msp, msp->ms_weight &
2697 ~METASLAB_ACTIVE_MASK);
2698 }
34dc7c2f 2699
93cf2076
GW
2700 if (!metaslab_debug_unload)
2701 metaslab_unload(msp);
34dc7c2f
BB
2702 }
2703
d2734cce
SD
2704 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
2705 ASSERT0(range_tree_space(msp->ms_freeing));
2706 ASSERT0(range_tree_space(msp->ms_freed));
2707 ASSERT0(range_tree_space(msp->ms_checkpointing));
a1d477c2 2708
34dc7c2f
BB
2709 mutex_exit(&msp->ms_lock);
2710}
2711
428870ff
BB
2712void
2713metaslab_sync_reassess(metaslab_group_t *mg)
2714{
a1d477c2
MA
2715 spa_t *spa = mg->mg_class->mc_spa;
2716
2717 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
1be627f5 2718 metaslab_group_alloc_update(mg);
f3a7f661 2719 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
6d974228 2720
428870ff 2721 /*
a1d477c2
MA
2722 * Preload the next potential metaslabs but only on active
2723 * metaslab groups. We can get into a state where the metaslab
2724 * is no longer active since we dirty metaslabs as we remove a
2725 * a device, thus potentially making the metaslab group eligible
2726 * for preloading.
428870ff 2727 */
a1d477c2
MA
2728 if (mg->mg_activation_count > 0) {
2729 metaslab_group_preload(mg);
2730 }
2731 spa_config_exit(spa, SCL_ALLOC, FTAG);
428870ff
BB
2732}
2733
cc99f275
DB
2734/*
2735 * When writing a ditto block (i.e. more than one DVA for a given BP) on
2736 * the same vdev as an existing DVA of this BP, then try to allocate it
2737 * on a different metaslab than existing DVAs (i.e. a unique metaslab).
2738 */
2739static boolean_t
2740metaslab_is_unique(metaslab_t *msp, dva_t *dva)
34dc7c2f 2741{
cc99f275
DB
2742 uint64_t dva_ms_id;
2743
2744 if (DVA_GET_ASIZE(dva) == 0)
2745 return (B_TRUE);
34dc7c2f
BB
2746
2747 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
cc99f275 2748 return (B_TRUE);
34dc7c2f 2749
cc99f275
DB
2750 dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
2751
2752 return (msp->ms_id != dva_ms_id);
34dc7c2f
BB
2753}
2754
4e21fd06
DB
2755/*
2756 * ==========================================================================
2757 * Metaslab allocation tracing facility
2758 * ==========================================================================
2759 */
2760#ifdef _METASLAB_TRACING
2761kstat_t *metaslab_trace_ksp;
2762kstat_named_t metaslab_trace_over_limit;
2763
2764void
2765metaslab_alloc_trace_init(void)
2766{
2767 ASSERT(metaslab_alloc_trace_cache == NULL);
2768 metaslab_alloc_trace_cache = kmem_cache_create(
2769 "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
2770 0, NULL, NULL, NULL, NULL, NULL, 0);
2771 metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
2772 "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
2773 if (metaslab_trace_ksp != NULL) {
2774 metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
2775 kstat_named_init(&metaslab_trace_over_limit,
2776 "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
2777 kstat_install(metaslab_trace_ksp);
2778 }
2779}
2780
2781void
2782metaslab_alloc_trace_fini(void)
2783{
2784 if (metaslab_trace_ksp != NULL) {
2785 kstat_delete(metaslab_trace_ksp);
2786 metaslab_trace_ksp = NULL;
2787 }
2788 kmem_cache_destroy(metaslab_alloc_trace_cache);
2789 metaslab_alloc_trace_cache = NULL;
2790}
2791
2792/*
2793 * Add an allocation trace element to the allocation tracing list.
2794 */
2795static void
2796metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
492f64e9
PD
2797 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
2798 int allocator)
4e21fd06
DB
2799{
2800 metaslab_alloc_trace_t *mat;
2801
2802 if (!metaslab_trace_enabled)
2803 return;
2804
2805 /*
2806 * When the tracing list reaches its maximum we remove
2807 * the second element in the list before adding a new one.
2808 * By removing the second element we preserve the original
2809 * entry as a clue to what allocations steps have already been
2810 * performed.
2811 */
2812 if (zal->zal_size == metaslab_trace_max_entries) {
2813 metaslab_alloc_trace_t *mat_next;
2814#ifdef DEBUG
2815 panic("too many entries in allocation list");
2816#endif
2817 atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
2818 zal->zal_size--;
2819 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
2820 list_remove(&zal->zal_list, mat_next);
2821 kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
2822 }
2823
2824 mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
2825 list_link_init(&mat->mat_list_node);
2826 mat->mat_mg = mg;
2827 mat->mat_msp = msp;
2828 mat->mat_size = psize;
2829 mat->mat_dva_id = dva_id;
2830 mat->mat_offset = offset;
2831 mat->mat_weight = 0;
492f64e9 2832 mat->mat_allocator = allocator;
4e21fd06
DB
2833
2834 if (msp != NULL)
2835 mat->mat_weight = msp->ms_weight;
2836
2837 /*
2838 * The list is part of the zio so locking is not required. Only
2839 * a single thread will perform allocations for a given zio.
2840 */
2841 list_insert_tail(&zal->zal_list, mat);
2842 zal->zal_size++;
2843
2844 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
2845}
2846
2847void
2848metaslab_trace_init(zio_alloc_list_t *zal)
2849{
2850 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
2851 offsetof(metaslab_alloc_trace_t, mat_list_node));
2852 zal->zal_size = 0;
2853}
2854
2855void
2856metaslab_trace_fini(zio_alloc_list_t *zal)
2857{
2858 metaslab_alloc_trace_t *mat;
2859
2860 while ((mat = list_remove_head(&zal->zal_list)) != NULL)
2861 kmem_cache_free(metaslab_alloc_trace_cache, mat);
2862 list_destroy(&zal->zal_list);
2863 zal->zal_size = 0;
2864}
2865#else
2866
492f64e9 2867#define metaslab_trace_add(zal, mg, msp, psize, id, off, alloc)
4e21fd06
DB
2868
2869void
2870metaslab_alloc_trace_init(void)
2871{
2872}
2873
2874void
2875metaslab_alloc_trace_fini(void)
2876{
2877}
2878
2879void
2880metaslab_trace_init(zio_alloc_list_t *zal)
2881{
2882}
2883
2884void
2885metaslab_trace_fini(zio_alloc_list_t *zal)
2886{
2887}
2888
2889#endif /* _METASLAB_TRACING */
2890
3dfb57a3
DB
2891/*
2892 * ==========================================================================
2893 * Metaslab block operations
2894 * ==========================================================================
2895 */
2896
2897static void
492f64e9
PD
2898metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
2899 int allocator)
3dfb57a3 2900{
3dfb57a3 2901 if (!(flags & METASLAB_ASYNC_ALLOC) ||
492f64e9 2902 (flags & METASLAB_DONT_THROTTLE))
3dfb57a3
DB
2903 return;
2904
1c27024e 2905 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3dfb57a3
DB
2906 if (!mg->mg_class->mc_alloc_throttle_enabled)
2907 return;
2908
c13060e4 2909 (void) zfs_refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
492f64e9
PD
2910}
2911
2912static void
2913metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
2914{
2915 uint64_t max = mg->mg_max_alloc_queue_depth;
2916 uint64_t cur = mg->mg_cur_max_alloc_queue_depth[allocator];
2917 while (cur < max) {
2918 if (atomic_cas_64(&mg->mg_cur_max_alloc_queue_depth[allocator],
2919 cur, cur + 1) == cur) {
2920 atomic_inc_64(
2921 &mg->mg_class->mc_alloc_max_slots[allocator]);
2922 return;
2923 }
2924 cur = mg->mg_cur_max_alloc_queue_depth[allocator];
2925 }
3dfb57a3
DB
2926}
2927
2928void
492f64e9
PD
2929metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
2930 int allocator, boolean_t io_complete)
3dfb57a3 2931{
3dfb57a3 2932 if (!(flags & METASLAB_ASYNC_ALLOC) ||
492f64e9 2933 (flags & METASLAB_DONT_THROTTLE))
3dfb57a3
DB
2934 return;
2935
1c27024e 2936 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3dfb57a3
DB
2937 if (!mg->mg_class->mc_alloc_throttle_enabled)
2938 return;
2939
424fd7c3 2940 (void) zfs_refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
492f64e9
PD
2941 if (io_complete)
2942 metaslab_group_increment_qdepth(mg, allocator);
3dfb57a3
DB
2943}
2944
2945void
492f64e9
PD
2946metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
2947 int allocator)
3dfb57a3
DB
2948{
2949#ifdef ZFS_DEBUG
2950 const dva_t *dva = bp->blk_dva;
2951 int ndvas = BP_GET_NDVAS(bp);
3dfb57a3 2952
1c27024e 2953 for (int d = 0; d < ndvas; d++) {
3dfb57a3
DB
2954 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
2955 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
424fd7c3
TS
2956 VERIFY(zfs_refcount_not_held(
2957 &mg->mg_alloc_queue_depth[allocator], tag));
3dfb57a3
DB
2958 }
2959#endif
2960}
2961
34dc7c2f 2962static uint64_t
4e21fd06
DB
2963metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
2964{
2965 uint64_t start;
d2734cce 2966 range_tree_t *rt = msp->ms_allocatable;
4e21fd06
DB
2967 metaslab_class_t *mc = msp->ms_group->mg_class;
2968
2969 VERIFY(!msp->ms_condensing);
2970
2971 start = mc->mc_ops->msop_alloc(msp, size);
2972 if (start != -1ULL) {
2973 metaslab_group_t *mg = msp->ms_group;
2974 vdev_t *vd = mg->mg_vd;
2975
2976 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
2977 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2978 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
2979 range_tree_remove(rt, start, size);
2980
d2734cce 2981 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
4e21fd06
DB
2982 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
2983
d2734cce 2984 range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
4e21fd06
DB
2985
2986 /* Track the last successful allocation */
2987 msp->ms_alloc_txg = txg;
2988 metaslab_verify_space(msp, txg);
2989 }
2990
2991 /*
2992 * Now that we've attempted the allocation we need to update the
2993 * metaslab's maximum block size since it may have changed.
2994 */
2995 msp->ms_max_size = metaslab_block_maxsize(msp);
2996 return (start);
2997}
2998
492f64e9
PD
2999/*
3000 * Find the metaslab with the highest weight that is less than what we've
3001 * already tried. In the common case, this means that we will examine each
3002 * metaslab at most once. Note that concurrent callers could reorder metaslabs
3003 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
3004 * activated by another thread, and we fail to allocate from the metaslab we
3005 * have selected, we may not try the newly-activated metaslab, and instead
3006 * activate another metaslab. This is not optimal, but generally does not cause
3007 * any problems (a possible exception being if every metaslab is completely full
3008 * except for the the newly-activated metaslab which we fail to examine).
3009 */
3010static metaslab_t *
3011find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
cc99f275 3012 dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
492f64e9
PD
3013 zio_alloc_list_t *zal, metaslab_t *search, boolean_t *was_active)
3014{
3015 avl_index_t idx;
3016 avl_tree_t *t = &mg->mg_metaslab_tree;
3017 metaslab_t *msp = avl_find(t, search, &idx);
3018 if (msp == NULL)
3019 msp = avl_nearest(t, idx, AVL_AFTER);
3020
3021 for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
3022 int i;
3023 if (!metaslab_should_allocate(msp, asize)) {
3024 metaslab_trace_add(zal, mg, msp, asize, d,
3025 TRACE_TOO_SMALL, allocator);
3026 continue;
3027 }
3028
3029 /*
3030 * If the selected metaslab is condensing, skip it.
3031 */
3032 if (msp->ms_condensing)
3033 continue;
3034
3035 *was_active = msp->ms_allocator != -1;
3036 /*
3037 * If we're activating as primary, this is our first allocation
3038 * from this disk, so we don't need to check how close we are.
3039 * If the metaslab under consideration was already active,
3040 * we're getting desperate enough to steal another allocator's
3041 * metaslab, so we still don't care about distances.
3042 */
3043 if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
3044 break;
3045
492f64e9 3046 for (i = 0; i < d; i++) {
cc99f275
DB
3047 if (want_unique &&
3048 !metaslab_is_unique(msp, &dva[i]))
3049 break; /* try another metaslab */
492f64e9
PD
3050 }
3051 if (i == d)
3052 break;
3053 }
3054
3055 if (msp != NULL) {
3056 search->ms_weight = msp->ms_weight;
3057 search->ms_start = msp->ms_start + 1;
3058 search->ms_allocator = msp->ms_allocator;
3059 search->ms_primary = msp->ms_primary;
3060 }
3061 return (msp);
3062}
3063
3064/* ARGSUSED */
4e21fd06
DB
3065static uint64_t
3066metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
cc99f275
DB
3067 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva,
3068 int d, int allocator)
34dc7c2f
BB
3069{
3070 metaslab_t *msp = NULL;
3071 uint64_t offset = -1ULL;
34dc7c2f 3072 uint64_t activation_weight;
34dc7c2f
BB
3073
3074 activation_weight = METASLAB_WEIGHT_PRIMARY;
492f64e9
PD
3075 for (int i = 0; i < d; i++) {
3076 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
3077 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
34dc7c2f 3078 activation_weight = METASLAB_WEIGHT_SECONDARY;
492f64e9
PD
3079 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
3080 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
e38afd34 3081 activation_weight = METASLAB_WEIGHT_CLAIM;
9babb374
BB
3082 break;
3083 }
3084 }
34dc7c2f 3085
492f64e9
PD
3086 /*
3087 * If we don't have enough metaslabs active to fill the entire array, we
3088 * just use the 0th slot.
3089 */
e38afd34 3090 if (mg->mg_ms_ready < mg->mg_allocators * 3)
492f64e9 3091 allocator = 0;
492f64e9
PD
3092
3093 ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
3094
1c27024e 3095 metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
4e21fd06
DB
3096 search->ms_weight = UINT64_MAX;
3097 search->ms_start = 0;
492f64e9
PD
3098 /*
3099 * At the end of the metaslab tree are the already-active metaslabs,
3100 * first the primaries, then the secondaries. When we resume searching
3101 * through the tree, we need to consider ms_allocator and ms_primary so
3102 * we start in the location right after where we left off, and don't
3103 * accidentally loop forever considering the same metaslabs.
3104 */
3105 search->ms_allocator = -1;
3106 search->ms_primary = B_TRUE;
34dc7c2f 3107 for (;;) {
492f64e9 3108 boolean_t was_active = B_FALSE;
9babb374 3109
34dc7c2f 3110 mutex_enter(&mg->mg_lock);
4e21fd06 3111
492f64e9
PD
3112 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
3113 mg->mg_primaries[allocator] != NULL) {
3114 msp = mg->mg_primaries[allocator];
3115 was_active = B_TRUE;
3116 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
e38afd34 3117 mg->mg_secondaries[allocator] != NULL) {
492f64e9
PD
3118 msp = mg->mg_secondaries[allocator];
3119 was_active = B_TRUE;
3120 } else {
3121 msp = find_valid_metaslab(mg, activation_weight, dva, d,
cc99f275 3122 want_unique, asize, allocator, zal, search,
492f64e9 3123 &was_active);
34dc7c2f 3124 }
492f64e9 3125
34dc7c2f 3126 mutex_exit(&mg->mg_lock);
4e21fd06
DB
3127 if (msp == NULL) {
3128 kmem_free(search, sizeof (*search));
34dc7c2f 3129 return (-1ULL);
4e21fd06 3130 }
34dc7c2f 3131
ac72fac3 3132 mutex_enter(&msp->ms_lock);
34dc7c2f
BB
3133 /*
3134 * Ensure that the metaslab we have selected is still
3135 * capable of handling our request. It's possible that
3136 * another thread may have changed the weight while we
4e21fd06
DB
3137 * were blocked on the metaslab lock. We check the
3138 * active status first to see if we need to reselect
3139 * a new metaslab.
34dc7c2f 3140 */
4e21fd06 3141 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
34dc7c2f
BB
3142 mutex_exit(&msp->ms_lock);
3143 continue;
3144 }
3145
492f64e9
PD
3146 /*
3147 * If the metaslab is freshly activated for an allocator that
3148 * isn't the one we're allocating from, or if it's a primary and
3149 * we're seeking a secondary (or vice versa), we go back and
3150 * select a new metaslab.
3151 */
3152 if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
3153 (msp->ms_allocator != -1) &&
3154 (msp->ms_allocator != allocator || ((activation_weight ==
3155 METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
3156 mutex_exit(&msp->ms_lock);
3157 continue;
3158 }
3159
e38afd34 3160 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
3161 activation_weight != METASLAB_WEIGHT_CLAIM) {
492f64e9
PD
3162 metaslab_passivate(msp, msp->ms_weight &
3163 ~METASLAB_WEIGHT_CLAIM);
34dc7c2f
BB
3164 mutex_exit(&msp->ms_lock);
3165 continue;
3166 }
3167
492f64e9 3168 if (metaslab_activate(msp, allocator, activation_weight) != 0) {
34dc7c2f
BB
3169 mutex_exit(&msp->ms_lock);
3170 continue;
3171 }
492f64e9 3172
4e21fd06
DB
3173 msp->ms_selected_txg = txg;
3174
3175 /*
3176 * Now that we have the lock, recheck to see if we should
3177 * continue to use this metaslab for this allocation. The
3178 * the metaslab is now loaded so metaslab_should_allocate() can
3179 * accurately determine if the allocation attempt should
3180 * proceed.
3181 */
3182 if (!metaslab_should_allocate(msp, asize)) {
3183 /* Passivate this metaslab and select a new one. */
3184 metaslab_trace_add(zal, mg, msp, asize, d,
492f64e9 3185 TRACE_TOO_SMALL, allocator);
4e21fd06
DB
3186 goto next;
3187 }
3188
34dc7c2f 3189
7a614407
GW
3190 /*
3191 * If this metaslab is currently condensing then pick again as
3192 * we can't manipulate this metaslab until it's committed
3193 * to disk.
3194 */
93cf2076 3195 if (msp->ms_condensing) {
4e21fd06 3196 metaslab_trace_add(zal, mg, msp, asize, d,
492f64e9
PD
3197 TRACE_CONDENSING, allocator);
3198 metaslab_passivate(msp, msp->ms_weight &
3199 ~METASLAB_ACTIVE_MASK);
7a614407
GW
3200 mutex_exit(&msp->ms_lock);
3201 continue;
3202 }
3203
4e21fd06 3204 offset = metaslab_block_alloc(msp, asize, txg);
492f64e9 3205 metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
4e21fd06
DB
3206
3207 if (offset != -1ULL) {
3208 /* Proactively passivate the metaslab, if needed */
3209 metaslab_segment_may_passivate(msp);
34dc7c2f 3210 break;
4e21fd06
DB
3211 }
3212next:
3213 ASSERT(msp->ms_loaded);
3214
3215 /*
3216 * We were unable to allocate from this metaslab so determine
3217 * a new weight for this metaslab. Now that we have loaded
3218 * the metaslab we can provide a better hint to the metaslab
3219 * selector.
3220 *
3221 * For space-based metaslabs, we use the maximum block size.
3222 * This information is only available when the metaslab
3223 * is loaded and is more accurate than the generic free
3224 * space weight that was calculated by metaslab_weight().
3225 * This information allows us to quickly compare the maximum
3226 * available allocation in the metaslab to the allocation
3227 * size being requested.
3228 *
3229 * For segment-based metaslabs, determine the new weight
3230 * based on the highest bucket in the range tree. We
3231 * explicitly use the loaded segment weight (i.e. the range
3232 * tree histogram) since it contains the space that is
3233 * currently available for allocation and is accurate
3234 * even within a sync pass.
3235 */
3236 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
3237 uint64_t weight = metaslab_block_maxsize(msp);
3238 WEIGHT_SET_SPACEBASED(weight);
3239 metaslab_passivate(msp, weight);
3240 } else {
3241 metaslab_passivate(msp,
3242 metaslab_weight_from_range_tree(msp));
3243 }
34dc7c2f 3244
4e21fd06
DB
3245 /*
3246 * We have just failed an allocation attempt, check
3247 * that metaslab_should_allocate() agrees. Otherwise,
3248 * we may end up in an infinite loop retrying the same
3249 * metaslab.
3250 */
3251 ASSERT(!metaslab_should_allocate(msp, asize));
cc99f275 3252
34dc7c2f
BB
3253 mutex_exit(&msp->ms_lock);
3254 }
4e21fd06
DB
3255 mutex_exit(&msp->ms_lock);
3256 kmem_free(search, sizeof (*search));
3257 return (offset);
3258}
34dc7c2f 3259
4e21fd06
DB
3260static uint64_t
3261metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
cc99f275
DB
3262 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva,
3263 int d, int allocator)
4e21fd06
DB
3264{
3265 uint64_t offset;
3266 ASSERT(mg->mg_initialized);
34dc7c2f 3267
cc99f275
DB
3268 offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
3269 dva, d, allocator);
34dc7c2f 3270
4e21fd06
DB
3271 mutex_enter(&mg->mg_lock);
3272 if (offset == -1ULL) {
3273 mg->mg_failed_allocations++;
3274 metaslab_trace_add(zal, mg, NULL, asize, d,
492f64e9 3275 TRACE_GROUP_FAILURE, allocator);
4e21fd06
DB
3276 if (asize == SPA_GANGBLOCKSIZE) {
3277 /*
3278 * This metaslab group was unable to allocate
3279 * the minimum gang block size so it must be out of
3280 * space. We must notify the allocation throttle
3281 * to start skipping allocation attempts to this
3282 * metaslab group until more space becomes available.
3283 * Note: this failure cannot be caused by the
3284 * allocation throttle since the allocation throttle
3285 * is only responsible for skipping devices and
3286 * not failing block allocations.
3287 */
3288 mg->mg_no_free_space = B_TRUE;
3289 }
3290 }
3291 mg->mg_allocations++;
3292 mutex_exit(&mg->mg_lock);
34dc7c2f
BB
3293 return (offset);
3294}
3295
3296/*
3297 * Allocate a block for the specified i/o.
3298 */
a1d477c2 3299int
34dc7c2f 3300metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
4e21fd06 3301 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
492f64e9 3302 zio_alloc_list_t *zal, int allocator)
34dc7c2f 3303{
920dd524 3304 metaslab_group_t *mg, *fast_mg, *rotor;
34dc7c2f 3305 vdev_t *vd;
4e21fd06 3306 boolean_t try_hard = B_FALSE;
34dc7c2f
BB
3307
3308 ASSERT(!DVA_IS_VALID(&dva[d]));
3309
3310 /*
3311 * For testing, make some blocks above a certain size be gang blocks.
cc99f275 3312 * This will also test spilling from special to normal.
34dc7c2f 3313 */
d830d479 3314 if (psize >= metaslab_force_ganging && (ddi_get_lbolt() & 3) == 0) {
492f64e9
PD
3315 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
3316 allocator);
2e528b49 3317 return (SET_ERROR(ENOSPC));
4e21fd06 3318 }
34dc7c2f
BB
3319
3320 /*
3321 * Start at the rotor and loop through all mgs until we find something.
428870ff 3322 * Note that there's no locking on mc_rotor or mc_aliquot because
34dc7c2f
BB
3323 * nothing actually breaks if we miss a few updates -- we just won't
3324 * allocate quite as evenly. It all balances out over time.
3325 *
3326 * If we are doing ditto or log blocks, try to spread them across
3327 * consecutive vdevs. If we're forced to reuse a vdev before we've
3328 * allocated all of our ditto blocks, then try and spread them out on
3329 * that vdev as much as possible. If it turns out to not be possible,
3330 * gradually lower our standards until anything becomes acceptable.
3331 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
3332 * gives us hope of containing our fault domains to something we're
3333 * able to reason about. Otherwise, any two top-level vdev failures
3334 * will guarantee the loss of data. With consecutive allocation,
3335 * only two adjacent top-level vdev failures will result in data loss.
3336 *
3337 * If we are doing gang blocks (hintdva is non-NULL), try to keep
3338 * ourselves on the same vdev as our gang block header. That
3339 * way, we can hope for locality in vdev_cache, plus it makes our
3340 * fault domains something tractable.
3341 */
3342 if (hintdva) {
3343 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
428870ff
BB
3344
3345 /*
3346 * It's possible the vdev we're using as the hint no
a1d477c2
MA
3347 * longer exists or its mg has been closed (e.g. by
3348 * device removal). Consult the rotor when
428870ff
BB
3349 * all else fails.
3350 */
a1d477c2 3351 if (vd != NULL && vd->vdev_mg != NULL) {
34dc7c2f 3352 mg = vd->vdev_mg;
428870ff
BB
3353
3354 if (flags & METASLAB_HINTBP_AVOID &&
3355 mg->mg_next != NULL)
3356 mg = mg->mg_next;
3357 } else {
3358 mg = mc->mc_rotor;
3359 }
34dc7c2f
BB
3360 } else if (d != 0) {
3361 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
3362 mg = vd->vdev_mg->mg_next;
920dd524
ED
3363 } else if (flags & METASLAB_FASTWRITE) {
3364 mg = fast_mg = mc->mc_rotor;
3365
3366 do {
3367 if (fast_mg->mg_vd->vdev_pending_fastwrite <
3368 mg->mg_vd->vdev_pending_fastwrite)
3369 mg = fast_mg;
3370 } while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);
3371
34dc7c2f 3372 } else {
cc99f275 3373 ASSERT(mc->mc_rotor != NULL);
34dc7c2f
BB
3374 mg = mc->mc_rotor;
3375 }
3376
3377 /*
428870ff
BB
3378 * If the hint put us into the wrong metaslab class, or into a
3379 * metaslab group that has been passivated, just follow the rotor.
34dc7c2f 3380 */
428870ff 3381 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
34dc7c2f
BB
3382 mg = mc->mc_rotor;
3383
3384 rotor = mg;
3385top:
34dc7c2f 3386 do {
4e21fd06 3387 boolean_t allocatable;
428870ff 3388
3dfb57a3 3389 ASSERT(mg->mg_activation_count == 1);
34dc7c2f 3390 vd = mg->mg_vd;
fb5f0bc8 3391
34dc7c2f 3392 /*
b128c09f 3393 * Don't allocate from faulted devices.
34dc7c2f 3394 */
4e21fd06 3395 if (try_hard) {
fb5f0bc8
BB
3396 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
3397 allocatable = vdev_allocatable(vd);
3398 spa_config_exit(spa, SCL_ZIO, FTAG);
3399 } else {
3400 allocatable = vdev_allocatable(vd);
3401 }
ac72fac3
GW
3402
3403 /*
3404 * Determine if the selected metaslab group is eligible
3dfb57a3
DB
3405 * for allocations. If we're ganging then don't allow
3406 * this metaslab group to skip allocations since that would
3407 * inadvertently return ENOSPC and suspend the pool
ac72fac3
GW
3408 * even though space is still available.
3409 */
4e21fd06 3410 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
3dfb57a3 3411 allocatable = metaslab_group_allocatable(mg, rotor,
c197a77c 3412 psize, allocator, d);
3dfb57a3 3413 }
ac72fac3 3414
4e21fd06
DB
3415 if (!allocatable) {
3416 metaslab_trace_add(zal, mg, NULL, psize, d,
492f64e9 3417 TRACE_NOT_ALLOCATABLE, allocator);
34dc7c2f 3418 goto next;
4e21fd06 3419 }
fb5f0bc8 3420
3dfb57a3
DB
3421 ASSERT(mg->mg_initialized);
3422
34dc7c2f 3423 /*
4e21fd06
DB
3424 * Avoid writing single-copy data to a failing,
3425 * non-redundant vdev, unless we've already tried all
3426 * other vdevs.
34dc7c2f
BB
3427 */
3428 if ((vd->vdev_stat.vs_write_errors > 0 ||
3429 vd->vdev_state < VDEV_STATE_HEALTHY) &&
4e21fd06
DB
3430 d == 0 && !try_hard && vd->vdev_children == 0) {
3431 metaslab_trace_add(zal, mg, NULL, psize, d,
492f64e9 3432 TRACE_VDEV_ERROR, allocator);
34dc7c2f
BB
3433 goto next;
3434 }
3435
3436 ASSERT(mg->mg_class == mc);
3437
1c27024e 3438 uint64_t asize = vdev_psize_to_asize(vd, psize);
34dc7c2f
BB
3439 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
3440
cc99f275
DB
3441 /*
3442 * If we don't need to try hard, then require that the
3443 * block be on an different metaslab from any other DVAs
3444 * in this BP (unique=true). If we are trying hard, then
3445 * allow any metaslab to be used (unique=false).
3446 */
1c27024e 3447 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
cc99f275 3448 !try_hard, dva, d, allocator);
3dfb57a3 3449
34dc7c2f
BB
3450 if (offset != -1ULL) {
3451 /*
3452 * If we've just selected this metaslab group,
3453 * figure out whether the corresponding vdev is
3454 * over- or under-used relative to the pool,
3455 * and set an allocation bias to even it out.
bb3250d0
ED
3456 *
3457 * Bias is also used to compensate for unequally
3458 * sized vdevs so that space is allocated fairly.
34dc7c2f 3459 */
f3a7f661 3460 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
34dc7c2f 3461 vdev_stat_t *vs = &vd->vdev_stat;
bb3250d0
ED
3462 int64_t vs_free = vs->vs_space - vs->vs_alloc;
3463 int64_t mc_free = mc->mc_space - mc->mc_alloc;
3464 int64_t ratio;
34dc7c2f
BB
3465
3466 /*
6d974228
GW
3467 * Calculate how much more or less we should
3468 * try to allocate from this device during
3469 * this iteration around the rotor.
6d974228 3470 *
bb3250d0
ED
3471 * This basically introduces a zero-centered
3472 * bias towards the devices with the most
3473 * free space, while compensating for vdev
3474 * size differences.
3475 *
3476 * Examples:
3477 * vdev V1 = 16M/128M
3478 * vdev V2 = 16M/128M
3479 * ratio(V1) = 100% ratio(V2) = 100%
3480 *
3481 * vdev V1 = 16M/128M
3482 * vdev V2 = 64M/128M
3483 * ratio(V1) = 127% ratio(V2) = 72%
6d974228 3484 *
bb3250d0
ED
3485 * vdev V1 = 16M/128M
3486 * vdev V2 = 64M/512M
3487 * ratio(V1) = 40% ratio(V2) = 160%
34dc7c2f 3488 */
bb3250d0
ED
3489 ratio = (vs_free * mc->mc_alloc_groups * 100) /
3490 (mc_free + 1);
3491 mg->mg_bias = ((ratio - 100) *
6d974228 3492 (int64_t)mg->mg_aliquot) / 100;
f3a7f661
GW
3493 } else if (!metaslab_bias_enabled) {
3494 mg->mg_bias = 0;
34dc7c2f
BB
3495 }
3496
920dd524
ED
3497 if ((flags & METASLAB_FASTWRITE) ||
3498 atomic_add_64_nv(&mc->mc_aliquot, asize) >=
34dc7c2f
BB
3499 mg->mg_aliquot + mg->mg_bias) {
3500 mc->mc_rotor = mg->mg_next;
428870ff 3501 mc->mc_aliquot = 0;
34dc7c2f
BB
3502 }
3503
3504 DVA_SET_VDEV(&dva[d], vd->vdev_id);
3505 DVA_SET_OFFSET(&dva[d], offset);
e3e7cf60
D
3506 DVA_SET_GANG(&dva[d],
3507 ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
34dc7c2f
BB
3508 DVA_SET_ASIZE(&dva[d], asize);
3509
920dd524
ED
3510 if (flags & METASLAB_FASTWRITE) {
3511 atomic_add_64(&vd->vdev_pending_fastwrite,
3512 psize);
920dd524
ED
3513 }
3514
34dc7c2f
BB
3515 return (0);
3516 }
3517next:
3518 mc->mc_rotor = mg->mg_next;
428870ff 3519 mc->mc_aliquot = 0;
34dc7c2f
BB
3520 } while ((mg = mg->mg_next) != rotor);
3521
4e21fd06
DB
3522 /*
3523 * If we haven't tried hard, do so now.
3524 */
3525 if (!try_hard) {
3526 try_hard = B_TRUE;
fb5f0bc8
BB
3527 goto top;
3528 }
3529
34dc7c2f
BB
3530 bzero(&dva[d], sizeof (dva_t));
3531
492f64e9 3532 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
2e528b49 3533 return (SET_ERROR(ENOSPC));
34dc7c2f
BB
3534}
3535
a1d477c2
MA
3536void
3537metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
d2734cce 3538 boolean_t checkpoint)
a1d477c2
MA
3539{
3540 metaslab_t *msp;
d2734cce 3541 spa_t *spa = vd->vdev_spa;
a1d477c2 3542
a1d477c2
MA
3543 ASSERT(vdev_is_concrete(vd));
3544 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3545 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
3546
3547 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3548
3549 VERIFY(!msp->ms_condensing);
3550 VERIFY3U(offset, >=, msp->ms_start);
3551 VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
3552 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3553 VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
3554
3555 metaslab_check_free_impl(vd, offset, asize);
d2734cce 3556
a1d477c2 3557 mutex_enter(&msp->ms_lock);
d2734cce
SD
3558 if (range_tree_is_empty(msp->ms_freeing) &&
3559 range_tree_is_empty(msp->ms_checkpointing)) {
3560 vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
3561 }
3562
3563 if (checkpoint) {
3564 ASSERT(spa_has_checkpoint(spa));
3565 range_tree_add(msp->ms_checkpointing, offset, asize);
3566 } else {
3567 range_tree_add(msp->ms_freeing, offset, asize);
a1d477c2 3568 }
a1d477c2
MA
3569 mutex_exit(&msp->ms_lock);
3570}
3571
3572/* ARGSUSED */
3573void
3574metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3575 uint64_t size, void *arg)
3576{
d2734cce
SD
3577 boolean_t *checkpoint = arg;
3578
3579 ASSERT3P(checkpoint, !=, NULL);
a1d477c2
MA
3580
3581 if (vd->vdev_ops->vdev_op_remap != NULL)
d2734cce 3582 vdev_indirect_mark_obsolete(vd, offset, size);
a1d477c2 3583 else
d2734cce 3584 metaslab_free_impl(vd, offset, size, *checkpoint);
a1d477c2
MA
3585}
3586
3587static void
3588metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
d2734cce 3589 boolean_t checkpoint)
a1d477c2
MA
3590{
3591 spa_t *spa = vd->vdev_spa;
3592
3593 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3594
d2734cce 3595 if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
a1d477c2
MA
3596 return;
3597
3598 if (spa->spa_vdev_removal != NULL &&
9e052db4 3599 spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
a1d477c2
MA
3600 vdev_is_concrete(vd)) {
3601 /*
3602 * Note: we check if the vdev is concrete because when
3603 * we complete the removal, we first change the vdev to be
3604 * an indirect vdev (in open context), and then (in syncing
3605 * context) clear spa_vdev_removal.
3606 */
d2734cce 3607 free_from_removing_vdev(vd, offset, size);
a1d477c2 3608 } else if (vd->vdev_ops->vdev_op_remap != NULL) {
d2734cce 3609 vdev_indirect_mark_obsolete(vd, offset, size);
a1d477c2 3610 vd->vdev_ops->vdev_op_remap(vd, offset, size,
d2734cce 3611 metaslab_free_impl_cb, &checkpoint);
a1d477c2 3612 } else {
d2734cce 3613 metaslab_free_concrete(vd, offset, size, checkpoint);
a1d477c2
MA
3614 }
3615}
3616
3617typedef struct remap_blkptr_cb_arg {
3618 blkptr_t *rbca_bp;
3619 spa_remap_cb_t rbca_cb;
3620 vdev_t *rbca_remap_vd;
3621 uint64_t rbca_remap_offset;
3622 void *rbca_cb_arg;
3623} remap_blkptr_cb_arg_t;
3624
3625void
3626remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3627 uint64_t size, void *arg)
3628{
3629 remap_blkptr_cb_arg_t *rbca = arg;
3630 blkptr_t *bp = rbca->rbca_bp;
3631
3632 /* We can not remap split blocks. */
3633 if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
3634 return;
3635 ASSERT0(inner_offset);
3636
3637 if (rbca->rbca_cb != NULL) {
3638 /*
3639 * At this point we know that we are not handling split
3640 * blocks and we invoke the callback on the previous
3641 * vdev which must be indirect.
3642 */
3643 ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
3644
3645 rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
3646 rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
3647
3648 /* set up remap_blkptr_cb_arg for the next call */
3649 rbca->rbca_remap_vd = vd;
3650 rbca->rbca_remap_offset = offset;
3651 }
3652
3653 /*
3654 * The phys birth time is that of dva[0]. This ensures that we know
3655 * when each dva was written, so that resilver can determine which
3656 * blocks need to be scrubbed (i.e. those written during the time
3657 * the vdev was offline). It also ensures that the key used in
3658 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If
3659 * we didn't change the phys_birth, a lookup in the ARC for a
3660 * remapped BP could find the data that was previously stored at
3661 * this vdev + offset.
3662 */
3663 vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
3664 DVA_GET_VDEV(&bp->blk_dva[0]));
3665 vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
3666 bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
3667 DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
3668
3669 DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
3670 DVA_SET_OFFSET(&bp->blk_dva[0], offset);
3671}
3672
34dc7c2f 3673/*
a1d477c2
MA
3674 * If the block pointer contains any indirect DVAs, modify them to refer to
3675 * concrete DVAs. Note that this will sometimes not be possible, leaving
3676 * the indirect DVA in place. This happens if the indirect DVA spans multiple
3677 * segments in the mapping (i.e. it is a "split block").
3678 *
3679 * If the BP was remapped, calls the callback on the original dva (note the
3680 * callback can be called multiple times if the original indirect DVA refers
3681 * to another indirect DVA, etc).
3682 *
3683 * Returns TRUE if the BP was remapped.
34dc7c2f 3684 */
a1d477c2
MA
3685boolean_t
3686spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
34dc7c2f 3687{
a1d477c2
MA
3688 remap_blkptr_cb_arg_t rbca;
3689
3690 if (!zfs_remap_blkptr_enable)
3691 return (B_FALSE);
3692
3693 if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
3694 return (B_FALSE);
3695
3696 /*
3697 * Dedup BP's can not be remapped, because ddt_phys_select() depends
3698 * on DVA[0] being the same in the BP as in the DDT (dedup table).
3699 */
3700 if (BP_GET_DEDUP(bp))
3701 return (B_FALSE);
3702
3703 /*
3704 * Gang blocks can not be remapped, because
3705 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
3706 * the BP used to read the gang block header (GBH) being the same
3707 * as the DVA[0] that we allocated for the GBH.
3708 */
3709 if (BP_IS_GANG(bp))
3710 return (B_FALSE);
3711
3712 /*
3713 * Embedded BP's have no DVA to remap.
3714 */
3715 if (BP_GET_NDVAS(bp) < 1)
3716 return (B_FALSE);
3717
3718 /*
3719 * Note: we only remap dva[0]. If we remapped other dvas, we
3720 * would no longer know what their phys birth txg is.
3721 */
3722 dva_t *dva = &bp->blk_dva[0];
3723
34dc7c2f
BB
3724 uint64_t offset = DVA_GET_OFFSET(dva);
3725 uint64_t size = DVA_GET_ASIZE(dva);
a1d477c2
MA
3726 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
3727
3728 if (vd->vdev_ops->vdev_op_remap == NULL)
3729 return (B_FALSE);
3730
3731 rbca.rbca_bp = bp;
3732 rbca.rbca_cb = callback;
3733 rbca.rbca_remap_vd = vd;
3734 rbca.rbca_remap_offset = offset;
3735 rbca.rbca_cb_arg = arg;
3736
3737 /*
3738 * remap_blkptr_cb() will be called in order for each level of
3739 * indirection, until a concrete vdev is reached or a split block is
3740 * encountered. old_vd and old_offset are updated within the callback
3741 * as we go from the one indirect vdev to the next one (either concrete
3742 * or indirect again) in that order.
3743 */
3744 vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
3745
3746 /* Check if the DVA wasn't remapped because it is a split block */
3747 if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
3748 return (B_FALSE);
3749
3750 return (B_TRUE);
3751}
3752
3753/*
3754 * Undo the allocation of a DVA which happened in the given transaction group.
3755 */
3756void
3757metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
3758{
34dc7c2f 3759 metaslab_t *msp;
a1d477c2
MA
3760 vdev_t *vd;
3761 uint64_t vdev = DVA_GET_VDEV(dva);
3762 uint64_t offset = DVA_GET_OFFSET(dva);
3763 uint64_t size = DVA_GET_ASIZE(dva);
3764
3765 ASSERT(DVA_IS_VALID(dva));
3766 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
34dc7c2f 3767
34dc7c2f
BB
3768 if (txg > spa_freeze_txg(spa))
3769 return;
3770
7d2868d5 3771 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
34dc7c2f 3772 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
7d2868d5
BB
3773 zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
3774 (u_longlong_t)vdev, (u_longlong_t)offset,
3775 (u_longlong_t)size);
34dc7c2f
BB
3776 return;
3777 }
3778
a1d477c2
MA
3779 ASSERT(!vd->vdev_removing);
3780 ASSERT(vdev_is_concrete(vd));
3781 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
3782 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
34dc7c2f
BB
3783
3784 if (DVA_GET_GANG(dva))
3785 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3786
a1d477c2 3787 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
93cf2076 3788
a1d477c2 3789 mutex_enter(&msp->ms_lock);
d2734cce 3790 range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
a1d477c2 3791 offset, size);
34dc7c2f 3792
a1d477c2
MA
3793 VERIFY(!msp->ms_condensing);
3794 VERIFY3U(offset, >=, msp->ms_start);
3795 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
d2734cce 3796 VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
a1d477c2
MA
3797 msp->ms_size);
3798 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3799 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
d2734cce 3800 range_tree_add(msp->ms_allocatable, offset, size);
34dc7c2f
BB
3801 mutex_exit(&msp->ms_lock);
3802}
3803
3804/*
d2734cce 3805 * Free the block represented by the given DVA.
34dc7c2f 3806 */
a1d477c2 3807void
d2734cce 3808metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
34dc7c2f
BB
3809{
3810 uint64_t vdev = DVA_GET_VDEV(dva);
3811 uint64_t offset = DVA_GET_OFFSET(dva);
3812 uint64_t size = DVA_GET_ASIZE(dva);
a1d477c2 3813 vdev_t *vd = vdev_lookup_top(spa, vdev);
34dc7c2f
BB
3814
3815 ASSERT(DVA_IS_VALID(dva));
a1d477c2 3816 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
34dc7c2f 3817
a1d477c2 3818 if (DVA_GET_GANG(dva)) {
34dc7c2f 3819 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
34dc7c2f
BB
3820 }
3821
d2734cce 3822 metaslab_free_impl(vd, offset, size, checkpoint);
34dc7c2f
BB
3823}
3824
3dfb57a3
DB
3825/*
3826 * Reserve some allocation slots. The reservation system must be called
3827 * before we call into the allocator. If there aren't any available slots
3828 * then the I/O will be throttled until an I/O completes and its slots are
3829 * freed up. The function returns true if it was successful in placing
3830 * the reservation.
3831 */
3832boolean_t
492f64e9
PD
3833metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
3834 zio_t *zio, int flags)
3dfb57a3
DB
3835{
3836 uint64_t available_slots = 0;
3dfb57a3 3837 boolean_t slot_reserved = B_FALSE;
492f64e9 3838 uint64_t max = mc->mc_alloc_max_slots[allocator];
3dfb57a3
DB
3839
3840 ASSERT(mc->mc_alloc_throttle_enabled);
3841 mutex_enter(&mc->mc_lock);
3842
492f64e9 3843 uint64_t reserved_slots =
424fd7c3 3844 zfs_refcount_count(&mc->mc_alloc_slots[allocator]);
492f64e9
PD
3845 if (reserved_slots < max)
3846 available_slots = max - reserved_slots;
3dfb57a3 3847
cc99f275
DB
3848 if (slots <= available_slots || GANG_ALLOCATION(flags) ||
3849 flags & METASLAB_MUST_RESERVE) {
3dfb57a3
DB
3850 /*
3851 * We reserve the slots individually so that we can unreserve
3852 * them individually when an I/O completes.
3853 */
1c27024e 3854 for (int d = 0; d < slots; d++) {
492f64e9 3855 reserved_slots =
c13060e4 3856 zfs_refcount_add(&mc->mc_alloc_slots[allocator],
492f64e9 3857 zio);
3dfb57a3
DB
3858 }
3859 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
3860 slot_reserved = B_TRUE;
3861 }
3862
3863 mutex_exit(&mc->mc_lock);
3864 return (slot_reserved);
3865}
3866
3867void
492f64e9
PD
3868metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
3869 int allocator, zio_t *zio)
3dfb57a3 3870{
3dfb57a3
DB
3871 ASSERT(mc->mc_alloc_throttle_enabled);
3872 mutex_enter(&mc->mc_lock);
1c27024e 3873 for (int d = 0; d < slots; d++) {
424fd7c3 3874 (void) zfs_refcount_remove(&mc->mc_alloc_slots[allocator],
492f64e9 3875 zio);
3dfb57a3
DB
3876 }
3877 mutex_exit(&mc->mc_lock);
3878}
3879
a1d477c2
MA
3880static int
3881metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
3882 uint64_t txg)
3883{
3884 metaslab_t *msp;
3885 spa_t *spa = vd->vdev_spa;
3886 int error = 0;
3887
3888 if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
3889 return (ENXIO);
3890
3891 ASSERT3P(vd->vdev_ms, !=, NULL);
3892 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3893
3894 mutex_enter(&msp->ms_lock);
3895
3896 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
492f64e9 3897 error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
a1d477c2 3898
d2734cce
SD
3899 if (error == 0 &&
3900 !range_tree_contains(msp->ms_allocatable, offset, size))
a1d477c2
MA
3901 error = SET_ERROR(ENOENT);
3902
3903 if (error || txg == 0) { /* txg == 0 indicates dry run */
3904 mutex_exit(&msp->ms_lock);
3905 return (error);
3906 }
3907
3908 VERIFY(!msp->ms_condensing);
3909 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3910 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
d2734cce
SD
3911 VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
3912 msp->ms_size);
3913 range_tree_remove(msp->ms_allocatable, offset, size);
a1d477c2
MA
3914
3915 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
d2734cce 3916 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
a1d477c2 3917 vdev_dirty(vd, VDD_METASLAB, msp, txg);
d2734cce
SD
3918 range_tree_add(msp->ms_allocating[txg & TXG_MASK],
3919 offset, size);
a1d477c2
MA
3920 }
3921
3922 mutex_exit(&msp->ms_lock);
3923
3924 return (0);
3925}
3926
3927typedef struct metaslab_claim_cb_arg_t {
3928 uint64_t mcca_txg;
3929 int mcca_error;
3930} metaslab_claim_cb_arg_t;
3931
3932/* ARGSUSED */
3933static void
3934metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3935 uint64_t size, void *arg)
3936{
3937 metaslab_claim_cb_arg_t *mcca_arg = arg;
3938
3939 if (mcca_arg->mcca_error == 0) {
3940 mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
3941 size, mcca_arg->mcca_txg);
3942 }
3943}
3944
3945int
3946metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
3947{
3948 if (vd->vdev_ops->vdev_op_remap != NULL) {
3949 metaslab_claim_cb_arg_t arg;
3950
3951 /*
3952 * Only zdb(1M) can claim on indirect vdevs. This is used
3953 * to detect leaks of mapped space (that are not accounted
3954 * for in the obsolete counts, spacemap, or bpobj).
3955 */
3956 ASSERT(!spa_writeable(vd->vdev_spa));
3957 arg.mcca_error = 0;
3958 arg.mcca_txg = txg;
3959
3960 vd->vdev_ops->vdev_op_remap(vd, offset, size,
3961 metaslab_claim_impl_cb, &arg);
3962
3963 if (arg.mcca_error == 0) {
3964 arg.mcca_error = metaslab_claim_concrete(vd,
3965 offset, size, txg);
3966 }
3967 return (arg.mcca_error);
3968 } else {
3969 return (metaslab_claim_concrete(vd, offset, size, txg));
3970 }
3971}
3972
3973/*
3974 * Intent log support: upon opening the pool after a crash, notify the SPA
3975 * of blocks that the intent log has allocated for immediate write, but
3976 * which are still considered free by the SPA because the last transaction
3977 * group didn't commit yet.
3978 */
3979static int
3980metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
3981{
3982 uint64_t vdev = DVA_GET_VDEV(dva);
3983 uint64_t offset = DVA_GET_OFFSET(dva);
3984 uint64_t size = DVA_GET_ASIZE(dva);
3985 vdev_t *vd;
3986
3987 if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
3988 return (SET_ERROR(ENXIO));
3989 }
3990
3991 ASSERT(DVA_IS_VALID(dva));
3992
3993 if (DVA_GET_GANG(dva))
3994 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3995
3996 return (metaslab_claim_impl(vd, offset, size, txg));
3997}
3998
34dc7c2f
BB
3999int
4000metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
4e21fd06 4001 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
492f64e9 4002 zio_alloc_list_t *zal, zio_t *zio, int allocator)
34dc7c2f
BB
4003{
4004 dva_t *dva = bp->blk_dva;
4005 dva_t *hintdva = hintbp->blk_dva;
1c27024e 4006 int error = 0;
34dc7c2f 4007
b128c09f 4008 ASSERT(bp->blk_birth == 0);
428870ff 4009 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
b128c09f
BB
4010
4011 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4012
4013 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
4014 spa_config_exit(spa, SCL_ALLOC, FTAG);
2e528b49 4015 return (SET_ERROR(ENOSPC));
b128c09f 4016 }
34dc7c2f
BB
4017
4018 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
4019 ASSERT(BP_GET_NDVAS(bp) == 0);
4020 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
4e21fd06 4021 ASSERT3P(zal, !=, NULL);
34dc7c2f 4022
1c27024e 4023 for (int d = 0; d < ndvas; d++) {
34dc7c2f 4024 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
492f64e9 4025 txg, flags, zal, allocator);
93cf2076 4026 if (error != 0) {
34dc7c2f 4027 for (d--; d >= 0; d--) {
a1d477c2 4028 metaslab_unalloc_dva(spa, &dva[d], txg);
3dfb57a3 4029 metaslab_group_alloc_decrement(spa,
492f64e9
PD
4030 DVA_GET_VDEV(&dva[d]), zio, flags,
4031 allocator, B_FALSE);
34dc7c2f
BB
4032 bzero(&dva[d], sizeof (dva_t));
4033 }
b128c09f 4034 spa_config_exit(spa, SCL_ALLOC, FTAG);
34dc7c2f 4035 return (error);
3dfb57a3
DB
4036 } else {
4037 /*
4038 * Update the metaslab group's queue depth
4039 * based on the newly allocated dva.
4040 */
4041 metaslab_group_alloc_increment(spa,
492f64e9 4042 DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
34dc7c2f 4043 }
3dfb57a3 4044
34dc7c2f
BB
4045 }
4046 ASSERT(error == 0);
4047 ASSERT(BP_GET_NDVAS(bp) == ndvas);
4048
b128c09f
BB
4049 spa_config_exit(spa, SCL_ALLOC, FTAG);
4050
efe7978d 4051 BP_SET_BIRTH(bp, txg, 0);
b128c09f 4052
34dc7c2f
BB
4053 return (0);
4054}
4055
4056void
4057metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
4058{
4059 const dva_t *dva = bp->blk_dva;
1c27024e 4060 int ndvas = BP_GET_NDVAS(bp);
34dc7c2f
BB
4061
4062 ASSERT(!BP_IS_HOLE(bp));
428870ff 4063 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
b128c09f 4064
d2734cce
SD
4065 /*
4066 * If we have a checkpoint for the pool we need to make sure that
4067 * the blocks that we free that are part of the checkpoint won't be
4068 * reused until the checkpoint is discarded or we revert to it.
4069 *
4070 * The checkpoint flag is passed down the metaslab_free code path
4071 * and is set whenever we want to add a block to the checkpoint's
4072 * accounting. That is, we "checkpoint" blocks that existed at the
4073 * time the checkpoint was created and are therefore referenced by
4074 * the checkpointed uberblock.
4075 *
4076 * Note that, we don't checkpoint any blocks if the current
4077 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
4078 * normally as they will be referenced by the checkpointed uberblock.
4079 */
4080 boolean_t checkpoint = B_FALSE;
4081 if (bp->blk_birth <= spa->spa_checkpoint_txg &&
4082 spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
4083 /*
4084 * At this point, if the block is part of the checkpoint
4085 * there is no way it was created in the current txg.
4086 */
4087 ASSERT(!now);
4088 ASSERT3U(spa_syncing_txg(spa), ==, txg);
4089 checkpoint = B_TRUE;
4090 }
4091
b128c09f 4092 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
34dc7c2f 4093
a1d477c2
MA
4094 for (int d = 0; d < ndvas; d++) {
4095 if (now) {
4096 metaslab_unalloc_dva(spa, &dva[d], txg);
4097 } else {
d2734cce
SD
4098 ASSERT3U(txg, ==, spa_syncing_txg(spa));
4099 metaslab_free_dva(spa, &dva[d], checkpoint);
a1d477c2
MA
4100 }
4101 }
b128c09f
BB
4102
4103 spa_config_exit(spa, SCL_FREE, FTAG);
34dc7c2f
BB
4104}
4105
4106int
4107metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
4108{
4109 const dva_t *dva = bp->blk_dva;
4110 int ndvas = BP_GET_NDVAS(bp);
1c27024e 4111 int error = 0;
34dc7c2f
BB
4112
4113 ASSERT(!BP_IS_HOLE(bp));
4114
b128c09f
BB
4115 if (txg != 0) {
4116 /*
4117 * First do a dry run to make sure all DVAs are claimable,
4118 * so we don't have to unwind from partial failures below.
4119 */
4120 if ((error = metaslab_claim(spa, bp, 0)) != 0)
4121 return (error);
4122 }
4123
4124 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4125
cc99f275
DB
4126 for (int d = 0; d < ndvas; d++) {
4127 error = metaslab_claim_dva(spa, &dva[d], txg);
4128 if (error != 0)
b128c09f 4129 break;
cc99f275 4130 }
b128c09f
BB
4131
4132 spa_config_exit(spa, SCL_ALLOC, FTAG);
4133
4134 ASSERT(error == 0 || txg == 0);
34dc7c2f 4135
b128c09f 4136 return (error);
34dc7c2f 4137}
920dd524 4138
d1d7e268
MK
4139void
4140metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
920dd524
ED
4141{
4142 const dva_t *dva = bp->blk_dva;
4143 int ndvas = BP_GET_NDVAS(bp);
4144 uint64_t psize = BP_GET_PSIZE(bp);
4145 int d;
4146 vdev_t *vd;
4147
4148 ASSERT(!BP_IS_HOLE(bp));
9b67f605 4149 ASSERT(!BP_IS_EMBEDDED(bp));
920dd524
ED
4150 ASSERT(psize > 0);
4151
4152 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
4153
4154 for (d = 0; d < ndvas; d++) {
4155 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
4156 continue;
4157 atomic_add_64(&vd->vdev_pending_fastwrite, psize);
4158 }
4159
4160 spa_config_exit(spa, SCL_VDEV, FTAG);
4161}
4162
d1d7e268
MK
4163void
4164metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
920dd524
ED
4165{
4166 const dva_t *dva = bp->blk_dva;
4167 int ndvas = BP_GET_NDVAS(bp);
4168 uint64_t psize = BP_GET_PSIZE(bp);
4169 int d;
4170 vdev_t *vd;
4171
4172 ASSERT(!BP_IS_HOLE(bp));
9b67f605 4173 ASSERT(!BP_IS_EMBEDDED(bp));
920dd524
ED
4174 ASSERT(psize > 0);
4175
4176 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
4177
4178 for (d = 0; d < ndvas; d++) {
4179 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
4180 continue;
4181 ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
4182 atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
4183 }
4184
4185 spa_config_exit(spa, SCL_VDEV, FTAG);
4186}
30b92c1d 4187
a1d477c2
MA
4188/* ARGSUSED */
4189static void
4190metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
4191 uint64_t size, void *arg)
4192{
4193 if (vd->vdev_ops == &vdev_indirect_ops)
4194 return;
4195
4196 metaslab_check_free_impl(vd, offset, size);
4197}
4198
4199static void
4200metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
4201{
4202 metaslab_t *msp;
4203 ASSERTV(spa_t *spa = vd->vdev_spa);
4204
4205 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
4206 return;
4207
4208 if (vd->vdev_ops->vdev_op_remap != NULL) {
4209 vd->vdev_ops->vdev_op_remap(vd, offset, size,
4210 metaslab_check_free_impl_cb, NULL);
4211 return;
4212 }
4213
4214 ASSERT(vdev_is_concrete(vd));
4215 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
4216 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
4217
4218 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
4219
4220 mutex_enter(&msp->ms_lock);
4221 if (msp->ms_loaded)
d2734cce 4222 range_tree_verify(msp->ms_allocatable, offset, size);
a1d477c2 4223
d2734cce
SD
4224 range_tree_verify(msp->ms_freeing, offset, size);
4225 range_tree_verify(msp->ms_checkpointing, offset, size);
4226 range_tree_verify(msp->ms_freed, offset, size);
a1d477c2 4227 for (int j = 0; j < TXG_DEFER_SIZE; j++)
d2734cce 4228 range_tree_verify(msp->ms_defer[j], offset, size);
a1d477c2
MA
4229 mutex_exit(&msp->ms_lock);
4230}
4231
13fe0198
MA
4232void
4233metaslab_check_free(spa_t *spa, const blkptr_t *bp)
4234{
13fe0198
MA
4235 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
4236 return;
4237
4238 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1c27024e 4239 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
93cf2076
GW
4240 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
4241 vdev_t *vd = vdev_lookup_top(spa, vdev);
4242 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
13fe0198 4243 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
13fe0198 4244
a1d477c2
MA
4245 if (DVA_GET_GANG(&bp->blk_dva[i]))
4246 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
4247
4248 ASSERT3P(vd, !=, NULL);
13fe0198 4249
a1d477c2 4250 metaslab_check_free_impl(vd, offset, size);
13fe0198
MA
4251 }
4252 spa_config_exit(spa, SCL_VDEV, FTAG);
4253}
4254
93ce2b4c 4255#if defined(_KERNEL)
cc99f275 4256/* BEGIN CSTYLED */
99b14de4 4257module_param(metaslab_aliquot, ulong, 0644);
99b14de4
ED
4258MODULE_PARM_DESC(metaslab_aliquot,
4259 "allocation granularity (a.k.a. stripe size)");
02730c33
BB
4260
4261module_param(metaslab_debug_load, int, 0644);
93cf2076
GW
4262MODULE_PARM_DESC(metaslab_debug_load,
4263 "load all metaslabs when pool is first opened");
02730c33
BB
4264
4265module_param(metaslab_debug_unload, int, 0644);
1ce04573
BB
4266MODULE_PARM_DESC(metaslab_debug_unload,
4267 "prevent metaslabs from being unloaded");
02730c33
BB
4268
4269module_param(metaslab_preload_enabled, int, 0644);
f3a7f661
GW
4270MODULE_PARM_DESC(metaslab_preload_enabled,
4271 "preload potential metaslabs during reassessment");
f4a4046b 4272
02730c33 4273module_param(zfs_mg_noalloc_threshold, int, 0644);
f4a4046b
TC
4274MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
4275 "percentage of free space for metaslab group to allow allocation");
02730c33
BB
4276
4277module_param(zfs_mg_fragmentation_threshold, int, 0644);
f3a7f661
GW
4278MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
4279 "fragmentation for metaslab group to allow allocation");
4280
02730c33 4281module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
f3a7f661
GW
4282MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
4283 "fragmentation for metaslab to allow allocation");
02730c33
BB
4284
4285module_param(metaslab_fragmentation_factor_enabled, int, 0644);
f3a7f661
GW
4286MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
4287 "use the fragmentation metric to prefer less fragmented metaslabs");
02730c33
BB
4288
4289module_param(metaslab_lba_weighting_enabled, int, 0644);
f3a7f661
GW
4290MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
4291 "prefer metaslabs with lower LBAs");
02730c33
BB
4292
4293module_param(metaslab_bias_enabled, int, 0644);
f3a7f661
GW
4294MODULE_PARM_DESC(metaslab_bias_enabled,
4295 "enable metaslab group biasing");
4e21fd06
DB
4296
4297module_param(zfs_metaslab_segment_weight_enabled, int, 0644);
4298MODULE_PARM_DESC(zfs_metaslab_segment_weight_enabled,
4299 "enable segment-based metaslab selection");
4300
4301module_param(zfs_metaslab_switch_threshold, int, 0644);
4302MODULE_PARM_DESC(zfs_metaslab_switch_threshold,
4303 "segment-based metaslab selection maximum buckets before switching");
a1d477c2 4304
d830d479
MA
4305module_param(metaslab_force_ganging, ulong, 0644);
4306MODULE_PARM_DESC(metaslab_force_ganging,
a1d477c2 4307 "blocks larger than this size are forced to be gang blocks");
cc99f275
DB
4308/* END CSTYLED */
4309
93ce2b4c 4310#endif