mirror_zfs.git / module/zfs/metaslab.c
OpenZFS 9166 - zfs storage pool checkpoint
34dc7c2f
BB
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
428870ff 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
4e21fd06 23 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
2e528b49 24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
34dc7c2f
BB
25 */
26
34dc7c2f 27#include <sys/zfs_context.h>
34dc7c2f
BB
28#include <sys/dmu.h>
29#include <sys/dmu_tx.h>
30#include <sys/space_map.h>
31#include <sys/metaslab_impl.h>
32#include <sys/vdev_impl.h>
33#include <sys/zio.h>
93cf2076 34#include <sys/spa_impl.h>
f3a7f661 35#include <sys/zfeature.h>
a1d477c2 36#include <sys/vdev_indirect_mapping.h>
d2734cce 37#include <sys/zap.h>
34dc7c2f 38
d1d7e268 39#define WITH_DF_BLOCK_ALLOCATOR
6d974228 40
3dfb57a3
DB
41#define GANG_ALLOCATION(flags) \
42 ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
22c81dd8 43
e8fe6684
ED
44/*
45 * Metaslab granularity, in bytes. This is roughly similar to what would be
46 * referred to as the "stripe size" in traditional RAID arrays. In normal
47 * operation, we will try to write this amount of data to a top-level vdev
48 * before moving on to the next one.
49 */
99b14de4 50unsigned long metaslab_aliquot = 512 << 10;
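/*
 * A minimal sketch of the rotor behavior described above (the helper and
 * its parameters are hypothetical): keep allocating from the current
 * top-level vdev until roughly this many bytes have been written, then
 * move on to the next one.  mg_aliquot itself is derived the same way in
 * metaslab_group_activate() below.
 */
static inline int
example_rotor_should_advance(uint64_t bytes_written, uint64_t vdev_children)
{
	uint64_t aliquot = metaslab_aliquot *
	    (vdev_children > 1 ? vdev_children : 1);

	return (bytes_written >= aliquot);
}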
e8fe6684 51
d830d479
MA
52/*
53 * For testing, make some blocks above a certain size be gang blocks.
54 */
55unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
34dc7c2f 56
d2734cce
SD
57/*
58 * Since we can touch multiple metaslabs (and their respective space maps)
59 * with each transaction group, we benefit from having a smaller space map
60 * block size since it allows us to issue more I/O operations scattered
61 * around the disk.
62 */
63int zfs_metaslab_sm_blksz = (1 << 12);
64
e51be066
GW
65/*
66 * The in-core space map representation is more compact than its on-disk form.
67 * The zfs_condense_pct determines how much more compact the in-core
4e21fd06 68 * space map representation must be before we compact it on-disk.
e51be066
GW
69 * Values should be greater than or equal to 100.
70 */
71int zfs_condense_pct = 200;
72
b02fe35d
AR
73/*
74 * Condensing a metaslab is not guaranteed to actually reduce the amount of
75 * space used on disk. In particular, a space map uses data in increments of
96358617 76 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
b02fe35d
AR
77 * same number of blocks after condensing. Since the goal of condensing is to
78 * reduce the number of IOPs required to read the space map, we only want to
79 * condense when we can be sure we will reduce the number of blocks used by the
80 * space map. Unfortunately, we cannot precisely compute whether or not this is
81 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
82 * we apply the following heuristic: do not condense a spacemap unless the
83 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
84 * blocks.
85 */
86int zfs_metaslab_condense_block_threshold = 4;
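/*
 * A minimal sketch of the heuristics described in the two comments above
 * (the helper and its parameters are hypothetical; the in-tree check lives
 * in metaslab_should_condense()): condensing is only worthwhile when the
 * on-disk space map is both sufficiently larger than its estimated
 * condensed form and larger than the block threshold.
 */
static inline boolean_t
example_worth_condensing(uint64_t ondisk_len, uint64_t condensed_len,
    uint64_t sm_blksz)
{
	uint64_t min_len = zfs_metaslab_condense_block_threshold * sm_blksz;

	return (ondisk_len > min_len &&
	    ondisk_len >= condensed_len * zfs_condense_pct / 100);
}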
87
ac72fac3
GW
88/*
89 * The zfs_mg_noalloc_threshold defines which metaslab groups should
90 * be eligible for allocation. The value is defined as a percentage of
f3a7f661 91 * free space. Metaslab groups that have more free space than
ac72fac3
GW
92 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
93 * a metaslab group's free space is less than or equal to the
94 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
95 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
96 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
97 * groups are allowed to accept allocations. Gang blocks are always
98 * eligible to allocate on any metaslab group. The default value of 0 means
99 * no metaslab group will be excluded based on this criterion.
100 */
101int zfs_mg_noalloc_threshold = 0;
6d974228 102
f3a7f661
GW
103/*
104 * Metaslab groups are considered eligible for allocations if their
105 * fragmentation metric (measured as a percentage) is less than or equal to
106 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
107 * then it will be skipped unless all metaslab groups within the metaslab
108 * class have also crossed this threshold.
109 */
110int zfs_mg_fragmentation_threshold = 85;
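/*
 * A minimal sketch of how the two thresholds above combine into a group's
 * eligibility, mirroring the mg_allocatable computation in
 * metaslab_group_alloc_update() below (the helper and its parameters are
 * hypothetical).
 */
static inline boolean_t
example_group_eligible(uint64_t free_capacity_pct, uint64_t fragmentation)
{
	return (free_capacity_pct > zfs_mg_noalloc_threshold &&
	    (fragmentation == ZFS_FRAG_INVALID ||
	    fragmentation <= zfs_mg_fragmentation_threshold));
}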
111
112/*
113 * Allow metaslabs to keep their active state as long as their fragmentation
114 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
115 * active metaslab that exceeds this threshold will no longer keep its active
116 * status allowing better metaslabs to be selected.
117 */
118int zfs_metaslab_fragmentation_threshold = 70;
119
428870ff 120/*
aa7d06a9 121 * When set, we load all metaslabs when the pool is first opened.
428870ff 122 */
aa7d06a9
GW
123int metaslab_debug_load = 0;
124
125/*
126 * When set, metaslabs are prevented from being unloaded.
127 */
128int metaslab_debug_unload = 0;
428870ff 129
9babb374
BB
130/*
131 * Minimum size which forces the dynamic allocator to change
428870ff 132 * its allocation strategy. Once the space map cannot satisfy
9babb374
BB
133 * an allocation of this size then it switches to using a more
134 * aggressive strategy (i.e. search by size rather than offset).
135 */
4e21fd06 136uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
9babb374
BB
137
138/*
139 * The minimum free space, in percent, which must be available
140 * in a space map to continue allocations in a first-fit fashion.
4e21fd06 141 * Once the space map's free space drops below this level we dynamically
9babb374
BB
142 * switch to using best-fit allocations.
143 */
428870ff
BB
144int metaslab_df_free_pct = 4;
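/*
 * A minimal sketch of the switch decision described above, matching the
 * check in metaslab_df_alloc() below (the helper and its parameters are
 * hypothetical).
 */
static inline boolean_t
example_df_use_best_fit(uint64_t max_contig_size, int free_pct)
{
	return (max_contig_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct);
}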
145
428870ff 146/*
93cf2076 147 * Percentage of all cpus that can be used by the metaslab taskq.
428870ff 148 */
93cf2076 149int metaslab_load_pct = 50;
428870ff
BB
150
151/*
93cf2076
GW
152 * Determines how many txgs a metaslab may remain loaded without having any
153 * allocations from it. As long as a metaslab continues to be used we will
154 * keep it loaded.
428870ff 155 */
93cf2076 156int metaslab_unload_delay = TXG_SIZE * 2;
9babb374 157
93cf2076
GW
158/*
159 * Max number of metaslabs per group to preload.
160 */
161int metaslab_preload_limit = SPA_DVAS_PER_BP;
162
163/*
164 * Enable/disable preloading of metaslabs.
165 */
f3a7f661 166int metaslab_preload_enabled = B_TRUE;
93cf2076
GW
167
168/*
f3a7f661 169 * Enable/disable fragmentation weighting on metaslabs.
93cf2076 170 */
f3a7f661 171int metaslab_fragmentation_factor_enabled = B_TRUE;
93cf2076 172
f3a7f661
GW
173/*
174 * Enable/disable lba weighting (i.e. outer tracks are given preference).
175 */
176int metaslab_lba_weighting_enabled = B_TRUE;
177
178/*
179 * Enable/disable metaslab group biasing.
180 */
181int metaslab_bias_enabled = B_TRUE;
182
4e21fd06 183
a1d477c2
MA
184/*
185 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
186 */
187boolean_t zfs_remap_blkptr_enable = B_TRUE;
188
4e21fd06
DB
189/*
190 * Enable/disable segment-based metaslab selection.
191 */
192int zfs_metaslab_segment_weight_enabled = B_TRUE;
193
194/*
195 * When using segment-based metaslab selection, we will continue
196 * allocating from the active metaslab until we have exhausted
197 * zfs_metaslab_switch_threshold of its buckets.
198 */
199int zfs_metaslab_switch_threshold = 2;
200
201/*
202 * Internal switch to enable/disable the metaslab allocation tracing
203 * facility.
204 */
205#ifdef _METASLAB_TRACING
206boolean_t metaslab_trace_enabled = B_TRUE;
207#endif
208
209/*
210 * Maximum entries that the metaslab allocation tracing facility will keep
211 * in a given list when running in non-debug mode. We limit the number
212 * of entries in non-debug mode to prevent us from using up too much memory.
213 * The limit should be sufficiently large that we don't expect any allocation
214 * to ever exceed this value. In debug mode, the system will panic if this
215 * limit is ever reached allowing for further investigation.
216 */
217#ifdef _METASLAB_TRACING
218uint64_t metaslab_trace_max_entries = 5000;
219#endif
220
221static uint64_t metaslab_weight(metaslab_t *);
222static void metaslab_set_fragmentation(metaslab_t *);
d2734cce 223static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
a1d477c2 224static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
4e21fd06
DB
225
226#ifdef _METASLAB_TRACING
227kmem_cache_t *metaslab_alloc_trace_cache;
228#endif
93cf2076 229
34dc7c2f
BB
230/*
231 * ==========================================================================
232 * Metaslab classes
233 * ==========================================================================
234 */
235metaslab_class_t *
93cf2076 236metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
34dc7c2f
BB
237{
238 metaslab_class_t *mc;
239
79c76d5b 240 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
34dc7c2f 241
428870ff 242 mc->mc_spa = spa;
34dc7c2f 243 mc->mc_rotor = NULL;
9babb374 244 mc->mc_ops = ops;
3dfb57a3
DB
245 mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
246 refcount_create_tracked(&mc->mc_alloc_slots);
34dc7c2f
BB
247
248 return (mc);
249}
250
251void
252metaslab_class_destroy(metaslab_class_t *mc)
253{
428870ff
BB
254 ASSERT(mc->mc_rotor == NULL);
255 ASSERT(mc->mc_alloc == 0);
256 ASSERT(mc->mc_deferred == 0);
257 ASSERT(mc->mc_space == 0);
258 ASSERT(mc->mc_dspace == 0);
34dc7c2f 259
3dfb57a3
DB
260 refcount_destroy(&mc->mc_alloc_slots);
261 mutex_destroy(&mc->mc_lock);
34dc7c2f
BB
262 kmem_free(mc, sizeof (metaslab_class_t));
263}
264
428870ff
BB
265int
266metaslab_class_validate(metaslab_class_t *mc)
34dc7c2f 267{
428870ff
BB
268 metaslab_group_t *mg;
269 vdev_t *vd;
34dc7c2f 270
428870ff
BB
271 /*
272 * Must hold one of the spa_config locks.
273 */
274 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
275 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
34dc7c2f 276
428870ff
BB
277 if ((mg = mc->mc_rotor) == NULL)
278 return (0);
279
280 do {
281 vd = mg->mg_vd;
282 ASSERT(vd->vdev_mg != NULL);
283 ASSERT3P(vd->vdev_top, ==, vd);
284 ASSERT3P(mg->mg_class, ==, mc);
285 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
286 } while ((mg = mg->mg_next) != mc->mc_rotor);
287
288 return (0);
34dc7c2f
BB
289}
290
291void
428870ff
BB
292metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
293 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
34dc7c2f 294{
428870ff
BB
295 atomic_add_64(&mc->mc_alloc, alloc_delta);
296 atomic_add_64(&mc->mc_deferred, defer_delta);
297 atomic_add_64(&mc->mc_space, space_delta);
298 atomic_add_64(&mc->mc_dspace, dspace_delta);
299}
34dc7c2f 300
428870ff
BB
301uint64_t
302metaslab_class_get_alloc(metaslab_class_t *mc)
303{
304 return (mc->mc_alloc);
305}
34dc7c2f 306
428870ff
BB
307uint64_t
308metaslab_class_get_deferred(metaslab_class_t *mc)
309{
310 return (mc->mc_deferred);
311}
34dc7c2f 312
428870ff
BB
313uint64_t
314metaslab_class_get_space(metaslab_class_t *mc)
315{
316 return (mc->mc_space);
317}
34dc7c2f 318
428870ff
BB
319uint64_t
320metaslab_class_get_dspace(metaslab_class_t *mc)
321{
322 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
34dc7c2f
BB
323}
324
f3a7f661
GW
325void
326metaslab_class_histogram_verify(metaslab_class_t *mc)
327{
328 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
329 uint64_t *mc_hist;
1c27024e 330 int i;
f3a7f661
GW
331
332 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
333 return;
334
335 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
79c76d5b 336 KM_SLEEP);
f3a7f661 337
1c27024e 338 for (int c = 0; c < rvd->vdev_children; c++) {
f3a7f661
GW
339 vdev_t *tvd = rvd->vdev_child[c];
340 metaslab_group_t *mg = tvd->vdev_mg;
341
342 /*
343 * Skip any holes, uninitialized top-levels, or
344 * vdevs that are not in this metaslab class.
345 */
a1d477c2 346 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
f3a7f661
GW
347 mg->mg_class != mc) {
348 continue;
349 }
350
351 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
352 mc_hist[i] += mg->mg_histogram[i];
353 }
354
355 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
356 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
357
358 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
359}
360
361/*
362 * Calculate the metaslab class's fragmentation metric. The metric
363 * is weighted based on the space contribution of each metaslab group.
364 * The return value will be a number between 0 and 100 (inclusive), or
365 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
366 * zfs_frag_table for more information about the metric.
367 */
368uint64_t
369metaslab_class_fragmentation(metaslab_class_t *mc)
370{
371 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
372 uint64_t fragmentation = 0;
f3a7f661
GW
373
374 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
375
1c27024e 376 for (int c = 0; c < rvd->vdev_children; c++) {
f3a7f661
GW
377 vdev_t *tvd = rvd->vdev_child[c];
378 metaslab_group_t *mg = tvd->vdev_mg;
379
380 /*
a1d477c2
MA
381 * Skip any holes, uninitialized top-levels,
382 * or vdevs that are not in this metaslab class.
f3a7f661 383 */
a1d477c2 384 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
f3a7f661
GW
385 mg->mg_class != mc) {
386 continue;
387 }
388
389 /*
390 * If a metaslab group does not contain a fragmentation
391 * metric then just bail out.
392 */
393 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
394 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
395 return (ZFS_FRAG_INVALID);
396 }
397
398 /*
399 * Determine how much this metaslab_group is contributing
400 * to the overall pool fragmentation metric.
401 */
402 fragmentation += mg->mg_fragmentation *
403 metaslab_group_get_space(mg);
404 }
405 fragmentation /= metaslab_class_get_space(mc);
406
407 ASSERT3U(fragmentation, <=, 100);
408 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
409 return (fragmentation);
410}
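/*
 * Worked example of the weighted average above (all numbers hypothetical):
 * a class with one 1 TB group at 10% fragmentation and one 3 TB group at
 * 50% fragmentation yields (10 * 1 + 50 * 3) / (1 + 3) = 40 for the class
 * as a whole.
 */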
411
412/*
413 * Calculate the amount of expandable space that is available in
414 * this metaslab class. If a device is expanded then its expandable
415 * space will be the amount of allocatable space that is currently not
416 * part of this metaslab class.
417 */
418uint64_t
419metaslab_class_expandable_space(metaslab_class_t *mc)
420{
421 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
422 uint64_t space = 0;
f3a7f661
GW
423
424 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
1c27024e 425 for (int c = 0; c < rvd->vdev_children; c++) {
f3a7f661
GW
426 vdev_t *tvd = rvd->vdev_child[c];
427 metaslab_group_t *mg = tvd->vdev_mg;
428
a1d477c2 429 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
f3a7f661
GW
430 mg->mg_class != mc) {
431 continue;
432 }
433
0f676dc2
GM
434 /*
435 * Calculate if we have enough space to add additional
436 * metaslabs. We report the expandable space in terms
437 * of the metaslab size since that's the unit of expansion.
438 */
439 space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
440 1ULL << tvd->vdev_ms_shift);
f3a7f661
GW
441 }
442 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
443 return (space);
444}
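/*
 * Worked example of the P2ALIGN rounding above (all numbers hypothetical):
 * with 128 GB metaslabs (vdev_ms_shift == 37) and 300 GB of not-yet-added
 * capacity on a top-level vdev, only 256 GB (two whole metaslabs) is
 * reported as expandable; the 44 GB remainder cannot form a full metaslab.
 */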
445
34dc7c2f
BB
446static int
447metaslab_compare(const void *x1, const void *x2)
448{
ee36c709
GN
449 const metaslab_t *m1 = (const metaslab_t *)x1;
450 const metaslab_t *m2 = (const metaslab_t *)x2;
34dc7c2f 451
ee36c709
GN
452 int cmp = AVL_CMP(m2->ms_weight, m1->ms_weight);
453 if (likely(cmp))
454 return (cmp);
34dc7c2f 455
ee36c709 456 IMPLY(AVL_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
34dc7c2f 457
ee36c709 458 return (AVL_CMP(m1->ms_start, m2->ms_start));
34dc7c2f
BB
459}
460
4e21fd06
DB
461/*
462 * Verify that the space accounting on disk matches the in-core range_trees.
463 */
464void
465metaslab_verify_space(metaslab_t *msp, uint64_t txg)
466{
467 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
468 uint64_t allocated = 0;
4e21fd06 469 uint64_t sm_free_space, msp_free_space;
4e21fd06
DB
470
471 ASSERT(MUTEX_HELD(&msp->ms_lock));
472
473 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
474 return;
475
476 /*
477 * We can only verify the metaslab space when we're called
478 * from syncing context with a loaded metaslab that has an allocated
479 * space map. Calling this in non-syncing context does not
480 * provide a consistent view of the metaslab since we're performing
481 * allocations in the future.
482 */
483 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
484 !msp->ms_loaded)
485 return;
486
487 sm_free_space = msp->ms_size - space_map_allocated(msp->ms_sm) -
488 space_map_alloc_delta(msp->ms_sm);
489
490 /*
491 * Account for future allocations since we would have already
492 * deducted that space from the ms_freetree.
493 */
1c27024e 494 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
4e21fd06 495 allocated +=
d2734cce 496 range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
4e21fd06 497 }
4e21fd06 498
d2734cce
SD
499 msp_free_space = range_tree_space(msp->ms_allocatable) + allocated +
500 msp->ms_deferspace + range_tree_space(msp->ms_freed);
4e21fd06
DB
501
502 VERIFY3U(sm_free_space, ==, msp_free_space);
503}
504
505/*
506 * ==========================================================================
507 * Metaslab groups
508 * ==========================================================================
509 */
ac72fac3
GW
510/*
511 * Update the allocatable flag and the metaslab group's capacity.
512 * The allocatable flag is set to true if the capacity is below
3dfb57a3
DB
513 * the zfs_mg_noalloc_threshold or has a fragmentation value that is
514 * greater than zfs_mg_fragmentation_threshold. If a metaslab group
515 * transitions from allocatable to non-allocatable or vice versa then the
516 * metaslab group's class is updated to reflect the transition.
ac72fac3
GW
517 */
518static void
519metaslab_group_alloc_update(metaslab_group_t *mg)
520{
521 vdev_t *vd = mg->mg_vd;
522 metaslab_class_t *mc = mg->mg_class;
523 vdev_stat_t *vs = &vd->vdev_stat;
524 boolean_t was_allocatable;
3dfb57a3 525 boolean_t was_initialized;
ac72fac3
GW
526
527 ASSERT(vd == vd->vdev_top);
a1d477c2
MA
528 ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
529 SCL_ALLOC);
ac72fac3
GW
530
531 mutex_enter(&mg->mg_lock);
532 was_allocatable = mg->mg_allocatable;
3dfb57a3 533 was_initialized = mg->mg_initialized;
ac72fac3
GW
534
535 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
536 (vs->vs_space + 1);
537
3dfb57a3
DB
538 mutex_enter(&mc->mc_lock);
539
540 /*
541 * If the metaslab group was just added then it won't
542 * have any space until we finish syncing out this txg.
543 * At that point we will consider it initialized and available
544 * for allocations. We also don't consider non-activated
545 * metaslab groups (e.g. vdevs that are in the middle of being removed)
546 * to be initialized, because they can't be used for allocation.
547 */
548 mg->mg_initialized = metaslab_group_initialized(mg);
549 if (!was_initialized && mg->mg_initialized) {
550 mc->mc_groups++;
551 } else if (was_initialized && !mg->mg_initialized) {
552 ASSERT3U(mc->mc_groups, >, 0);
553 mc->mc_groups--;
554 }
555 if (mg->mg_initialized)
556 mg->mg_no_free_space = B_FALSE;
557
f3a7f661
GW
558 /*
559 * A metaslab group is considered allocatable if it has plenty
560 * of free space or is not heavily fragmented. We only take
561 * fragmentation into account if the metaslab group has a valid
562 * fragmentation metric (i.e. a value between 0 and 100).
563 */
3dfb57a3
DB
564 mg->mg_allocatable = (mg->mg_activation_count > 0 &&
565 mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
f3a7f661
GW
566 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
567 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
ac72fac3
GW
568
569 /*
570 * The mc_alloc_groups maintains a count of the number of
571 * groups in this metaslab class that are still above the
572 * zfs_mg_noalloc_threshold. This is used by the allocating
573 * threads to determine if they should avoid allocations to
574 * a given group. The allocator will avoid allocations to a group
575 * if that group has reached or is below the zfs_mg_noalloc_threshold
576 * and there are still other groups that are above the threshold.
577 * When a group transitions from allocatable to non-allocatable or
578 * vice versa we update the metaslab class to reflect that change.
579 * When the mc_alloc_groups value drops to 0 that means that all
580 * groups have reached the zfs_mg_noalloc_threshold making all groups
581 * eligible for allocations. This effectively means that all devices
582 * are balanced again.
583 */
584 if (was_allocatable && !mg->mg_allocatable)
585 mc->mc_alloc_groups--;
586 else if (!was_allocatable && mg->mg_allocatable)
587 mc->mc_alloc_groups++;
3dfb57a3 588 mutex_exit(&mc->mc_lock);
f3a7f661 589
ac72fac3
GW
590 mutex_exit(&mg->mg_lock);
591}
592
34dc7c2f
BB
593metaslab_group_t *
594metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
595{
596 metaslab_group_t *mg;
597
79c76d5b 598 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
34dc7c2f
BB
599 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
600 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
601 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
34dc7c2f 602 mg->mg_vd = vd;
428870ff
BB
603 mg->mg_class = mc;
604 mg->mg_activation_count = 0;
3dfb57a3
DB
605 mg->mg_initialized = B_FALSE;
606 mg->mg_no_free_space = B_TRUE;
607 refcount_create_tracked(&mg->mg_alloc_queue_depth);
34dc7c2f 608
3c51c5cb 609 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
1229323d 610 maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
93cf2076 611
34dc7c2f
BB
612 return (mg);
613}
614
615void
616metaslab_group_destroy(metaslab_group_t *mg)
617{
428870ff
BB
618 ASSERT(mg->mg_prev == NULL);
619 ASSERT(mg->mg_next == NULL);
620 /*
621 * We may have gone below zero with the activation count
622 * either because we never activated in the first place or
623 * because we're done, and possibly removing the vdev.
624 */
625 ASSERT(mg->mg_activation_count <= 0);
626
3c51c5cb 627 taskq_destroy(mg->mg_taskq);
34dc7c2f
BB
628 avl_destroy(&mg->mg_metaslab_tree);
629 mutex_destroy(&mg->mg_lock);
3dfb57a3 630 refcount_destroy(&mg->mg_alloc_queue_depth);
34dc7c2f
BB
631 kmem_free(mg, sizeof (metaslab_group_t));
632}
633
428870ff
BB
634void
635metaslab_group_activate(metaslab_group_t *mg)
636{
637 metaslab_class_t *mc = mg->mg_class;
638 metaslab_group_t *mgprev, *mgnext;
639
a1d477c2 640 ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER), !=, 0);
428870ff
BB
641
642 ASSERT(mc->mc_rotor != mg);
643 ASSERT(mg->mg_prev == NULL);
644 ASSERT(mg->mg_next == NULL);
645 ASSERT(mg->mg_activation_count <= 0);
646
647 if (++mg->mg_activation_count <= 0)
648 return;
649
650 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
ac72fac3 651 metaslab_group_alloc_update(mg);
428870ff
BB
652
653 if ((mgprev = mc->mc_rotor) == NULL) {
654 mg->mg_prev = mg;
655 mg->mg_next = mg;
656 } else {
657 mgnext = mgprev->mg_next;
658 mg->mg_prev = mgprev;
659 mg->mg_next = mgnext;
660 mgprev->mg_next = mg;
661 mgnext->mg_prev = mg;
662 }
663 mc->mc_rotor = mg;
664}
665
a1d477c2
MA
666/*
667 * Passivate a metaslab group and remove it from the allocation rotor.
668 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
669 * a metaslab group. This function will momentarily drop spa_config_locks
670 * that are lower than the SCL_ALLOC lock (see comment below).
671 */
428870ff
BB
672void
673metaslab_group_passivate(metaslab_group_t *mg)
674{
675 metaslab_class_t *mc = mg->mg_class;
a1d477c2 676 spa_t *spa = mc->mc_spa;
428870ff 677 metaslab_group_t *mgprev, *mgnext;
a1d477c2 678 int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
428870ff 679
a1d477c2
MA
680 ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
681 (SCL_ALLOC | SCL_ZIO));
428870ff
BB
682
683 if (--mg->mg_activation_count != 0) {
684 ASSERT(mc->mc_rotor != mg);
685 ASSERT(mg->mg_prev == NULL);
686 ASSERT(mg->mg_next == NULL);
687 ASSERT(mg->mg_activation_count < 0);
688 return;
689 }
690
a1d477c2
MA
691 /*
692 * The spa_config_lock is an array of rwlocks, ordered as
693 * follows (from highest to lowest):
694 * SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
695 * SCL_ZIO > SCL_FREE > SCL_VDEV
696 * (For more information about the spa_config_lock see spa_misc.c)
697 * The higher the lock, the broader its coverage. When we passivate
698 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
699 * config locks. However, the metaslab group's taskq might be trying
700 * to preload metaslabs so we must drop the SCL_ZIO lock and any
701 * lower locks to allow the I/O to complete. At a minimum,
702 * we continue to hold the SCL_ALLOC lock, which prevents any future
703 * allocations from taking place and any changes to the vdev tree.
704 */
705 spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
c5528b9b 706 taskq_wait_outstanding(mg->mg_taskq, 0);
a1d477c2 707 spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
f3a7f661 708 metaslab_group_alloc_update(mg);
93cf2076 709
428870ff
BB
710 mgprev = mg->mg_prev;
711 mgnext = mg->mg_next;
712
713 if (mg == mgnext) {
714 mc->mc_rotor = NULL;
715 } else {
716 mc->mc_rotor = mgnext;
717 mgprev->mg_next = mgnext;
718 mgnext->mg_prev = mgprev;
719 }
720
721 mg->mg_prev = NULL;
722 mg->mg_next = NULL;
723}
724
3dfb57a3
DB
725boolean_t
726metaslab_group_initialized(metaslab_group_t *mg)
727{
728 vdev_t *vd = mg->mg_vd;
729 vdev_stat_t *vs = &vd->vdev_stat;
730
731 return (vs->vs_space != 0 && mg->mg_activation_count > 0);
732}
733
f3a7f661
GW
734uint64_t
735metaslab_group_get_space(metaslab_group_t *mg)
736{
737 return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
738}
739
740void
741metaslab_group_histogram_verify(metaslab_group_t *mg)
742{
743 uint64_t *mg_hist;
744 vdev_t *vd = mg->mg_vd;
745 uint64_t ashift = vd->vdev_ashift;
1c27024e 746 int i;
f3a7f661
GW
747
748 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
749 return;
750
751 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
79c76d5b 752 KM_SLEEP);
f3a7f661
GW
753
754 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
755 SPACE_MAP_HISTOGRAM_SIZE + ashift);
756
1c27024e 757 for (int m = 0; m < vd->vdev_ms_count; m++) {
f3a7f661
GW
758 metaslab_t *msp = vd->vdev_ms[m];
759
760 if (msp->ms_sm == NULL)
761 continue;
762
763 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
764 mg_hist[i + ashift] +=
765 msp->ms_sm->sm_phys->smp_histogram[i];
766 }
767
768 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
769 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
770
771 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
772}
773
34dc7c2f 774static void
f3a7f661 775metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
34dc7c2f 776{
f3a7f661
GW
777 metaslab_class_t *mc = mg->mg_class;
778 uint64_t ashift = mg->mg_vd->vdev_ashift;
f3a7f661
GW
779
780 ASSERT(MUTEX_HELD(&msp->ms_lock));
781 if (msp->ms_sm == NULL)
782 return;
783
34dc7c2f 784 mutex_enter(&mg->mg_lock);
1c27024e 785 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
f3a7f661
GW
786 mg->mg_histogram[i + ashift] +=
787 msp->ms_sm->sm_phys->smp_histogram[i];
788 mc->mc_histogram[i + ashift] +=
789 msp->ms_sm->sm_phys->smp_histogram[i];
790 }
791 mutex_exit(&mg->mg_lock);
792}
793
794void
795metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
796{
797 metaslab_class_t *mc = mg->mg_class;
798 uint64_t ashift = mg->mg_vd->vdev_ashift;
f3a7f661
GW
799
800 ASSERT(MUTEX_HELD(&msp->ms_lock));
801 if (msp->ms_sm == NULL)
802 return;
803
804 mutex_enter(&mg->mg_lock);
1c27024e 805 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
f3a7f661
GW
806 ASSERT3U(mg->mg_histogram[i + ashift], >=,
807 msp->ms_sm->sm_phys->smp_histogram[i]);
808 ASSERT3U(mc->mc_histogram[i + ashift], >=,
809 msp->ms_sm->sm_phys->smp_histogram[i]);
810
811 mg->mg_histogram[i + ashift] -=
812 msp->ms_sm->sm_phys->smp_histogram[i];
813 mc->mc_histogram[i + ashift] -=
814 msp->ms_sm->sm_phys->smp_histogram[i];
815 }
816 mutex_exit(&mg->mg_lock);
817}
818
819static void
820metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
821{
34dc7c2f 822 ASSERT(msp->ms_group == NULL);
f3a7f661 823 mutex_enter(&mg->mg_lock);
34dc7c2f
BB
824 msp->ms_group = mg;
825 msp->ms_weight = 0;
826 avl_add(&mg->mg_metaslab_tree, msp);
827 mutex_exit(&mg->mg_lock);
f3a7f661
GW
828
829 mutex_enter(&msp->ms_lock);
830 metaslab_group_histogram_add(mg, msp);
831 mutex_exit(&msp->ms_lock);
34dc7c2f
BB
832}
833
834static void
835metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
836{
f3a7f661
GW
837 mutex_enter(&msp->ms_lock);
838 metaslab_group_histogram_remove(mg, msp);
839 mutex_exit(&msp->ms_lock);
840
34dc7c2f
BB
841 mutex_enter(&mg->mg_lock);
842 ASSERT(msp->ms_group == mg);
843 avl_remove(&mg->mg_metaslab_tree, msp);
844 msp->ms_group = NULL;
845 mutex_exit(&mg->mg_lock);
846}
847
848static void
849metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
850{
851 /*
852 * Although in principle the weight can be any value, in
f3a7f661 853 * practice we do not use values in the range [1, 511].
34dc7c2f 854 */
f3a7f661 855 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
34dc7c2f
BB
856 ASSERT(MUTEX_HELD(&msp->ms_lock));
857
858 mutex_enter(&mg->mg_lock);
859 ASSERT(msp->ms_group == mg);
860 avl_remove(&mg->mg_metaslab_tree, msp);
861 msp->ms_weight = weight;
862 avl_add(&mg->mg_metaslab_tree, msp);
863 mutex_exit(&mg->mg_lock);
864}
865
f3a7f661
GW
866/*
867 * Calculate the fragmentation for a given metaslab group. We can use
868 * a simple average here since all metaslabs within the group must have
869 * the same size. The return value will be a value between 0 and 100
870 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
871 * group have a fragmentation metric.
872 */
873uint64_t
874metaslab_group_fragmentation(metaslab_group_t *mg)
875{
876 vdev_t *vd = mg->mg_vd;
877 uint64_t fragmentation = 0;
878 uint64_t valid_ms = 0;
f3a7f661 879
1c27024e 880 for (int m = 0; m < vd->vdev_ms_count; m++) {
f3a7f661
GW
881 metaslab_t *msp = vd->vdev_ms[m];
882
883 if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
884 continue;
885
886 valid_ms++;
887 fragmentation += msp->ms_fragmentation;
888 }
889
890 if (valid_ms <= vd->vdev_ms_count / 2)
891 return (ZFS_FRAG_INVALID);
892
893 fragmentation /= valid_ms;
894 ASSERT3U(fragmentation, <=, 100);
895 return (fragmentation);
896}
897
ac72fac3
GW
898/*
899 * Determine if a given metaslab group should skip allocations. A metaslab
f3a7f661
GW
900 * group should avoid allocations if its free capacity is less than the
901 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
902 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
3dfb57a3
DB
903 * that can still handle allocations. If the allocation throttle is enabled
904 * then we skip allocations to devices that have reached their maximum
905 * allocation queue depth unless the selected metaslab group is the only
906 * eligible group remaining.
ac72fac3
GW
907 */
908static boolean_t
3dfb57a3
DB
909metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
910 uint64_t psize)
ac72fac3 911{
3dfb57a3 912 spa_t *spa = mg->mg_vd->vdev_spa;
ac72fac3
GW
913 metaslab_class_t *mc = mg->mg_class;
914
915 /*
3dfb57a3
DB
916 * We can only consider skipping this metaslab group if it's
917 * in the normal metaslab class and there are other metaslab
918 * groups to select from. Otherwise, we always consider it eligible
f3a7f661 919 * for allocations.
ac72fac3 920 */
3dfb57a3
DB
921 if (mc != spa_normal_class(spa) || mc->mc_groups <= 1)
922 return (B_TRUE);
923
924 /*
925 * If the metaslab group's mg_allocatable flag is set (see comments
926 * in metaslab_group_alloc_update() for more information) and
927 * the allocation throttle is disabled then allow allocations to this
928 * device. However, if the allocation throttle is enabled then
929 * check if we have reached our allocation limit (mg_alloc_queue_depth)
930 * to determine if we should allow allocations to this metaslab group.
931 * If all metaslab groups are no longer considered allocatable
932 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
933 * gang block size then we allow allocations on this metaslab group
934 * regardless of the mg_allocatable or throttle settings.
935 */
936 if (mg->mg_allocatable) {
937 metaslab_group_t *mgp;
938 int64_t qdepth;
939 uint64_t qmax = mg->mg_max_alloc_queue_depth;
940
941 if (!mc->mc_alloc_throttle_enabled)
942 return (B_TRUE);
943
944 /*
945 * If this metaslab group does not have any free space, then
946 * there is no point in looking further.
947 */
948 if (mg->mg_no_free_space)
949 return (B_FALSE);
950
951 qdepth = refcount_count(&mg->mg_alloc_queue_depth);
952
953 /*
954 * If this metaslab group is below its qmax or it's
955 * the only allocatable metaslab group, then attempt
956 * to allocate from it.
957 */
958 if (qdepth < qmax || mc->mc_alloc_groups == 1)
959 return (B_TRUE);
960 ASSERT3U(mc->mc_alloc_groups, >, 1);
961
962 /*
963 * Since this metaslab group is at or over its qmax, we
964 * need to determine if there are metaslab groups after this
965 * one that might be able to handle this allocation. This is
966 * racy since we can't hold the locks for all metaslab
967 * groups at the same time when we make this check.
968 */
969 for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
970 qmax = mgp->mg_max_alloc_queue_depth;
971
972 qdepth = refcount_count(&mgp->mg_alloc_queue_depth);
973
974 /*
975 * If there is another metaslab group that
976 * might be able to handle the allocation, then
977 * we return false so that we skip this group.
978 */
979 if (qdepth < qmax && !mgp->mg_no_free_space)
980 return (B_FALSE);
981 }
982
983 /*
984 * We didn't find another group to handle the allocation
985 * so we can't skip this metaslab group even though
986 * we are at or over our qmax.
987 */
988 return (B_TRUE);
989
990 } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
991 return (B_TRUE);
992 }
993 return (B_FALSE);
ac72fac3
GW
994}
995
428870ff
BB
996/*
997 * ==========================================================================
93cf2076 998 * Range tree callbacks
428870ff
BB
999 * ==========================================================================
1000 */
93cf2076
GW
1001
1002/*
1003 * Comparison function for the private size-ordered tree. Tree is sorted
1004 * by size, larger sizes at the end of the tree.
1005 */
428870ff 1006static int
93cf2076 1007metaslab_rangesize_compare(const void *x1, const void *x2)
428870ff 1008{
93cf2076
GW
1009 const range_seg_t *r1 = x1;
1010 const range_seg_t *r2 = x2;
1011 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1012 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
428870ff 1013
ee36c709
GN
1014 int cmp = AVL_CMP(rs_size1, rs_size2);
1015 if (likely(cmp))
1016 return (cmp);
428870ff 1017
ee36c709 1018 return (AVL_CMP(r1->rs_start, r2->rs_start));
428870ff
BB
1019}
1020
93cf2076
GW
1021/*
1022 * ==========================================================================
4e21fd06 1023 * Common allocator routines
93cf2076
GW
1024 * ==========================================================================
1025 */
1026
9babb374 1027/*
428870ff 1028 * Return the maximum contiguous segment within the metaslab.
9babb374 1029 */
9babb374 1030uint64_t
93cf2076 1031metaslab_block_maxsize(metaslab_t *msp)
9babb374 1032{
d2734cce 1033 avl_tree_t *t = &msp->ms_allocatable_by_size;
93cf2076 1034 range_seg_t *rs;
9babb374 1035
93cf2076 1036 if (t == NULL || (rs = avl_last(t)) == NULL)
9babb374
BB
1037 return (0ULL);
1038
93cf2076
GW
1039 return (rs->rs_end - rs->rs_start);
1040}
1041
4e21fd06
DB
1042static range_seg_t *
1043metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
93cf2076 1044{
4e21fd06
DB
1045 range_seg_t *rs, rsearch;
1046 avl_index_t where;
93cf2076 1047
4e21fd06
DB
1048 rsearch.rs_start = start;
1049 rsearch.rs_end = start + size;
93cf2076 1050
4e21fd06
DB
1051 rs = avl_find(t, &rsearch, &where);
1052 if (rs == NULL) {
1053 rs = avl_nearest(t, where, AVL_AFTER);
93cf2076 1054 }
93cf2076 1055
4e21fd06
DB
1056 return (rs);
1057}
93cf2076
GW
1058
1059#if defined(WITH_FF_BLOCK_ALLOCATOR) || \
1060 defined(WITH_DF_BLOCK_ALLOCATOR) || \
1061 defined(WITH_CF_BLOCK_ALLOCATOR)
1062/*
1063 * This is a helper function that can be used by the allocator to find
1064 * a suitable block to allocate. This will search the specified AVL
1065 * tree looking for a block that matches the specified criteria.
1066 */
1067static uint64_t
1068metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
1069 uint64_t align)
1070{
4e21fd06 1071 range_seg_t *rs = metaslab_block_find(t, *cursor, size);
93cf2076
GW
1072
1073 while (rs != NULL) {
1074 uint64_t offset = P2ROUNDUP(rs->rs_start, align);
1075
1076 if (offset + size <= rs->rs_end) {
1077 *cursor = offset + size;
1078 return (offset);
1079 }
1080 rs = AVL_NEXT(t, rs);
1081 }
1082
1083 /*
1084 * If we know we've searched the whole map (*cursor == 0), give up.
1085 * Otherwise, reset the cursor to the beginning and try again.
1086 */
1087 if (*cursor == 0)
1088 return (-1ULL);
1089
1090 *cursor = 0;
1091 return (metaslab_block_picker(t, cursor, size, align));
9babb374 1092}
93cf2076 1093#endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */
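/*
 * Illustrative walk-through of metaslab_block_picker() above (all values
 * hypothetical): with *cursor == 0x6000, size == 0x2000 and align == 0x2000,
 * the search starts at the first free segment at or beyond the cursor; if
 * that segment is [0x7000, 0xc000), the aligned offset P2ROUNDUP(0x7000,
 * 0x2000) == 0x8000 fits, 0x8000 is returned, and the cursor advances to
 * 0xa000.
 */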
9babb374 1094
22c81dd8 1095#if defined(WITH_FF_BLOCK_ALLOCATOR)
428870ff
BB
1096/*
1097 * ==========================================================================
1098 * The first-fit block allocator
1099 * ==========================================================================
1100 */
1101static uint64_t
93cf2076 1102metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
9babb374 1103{
93cf2076
GW
1104 /*
1105 * Find the largest power of 2 block size that evenly divides the
1106 * requested size. This is used to try to allocate blocks with similar
1107 * alignment from the same area of the metaslab (i.e. same cursor
1108 * bucket); however, other allocation sizes may still exist in the
1109 * same region.
1110 */
428870ff 1111 uint64_t align = size & -size;
9bd274dd 1112 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
d2734cce 1113 avl_tree_t *t = &msp->ms_allocatable->rt_root;
9babb374 1114
428870ff 1115 return (metaslab_block_picker(t, cursor, size, align));
9babb374
BB
1116}
1117
93cf2076 1118static metaslab_ops_t metaslab_ff_ops = {
f3a7f661 1119 metaslab_ff_alloc
428870ff 1120};
9babb374 1121
93cf2076 1122metaslab_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
22c81dd8
BB
1123#endif /* WITH_FF_BLOCK_ALLOCATOR */
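/*
 * Worked example of the "size & -size" alignment trick used by the
 * allocators above and below: for size == 0x6000 (24K), size & -size ==
 * 0x2000 (8K), the largest power of two that evenly divides the request,
 * so all 24K allocations share the 8K-aligned cursor bucket.
 */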
1124
1125#if defined(WITH_DF_BLOCK_ALLOCATOR)
428870ff
BB
1126/*
1127 * ==========================================================================
1128 * Dynamic block allocator -
1129 * Uses the first fit allocation scheme until space gets low and then
1130 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
1131 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1132 * ==========================================================================
1133 */
9babb374 1134static uint64_t
93cf2076 1135metaslab_df_alloc(metaslab_t *msp, uint64_t size)
9babb374 1136{
93cf2076
GW
1137 /*
1138 * Find the largest power of 2 block size that evenly divides the
1139 * requested size. This is used to try to allocate blocks with similar
1140 * alignment from the same area of the metaslab (i.e. same cursor
1141 * bucket); however, other allocation sizes may still exist in the
1142 * same region.
1143 */
9babb374 1144 uint64_t align = size & -size;
9bd274dd 1145 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
d2734cce 1146 range_tree_t *rt = msp->ms_allocatable;
93cf2076
GW
1147 avl_tree_t *t = &rt->rt_root;
1148 uint64_t max_size = metaslab_block_maxsize(msp);
1149 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
9babb374 1150
93cf2076 1151 ASSERT(MUTEX_HELD(&msp->ms_lock));
d2734cce
SD
1152 ASSERT3U(avl_numnodes(t), ==,
1153 avl_numnodes(&msp->ms_allocatable_by_size));
9babb374
BB
1154
1155 if (max_size < size)
1156 return (-1ULL);
1157
1158 /*
1159 * If we're running low on space switch to using the size
1160 * sorted AVL tree (best-fit).
1161 */
1162 if (max_size < metaslab_df_alloc_threshold ||
1163 free_pct < metaslab_df_free_pct) {
d2734cce 1164 t = &msp->ms_allocatable_by_size;
9babb374
BB
1165 *cursor = 0;
1166 }
1167
1168 return (metaslab_block_picker(t, cursor, size, 1ULL));
1169}
1170
93cf2076 1171static metaslab_ops_t metaslab_df_ops = {
f3a7f661 1172 metaslab_df_alloc
34dc7c2f
BB
1173};
1174
93cf2076 1175metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
22c81dd8
BB
1176#endif /* WITH_DF_BLOCK_ALLOCATOR */
1177
93cf2076 1178#if defined(WITH_CF_BLOCK_ALLOCATOR)
428870ff
BB
1179/*
1180 * ==========================================================================
93cf2076
GW
1181 * Cursor fit block allocator -
1182 * Select the largest region in the metaslab, set the cursor to the beginning
1183 * of the range and the cursor_end to the end of the range. As allocations
1184 * are made, advance the cursor. Continue allocating from the cursor until
1185 * the range is exhausted and then find a new range.
428870ff
BB
1186 * ==========================================================================
1187 */
1188static uint64_t
93cf2076 1189metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
428870ff 1190{
d2734cce
SD
1191 range_tree_t *rt = msp->ms_allocatable;
1192 avl_tree_t *t = &msp->ms_allocatable_by_size;
93cf2076
GW
1193 uint64_t *cursor = &msp->ms_lbas[0];
1194 uint64_t *cursor_end = &msp->ms_lbas[1];
428870ff
BB
1195 uint64_t offset = 0;
1196
93cf2076
GW
1197 ASSERT(MUTEX_HELD(&msp->ms_lock));
1198 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
428870ff 1199
93cf2076 1200 ASSERT3U(*cursor_end, >=, *cursor);
428870ff 1201
93cf2076
GW
1202 if ((*cursor + size) > *cursor_end) {
1203 range_seg_t *rs;
428870ff 1204
d2734cce 1205 rs = avl_last(&msp->ms_allocatable_by_size);
93cf2076
GW
1206 if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1207 return (-1ULL);
428870ff 1208
93cf2076
GW
1209 *cursor = rs->rs_start;
1210 *cursor_end = rs->rs_end;
428870ff 1211 }
93cf2076
GW
1212
1213 offset = *cursor;
1214 *cursor += size;
1215
428870ff
BB
1216 return (offset);
1217}
1218
93cf2076 1219static metaslab_ops_t metaslab_cf_ops = {
f3a7f661 1220 metaslab_cf_alloc
428870ff
BB
1221};
1222
93cf2076
GW
1223metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
1224#endif /* WITH_CF_BLOCK_ALLOCATOR */
22c81dd8
BB
1225
1226#if defined(WITH_NDF_BLOCK_ALLOCATOR)
93cf2076
GW
1227/*
1228 * ==========================================================================
1229 * New dynamic fit allocator -
1230 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1231 * contiguous blocks. If no region is found then just use the largest segment
1232 * that remains.
1233 * ==========================================================================
1234 */
1235
1236/*
1237 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1238 * to request from the allocator.
1239 */
428870ff
BB
1240uint64_t metaslab_ndf_clump_shift = 4;
1241
1242static uint64_t
93cf2076 1243metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
428870ff 1244{
d2734cce 1245 avl_tree_t *t = &msp->ms_allocatable->rt_root;
428870ff 1246 avl_index_t where;
93cf2076 1247 range_seg_t *rs, rsearch;
9bd274dd 1248 uint64_t hbit = highbit64(size);
93cf2076
GW
1249 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1250 uint64_t max_size = metaslab_block_maxsize(msp);
428870ff 1251
93cf2076 1252 ASSERT(MUTEX_HELD(&msp->ms_lock));
d2734cce
SD
1253 ASSERT3U(avl_numnodes(t), ==,
1254 avl_numnodes(&msp->ms_allocatable_by_size));
428870ff
BB
1255
1256 if (max_size < size)
1257 return (-1ULL);
1258
93cf2076
GW
1259 rsearch.rs_start = *cursor;
1260 rsearch.rs_end = *cursor + size;
428870ff 1261
93cf2076
GW
1262 rs = avl_find(t, &rsearch, &where);
1263 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
d2734cce 1264 t = &msp->ms_allocatable_by_size;
428870ff 1265
93cf2076
GW
1266 rsearch.rs_start = 0;
1267 rsearch.rs_end = MIN(max_size,
428870ff 1268 1ULL << (hbit + metaslab_ndf_clump_shift));
93cf2076
GW
1269 rs = avl_find(t, &rsearch, &where);
1270 if (rs == NULL)
1271 rs = avl_nearest(t, where, AVL_AFTER);
1272 ASSERT(rs != NULL);
428870ff
BB
1273 }
1274
93cf2076
GW
1275 if ((rs->rs_end - rs->rs_start) >= size) {
1276 *cursor = rs->rs_start + size;
1277 return (rs->rs_start);
428870ff
BB
1278 }
1279 return (-1ULL);
1280}
1281
93cf2076 1282static metaslab_ops_t metaslab_ndf_ops = {
f3a7f661 1283 metaslab_ndf_alloc
428870ff
BB
1284};
1285
93cf2076 1286metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
22c81dd8 1287#endif /* WITH_NDF_BLOCK_ALLOCATOR */
9babb374 1288
93cf2076 1289
34dc7c2f
BB
1290/*
1291 * ==========================================================================
1292 * Metaslabs
1293 * ==========================================================================
1294 */
93cf2076
GW
1295
1296/*
1297 * Wait for any in-progress metaslab loads to complete.
1298 */
1299void
1300metaslab_load_wait(metaslab_t *msp)
1301{
1302 ASSERT(MUTEX_HELD(&msp->ms_lock));
1303
1304 while (msp->ms_loading) {
1305 ASSERT(!msp->ms_loaded);
1306 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1307 }
1308}
1309
1310int
1311metaslab_load(metaslab_t *msp)
1312{
1313 int error = 0;
4e21fd06 1314 boolean_t success = B_FALSE;
93cf2076
GW
1315
1316 ASSERT(MUTEX_HELD(&msp->ms_lock));
1317 ASSERT(!msp->ms_loaded);
1318 ASSERT(!msp->ms_loading);
1319
1320 msp->ms_loading = B_TRUE;
a1d477c2
MA
1321 /*
1322 * Nobody else can manipulate a loading metaslab, so it's now safe
1323 * to drop the lock. This way we don't have to hold the lock while
1324 * reading the spacemap from disk.
1325 */
1326 mutex_exit(&msp->ms_lock);
93cf2076
GW
1327
1328 /*
1329 * If the space map has not been allocated yet, then treat
d2734cce 1330 * all the space in the metaslab as free and add it to ms_allocatable.
93cf2076 1331 */
d2734cce
SD
1332 if (msp->ms_sm != NULL) {
1333 error = space_map_load(msp->ms_sm, msp->ms_allocatable,
1334 SM_FREE);
1335 } else {
1336 range_tree_add(msp->ms_allocatable,
1337 msp->ms_start, msp->ms_size);
1338 }
93cf2076 1339
4e21fd06 1340 success = (error == 0);
a1d477c2
MA
1341
1342 mutex_enter(&msp->ms_lock);
93cf2076
GW
1343 msp->ms_loading = B_FALSE;
1344
4e21fd06
DB
1345 if (success) {
1346 ASSERT3P(msp->ms_group, !=, NULL);
1347 msp->ms_loaded = B_TRUE;
1348
d2734cce
SD
1349 /*
1350 * If the metaslab already has a spacemap, then we need to
1351 * remove all segments from the defer tree; otherwise, the
1352 * metaslab is completely empty and we can skip this.
1353 */
1354 if (msp->ms_sm != NULL) {
1355 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1356 range_tree_walk(msp->ms_defer[t],
1357 range_tree_remove, msp->ms_allocatable);
1358 }
93cf2076 1359 }
4e21fd06 1360 msp->ms_max_size = metaslab_block_maxsize(msp);
93cf2076
GW
1361 }
1362 cv_broadcast(&msp->ms_load_cv);
1363 return (error);
1364}
1365
1366void
1367metaslab_unload(metaslab_t *msp)
1368{
1369 ASSERT(MUTEX_HELD(&msp->ms_lock));
d2734cce 1370 range_tree_vacate(msp->ms_allocatable, NULL, NULL);
93cf2076
GW
1371 msp->ms_loaded = B_FALSE;
1372 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
4e21fd06 1373 msp->ms_max_size = 0;
93cf2076
GW
1374}
1375
fb42a493
PS
1376int
1377metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
1378 metaslab_t **msp)
34dc7c2f
BB
1379{
1380 vdev_t *vd = mg->mg_vd;
93cf2076 1381 objset_t *mos = vd->vdev_spa->spa_meta_objset;
fb42a493
PS
1382 metaslab_t *ms;
1383 int error;
34dc7c2f 1384
79c76d5b 1385 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
fb42a493 1386 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
a1d477c2 1387 mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
fb42a493
PS
1388 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
1389 ms->ms_id = id;
1390 ms->ms_start = id << vd->vdev_ms_shift;
1391 ms->ms_size = 1ULL << vd->vdev_ms_shift;
34dc7c2f 1392
93cf2076
GW
1393 /*
1394 * We only open space map objects that already exist. All others
afe37326 1395 * will be opened when we finally allocate an object for them.
93cf2076 1396 */
afe37326 1397 if (object != 0) {
fb42a493 1398 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
a1d477c2 1399 ms->ms_size, vd->vdev_ashift);
fb42a493
PS
1400
1401 if (error != 0) {
1402 kmem_free(ms, sizeof (metaslab_t));
1403 return (error);
1404 }
1405
1406 ASSERT(ms->ms_sm != NULL);
93cf2076 1407 }
34dc7c2f
BB
1408
1409 /*
93cf2076 1410 * We create the main range tree here, but we don't create the
258553d3 1411 * other range trees until metaslab_sync_done(). This serves
34dc7c2f
BB
1412 * two purposes: it allows metaslab_sync_done() to detect the
1413 * addition of new space; and for debugging, it ensures that we'd
1414 * data fault on any attempt to use this metaslab before it's ready.
1415 */
d2734cce
SD
1416 ms->ms_allocatable = range_tree_create_impl(&rt_avl_ops,
1417 &ms->ms_allocatable_by_size, metaslab_rangesize_compare, 0);
fb42a493 1418 metaslab_group_add(mg, ms);
34dc7c2f 1419
4e21fd06 1420 metaslab_set_fragmentation(ms);
428870ff 1421
34dc7c2f
BB
1422 /*
1423 * If we're opening an existing pool (txg == 0) or creating
1424 * a new one (txg == TXG_INITIAL), all space is available now.
1425 * If we're adding space to an existing pool, the new space
1426 * does not become available until after this txg has synced.
4e21fd06
DB
1427 * The metaslab's weight will also be initialized when we sync
1428 * out this txg. This ensures that we don't attempt to allocate
1429 * from it before we have initialized it completely.
34dc7c2f
BB
1430 */
1431 if (txg <= TXG_INITIAL)
fb42a493 1432 metaslab_sync_done(ms, 0);
34dc7c2f 1433
93cf2076
GW
1434 /*
1435 * If metaslab_debug_load is set and we're initializing a metaslab
4e21fd06 1436 * that has an allocated space map object, then load its space
93cf2076
GW
1437 * map so that we can verify frees.
1438 */
fb42a493
PS
1439 if (metaslab_debug_load && ms->ms_sm != NULL) {
1440 mutex_enter(&ms->ms_lock);
1441 VERIFY0(metaslab_load(ms));
1442 mutex_exit(&ms->ms_lock);
93cf2076
GW
1443 }
1444
34dc7c2f 1445 if (txg != 0) {
34dc7c2f 1446 vdev_dirty(vd, 0, NULL, txg);
fb42a493 1447 vdev_dirty(vd, VDD_METASLAB, ms, txg);
34dc7c2f
BB
1448 }
1449
fb42a493
PS
1450 *msp = ms;
1451
1452 return (0);
34dc7c2f
BB
1453}
1454
1455void
1456metaslab_fini(metaslab_t *msp)
1457{
93cf2076 1458 metaslab_group_t *mg = msp->ms_group;
34dc7c2f
BB
1459
1460 metaslab_group_remove(mg, msp);
1461
1462 mutex_enter(&msp->ms_lock);
93cf2076
GW
1463 VERIFY(msp->ms_group == NULL);
1464 vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1465 0, -msp->ms_size);
1466 space_map_close(msp->ms_sm);
1467
1468 metaslab_unload(msp);
d2734cce
SD
1469 range_tree_destroy(msp->ms_allocatable);
1470 range_tree_destroy(msp->ms_freeing);
1471 range_tree_destroy(msp->ms_freed);
34dc7c2f 1472
1c27024e 1473 for (int t = 0; t < TXG_SIZE; t++) {
d2734cce 1474 range_tree_destroy(msp->ms_allocating[t]);
34dc7c2f
BB
1475 }
1476
1c27024e 1477 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
d2734cce 1478 range_tree_destroy(msp->ms_defer[t]);
e51be066 1479 }
c99c9001 1480 ASSERT0(msp->ms_deferspace);
428870ff 1481
d2734cce
SD
1482 range_tree_destroy(msp->ms_checkpointing);
1483
34dc7c2f 1484 mutex_exit(&msp->ms_lock);
93cf2076 1485 cv_destroy(&msp->ms_load_cv);
34dc7c2f 1486 mutex_destroy(&msp->ms_lock);
a1d477c2 1487 mutex_destroy(&msp->ms_sync_lock);
34dc7c2f
BB
1488
1489 kmem_free(msp, sizeof (metaslab_t));
1490}
1491
f3a7f661
GW
1492#define FRAGMENTATION_TABLE_SIZE 17
1493
93cf2076 1494/*
f3a7f661
GW
1495 * This table defines a segment size based fragmentation metric that will
1496 * allow each metaslab to derive its own fragmentation value. This is done
1497 * by calculating the space in each bucket of the spacemap histogram and
1498 * multiplying that by the fragmentation metric in this table. Doing
1499 * this for all buckets and dividing it by the total amount of free
1500 * space in this metaslab (i.e. the total free space in all buckets) gives
1501 * us the fragmentation metric. This means that a high fragmentation metric
1502 * equates to most of the free space being comprised of small segments.
1503 * Conversely, if the metric is low, then most of the free space is in
1504 * large segments. A 10% change in fragmentation equates to approximately
1505 * double the number of segments.
93cf2076 1506 *
f3a7f661
GW
1507 * This table defines 0% fragmented space using 16MB segments. Testing has
1508 * shown that segments that are greater than or equal to 16MB do not suffer
1509 * from drastic performance problems. Using this value, we derive the rest
1510 * of the table. Since the fragmentation value is never stored on disk, it
1511 * is possible to change these calculations in the future.
1512 */
1513int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
1514 100, /* 512B */
1515 100, /* 1K */
1516 98, /* 2K */
1517 95, /* 4K */
1518 90, /* 8K */
1519 80, /* 16K */
1520 70, /* 32K */
1521 60, /* 64K */
1522 50, /* 128K */
1523 40, /* 256K */
1524 30, /* 512K */
1525 20, /* 1M */
1526 15, /* 2M */
1527 10, /* 4M */
1528 5, /* 8M */
1529 0 /* 16M */
1530};
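/*
 * Worked example of the metric derived from zfs_frag_table above (all
 * numbers hypothetical): a metaslab whose free space is 1 GB in 512K
 * segments plus 1 GB in 16M segments scores (1 GB * 30 + 1 GB * 0) / 2 GB
 * = 15, i.e. only mildly fragmented, while the same 2 GB of free space
 * split entirely into 8K segments would score 90.
 */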
1531
1532/*
1532 * Calculate the metaslab's fragmentation metric. A return value
1534 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
1535 * not support this metric. Otherwise, the return value should be in the
1536 * range [0, 100].
93cf2076 1537 */
4e21fd06
DB
1538static void
1539metaslab_set_fragmentation(metaslab_t *msp)
93cf2076 1540{
f3a7f661
GW
1541 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1542 uint64_t fragmentation = 0;
1543 uint64_t total = 0;
1544 boolean_t feature_enabled = spa_feature_is_enabled(spa,
1545 SPA_FEATURE_SPACEMAP_HISTOGRAM);
93cf2076 1546
4e21fd06
DB
1547 if (!feature_enabled) {
1548 msp->ms_fragmentation = ZFS_FRAG_INVALID;
1549 return;
1550 }
f3a7f661 1551
93cf2076 1552 /*
f3a7f661
GW
1553 * A null space map means that the entire metaslab is free
1554 * and thus is not fragmented.
93cf2076 1555 */
4e21fd06
DB
1556 if (msp->ms_sm == NULL) {
1557 msp->ms_fragmentation = 0;
1558 return;
1559 }
f3a7f661
GW
1560
1561 /*
4e21fd06 1562 * If this metaslab's space map has not been upgraded, flag it
f3a7f661
GW
1563 * so that we upgrade next time we encounter it.
1564 */
1565 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
3b7f360c 1566 uint64_t txg = spa_syncing_txg(spa);
93cf2076
GW
1567 vdev_t *vd = msp->ms_group->mg_vd;
1568
3b7f360c
GW
1569 /*
1570 * If we've reached the final dirty txg, then we must
1571 * be shutting down the pool. We don't want to dirty
1572 * any data past this point so skip setting the condense
1573 * flag. We can retry this action the next time the pool
1574 * is imported.
1575 */
1576 if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
8b0a0840
TC
1577 msp->ms_condense_wanted = B_TRUE;
1578 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
964c2d69 1579 zfs_dbgmsg("txg %llu, requesting force condense: "
3b7f360c
GW
1580 "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
1581 vd->vdev_id);
8b0a0840 1582 }
4e21fd06
DB
1583 msp->ms_fragmentation = ZFS_FRAG_INVALID;
1584 return;
93cf2076
GW
1585 }
1586
1c27024e 1587 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
f3a7f661
GW
1588 uint64_t space = 0;
1589 uint8_t shift = msp->ms_sm->sm_shift;
4e21fd06 1590
f3a7f661
GW
1591 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
1592 FRAGMENTATION_TABLE_SIZE - 1);
93cf2076 1593
93cf2076
GW
1594 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1595 continue;
1596
f3a7f661
GW
1597 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
1598 total += space;
1599
1600 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
1601 fragmentation += space * zfs_frag_table[idx];
93cf2076 1602 }
f3a7f661
GW
1603
1604 if (total > 0)
1605 fragmentation /= total;
1606 ASSERT3U(fragmentation, <=, 100);
4e21fd06
DB
1607
1608 msp->ms_fragmentation = fragmentation;
93cf2076 1609}
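A worked example (hypothetical numbers, not from the source) of the weighted average computed by the loop above:
/*
 * Assume sm_shift = 9, so histogram bucket i covers free segments of
 * 2^(9 + i) bytes and maps to zfs_frag_table[9 - SPA_MINBLOCKSHIFT + i],
 * i.e. zfs_frag_table[i]:
 *
 *   bucket i = 8  (128K segments), count 10 -> space = 10 << 17 = 1280K
 *   bucket i = 12 (2M segments),   count 1  -> space =  1 << 21 = 2048K
 *
 *   fragmentation = (1280K * 50 + 2048K * 15) / (1280K + 2048K) ~= 28
 *
 * A metaslab whose free space is mostly small segments scores close to
 * 100; one dominated by 16M+ segments scores 0.
 */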
34dc7c2f 1610
f3a7f661
GW
1611/*
1612 * Compute a weight -- a selection preference value -- for the given metaslab.
1613 * This is based on the amount of free space, the level of fragmentation,
1614 * the LBA range, and whether the metaslab is loaded.
1615 */
34dc7c2f 1616static uint64_t
4e21fd06 1617metaslab_space_weight(metaslab_t *msp)
34dc7c2f
BB
1618{
1619 metaslab_group_t *mg = msp->ms_group;
34dc7c2f
BB
1620 vdev_t *vd = mg->mg_vd;
1621 uint64_t weight, space;
1622
1623 ASSERT(MUTEX_HELD(&msp->ms_lock));
4e21fd06 1624 ASSERT(!vd->vdev_removing);
c2e42f9d 1625
34dc7c2f
BB
1626 /*
1627 * The baseline weight is the metaslab's free space.
1628 */
93cf2076 1629 space = msp->ms_size - space_map_allocated(msp->ms_sm);
f3a7f661 1630
f3a7f661
GW
1631 if (metaslab_fragmentation_factor_enabled &&
1632 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
1633 /*
1634 * Use the fragmentation information to inversely scale
1635 * down the baseline weight. We need to ensure that we
1636 * don't exclude this metaslab completely when it's 100%
1637 * fragmented. To avoid this we reduce the fragmented value
1638 * by 1.
1639 */
1640 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
1641
1642 /*
1643 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1644 * this metaslab again. The fragmentation metric may have
1645 * decreased the space to something smaller than
1646 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1647 * so that we can consume any remaining space.
1648 */
1649 if (space > 0 && space < SPA_MINBLOCKSIZE)
1650 space = SPA_MINBLOCKSIZE;
1651 }
34dc7c2f
BB
1652 weight = space;
1653
1654 /*
1655 * Modern disks have uniform bit density and constant angular velocity.
1656 * Therefore, the outer recording zones are faster (higher bandwidth)
1657 * than the inner zones by the ratio of outer to inner track diameter,
1658 * which is typically around 2:1. We account for this by assigning
1659 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1660 * In effect, this means that we'll select the metaslab with the most
1661 * free bandwidth rather than simply the one with the most free space.
1662 */
fb40095f 1663 if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
f3a7f661
GW
1664 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1665 ASSERT(weight >= space && weight <= 2 * space);
1666 }
428870ff 1667
f3a7f661
GW
1668 /*
1669 * If this metaslab is one we're actively using, adjust its
1670 * weight to make it preferable to any inactive metaslab so
1671 * we'll polish it off. If the fragmentation on this metaslab
1672 * has exceeded our threshold, then don't mark it active.
1673 */
1674 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
1675 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
428870ff
BB
1676 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1677 }
34dc7c2f 1678
4e21fd06
DB
1679 WEIGHT_SET_SPACEBASED(weight);
1680 return (weight);
1681}
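A quick illustration (hypothetical vdev, made-up numbers) of the 2x-to-1x LBA scaling applied above:
/*
 * With vdev_ms_count = 200 and space = 1G free:
 *
 *   ms_id = 0   : weight = 2*1G - (0   * 1G)/200 = 2.000G
 *   ms_id = 100 : weight = 2*1G - (100 * 1G)/200 = 1.500G
 *   ms_id = 199 : weight = 2*1G - (199 * 1G)/200 ~= 1.005G
 *
 * so the multiplier falls smoothly from ~2x at the outer edge to ~1x at
 * the inner edge, staying within the asserted [space, 2 * space] range.
 */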
1682
1683/*
1684 * Return the weight of the specified metaslab, according to the segment-based
1685 * weighting algorithm. The metaslab must be loaded. This function can
1686 * be called within a sync pass since it relies only on the metaslab's
1687 * range tree which is always accurate when the metaslab is loaded.
1688 */
1689static uint64_t
1690metaslab_weight_from_range_tree(metaslab_t *msp)
1691{
1692 uint64_t weight = 0;
1693 uint32_t segments = 0;
4e21fd06
DB
1694
1695 ASSERT(msp->ms_loaded);
1696
1c27024e
DB
1697 for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
1698 i--) {
4e21fd06
DB
1699 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
1700 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1701
1702 segments <<= 1;
d2734cce 1703 segments += msp->ms_allocatable->rt_histogram[i];
4e21fd06
DB
1704
1705 /*
1706 * The range tree provides more precision than the space map
1707 * and must be downgraded so that all values fit within the
1708 * space map's histogram. This allows us to compare loaded
1709 * vs. unloaded metaslabs to determine which metaslab is
1710 * considered "best".
1711 */
1712 if (i > max_idx)
1713 continue;
1714
1715 if (segments != 0) {
1716 WEIGHT_SET_COUNT(weight, segments);
1717 WEIGHT_SET_INDEX(weight, i);
1718 WEIGHT_SET_ACTIVE(weight, 0);
1719 break;
1720 }
1721 }
1722 return (weight);
1723}
1724
1725/*
1726 * Calculate the weight based on the on-disk histogram. This should only
1727 * be called after a sync pass has completely finished since the on-disk
1728 * information is updated in metaslab_sync().
1729 */
1730static uint64_t
1731metaslab_weight_from_spacemap(metaslab_t *msp)
1732{
1733 uint64_t weight = 0;
4e21fd06 1734
1c27024e 1735 for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
4e21fd06
DB
1736 if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) {
1737 WEIGHT_SET_COUNT(weight,
1738 msp->ms_sm->sm_phys->smp_histogram[i]);
1739 WEIGHT_SET_INDEX(weight, i +
1740 msp->ms_sm->sm_shift);
1741 WEIGHT_SET_ACTIVE(weight, 0);
1742 break;
1743 }
1744 }
1745 return (weight);
1746}
1747
1748/*
1749 * Compute a segment-based weight for the specified metaslab. The weight
1750 * is determined by the highest bucket in the histogram. The information
1751 * for the highest bucket is encoded into the weight value.
1752 */
1753static uint64_t
1754metaslab_segment_weight(metaslab_t *msp)
1755{
1756 metaslab_group_t *mg = msp->ms_group;
1757 uint64_t weight = 0;
1758 uint8_t shift = mg->mg_vd->vdev_ashift;
1759
1760 ASSERT(MUTEX_HELD(&msp->ms_lock));
1761
1762 /*
1763 * The metaslab is completely free.
1764 */
1765 if (space_map_allocated(msp->ms_sm) == 0) {
1766 int idx = highbit64(msp->ms_size) - 1;
1767 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1768
1769 if (idx < max_idx) {
1770 WEIGHT_SET_COUNT(weight, 1ULL);
1771 WEIGHT_SET_INDEX(weight, idx);
1772 } else {
1773 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
1774 WEIGHT_SET_INDEX(weight, max_idx);
1775 }
1776 WEIGHT_SET_ACTIVE(weight, 0);
1777 ASSERT(!WEIGHT_IS_SPACEBASED(weight));
1778
1779 return (weight);
1780 }
1781
1782 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
1783
1784 /*
1785 * If the metaslab is fully allocated then just make the weight 0.
1786 */
1787 if (space_map_allocated(msp->ms_sm) == msp->ms_size)
1788 return (0);
1789 /*
1790 * If the metaslab is already loaded, then use the range tree to
1791 * determine the weight. Otherwise, we rely on the space map information
1792 * to generate the weight.
1793 */
1794 if (msp->ms_loaded) {
1795 weight = metaslab_weight_from_range_tree(msp);
1796 } else {
1797 weight = metaslab_weight_from_spacemap(msp);
1798 }
1799
1800 /*
1801 * If the metaslab was active the last time we calculated its weight
1802 * then keep it active. We want to consume the entire region that
1803 * is associated with this weight.
1804 */
1805 if (msp->ms_activation_weight != 0 && weight != 0)
1806 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
1807 return (weight);
1808}
1809
1810/*
1811 * Determine if we should attempt to allocate from this metaslab. If the
1812 * metaslab has a maximum size then we can quickly determine if the desired
1813 * allocation size can be satisfied. Otherwise, if we're using segment-based
1814 * weighting then we can determine the maximum allocation that this metaslab
1815 * can accommodate based on the index encoded in the weight. If we're using
1816 * space-based weights then rely on the entire weight (excluding the weight
1817 * type bit).
1818 */
1819boolean_t
1820metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
1821{
1822 boolean_t should_allocate;
1823
1824 if (msp->ms_max_size != 0)
1825 return (msp->ms_max_size >= asize);
1826
1827 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
1828 /*
1829 * The metaslab segment weight indicates segments in the
1830 * range [2^i, 2^(i+1)), where i is the index in the weight.
1831 * Since the asize might be in the middle of the range, we
1832 * should attempt the allocation if asize < 2^(i+1).
1833 */
1834 should_allocate = (asize <
1835 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
1836 } else {
1837 should_allocate = (asize <=
1838 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
1839 }
1840 return (should_allocate);
1841}
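For example (hypothetical segment-based weight, illustrative only):
/*
 * If WEIGHT_GET_INDEX(msp->ms_weight) == 17, the largest known free
 * segments fall in [128K, 256K):
 *
 *   asize = 128K -> 128K < 2^18, attempt the allocation
 *   asize = 192K -> 192K < 2^18, attempt it (it may still fail)
 *   asize = 256K -> not < 2^18, skip this metaslab
 */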
1842static uint64_t
1843metaslab_weight(metaslab_t *msp)
1844{
1845 vdev_t *vd = msp->ms_group->mg_vd;
1846 spa_t *spa = vd->vdev_spa;
1847 uint64_t weight;
1848
1849 ASSERT(MUTEX_HELD(&msp->ms_lock));
1850
1851 /*
a1d477c2 1852 * If this vdev is in the process of being removed, there is nothing
4e21fd06
DB
1853 * for us to do here.
1854 */
a1d477c2 1855 if (vd->vdev_removing)
4e21fd06 1856 return (0);
4e21fd06
DB
1857
1858 metaslab_set_fragmentation(msp);
1859
1860 /*
1861 * Update the maximum size if the metaslab is loaded. This will
1862 * ensure that we get an accurate maximum size if newly freed space
1863 * has been added back into the free tree.
1864 */
1865 if (msp->ms_loaded)
1866 msp->ms_max_size = metaslab_block_maxsize(msp);
1867
1868 /*
1869 * Segment-based weighting requires space map histogram support.
1870 */
1871 if (zfs_metaslab_segment_weight_enabled &&
1872 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
1873 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
1874 sizeof (space_map_phys_t))) {
1875 weight = metaslab_segment_weight(msp);
1876 } else {
1877 weight = metaslab_space_weight(msp);
1878 }
93cf2076 1879 return (weight);
34dc7c2f
BB
1880}
1881
1882static int
6d974228 1883metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
34dc7c2f 1884{
34dc7c2f
BB
1885 ASSERT(MUTEX_HELD(&msp->ms_lock));
1886
1887 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
93cf2076
GW
1888 metaslab_load_wait(msp);
1889 if (!msp->ms_loaded) {
1890 int error = metaslab_load(msp);
1891 if (error) {
428870ff
BB
1892 metaslab_group_sort(msp->ms_group, msp, 0);
1893 return (error);
1894 }
34dc7c2f 1895 }
9babb374 1896
4e21fd06 1897 msp->ms_activation_weight = msp->ms_weight;
34dc7c2f
BB
1898 metaslab_group_sort(msp->ms_group, msp,
1899 msp->ms_weight | activation_weight);
1900 }
93cf2076 1901 ASSERT(msp->ms_loaded);
34dc7c2f
BB
1902 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
1903
1904 return (0);
1905}
1906
1907static void
4e21fd06 1908metaslab_passivate(metaslab_t *msp, uint64_t weight)
34dc7c2f 1909{
4e21fd06
DB
1910 ASSERTV(uint64_t size = weight & ~METASLAB_WEIGHT_TYPE);
1911
34dc7c2f
BB
1912 /*
1913 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1914 * this metaslab again. In that case, it had better be empty,
1915 * or we would be leaving space on the table.
1916 */
94d49e8f
TC
1917 ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
1918 size >= SPA_MINBLOCKSIZE ||
d2734cce 1919 range_tree_space(msp->ms_allocatable) == 0);
4e21fd06
DB
1920 ASSERT0(weight & METASLAB_ACTIVE_MASK);
1921
1922 msp->ms_activation_weight = 0;
1923 metaslab_group_sort(msp->ms_group, msp, weight);
34dc7c2f
BB
1924 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
1925}
1926
4e21fd06
DB
1927/*
1928 * Segment-based metaslabs are activated once and remain active until
1929 * we either fail an allocation attempt (similar to space-based metaslabs)
1930 * or have exhausted the free space in zfs_metaslab_switch_threshold
1931 * buckets since the metaslab was activated. This function checks to see
1932 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
1933 * metaslab and passivates it proactively. This will allow us to select a
1934 * metaslab with a larger contiguous region, if any, remaining within this
1935 * metaslab group. If we're in sync pass > 1, then we continue using this
1936 * metaslab so that we don't dirty more blocks and cause more sync passes.
1937 */
1938void
1939metaslab_segment_may_passivate(metaslab_t *msp)
1940{
1941 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
4e21fd06
DB
1942
1943 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
1944 return;
1945
1946 /*
1947 * Since we are in the middle of a sync pass, the most accurate
1948 * information that is accessible to us is the in-core range tree
1949 * histogram; calculate the new weight based on that information.
1950 */
1c27024e
DB
1951 uint64_t weight = metaslab_weight_from_range_tree(msp);
1952 int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
1953 int current_idx = WEIGHT_GET_INDEX(weight);
4e21fd06
DB
1954
1955 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
1956 metaslab_passivate(msp, weight);
1957}
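A sketch of the check above, assuming zfs_metaslab_switch_threshold is 2 (indexes chosen for illustration):
/*
 * If the metaslab was activated at weight index 20 (1M-2M segments) and
 * the in-core range tree now yields index 18 (256K-512K segments), then
 * 18 <= 20 - 2 holds and the metaslab is passivated, letting the group
 * select a metaslab with larger contiguous free regions.
 */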
1958
93cf2076
GW
1959static void
1960metaslab_preload(void *arg)
1961{
1962 metaslab_t *msp = arg;
1963 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1cd77734 1964 fstrans_cookie_t cookie = spl_fstrans_mark();
93cf2076 1965
080b3100
GW
1966 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
1967
93cf2076
GW
1968 mutex_enter(&msp->ms_lock);
1969 metaslab_load_wait(msp);
1970 if (!msp->ms_loaded)
1971 (void) metaslab_load(msp);
4e21fd06 1972 msp->ms_selected_txg = spa_syncing_txg(spa);
93cf2076 1973 mutex_exit(&msp->ms_lock);
1cd77734 1974 spl_fstrans_unmark(cookie);
93cf2076
GW
1975}
1976
1977static void
1978metaslab_group_preload(metaslab_group_t *mg)
1979{
1980 spa_t *spa = mg->mg_vd->vdev_spa;
1981 metaslab_t *msp;
1982 avl_tree_t *t = &mg->mg_metaslab_tree;
1983 int m = 0;
1984
1985 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
c5528b9b 1986 taskq_wait_outstanding(mg->mg_taskq, 0);
93cf2076
GW
1987 return;
1988 }
93cf2076 1989
080b3100 1990 mutex_enter(&mg->mg_lock);
a1d477c2 1991
93cf2076 1992 /*
080b3100 1993 * Load the next potential metaslabs
93cf2076 1994 */
4e21fd06 1995 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
a1d477c2
MA
1996 ASSERT3P(msp->ms_group, ==, mg);
1997
f3a7f661
GW
1998 /*
1999 * We preload only the maximum number of metaslabs specified
2000 * by metaslab_preload_limit. If a metaslab is being forced
2001 * to condense then we preload it too. This will ensure
2002 * that force condensing happens in the next txg.
2003 */
2004 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
f3a7f661
GW
2005 continue;
2006 }
93cf2076
GW
2007
2008 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
48d3eb40 2009 msp, TQ_SLEEP) != TASKQID_INVALID);
93cf2076
GW
2010 }
2011 mutex_exit(&mg->mg_lock);
2012}
2013
e51be066 2014/*
93cf2076
GW
2015 * Determine if the space map's on-disk footprint is past our tolerance
2016 * for inefficiency. We would like to use the following criteria to make
2017 * our decision:
e51be066
GW
2018 *
2019 * 1. The size of the space map object should not dramatically increase as a
93cf2076 2020 * result of writing out the free space range tree.
e51be066
GW
2021 *
2022 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
93cf2076 2023 * times the size of the free space range tree representation
a1d477c2 2024 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
e51be066 2025 *
b02fe35d
AR
2026 * 3. The on-disk size of the space map should actually decrease.
2027 *
e51be066
GW
2028 * Checking the first condition is tricky since we don't want to walk
2029 * the entire AVL tree calculating the estimated on-disk size. Instead we
93cf2076
GW
2030 * use the size-ordered range tree in the metaslab and calculate the
2031 * size required to write out the largest segment in our free tree. If the
e51be066
GW
2032 * size required to represent that segment on disk is larger than the space
2033 * map object then we avoid condensing this map.
2034 *
2035 * To determine the second criterion we use a best-case estimate and assume
2036 * each segment can be represented on-disk as a single 64-bit entry. We refer
2037 * to this best-case estimate as the space map's minimal form.
b02fe35d
AR
2038 *
2039 * Unfortunately, we cannot compute the on-disk size of the space map in this
2040 * context because we cannot accurately compute the effects of compression, etc.
2041 * Instead, we apply the heuristic described in the block comment for
2042 * zfs_metaslab_condense_block_threshold - we only condense if the space used
2043 * is greater than a threshold number of blocks.
e51be066
GW
2044 */
2045static boolean_t
2046metaslab_should_condense(metaslab_t *msp)
2047{
93cf2076
GW
2048 space_map_t *sm = msp->ms_sm;
2049 range_seg_t *rs;
b02fe35d
AR
2050 uint64_t size, entries, segsz, object_size, optimal_size, record_size;
2051 dmu_object_info_t doi;
d2734cce
SD
2052 vdev_t *vd = msp->ms_group->mg_vd;
2053 uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
2054 uint64_t current_txg = spa_syncing_txg(vd->vdev_spa);
e51be066
GW
2055
2056 ASSERT(MUTEX_HELD(&msp->ms_lock));
93cf2076 2057 ASSERT(msp->ms_loaded);
e51be066
GW
2058
2059 /*
d2734cce
SD
2060 * Allocations and frees in early passes are generally more space
2061 * efficient (in terms of blocks described in space map entries)
2062 * than the ones in later passes (e.g. we don't compress after
2063 * sync pass 5) and condensing a metaslab multiple times in a txg
2064 * could degrade performance.
2065 *
2066 * Thus we prefer condensing each metaslab at most once every txg at
2067 * the earliest sync pass possible. If a metaslab is eligible for
2068 * condensing again after being considered for condensing within the
2069 * same txg, it will hopefully be dirty in the next txg where it will
2070 * be condensed at an earlier pass.
2071 */
2072 if (msp->ms_condense_checked_txg == current_txg)
2073 return (B_FALSE);
2074 msp->ms_condense_checked_txg = current_txg;
2075
2076 /*
2077 * Use the ms_allocatable_by_size range tree, which is ordered by
2078 * size, to obtain the largest segment in the free tree. We always
2079 * condense metaslabs that are empty and metaslabs for which a
2080 * condense request has been made.
e51be066 2081 */
d2734cce 2082 rs = avl_last(&msp->ms_allocatable_by_size);
f3a7f661 2083 if (rs == NULL || msp->ms_condense_wanted)
e51be066
GW
2084 return (B_TRUE);
2085
2086 /*
2087 * Calculate the number of 64-bit entries this segment would
2088 * require when written to disk. If this single segment would be
2089 * larger on-disk than the entire current on-disk structure, then
2090 * clearly condensing will increase the on-disk structure size.
2091 */
93cf2076 2092 size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
e51be066
GW
2093 entries = size / (MIN(size, SM_RUN_MAX));
2094 segsz = entries * sizeof (uint64_t);
2095
d2734cce
SD
2096 optimal_size =
2097 sizeof (uint64_t) * avl_numnodes(&msp->ms_allocatable->rt_root);
b02fe35d
AR
2098 object_size = space_map_length(msp->ms_sm);
2099
2100 dmu_object_info_from_db(sm->sm_dbuf, &doi);
2101 record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
2102
2103 return (segsz <= object_size &&
2104 object_size >= (optimal_size * zfs_condense_pct / 100) &&
2105 object_size > zfs_metaslab_condense_block_threshold * record_size);
e51be066
GW
2106}
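A worked example of the three-way test above, using made-up sizes and assuming a condense block threshold of 4 with 4K records:
/*
 * Largest free segment  -> segsz = 8 (a single 64-bit entry)
 * In-core tree, 6144 segments -> optimal_size = 8 * 6144 = 48K
 * Current on-disk space map   -> object_size = 96K
 * record_size = MAX(4K data block, 4K vdev block) = 4K
 *
 *   8   <= 96K                    (condensing cannot grow the object)
 *   96K >= 48K * 200 / 100 = 96K  (with the default zfs_condense_pct)
 *   96K >  4 * 4K = 16K           (large enough to be worth the I/O)
 *
 * All three hold, so the metaslab is condensed this sync.
 */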
2107
2108/*
2109 * Condense the on-disk space map representation to its minimized form.
2110 * The minimized form consists of a small number of allocations followed by
93cf2076 2111 * the entries of the free range tree.
e51be066
GW
2112 */
2113static void
2114metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
2115{
93cf2076
GW
2116 range_tree_t *condense_tree;
2117 space_map_t *sm = msp->ms_sm;
e51be066
GW
2118
2119 ASSERT(MUTEX_HELD(&msp->ms_lock));
93cf2076 2120 ASSERT(msp->ms_loaded);
e51be066 2121
f3a7f661 2122
964c2d69 2123 zfs_dbgmsg("condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
5f3d9c69
JS
2124 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
2125 msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
2126 msp->ms_group->mg_vd->vdev_spa->spa_name,
d2734cce
SD
2127 space_map_length(msp->ms_sm),
2128 avl_numnodes(&msp->ms_allocatable->rt_root),
f3a7f661
GW
2129 msp->ms_condense_wanted ? "TRUE" : "FALSE");
2130
2131 msp->ms_condense_wanted = B_FALSE;
e51be066
GW
2132
2133 /*
93cf2076 2134 * Create a range tree that is 100% allocated. We remove segments
e51be066
GW
2135 * that have been freed in this txg, any deferred frees that exist,
2136 * and any allocation in the future. Removing segments should be
93cf2076
GW
2137 * a relatively inexpensive operation since we expect these trees to
2138 * have a small number of nodes.
e51be066 2139 */
a1d477c2 2140 condense_tree = range_tree_create(NULL, NULL);
93cf2076 2141 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
e51be066 2142
d2734cce
SD
2143 range_tree_walk(msp->ms_freeing, range_tree_remove, condense_tree);
2144 range_tree_walk(msp->ms_freed, range_tree_remove, condense_tree);
e51be066 2145
1c27024e 2146 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
d2734cce 2147 range_tree_walk(msp->ms_defer[t],
93cf2076
GW
2148 range_tree_remove, condense_tree);
2149 }
e51be066 2150
1c27024e 2151 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
d2734cce 2152 range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
93cf2076
GW
2153 range_tree_remove, condense_tree);
2154 }
e51be066
GW
2155
2156 /*
2157 * We're about to drop the metaslab's lock thus allowing
2158 * other consumers to change its contents. Set the
93cf2076 2159 * metaslab's ms_condensing flag to ensure that
e51be066
GW
2160 * allocations on this metaslab do not occur while we're
2161 * in the middle of committing it to disk. This is only critical
d2734cce 2162 * for ms_allocatable as all other range trees use per txg
e51be066
GW
2163 * views of their content.
2164 */
93cf2076 2165 msp->ms_condensing = B_TRUE;
e51be066
GW
2166
2167 mutex_exit(&msp->ms_lock);
d2734cce 2168 space_map_truncate(sm, zfs_metaslab_sm_blksz, tx);
e51be066
GW
2169
2170 /*
4e21fd06 2171 * While we would ideally like to create a space map representation
e51be066 2172 * that consists only of allocation records, doing so can be
93cf2076 2173 * prohibitively expensive because the in-core free tree can be
e51be066 2174 * large, and therefore computationally expensive to subtract
93cf2076
GW
2175 * from the condense_tree. Instead we sync out two trees, a cheap
2176 * allocation only tree followed by the in-core free tree. While not
e51be066
GW
2177 * optimal, this is typically close to optimal, and much cheaper to
2178 * compute.
2179 */
93cf2076
GW
2180 space_map_write(sm, condense_tree, SM_ALLOC, tx);
2181 range_tree_vacate(condense_tree, NULL, NULL);
2182 range_tree_destroy(condense_tree);
e51be066 2183
d2734cce 2184 space_map_write(sm, msp->ms_allocatable, SM_FREE, tx);
a1d477c2 2185 mutex_enter(&msp->ms_lock);
93cf2076 2186 msp->ms_condensing = B_FALSE;
e51be066
GW
2187}
2188
34dc7c2f
BB
2189/*
2190 * Write a metaslab to disk in the context of the specified transaction group.
2191 */
2192void
2193metaslab_sync(metaslab_t *msp, uint64_t txg)
2194{
93cf2076
GW
2195 metaslab_group_t *mg = msp->ms_group;
2196 vdev_t *vd = mg->mg_vd;
34dc7c2f 2197 spa_t *spa = vd->vdev_spa;
428870ff 2198 objset_t *mos = spa_meta_objset(spa);
d2734cce 2199 range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
34dc7c2f 2200 dmu_tx_t *tx;
93cf2076 2201 uint64_t object = space_map_object(msp->ms_sm);
34dc7c2f 2202
428870ff
BB
2203 ASSERT(!vd->vdev_ishole);
2204
e51be066
GW
2205 /*
2206 * This metaslab has just been added so there's no work to do now.
2207 */
d2734cce 2208 if (msp->ms_freeing == NULL) {
93cf2076 2209 ASSERT3P(alloctree, ==, NULL);
e51be066
GW
2210 return;
2211 }
2212
93cf2076 2213 ASSERT3P(alloctree, !=, NULL);
d2734cce
SD
2214 ASSERT3P(msp->ms_freeing, !=, NULL);
2215 ASSERT3P(msp->ms_freed, !=, NULL);
2216 ASSERT3P(msp->ms_checkpointing, !=, NULL);
e51be066 2217
f3a7f661 2218 /*
d2734cce
SD
2219 * Normally, we don't want to process a metaslab if there are no
2220 * allocations or frees to perform. However, if the metaslab is being
2221 * forced to condense and it's loaded, we need to let it through.
f3a7f661 2222 */
d2734cce
SD
2223 if (range_tree_is_empty(alloctree) &&
2224 range_tree_is_empty(msp->ms_freeing) &&
2225 range_tree_is_empty(msp->ms_checkpointing) &&
3b7f360c 2226 !(msp->ms_loaded && msp->ms_condense_wanted))
428870ff 2227 return;
34dc7c2f 2228
3b7f360c
GW
2229
2230 VERIFY(txg <= spa_final_dirty_txg(spa));
2231
34dc7c2f
BB
2232 /*
2233 * The only state that can actually be changing concurrently with
d2734cce
SD
2234 * metaslab_sync() is the metaslab's ms_allocatable. No other
2235 * thread can be modifying this txg's alloc, freeing,
2236 * freed, or space_map_phys_t. We drop ms_lock whenever we
2237 * could call into the DMU, because the DMU can call down to us
a1d477c2
MA
2238 * (e.g. via zio_free()) at any time.
2239 *
2240 * The spa_vdev_remove_thread() can be reading metaslab state
2241 * concurrently, and it is locked out by the ms_sync_lock. Note
2242 * that the ms_lock is insufficient for this, because it is dropped
2243 * by space_map_write().
34dc7c2f 2244 */
428870ff 2245 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
34dc7c2f 2246
93cf2076
GW
2247 if (msp->ms_sm == NULL) {
2248 uint64_t new_object;
2249
d2734cce 2250 new_object = space_map_alloc(mos, zfs_metaslab_sm_blksz, tx);
93cf2076
GW
2251 VERIFY3U(new_object, !=, 0);
2252
2253 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
a1d477c2 2254 msp->ms_start, msp->ms_size, vd->vdev_ashift));
93cf2076 2255 ASSERT(msp->ms_sm != NULL);
34dc7c2f
BB
2256 }
2257
d2734cce
SD
2258 if (!range_tree_is_empty(msp->ms_checkpointing) &&
2259 vd->vdev_checkpoint_sm == NULL) {
2260 ASSERT(spa_has_checkpoint(spa));
2261
2262 uint64_t new_object = space_map_alloc(mos,
2263 vdev_standard_sm_blksz, tx);
2264 VERIFY3U(new_object, !=, 0);
2265
2266 VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
2267 mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
2268 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2269
2270 /*
2271 * We save the space map object as an entry in vdev_top_zap
2272 * so it can be retrieved when the pool is reopened after an
2273 * export or through zdb.
2274 */
2275 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
2276 vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
2277 sizeof (new_object), 1, &new_object, tx));
2278 }
2279
a1d477c2 2280 mutex_enter(&msp->ms_sync_lock);
428870ff
BB
2281 mutex_enter(&msp->ms_lock);
2282
96358617 2283 /*
4e21fd06
DB
2284 * Note: metaslab_condense() clears the space map's histogram.
2285 * Therefore we must verify and remove this histogram before
96358617
MA
2286 * condensing.
2287 */
2288 metaslab_group_histogram_verify(mg);
2289 metaslab_class_histogram_verify(mg->mg_class);
2290 metaslab_group_histogram_remove(mg, msp);
2291
d2734cce 2292 if (msp->ms_loaded && metaslab_should_condense(msp)) {
e51be066
GW
2293 metaslab_condense(msp, txg, tx);
2294 } else {
a1d477c2 2295 mutex_exit(&msp->ms_lock);
93cf2076 2296 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
d2734cce 2297 space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE, tx);
a1d477c2 2298 mutex_enter(&msp->ms_lock);
e51be066 2299 }
428870ff 2300
d2734cce
SD
2301 if (!range_tree_is_empty(msp->ms_checkpointing)) {
2302 ASSERT(spa_has_checkpoint(spa));
2303 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2304
2305 /*
2306 * Since we are doing writes to disk and the ms_checkpointing
2307 * tree won't be changing during that time, we drop the
2308 * ms_lock while writing to the checkpoint space map.
2309 */
2310 mutex_exit(&msp->ms_lock);
2311 space_map_write(vd->vdev_checkpoint_sm,
2312 msp->ms_checkpointing, SM_FREE, tx);
2313 mutex_enter(&msp->ms_lock);
2314 space_map_update(vd->vdev_checkpoint_sm);
2315
2316 spa->spa_checkpoint_info.sci_dspace +=
2317 range_tree_space(msp->ms_checkpointing);
2318 vd->vdev_stat.vs_checkpoint_space +=
2319 range_tree_space(msp->ms_checkpointing);
2320 ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
2321 -vd->vdev_checkpoint_sm->sm_alloc);
2322
2323 range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
2324 }
2325
93cf2076
GW
2326 if (msp->ms_loaded) {
2327 /*
a1d477c2 2328 * When the space map is loaded, we have an accurate
93cf2076
GW
2329 * histogram in the range tree. This gives us an opportunity
2330 * to bring the space map's histogram up-to-date so we clear
2331 * it first before updating it.
2332 */
2333 space_map_histogram_clear(msp->ms_sm);
d2734cce 2334 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
4e21fd06
DB
2335
2336 /*
2337 * Since we've cleared the histogram we need to add back
2338 * any free space that has already been processed, plus
2339 * any deferred space. This allows the on-disk histogram
2340 * to accurately reflect all free space even if some space
2341 * is not yet available for allocation (i.e. deferred).
2342 */
d2734cce 2343 space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
4e21fd06 2344
93cf2076 2345 /*
4e21fd06
DB
2346 * Add back any deferred free space that has not been
2347 * added back into the in-core free tree yet. This will
2348 * ensure that we don't end up with a space map histogram
2349 * that is completely empty unless the metaslab is fully
2350 * allocated.
93cf2076 2351 */
1c27024e 2352 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
4e21fd06 2353 space_map_histogram_add(msp->ms_sm,
d2734cce 2354 msp->ms_defer[t], tx);
4e21fd06 2355 }
93cf2076 2356 }
4e21fd06
DB
2357
2358 /*
2359 * Always add the free space from this sync pass to the space
2360 * map histogram. We want to make sure that the on-disk histogram
2361 * accounts for all free space. If the space map is not loaded,
2362 * then we will lose some accuracy but will correct it the next
2363 * time we load the space map.
2364 */
d2734cce 2365 space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
4e21fd06 2366
f3a7f661
GW
2367 metaslab_group_histogram_add(mg, msp);
2368 metaslab_group_histogram_verify(mg);
2369 metaslab_class_histogram_verify(mg->mg_class);
34dc7c2f 2370
e51be066 2371 /*
93cf2076 2372 * For sync pass 1, we avoid traversing this txg's free range tree
d2734cce
SD
2373 * and instead will just swap the pointers for freeing and
2374 * freed. We can safely do this since the freed_tree is
e51be066
GW
2375 * guaranteed to be empty on the initial pass.
2376 */
2377 if (spa_sync_pass(spa) == 1) {
d2734cce 2378 range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
e51be066 2379 } else {
d2734cce
SD
2380 range_tree_vacate(msp->ms_freeing,
2381 range_tree_add, msp->ms_freed);
34dc7c2f 2382 }
f3a7f661 2383 range_tree_vacate(alloctree, NULL, NULL);
34dc7c2f 2384
d2734cce
SD
2385 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
2386 ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
2387 & TXG_MASK]));
2388 ASSERT0(range_tree_space(msp->ms_freeing));
2389 ASSERT0(range_tree_space(msp->ms_checkpointing));
34dc7c2f
BB
2390
2391 mutex_exit(&msp->ms_lock);
2392
93cf2076
GW
2393 if (object != space_map_object(msp->ms_sm)) {
2394 object = space_map_object(msp->ms_sm);
2395 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
2396 msp->ms_id, sizeof (uint64_t), &object, tx);
2397 }
a1d477c2 2398 mutex_exit(&msp->ms_sync_lock);
34dc7c2f
BB
2399 dmu_tx_commit(tx);
2400}
2401
2402/*
2403 * Called after a transaction group has completely synced to mark
2404 * all of the metaslab's free space as usable.
2405 */
2406void
2407metaslab_sync_done(metaslab_t *msp, uint64_t txg)
2408{
34dc7c2f
BB
2409 metaslab_group_t *mg = msp->ms_group;
2410 vdev_t *vd = mg->mg_vd;
4e21fd06 2411 spa_t *spa = vd->vdev_spa;
93cf2076 2412 range_tree_t **defer_tree;
428870ff 2413 int64_t alloc_delta, defer_delta;
4e21fd06 2414 boolean_t defer_allowed = B_TRUE;
428870ff
BB
2415
2416 ASSERT(!vd->vdev_ishole);
34dc7c2f
BB
2417
2418 mutex_enter(&msp->ms_lock);
2419
2420 /*
2421 * If this metaslab is just becoming available, initialize its
258553d3 2422 * range trees and add its capacity to the vdev.
34dc7c2f 2423 */
d2734cce 2424 if (msp->ms_freed == NULL) {
1c27024e 2425 for (int t = 0; t < TXG_SIZE; t++) {
d2734cce 2426 ASSERT(msp->ms_allocating[t] == NULL);
93cf2076 2427
d2734cce 2428 msp->ms_allocating[t] = range_tree_create(NULL, NULL);
34dc7c2f 2429 }
428870ff 2430
d2734cce
SD
2431 ASSERT3P(msp->ms_freeing, ==, NULL);
2432 msp->ms_freeing = range_tree_create(NULL, NULL);
258553d3 2433
d2734cce
SD
2434 ASSERT3P(msp->ms_freed, ==, NULL);
2435 msp->ms_freed = range_tree_create(NULL, NULL);
258553d3 2436
1c27024e 2437 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
d2734cce 2438 ASSERT(msp->ms_defer[t] == NULL);
e51be066 2439
d2734cce 2440 msp->ms_defer[t] = range_tree_create(NULL, NULL);
93cf2076 2441 }
428870ff 2442
d2734cce
SD
2443 ASSERT3P(msp->ms_checkpointing, ==, NULL);
2444 msp->ms_checkpointing = range_tree_create(NULL, NULL);
2445
93cf2076 2446 vdev_space_update(vd, 0, 0, msp->ms_size);
34dc7c2f 2447 }
d2734cce
SD
2448 ASSERT0(range_tree_space(msp->ms_freeing));
2449 ASSERT0(range_tree_space(msp->ms_checkpointing));
34dc7c2f 2450
d2734cce 2451 defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
93cf2076 2452
1c27024e 2453 uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
4e21fd06 2454 metaslab_class_get_alloc(spa_normal_class(spa));
a1d477c2 2455 if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
4e21fd06
DB
2456 defer_allowed = B_FALSE;
2457 }
2458
2459 defer_delta = 0;
93cf2076 2460 alloc_delta = space_map_alloc_delta(msp->ms_sm);
4e21fd06 2461 if (defer_allowed) {
d2734cce 2462 defer_delta = range_tree_space(msp->ms_freed) -
4e21fd06
DB
2463 range_tree_space(*defer_tree);
2464 } else {
2465 defer_delta -= range_tree_space(*defer_tree);
2466 }
428870ff
BB
2467
2468 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
34dc7c2f 2469
34dc7c2f 2470 /*
93cf2076 2471 * If there's a metaslab_load() in progress, wait for it to complete
34dc7c2f 2472 * so that we have a consistent view of the in-core space map.
34dc7c2f 2473 */
93cf2076 2474 metaslab_load_wait(msp);
c2e42f9d
GW
2475
2476 /*
93cf2076 2477 * Move the frees from the defer_tree back to the free
d2734cce
SD
2478 * range tree (if it's loaded). Swap the freed_tree and
2479 * the defer_tree -- this is safe to do because we've
2480 * just emptied out the defer_tree.
c2e42f9d 2481 */
93cf2076 2482 range_tree_vacate(*defer_tree,
d2734cce 2483 msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
4e21fd06 2484 if (defer_allowed) {
d2734cce 2485 range_tree_swap(&msp->ms_freed, defer_tree);
4e21fd06 2486 } else {
d2734cce
SD
2487 range_tree_vacate(msp->ms_freed,
2488 msp->ms_loaded ? range_tree_add : NULL,
2489 msp->ms_allocatable);
4e21fd06 2490 }
93cf2076 2491 space_map_update(msp->ms_sm);
34dc7c2f 2492
428870ff
BB
2493 msp->ms_deferspace += defer_delta;
2494 ASSERT3S(msp->ms_deferspace, >=, 0);
93cf2076 2495 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
428870ff
BB
2496 if (msp->ms_deferspace != 0) {
2497 /*
2498 * Keep syncing this metaslab until all deferred frees
2499 * are back in circulation.
2500 */
2501 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2502 }
2503
4e21fd06
DB
2504 /*
2505 * Calculate the new weights before unloading any metaslabs.
2506 * This will give us the most accurate weighting.
2507 */
2508 metaslab_group_sort(mg, msp, metaslab_weight(msp));
2509
2510 /*
2511 * If the metaslab is loaded and we've not tried to load or allocate
2512 * from it in 'metaslab_unload_delay' txgs, then unload it.
2513 */
2514 if (msp->ms_loaded &&
2515 msp->ms_selected_txg + metaslab_unload_delay < txg) {
2516
1c27024e 2517 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
93cf2076 2518 VERIFY0(range_tree_space(
d2734cce 2519 msp->ms_allocating[(txg + t) & TXG_MASK]));
93cf2076 2520 }
34dc7c2f 2521
93cf2076
GW
2522 if (!metaslab_debug_unload)
2523 metaslab_unload(msp);
34dc7c2f
BB
2524 }
2525
d2734cce
SD
2526 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
2527 ASSERT0(range_tree_space(msp->ms_freeing));
2528 ASSERT0(range_tree_space(msp->ms_freed));
2529 ASSERT0(range_tree_space(msp->ms_checkpointing));
a1d477c2 2530
34dc7c2f
BB
2531 mutex_exit(&msp->ms_lock);
2532}
2533
428870ff
BB
2534void
2535metaslab_sync_reassess(metaslab_group_t *mg)
2536{
a1d477c2
MA
2537 spa_t *spa = mg->mg_class->mc_spa;
2538
2539 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
1be627f5 2540 metaslab_group_alloc_update(mg);
f3a7f661 2541 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
6d974228 2542
428870ff 2543 /*
a1d477c2
MA
2544 * Preload the next potential metaslabs but only on active
2545 * metaslab groups. We can get into a state where the metaslab
2546 * is no longer active since we dirty metaslabs as we remove a
2547 * device, thus potentially making the metaslab group eligible
2548 * for preloading.
428870ff 2549 */
a1d477c2
MA
2550 if (mg->mg_activation_count > 0) {
2551 metaslab_group_preload(mg);
2552 }
2553 spa_config_exit(spa, SCL_ALLOC, FTAG);
428870ff
BB
2554}
2555
34dc7c2f
BB
2556static uint64_t
2557metaslab_distance(metaslab_t *msp, dva_t *dva)
2558{
2559 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
2560 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
93cf2076 2561 uint64_t start = msp->ms_id;
34dc7c2f
BB
2562
2563 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
2564 return (1ULL << 63);
2565
2566 if (offset < start)
2567 return ((start - offset) << ms_shift);
2568 if (offset > start)
2569 return ((offset - start) << ms_shift);
2570 return (0);
2571}
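For instance (hypothetical vdev geometry):
/*
 * With vdev_ms_shift = 34 (16G metaslabs), a DVA that lands in metaslab 7
 * of the same vdev is at distance 0 from metaslab 7, (10 - 7) << 34 = 48G
 * from metaslab 10, and 1ULL << 63 from every metaslab of any other vdev.
 */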
2572
4e21fd06
DB
2573/*
2574 * ==========================================================================
2575 * Metaslab allocation tracing facility
2576 * ==========================================================================
2577 */
2578#ifdef _METASLAB_TRACING
2579kstat_t *metaslab_trace_ksp;
2580kstat_named_t metaslab_trace_over_limit;
2581
2582void
2583metaslab_alloc_trace_init(void)
2584{
2585 ASSERT(metaslab_alloc_trace_cache == NULL);
2586 metaslab_alloc_trace_cache = kmem_cache_create(
2587 "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
2588 0, NULL, NULL, NULL, NULL, NULL, 0);
2589 metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
2590 "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
2591 if (metaslab_trace_ksp != NULL) {
2592 metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
2593 kstat_named_init(&metaslab_trace_over_limit,
2594 "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
2595 kstat_install(metaslab_trace_ksp);
2596 }
2597}
2598
2599void
2600metaslab_alloc_trace_fini(void)
2601{
2602 if (metaslab_trace_ksp != NULL) {
2603 kstat_delete(metaslab_trace_ksp);
2604 metaslab_trace_ksp = NULL;
2605 }
2606 kmem_cache_destroy(metaslab_alloc_trace_cache);
2607 metaslab_alloc_trace_cache = NULL;
2608}
2609
2610/*
2611 * Add an allocation trace element to the allocation tracing list.
2612 */
2613static void
2614metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
2615 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset)
2616{
2617 metaslab_alloc_trace_t *mat;
2618
2619 if (!metaslab_trace_enabled)
2620 return;
2621
2622 /*
2623 * When the tracing list reaches its maximum we remove
2624 * the second element in the list before adding a new one.
2625 * By removing the second element we preserve the original
2626 * entry as a clue to what allocation steps have already been
2627 * performed.
2628 */
2629 if (zal->zal_size == metaslab_trace_max_entries) {
2630 metaslab_alloc_trace_t *mat_next;
2631#ifdef DEBUG
2632 panic("too many entries in allocation list");
2633#endif
2634 atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
2635 zal->zal_size--;
2636 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
2637 list_remove(&zal->zal_list, mat_next);
2638 kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
2639 }
2640
2641 mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
2642 list_link_init(&mat->mat_list_node);
2643 mat->mat_mg = mg;
2644 mat->mat_msp = msp;
2645 mat->mat_size = psize;
2646 mat->mat_dva_id = dva_id;
2647 mat->mat_offset = offset;
2648 mat->mat_weight = 0;
2649
2650 if (msp != NULL)
2651 mat->mat_weight = msp->ms_weight;
2652
2653 /*
2654 * The list is part of the zio so locking is not required. Only
2655 * a single thread will perform allocations for a given zio.
2656 */
2657 list_insert_tail(&zal->zal_list, mat);
2658 zal->zal_size++;
2659
2660 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
2661}
2662
2663void
2664metaslab_trace_init(zio_alloc_list_t *zal)
2665{
2666 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
2667 offsetof(metaslab_alloc_trace_t, mat_list_node));
2668 zal->zal_size = 0;
2669}
2670
2671void
2672metaslab_trace_fini(zio_alloc_list_t *zal)
2673{
2674 metaslab_alloc_trace_t *mat;
2675
2676 while ((mat = list_remove_head(&zal->zal_list)) != NULL)
2677 kmem_cache_free(metaslab_alloc_trace_cache, mat);
2678 list_destroy(&zal->zal_list);
2679 zal->zal_size = 0;
2680}
2681#else
2682
2683#define metaslab_trace_add(zal, mg, msp, psize, id, off)
2684
2685void
2686metaslab_alloc_trace_init(void)
2687{
2688}
2689
2690void
2691metaslab_alloc_trace_fini(void)
2692{
2693}
2694
2695void
2696metaslab_trace_init(zio_alloc_list_t *zal)
2697{
2698}
2699
2700void
2701metaslab_trace_fini(zio_alloc_list_t *zal)
2702{
2703}
2704
2705#endif /* _METASLAB_TRACING */
2706
3dfb57a3
DB
2707/*
2708 * ==========================================================================
2709 * Metaslab block operations
2710 * ==========================================================================
2711 */
2712
2713static void
2714metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags)
2715{
3dfb57a3
DB
2716 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2717 flags & METASLAB_DONT_THROTTLE)
2718 return;
2719
1c27024e 2720 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3dfb57a3
DB
2721 if (!mg->mg_class->mc_alloc_throttle_enabled)
2722 return;
2723
2724 (void) refcount_add(&mg->mg_alloc_queue_depth, tag);
2725}
2726
2727void
2728metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags)
2729{
3dfb57a3
DB
2730 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2731 flags & METASLAB_DONT_THROTTLE)
2732 return;
2733
1c27024e 2734 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3dfb57a3
DB
2735 if (!mg->mg_class->mc_alloc_throttle_enabled)
2736 return;
2737
2738 (void) refcount_remove(&mg->mg_alloc_queue_depth, tag);
2739}
2740
2741void
2742metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag)
2743{
2744#ifdef ZFS_DEBUG
2745 const dva_t *dva = bp->blk_dva;
2746 int ndvas = BP_GET_NDVAS(bp);
3dfb57a3 2747
1c27024e 2748 for (int d = 0; d < ndvas; d++) {
3dfb57a3
DB
2749 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
2750 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2751 VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag));
2752 }
2753#endif
2754}
2755
34dc7c2f 2756static uint64_t
4e21fd06
DB
2757metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
2758{
2759 uint64_t start;
d2734cce 2760 range_tree_t *rt = msp->ms_allocatable;
4e21fd06
DB
2761 metaslab_class_t *mc = msp->ms_group->mg_class;
2762
2763 VERIFY(!msp->ms_condensing);
2764
2765 start = mc->mc_ops->msop_alloc(msp, size);
2766 if (start != -1ULL) {
2767 metaslab_group_t *mg = msp->ms_group;
2768 vdev_t *vd = mg->mg_vd;
2769
2770 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
2771 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2772 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
2773 range_tree_remove(rt, start, size);
2774
d2734cce 2775 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
4e21fd06
DB
2776 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
2777
d2734cce 2778 range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
4e21fd06
DB
2779
2780 /* Track the last successful allocation */
2781 msp->ms_alloc_txg = txg;
2782 metaslab_verify_space(msp, txg);
2783 }
2784
2785 /*
2786 * Now that we've attempted the allocation we need to update the
2787 * metaslab's maximum block size since it may have changed.
2788 */
2789 msp->ms_max_size = metaslab_block_maxsize(msp);
2790 return (start);
2791}
2792
2793static uint64_t
2794metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
2795 uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
34dc7c2f
BB
2796{
2797 metaslab_t *msp = NULL;
2798 uint64_t offset = -1ULL;
34dc7c2f
BB
2799 uint64_t activation_weight;
2800 uint64_t target_distance;
2801 int i;
2802
2803 activation_weight = METASLAB_WEIGHT_PRIMARY;
9babb374
BB
2804 for (i = 0; i < d; i++) {
2805 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
34dc7c2f 2806 activation_weight = METASLAB_WEIGHT_SECONDARY;
9babb374
BB
2807 break;
2808 }
2809 }
34dc7c2f 2810
1c27024e 2811 metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
4e21fd06
DB
2812 search->ms_weight = UINT64_MAX;
2813 search->ms_start = 0;
34dc7c2f 2814 for (;;) {
9babb374 2815 boolean_t was_active;
4e21fd06
DB
2816 avl_tree_t *t = &mg->mg_metaslab_tree;
2817 avl_index_t idx;
9babb374 2818
34dc7c2f 2819 mutex_enter(&mg->mg_lock);
4e21fd06
DB
2820
2821 /*
2822 * Find the metaslab with the highest weight that is less
2823 * than what we've already tried. In the common case, this
2824 * means that we will examine each metaslab at most once.
2825 * Note that concurrent callers could reorder metaslabs
2826 * by activation/passivation once we have dropped the mg_lock.
2827 * If a metaslab is activated by another thread, and we fail
2828 * to allocate from the metaslab we have selected, we may
2829 * not try the newly-activated metaslab, and instead activate
2830 * another metaslab. This is not optimal, but generally
2831 * does not cause any problems (a possible exception being
2832 * if every metaslab is completely full except for the
2833 * newly-activated metaslab which we fail to examine).
2834 */
2835 msp = avl_find(t, search, &idx);
2836 if (msp == NULL)
2837 msp = avl_nearest(t, idx, AVL_AFTER);
2838 for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
2839
2840 if (!metaslab_should_allocate(msp, asize)) {
2841 metaslab_trace_add(zal, mg, msp, asize, d,
2842 TRACE_TOO_SMALL);
2843 continue;
34dc7c2f 2844 }
7a614407
GW
2845
2846 /*
2847 * If the selected metaslab is condensing, skip it.
2848 */
93cf2076 2849 if (msp->ms_condensing)
7a614407
GW
2850 continue;
2851
9babb374 2852 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
34dc7c2f
BB
2853 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
2854 break;
2855
2856 target_distance = min_distance +
93cf2076
GW
2857 (space_map_allocated(msp->ms_sm) != 0 ? 0 :
2858 min_distance >> 1);
34dc7c2f 2859
4e21fd06 2860 for (i = 0; i < d; i++) {
34dc7c2f
BB
2861 if (metaslab_distance(msp, &dva[i]) <
2862 target_distance)
2863 break;
4e21fd06 2864 }
34dc7c2f
BB
2865 if (i == d)
2866 break;
2867 }
2868 mutex_exit(&mg->mg_lock);
4e21fd06
DB
2869 if (msp == NULL) {
2870 kmem_free(search, sizeof (*search));
34dc7c2f 2871 return (-1ULL);
4e21fd06
DB
2872 }
2873 search->ms_weight = msp->ms_weight;
2874 search->ms_start = msp->ms_start + 1;
34dc7c2f 2875
ac72fac3
GW
2876 mutex_enter(&msp->ms_lock);
2877
34dc7c2f
BB
2878 /*
2879 * Ensure that the metaslab we have selected is still
2880 * capable of handling our request. It's possible that
2881 * another thread may have changed the weight while we
4e21fd06
DB
2882 * were blocked on the metaslab lock. We check the
2883 * active status first to see if we need to reselect
2884 * a new metaslab.
34dc7c2f 2885 */
4e21fd06 2886 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
34dc7c2f
BB
2887 mutex_exit(&msp->ms_lock);
2888 continue;
2889 }
2890
2891 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
2892 activation_weight == METASLAB_WEIGHT_PRIMARY) {
2893 metaslab_passivate(msp,
2894 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
2895 mutex_exit(&msp->ms_lock);
2896 continue;
2897 }
2898
6d974228 2899 if (metaslab_activate(msp, activation_weight) != 0) {
34dc7c2f
BB
2900 mutex_exit(&msp->ms_lock);
2901 continue;
2902 }
4e21fd06
DB
2903 msp->ms_selected_txg = txg;
2904
2905 /*
2906 * Now that we have the lock, recheck to see if we should
2907 * continue to use this metaslab for this allocation. The
2908 * metaslab is now loaded so metaslab_should_allocate() can
2909 * accurately determine if the allocation attempt should
2910 * proceed.
2911 */
2912 if (!metaslab_should_allocate(msp, asize)) {
2913 /* Passivate this metaslab and select a new one. */
2914 metaslab_trace_add(zal, mg, msp, asize, d,
2915 TRACE_TOO_SMALL);
2916 goto next;
2917 }
2918
34dc7c2f 2919
7a614407
GW
2920 /*
2921 * If this metaslab is currently condensing then pick again as
2922 * we can't manipulate this metaslab until it's committed
2923 * to disk.
2924 */
93cf2076 2925 if (msp->ms_condensing) {
4e21fd06
DB
2926 metaslab_trace_add(zal, mg, msp, asize, d,
2927 TRACE_CONDENSING);
7a614407
GW
2928 mutex_exit(&msp->ms_lock);
2929 continue;
2930 }
2931
4e21fd06
DB
2932 offset = metaslab_block_alloc(msp, asize, txg);
2933 metaslab_trace_add(zal, mg, msp, asize, d, offset);
2934
2935 if (offset != -1ULL) {
2936 /* Proactively passivate the metaslab, if needed */
2937 metaslab_segment_may_passivate(msp);
34dc7c2f 2938 break;
4e21fd06
DB
2939 }
2940next:
2941 ASSERT(msp->ms_loaded);
2942
2943 /*
2944 * We were unable to allocate from this metaslab so determine
2945 * a new weight for this metaslab. Now that we have loaded
2946 * the metaslab we can provide a better hint to the metaslab
2947 * selector.
2948 *
2949 * For space-based metaslabs, we use the maximum block size.
2950 * This information is only available when the metaslab
2951 * is loaded and is more accurate than the generic free
2952 * space weight that was calculated by metaslab_weight().
2953 * This information allows us to quickly compare the maximum
2954 * available allocation in the metaslab to the allocation
2955 * size being requested.
2956 *
2957 * For segment-based metaslabs, determine the new weight
2958 * based on the highest bucket in the range tree. We
2959 * explicitly use the loaded segment weight (i.e. the range
2960 * tree histogram) since it contains the space that is
2961 * currently available for allocation and is accurate
2962 * even within a sync pass.
2963 */
2964 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
2965 uint64_t weight = metaslab_block_maxsize(msp);
2966 WEIGHT_SET_SPACEBASED(weight);
2967 metaslab_passivate(msp, weight);
2968 } else {
2969 metaslab_passivate(msp,
2970 metaslab_weight_from_range_tree(msp));
2971 }
34dc7c2f 2972
4e21fd06
DB
2973 /*
2974 * We have just failed an allocation attempt, check
2975 * that metaslab_should_allocate() agrees. Otherwise,
2976 * we may end up in an infinite loop retrying the same
2977 * metaslab.
2978 */
2979 ASSERT(!metaslab_should_allocate(msp, asize));
34dc7c2f
BB
2980 mutex_exit(&msp->ms_lock);
2981 }
4e21fd06
DB
2982 mutex_exit(&msp->ms_lock);
2983 kmem_free(search, sizeof (*search));
2984 return (offset);
2985}
34dc7c2f 2986
4e21fd06
DB
2987static uint64_t
2988metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
2989 uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
2990{
2991 uint64_t offset;
2992 ASSERT(mg->mg_initialized);
34dc7c2f 2993
4e21fd06
DB
2994 offset = metaslab_group_alloc_normal(mg, zal, asize, txg,
2995 min_distance, dva, d);
34dc7c2f 2996
4e21fd06
DB
2997 mutex_enter(&mg->mg_lock);
2998 if (offset == -1ULL) {
2999 mg->mg_failed_allocations++;
3000 metaslab_trace_add(zal, mg, NULL, asize, d,
3001 TRACE_GROUP_FAILURE);
3002 if (asize == SPA_GANGBLOCKSIZE) {
3003 /*
3004 * This metaslab group was unable to allocate
3005 * the minimum gang block size so it must be out of
3006 * space. We must notify the allocation throttle
3007 * to start skipping allocation attempts to this
3008 * metaslab group until more space becomes available.
3009 * Note: this failure cannot be caused by the
3010 * allocation throttle since the allocation throttle
3011 * is only responsible for skipping devices and
3012 * not failing block allocations.
3013 */
3014 mg->mg_no_free_space = B_TRUE;
3015 }
3016 }
3017 mg->mg_allocations++;
3018 mutex_exit(&mg->mg_lock);
34dc7c2f
BB
3019 return (offset);
3020}
3021
4e21fd06
DB
3022/*
3023 * If we have to write a ditto block (i.e. more than one DVA for a given BP)
3024 * on the same vdev as an existing DVA of this BP, then try to allocate it
3025 * at least (vdev_asize / (2 ^ ditto_same_vdev_distance_shift)) away from the
3026 * existing DVAs.
3027 */
3028int ditto_same_vdev_distance_shift = 3;
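For example (hypothetical vdev size):
/*
 * On a 2T top-level vdev the default shift of 3 requests DVAs at least
 * 2T >> 3 = 256G apart; the distance is dropped to 0 when it would not
 * exceed a single metaslab, or when we are retrying with try_hard.
 */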
3029
34dc7c2f
BB
3030/*
3031 * Allocate a block for the specified i/o.
3032 */
a1d477c2 3033int
34dc7c2f 3034metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
4e21fd06
DB
3035 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
3036 zio_alloc_list_t *zal)
34dc7c2f 3037{
920dd524 3038 metaslab_group_t *mg, *fast_mg, *rotor;
34dc7c2f 3039 vdev_t *vd;
4e21fd06 3040 boolean_t try_hard = B_FALSE;
34dc7c2f
BB
3041
3042 ASSERT(!DVA_IS_VALID(&dva[d]));
3043
3044 /*
3045 * For testing, make some blocks above a certain size be gang blocks.
3046 */
d830d479 3047 if (psize >= metaslab_force_ganging && (ddi_get_lbolt() & 3) == 0) {
4e21fd06 3048 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG);
2e528b49 3049 return (SET_ERROR(ENOSPC));
4e21fd06 3050 }
34dc7c2f
BB
3051
3052 /*
3053 * Start at the rotor and loop through all mgs until we find something.
428870ff 3054 * Note that there's no locking on mc_rotor or mc_aliquot because
34dc7c2f
BB
3055 * nothing actually breaks if we miss a few updates -- we just won't
3056 * allocate quite as evenly. It all balances out over time.
3057 *
3058 * If we are doing ditto or log blocks, try to spread them across
3059 * consecutive vdevs. If we're forced to reuse a vdev before we've
3060 * allocated all of our ditto blocks, then try and spread them out on
3061 * that vdev as much as possible. If it turns out to not be possible,
3062 * gradually lower our standards until anything becomes acceptable.
3063 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
3064 * gives us hope of containing our fault domains to something we're
3065 * able to reason about. Otherwise, any two top-level vdev failures
3066 * will guarantee the loss of data. With consecutive allocation,
3067 * only two adjacent top-level vdev failures will result in data loss.
3068 *
3069 * If we are doing gang blocks (hintdva is non-NULL), try to keep
3070 * ourselves on the same vdev as our gang block header. That
3071 * way, we can hope for locality in vdev_cache, plus it makes our
3072 * fault domains something tractable.
3073 */
3074 if (hintdva) {
3075 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
428870ff
BB
3076
3077 /*
3078 * It's possible the vdev we're using as the hint no
a1d477c2
MA
3079 * longer exists or its mg has been closed (e.g. by
3080 * device removal). Consult the rotor when
428870ff
BB
3081 * all else fails.
3082 */
a1d477c2 3083 if (vd != NULL && vd->vdev_mg != NULL) {
34dc7c2f 3084 mg = vd->vdev_mg;
428870ff
BB
3085
3086 if (flags & METASLAB_HINTBP_AVOID &&
3087 mg->mg_next != NULL)
3088 mg = mg->mg_next;
3089 } else {
3090 mg = mc->mc_rotor;
3091 }
34dc7c2f
BB
3092 } else if (d != 0) {
3093 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
3094 mg = vd->vdev_mg->mg_next;
920dd524
ED
3095 } else if (flags & METASLAB_FASTWRITE) {
3096 mg = fast_mg = mc->mc_rotor;
3097
3098 do {
3099 if (fast_mg->mg_vd->vdev_pending_fastwrite <
3100 mg->mg_vd->vdev_pending_fastwrite)
3101 mg = fast_mg;
3102 } while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);
3103
34dc7c2f
BB
3104 } else {
3105 mg = mc->mc_rotor;
3106 }
3107
3108 /*
428870ff
BB
3109 * If the hint put us into the wrong metaslab class, or into a
3110 * metaslab group that has been passivated, just follow the rotor.
34dc7c2f 3111 */
428870ff 3112 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
34dc7c2f
BB
3113 mg = mc->mc_rotor;
3114
3115 rotor = mg;
3116top:
34dc7c2f 3117 do {
4e21fd06 3118 boolean_t allocatable;
428870ff 3119
3dfb57a3 3120 ASSERT(mg->mg_activation_count == 1);
34dc7c2f 3121 vd = mg->mg_vd;
fb5f0bc8 3122
34dc7c2f 3123 /*
b128c09f 3124 * Don't allocate from faulted devices.
34dc7c2f 3125 */
4e21fd06 3126 if (try_hard) {
fb5f0bc8
BB
3127 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
3128 allocatable = vdev_allocatable(vd);
3129 spa_config_exit(spa, SCL_ZIO, FTAG);
3130 } else {
3131 allocatable = vdev_allocatable(vd);
3132 }
ac72fac3
GW
3133
3134 /*
3135 * Determine if the selected metaslab group is eligible
3dfb57a3
DB
3136 * for allocations. If we're ganging then don't allow
3137 * this metaslab group to skip allocations since that would
3138 * inadvertently return ENOSPC and suspend the pool
ac72fac3
GW
3139 * even though space is still available.
3140 */
4e21fd06 3141 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
3dfb57a3
DB
3142 allocatable = metaslab_group_allocatable(mg, rotor,
3143 psize);
3144 }
ac72fac3 3145
4e21fd06
DB
3146 if (!allocatable) {
3147 metaslab_trace_add(zal, mg, NULL, psize, d,
3148 TRACE_NOT_ALLOCATABLE);
34dc7c2f 3149 goto next;
4e21fd06 3150 }
fb5f0bc8 3151
3dfb57a3
DB
3152 ASSERT(mg->mg_initialized);
3153
34dc7c2f 3154 /*
4e21fd06
DB
3155 * Avoid writing single-copy data to a failing,
3156 * non-redundant vdev, unless we've already tried all
3157 * other vdevs.
34dc7c2f
BB
3158 */
3159 if ((vd->vdev_stat.vs_write_errors > 0 ||
3160 vd->vdev_state < VDEV_STATE_HEALTHY) &&
4e21fd06
DB
3161 d == 0 && !try_hard && vd->vdev_children == 0) {
3162 metaslab_trace_add(zal, mg, NULL, psize, d,
3163 TRACE_VDEV_ERROR);
34dc7c2f
BB
3164 goto next;
3165 }
3166
3167 ASSERT(mg->mg_class == mc);
3168
4e21fd06
DB
3169 /*
3170 * If we don't need to try hard, then require that the
3171 * block be 1/8th of the device away from any other DVAs
3172 * in this BP. If we are trying hard, allow any offset
3173 * to be used (distance=0).
3174 */
1c27024e 3175 uint64_t distance = 0;
4e21fd06
DB
3176 if (!try_hard) {
3177 distance = vd->vdev_asize >>
3178 ditto_same_vdev_distance_shift;
3179 if (distance <= (1ULL << vd->vdev_ms_shift))
3180 distance = 0;
3181 }
34dc7c2f 3182
1c27024e 3183 uint64_t asize = vdev_psize_to_asize(vd, psize);
34dc7c2f
BB
3184 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
3185
1c27024e
DB
3186 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
3187 distance, dva, d);
3dfb57a3 3188
34dc7c2f
BB
3189 if (offset != -1ULL) {
3190 /*
3191 * If we've just selected this metaslab group,
3192 * figure out whether the corresponding vdev is
3193 * over- or under-used relative to the pool,
3194 * and set an allocation bias to even it out.
bb3250d0
ED
3195 *
3196 * Bias is also used to compensate for unequally
3197 * sized vdevs so that space is allocated fairly.
34dc7c2f 3198 */
f3a7f661 3199 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
34dc7c2f 3200 vdev_stat_t *vs = &vd->vdev_stat;
bb3250d0
ED
3201 int64_t vs_free = vs->vs_space - vs->vs_alloc;
3202 int64_t mc_free = mc->mc_space - mc->mc_alloc;
3203 int64_t ratio;
34dc7c2f
BB
3204
3205 /*
6d974228
GW
3206 * Calculate how much more or less we should
3207 * try to allocate from this device during
3208 * this iteration around the rotor.
6d974228 3209 *
bb3250d0
ED
3210 * This basically introduces a zero-centered
3211 * bias towards the devices with the most
3212 * free space, while compensating for vdev
3213 * size differences.
3214 *
3215 * Examples:
3216 * vdev V1 = 16M/128M
3217 * vdev V2 = 16M/128M
3218 * ratio(V1) = 100% ratio(V2) = 100%
3219 *
3220 * vdev V1 = 16M/128M
3221 * vdev V2 = 64M/128M
3222 * ratio(V1) = 127% ratio(V2) = 72%
6d974228 3223 *
bb3250d0
ED
3224 * vdev V1 = 16M/128M
3225 * vdev V2 = 64M/512M
3226 * ratio(V1) = 40% ratio(V2) = 160%
34dc7c2f 3227 */
bb3250d0
ED
3228 ratio = (vs_free * mc->mc_alloc_groups * 100) /
3229 (mc_free + 1);
3230 mg->mg_bias = ((ratio - 100) *
6d974228 3231 (int64_t)mg->mg_aliquot) / 100;
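/*
 * Editor's note: working the last example above through this formula
 * (values illustrative, and ignoring the "+ 1" divide-by-zero guard):
 * vs_free(V1) = 128M - 16M = 112M, vs_free(V2) = 512M - 64M = 448M,
 * mc_free = 560M, mc_alloc_groups = 2, so
 * ratio(V1) = 112 * 2 * 100 / 560 = 40% and
 * ratio(V2) = 448 * 2 * 100 / 560 = 160%, matching the comment.
 * A ratio above 100% yields a positive mg_bias (allocate more from this
 * group per pass); below 100% yields a negative bias.
 */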
f3a7f661
GW
3232 } else if (!metaslab_bias_enabled) {
3233 mg->mg_bias = 0;
34dc7c2f
BB
3234 }
3235
920dd524
ED
3236 if ((flags & METASLAB_FASTWRITE) ||
3237 atomic_add_64_nv(&mc->mc_aliquot, asize) >=
34dc7c2f
BB
3238 mg->mg_aliquot + mg->mg_bias) {
3239 mc->mc_rotor = mg->mg_next;
428870ff 3240 mc->mc_aliquot = 0;
34dc7c2f
BB
3241 }
3242
3243 DVA_SET_VDEV(&dva[d], vd->vdev_id);
3244 DVA_SET_OFFSET(&dva[d], offset);
e3e7cf60
D
3245 DVA_SET_GANG(&dva[d],
3246 ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
34dc7c2f
BB
3247 DVA_SET_ASIZE(&dva[d], asize);
3248
920dd524
ED
3249 if (flags & METASLAB_FASTWRITE) {
3250 atomic_add_64(&vd->vdev_pending_fastwrite,
3251 psize);
920dd524
ED
3252 }
3253
34dc7c2f
BB
3254 return (0);
3255 }
3256next:
3257 mc->mc_rotor = mg->mg_next;
428870ff 3258 mc->mc_aliquot = 0;
34dc7c2f
BB
3259 } while ((mg = mg->mg_next) != rotor);
3260
4e21fd06
DB
3261 /*
3262 * If we haven't tried hard, do so now.
3263 */
3264 if (!try_hard) {
3265 try_hard = B_TRUE;
fb5f0bc8
BB
3266 goto top;
3267 }
3268
34dc7c2f
BB
3269 bzero(&dva[d], sizeof (dva_t));
3270
4e21fd06 3271 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC);
2e528b49 3272 return (SET_ERROR(ENOSPC));
34dc7c2f
BB
3273}
3274
a1d477c2
MA
3275void
3276metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
d2734cce 3277 boolean_t checkpoint)
a1d477c2
MA
3278{
3279 metaslab_t *msp;
d2734cce 3280 spa_t *spa = vd->vdev_spa;
a1d477c2 3281
a1d477c2
MA
3282 ASSERT(vdev_is_concrete(vd));
3283 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3284 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
3285
3286 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3287
3288 VERIFY(!msp->ms_condensing);
3289 VERIFY3U(offset, >=, msp->ms_start);
3290 VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
3291 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3292 VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
3293
3294 metaslab_check_free_impl(vd, offset, asize);
d2734cce 3295
a1d477c2 3296 mutex_enter(&msp->ms_lock);
d2734cce
SD
3297 if (range_tree_is_empty(msp->ms_freeing) &&
3298 range_tree_is_empty(msp->ms_checkpointing)) {
3299 vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
3300 }
3301
3302 if (checkpoint) {
3303 ASSERT(spa_has_checkpoint(spa));
3304 range_tree_add(msp->ms_checkpointing, offset, asize);
3305 } else {
3306 range_tree_add(msp->ms_freeing, offset, asize);
a1d477c2 3307 }
a1d477c2
MA
3308 mutex_exit(&msp->ms_lock);
3309}
3310
3311/* ARGSUSED */
3312void
3313metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3314 uint64_t size, void *arg)
3315{
d2734cce
SD
3316 boolean_t *checkpoint = arg;
3317
3318 ASSERT3P(checkpoint, !=, NULL);
a1d477c2
MA
3319
3320 if (vd->vdev_ops->vdev_op_remap != NULL)
d2734cce 3321 vdev_indirect_mark_obsolete(vd, offset, size);
a1d477c2 3322 else
d2734cce 3323 metaslab_free_impl(vd, offset, size, *checkpoint);
a1d477c2
MA
3324}
3325
3326static void
3327metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
d2734cce 3328 boolean_t checkpoint)
a1d477c2
MA
3329{
3330 spa_t *spa = vd->vdev_spa;
3331
3332 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3333
d2734cce 3334 if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
a1d477c2
MA
3335 return;
3336
3337 if (spa->spa_vdev_removal != NULL &&
9e052db4 3338 spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
a1d477c2
MA
3339 vdev_is_concrete(vd)) {
3340 /*
3341 * Note: we check if the vdev is concrete because when
3342 * we complete the removal, we first change the vdev to be
3343 * an indirect vdev (in open context), and then (in syncing
3344 * context) clear spa_vdev_removal.
3345 */
d2734cce 3346 free_from_removing_vdev(vd, offset, size);
a1d477c2 3347 } else if (vd->vdev_ops->vdev_op_remap != NULL) {
d2734cce 3348 vdev_indirect_mark_obsolete(vd, offset, size);
a1d477c2 3349 vd->vdev_ops->vdev_op_remap(vd, offset, size,
d2734cce 3350 metaslab_free_impl_cb, &checkpoint);
a1d477c2 3351 } else {
d2734cce 3352 metaslab_free_concrete(vd, offset, size, checkpoint);
a1d477c2
MA
3353 }
3354}
3355
3356typedef struct remap_blkptr_cb_arg {
3357 blkptr_t *rbca_bp;
3358 spa_remap_cb_t rbca_cb;
3359 vdev_t *rbca_remap_vd;
3360 uint64_t rbca_remap_offset;
3361 void *rbca_cb_arg;
3362} remap_blkptr_cb_arg_t;
3363
3364void
3365remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3366 uint64_t size, void *arg)
3367{
3368 remap_blkptr_cb_arg_t *rbca = arg;
3369 blkptr_t *bp = rbca->rbca_bp;
3370
3371 /* We cannot remap split blocks. */
3372 if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
3373 return;
3374 ASSERT0(inner_offset);
3375
3376 if (rbca->rbca_cb != NULL) {
3377 /*
3378 * At this point we know that we are not handling split
3379 * blocks and we invoke the callback on the previous
3380 * vdev which must be indirect.
3381 */
3382 ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
3383
3384 rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
3385 rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
3386
3387 /* set up remap_blkptr_cb_arg for the next call */
3388 rbca->rbca_remap_vd = vd;
3389 rbca->rbca_remap_offset = offset;
3390 }
3391
3392 /*
3393 * The phys birth time is that of dva[0]. This ensures that we know
3394 * when each dva was written, so that resilver can determine which
3395 * blocks need to be scrubbed (i.e. those written during the time
3396 * the vdev was offline). It also ensures that the key used in
3397 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If
3398 * we didn't change the phys_birth, a lookup in the ARC for a
3399 * remapped BP could find the data that was previously stored at
3400 * this vdev + offset.
3401 */
3402 vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
3403 DVA_GET_VDEV(&bp->blk_dva[0]));
3404 vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
3405 bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
3406 DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
3407
3408 DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
3409 DVA_SET_OFFSET(&bp->blk_dva[0], offset);
3410}
3411
34dc7c2f 3412/*
a1d477c2
MA
3413 * If the block pointer contains any indirect DVAs, modify them to refer to
3414 * concrete DVAs. Note that this will sometimes not be possible, leaving
3415 * the indirect DVA in place. This happens if the indirect DVA spans multiple
3416 * segments in the mapping (i.e. it is a "split block").
3417 *
3418 * If the BP was remapped, the callback is invoked on the original dva
3419 * (note that the callback can be called multiple times if the original
3420 * indirect DVA refers to another indirect DVA, and so on).
3421 *
3422 * Returns TRUE if the BP was remapped.
34dc7c2f 3423 */
a1d477c2
MA
3424boolean_t
3425spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
34dc7c2f 3426{
a1d477c2
MA
3427 remap_blkptr_cb_arg_t rbca;
3428
3429 if (!zfs_remap_blkptr_enable)
3430 return (B_FALSE);
3431
3432 if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
3433 return (B_FALSE);
3434
3435 /*
3437 * Dedup BPs cannot be remapped, because ddt_phys_select() depends
3437 * on DVA[0] being the same in the BP as in the DDT (dedup table).
3438 */
3439 if (BP_GET_DEDUP(bp))
3440 return (B_FALSE);
3441
3442 /*
3443 * Gang blocks cannot be remapped, because
3444 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
3445 * the BP used to read the gang block header (GBH) being the same
3446 * as the DVA[0] that we allocated for the GBH.
3447 */
3448 if (BP_IS_GANG(bp))
3449 return (B_FALSE);
3450
3451 /*
3452 * Embedded BPs have no DVA to remap.
3453 */
3454 if (BP_GET_NDVAS(bp) < 1)
3455 return (B_FALSE);
3456
3457 /*
3458 * Note: we only remap dva[0]. If we remapped other dvas, we
3459 * would no longer know what their phys birth txg is.
3460 */
3461 dva_t *dva = &bp->blk_dva[0];
3462
34dc7c2f
BB
3463 uint64_t offset = DVA_GET_OFFSET(dva);
3464 uint64_t size = DVA_GET_ASIZE(dva);
a1d477c2
MA
3465 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
3466
3467 if (vd->vdev_ops->vdev_op_remap == NULL)
3468 return (B_FALSE);
3469
3470 rbca.rbca_bp = bp;
3471 rbca.rbca_cb = callback;
3472 rbca.rbca_remap_vd = vd;
3473 rbca.rbca_remap_offset = offset;
3474 rbca.rbca_cb_arg = arg;
3475
3476 /*
3477 * remap_blkptr_cb() will be called in order for each level of
3478 * indirection, until a concrete vdev is reached or a split block is
3479 * encountered. rbca_remap_vd and rbca_remap_offset are updated within
3480 * the callback as we go from one indirect vdev to the next (either
3481 * concrete or indirect again).
3482 */
3483 vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
3484
3485 /* Check if the DVA wasn't remapped because it is a split block */
3486 if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
3487 return (B_FALSE);
3488
3489 return (B_TRUE);
3490}
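/*
 * Editor's sketch, not part of metaslab.c: a hypothetical caller of
 * spa_remap_blkptr() that simply logs each original (vdev, offset, size)
 * reported by the callback while dva[0] is rewritten to a concrete
 * location.  remap_note_cb() and remap_example() are invented names; only
 * the spa_remap_blkptr()/spa_remap_cb_t interface above is assumed.
 */
/* ARGSUSED */
static void
remap_note_cb(uint64_t vdev_id, uint64_t offset, uint64_t size, void *arg)
{
	zfs_dbgmsg("remapped dva previously at vdev %llu offset %llu "
	    "size %llu", (u_longlong_t)vdev_id, (u_longlong_t)offset,
	    (u_longlong_t)size);
}

static boolean_t
remap_example(spa_t *spa, blkptr_t *bp)
{
	if (spa_remap_blkptr(spa, bp, remap_note_cb, NULL)) {
		/* bp->blk_dva[0] now refers to a concrete vdev. */
		return (B_TRUE);
	}
	/* Split block, feature not enabled, or nothing to remap. */
	return (B_FALSE);
}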
3491
3492/*
3493 * Undo the allocation of a DVA which happened in the given transaction group.
3494 */
3495void
3496metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
3497{
34dc7c2f 3498 metaslab_t *msp;
a1d477c2
MA
3499 vdev_t *vd;
3500 uint64_t vdev = DVA_GET_VDEV(dva);
3501 uint64_t offset = DVA_GET_OFFSET(dva);
3502 uint64_t size = DVA_GET_ASIZE(dva);
3503
3504 ASSERT(DVA_IS_VALID(dva));
3505 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
34dc7c2f 3506
34dc7c2f
BB
3507 if (txg > spa_freeze_txg(spa))
3508 return;
3509
7d2868d5 3510 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
34dc7c2f 3511 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
7d2868d5
BB
3512 zfs_panic_recover("metaslab_unalloc_dva(): bad DVA %llu:%llu:%llu",
3513 (u_longlong_t)vdev, (u_longlong_t)offset,
3514 (u_longlong_t)size);
34dc7c2f
BB
3515 return;
3516 }
3517
a1d477c2
MA
3518 ASSERT(!vd->vdev_removing);
3519 ASSERT(vdev_is_concrete(vd));
3520 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
3521 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
34dc7c2f
BB
3522
3523 if (DVA_GET_GANG(dva))
3524 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3525
a1d477c2 3526 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
93cf2076 3527
a1d477c2 3528 mutex_enter(&msp->ms_lock);
d2734cce 3529 range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
a1d477c2 3530 offset, size);
34dc7c2f 3531
a1d477c2
MA
3532 VERIFY(!msp->ms_condensing);
3533 VERIFY3U(offset, >=, msp->ms_start);
3534 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
d2734cce 3535 VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
a1d477c2
MA
3536 msp->ms_size);
3537 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3538 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
d2734cce 3539 range_tree_add(msp->ms_allocatable, offset, size);
34dc7c2f
BB
3540 mutex_exit(&msp->ms_lock);
3541}
3542
3543/*
d2734cce 3544 * Free the block represented by the given DVA.
34dc7c2f 3545 */
a1d477c2 3546void
d2734cce 3547metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
34dc7c2f
BB
3548{
3549 uint64_t vdev = DVA_GET_VDEV(dva);
3550 uint64_t offset = DVA_GET_OFFSET(dva);
3551 uint64_t size = DVA_GET_ASIZE(dva);
a1d477c2 3552 vdev_t *vd = vdev_lookup_top(spa, vdev);
34dc7c2f
BB
3553
3554 ASSERT(DVA_IS_VALID(dva));
a1d477c2 3555 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
34dc7c2f 3556
a1d477c2 3557 if (DVA_GET_GANG(dva)) {
34dc7c2f 3558 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
34dc7c2f
BB
3559 }
3560
d2734cce 3561 metaslab_free_impl(vd, offset, size, checkpoint);
34dc7c2f
BB
3562}
3563
3dfb57a3
DB
3564/*
3565 * Reserve some allocation slots. The reservation system must be called
3566 * before we call into the allocator. If there aren't any available slots
3567 * then the I/O will be throttled until an I/O completes and its slots are
3568 * freed up. The function returns true if it was successful in placing
3569 * the reservation.
3570 */
3571boolean_t
3572metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
3573 int flags)
3574{
3575 uint64_t available_slots = 0;
3dfb57a3
DB
3576 boolean_t slot_reserved = B_FALSE;
3577
3578 ASSERT(mc->mc_alloc_throttle_enabled);
3579 mutex_enter(&mc->mc_lock);
3580
1c27024e 3581 uint64_t reserved_slots = refcount_count(&mc->mc_alloc_slots);
3dfb57a3
DB
3582 if (reserved_slots < mc->mc_alloc_max_slots)
3583 available_slots = mc->mc_alloc_max_slots - reserved_slots;
3584
3585 if (slots <= available_slots || GANG_ALLOCATION(flags)) {
3dfb57a3
DB
3586 /*
3587 * We reserve the slots individually so that we can unreserve
3588 * them individually when an I/O completes.
3589 */
1c27024e 3590 for (int d = 0; d < slots; d++) {
3dfb57a3
DB
3591 reserved_slots = refcount_add(&mc->mc_alloc_slots, zio);
3592 }
3593 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
3594 slot_reserved = B_TRUE;
3595 }
3596
3597 mutex_exit(&mc->mc_lock);
3598 return (slot_reserved);
3599}
3600
3601void
3602metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio)
3603{
3dfb57a3
DB
3604 ASSERT(mc->mc_alloc_throttle_enabled);
3605 mutex_enter(&mc->mc_lock);
1c27024e 3606 for (int d = 0; d < slots; d++) {
3dfb57a3
DB
3607 (void) refcount_remove(&mc->mc_alloc_slots, zio);
3608 }
3609 mutex_exit(&mc->mc_lock);
3610}
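/*
 * Editor's sketch, not part of metaslab.c: the intended pairing of the
 * throttle calls around an allocation.  In ZFS proper this sequencing
 * lives in the zio pipeline; throttled_alloc_example() is an invented
 * name and the EAGAIN return is a placeholder for re-queueing the I/O.
 */
static int
throttled_alloc_example(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    blkptr_t *bp, int ndvas, uint64_t txg, int flags, zio_alloc_list_t *zal,
    zio_t *zio)
{
	int error;

	/* No slots available: the real pipeline would throttle and retry. */
	if (!metaslab_class_throttle_reserve(mc, ndvas, zio, flags))
		return (SET_ERROR(EAGAIN));

	error = metaslab_alloc(spa, mc, psize, bp, ndvas, txg, NULL, flags,
	    zal, zio);
	if (error != 0) {
		/* Give the reserved slots back if nothing was allocated. */
		metaslab_class_throttle_unreserve(mc, ndvas, zio);
	}
	return (error);
}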
3611
a1d477c2
MA
3612static int
3613metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
3614 uint64_t txg)
3615{
3616 metaslab_t *msp;
3617 spa_t *spa = vd->vdev_spa;
3618 int error = 0;
3619
3620 if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
3621 return (ENXIO);
3622
3623 ASSERT3P(vd->vdev_ms, !=, NULL);
3624 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3625
3626 mutex_enter(&msp->ms_lock);
3627
3628 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
3629 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
3630
d2734cce
SD
3631 if (error == 0 &&
3632 !range_tree_contains(msp->ms_allocatable, offset, size))
a1d477c2
MA
3633 error = SET_ERROR(ENOENT);
3634
3635 if (error || txg == 0) { /* txg == 0 indicates dry run */
3636 mutex_exit(&msp->ms_lock);
3637 return (error);
3638 }
3639
3640 VERIFY(!msp->ms_condensing);
3641 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3642 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
d2734cce
SD
3643 VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
3644 msp->ms_size);
3645 range_tree_remove(msp->ms_allocatable, offset, size);
a1d477c2
MA
3646
3647 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
d2734cce 3648 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
a1d477c2 3649 vdev_dirty(vd, VDD_METASLAB, msp, txg);
d2734cce
SD
3650 range_tree_add(msp->ms_allocating[txg & TXG_MASK],
3651 offset, size);
a1d477c2
MA
3652 }
3653
3654 mutex_exit(&msp->ms_lock);
3655
3656 return (0);
3657}
3658
3659typedef struct metaslab_claim_cb_arg_t {
3660 uint64_t mcca_txg;
3661 int mcca_error;
3662} metaslab_claim_cb_arg_t;
3663
3664/* ARGSUSED */
3665static void
3666metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3667 uint64_t size, void *arg)
3668{
3669 metaslab_claim_cb_arg_t *mcca_arg = arg;
3670
3671 if (mcca_arg->mcca_error == 0) {
3672 mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
3673 size, mcca_arg->mcca_txg);
3674 }
3675}
3676
3677int
3678metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
3679{
3680 if (vd->vdev_ops->vdev_op_remap != NULL) {
3681 metaslab_claim_cb_arg_t arg;
3682
3683 /*
3684 * Only zdb(1M) can claim on indirect vdevs. This is used
3685 * to detect leaks of mapped space (that are not accounted
3686 * for in the obsolete counts, spacemap, or bpobj).
3687 */
3688 ASSERT(!spa_writeable(vd->vdev_spa));
3689 arg.mcca_error = 0;
3690 arg.mcca_txg = txg;
3691
3692 vd->vdev_ops->vdev_op_remap(vd, offset, size,
3693 metaslab_claim_impl_cb, &arg);
3694
3695 if (arg.mcca_error == 0) {
3696 arg.mcca_error = metaslab_claim_concrete(vd,
3697 offset, size, txg);
3698 }
3699 return (arg.mcca_error);
3700 } else {
3701 return (metaslab_claim_concrete(vd, offset, size, txg));
3702 }
3703}
3704
3705/*
3706 * Intent log support: upon opening the pool after a crash, notify the SPA
3707 * of blocks that the intent log has allocated for immediate write, but
3708 * which are still considered free by the SPA because the last transaction
3709 * group didn't commit yet.
3710 */
3711static int
3712metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
3713{
3714 uint64_t vdev = DVA_GET_VDEV(dva);
3715 uint64_t offset = DVA_GET_OFFSET(dva);
3716 uint64_t size = DVA_GET_ASIZE(dva);
3717 vdev_t *vd;
3718
3719 if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
3720 return (SET_ERROR(ENXIO));
3721 }
3722
3723 ASSERT(DVA_IS_VALID(dva));
3724
3725 if (DVA_GET_GANG(dva))
3726 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3727
3728 return (metaslab_claim_impl(vd, offset, size, txg));
3729}
3730
34dc7c2f
BB
3731int
3732metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
4e21fd06
DB
3733 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
3734 zio_alloc_list_t *zal, zio_t *zio)
34dc7c2f
BB
3735{
3736 dva_t *dva = bp->blk_dva;
3737 dva_t *hintdva = hintbp->blk_dva;
1c27024e 3738 int error = 0;
34dc7c2f 3739
b128c09f 3740 ASSERT(bp->blk_birth == 0);
428870ff 3741 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
b128c09f
BB
3742
3743 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3744
3745 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
3746 spa_config_exit(spa, SCL_ALLOC, FTAG);
2e528b49 3747 return (SET_ERROR(ENOSPC));
b128c09f 3748 }
34dc7c2f
BB
3749
3750 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
3751 ASSERT(BP_GET_NDVAS(bp) == 0);
3752 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
4e21fd06 3753 ASSERT3P(zal, !=, NULL);
34dc7c2f 3754
1c27024e 3755 for (int d = 0; d < ndvas; d++) {
34dc7c2f 3756 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
4e21fd06 3757 txg, flags, zal);
93cf2076 3758 if (error != 0) {
34dc7c2f 3759 for (d--; d >= 0; d--) {
a1d477c2 3760 metaslab_unalloc_dva(spa, &dva[d], txg);
3dfb57a3
DB
3761 metaslab_group_alloc_decrement(spa,
3762 DVA_GET_VDEV(&dva[d]), zio, flags);
34dc7c2f
BB
3763 bzero(&dva[d], sizeof (dva_t));
3764 }
b128c09f 3765 spa_config_exit(spa, SCL_ALLOC, FTAG);
34dc7c2f 3766 return (error);
3dfb57a3
DB
3767 } else {
3768 /*
3769 * Update the metaslab group's queue depth
3770 * based on the newly allocated dva.
3771 */
3772 metaslab_group_alloc_increment(spa,
3773 DVA_GET_VDEV(&dva[d]), zio, flags);
34dc7c2f 3774 }
3dfb57a3 3775
34dc7c2f
BB
3776 }
3777 ASSERT(error == 0);
3778 ASSERT(BP_GET_NDVAS(bp) == ndvas);
3779
b128c09f
BB
3780 spa_config_exit(spa, SCL_ALLOC, FTAG);
3781
efe7978d 3782 BP_SET_BIRTH(bp, txg, 0);
b128c09f 3783
34dc7c2f
BB
3784 return (0);
3785}
3786
3787void
3788metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
3789{
3790 const dva_t *dva = bp->blk_dva;
1c27024e 3791 int ndvas = BP_GET_NDVAS(bp);
34dc7c2f
BB
3792
3793 ASSERT(!BP_IS_HOLE(bp));
428870ff 3794 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
b128c09f 3795
d2734cce
SD
3796 /*
3797 * If we have a checkpoint for the pool we need to make sure that
3798 * the blocks that we free that are part of the checkpoint won't be
3799 * reused until the checkpoint is discarded or we revert to it.
3800 *
3801 * The checkpoint flag is passed down the metaslab_free code path
3802 * and is set whenever we want to add a block to the checkpoint's
3803 * accounting. That is, we "checkpoint" blocks that existed at the
3804 * time the checkpoint was created and are therefore referenced by
3805 * the checkpointed uberblock.
3806 *
3807 * Note that we don't checkpoint any blocks if the current
3808 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
3809 * normally as they will be referenced by the checkpointed uberblock.
3810 */
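/*
 * Editor's note (illustrative numbers): if the pool was checkpointed at
 * txg 150, then freeing a block with blk_birth 100 while syncing txg 200
 * sets checkpoint = B_TRUE below, since the block predates the checkpoint.
 * A block with blk_birth 160 is freed normally because the checkpointed
 * uberblock cannot reference it.
 */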
3811 boolean_t checkpoint = B_FALSE;
3812 if (bp->blk_birth <= spa->spa_checkpoint_txg &&
3813 spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
3814 /*
3815 * At this point, if the block is part of the checkpoint
3816 * there is no way it was created in the current txg.
3817 */
3818 ASSERT(!now);
3819 ASSERT3U(spa_syncing_txg(spa), ==, txg);
3820 checkpoint = B_TRUE;
3821 }
3822
b128c09f 3823 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
34dc7c2f 3824
a1d477c2
MA
3825 for (int d = 0; d < ndvas; d++) {
3826 if (now) {
3827 metaslab_unalloc_dva(spa, &dva[d], txg);
3828 } else {
d2734cce
SD
3829 ASSERT3U(txg, ==, spa_syncing_txg(spa));
3830 metaslab_free_dva(spa, &dva[d], checkpoint);
a1d477c2
MA
3831 }
3832 }
b128c09f
BB
3833
3834 spa_config_exit(spa, SCL_FREE, FTAG);
34dc7c2f
BB
3835}
3836
3837int
3838metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
3839{
3840 const dva_t *dva = bp->blk_dva;
3841 int ndvas = BP_GET_NDVAS(bp);
1c27024e 3842 int error = 0;
34dc7c2f
BB
3843
3844 ASSERT(!BP_IS_HOLE(bp));
3845
b128c09f
BB
3846 if (txg != 0) {
3847 /*
3848 * First do a dry run to make sure all DVAs are claimable,
3849 * so we don't have to unwind from partial failures below.
3850 */
3851 if ((error = metaslab_claim(spa, bp, 0)) != 0)
3852 return (error);
3853 }
3854
3855 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3856
1c27024e 3857 for (int d = 0; d < ndvas; d++)
34dc7c2f 3858 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
b128c09f
BB
3859 break;
3860
3861 spa_config_exit(spa, SCL_ALLOC, FTAG);
3862
3863 ASSERT(error == 0 || txg == 0);
34dc7c2f 3864
b128c09f 3865 return (error);
34dc7c2f 3866}
920dd524 3867
d1d7e268
MK
3868void
3869metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
920dd524
ED
3870{
3871 const dva_t *dva = bp->blk_dva;
3872 int ndvas = BP_GET_NDVAS(bp);
3873 uint64_t psize = BP_GET_PSIZE(bp);
3874 int d;
3875 vdev_t *vd;
3876
3877 ASSERT(!BP_IS_HOLE(bp));
9b67f605 3878 ASSERT(!BP_IS_EMBEDDED(bp));
920dd524
ED
3879 ASSERT(psize > 0);
3880
3881 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
3882
3883 for (d = 0; d < ndvas; d++) {
3884 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
3885 continue;
3886 atomic_add_64(&vd->vdev_pending_fastwrite, psize);
3887 }
3888
3889 spa_config_exit(spa, SCL_VDEV, FTAG);
3890}
3891
d1d7e268
MK
3892void
3893metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
920dd524
ED
3894{
3895 const dva_t *dva = bp->blk_dva;
3896 int ndvas = BP_GET_NDVAS(bp);
3897 uint64_t psize = BP_GET_PSIZE(bp);
3898 int d;
3899 vdev_t *vd;
3900
3901 ASSERT(!BP_IS_HOLE(bp));
9b67f605 3902 ASSERT(!BP_IS_EMBEDDED(bp));
920dd524
ED
3903 ASSERT(psize > 0);
3904
3905 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
3906
3907 for (d = 0; d < ndvas; d++) {
3908 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
3909 continue;
3910 ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
3911 atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
3912 }
3913
3914 spa_config_exit(spa, SCL_VDEV, FTAG);
3915}
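/*
 * Editor's note: metaslab_fastwrite_mark() and metaslab_fastwrite_unmark()
 * are expected to be called in matched pairs for the same block pointer;
 * otherwise vdev_pending_fastwrite, which the METASLAB_FASTWRITE rotor
 * policy above consults to pick the least-loaded vdev, would drift (the
 * ASSERT in unmark guards against it going negative).
 */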
30b92c1d 3916
a1d477c2
MA
3917/* ARGSUSED */
3918static void
3919metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
3920 uint64_t size, void *arg)
3921{
3922 if (vd->vdev_ops == &vdev_indirect_ops)
3923 return;
3924
3925 metaslab_check_free_impl(vd, offset, size);
3926}
3927
3928static void
3929metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
3930{
3931 metaslab_t *msp;
3932 ASSERTV(spa_t *spa = vd->vdev_spa);
3933
3934 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
3935 return;
3936
3937 if (vd->vdev_ops->vdev_op_remap != NULL) {
3938 vd->vdev_ops->vdev_op_remap(vd, offset, size,
3939 metaslab_check_free_impl_cb, NULL);
3940 return;
3941 }
3942
3943 ASSERT(vdev_is_concrete(vd));
3944 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
3945 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3946
3947 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3948
3949 mutex_enter(&msp->ms_lock);
3950 if (msp->ms_loaded)
d2734cce 3951 range_tree_verify(msp->ms_allocatable, offset, size);
a1d477c2 3952
d2734cce
SD
3953 range_tree_verify(msp->ms_freeing, offset, size);
3954 range_tree_verify(msp->ms_checkpointing, offset, size);
3955 range_tree_verify(msp->ms_freed, offset, size);
a1d477c2 3956 for (int j = 0; j < TXG_DEFER_SIZE; j++)
d2734cce 3957 range_tree_verify(msp->ms_defer[j], offset, size);
a1d477c2
MA
3958 mutex_exit(&msp->ms_lock);
3959}
3960
13fe0198
MA
3961void
3962metaslab_check_free(spa_t *spa, const blkptr_t *bp)
3963{
13fe0198
MA
3964 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
3965 return;
3966
3967 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1c27024e 3968 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
93cf2076
GW
3969 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
3970 vdev_t *vd = vdev_lookup_top(spa, vdev);
3971 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
13fe0198 3972 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
13fe0198 3973
a1d477c2
MA
3974 if (DVA_GET_GANG(&bp->blk_dva[i]))
3975 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3976
3977 ASSERT3P(vd, !=, NULL);
13fe0198 3978
a1d477c2 3979 metaslab_check_free_impl(vd, offset, size);
13fe0198
MA
3980 }
3981 spa_config_exit(spa, SCL_VDEV, FTAG);
3982}
3983
93ce2b4c 3984#if defined(_KERNEL)
02730c33 3985/* CSTYLED */
99b14de4 3986module_param(metaslab_aliquot, ulong, 0644);
99b14de4
ED
3987MODULE_PARM_DESC(metaslab_aliquot,
3988 "allocation granularity (a.k.a. stripe size)");
02730c33
BB
3989
3990module_param(metaslab_debug_load, int, 0644);
93cf2076
GW
3991MODULE_PARM_DESC(metaslab_debug_load,
3992 "load all metaslabs when pool is first opened");
02730c33
BB
3993
3994module_param(metaslab_debug_unload, int, 0644);
1ce04573
BB
3995MODULE_PARM_DESC(metaslab_debug_unload,
3996 "prevent metaslabs from being unloaded");
02730c33
BB
3997
3998module_param(metaslab_preload_enabled, int, 0644);
f3a7f661
GW
3999MODULE_PARM_DESC(metaslab_preload_enabled,
4000 "preload potential metaslabs during reassessment");
f4a4046b 4001
02730c33 4002module_param(zfs_mg_noalloc_threshold, int, 0644);
f4a4046b
TC
4003MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
4004 "percentage of free space for metaslab group to allow allocation");
02730c33
BB
4005
4006module_param(zfs_mg_fragmentation_threshold, int, 0644);
f3a7f661
GW
4007MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
4008 "fragmentation for metaslab group to allow allocation");
4009
02730c33 4010module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
f3a7f661
GW
4011MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
4012 "fragmentation for metaslab to allow allocation");
02730c33
BB
4013
4014module_param(metaslab_fragmentation_factor_enabled, int, 0644);
f3a7f661
GW
4015MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
4016 "use the fragmentation metric to prefer less fragmented metaslabs");
02730c33
BB
4017
4018module_param(metaslab_lba_weighting_enabled, int, 0644);
f3a7f661
GW
4019MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
4020 "prefer metaslabs with lower LBAs");
02730c33
BB
4021
4022module_param(metaslab_bias_enabled, int, 0644);
f3a7f661
GW
4023MODULE_PARM_DESC(metaslab_bias_enabled,
4024 "enable metaslab group biasing");
4e21fd06
DB
4025
4026module_param(zfs_metaslab_segment_weight_enabled, int, 0644);
4027MODULE_PARM_DESC(zfs_metaslab_segment_weight_enabled,
4028 "enable segment-based metaslab selection");
4029
4030module_param(zfs_metaslab_switch_threshold, int, 0644);
4031MODULE_PARM_DESC(zfs_metaslab_switch_threshold,
4032 "segment-based metaslab selection maximum buckets before switching");
a1d477c2
MA
4033
4034/* CSTYLED */
d830d479
MA
4035module_param(metaslab_force_ganging, ulong, 0644);
4036MODULE_PARM_DESC(metaslab_force_ganging,
a1d477c2 4037 "blocks larger than this size are forced to be gang blocks");
93ce2b4c 4038#endif