[mirror_zfs.git] / module / zfs / metaslab.c
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
428870ff 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
4e21fd06 23 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
2e528b49 24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 */
26
34dc7c2f 27#include <sys/zfs_context.h>
28#include <sys/dmu.h>
29#include <sys/dmu_tx.h>
30#include <sys/space_map.h>
31#include <sys/metaslab_impl.h>
32#include <sys/vdev_impl.h>
33#include <sys/zio.h>
93cf2076 34#include <sys/spa_impl.h>
f3a7f661 35#include <sys/zfeature.h>
34dc7c2f 36
d1d7e268 37#define WITH_DF_BLOCK_ALLOCATOR
6d974228 38
39#define GANG_ALLOCATION(flags) \
40 ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
22c81dd8 41
42/*
43 * Metaslab granularity, in bytes. This is roughly similar to what would be
44 * referred to as the "stripe size" in traditional RAID arrays. In normal
45 * operation, we will try to write this amount of data to a top-level vdev
46 * before moving on to the next one.
47 */
99b14de4 48unsigned long metaslab_aliquot = 512 << 10;
e8fe6684 49
50uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
51
52/*
53 * The in-core space map representation is more compact than its on-disk form.
54 * The zfs_condense_pct determines how much more compact the in-core
4e21fd06 55 * space map representation must be before we compact it on-disk.
56 * Values should be greater than or equal to 100.
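 * For example, with the default value of 200, the on-disk space map must
 * grow to roughly twice the size of its condensed in-core form before we
 * rewrite it.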
57 */
58int zfs_condense_pct = 200;
59
60/*
61 * Condensing a metaslab is not guaranteed to actually reduce the amount of
62 * space used on disk. In particular, a space map uses data in increments of
96358617 63 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
64 * same number of blocks after condensing. Since the goal of condensing is to
65 * reduce the number of IOPs required to read the space map, we only want to
66 * condense when we can be sure we will reduce the number of blocks used by the
67 * space map. Unfortunately, we cannot precisely compute whether or not this is
68 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
69 * we apply the following heuristic: do not condense a spacemap unless the
70 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
71 * blocks.
72 */
73int zfs_metaslab_condense_block_threshold = 4;
74
75/*
76 * The zfs_mg_noalloc_threshold defines which metaslab groups should
77 * be eligible for allocation. The value is defined as a percentage of
f3a7f661 78 * free space. Metaslab groups that have more free space than
79 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
80 * a metaslab group's free space is less than or equal to the
81 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
82 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
83 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
84 * groups are allowed to accept allocations. Gang blocks are always
85 * eligible to allocate on any metaslab group. The default value of 0 means
86 * no metaslab group will be excluded based on this criterion.
87 */
88int zfs_mg_noalloc_threshold = 0;
6d974228 89
90/*
91 * Metaslab groups are considered eligible for allocations if their
92 * fragmentation metric (measured as a percentage) is less than or equal to
93 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
94 * then it will be skipped unless all metaslab groups within the metaslab
95 * class have also crossed this threshold.
96 */
97int zfs_mg_fragmentation_threshold = 85;
98
99/*
100 * Allow metaslabs to keep their active state as long as their fragmentation
101 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
102 * active metaslab that exceeds this threshold will no longer keep its active
103 * status allowing better metaslabs to be selected.
104 */
105int zfs_metaslab_fragmentation_threshold = 70;
106
428870ff 107/*
aa7d06a9 108 * When set, all metaslabs will be loaded when the pool is first opened.
428870ff 109 */
110int metaslab_debug_load = 0;
111
112/*
113 * When set, metaslabs will be prevented from being unloaded.
114 */
115int metaslab_debug_unload = 0;
428870ff 116
117/*
118 * Minimum size which forces the dynamic allocator to change
428870ff 119 * its allocation strategy. Once the space map cannot satisfy
120 * an allocation of this size then it switches to using a more
121 * aggressive strategy (i.e. search by size rather than offset).
122 */
4e21fd06 123uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
124
125/*
126 * The minimum free space, in percent, which must be available
127 * in a space map to continue allocations in a first-fit fashion.
4e21fd06 128 * Once the space map's free space drops below this level we dynamically
129 * switch to using best-fit allocations.
130 */
131int metaslab_df_free_pct = 4;
132
428870ff 133/*
93cf2076 134 * Percentage of all cpus that can be used by the metaslab taskq.
428870ff 135 */
93cf2076 136int metaslab_load_pct = 50;
137
138/*
139 * Determines how many txgs a metaslab may remain loaded without having any
140 * allocations from it. As long as a metaslab continues to be used we will
141 * keep it loaded.
428870ff 142 */
93cf2076 143int metaslab_unload_delay = TXG_SIZE * 2;
9babb374 144
145/*
146 * Max number of metaslabs per group to preload.
147 */
148int metaslab_preload_limit = SPA_DVAS_PER_BP;
149
150/*
151 * Enable/disable preloading of metaslabs.
152 */
f3a7f661 153int metaslab_preload_enabled = B_TRUE;
154
155/*
f3a7f661 156 * Enable/disable fragmentation weighting on metaslabs.
93cf2076 157 */
f3a7f661 158int metaslab_fragmentation_factor_enabled = B_TRUE;
93cf2076 159
160/*
161 * Enable/disable lba weighting (i.e. outer tracks are given preference).
162 */
163int metaslab_lba_weighting_enabled = B_TRUE;
164
165/*
166 * Enable/disable metaslab group biasing.
167 */
168int metaslab_bias_enabled = B_TRUE;
169
170
171/*
172 * Enable/disable segment-based metaslab selection.
173 */
174int zfs_metaslab_segment_weight_enabled = B_TRUE;
175
176/*
177 * When using segment-based metaslab selection, we will continue
178 * allocating from the active metaslab until we have exhausted
179 * zfs_metaslab_switch_threshold of its buckets.
180 */
181int zfs_metaslab_switch_threshold = 2;
182
183/*
184 * Internal switch to enable/disable the metaslab allocation tracing
185 * facility.
186 */
187#ifdef _METASLAB_TRACING
188boolean_t metaslab_trace_enabled = B_TRUE;
189#endif
190
191/*
192 * Maximum entries that the metaslab allocation tracing facility will keep
193 * in a given list when running in non-debug mode. We limit the number
194 * of entries in non-debug mode to prevent us from using up too much memory.
195 * The limit should be sufficiently large that we don't expect any allocation
196 * to ever exceed this value. In debug mode, the system will panic if this
197 * limit is ever reached, allowing for further investigation.
198 */
199#ifdef _METASLAB_TRACING
200uint64_t metaslab_trace_max_entries = 5000;
201#endif
202
203static uint64_t metaslab_weight(metaslab_t *);
204static void metaslab_set_fragmentation(metaslab_t *);
205
206#ifdef _METASLAB_TRACING
207kmem_cache_t *metaslab_alloc_trace_cache;
208#endif
93cf2076 209
210/*
211 * ==========================================================================
212 * Metaslab classes
213 * ==========================================================================
214 */
215metaslab_class_t *
93cf2076 216metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
217{
218 metaslab_class_t *mc;
219
79c76d5b 220 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
34dc7c2f 221
428870ff 222 mc->mc_spa = spa;
34dc7c2f 223 mc->mc_rotor = NULL;
9babb374 224 mc->mc_ops = ops;
225 mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
226 refcount_create_tracked(&mc->mc_alloc_slots);
227
228 return (mc);
229}
230
231void
232metaslab_class_destroy(metaslab_class_t *mc)
233{
234 ASSERT(mc->mc_rotor == NULL);
235 ASSERT(mc->mc_alloc == 0);
236 ASSERT(mc->mc_deferred == 0);
237 ASSERT(mc->mc_space == 0);
238 ASSERT(mc->mc_dspace == 0);
34dc7c2f 239
240 refcount_destroy(&mc->mc_alloc_slots);
241 mutex_destroy(&mc->mc_lock);
242 kmem_free(mc, sizeof (metaslab_class_t));
243}
244
245int
246metaslab_class_validate(metaslab_class_t *mc)
34dc7c2f 247{
248 metaslab_group_t *mg;
249 vdev_t *vd;
34dc7c2f 250
251 /*
252 * Must hold one of the spa_config locks.
253 */
254 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
255 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
34dc7c2f 256
257 if ((mg = mc->mc_rotor) == NULL)
258 return (0);
259
260 do {
261 vd = mg->mg_vd;
262 ASSERT(vd->vdev_mg != NULL);
263 ASSERT3P(vd->vdev_top, ==, vd);
264 ASSERT3P(mg->mg_class, ==, mc);
265 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
266 } while ((mg = mg->mg_next) != mc->mc_rotor);
267
268 return (0);
269}
270
271void
272metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
273 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
34dc7c2f 274{
275 atomic_add_64(&mc->mc_alloc, alloc_delta);
276 atomic_add_64(&mc->mc_deferred, defer_delta);
277 atomic_add_64(&mc->mc_space, space_delta);
278 atomic_add_64(&mc->mc_dspace, dspace_delta);
279}
34dc7c2f 280
281uint64_t
282metaslab_class_get_alloc(metaslab_class_t *mc)
283{
284 return (mc->mc_alloc);
285}
34dc7c2f 286
287uint64_t
288metaslab_class_get_deferred(metaslab_class_t *mc)
289{
290 return (mc->mc_deferred);
291}
34dc7c2f 292
293uint64_t
294metaslab_class_get_space(metaslab_class_t *mc)
295{
296 return (mc->mc_space);
297}
34dc7c2f 298
299uint64_t
300metaslab_class_get_dspace(metaslab_class_t *mc)
301{
302 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
303}
304
305void
306metaslab_class_histogram_verify(metaslab_class_t *mc)
307{
308 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
309 uint64_t *mc_hist;
310 int i, c;
311
312 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
313 return;
314
315 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
79c76d5b 316 KM_SLEEP);
317
318 for (c = 0; c < rvd->vdev_children; c++) {
319 vdev_t *tvd = rvd->vdev_child[c];
320 metaslab_group_t *mg = tvd->vdev_mg;
321
322 /*
323 * Skip any holes, uninitialized top-levels, or
324 * vdevs that are not in this metaslab class.
325 */
326 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
327 mg->mg_class != mc) {
328 continue;
329 }
330
331 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
332 mc_hist[i] += mg->mg_histogram[i];
333 }
334
335 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
336 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
337
338 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
339}
340
341/*
342 * Calculate the metaslab class's fragmentation metric. The metric
343 * is weighted based on the space contribution of each metaslab group.
344 * The return value will be a number between 0 and 100 (inclusive), or
345 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
346 * zfs_frag_table for more information about the metric.
347 */
348uint64_t
349metaslab_class_fragmentation(metaslab_class_t *mc)
350{
351 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
352 uint64_t fragmentation = 0;
353 int c;
354
355 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
356
357 for (c = 0; c < rvd->vdev_children; c++) {
358 vdev_t *tvd = rvd->vdev_child[c];
359 metaslab_group_t *mg = tvd->vdev_mg;
360
361 /*
362 * Skip any holes, uninitialized top-levels, or
363 * vdevs that are not in this metaslab class.
364 */
365 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
366 mg->mg_class != mc) {
367 continue;
368 }
369
370 /*
371 * If a metaslab group does not contain a fragmentation
372 * metric then just bail out.
373 */
374 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
375 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
376 return (ZFS_FRAG_INVALID);
377 }
378
379 /*
380 * Determine how much this metaslab_group is contributing
381 * to the overall pool fragmentation metric.
382 */
383 fragmentation += mg->mg_fragmentation *
384 metaslab_group_get_space(mg);
385 }
386 fragmentation /= metaslab_class_get_space(mc);
387
388 ASSERT3U(fragmentation, <=, 100);
389 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
390 return (fragmentation);
391}
392
393/*
394 * Calculate the amount of expandable space that is available in
395 * this metaslab class. If a device is expanded then its expandable
396 * space will be the amount of allocatable space that is currently not
397 * part of this metaslab class.
398 */
399uint64_t
400metaslab_class_expandable_space(metaslab_class_t *mc)
401{
402 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
403 uint64_t space = 0;
404 int c;
405
406 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
407 for (c = 0; c < rvd->vdev_children; c++) {
408 vdev_t *tvd = rvd->vdev_child[c];
409 metaslab_group_t *mg = tvd->vdev_mg;
410
411 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
412 mg->mg_class != mc) {
413 continue;
414 }
415
416 space += tvd->vdev_max_asize - tvd->vdev_asize;
417 }
418 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
419 return (space);
420}
421
422static int
423metaslab_compare(const void *x1, const void *x2)
424{
425 const metaslab_t *m1 = (const metaslab_t *)x1;
426 const metaslab_t *m2 = (const metaslab_t *)x2;
34dc7c2f 427
428 int cmp = AVL_CMP(m2->ms_weight, m1->ms_weight);
429 if (likely(cmp))
430 return (cmp);
34dc7c2f 431
ee36c709 432 IMPLY(AVL_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
34dc7c2f 433
ee36c709 434 return (AVL_CMP(m1->ms_start, m2->ms_start));
435}
436
437/*
438 * Verify that the space accounting on disk matches the in-core range_trees.
439 */
440void
441metaslab_verify_space(metaslab_t *msp, uint64_t txg)
442{
443 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
444 uint64_t allocated = 0;
445 uint64_t freed = 0;
446 uint64_t sm_free_space, msp_free_space;
447 int t;
448
449 ASSERT(MUTEX_HELD(&msp->ms_lock));
450
451 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
452 return;
453
454 /*
455 * We can only verify the metaslab space when we're called
456 * from syncing context with a loaded metaslab that has an allocated
457 * space map. Calling this in non-syncing context does not
458 * provide a consistent view of the metaslab since we're performing
459 * allocations in the future.
460 */
461 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
462 !msp->ms_loaded)
463 return;
464
465 sm_free_space = msp->ms_size - space_map_allocated(msp->ms_sm) -
466 space_map_alloc_delta(msp->ms_sm);
467
468 /*
469 * Account for future allocations since we would have already
470 * deducted that space from the ms_freetree.
471 */
472 for (t = 0; t < TXG_CONCURRENT_STATES; t++) {
473 allocated +=
474 range_tree_space(msp->ms_alloctree[(txg + t) & TXG_MASK]);
475 }
476 freed = range_tree_space(msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK]);
477
478 msp_free_space = range_tree_space(msp->ms_tree) + allocated +
479 msp->ms_deferspace + freed;
480
481 VERIFY3U(sm_free_space, ==, msp_free_space);
482}
483
484/*
485 * ==========================================================================
486 * Metaslab groups
487 * ==========================================================================
488 */
489/*
490 * Update the allocatable flag and the metaslab group's capacity.
491 * The allocatable flag is set to true if the capacity is below
492 * the zfs_mg_noalloc_threshold or has a fragmentation value that is
493 * greater than zfs_mg_fragmentation_threshold. If a metaslab group
494 * transitions from allocatable to non-allocatable or vice versa then the
495 * metaslab group's class is updated to reflect the transition.
496 */
497static void
498metaslab_group_alloc_update(metaslab_group_t *mg)
499{
500 vdev_t *vd = mg->mg_vd;
501 metaslab_class_t *mc = mg->mg_class;
502 vdev_stat_t *vs = &vd->vdev_stat;
503 boolean_t was_allocatable;
3dfb57a3 504 boolean_t was_initialized;
505
506 ASSERT(vd == vd->vdev_top);
507
508 mutex_enter(&mg->mg_lock);
509 was_allocatable = mg->mg_allocatable;
3dfb57a3 510 was_initialized = mg->mg_initialized;
511
512 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
513 (vs->vs_space + 1);
514
515 mutex_enter(&mc->mc_lock);
516
517 /*
518 * If the metaslab group was just added then it won't
519 * have any space until we finish syncing out this txg.
520 * At that point we will consider it initialized and available
521 * for allocations. We also don't consider non-activated
522 * metaslab groups (e.g. vdevs that are in the middle of being removed)
523 * to be initialized, because they can't be used for allocation.
524 */
525 mg->mg_initialized = metaslab_group_initialized(mg);
526 if (!was_initialized && mg->mg_initialized) {
527 mc->mc_groups++;
528 } else if (was_initialized && !mg->mg_initialized) {
529 ASSERT3U(mc->mc_groups, >, 0);
530 mc->mc_groups--;
531 }
532 if (mg->mg_initialized)
533 mg->mg_no_free_space = B_FALSE;
534
535 /*
536 * A metaslab group is considered allocatable if it has plenty
537 * of free space or is not heavily fragmented. We only take
538 * fragmentation into account if the metaslab group has a valid
539 * fragmentation metric (i.e. a value between 0 and 100).
540 */
541 mg->mg_allocatable = (mg->mg_activation_count > 0 &&
542 mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
543 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
544 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
545
546 /*
547 * The mc_alloc_groups maintains a count of the number of
548 * groups in this metaslab class that are still above the
549 * zfs_mg_noalloc_threshold. This is used by the allocating
550 * threads to determine if they should avoid allocations to
551 * a given group. The allocator will avoid allocations to a group
552 * if that group has reached or is below the zfs_mg_noalloc_threshold
553 * and there are still other groups that are above the threshold.
554 * When a group transitions from allocatable to non-allocatable or
555 * vice versa we update the metaslab class to reflect that change.
556 * When the mc_alloc_groups value drops to 0 that means that all
557 * groups have reached the zfs_mg_noalloc_threshold making all groups
558 * eligible for allocations. This effectively means that all devices
559 * are balanced again.
560 */
561 if (was_allocatable && !mg->mg_allocatable)
562 mc->mc_alloc_groups--;
563 else if (!was_allocatable && mg->mg_allocatable)
564 mc->mc_alloc_groups++;
3dfb57a3 565 mutex_exit(&mc->mc_lock);
f3a7f661 566
567 mutex_exit(&mg->mg_lock);
568}
569
570metaslab_group_t *
571metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
572{
573 metaslab_group_t *mg;
574
79c76d5b 575 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
576 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
577 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
578 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
34dc7c2f 579 mg->mg_vd = vd;
580 mg->mg_class = mc;
581 mg->mg_activation_count = 0;
582 mg->mg_initialized = B_FALSE;
583 mg->mg_no_free_space = B_TRUE;
584 refcount_create_tracked(&mg->mg_alloc_queue_depth);
34dc7c2f 585
3c51c5cb 586 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
1229323d 587 maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
93cf2076 588
589 return (mg);
590}
591
592void
593metaslab_group_destroy(metaslab_group_t *mg)
594{
595 ASSERT(mg->mg_prev == NULL);
596 ASSERT(mg->mg_next == NULL);
597 /*
598 * We may have gone below zero with the activation count
599 * either because we never activated in the first place or
600 * because we're done, and possibly removing the vdev.
601 */
602 ASSERT(mg->mg_activation_count <= 0);
603
3c51c5cb 604 taskq_destroy(mg->mg_taskq);
605 avl_destroy(&mg->mg_metaslab_tree);
606 mutex_destroy(&mg->mg_lock);
3dfb57a3 607 refcount_destroy(&mg->mg_alloc_queue_depth);
608 kmem_free(mg, sizeof (metaslab_group_t));
609}
610
611void
612metaslab_group_activate(metaslab_group_t *mg)
613{
614 metaslab_class_t *mc = mg->mg_class;
615 metaslab_group_t *mgprev, *mgnext;
616
617 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
618
619 ASSERT(mc->mc_rotor != mg);
620 ASSERT(mg->mg_prev == NULL);
621 ASSERT(mg->mg_next == NULL);
622 ASSERT(mg->mg_activation_count <= 0);
623
624 if (++mg->mg_activation_count <= 0)
625 return;
626
627 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
ac72fac3 628 metaslab_group_alloc_update(mg);
629
630 if ((mgprev = mc->mc_rotor) == NULL) {
631 mg->mg_prev = mg;
632 mg->mg_next = mg;
633 } else {
634 mgnext = mgprev->mg_next;
635 mg->mg_prev = mgprev;
636 mg->mg_next = mgnext;
637 mgprev->mg_next = mg;
638 mgnext->mg_prev = mg;
639 }
640 mc->mc_rotor = mg;
641}
642
643void
644metaslab_group_passivate(metaslab_group_t *mg)
645{
646 metaslab_class_t *mc = mg->mg_class;
647 metaslab_group_t *mgprev, *mgnext;
648
649 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
650
651 if (--mg->mg_activation_count != 0) {
652 ASSERT(mc->mc_rotor != mg);
653 ASSERT(mg->mg_prev == NULL);
654 ASSERT(mg->mg_next == NULL);
655 ASSERT(mg->mg_activation_count < 0);
656 return;
657 }
658
c5528b9b 659 taskq_wait_outstanding(mg->mg_taskq, 0);
f3a7f661 660 metaslab_group_alloc_update(mg);
93cf2076 661
662 mgprev = mg->mg_prev;
663 mgnext = mg->mg_next;
664
665 if (mg == mgnext) {
666 mc->mc_rotor = NULL;
667 } else {
668 mc->mc_rotor = mgnext;
669 mgprev->mg_next = mgnext;
670 mgnext->mg_prev = mgprev;
671 }
672
673 mg->mg_prev = NULL;
674 mg->mg_next = NULL;
675}
676
677boolean_t
678metaslab_group_initialized(metaslab_group_t *mg)
679{
680 vdev_t *vd = mg->mg_vd;
681 vdev_stat_t *vs = &vd->vdev_stat;
682
683 return (vs->vs_space != 0 && mg->mg_activation_count > 0);
684}
685
686uint64_t
687metaslab_group_get_space(metaslab_group_t *mg)
688{
689 return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
690}
691
692void
693metaslab_group_histogram_verify(metaslab_group_t *mg)
694{
695 uint64_t *mg_hist;
696 vdev_t *vd = mg->mg_vd;
697 uint64_t ashift = vd->vdev_ashift;
698 int i, m;
699
700 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
701 return;
702
703 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
79c76d5b 704 KM_SLEEP);
705
706 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
707 SPACE_MAP_HISTOGRAM_SIZE + ashift);
708
709 for (m = 0; m < vd->vdev_ms_count; m++) {
710 metaslab_t *msp = vd->vdev_ms[m];
711
712 if (msp->ms_sm == NULL)
713 continue;
714
715 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
716 mg_hist[i + ashift] +=
717 msp->ms_sm->sm_phys->smp_histogram[i];
718 }
719
720 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
721 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
722
723 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
724}
725
34dc7c2f 726static void
f3a7f661 727metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
34dc7c2f 728{
729 metaslab_class_t *mc = mg->mg_class;
730 uint64_t ashift = mg->mg_vd->vdev_ashift;
731 int i;
732
733 ASSERT(MUTEX_HELD(&msp->ms_lock));
734 if (msp->ms_sm == NULL)
735 return;
736
34dc7c2f 737 mutex_enter(&mg->mg_lock);
738 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
739 mg->mg_histogram[i + ashift] +=
740 msp->ms_sm->sm_phys->smp_histogram[i];
741 mc->mc_histogram[i + ashift] +=
742 msp->ms_sm->sm_phys->smp_histogram[i];
743 }
744 mutex_exit(&mg->mg_lock);
745}
746
747void
748metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
749{
750 metaslab_class_t *mc = mg->mg_class;
751 uint64_t ashift = mg->mg_vd->vdev_ashift;
752 int i;
753
754 ASSERT(MUTEX_HELD(&msp->ms_lock));
755 if (msp->ms_sm == NULL)
756 return;
757
758 mutex_enter(&mg->mg_lock);
759 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
760 ASSERT3U(mg->mg_histogram[i + ashift], >=,
761 msp->ms_sm->sm_phys->smp_histogram[i]);
762 ASSERT3U(mc->mc_histogram[i + ashift], >=,
763 msp->ms_sm->sm_phys->smp_histogram[i]);
764
765 mg->mg_histogram[i + ashift] -=
766 msp->ms_sm->sm_phys->smp_histogram[i];
767 mc->mc_histogram[i + ashift] -=
768 msp->ms_sm->sm_phys->smp_histogram[i];
769 }
770 mutex_exit(&mg->mg_lock);
771}
772
773static void
774metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
775{
34dc7c2f 776 ASSERT(msp->ms_group == NULL);
f3a7f661 777 mutex_enter(&mg->mg_lock);
778 msp->ms_group = mg;
779 msp->ms_weight = 0;
780 avl_add(&mg->mg_metaslab_tree, msp);
781 mutex_exit(&mg->mg_lock);
782
783 mutex_enter(&msp->ms_lock);
784 metaslab_group_histogram_add(mg, msp);
785 mutex_exit(&msp->ms_lock);
786}
787
788static void
789metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
790{
791 mutex_enter(&msp->ms_lock);
792 metaslab_group_histogram_remove(mg, msp);
793 mutex_exit(&msp->ms_lock);
794
795 mutex_enter(&mg->mg_lock);
796 ASSERT(msp->ms_group == mg);
797 avl_remove(&mg->mg_metaslab_tree, msp);
798 msp->ms_group = NULL;
799 mutex_exit(&mg->mg_lock);
800}
801
802static void
803metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
804{
805 /*
806 * Although in principle the weight can be any value, in
f3a7f661 807 * practice we do not use values in the range [1, 511].
34dc7c2f 808 */
f3a7f661 809 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
810 ASSERT(MUTEX_HELD(&msp->ms_lock));
811
812 mutex_enter(&mg->mg_lock);
813 ASSERT(msp->ms_group == mg);
814 avl_remove(&mg->mg_metaslab_tree, msp);
815 msp->ms_weight = weight;
816 avl_add(&mg->mg_metaslab_tree, msp);
817 mutex_exit(&mg->mg_lock);
818}
819
820/*
821 * Calculate the fragmentation for a given metaslab group. We can use
822 * a simple average here since all metaslabs within the group must have
823 * the same size. The return value will be a value between 0 and 100
824 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
825 * group have a fragmentation metric.
826 */
827uint64_t
828metaslab_group_fragmentation(metaslab_group_t *mg)
829{
830 vdev_t *vd = mg->mg_vd;
831 uint64_t fragmentation = 0;
832 uint64_t valid_ms = 0;
833 int m;
834
835 for (m = 0; m < vd->vdev_ms_count; m++) {
836 metaslab_t *msp = vd->vdev_ms[m];
837
838 if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
839 continue;
840
841 valid_ms++;
842 fragmentation += msp->ms_fragmentation;
843 }
844
845 if (valid_ms <= vd->vdev_ms_count / 2)
846 return (ZFS_FRAG_INVALID);
847
848 fragmentation /= valid_ms;
849 ASSERT3U(fragmentation, <=, 100);
850 return (fragmentation);
851}
852
853/*
854 * Determine if a given metaslab group should skip allocations. A metaslab
855 * group should avoid allocations if its free capacity is less than the
856 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
857 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
858 * that can still handle allocations. If the allocation throttle is enabled
859 * then we skip allocations to devices that have reached their maximum
860 * allocation queue depth unless the selected metaslab group is the only
861 * eligible group remaining.
862 */
863static boolean_t
864metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
865 uint64_t psize)
ac72fac3 866{
3dfb57a3 867 spa_t *spa = mg->mg_vd->vdev_spa;
868 metaslab_class_t *mc = mg->mg_class;
869
870 /*
871 * We can only consider skipping this metaslab group if it's
872 * in the normal metaslab class and there are other metaslab
873 * groups to select from. Otherwise, we always consider it eligible
f3a7f661 874 * for allocations.
ac72fac3 875 */
876 if (mc != spa_normal_class(spa) || mc->mc_groups <= 1)
877 return (B_TRUE);
878
879 /*
880 * If the metaslab group's mg_allocatable flag is set (see comments
881 * in metaslab_group_alloc_update() for more information) and
882 * the allocation throttle is disabled then allow allocations to this
883 * device. However, if the allocation throttle is enabled then
884 * check if we have reached our allocation limit (mg_alloc_queue_depth)
885 * to determine if we should allow allocations to this metaslab group.
886 * If all metaslab groups are no longer considered allocatable
887 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
888 * gang block size then we allow allocations on this metaslab group
889 * regardless of the mg_allocatable or throttle settings.
890 */
891 if (mg->mg_allocatable) {
892 metaslab_group_t *mgp;
893 int64_t qdepth;
894 uint64_t qmax = mg->mg_max_alloc_queue_depth;
895
896 if (!mc->mc_alloc_throttle_enabled)
897 return (B_TRUE);
898
899 /*
900 * If this metaslab group does not have any free space, then
901 * there is no point in looking further.
902 */
903 if (mg->mg_no_free_space)
904 return (B_FALSE);
905
906 qdepth = refcount_count(&mg->mg_alloc_queue_depth);
907
908 /*
909 * If this metaslab group is below its qmax or it's
910 * the only allocatable metaslab group, then attempt
911 * to allocate from it.
912 */
913 if (qdepth < qmax || mc->mc_alloc_groups == 1)
914 return (B_TRUE);
915 ASSERT3U(mc->mc_alloc_groups, >, 1);
916
917 /*
918 * Since this metaslab group is at or over its qmax, we
919 * need to determine if there are metaslab groups after this
920 * one that might be able to handle this allocation. This is
921 * racy since we can't hold the locks for all metaslab
922 * groups at the same time when we make this check.
923 */
924 for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
925 qmax = mgp->mg_max_alloc_queue_depth;
926
927 qdepth = refcount_count(&mgp->mg_alloc_queue_depth);
928
929 /*
930 * If there is another metaslab group that
931 * might be able to handle the allocation, then
932 * we return false so that we skip this group.
933 */
934 if (qdepth < qmax && !mgp->mg_no_free_space)
935 return (B_FALSE);
936 }
937
938 /*
939 * We didn't find another group to handle the allocation
940 * so we can't skip this metaslab group even though
941 * we are at or over our qmax.
942 */
943 return (B_TRUE);
944
945 } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
946 return (B_TRUE);
947 }
948 return (B_FALSE);
949}
950
951/*
952 * ==========================================================================
93cf2076 953 * Range tree callbacks
954 * ==========================================================================
955 */
956
957/*
958 * Comparison function for the private size-ordered tree. Tree is sorted
959 * by size, larger sizes at the end of the tree.
960 */
428870ff 961static int
93cf2076 962metaslab_rangesize_compare(const void *x1, const void *x2)
428870ff 963{
964 const range_seg_t *r1 = x1;
965 const range_seg_t *r2 = x2;
966 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
967 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
428870ff 968
969 int cmp = AVL_CMP(rs_size1, rs_size2);
970 if (likely(cmp))
971 return (cmp);
428870ff 972
ee36c709 973 return (AVL_CMP(r1->rs_start, r2->rs_start));
974}
975
34dc7c2f 976/*
977 * Create any block allocator specific components. The current allocators
978 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
34dc7c2f 979 */
980static void
981metaslab_rt_create(range_tree_t *rt, void *arg)
34dc7c2f 982{
93cf2076 983 metaslab_t *msp = arg;
34dc7c2f 984
985 ASSERT3P(rt->rt_arg, ==, msp);
986 ASSERT(msp->ms_tree == NULL);
34dc7c2f 987
988 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
989 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
990}
991
992/*
993 * Destroy the block allocator specific components.
994 */
9babb374 995static void
93cf2076 996metaslab_rt_destroy(range_tree_t *rt, void *arg)
9babb374 997{
93cf2076 998 metaslab_t *msp = arg;
428870ff 999
1000 ASSERT3P(rt->rt_arg, ==, msp);
1001 ASSERT3P(msp->ms_tree, ==, rt);
1002 ASSERT0(avl_numnodes(&msp->ms_size_tree));
428870ff 1003
93cf2076 1004 avl_destroy(&msp->ms_size_tree);
1005}
1006
1007static void
93cf2076 1008metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
9babb374 1009{
93cf2076 1010 metaslab_t *msp = arg;
9babb374 1011
1012 ASSERT3P(rt->rt_arg, ==, msp);
1013 ASSERT3P(msp->ms_tree, ==, rt);
1014 VERIFY(!msp->ms_condensing);
1015 avl_add(&msp->ms_size_tree, rs);
1016}
1017
34dc7c2f 1018static void
93cf2076 1019metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
34dc7c2f 1020{
1021 metaslab_t *msp = arg;
1022
1023 ASSERT3P(rt->rt_arg, ==, msp);
1024 ASSERT3P(msp->ms_tree, ==, rt);
1025 VERIFY(!msp->ms_condensing);
1026 avl_remove(&msp->ms_size_tree, rs);
1027}
1028
34dc7c2f 1029static void
93cf2076 1030metaslab_rt_vacate(range_tree_t *rt, void *arg)
34dc7c2f 1031{
1032 metaslab_t *msp = arg;
1033
1034 ASSERT3P(rt->rt_arg, ==, msp);
1035 ASSERT3P(msp->ms_tree, ==, rt);
1036
1037 /*
1038 * Normally one would walk the tree freeing nodes along the way.
1039 * Since the nodes are shared with the range trees we can avoid
1040 * walking all nodes and just reinitialize the avl tree. The nodes
1041 * will be freed by the range tree, so we don't want to free them here.
1042 */
1043 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
1044 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
1045}
1046
1047static range_tree_ops_t metaslab_rt_ops = {
1048 metaslab_rt_create,
1049 metaslab_rt_destroy,
1050 metaslab_rt_add,
1051 metaslab_rt_remove,
1052 metaslab_rt_vacate
1053};
1054
1055/*
1056 * ==========================================================================
4e21fd06 1057 * Common allocator routines
1058 * ==========================================================================
1059 */
1060
9babb374 1061/*
428870ff 1062 * Return the maximum contiguous segment within the metaslab.
9babb374 1063 */
9babb374 1064uint64_t
93cf2076 1065metaslab_block_maxsize(metaslab_t *msp)
9babb374 1066{
1067 avl_tree_t *t = &msp->ms_size_tree;
1068 range_seg_t *rs;
9babb374 1069
93cf2076 1070 if (t == NULL || (rs = avl_last(t)) == NULL)
1071 return (0ULL);
1072
1073 return (rs->rs_end - rs->rs_start);
1074}
1075
1076static range_seg_t *
1077metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
93cf2076 1078{
1079 range_seg_t *rs, rsearch;
1080 avl_index_t where;
93cf2076 1081
1082 rsearch.rs_start = start;
1083 rsearch.rs_end = start + size;
93cf2076 1084
1085 rs = avl_find(t, &rsearch, &where);
1086 if (rs == NULL) {
1087 rs = avl_nearest(t, where, AVL_AFTER);
93cf2076 1088 }
93cf2076 1089
1090 return (rs);
1091}
1092
1093#if defined(WITH_FF_BLOCK_ALLOCATOR) || \
1094 defined(WITH_DF_BLOCK_ALLOCATOR) || \
1095 defined(WITH_CF_BLOCK_ALLOCATOR)
1096/*
1097 * This is a helper function that can be used by the allocator to find
1098 * a suitable block to allocate. This will search the specified AVL
1099 * tree looking for a block that matches the specified criteria.
1100 */
1101static uint64_t
1102metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
1103 uint64_t align)
1104{
4e21fd06 1105 range_seg_t *rs = metaslab_block_find(t, *cursor, size);
1106
1107 while (rs != NULL) {
1108 uint64_t offset = P2ROUNDUP(rs->rs_start, align);
1109
1110 if (offset + size <= rs->rs_end) {
1111 *cursor = offset + size;
1112 return (offset);
1113 }
1114 rs = AVL_NEXT(t, rs);
1115 }
1116
1117 /*
1118 * If we know we've searched the whole map (*cursor == 0), give up.
1119 * Otherwise, reset the cursor to the beginning and try again.
1120 */
1121 if (*cursor == 0)
1122 return (-1ULL);
1123
1124 *cursor = 0;
1125 return (metaslab_block_picker(t, cursor, size, align));
9babb374 1126}
93cf2076 1127#endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */
9babb374 1128
22c81dd8 1129#if defined(WITH_FF_BLOCK_ALLOCATOR)
1130/*
1131 * ==========================================================================
1132 * The first-fit block allocator
1133 * ==========================================================================
1134 */
1135static uint64_t
93cf2076 1136metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
9babb374 1137{
1138 /*
1139 * Find the largest power of 2 block size that evenly divides the
1140 * requested size. This is used to try to allocate blocks with similar
1141 * alignment from the same area of the metaslab (i.e. same cursor
1142 * bucket), but it does not prevent other allocation sizes from
1143 * existing in the same region.
1144 */
428870ff 1145 uint64_t align = size & -size;
9bd274dd 1146 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
93cf2076 1147 avl_tree_t *t = &msp->ms_tree->rt_root;
9babb374 1148
428870ff 1149 return (metaslab_block_picker(t, cursor, size, align));
1150}
1151
93cf2076 1152static metaslab_ops_t metaslab_ff_ops = {
f3a7f661 1153 metaslab_ff_alloc
428870ff 1154};
9babb374 1155
93cf2076 1156metaslab_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
1157#endif /* WITH_FF_BLOCK_ALLOCATOR */
1158
1159#if defined(WITH_DF_BLOCK_ALLOCATOR)
1160/*
1161 * ==========================================================================
1162 * Dynamic block allocator -
1163 * Uses the first-fit allocation scheme until space gets low and then
1164 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
1165 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1166 * ==========================================================================
1167 */
9babb374 1168static uint64_t
93cf2076 1169metaslab_df_alloc(metaslab_t *msp, uint64_t size)
9babb374 1170{
1171 /*
1172 * Find the largest power of 2 block size that evenly divides the
1173 * requested size. This is used to try to allocate blocks with similar
1174 * alignment from the same area of the metaslab (i.e. same cursor
1175 * bucket), but it does not prevent other allocation sizes from
1176 * existing in the same region.
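 * For example, a request of 0x6000 (24K) yields an alignment of
 * 0x2000 (8K), since size & -size isolates the lowest set bit of the
 * requested size; power-of-2 requests align to themselves.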
1177 */
9babb374 1178 uint64_t align = size & -size;
9bd274dd 1179 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1180 range_tree_t *rt = msp->ms_tree;
1181 avl_tree_t *t = &rt->rt_root;
1182 uint64_t max_size = metaslab_block_maxsize(msp);
1183 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
9babb374 1184
1185 ASSERT(MUTEX_HELD(&msp->ms_lock));
1186 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1187
1188 if (max_size < size)
1189 return (-1ULL);
1190
1191 /*
1192 * If we're running low on space switch to using the size
1193 * sorted AVL tree (best-fit).
1194 */
1195 if (max_size < metaslab_df_alloc_threshold ||
1196 free_pct < metaslab_df_free_pct) {
93cf2076 1197 t = &msp->ms_size_tree;
1198 *cursor = 0;
1199 }
1200
1201 return (metaslab_block_picker(t, cursor, size, 1ULL));
1202}
1203
93cf2076 1204static metaslab_ops_t metaslab_df_ops = {
f3a7f661 1205 metaslab_df_alloc
1206};
1207
93cf2076 1208metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
1209#endif /* WITH_DF_BLOCK_ALLOCATOR */
1210
93cf2076 1211#if defined(WITH_CF_BLOCK_ALLOCATOR)
1212/*
1213 * ==========================================================================
1214 * Cursor fit block allocator -
1215 * Select the largest region in the metaslab, set the cursor to the beginning
1216 * of the range and the cursor_end to the end of the range. As allocations
1217 * are made advance the cursor. Continue allocating from the cursor until
1218 * the range is exhausted and then find a new range.
1219 * ==========================================================================
1220 */
1221static uint64_t
93cf2076 1222metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
428870ff 1223{
1224 range_tree_t *rt = msp->ms_tree;
1225 avl_tree_t *t = &msp->ms_size_tree;
1226 uint64_t *cursor = &msp->ms_lbas[0];
1227 uint64_t *cursor_end = &msp->ms_lbas[1];
1228 uint64_t offset = 0;
1229
1230 ASSERT(MUTEX_HELD(&msp->ms_lock));
1231 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
428870ff 1232
93cf2076 1233 ASSERT3U(*cursor_end, >=, *cursor);
428870ff 1234
1235 if ((*cursor + size) > *cursor_end) {
1236 range_seg_t *rs;
428870ff 1237
1238 rs = avl_last(&msp->ms_size_tree);
1239 if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1240 return (-1ULL);
428870ff 1241
1242 *cursor = rs->rs_start;
1243 *cursor_end = rs->rs_end;
428870ff 1244 }
1245
1246 offset = *cursor;
1247 *cursor += size;
1248
1249 return (offset);
1250}
1251
93cf2076 1252static metaslab_ops_t metaslab_cf_ops = {
f3a7f661 1253 metaslab_cf_alloc
1254};
1255
1256metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
1257#endif /* WITH_CF_BLOCK_ALLOCATOR */
1258
1259#if defined(WITH_NDF_BLOCK_ALLOCATOR)
1260/*
1261 * ==========================================================================
1262 * New dynamic fit allocator -
1263 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1264 * contiguous blocks. If no region is found then just use the largest segment
1265 * that remains.
1266 * ==========================================================================
1267 */
1268
1269/*
1270 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1271 * to request from the allocator.
1272 */
1273uint64_t metaslab_ndf_clump_shift = 4;
1274
1275static uint64_t
93cf2076 1276metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
428870ff 1277{
93cf2076 1278 avl_tree_t *t = &msp->ms_tree->rt_root;
428870ff 1279 avl_index_t where;
93cf2076 1280 range_seg_t *rs, rsearch;
9bd274dd 1281 uint64_t hbit = highbit64(size);
1282 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1283 uint64_t max_size = metaslab_block_maxsize(msp);
428870ff 1284
1285 ASSERT(MUTEX_HELD(&msp->ms_lock));
1286 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1287
1288 if (max_size < size)
1289 return (-1ULL);
1290
1291 rsearch.rs_start = *cursor;
1292 rsearch.rs_end = *cursor + size;
428870ff 1293
1294 rs = avl_find(t, &rsearch, &where);
1295 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
1296 t = &msp->ms_size_tree;
428870ff 1297
1298 rsearch.rs_start = 0;
1299 rsearch.rs_end = MIN(max_size,
428870ff 1300 1ULL << (hbit + metaslab_ndf_clump_shift));
1301 rs = avl_find(t, &rsearch, &where);
1302 if (rs == NULL)
1303 rs = avl_nearest(t, where, AVL_AFTER);
1304 ASSERT(rs != NULL);
1305 }
1306
1307 if ((rs->rs_end - rs->rs_start) >= size) {
1308 *cursor = rs->rs_start + size;
1309 return (rs->rs_start);
1310 }
1311 return (-1ULL);
1312}
1313
93cf2076 1314static metaslab_ops_t metaslab_ndf_ops = {
f3a7f661 1315 metaslab_ndf_alloc
1316};
1317
93cf2076 1318metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
22c81dd8 1319#endif /* WITH_NDF_BLOCK_ALLOCATOR */
9babb374 1320
93cf2076 1321
1322/*
1323 * ==========================================================================
1324 * Metaslabs
1325 * ==========================================================================
1326 */
1327
1328/*
1329 * Wait for any in-progress metaslab loads to complete.
1330 */
1331void
1332metaslab_load_wait(metaslab_t *msp)
1333{
1334 ASSERT(MUTEX_HELD(&msp->ms_lock));
1335
1336 while (msp->ms_loading) {
1337 ASSERT(!msp->ms_loaded);
1338 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1339 }
1340}
1341
1342int
1343metaslab_load(metaslab_t *msp)
1344{
1345 int error = 0;
1346 int t;
4e21fd06 1347 boolean_t success = B_FALSE;
1348
1349 ASSERT(MUTEX_HELD(&msp->ms_lock));
1350 ASSERT(!msp->ms_loaded);
1351 ASSERT(!msp->ms_loading);
1352
1353 msp->ms_loading = B_TRUE;
1354
1355 /*
1356 * If the space map has not been allocated yet, then treat
1357 * all the space in the metaslab as free and add it to the
1358 * ms_tree.
1359 */
1360 if (msp->ms_sm != NULL)
1361 error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
1362 else
1363 range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);
1364
4e21fd06 1365 success = (error == 0);
1366 msp->ms_loading = B_FALSE;
1367
1368 if (success) {
1369 ASSERT3P(msp->ms_group, !=, NULL);
1370 msp->ms_loaded = B_TRUE;
1371
1372 for (t = 0; t < TXG_DEFER_SIZE; t++) {
1373 range_tree_walk(msp->ms_defertree[t],
1374 range_tree_remove, msp->ms_tree);
1375 }
4e21fd06 1376 msp->ms_max_size = metaslab_block_maxsize(msp);
1377 }
1378 cv_broadcast(&msp->ms_load_cv);
1379 return (error);
1380}
1381
1382void
1383metaslab_unload(metaslab_t *msp)
1384{
1385 ASSERT(MUTEX_HELD(&msp->ms_lock));
1386 range_tree_vacate(msp->ms_tree, NULL, NULL);
1387 msp->ms_loaded = B_FALSE;
1388 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
4e21fd06 1389 msp->ms_max_size = 0;
1390}
1391
1392int
1393metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
1394 metaslab_t **msp)
1395{
1396 vdev_t *vd = mg->mg_vd;
93cf2076 1397 objset_t *mos = vd->vdev_spa->spa_meta_objset;
1398 metaslab_t *ms;
1399 int error;
34dc7c2f 1400
79c76d5b 1401 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1402 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1403 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
1404 ms->ms_id = id;
1405 ms->ms_start = id << vd->vdev_ms_shift;
1406 ms->ms_size = 1ULL << vd->vdev_ms_shift;
34dc7c2f 1407
1408 /*
1409 * We only open space map objects that already exist. All others
afe37326 1410 * will be opened when we finally allocate an object for it.
93cf2076 1411 */
afe37326 1412 if (object != 0) {
1413 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
1414 ms->ms_size, vd->vdev_ashift, &ms->ms_lock);
1415
1416 if (error != 0) {
1417 kmem_free(ms, sizeof (metaslab_t));
1418 return (error);
1419 }
1420
1421 ASSERT(ms->ms_sm != NULL);
93cf2076 1422 }
1423
1424 /*
1425 * We create the main range tree here, but we don't create the
1426 * alloctree and freetree until metaslab_sync_done(). This serves
1427 * two purposes: it allows metaslab_sync_done() to detect the
1428 * addition of new space; and for debugging, it ensures that we'd
1429 * data fault on any attempt to use this metaslab before it's ready.
1430 */
1431 ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock);
1432 metaslab_group_add(mg, ms);
34dc7c2f 1433
4e21fd06 1434 metaslab_set_fragmentation(ms);
428870ff 1435
1436 /*
1437 * If we're opening an existing pool (txg == 0) or creating
1438 * a new one (txg == TXG_INITIAL), all space is available now.
1439 * If we're adding space to an existing pool, the new space
1440 * does not become available until after this txg has synced.
1441 * The metaslab's weight will also be initialized when we sync
1442 * out this txg. This ensures that we don't attempt to allocate
1443 * from it before we have initialized it completely.
1444 */
1445 if (txg <= TXG_INITIAL)
fb42a493 1446 metaslab_sync_done(ms, 0);
34dc7c2f 1447
1448 /*
1449 * If metaslab_debug_load is set and we're initializing a metaslab
4e21fd06 1450 * that has an allocated space map object then load its space
1451 * map so that we can verify frees.
1452 */
1453 if (metaslab_debug_load && ms->ms_sm != NULL) {
1454 mutex_enter(&ms->ms_lock);
1455 VERIFY0(metaslab_load(ms));
1456 mutex_exit(&ms->ms_lock);
1457 }
1458
34dc7c2f 1459 if (txg != 0) {
34dc7c2f 1460 vdev_dirty(vd, 0, NULL, txg);
fb42a493 1461 vdev_dirty(vd, VDD_METASLAB, ms, txg);
1462 }
1463
1464 *msp = ms;
1465
1466 return (0);
1467}
1468
1469void
1470metaslab_fini(metaslab_t *msp)
1471{
d6320ddb 1472 int t;
34dc7c2f 1473
93cf2076 1474 metaslab_group_t *mg = msp->ms_group;
1475
1476 metaslab_group_remove(mg, msp);
1477
1478 mutex_enter(&msp->ms_lock);
1479 VERIFY(msp->ms_group == NULL);
1480 vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1481 0, -msp->ms_size);
1482 space_map_close(msp->ms_sm);
1483
1484 metaslab_unload(msp);
1485 range_tree_destroy(msp->ms_tree);
34dc7c2f 1486
d6320ddb 1487 for (t = 0; t < TXG_SIZE; t++) {
1488 range_tree_destroy(msp->ms_alloctree[t]);
1489 range_tree_destroy(msp->ms_freetree[t]);
1490 }
1491
e51be066 1492 for (t = 0; t < TXG_DEFER_SIZE; t++) {
93cf2076 1493 range_tree_destroy(msp->ms_defertree[t]);
e51be066 1494 }
428870ff 1495
c99c9001 1496 ASSERT0(msp->ms_deferspace);
428870ff 1497
34dc7c2f 1498 mutex_exit(&msp->ms_lock);
93cf2076 1499 cv_destroy(&msp->ms_load_cv);
1500 mutex_destroy(&msp->ms_lock);
1501
1502 kmem_free(msp, sizeof (metaslab_t));
1503}
1504
1505#define FRAGMENTATION_TABLE_SIZE 17
1506
93cf2076 1507/*
1508 * This table defines a segment size based fragmentation metric that will
1509 * allow each metaslab to derive its own fragmentation value. This is done
1510 * by calculating the space in each bucket of the spacemap histogram and
1511 * multiplying that by the fragmentation metric in this table. Doing
1512 * this for all buckets and dividing it by the total amount of free
1513 * space in this metaslab (i.e. the total free space in all buckets) gives
1514 * us the fragmentation metric. This means that a high fragmentation metric
1515 * equates to most of the free space being comprised of small segments.
1516 * Conversely, if the metric is low, then most of the free space is in
1517 * large segments. A 10% change in fragmentation equates to approximately
1518 * double the number of segments.
93cf2076 1519 *
1520 * This table defines 0% fragmented space using 16MB segments. Testing has
1521 * shown that segments that are greater than or equal to 16MB do not suffer
1522 * from drastic performance problems. Using this value, we derive the rest
1523 * of the table. Since the fragmentation value is never stored on disk, it
1524 * is possible to change these calculations in the future.
1525 */
1526int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
1527 100, /* 512B */
1528 100, /* 1K */
1529 98, /* 2K */
1530 95, /* 4K */
1531 90, /* 8K */
1532 80, /* 16K */
1533 70, /* 32K */
1534 60, /* 64K */
1535 50, /* 128K */
1536 40, /* 256K */
1537 30, /* 512K */
1538 20, /* 1M */
1539 15, /* 2M */
1540 10, /* 4M */
1541 5, /* 8M */
1542 0 /* 16M */
1543};
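
/*
 * As a worked example of how metaslab_set_fragmentation() applies this
 * table: if a metaslab's free space were split evenly between 128K
 * segments (factor 50) and 8M segments (factor 5), the weighted average
 * works out to roughly 27, i.e. moderately fragmented; if all free space
 * sat in segments of 16M or larger, the metric would be 0.
 */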
1544
1545/*
1546 * Calculate the metaslab's fragmentation metric. A return value
1547 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
1548 * not support this metric. Otherwise, the return value should be in the
1549 * range [0, 100].
93cf2076 1550 */
1551static void
1552metaslab_set_fragmentation(metaslab_t *msp)
93cf2076 1553{
1554 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1555 uint64_t fragmentation = 0;
1556 uint64_t total = 0;
1557 boolean_t feature_enabled = spa_feature_is_enabled(spa,
1558 SPA_FEATURE_SPACEMAP_HISTOGRAM);
1559 int i;
1560
1561 if (!feature_enabled) {
1562 msp->ms_fragmentation = ZFS_FRAG_INVALID;
1563 return;
1564 }
f3a7f661 1565
93cf2076 1566 /*
1567 * A null space map means that the entire metaslab is free
1568 * and thus is not fragmented.
93cf2076 1569 */
1570 if (msp->ms_sm == NULL) {
1571 msp->ms_fragmentation = 0;
1572 return;
1573 }
1574
1575 /*
4e21fd06 1576 * If this metaslab's space map has not been upgraded, flag it
1577 * so that we upgrade next time we encounter it.
1578 */
1579 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
1580 vdev_t *vd = msp->ms_group->mg_vd;
1581
1582 if (spa_writeable(vd->vdev_spa)) {
1583 uint64_t txg = spa_syncing_txg(spa);
1584
1585 msp->ms_condense_wanted = B_TRUE;
1586 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1587 spa_dbgmsg(spa, "txg %llu, requesting force condense: "
1588 "msp %p, vd %p", txg, msp, vd);
1589 }
1590 msp->ms_fragmentation = ZFS_FRAG_INVALID;
1591 return;
1592 }
1593
1594 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1595 uint64_t space = 0;
1596 uint8_t shift = msp->ms_sm->sm_shift;
4e21fd06 1597
1598 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
1599 FRAGMENTATION_TABLE_SIZE - 1);
93cf2076 1600
1601 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1602 continue;
1603
1604 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
1605 total += space;
1606
1607 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
1608 fragmentation += space * zfs_frag_table[idx];
93cf2076 1609 }
1610
1611 if (total > 0)
1612 fragmentation /= total;
1613 ASSERT3U(fragmentation, <=, 100);
1614
1615 msp->ms_fragmentation = fragmentation;
93cf2076 1616}
34dc7c2f 1617
1618/*
1619 * Compute a weight -- a selection preference value -- for the given metaslab.
1620 * This is based on the amount of free space, the level of fragmentation,
1621 * the LBA range, and whether the metaslab is loaded.
1622 */
34dc7c2f 1623static uint64_t
4e21fd06 1624metaslab_space_weight(metaslab_t *msp)
1625{
1626 metaslab_group_t *mg = msp->ms_group;
1627 vdev_t *vd = mg->mg_vd;
1628 uint64_t weight, space;
1629
1630 ASSERT(MUTEX_HELD(&msp->ms_lock));
4e21fd06 1631 ASSERT(!vd->vdev_removing);
c2e42f9d 1632
1633 /*
1634 * The baseline weight is the metaslab's free space.
1635 */
93cf2076 1636 space = msp->ms_size - space_map_allocated(msp->ms_sm);
f3a7f661 1637
1638 if (metaslab_fragmentation_factor_enabled &&
1639 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
1640 /*
1641 * Use the fragmentation information to inversely scale
1642 * down the baseline weight. We need to ensure that we
1643 * don't exclude this metaslab completely when it's 100%
1644 * fragmented. To avoid this we reduce the fragmented value
1645 * by 1.
1646 */
1647 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
1648
1649 /*
1650 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1651 * this metaslab again. The fragmentation metric may have
1652 * decreased the space to something smaller than
1653 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1654 * so that we can consume any remaining space.
1655 */
1656 if (space > 0 && space < SPA_MINBLOCKSIZE)
1657 space = SPA_MINBLOCKSIZE;
1658 }
34dc7c2f
BB
1659 weight = space;
1660
1661 /*
1662 * Modern disks have uniform bit density and constant angular velocity.
1663 * Therefore, the outer recording zones are faster (higher bandwidth)
1664 * than the inner zones by the ratio of outer to inner track diameter,
1665 * which is typically around 2:1. We account for this by assigning
1666 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1667 * In effect, this means that we'll select the metaslab with the most
1668 * free bandwidth rather than simply the one with the most free space.
1669 */
fb40095f 1670 if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
f3a7f661
GW
1671 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1672 ASSERT(weight >= space && weight <= 2 * space);
1673 }
428870ff 1674
f3a7f661
GW
1675 /*
1676 * If this metaslab is one we're actively using, adjust its
1677 * weight to make it preferable to any inactive metaslab so
1678 * we'll polish it off. If the fragmentation on this metaslab
1679 * has exceeded our threshold, then don't mark it active.
1680 */
1681 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
1682 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
428870ff
BB
1683 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1684 }
34dc7c2f 1685
4e21fd06
DB
1686 WEIGHT_SET_SPACEBASED(weight);
1687 return (weight);
1688}
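/*
 * Sanity check of the LBA weighting above, with hypothetical numbers:
 * for vdev_ms_count == 200, the outermost metaslab (ms_id == 0) gets
 * weight = 2 * space, a metaslab a quarter of the way in (ms_id == 50)
 * gets 2s - 50s/200 = 1.75 * space, and the innermost approaches
 * 1 * space -- matching the ~2:1 outer-to-inner bandwidth ratio
 * described above.
 */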
1689
1690/*
1691 * Return the weight of the specified metaslab, according to the segment-based
1692 * weighting algorithm. The metaslab must be loaded. This function can
1693 * be called within a sync pass since it relies only on the metaslab's
1694 * range tree which is always accurate when the metaslab is loaded.
1695 */
1696static uint64_t
1697metaslab_weight_from_range_tree(metaslab_t *msp)
1698{
1699 uint64_t weight = 0;
1700 uint32_t segments = 0;
1701 int i;
1702
1703 ASSERT(msp->ms_loaded);
1704
1705 for (i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT; i--) {
1706 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
1707 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1708
1709 segments <<= 1;
1710 segments += msp->ms_tree->rt_histogram[i];
1711
1712 /*
1713 * The range tree provides more precision than the space map
1714 * and must be downgraded so that all values fit within the
1715 * space map's histogram. This allows us to compare loaded
1716 * vs. unloaded metaslabs to determine which metaslab is
1717 * considered "best".
1718 */
1719 if (i > max_idx)
1720 continue;
1721
1722 if (segments != 0) {
1723 WEIGHT_SET_COUNT(weight, segments);
1724 WEIGHT_SET_INDEX(weight, i);
1725 WEIGHT_SET_ACTIVE(weight, 0);
1726 break;
1727 }
1728 }
1729 return (weight);
1730}
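/*
 * Note on the folding above (illustrative): each step down the
 * histogram doubles the accumulated count, since one free segment of
 * size 2^(i+1) covers two regions of size 2^i. A single segment two
 * buckets above max_idx is therefore recorded as count 4 at index
 * max_idx -- exactly how the same free space would appear in the
 * coarser space map histogram.
 */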
1731
1732/*
1733 * Calculate the weight based on the on-disk histogram. This should only
1734 * be called after a sync pass has completely finished since the on-disk
1735 * information is updated in metaslab_sync().
1736 */
1737static uint64_t
1738metaslab_weight_from_spacemap(metaslab_t *msp)
1739{
1740 uint64_t weight = 0;
1741 int i;
1742
1743 for (i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
1744 if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) {
1745 WEIGHT_SET_COUNT(weight,
1746 msp->ms_sm->sm_phys->smp_histogram[i]);
1747 WEIGHT_SET_INDEX(weight, i +
1748 msp->ms_sm->sm_shift);
1749 WEIGHT_SET_ACTIVE(weight, 0);
1750 break;
1751 }
1752 }
1753 return (weight);
1754}
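/*
 * For instance (hypothetical values): with sm_shift == 9, a space map
 * whose highest non-empty histogram bucket is i == 10 with 3 segments
 * encodes count 3 at index 19, i.e. "3 free segments of at least
 * 2^19 (512K) each".
 */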
1755
1756/*
1757 * Compute a segment-based weight for the specified metaslab. The weight
1758 * is determined by highest bucket in the histogram. The information
1759 * for the highest bucket is encoded into the weight value.
1760 */
1761static uint64_t
1762metaslab_segment_weight(metaslab_t *msp)
1763{
1764 metaslab_group_t *mg = msp->ms_group;
1765 uint64_t weight = 0;
1766 uint8_t shift = mg->mg_vd->vdev_ashift;
1767
1768 ASSERT(MUTEX_HELD(&msp->ms_lock));
1769
1770 /*
1771 * The metaslab is completely free.
1772 */
1773 if (space_map_allocated(msp->ms_sm) == 0) {
1774 int idx = highbit64(msp->ms_size) - 1;
1775 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1776
1777 if (idx < max_idx) {
1778 WEIGHT_SET_COUNT(weight, 1ULL);
1779 WEIGHT_SET_INDEX(weight, idx);
1780 } else {
1781 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
1782 WEIGHT_SET_INDEX(weight, max_idx);
1783 }
1784 WEIGHT_SET_ACTIVE(weight, 0);
1785 ASSERT(!WEIGHT_IS_SPACEBASED(weight));
1786
1787 return (weight);
1788 }
1789
1790 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
1791
1792 /*
1793 * If the metaslab is fully allocated then just make the weight 0.
1794 */
1795 if (space_map_allocated(msp->ms_sm) == msp->ms_size)
1796 return (0);
1797 /*
1798 * If the metaslab is already loaded, then use the range tree to
1799 * determine the weight. Otherwise, we rely on the space map information
1800 * to generate the weight.
1801 */
1802 if (msp->ms_loaded) {
1803 weight = metaslab_weight_from_range_tree(msp);
1804 } else {
1805 weight = metaslab_weight_from_spacemap(msp);
1806 }
1807
1808 /*
1809 * If the metaslab was active the last time we calculated its weight
1810 * then keep it active. We want to consume the entire region that
1811 * is associated with this weight.
1812 */
1813 if (msp->ms_activation_weight != 0 && weight != 0)
1814 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
1815 return (weight);
1816}
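/*
 * Example of the free-metaslab case above (hypothetical sizes): a
 * completely free 16G metaslab (ms_size == 2^34) is encoded as a
 * single segment at index 34 -- one free run the size of the whole
 * metaslab -- provided index 34 fits below max_idx for this vdev's
 * ashift.
 */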
1817
1818/*
1819 * Determine if we should attempt to allocate from this metaslab. If the
1820 * metaslab has a maximum size then we can quickly determine if the desired
1821 * allocation size can be satisfied. Otherwise, if we're using segment-based
1822 * weighting then we can determine the maximum allocation that this metaslab
1823 * can accommodate based on the index encoded in the weight. If we're using
1824 * space-based weights then rely on the entire weight (excluding the weight
1825 * type bit).
1826 */
1827boolean_t
1828metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
1829{
1830 boolean_t should_allocate;
1831
1832 if (msp->ms_max_size != 0)
1833 return (msp->ms_max_size >= asize);
1834
1835 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
1836 /*
1837 * The metaslab segment weight indicates segments in the
1838 * range [2^i, 2^(i+1)), where i is the index in the weight.
1839 * Since the asize might be in the middle of the range, we
1840 * should attempt the allocation if asize < 2^(i+1).
1841 */
1842 should_allocate = (asize <
1843 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
1844 } else {
1845 should_allocate = (asize <=
1846 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
1847 }
1848 return (should_allocate);
1849}
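/*
 * Example (hypothetical sizes): a segment-based weight with index 20
 * advertises free segments in the range [1M, 2M). A 1.5M allocation
 * is attempted -- it may still fail if the actual segments are
 * smaller -- while a 3M allocation is rejected outright. A
 * space-based weight is compared directly: a weight of 10M accepts
 * any asize <= 10M.
 */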
1850static uint64_t
1851metaslab_weight(metaslab_t *msp)
1852{
1853 vdev_t *vd = msp->ms_group->mg_vd;
1854 spa_t *spa = vd->vdev_spa;
1855 uint64_t weight;
1856
1857 ASSERT(MUTEX_HELD(&msp->ms_lock));
1858
1859 /*
1860 * This vdev is in the process of being removed so there is nothing
1861 * for us to do here.
1862 */
1863 if (vd->vdev_removing) {
1864 ASSERT0(space_map_allocated(msp->ms_sm));
1865 ASSERT0(vd->vdev_ms_shift);
1866 return (0);
1867 }
1868
1869 metaslab_set_fragmentation(msp);
1870
1871 /*
1872 * Update the maximum size if the metaslab is loaded. This will
1873 * ensure that we get an accurate maximum size if newly freed space
1874 * has been added back into the free tree.
1875 */
1876 if (msp->ms_loaded)
1877 msp->ms_max_size = metaslab_block_maxsize(msp);
1878
1879 /*
1880 * Segment-based weighting requires space map histogram support.
1881 */
1882 if (zfs_metaslab_segment_weight_enabled &&
1883 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
1884 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
1885 sizeof (space_map_phys_t))) {
1886 weight = metaslab_segment_weight(msp);
1887 } else {
1888 weight = metaslab_space_weight(msp);
1889 }
93cf2076 1890 return (weight);
34dc7c2f
BB
1891}
1892
1893static int
6d974228 1894metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
34dc7c2f 1895{
34dc7c2f
BB
1896 ASSERT(MUTEX_HELD(&msp->ms_lock));
1897
1898 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
93cf2076
GW
1899 metaslab_load_wait(msp);
1900 if (!msp->ms_loaded) {
1901 int error = metaslab_load(msp);
1902 if (error) {
428870ff
BB
1903 metaslab_group_sort(msp->ms_group, msp, 0);
1904 return (error);
1905 }
34dc7c2f 1906 }
9babb374 1907
4e21fd06 1908 msp->ms_activation_weight = msp->ms_weight;
34dc7c2f
BB
1909 metaslab_group_sort(msp->ms_group, msp,
1910 msp->ms_weight | activation_weight);
1911 }
93cf2076 1912 ASSERT(msp->ms_loaded);
34dc7c2f
BB
1913 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
1914
1915 return (0);
1916}
1917
1918static void
4e21fd06 1919metaslab_passivate(metaslab_t *msp, uint64_t weight)
34dc7c2f 1920{
4e21fd06
DB
1921 ASSERTV(uint64_t size = weight & ~METASLAB_WEIGHT_TYPE);
1922
34dc7c2f
BB
1923 /*
1924 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1925 * this metaslab again. In that case, it had better be empty,
1926 * or we would be leaving space on the table.
1927 */
4e21fd06
DB
1928 ASSERT(size >= SPA_MINBLOCKSIZE ||
1929 range_tree_space(msp->ms_tree) == 0);
1930 ASSERT0(weight & METASLAB_ACTIVE_MASK);
1931
1932 msp->ms_activation_weight = 0;
1933 metaslab_group_sort(msp->ms_group, msp, weight);
34dc7c2f
BB
1934 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
1935}
1936
4e21fd06
DB
1937/*
1938 * Segment-based metaslabs are activated once and remain active until
1939 * we either fail an allocation attempt (similar to space-based metaslabs)
1940 * or have exhausted the free space in zfs_metaslab_switch_threshold
1941 * buckets since the metaslab was activated. This function checks to see
1942 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
1943 * metaslab and passivates it proactively. This will allow us to select a
1944 * metaslab with a larger contiguous region, if any, remaining within this
1945 * metaslab group. If we're in sync pass > 1, then we continue using this
1946 * metaslab so that we don't dirty more blocks and cause more sync passes.
1947 */
1948void
1949metaslab_segment_may_passivate(metaslab_t *msp)
1950{
1951 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1952 uint64_t weight;
1953 int activation_idx, current_idx;
1954
1955 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
1956 return;
1957
1958 /*
1959 * Since we are in the middle of a sync pass, the most accurate
1960 * information that is accessible to us is the in-core range tree
1961 * histogram; calculate the new weight based on that information.
1962 */
1963 weight = metaslab_weight_from_range_tree(msp);
1964 activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
1965 current_idx = WEIGHT_GET_INDEX(weight);
1966
1967 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
1968 metaslab_passivate(msp, weight);
1969}
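/*
 * For example (hypothetical indices): a metaslab activated with
 * segment index 25 (32M-64M runs) stays active while allocations
 * whittle it down, but once the in-core histogram shows the largest
 * bucket has dropped to index 25 - zfs_metaslab_switch_threshold or
 * below, it is passivated so that a sibling metaslab with larger
 * runs can be selected.
 */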
1970
93cf2076
GW
1971static void
1972metaslab_preload(void *arg)
1973{
1974 metaslab_t *msp = arg;
1975 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1cd77734 1976 fstrans_cookie_t cookie = spl_fstrans_mark();
93cf2076 1977
080b3100
GW
1978 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
1979
93cf2076
GW
1980 mutex_enter(&msp->ms_lock);
1981 metaslab_load_wait(msp);
1982 if (!msp->ms_loaded)
1983 (void) metaslab_load(msp);
4e21fd06 1984 msp->ms_selected_txg = spa_syncing_txg(spa);
93cf2076 1985 mutex_exit(&msp->ms_lock);
1cd77734 1986 spl_fstrans_unmark(cookie);
93cf2076
GW
1987}
1988
1989static void
1990metaslab_group_preload(metaslab_group_t *mg)
1991{
1992 spa_t *spa = mg->mg_vd->vdev_spa;
1993 metaslab_t *msp;
1994 avl_tree_t *t = &mg->mg_metaslab_tree;
1995 int m = 0;
1996
1997 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
c5528b9b 1998 taskq_wait_outstanding(mg->mg_taskq, 0);
93cf2076
GW
1999 return;
2000 }
93cf2076 2001
080b3100 2002 mutex_enter(&mg->mg_lock);
93cf2076 2003 /*
080b3100 2004 * Load the next potential metaslabs
93cf2076 2005 */
4e21fd06 2006 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
f3a7f661
GW
2007 /*
2008 * We preload only the maximum number of metaslabs specified
2009 * by metaslab_preload_limit. If a metaslab is being forced
2010 * to condense then we preload it too. This will ensure
2011 * that force condensing happens in the next txg.
2012 */
2013 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
f3a7f661
GW
2014 continue;
2015 }
93cf2076
GW
2016
2017 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
48d3eb40 2018 msp, TQ_SLEEP) != TASKQID_INVALID);
93cf2076
GW
2019 }
2020 mutex_exit(&mg->mg_lock);
2021}
2022
e51be066 2023/*
93cf2076
GW
2024 * Determine if the space map's on-disk footprint is past our tolerance
2025 * for inefficiency. We would like to use the following criteria to make
2026 * our decision:
e51be066
GW
2027 *
2028 * 1. The size of the space map object should not dramatically increase as a
93cf2076 2029 * result of writing out the free space range tree.
e51be066
GW
2030 *
2031 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
93cf2076
GW
2032 * times the size of the free space range tree representation
2033 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
e51be066 2034 *
b02fe35d
AR
2035 * 3. The on-disk size of the space map should actually decrease.
2036 *
e51be066
GW
2037 * Checking the first condition is tricky since we don't want to walk
2038 * the entire AVL tree calculating the estimated on-disk size. Instead we
93cf2076
GW
2039 * use the size-ordered range tree in the metaslab and calculate the
2040 * size required to write out the largest segment in our free tree. If the
e51be066
GW
2041 * size required to represent that segment on disk is larger than the space
2042 * map object then we avoid condensing this map.
2043 *
2044 * To determine the second criterion we use a best-case estimate and assume
2045 * each segment can be represented on-disk as a single 64-bit entry. We refer
2046 * to this best-case estimate as the space map's minimal form.
b02fe35d
AR
2047 *
2048 * Unfortunately, we cannot compute the on-disk size of the space map in this
2049 * context because we cannot accurately compute the effects of compression, etc.
2050 * Instead, we apply the heuristic described in the block comment for
2051 * zfs_metaslab_condense_block_threshold - we only condense if the space used
2052 * is greater than a threshold number of blocks.
e51be066
GW
2053 */
2054static boolean_t
2055metaslab_should_condense(metaslab_t *msp)
2056{
93cf2076
GW
2057 space_map_t *sm = msp->ms_sm;
2058 range_seg_t *rs;
b02fe35d
AR
2059 uint64_t size, entries, segsz, object_size, optimal_size, record_size;
2060 dmu_object_info_t doi;
f4bae2ed 2061 uint64_t vdev_blocksize = 1ULL << msp->ms_group->mg_vd->vdev_ashift;
e51be066
GW
2062
2063 ASSERT(MUTEX_HELD(&msp->ms_lock));
93cf2076 2064 ASSERT(msp->ms_loaded);
e51be066
GW
2065
2066 /*
93cf2076 2067 * Use the ms_size_tree range tree, which is ordered by size, to
f3a7f661
GW
2068 * obtain the largest segment in the free tree. We always condense
2069 * metaslabs that are empty and metaslabs for which a condense
2070 * request has been made.
e51be066 2071 */
93cf2076 2072 rs = avl_last(&msp->ms_size_tree);
f3a7f661 2073 if (rs == NULL || msp->ms_condense_wanted)
e51be066
GW
2074 return (B_TRUE);
2075
2076 /*
2077 * Calculate the number of 64-bit entries this segment would
2078 * require when written to disk. If this single segment would be
2079 * larger on-disk than the entire current on-disk structure, then
2080 * clearly condensing will increase the on-disk structure size.
2081 */
93cf2076 2082 size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
e51be066
GW
2083 entries = size / (MIN(size, SM_RUN_MAX));
2084 segsz = entries * sizeof (uint64_t);
2085
b02fe35d
AR
2086 optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root);
2087 object_size = space_map_length(msp->ms_sm);
2088
2089 dmu_object_info_from_db(sm->sm_dbuf, &doi);
2090 record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
2091
2092 return (segsz <= object_size &&
2093 object_size >= (optimal_size * zfs_condense_pct / 100) &&
2094 object_size > zfs_metaslab_condense_block_threshold * record_size);
e51be066
GW
2095}
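/*
 * Putting the three tests together (hypothetical numbers): an in-core
 * free tree of 1000 segments has a minimal on-disk form of
 * 1000 * 8 = 8000 bytes. With zfs_condense_pct == 200 we condense
 * only if the current space map object is at least 16000 bytes long,
 * exceeds zfs_metaslab_condense_block_threshold blocks of record_size,
 * and is no smaller than the on-disk representation of the single
 * largest free segment.
 */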
2096
2097/*
2098 * Condense the on-disk space map representation to its minimized form.
2099 * The minimized form consists of a small number of allocations followed by
93cf2076 2100 * the entries of the free range tree.
e51be066
GW
2101 */
2102static void
2103metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
2104{
2105 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
93cf2076
GW
2106 range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK];
2107 range_tree_t *condense_tree;
2108 space_map_t *sm = msp->ms_sm;
e51be066
GW
2109 int t;
2110
2111 ASSERT(MUTEX_HELD(&msp->ms_lock));
2112 ASSERT3U(spa_sync_pass(spa), ==, 1);
93cf2076 2113 ASSERT(msp->ms_loaded);
e51be066 2114
f3a7f661 2115
5f3d9c69
JS
2116 spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
2117 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
2118 msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
2119 msp->ms_group->mg_vd->vdev_spa->spa_name,
2120 space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root),
f3a7f661
GW
2121 msp->ms_condense_wanted ? "TRUE" : "FALSE");
2122
2123 msp->ms_condense_wanted = B_FALSE;
e51be066
GW
2124
2125 /*
93cf2076 2126 * Create a range tree that is 100% allocated. We remove segments
e51be066
GW
2127 * that have been freed in this txg, any deferred frees that exist,
2128 * and any allocation in the future. Removing segments should be
93cf2076
GW
2129 * a relatively inexpensive operation since we expect these trees to
2130 * have a small number of nodes.
e51be066 2131 */
93cf2076
GW
2132 condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
2133 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
e51be066
GW
2134
2135 /*
93cf2076 2136 * Remove what's been freed in this txg from the condense_tree.
e51be066 2137 * Since we're in sync_pass 1, we know that all the frees from
93cf2076 2138 * this txg are in the freetree.
e51be066 2139 */
93cf2076 2140 range_tree_walk(freetree, range_tree_remove, condense_tree);
e51be066 2141
93cf2076
GW
2142 for (t = 0; t < TXG_DEFER_SIZE; t++) {
2143 range_tree_walk(msp->ms_defertree[t],
2144 range_tree_remove, condense_tree);
2145 }
e51be066 2146
93cf2076
GW
2147 for (t = 1; t < TXG_CONCURRENT_STATES; t++) {
2148 range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
2149 range_tree_remove, condense_tree);
2150 }
e51be066
GW
2151
2152 /*
2153 * We're about to drop the metaslab's lock thus allowing
2154 * other consumers to change its content. Set the
93cf2076 2155 * metaslab's ms_condensing flag to ensure that
e51be066
GW
2156 * allocations on this metaslab do not occur while we're
2157 * in the middle of committing it to disk. This is only critical
93cf2076 2158 * for the ms_tree as all other range trees use per txg
e51be066
GW
2159 * views of their content.
2160 */
93cf2076 2161 msp->ms_condensing = B_TRUE;
e51be066
GW
2162
2163 mutex_exit(&msp->ms_lock);
93cf2076 2164 space_map_truncate(sm, tx);
e51be066
GW
2165 mutex_enter(&msp->ms_lock);
2166
2167 /*
4e21fd06 2168 * While we would ideally like to create a space map representation
e51be066 2169 * that consists only of allocation records, doing so can be
93cf2076 2170 * prohibitively expensive because the in-core free tree can be
e51be066 2171 * large, and therefore computationally expensive to subtract
93cf2076
GW
2172 * from the condense_tree. Instead we sync out two trees, a cheap
2173 * allocation only tree followed by the in-core free tree. While not
e51be066
GW
2174 * optimal, this is typically close to optimal, and much cheaper to
2175 * compute.
2176 */
93cf2076
GW
2177 space_map_write(sm, condense_tree, SM_ALLOC, tx);
2178 range_tree_vacate(condense_tree, NULL, NULL);
2179 range_tree_destroy(condense_tree);
e51be066 2180
93cf2076
GW
2181 space_map_write(sm, msp->ms_tree, SM_FREE, tx);
2182 msp->ms_condensing = B_FALSE;
e51be066
GW
2183}
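/*
 * Sketch of the resulting on-disk record stream (illustrative): for a
 * metaslab covering [0, 100M) with in-core free segments [10M, 20M)
 * and [50M, 60M), and nothing pending in the free, defer, or future
 * alloc trees, the condensed space map is roughly
 *
 *	ALLOC [0, 100M)  FREE [10M, 20M)  FREE [50M, 60M)
 *
 * which replays to the same allocated set as the original, much
 * longer, log of entries.
 */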
2184
34dc7c2f
BB
2185/*
2186 * Write a metaslab to disk in the context of the specified transaction group.
2187 */
2188void
2189metaslab_sync(metaslab_t *msp, uint64_t txg)
2190{
93cf2076
GW
2191 metaslab_group_t *mg = msp->ms_group;
2192 vdev_t *vd = mg->mg_vd;
34dc7c2f 2193 spa_t *spa = vd->vdev_spa;
428870ff 2194 objset_t *mos = spa_meta_objset(spa);
93cf2076
GW
2195 range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
2196 range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK];
2197 range_tree_t **freed_tree =
2198 &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
34dc7c2f 2199 dmu_tx_t *tx;
93cf2076 2200 uint64_t object = space_map_object(msp->ms_sm);
34dc7c2f 2201
428870ff
BB
2202 ASSERT(!vd->vdev_ishole);
2203
e51be066
GW
2204 /*
2205 * This metaslab has just been added so there's no work to do now.
2206 */
93cf2076
GW
2207 if (*freetree == NULL) {
2208 ASSERT3P(alloctree, ==, NULL);
e51be066
GW
2209 return;
2210 }
2211
93cf2076
GW
2212 ASSERT3P(alloctree, !=, NULL);
2213 ASSERT3P(*freetree, !=, NULL);
2214 ASSERT3P(*freed_tree, !=, NULL);
e51be066 2215
f3a7f661
GW
2216 /*
2217 * Normally, we don't want to process a metaslab if there
2218 * are no allocations or frees to perform. However, if the metaslab
2219 * is being forced to condense we need to let it through.
2220 */
93cf2076 2221 if (range_tree_space(alloctree) == 0 &&
f3a7f661
GW
2222 range_tree_space(*freetree) == 0 &&
2223 !msp->ms_condense_wanted)
428870ff 2224 return;
34dc7c2f
BB
2225
2226 /*
2227 * The only state that can actually be changing concurrently with
93cf2076
GW
2228 * metaslab_sync() is the metaslab's ms_tree. No other thread can
2229 * be modifying this txg's alloctree, freetree, freed_tree, or
2230 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
4e21fd06 2231 * space map ASSERTs. We drop it whenever we call into the DMU,
93cf2076
GW
2232 * because the DMU can call down to us (e.g. via zio_free()) at
2233 * any time.
34dc7c2f 2234 */
428870ff
BB
2235
2236 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
34dc7c2f 2237
93cf2076
GW
2238 if (msp->ms_sm == NULL) {
2239 uint64_t new_object;
2240
2241 new_object = space_map_alloc(mos, tx);
2242 VERIFY3U(new_object, !=, 0);
2243
2244 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
2245 msp->ms_start, msp->ms_size, vd->vdev_ashift,
2246 &msp->ms_lock));
2247 ASSERT(msp->ms_sm != NULL);
34dc7c2f
BB
2248 }
2249
428870ff
BB
2250 mutex_enter(&msp->ms_lock);
2251
96358617 2252 /*
4e21fd06
DB
2253 * Note: metaslab_condense() clears the space map's histogram.
2254 * Therefore we must verify and remove this histogram before
96358617
MA
2255 * condensing.
2256 */
2257 metaslab_group_histogram_verify(mg);
2258 metaslab_class_histogram_verify(mg->mg_class);
2259 metaslab_group_histogram_remove(mg, msp);
2260
93cf2076 2261 if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
e51be066
GW
2262 metaslab_should_condense(msp)) {
2263 metaslab_condense(msp, txg, tx);
2264 } else {
93cf2076
GW
2265 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
2266 space_map_write(msp->ms_sm, *freetree, SM_FREE, tx);
e51be066 2267 }
428870ff 2268
93cf2076 2269 if (msp->ms_loaded) {
4e21fd06
DB
2270 int t;
2271
93cf2076
GW
2272 /*
2273 * When the space map is loaded, we have an accurate
2274 * histogram in the range tree. This gives us an opportunity
2275 * to bring the space map's histogram up-to-date so we clear
2276 * it first before updating it.
2277 */
2278 space_map_histogram_clear(msp->ms_sm);
2279 space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
4e21fd06
DB
2280
2281 /*
2282 * Since we've cleared the histogram we need to add back
2283 * any free space that has already been processed, plus
2284 * any deferred space. This allows the on-disk histogram
2285 * to accurately reflect all free space even if some space
2286 * is not yet available for allocation (i.e. deferred).
2287 */
2288 space_map_histogram_add(msp->ms_sm, *freed_tree, tx);
2289
93cf2076 2290 /*
4e21fd06
DB
2291 * Add back any deferred free space that has not been
2292 * added back into the in-core free tree yet. This will
2293 * ensure that we don't end up with a space map histogram
2294 * that is completely empty unless the metaslab is fully
2295 * allocated.
93cf2076 2296 */
4e21fd06
DB
2297 for (t = 0; t < TXG_DEFER_SIZE; t++) {
2298 space_map_histogram_add(msp->ms_sm,
2299 msp->ms_defertree[t], tx);
2300 }
93cf2076 2301 }
4e21fd06
DB
2302
2303 /*
2304 * Always add the free space from this sync pass to the space
2305 * map histogram. We want to make sure that the on-disk histogram
2306 * accounts for all free space. If the space map is not loaded,
2307 * then we will lose some accuracy but will correct it the next
2308 * time we load the space map.
2309 */
2310 space_map_histogram_add(msp->ms_sm, *freetree, tx);
2311
f3a7f661
GW
2312 metaslab_group_histogram_add(mg, msp);
2313 metaslab_group_histogram_verify(mg);
2314 metaslab_class_histogram_verify(mg->mg_class);
34dc7c2f 2315
e51be066 2316 /*
93cf2076
GW
2317 * For sync pass 1, we avoid traversing this txg's free range tree
2318 * and instead will just swap the pointers for freetree and
2319 * freed_tree. We can safely do this since the freed_tree is
e51be066
GW
2320 * guaranteed to be empty on the initial pass.
2321 */
2322 if (spa_sync_pass(spa) == 1) {
93cf2076 2323 range_tree_swap(freetree, freed_tree);
e51be066 2324 } else {
93cf2076 2325 range_tree_vacate(*freetree, range_tree_add, *freed_tree);
34dc7c2f 2326 }
f3a7f661 2327 range_tree_vacate(alloctree, NULL, NULL);
34dc7c2f 2328
93cf2076 2329 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
4e21fd06 2330 ASSERT0(range_tree_space(msp->ms_alloctree[TXG_CLEAN(txg) & TXG_MASK]));
93cf2076 2331 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
34dc7c2f
BB
2332
2333 mutex_exit(&msp->ms_lock);
2334
93cf2076
GW
2335 if (object != space_map_object(msp->ms_sm)) {
2336 object = space_map_object(msp->ms_sm);
2337 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
2338 msp->ms_id, sizeof (uint64_t), &object, tx);
2339 }
34dc7c2f
BB
2340 dmu_tx_commit(tx);
2341}
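/*
 * Summary of the histogram rebuild above: for a loaded metaslab the
 * on-disk histogram is cleared and rebuilt from the in-core free tree
 * (ms_tree), the already-processed frees (*freed_tree), and the
 * deferred frees (ms_defertree[]), and then this pass's frees
 * (*freetree) are added unconditionally -- so the histogram reflects
 * all free space, including space not yet eligible for allocation.
 */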
2342
2343/*
2344 * Called after a transaction group has completely synced to mark
2345 * all of the metaslab's free space as usable.
2346 */
2347void
2348metaslab_sync_done(metaslab_t *msp, uint64_t txg)
2349{
34dc7c2f
BB
2350 metaslab_group_t *mg = msp->ms_group;
2351 vdev_t *vd = mg->mg_vd;
4e21fd06 2352 spa_t *spa = vd->vdev_spa;
93cf2076
GW
2353 range_tree_t **freed_tree;
2354 range_tree_t **defer_tree;
428870ff 2355 int64_t alloc_delta, defer_delta;
4e21fd06
DB
2356 uint64_t free_space;
2357 boolean_t defer_allowed = B_TRUE;
d6320ddb 2358 int t;
428870ff
BB
2359
2360 ASSERT(!vd->vdev_ishole);
34dc7c2f
BB
2361
2362 mutex_enter(&msp->ms_lock);
2363
2364 /*
2365 * If this metaslab is just becoming available, initialize its
93cf2076
GW
2366 * alloctrees, freetrees, and defertree and add its capacity to
2367 * the vdev.
34dc7c2f 2368 */
93cf2076 2369 if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
d6320ddb 2370 for (t = 0; t < TXG_SIZE; t++) {
93cf2076
GW
2371 ASSERT(msp->ms_alloctree[t] == NULL);
2372 ASSERT(msp->ms_freetree[t] == NULL);
2373
2374 msp->ms_alloctree[t] = range_tree_create(NULL, msp,
2375 &msp->ms_lock);
2376 msp->ms_freetree[t] = range_tree_create(NULL, msp,
2377 &msp->ms_lock);
34dc7c2f 2378 }
428870ff 2379
e51be066 2380 for (t = 0; t < TXG_DEFER_SIZE; t++) {
93cf2076 2381 ASSERT(msp->ms_defertree[t] == NULL);
e51be066 2382
93cf2076
GW
2383 msp->ms_defertree[t] = range_tree_create(NULL, msp,
2384 &msp->ms_lock);
2385 }
428870ff 2386
93cf2076 2387 vdev_space_update(vd, 0, 0, msp->ms_size);
34dc7c2f
BB
2388 }
2389
93cf2076
GW
2390 freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
2391 defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
2392
4e21fd06
DB
2393 free_space = metaslab_class_get_space(spa_normal_class(spa)) -
2394 metaslab_class_get_alloc(spa_normal_class(spa));
2395 if (free_space <= spa_get_slop_space(spa)) {
2396 defer_allowed = B_FALSE;
2397 }
2398
2399 defer_delta = 0;
93cf2076 2400 alloc_delta = space_map_alloc_delta(msp->ms_sm);
4e21fd06
DB
2401 if (defer_allowed) {
2402 defer_delta = range_tree_space(*freed_tree) -
2403 range_tree_space(*defer_tree);
2404 } else {
2405 defer_delta -= range_tree_space(*defer_tree);
2406 }
428870ff
BB
2407
2408 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
34dc7c2f 2409
93cf2076
GW
2410 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
2411 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
34dc7c2f
BB
2412
2413 /*
93cf2076 2414 * If there's a metaslab_load() in progress, wait for it to complete
34dc7c2f 2415 * so that we have a consistent view of the in-core space map.
34dc7c2f 2416 */
93cf2076 2417 metaslab_load_wait(msp);
c2e42f9d
GW
2418
2419 /*
93cf2076
GW
2420 * Move the frees from the defer_tree back to the free
2421 * range tree (if it's loaded). Swap the freed_tree and the
2422 * defer_tree -- this is safe to do because we've just emptied out
2423 * the defer_tree.
c2e42f9d 2424 */
93cf2076
GW
2425 range_tree_vacate(*defer_tree,
2426 msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
4e21fd06
DB
2427 if (defer_allowed) {
2428 range_tree_swap(freed_tree, defer_tree);
2429 } else {
2430 range_tree_vacate(*freed_tree,
2431 msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
2432 }
34dc7c2f 2433
93cf2076 2434 space_map_update(msp->ms_sm);
34dc7c2f 2435
428870ff
BB
2436 msp->ms_deferspace += defer_delta;
2437 ASSERT3S(msp->ms_deferspace, >=, 0);
93cf2076 2438 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
428870ff
BB
2439 if (msp->ms_deferspace != 0) {
2440 /*
2441 * Keep syncing this metaslab until all deferred frees
2442 * are back in circulation.
2443 */
2444 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2445 }
2446
4e21fd06
DB
2447 /*
2448 * Calculate the new weights before unloading any metaslabs.
2449 * This will give us the most accurate weighting.
2450 */
2451 metaslab_group_sort(mg, msp, metaslab_weight(msp));
2452
2453 /*
2454 * If the metaslab is loaded and we've not tried to load or allocate
2455 * from it in 'metaslab_unload_delay' txgs, then unload it.
2456 */
2457 if (msp->ms_loaded &&
2458 msp->ms_selected_txg + metaslab_unload_delay < txg) {
2459
93cf2076
GW
2460 for (t = 1; t < TXG_CONCURRENT_STATES; t++) {
2461 VERIFY0(range_tree_space(
2462 msp->ms_alloctree[(txg + t) & TXG_MASK]));
2463 }
34dc7c2f 2464
93cf2076
GW
2465 if (!metaslab_debug_unload)
2466 metaslab_unload(msp);
34dc7c2f
BB
2467 }
2468
34dc7c2f
BB
2469 mutex_exit(&msp->ms_lock);
2470}
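/*
 * Example of the defer accounting above (hypothetical sizes): if 10M
 * was freed in this txg and the defer bucket being recycled held 4M
 * from TXG_DEFER_SIZE txgs ago, then defer_delta == +6M: the 4M
 * finally re-enters ms_tree while the new 10M sits out its deferral,
 * so the vdev's deferred-space counter grows by 6M.
 */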
2471
428870ff
BB
2472void
2473metaslab_sync_reassess(metaslab_group_t *mg)
2474{
1be627f5 2475 metaslab_group_alloc_update(mg);
f3a7f661 2476 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
6d974228 2477
428870ff 2478 /*
93cf2076 2479 * Preload the next potential metaslabs
428870ff 2480 */
93cf2076 2481 metaslab_group_preload(mg);
428870ff
BB
2482}
2483
34dc7c2f
BB
2484static uint64_t
2485metaslab_distance(metaslab_t *msp, dva_t *dva)
2486{
2487 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
2488 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
93cf2076 2489 uint64_t start = msp->ms_id;
34dc7c2f
BB
2490
2491 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
2492 return (1ULL << 63);
2493
2494 if (offset < start)
2495 return ((start - offset) << ms_shift);
2496 if (offset > start)
2497 return ((offset - start) << ms_shift);
2498 return (0);
2499}
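/*
 * E.g. (hypothetical layout): with 1G metaslabs (ms_shift == 30), a
 * DVA landing in metaslab 7 is reported as 3G away from metaslab 10
 * on the same vdev, and as 1ULL << 63 -- effectively infinite -- from
 * any metaslab on a different vdev.
 */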
2500
4e21fd06
DB
2501/*
2502 * ==========================================================================
2503 * Metaslab allocation tracing facility
2504 * ==========================================================================
2505 */
2506#ifdef _METASLAB_TRACING
2507kstat_t *metaslab_trace_ksp;
2508kstat_named_t metaslab_trace_over_limit;
2509
2510void
2511metaslab_alloc_trace_init(void)
2512{
2513 ASSERT(metaslab_alloc_trace_cache == NULL);
2514 metaslab_alloc_trace_cache = kmem_cache_create(
2515 "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
2516 0, NULL, NULL, NULL, NULL, NULL, 0);
2517 metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
2518 "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
2519 if (metaslab_trace_ksp != NULL) {
2520 metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
2521 kstat_named_init(&metaslab_trace_over_limit,
2522 "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
2523 kstat_install(metaslab_trace_ksp);
2524 }
2525}
2526
2527void
2528metaslab_alloc_trace_fini(void)
2529{
2530 if (metaslab_trace_ksp != NULL) {
2531 kstat_delete(metaslab_trace_ksp);
2532 metaslab_trace_ksp = NULL;
2533 }
2534 kmem_cache_destroy(metaslab_alloc_trace_cache);
2535 metaslab_alloc_trace_cache = NULL;
2536}
2537
2538/*
2539 * Add an allocation trace element to the allocation tracing list.
2540 */
2541static void
2542metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
2543 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset)
2544{
2545 metaslab_alloc_trace_t *mat;
2546
2547 if (!metaslab_trace_enabled)
2548 return;
2549
2550 /*
2551 * When the tracing list reaches its maximum we remove
2552 * the second element in the list before adding a new one.
2553 * By removing the second element we preserve the original
2554 * entry as a clue to what allocation steps have already been
2555 * performed.
2556 */
2557 if (zal->zal_size == metaslab_trace_max_entries) {
2558 metaslab_alloc_trace_t *mat_next;
2559#ifdef DEBUG
2560 panic("too many entries in allocation list");
2561#endif
2562 atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
2563 zal->zal_size--;
2564 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
2565 list_remove(&zal->zal_list, mat_next);
2566 kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
2567 }
2568
2569 mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
2570 list_link_init(&mat->mat_list_node);
2571 mat->mat_mg = mg;
2572 mat->mat_msp = msp;
2573 mat->mat_size = psize;
2574 mat->mat_dva_id = dva_id;
2575 mat->mat_offset = offset;
2576 mat->mat_weight = 0;
2577
2578 if (msp != NULL)
2579 mat->mat_weight = msp->ms_weight;
2580
2581 /*
2582 * The list is part of the zio so locking is not required. Only
2583 * a single thread will perform allocations for a given zio.
2584 */
2585 list_insert_tail(&zal->zal_list, mat);
2586 zal->zal_size++;
2587
2588 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
2589}
2590
2591void
2592metaslab_trace_init(zio_alloc_list_t *zal)
2593{
2594 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
2595 offsetof(metaslab_alloc_trace_t, mat_list_node));
2596 zal->zal_size = 0;
2597}
2598
2599void
2600metaslab_trace_fini(zio_alloc_list_t *zal)
2601{
2602 metaslab_alloc_trace_t *mat;
2603
2604 while ((mat = list_remove_head(&zal->zal_list)) != NULL)
2605 kmem_cache_free(metaslab_alloc_trace_cache, mat);
2606 list_destroy(&zal->zal_list);
2607 zal->zal_size = 0;
2608}
2609#else
2610
2611#define metaslab_trace_add(zal, mg, msp, psize, id, off)
2612
2613void
2614metaslab_alloc_trace_init(void)
2615{
2616}
2617
2618void
2619metaslab_alloc_trace_fini(void)
2620{
2621}
2622
2623void
2624metaslab_trace_init(zio_alloc_list_t *zal)
2625{
2626}
2627
2628void
2629metaslab_trace_fini(zio_alloc_list_t *zal)
2630{
2631}
2632
2633#endif /* _METASLAB_TRACING */
2634
3dfb57a3
DB
2635/*
2636 * ==========================================================================
2637 * Metaslab block operations
2638 * ==========================================================================
2639 */
2640
2641static void
2642metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags)
2643{
2644 metaslab_group_t *mg;
2645
2646 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2647 flags & METASLAB_DONT_THROTTLE)
2648 return;
2649
2650 mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2651 if (!mg->mg_class->mc_alloc_throttle_enabled)
2652 return;
2653
2654 (void) refcount_add(&mg->mg_alloc_queue_depth, tag);
2655}
2656
2657void
2658metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags)
2659{
2660 metaslab_group_t *mg;
2661
2662 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2663 flags & METASLAB_DONT_THROTTLE)
2664 return;
2665
2666 mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2667 if (!mg->mg_class->mc_alloc_throttle_enabled)
2668 return;
2669
2670 (void) refcount_remove(&mg->mg_alloc_queue_depth, tag);
2671}
2672
2673void
2674metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag)
2675{
2676#ifdef ZFS_DEBUG
2677 const dva_t *dva = bp->blk_dva;
2678 int ndvas = BP_GET_NDVAS(bp);
2679 int d;
2680
2681 for (d = 0; d < ndvas; d++) {
2682 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
2683 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2684 VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag));
2685 }
2686#endif
2687}
2688
34dc7c2f 2689static uint64_t
4e21fd06
DB
2690metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
2691{
2692 uint64_t start;
2693 range_tree_t *rt = msp->ms_tree;
2694 metaslab_class_t *mc = msp->ms_group->mg_class;
2695
2696 VERIFY(!msp->ms_condensing);
2697
2698 start = mc->mc_ops->msop_alloc(msp, size);
2699 if (start != -1ULL) {
2700 metaslab_group_t *mg = msp->ms_group;
2701 vdev_t *vd = mg->mg_vd;
2702
2703 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
2704 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2705 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
2706 range_tree_remove(rt, start, size);
2707
2708 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2709 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
2710
2711 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], start, size);
2712
2713 /* Track the last successful allocation */
2714 msp->ms_alloc_txg = txg;
2715 metaslab_verify_space(msp, txg);
2716 }
2717
2718 /*
2719 * Now that we've attempted the allocation we need to update the
2720 * metaslab's maximum block size since it may have changed.
2721 */
2722 msp->ms_max_size = metaslab_block_maxsize(msp);
2723 return (start);
2724}
2725
2726static uint64_t
2727metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
2728 uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
34dc7c2f
BB
2729{
2730 metaslab_t *msp = NULL;
4e21fd06 2731 metaslab_t *search;
34dc7c2f 2732 uint64_t offset = -1ULL;
34dc7c2f
BB
2733 uint64_t activation_weight;
2734 uint64_t target_distance;
2735 int i;
2736
2737 activation_weight = METASLAB_WEIGHT_PRIMARY;
9babb374
BB
2738 for (i = 0; i < d; i++) {
2739 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
34dc7c2f 2740 activation_weight = METASLAB_WEIGHT_SECONDARY;
9babb374
BB
2741 break;
2742 }
2743 }
34dc7c2f 2744
4e21fd06
DB
2745 search = kmem_alloc(sizeof (*search), KM_SLEEP);
2746 search->ms_weight = UINT64_MAX;
2747 search->ms_start = 0;
34dc7c2f 2748 for (;;) {
9babb374 2749 boolean_t was_active;
4e21fd06
DB
2750 avl_tree_t *t = &mg->mg_metaslab_tree;
2751 avl_index_t idx;
9babb374 2752
34dc7c2f 2753 mutex_enter(&mg->mg_lock);
4e21fd06
DB
2754
2755 /*
2756 * Find the metaslab with the highest weight that is less
2757 * than what we've already tried. In the common case, this
2758 * means that we will examine each metaslab at most once.
2759 * Note that concurrent callers could reorder metaslabs
2760 * by activation/passivation once we have dropped the mg_lock.
2761 * If a metaslab is activated by another thread, and we fail
2762 * to allocate from the metaslab we have selected, we may
2763 * not try the newly-activated metaslab, and instead activate
2764 * another metaslab. This is not optimal, but generally
2765 * does not cause any problems (a possible exception being
2766 * if every metaslab is completely full except for the
2767 * newly-activated metaslab which we fail to examine).
2768 */
2769 msp = avl_find(t, search, &idx);
2770 if (msp == NULL)
2771 msp = avl_nearest(t, idx, AVL_AFTER);
2772 for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
2773
2774 if (!metaslab_should_allocate(msp, asize)) {
2775 metaslab_trace_add(zal, mg, msp, asize, d,
2776 TRACE_TOO_SMALL);
2777 continue;
34dc7c2f 2778 }
7a614407
GW
2779
2780 /*
2781 * If the selected metaslab is condensing, skip it.
2782 */
93cf2076 2783 if (msp->ms_condensing)
7a614407
GW
2784 continue;
2785
9babb374 2786 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
34dc7c2f
BB
2787 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
2788 break;
2789
2790 target_distance = min_distance +
93cf2076
GW
2791 (space_map_allocated(msp->ms_sm) != 0 ? 0 :
2792 min_distance >> 1);
34dc7c2f 2793
4e21fd06 2794 for (i = 0; i < d; i++) {
34dc7c2f
BB
2795 if (metaslab_distance(msp, &dva[i]) <
2796 target_distance)
2797 break;
4e21fd06 2798 }
34dc7c2f
BB
2799 if (i == d)
2800 break;
2801 }
2802 mutex_exit(&mg->mg_lock);
4e21fd06
DB
2803 if (msp == NULL) {
2804 kmem_free(search, sizeof (*search));
34dc7c2f 2805 return (-1ULL);
4e21fd06
DB
2806 }
2807 search->ms_weight = msp->ms_weight;
2808 search->ms_start = msp->ms_start + 1;
34dc7c2f 2809
ac72fac3
GW
2810 mutex_enter(&msp->ms_lock);
2811
34dc7c2f
BB
2812 /*
2813 * Ensure that the metaslab we have selected is still
2814 * capable of handling our request. It's possible that
2815 * another thread may have changed the weight while we
4e21fd06
DB
2816 * were blocked on the metaslab lock. We check the
2817 * active status first to see if we need to reselect
2818 * a new metaslab.
34dc7c2f 2819 */
4e21fd06 2820 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
34dc7c2f
BB
2821 mutex_exit(&msp->ms_lock);
2822 continue;
2823 }
2824
2825 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
2826 activation_weight == METASLAB_WEIGHT_PRIMARY) {
2827 metaslab_passivate(msp,
2828 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
2829 mutex_exit(&msp->ms_lock);
2830 continue;
2831 }
2832
6d974228 2833 if (metaslab_activate(msp, activation_weight) != 0) {
34dc7c2f
BB
2834 mutex_exit(&msp->ms_lock);
2835 continue;
2836 }
4e21fd06
DB
2837 msp->ms_selected_txg = txg;
2838
2839 /*
2840 * Now that we have the lock, recheck to see if we should
2841 * continue to use this metaslab for this allocation. The
2842 * metaslab is now loaded so metaslab_should_allocate() can
2843 * accurately determine if the allocation attempt should
2844 * proceed.
2845 */
2846 if (!metaslab_should_allocate(msp, asize)) {
2847 /* Passivate this metaslab and select a new one. */
2848 metaslab_trace_add(zal, mg, msp, asize, d,
2849 TRACE_TOO_SMALL);
2850 goto next;
2851 }
2852
34dc7c2f 2853
7a614407
GW
2854 /*
2855 * If this metaslab is currently condensing then pick again as
2856 * we can't manipulate this metaslab until it's committed
2857 * to disk.
2858 */
93cf2076 2859 if (msp->ms_condensing) {
4e21fd06
DB
2860 metaslab_trace_add(zal, mg, msp, asize, d,
2861 TRACE_CONDENSING);
7a614407
GW
2862 mutex_exit(&msp->ms_lock);
2863 continue;
2864 }
2865
4e21fd06
DB
2866 offset = metaslab_block_alloc(msp, asize, txg);
2867 metaslab_trace_add(zal, mg, msp, asize, d, offset);
2868
2869 if (offset != -1ULL) {
2870 /* Proactively passivate the metaslab, if needed */
2871 metaslab_segment_may_passivate(msp);
34dc7c2f 2872 break;
4e21fd06
DB
2873 }
2874next:
2875 ASSERT(msp->ms_loaded);
2876
2877 /*
2878 * We were unable to allocate from this metaslab so determine
2879 * a new weight for this metaslab. Now that we have loaded
2880 * the metaslab we can provide a better hint to the metaslab
2881 * selector.
2882 *
2883 * For space-based metaslabs, we use the maximum block size.
2884 * This information is only available when the metaslab
2885 * is loaded and is more accurate than the generic free
2886 * space weight that was calculated by metaslab_weight().
2887 * This information allows us to quickly compare the maximum
2888 * available allocation in the metaslab to the allocation
2889 * size being requested.
2890 *
2891 * For segment-based metaslabs, determine the new weight
2892 * based on the highest bucket in the range tree. We
2893 * explicitly use the loaded segment weight (i.e. the range
2894 * tree histogram) since it contains the space that is
2895 * currently available for allocation and is accurate
2896 * even within a sync pass.
2897 */
2898 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
2899 uint64_t weight = metaslab_block_maxsize(msp);
2900 WEIGHT_SET_SPACEBASED(weight);
2901 metaslab_passivate(msp, weight);
2902 } else {
2903 metaslab_passivate(msp,
2904 metaslab_weight_from_range_tree(msp));
2905 }
34dc7c2f 2906
4e21fd06
DB
2907 /*
2908 * We have just failed an allocation attempt, check
2909 * that metaslab_should_allocate() agrees. Otherwise,
2910 * we may end up in an infinite loop retrying the same
2911 * metaslab.
2912 */
2913 ASSERT(!metaslab_should_allocate(msp, asize));
34dc7c2f
BB
2914 mutex_exit(&msp->ms_lock);
2915 }
4e21fd06
DB
2916 mutex_exit(&msp->ms_lock);
2917 kmem_free(search, sizeof (*search));
2918 return (offset);
2919}
34dc7c2f 2920
4e21fd06
DB
2921static uint64_t
2922metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
2923 uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
2924{
2925 uint64_t offset;
2926 ASSERT(mg->mg_initialized);
34dc7c2f 2927
4e21fd06
DB
2928 offset = metaslab_group_alloc_normal(mg, zal, asize, txg,
2929 min_distance, dva, d);
34dc7c2f 2930
4e21fd06
DB
2931 mutex_enter(&mg->mg_lock);
2932 if (offset == -1ULL) {
2933 mg->mg_failed_allocations++;
2934 metaslab_trace_add(zal, mg, NULL, asize, d,
2935 TRACE_GROUP_FAILURE);
2936 if (asize == SPA_GANGBLOCKSIZE) {
2937 /*
2938 * This metaslab group was unable to allocate
2939 * the minimum gang block size so it must be out of
2940 * space. We must notify the allocation throttle
2941 * to start skipping allocation attempts to this
2942 * metaslab group until more space becomes available.
2943 * Note: this failure cannot be caused by the
2944 * allocation throttle since the allocation throttle
2945 * is only responsible for skipping devices and
2946 * not failing block allocations.
2947 */
2948 mg->mg_no_free_space = B_TRUE;
2949 }
2950 }
2951 mg->mg_allocations++;
2952 mutex_exit(&mg->mg_lock);
34dc7c2f
BB
2953 return (offset);
2954}
2955
4e21fd06
DB
2956/*
2957 * If we have to write a ditto block (i.e. more than one DVA for a given BP)
2958 * on the same vdev as an existing DVA of this BP, then try to allocate it
2959 * at least (vdev_asize / (2 ^ ditto_same_vdev_distance_shift)) away from the
2960 * existing DVAs.
2961 */
2962int ditto_same_vdev_distance_shift = 3;
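/*
 * E.g. on an 8T top-level vdev the default shift of 3 asks for the
 * DVAs of a BP to land at least 1T apart; when trying hard, any
 * offset is accepted (distance == 0).
 */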
2963
34dc7c2f
BB
2964/*
2965 * Allocate a block for the specified i/o.
2966 */
2967static int
2968metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
4e21fd06
DB
2969 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
2970 zio_alloc_list_t *zal)
34dc7c2f 2971{
920dd524 2972 metaslab_group_t *mg, *fast_mg, *rotor;
34dc7c2f 2973 vdev_t *vd;
4e21fd06 2974 boolean_t try_hard = B_FALSE;
34dc7c2f
BB
2975
2976 ASSERT(!DVA_IS_VALID(&dva[d]));
2977
2978 /*
2979 * For testing, make some blocks above a certain size be gang blocks.
2980 */
4e21fd06
DB
2981 if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0) {
2982 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG);
2e528b49 2983 return (SET_ERROR(ENOSPC));
4e21fd06 2984 }
34dc7c2f
BB
2985
2986 /*
2987 * Start at the rotor and loop through all mgs until we find something.
428870ff 2988 * Note that there's no locking on mc_rotor or mc_aliquot because
34dc7c2f
BB
2989 * nothing actually breaks if we miss a few updates -- we just won't
2990 * allocate quite as evenly. It all balances out over time.
2991 *
2992 * If we are doing ditto or log blocks, try to spread them across
2993 * consecutive vdevs. If we're forced to reuse a vdev before we've
2994 * allocated all of our ditto blocks, then try and spread them out on
2995 * that vdev as much as possible. If it turns out to not be possible,
2996 * gradually lower our standards until anything becomes acceptable.
2997 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
2998 * gives us hope of containing our fault domains to something we're
2999 * able to reason about. Otherwise, any two top-level vdev failures
3000 * will guarantee the loss of data. With consecutive allocation,
3001 * only two adjacent top-level vdev failures will result in data loss.
3002 *
3003 * If we are doing gang blocks (hintdva is non-NULL), try to keep
3004 * ourselves on the same vdev as our gang block header. That
3005 * way, we can hope for locality in vdev_cache, plus it makes our
3006 * fault domains something tractable.
3007 */
3008 if (hintdva) {
3009 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
428870ff
BB
3010
3011 /*
3012 * It's possible the vdev we're using as the hint no
3013 * longer exists (i.e. removed). Consult the rotor when
3014 * all else fails.
3015 */
3016 if (vd != NULL) {
34dc7c2f 3017 mg = vd->vdev_mg;
428870ff
BB
3018
3019 if (flags & METASLAB_HINTBP_AVOID &&
3020 mg->mg_next != NULL)
3021 mg = mg->mg_next;
3022 } else {
3023 mg = mc->mc_rotor;
3024 }
34dc7c2f
BB
3025 } else if (d != 0) {
3026 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
3027 mg = vd->vdev_mg->mg_next;
920dd524
ED
3028 } else if (flags & METASLAB_FASTWRITE) {
3029 mg = fast_mg = mc->mc_rotor;
3030
3031 do {
3032 if (fast_mg->mg_vd->vdev_pending_fastwrite <
3033 mg->mg_vd->vdev_pending_fastwrite)
3034 mg = fast_mg;
3035 } while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);
3036
34dc7c2f
BB
3037 } else {
3038 mg = mc->mc_rotor;
3039 }
3040
3041 /*
428870ff
BB
3042 * If the hint put us into the wrong metaslab class, or into a
3043 * metaslab group that has been passivated, just follow the rotor.
34dc7c2f 3044 */
428870ff 3045 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
34dc7c2f
BB
3046 mg = mc->mc_rotor;
3047
3048 rotor = mg;
3049top:
34dc7c2f 3050 do {
4e21fd06 3051 boolean_t allocatable;
3dfb57a3 3052 uint64_t offset;
4e21fd06 3053 uint64_t distance, asize;
428870ff 3054
3dfb57a3 3055 ASSERT(mg->mg_activation_count == 1);
34dc7c2f 3056 vd = mg->mg_vd;
fb5f0bc8 3057
34dc7c2f 3058 /*
b128c09f 3059 * Don't allocate from faulted devices.
34dc7c2f 3060 */
4e21fd06 3061 if (try_hard) {
fb5f0bc8
BB
3062 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
3063 allocatable = vdev_allocatable(vd);
3064 spa_config_exit(spa, SCL_ZIO, FTAG);
3065 } else {
3066 allocatable = vdev_allocatable(vd);
3067 }
ac72fac3
GW
3068
3069 /*
3070 * Determine if the selected metaslab group is eligible
3dfb57a3
DB
3071 * for allocations. If we're ganging then don't allow
3072 * this metaslab group to skip allocations since that would
3073 * inadvertently return ENOSPC and suspend the pool
ac72fac3
GW
3074 * even though space is still available.
3075 */
4e21fd06 3076 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
3dfb57a3
DB
3077 allocatable = metaslab_group_allocatable(mg, rotor,
3078 psize);
3079 }
ac72fac3 3080
4e21fd06
DB
3081 if (!allocatable) {
3082 metaslab_trace_add(zal, mg, NULL, psize, d,
3083 TRACE_NOT_ALLOCATABLE);
34dc7c2f 3084 goto next;
4e21fd06 3085 }
fb5f0bc8 3086
3dfb57a3
DB
3087 ASSERT(mg->mg_initialized);
3088
34dc7c2f 3089 /*
4e21fd06
DB
3090 * Avoid writing single-copy data to a failing,
3091 * non-redundant vdev, unless we've already tried all
3092 * other vdevs.
34dc7c2f
BB
3093 */
3094 if ((vd->vdev_stat.vs_write_errors > 0 ||
3095 vd->vdev_state < VDEV_STATE_HEALTHY) &&
4e21fd06
DB
3096 d == 0 && !try_hard && vd->vdev_children == 0) {
3097 metaslab_trace_add(zal, mg, NULL, psize, d,
3098 TRACE_VDEV_ERROR);
34dc7c2f
BB
3099 goto next;
3100 }
3101
3102 ASSERT(mg->mg_class == mc);
3103
4e21fd06
DB
3104 /*
3105 * If we don't need to try hard, then require that the
3106 * block be 1/8th of the device away from any other DVAs
3107 * in this BP. If we are trying hard, allow any offset
3108 * to be used (distance=0).
3109 */
3110 distance = 0;
3111 if (!try_hard) {
3112 distance = vd->vdev_asize >>
3113 ditto_same_vdev_distance_shift;
3114 if (distance <= (1ULL << vd->vdev_ms_shift))
3115 distance = 0;
3116 }
34dc7c2f
BB
3117
3118 asize = vdev_psize_to_asize(vd, psize);
3119 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
3120
4e21fd06
DB
3121 offset = metaslab_group_alloc(mg, zal, asize, txg, distance,
3122 dva, d);
3dfb57a3 3123
34dc7c2f
BB
3124 if (offset != -1ULL) {
3125 /*
3126 * If we've just selected this metaslab group,
3127 * figure out whether the corresponding vdev is
3128 * over- or under-used relative to the pool,
3129 * and set an allocation bias to even it out.
bb3250d0
ED
3130 *
3131 * Bias is also used to compensate for unequally
3132 * sized vdevs so that space is allocated fairly.
34dc7c2f 3133 */
f3a7f661 3134 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
34dc7c2f 3135 vdev_stat_t *vs = &vd->vdev_stat;
bb3250d0
ED
3136 int64_t vs_free = vs->vs_space - vs->vs_alloc;
3137 int64_t mc_free = mc->mc_space - mc->mc_alloc;
3138 int64_t ratio;
34dc7c2f
BB
3139
3140 /*
6d974228
GW
3141 * Calculate how much more or less we should
3142 * try to allocate from this device during
3143 * this iteration around the rotor.
6d974228 3144 *
bb3250d0
ED
3145 * This basically introduces a zero-centered
3146 * bias towards the devices with the most
3147 * free space, while compensating for vdev
3148 * size differences.
3149 *
3150 * Examples:
3151 * vdev V1 = 16M/128M
3152 * vdev V2 = 16M/128M
3153 * ratio(V1) = 100% ratio(V2) = 100%
3154 *
3155 * vdev V1 = 16M/128M
3156 * vdev V2 = 64M/128M
3157 * ratio(V1) = 127% ratio(V2) = 72%
6d974228 3158 *
bb3250d0
ED
3159 * vdev V1 = 16M/128M
3160 * vdev V2 = 64M/512M
3161 * ratio(V1) = 40% ratio(V2) = 160%
34dc7c2f 3162 */
bb3250d0
ED
3163 ratio = (vs_free * mc->mc_alloc_groups * 100) /
3164 (mc_free + 1);
3165 mg->mg_bias = ((ratio - 100) *
6d974228 3166 (int64_t)mg->mg_aliquot) / 100;
f3a7f661
GW
3167 } else if (!metaslab_bias_enabled) {
3168 mg->mg_bias = 0;
34dc7c2f
BB
3169 }
3170
920dd524
ED
3171 if ((flags & METASLAB_FASTWRITE) ||
3172 atomic_add_64_nv(&mc->mc_aliquot, asize) >=
34dc7c2f
BB
3173 mg->mg_aliquot + mg->mg_bias) {
3174 mc->mc_rotor = mg->mg_next;
428870ff 3175 mc->mc_aliquot = 0;
34dc7c2f
BB
3176 }
3177
3178 DVA_SET_VDEV(&dva[d], vd->vdev_id);
3179 DVA_SET_OFFSET(&dva[d], offset);
e3e7cf60
D
3180 DVA_SET_GANG(&dva[d],
3181 ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
34dc7c2f
BB
3182 DVA_SET_ASIZE(&dva[d], asize);
3183
920dd524
ED
3184 if (flags & METASLAB_FASTWRITE) {
3185 atomic_add_64(&vd->vdev_pending_fastwrite,
3186 psize);
920dd524
ED
3187 }
3188
34dc7c2f
BB
3189 return (0);
3190 }
3191next:
3192 mc->mc_rotor = mg->mg_next;
428870ff 3193 mc->mc_aliquot = 0;
34dc7c2f
BB
3194 } while ((mg = mg->mg_next) != rotor);
3195
4e21fd06
DB
3196 /*
3197 * If we haven't tried hard, do so now.
3198 */
3199 if (!try_hard) {
3200 try_hard = B_TRUE;
fb5f0bc8
BB
3201 goto top;
3202 }
3203
34dc7c2f
BB
3204 bzero(&dva[d], sizeof (dva_t));
3205
4e21fd06 3206 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC);
2e528b49 3207 return (SET_ERROR(ENOSPC));
34dc7c2f
BB
3208}
3209
3210/*
3211 * Free the block represented by DVA in the context of the specified
3212 * transaction group.
3213 */
3214static void
3215metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
3216{
3217 uint64_t vdev = DVA_GET_VDEV(dva);
3218 uint64_t offset = DVA_GET_OFFSET(dva);
3219 uint64_t size = DVA_GET_ASIZE(dva);
3220 vdev_t *vd;
3221 metaslab_t *msp;
3222
34dc7c2f
BB
3223 if (txg > spa_freeze_txg(spa))
3224 return;
3225
7d2868d5 3226 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
34dc7c2f 3227 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
7d2868d5
BB
3228 zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
3229 (u_longlong_t)vdev, (u_longlong_t)offset,
3230 (u_longlong_t)size);
34dc7c2f
BB
3231 return;
3232 }
3233
3234 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3235
3236 if (DVA_GET_GANG(dva))
3237 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3238
3239 mutex_enter(&msp->ms_lock);
3240
3241 if (now) {
93cf2076 3242 range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
34dc7c2f 3243 offset, size);
93cf2076
GW
3244
3245 VERIFY(!msp->ms_condensing);
3246 VERIFY3U(offset, >=, msp->ms_start);
3247 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
3248 VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
3249 msp->ms_size);
3250 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3251 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3252 range_tree_add(msp->ms_tree, offset, size);
4e21fd06 3253 msp->ms_max_size = metaslab_block_maxsize(msp);
34dc7c2f 3254 } else {
93cf2076 3255 if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
34dc7c2f 3256 vdev_dirty(vd, VDD_METASLAB, msp, txg);
93cf2076
GW
3257 range_tree_add(msp->ms_freetree[txg & TXG_MASK],
3258 offset, size);
34dc7c2f
BB
3259 }
3260
3261 mutex_exit(&msp->ms_lock);
3262}
3263
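/*
 * Editor's illustrative sketch (not part of the original source): the
 * "now" flag in caller terms. Rolling back an allocation made in the
 * still-open txg passes now == B_TRUE, returning the range to ms_tree
 * immediately; an ordinary free passes now == B_FALSE, queueing the
 * range in ms_freetree to be processed when the txg syncs.
 */
static void
example_unwind_alloc(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	/*
	 * This mirrors the partial-failure unwind in metaslab_alloc()
	 * below: the space never left the open txg, so reclaim it now.
	 */
	metaslab_free_dva(spa, dva, txg, B_TRUE);
}
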
3264/*
3265 * Intent log support: upon opening the pool after a crash, notify the SPA
3266 * of blocks that the intent log has allocated for immediate write, but
3267 * which are still considered free by the SPA because the last transaction
3268 * group didn't commit yet.
3269 */
3270static int
3271metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
3272{
3273 uint64_t vdev = DVA_GET_VDEV(dva);
3274 uint64_t offset = DVA_GET_OFFSET(dva);
3275 uint64_t size = DVA_GET_ASIZE(dva);
3276 vdev_t *vd;
3277 metaslab_t *msp;
428870ff 3278 int error = 0;
34dc7c2f
BB
3279
3280 ASSERT(DVA_IS_VALID(dva));
3281
3282 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
3283 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
2e528b49 3284 return (SET_ERROR(ENXIO));
34dc7c2f
BB
3285
3286 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3287
3288 if (DVA_GET_GANG(dva))
3289 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3290
3291 mutex_enter(&msp->ms_lock);
3292
93cf2076 3293 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
6d974228 3294 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
428870ff 3295
93cf2076 3296 if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
2e528b49 3297 error = SET_ERROR(ENOENT);
428870ff 3298
b128c09f 3299 if (error || txg == 0) { /* txg == 0 indicates dry run */
34dc7c2f
BB
3300 mutex_exit(&msp->ms_lock);
3301 return (error);
3302 }
3303
93cf2076
GW
3304 VERIFY(!msp->ms_condensing);
3305 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3306 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3307 VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
3308 range_tree_remove(msp->ms_tree, offset, size);
b128c09f 3309
fb5f0bc8 3310 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
93cf2076 3311 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
b128c09f 3312 vdev_dirty(vd, VDD_METASLAB, msp, txg);
93cf2076 3313 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
b128c09f 3314 }
34dc7c2f
BB
3315
3316 mutex_exit(&msp->ms_lock);
3317
3318 return (0);
3319}
3320
3dfb57a3
DB
3321/*
3322 * Reserve some allocation slots. The reservation must be made
3323 * before we call into the allocator. If no slots are available,
3324 * the I/O is throttled until another I/O completes and frees up
3325 * its slots. Returns B_TRUE if the reservation was placed
3326 * successfully.
3327 */
3328boolean_t
3329metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
3330 int flags)
3331{
3332 uint64_t available_slots = 0;
3333 uint64_t reserved_slots;
3334 boolean_t slot_reserved = B_FALSE;
3335
3336 ASSERT(mc->mc_alloc_throttle_enabled);
3337 mutex_enter(&mc->mc_lock);
3338
3339 reserved_slots = refcount_count(&mc->mc_alloc_slots);
3340 if (reserved_slots < mc->mc_alloc_max_slots)
3341 available_slots = mc->mc_alloc_max_slots - reserved_slots;
3342
3343 if (slots <= available_slots || GANG_ALLOCATION(flags)) {
3344 int d;
3345
3346 /*
3347 * We reserve the slots individually so that we can unreserve
3348 * them individually when an I/O completes.
3349 */
3350 for (d = 0; d < slots; d++) {
3351 reserved_slots = refcount_add(&mc->mc_alloc_slots, zio);
3352 }
3353 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
3354 slot_reserved = B_TRUE;
3355 }
3356
3357 mutex_exit(&mc->mc_lock);
3358 return (slot_reserved);
3359}
3360
3361void
3362metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio)
3363{
3364 int d;
3365
3366 ASSERT(mc->mc_alloc_throttle_enabled);
3367 mutex_enter(&mc->mc_lock);
3368 for (d = 0; d < slots; d++) {
3369 (void) refcount_remove(&mc->mc_alloc_slots, zio);
3370 }
3371 mutex_exit(&mc->mc_lock);
3372}
3373
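/*
 * Editor's illustrative sketch (not part of the original source): the
 * reserve/unreserve pair in caller terms. In the real pipeline the
 * zio layer performs this pairing, reserving before DVA allocation
 * and unreserving when the zio completes; the EAGAIN return here is
 * a simplification, since a throttled zio is actually requeued.
 */
static int
example_throttled_alloc(metaslab_class_t *mc, zio_t *zio, int slots)
{
	if (!metaslab_class_throttle_reserve(mc, slots, zio, 0))
		return (SET_ERROR(EAGAIN));

	/* ... allocate `slots` DVAs on behalf of the zio ... */

	/* Release exactly as many slots as were reserved. */
	metaslab_class_throttle_unreserve(mc, slots, zio);
	return (0);
}
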
34dc7c2f
BB
3374int
3375metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
4e21fd06
DB
3376 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
3377 zio_alloc_list_t *zal, zio_t *zio)
34dc7c2f
BB
3378{
3379 dva_t *dva = bp->blk_dva;
3380 dva_t *hintdva = hintbp->blk_dva;
d6320ddb 3381 int d, error = 0;
34dc7c2f 3382
b128c09f 3383 ASSERT(bp->blk_birth == 0);
428870ff 3384 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
b128c09f
BB
3385
3386 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3387
3388 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
3389 spa_config_exit(spa, SCL_ALLOC, FTAG);
2e528b49 3390 return (SET_ERROR(ENOSPC));
b128c09f 3391 }
34dc7c2f
BB
3392
3393 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
3394 ASSERT(BP_GET_NDVAS(bp) == 0);
3395 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
4e21fd06 3396 ASSERT3P(zal, !=, NULL);
34dc7c2f 3397
d6320ddb 3398 for (d = 0; d < ndvas; d++) {
34dc7c2f 3399 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
4e21fd06 3400 txg, flags, zal);
93cf2076 3401 if (error != 0) {
34dc7c2f
BB
3402 for (d--; d >= 0; d--) {
3403 metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
3dfb57a3
DB
3404 metaslab_group_alloc_decrement(spa,
3405 DVA_GET_VDEV(&dva[d]), zio, flags);
34dc7c2f
BB
3406 bzero(&dva[d], sizeof (dva_t));
3407 }
b128c09f 3408 spa_config_exit(spa, SCL_ALLOC, FTAG);
34dc7c2f 3409 return (error);
3dfb57a3
DB
3410 } else {
3411 /*
3412 * Update the metaslab group's queue depth
3413 * based on the newly allocated dva.
3414 */
3415 metaslab_group_alloc_increment(spa,
3416 DVA_GET_VDEV(&dva[d]), zio, flags);
34dc7c2f 3417 }
3dfb57a3 3418
34dc7c2f
BB
3419 }
3420 ASSERT(error == 0);
3421 ASSERT(BP_GET_NDVAS(bp) == ndvas);
3422
b128c09f
BB
3423 spa_config_exit(spa, SCL_ALLOC, FTAG);
3424
efe7978d 3425 BP_SET_BIRTH(bp, txg, 0);
b128c09f 3426
34dc7c2f
BB
3427 return (0);
3428}
3429
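/*
 * Editor's illustrative sketch (not part of the original source): a
 * minimal single-DVA metaslab_alloc() call. The caller owns the
 * allocation trace list, bracketing the call with
 * metaslab_trace_init()/metaslab_trace_fini() so failed attempts can
 * be inspected; a NULL hintbp means there is no locality hint.
 */
static int
example_alloc_one_dva(spa_t *spa, metaslab_class_t *mc, blkptr_t *bp,
    uint64_t psize, uint64_t txg, zio_t *zio)
{
	zio_alloc_list_t zal;
	int error;

	metaslab_trace_init(&zal);
	error = metaslab_alloc(spa, mc, psize, bp, 1, txg, NULL, 0,
	    &zal, zio);
	metaslab_trace_fini(&zal);

	return (error);
}
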
3430void
3431metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
3432{
3433 const dva_t *dva = bp->blk_dva;
d6320ddb 3434 int d, ndvas = BP_GET_NDVAS(bp);
34dc7c2f
BB
3435
3436 ASSERT(!BP_IS_HOLE(bp));
428870ff 3437 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
b128c09f
BB
3438
3439 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
34dc7c2f 3440
d6320ddb 3441 for (d = 0; d < ndvas; d++)
34dc7c2f 3442 metaslab_free_dva(spa, &dva[d], txg, now);
b128c09f
BB
3443
3444 spa_config_exit(spa, SCL_FREE, FTAG);
34dc7c2f
BB
3445}
3446
3447int
3448metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
3449{
3450 const dva_t *dva = bp->blk_dva;
3451 int ndvas = BP_GET_NDVAS(bp);
d6320ddb 3452 int d, error = 0;
34dc7c2f
BB
3453
3454 ASSERT(!BP_IS_HOLE(bp));
3455
b128c09f
BB
3456 if (txg != 0) {
3457 /*
3458 * First do a dry run to make sure all DVAs are claimable,
3459 * so we don't have to unwind from partial failures below.
3460 */
3461 if ((error = metaslab_claim(spa, bp, 0)) != 0)
3462 return (error);
3463 }
3464
3465 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3466
d6320ddb 3467 for (d = 0; d < ndvas; d++)
34dc7c2f 3468 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
b128c09f
BB
3469 break;
3470
3471 spa_config_exit(spa, SCL_ALLOC, FTAG);
3472
3473 ASSERT(error == 0 || txg == 0);
34dc7c2f 3474
b128c09f 3475 return (error);
34dc7c2f 3476}
920dd524 3477
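/*
 * Editor's illustrative sketch (not part of the original source): how
 * a ZIL-replay caller might claim a log block. Because a claim with
 * txg != 0 first re-runs itself with txg == 0 as a dry run, either
 * every DVA is claimed or none is touched, so no unwind is needed.
 */
static int
example_claim_log_block(spa_t *spa, const blkptr_t *bp)
{
	return (metaslab_claim(spa, bp, spa_first_txg(spa)));
}
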
d1d7e268
MK
3478void
3479metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
920dd524
ED
3480{
3481 const dva_t *dva = bp->blk_dva;
3482 int ndvas = BP_GET_NDVAS(bp);
3483 uint64_t psize = BP_GET_PSIZE(bp);
3484 int d;
3485 vdev_t *vd;
3486
3487 ASSERT(!BP_IS_HOLE(bp));
9b67f605 3488 ASSERT(!BP_IS_EMBEDDED(bp));
920dd524
ED
3489 ASSERT(psize > 0);
3490
3491 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
3492
3493 for (d = 0; d < ndvas; d++) {
3494 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
3495 continue;
3496 atomic_add_64(&vd->vdev_pending_fastwrite, psize);
3497 }
3498
3499 spa_config_exit(spa, SCL_VDEV, FTAG);
3500}
3501
d1d7e268
MK
3502void
3503metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
920dd524
ED
3504{
3505 const dva_t *dva = bp->blk_dva;
3506 int ndvas = BP_GET_NDVAS(bp);
3507 uint64_t psize = BP_GET_PSIZE(bp);
3508 int d;
3509 vdev_t *vd;
3510
3511 ASSERT(!BP_IS_HOLE(bp));
9b67f605 3512 ASSERT(!BP_IS_EMBEDDED(bp));
920dd524
ED
3513 ASSERT(psize > 0);
3514
3515 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
3516
3517 for (d = 0; d < ndvas; d++) {
3518 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
3519 continue;
3520 ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
3521 atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
3522 }
3523
3524 spa_config_exit(spa, SCL_VDEV, FTAG);
3525}
30b92c1d 3526
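/*
 * Editor's illustrative sketch (not part of the original source):
 * fastwrite accounting must stay symmetric. A block is marked when
 * the log write is issued and unmarked once it is freed or
 * abandoned; an unmatched pair would permanently skew
 * vdev_pending_fastwrite and bias future METASLAB_FASTWRITE placement.
 */
static void
example_fastwrite_cycle(spa_t *spa, blkptr_t *bp)
{
	metaslab_fastwrite_mark(spa, bp);

	/* ... issue the log write and wait for it to complete ... */

	metaslab_fastwrite_unmark(spa, bp);
}
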
13fe0198
MA
3527void
3528metaslab_check_free(spa_t *spa, const blkptr_t *bp)
3529{
3530 int i, j;
3531
3532 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
3533 return;
3534
3535 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
3536 for (i = 0; i < BP_GET_NDVAS(bp); i++) {
93cf2076
GW
3537 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
3538 vdev_t *vd = vdev_lookup_top(spa, vdev);
3539 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
13fe0198 3540 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
93cf2076 3541 metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
13fe0198 3542
93cf2076
GW
3543 if (msp->ms_loaded)
3544 range_tree_verify(msp->ms_tree, offset, size);
13fe0198
MA
3545
3546 for (j = 0; j < TXG_SIZE; j++)
93cf2076 3547 range_tree_verify(msp->ms_freetree[j], offset, size);
13fe0198 3548 for (j = 0; j < TXG_DEFER_SIZE; j++)
93cf2076 3549 range_tree_verify(msp->ms_defertree[j], offset, size);
13fe0198
MA
3550 }
3551 spa_config_exit(spa, SCL_VDEV, FTAG);
3552}
3553
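/*
 * Editor's illustrative sketch (not part of the original source): the
 * verifier above is compiled in but inert unless ZFS_DEBUG_ZIO_FREE
 * is set in zfs_flags (also settable as the zfs_flags module
 * parameter), e.g. from a hypothetical debugging hook:
 */
static void
example_enable_free_verification(void)
{
	zfs_flags |= ZFS_DEBUG_ZIO_FREE;
}
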
30b92c1d 3554#if defined(_KERNEL) && defined(HAVE_SPL)
02730c33 3555/* CSTYLED */
99b14de4 3556module_param(metaslab_aliquot, ulong, 0644);
99b14de4
ED
3557MODULE_PARM_DESC(metaslab_aliquot,
3558 "allocation granularity (a.k.a. stripe size)");
02730c33
BB
3559
3560module_param(metaslab_debug_load, int, 0644);
93cf2076
GW
3561MODULE_PARM_DESC(metaslab_debug_load,
3562 "load all metaslabs when pool is first opened");
02730c33
BB
3563
3564module_param(metaslab_debug_unload, int, 0644);
1ce04573
BB
3565MODULE_PARM_DESC(metaslab_debug_unload,
3566 "prevent metaslabs from being unloaded");
02730c33
BB
3567
3568module_param(metaslab_preload_enabled, int, 0644);
f3a7f661
GW
3569MODULE_PARM_DESC(metaslab_preload_enabled,
3570 "preload potential metaslabs during reassessment");
f4a4046b 3571
02730c33 3572module_param(zfs_mg_noalloc_threshold, int, 0644);
f4a4046b
TC
3573MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
3574 "percentage of free space for metaslab group to allow allocation");
02730c33
BB
3575
3576module_param(zfs_mg_fragmentation_threshold, int, 0644);
f3a7f661
GW
3577MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
3578 "fragmentation for metaslab group to allow allocation");
3579
02730c33 3580module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
f3a7f661
GW
3581MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
3582 "fragmentation for metaslab to allow allocation");
02730c33
BB
3583
3584module_param(metaslab_fragmentation_factor_enabled, int, 0644);
f3a7f661
GW
3585MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
3586 "use the fragmentation metric to prefer less fragmented metaslabs");
02730c33
BB
3587
3588module_param(metaslab_lba_weighting_enabled, int, 0644);
f3a7f661
GW
3589MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
3590 "prefer metaslabs with lower LBAs");
02730c33
BB
3591
3592module_param(metaslab_bias_enabled, int, 0644);
f3a7f661
GW
3593MODULE_PARM_DESC(metaslab_bias_enabled,
3594 "enable metaslab group biasing");
4e21fd06
DB
3595
3596module_param(zfs_metaslab_segment_weight_enabled, int, 0644);
3597MODULE_PARM_DESC(zfs_metaslab_segment_weight_enabled,
3598 "enable segment-based metaslab selection");
3599
3600module_param(zfs_metaslab_switch_threshold, int, 0644);
3601MODULE_PARM_DESC(zfs_metaslab_switch_threshold,
3602 "segment-based metaslab selection maximum buckets before switching");
30b92c1d 3603#endif /* _KERNEL && HAVE_SPL */