1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 */
26
27 #include <sys/zfs_context.h>
28 #include <sys/dmu.h>
29 #include <sys/dmu_tx.h>
30 #include <sys/space_map.h>
31 #include <sys/metaslab_impl.h>
32 #include <sys/vdev_impl.h>
33 #include <sys/zio.h>
34 #include <sys/spa_impl.h>
35 #include <sys/zfeature.h>
36
37 #define WITH_DF_BLOCK_ALLOCATOR
38
39 #define GANG_ALLOCATION(flags) \
40 ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
41
42 /*
43 * Metaslab granularity, in bytes. This is roughly similar to what would be
44 * referred to as the "stripe size" in traditional RAID arrays. In normal
45 * operation, we will try to write this amount of data to a top-level vdev
46 * before moving on to the next one.
47 */
48 unsigned long metaslab_aliquot = 512 << 10;
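/*
 * For example, with the 512K default above, metaslab_group_activate() scales
 * the per-group aliquot by the number of children in the top-level vdev, so
 * an 8-child vdev is handed roughly 4M of writes before the rotor advances.
 */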
49
50 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
51
52 /*
53 * The in-core space map representation is more compact than its on-disk form.
54 * The zfs_condense_pct determines how much more compact the in-core
55 * space map representation must be before we compact it on-disk.
56 * Values should be greater than or equal to 100.
57 */
58 int zfs_condense_pct = 200;
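/*
 * For example, with the default of 200 a space map only becomes a candidate
 * for condensing once its on-disk representation has grown to roughly twice
 * the size of its optimal (condensed) in-core form.
 */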
59
60 /*
61 * Condensing a metaslab is not guaranteed to actually reduce the amount of
62 * space used on disk. In particular, a space map uses data in increments of
63 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
64 * same number of blocks after condensing. Since the goal of condensing is to
65 * reduce the number of IOPs required to read the space map, we only want to
66 * condense when we can be sure we will reduce the number of blocks used by the
67 * space map. Unfortunately, we cannot precisely compute whether or not this is
68 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
69 * we apply the following heuristic: do not condense a spacemap unless the
70 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
71 * blocks.
72 */
73 int zfs_metaslab_condense_block_threshold = 4;
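/*
 * In other words, a space map whose uncondensed on-disk size fits within
 * four blocks (each block being MAX(1 << ashift, space_map_blksz) bytes,
 * per the comment above) is never condensed.
 */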
74
75 /*
76 * The zfs_mg_noalloc_threshold defines which metaslab groups should
77 * be eligible for allocation. The value is defined as a percentage of
78 * free space. Metaslab groups that have more free space than
79 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
80 * a metaslab group's free space is less than or equal to the
81 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
82 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
83 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
84 * groups are allowed to accept allocations. Gang blocks are always
85 * eligible to allocate on any metaslab group. The default value of 0 means
86 * no metaslab group will be excluded based on this criterion.
87 */
88 int zfs_mg_noalloc_threshold = 0;
89
90 /*
91 * Metaslab groups are considered eligible for allocations if their
92 * fragmentation metric (measured as a percentage) is less than or equal to
93 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
94 * then it will be skipped unless all metaslab groups within the metaslab
95 * class have also crossed this threshold.
96 */
97 int zfs_mg_fragmentation_threshold = 85;
98
99 /*
100 * Allow metaslabs to keep their active state as long as their fragmentation
101 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
102 * active metaslab that exceeds this threshold will no longer keep its active
103 * status allowing better metaslabs to be selected.
104 */
105 int zfs_metaslab_fragmentation_threshold = 70;
106
107 /*
108 * When set will load all metaslabs when pool is first opened.
109 */
110 int metaslab_debug_load = 0;
111
112 /*
113 * When set will prevent metaslabs from being unloaded.
114 */
115 int metaslab_debug_unload = 0;
116
117 /*
118 * Minimum size which forces the dynamic allocator to change
119 * its allocation strategy. Once the space map cannot satisfy
120 * an allocation of this size then it switches to using a more
121 * aggressive strategy (i.e. search by size rather than offset).
122 */
123 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
124
125 /*
126 * The minimum free space, in percent, which must be available
127 * in a space map to continue allocations in a first-fit fashion.
128 * Once the space map's free space drops below this level we dynamically
129 * switch to using best-fit allocations.
130 */
131 int metaslab_df_free_pct = 4;
132
133 /*
134 * Percentage of all cpus that can be used by the metaslab taskq.
135 */
136 int metaslab_load_pct = 50;
137
138 /*
139 * Determines how many txgs a metaslab may remain loaded without having any
140 * allocations from it. As long as a metaslab continues to be used we will
141 * keep it loaded.
142 */
143 int metaslab_unload_delay = TXG_SIZE * 2;
144
145 /*
146 * Max number of metaslabs per group to preload.
147 */
148 int metaslab_preload_limit = SPA_DVAS_PER_BP;
149
150 /*
151 * Enable/disable preloading of metaslabs.
152 */
153 int metaslab_preload_enabled = B_TRUE;
154
155 /*
156 * Enable/disable fragmentation weighting on metaslabs.
157 */
158 int metaslab_fragmentation_factor_enabled = B_TRUE;
159
160 /*
161 * Enable/disable lba weighting (i.e. outer tracks are given preference).
162 */
163 int metaslab_lba_weighting_enabled = B_TRUE;
164
165 /*
166 * Enable/disable metaslab group biasing.
167 */
168 int metaslab_bias_enabled = B_TRUE;
169
170
171 /*
172 * Enable/disable segment-based metaslab selection.
173 */
174 int zfs_metaslab_segment_weight_enabled = B_TRUE;
175
176 /*
177 * When using segment-based metaslab selection, we will continue
178 * allocating from the active metaslab until we have exhausted
179 * zfs_metaslab_switch_threshold of its buckets.
180 */
181 int zfs_metaslab_switch_threshold = 2;
182
183 /*
184 * Internal switch to enable/disable the metaslab allocation tracing
185 * facility.
186 */
187 #ifdef _METASLAB_TRACING
188 boolean_t metaslab_trace_enabled = B_TRUE;
189 #endif
190
191 /*
192 * Maximum entries that the metaslab allocation tracing facility will keep
193 * in a given list when running in non-debug mode. We limit the number
194 * of entries in non-debug mode to prevent us from using up too much memory.
195 * The limit should be sufficiently large that we don't expect any allocation
196 * to ever exceed this value. In debug mode, the system will panic if this
197 * limit is ever reached allowing for further investigation.
198 */
199 #ifdef _METASLAB_TRACING
200 uint64_t metaslab_trace_max_entries = 5000;
201 #endif
202
203 static uint64_t metaslab_weight(metaslab_t *);
204 static void metaslab_set_fragmentation(metaslab_t *);
205
206 #ifdef _METASLAB_TRACING
207 kmem_cache_t *metaslab_alloc_trace_cache;
208 #endif
209
210 /*
211 * ==========================================================================
212 * Metaslab classes
213 * ==========================================================================
214 */
215 metaslab_class_t *
216 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
217 {
218 metaslab_class_t *mc;
219
220 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
221
222 mc->mc_spa = spa;
223 mc->mc_rotor = NULL;
224 mc->mc_ops = ops;
225 mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
226 refcount_create_tracked(&mc->mc_alloc_slots);
227
228 return (mc);
229 }
230
231 void
232 metaslab_class_destroy(metaslab_class_t *mc)
233 {
234 ASSERT(mc->mc_rotor == NULL);
235 ASSERT(mc->mc_alloc == 0);
236 ASSERT(mc->mc_deferred == 0);
237 ASSERT(mc->mc_space == 0);
238 ASSERT(mc->mc_dspace == 0);
239
240 refcount_destroy(&mc->mc_alloc_slots);
241 mutex_destroy(&mc->mc_lock);
242 kmem_free(mc, sizeof (metaslab_class_t));
243 }
244
245 int
246 metaslab_class_validate(metaslab_class_t *mc)
247 {
248 metaslab_group_t *mg;
249 vdev_t *vd;
250
251 /*
252 * Must hold one of the spa_config locks.
253 */
254 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
255 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
256
257 if ((mg = mc->mc_rotor) == NULL)
258 return (0);
259
260 do {
261 vd = mg->mg_vd;
262 ASSERT(vd->vdev_mg != NULL);
263 ASSERT3P(vd->vdev_top, ==, vd);
264 ASSERT3P(mg->mg_class, ==, mc);
265 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
266 } while ((mg = mg->mg_next) != mc->mc_rotor);
267
268 return (0);
269 }
270
271 void
272 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
273 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
274 {
275 atomic_add_64(&mc->mc_alloc, alloc_delta);
276 atomic_add_64(&mc->mc_deferred, defer_delta);
277 atomic_add_64(&mc->mc_space, space_delta);
278 atomic_add_64(&mc->mc_dspace, dspace_delta);
279 }
280
281 uint64_t
282 metaslab_class_get_alloc(metaslab_class_t *mc)
283 {
284 return (mc->mc_alloc);
285 }
286
287 uint64_t
288 metaslab_class_get_deferred(metaslab_class_t *mc)
289 {
290 return (mc->mc_deferred);
291 }
292
293 uint64_t
294 metaslab_class_get_space(metaslab_class_t *mc)
295 {
296 return (mc->mc_space);
297 }
298
299 uint64_t
300 metaslab_class_get_dspace(metaslab_class_t *mc)
301 {
302 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
303 }
304
305 void
306 metaslab_class_histogram_verify(metaslab_class_t *mc)
307 {
308 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
309 uint64_t *mc_hist;
310 int i, c;
311
312 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
313 return;
314
315 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
316 KM_SLEEP);
317
318 for (c = 0; c < rvd->vdev_children; c++) {
319 vdev_t *tvd = rvd->vdev_child[c];
320 metaslab_group_t *mg = tvd->vdev_mg;
321
322 /*
323 * Skip any holes, uninitialized top-levels, or
324 * vdevs that are not in this metaslab class.
325 */
326 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
327 mg->mg_class != mc) {
328 continue;
329 }
330
331 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
332 mc_hist[i] += mg->mg_histogram[i];
333 }
334
335 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
336 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
337
338 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
339 }
340
341 /*
342 * Calculate the metaslab class's fragmentation metric. The metric
343 * is weighted based on the space contribution of each metaslab group.
344 * The return value will be a number between 0 and 100 (inclusive), or
345 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
346 * zfs_frag_table for more information about the metric.
347 */
348 uint64_t
349 metaslab_class_fragmentation(metaslab_class_t *mc)
350 {
351 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
352 uint64_t fragmentation = 0;
353 int c;
354
355 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
356
357 for (c = 0; c < rvd->vdev_children; c++) {
358 vdev_t *tvd = rvd->vdev_child[c];
359 metaslab_group_t *mg = tvd->vdev_mg;
360
361 /*
362 * Skip any holes, uninitialized top-levels, or
363 * vdevs that are not in this metaslab class.
364 */
365 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
366 mg->mg_class != mc) {
367 continue;
368 }
369
370 /*
371 * If a metaslab group does not contain a fragmentation
372 * metric then just bail out.
373 */
374 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
375 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
376 return (ZFS_FRAG_INVALID);
377 }
378
379 /*
380 * Determine how much this metaslab_group is contributing
381 * to the overall pool fragmentation metric.
382 */
383 fragmentation += mg->mg_fragmentation *
384 metaslab_group_get_space(mg);
385 }
386 fragmentation /= metaslab_class_get_space(mc);
387
388 ASSERT3U(fragmentation, <=, 100);
389 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
390 return (fragmentation);
391 }
392
393 /*
394 * Calculate the amount of expandable space that is available in
395 * this metaslab class. If a device is expanded then its expandable
396 * space will be the amount of allocatable space that is currently not
397 * part of this metaslab class.
398 */
399 uint64_t
400 metaslab_class_expandable_space(metaslab_class_t *mc)
401 {
402 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
403 uint64_t space = 0;
404 int c;
405
406 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
407 for (c = 0; c < rvd->vdev_children; c++) {
408 vdev_t *tvd = rvd->vdev_child[c];
409 metaslab_group_t *mg = tvd->vdev_mg;
410
411 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
412 mg->mg_class != mc) {
413 continue;
414 }
415
416 /*
417 * Calculate if we have enough space to add additional
418 * metaslabs. We report the expandable space in terms
419 * of the metaslab size since that's the unit of expansion.
420 */
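		/*
		 * P2ALIGN rounds the unexpanded region down to a whole number
		 * of metaslabs; e.g. a vdev that has grown by 3.5 metaslabs'
		 * worth of asize reports 3 metaslabs of expandable space.
		 */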
421 space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
422 1ULL << tvd->vdev_ms_shift);
423 }
424 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
425 return (space);
426 }
427
428 static int
429 metaslab_compare(const void *x1, const void *x2)
430 {
431 const metaslab_t *m1 = (const metaslab_t *)x1;
432 const metaslab_t *m2 = (const metaslab_t *)x2;
433
434 int cmp = AVL_CMP(m2->ms_weight, m1->ms_weight);
435 if (likely(cmp))
436 return (cmp);
437
438 IMPLY(AVL_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
439
440 return (AVL_CMP(m1->ms_start, m2->ms_start));
441 }
442
443 /*
444 * Verify that the space accounting on disk matches the in-core range_trees.
445 */
446 void
447 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
448 {
449 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
450 uint64_t allocated = 0;
451 uint64_t sm_free_space, msp_free_space;
452 int t;
453
454 ASSERT(MUTEX_HELD(&msp->ms_lock));
455
456 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
457 return;
458
459 /*
460 * We can only verify the metaslab space when we're called
461 * from syncing context with a loaded metaslab that has an allocated
462 * space map. Calling this in non-syncing context does not
463 * provide a consistent view of the metaslab since we're performing
464 * allocations in the future.
465 */
466 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
467 !msp->ms_loaded)
468 return;
469
470 sm_free_space = msp->ms_size - space_map_allocated(msp->ms_sm) -
471 space_map_alloc_delta(msp->ms_sm);
472
473 /*
474 * Account for future allocations since we would have already
475 * deducted that space from the ms_freetree.
476 */
477 for (t = 0; t < TXG_CONCURRENT_STATES; t++) {
478 allocated +=
479 range_tree_space(msp->ms_alloctree[(txg + t) & TXG_MASK]);
480 }
481
482 msp_free_space = range_tree_space(msp->ms_tree) + allocated +
483 msp->ms_deferspace + range_tree_space(msp->ms_freedtree);
484
485 VERIFY3U(sm_free_space, ==, msp_free_space);
486 }
487
488 /*
489 * ==========================================================================
490 * Metaslab groups
491 * ==========================================================================
492 */
493 /*
494 * Update the allocatable flag and the metaslab group's capacity.
495 * The allocatable flag is set to true if the free capacity is above
496 * the zfs_mg_noalloc_threshold and the fragmentation value is less
497 * than or equal to zfs_mg_fragmentation_threshold. If a metaslab group
498 * transitions from allocatable to non-allocatable or vice versa then the
499 * metaslab group's class is updated to reflect the transition.
500 */
501 static void
502 metaslab_group_alloc_update(metaslab_group_t *mg)
503 {
504 vdev_t *vd = mg->mg_vd;
505 metaslab_class_t *mc = mg->mg_class;
506 vdev_stat_t *vs = &vd->vdev_stat;
507 boolean_t was_allocatable;
508 boolean_t was_initialized;
509
510 ASSERT(vd == vd->vdev_top);
511
512 mutex_enter(&mg->mg_lock);
513 was_allocatable = mg->mg_allocatable;
514 was_initialized = mg->mg_initialized;
515
516 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
517 (vs->vs_space + 1);
518
519 mutex_enter(&mc->mc_lock);
520
521 /*
522 * If the metaslab group was just added then it won't
523 * have any space until we finish syncing out this txg.
524 * At that point we will consider it initialized and available
525 * for allocations. We also don't consider non-activated
526 * metaslab groups (e.g. vdevs that are in the middle of being removed)
527 * to be initialized, because they can't be used for allocation.
528 */
529 mg->mg_initialized = metaslab_group_initialized(mg);
530 if (!was_initialized && mg->mg_initialized) {
531 mc->mc_groups++;
532 } else if (was_initialized && !mg->mg_initialized) {
533 ASSERT3U(mc->mc_groups, >, 0);
534 mc->mc_groups--;
535 }
536 if (mg->mg_initialized)
537 mg->mg_no_free_space = B_FALSE;
538
539 /*
540 * A metaslab group is considered allocatable if it has plenty
541 * of free space and is not heavily fragmented. We only take
542 * fragmentation into account if the metaslab group has a valid
543 * fragmentation metric (i.e. a value between 0 and 100).
544 */
545 mg->mg_allocatable = (mg->mg_activation_count > 0 &&
546 mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
547 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
548 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
549
550 /*
551 * The mc_alloc_groups maintains a count of the number of
552 * groups in this metaslab class that are still above the
553 * zfs_mg_noalloc_threshold. This is used by the allocating
554 * threads to determine if they should avoid allocations to
555 * a given group. The allocator will avoid allocations to a group
556 * if that group has reached or is below the zfs_mg_noalloc_threshold
557 * and there are still other groups that are above the threshold.
558 * When a group transitions from allocatable to non-allocatable or
559 * vice versa we update the metaslab class to reflect that change.
560 * When the mc_alloc_groups value drops to 0 that means that all
561 * groups have reached the zfs_mg_noalloc_threshold making all groups
562 * eligible for allocations. This effectively means that all devices
563 * are balanced again.
564 */
565 if (was_allocatable && !mg->mg_allocatable)
566 mc->mc_alloc_groups--;
567 else if (!was_allocatable && mg->mg_allocatable)
568 mc->mc_alloc_groups++;
569 mutex_exit(&mc->mc_lock);
570
571 mutex_exit(&mg->mg_lock);
572 }
573
574 metaslab_group_t *
575 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
576 {
577 metaslab_group_t *mg;
578
579 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
580 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
581 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
582 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
583 mg->mg_vd = vd;
584 mg->mg_class = mc;
585 mg->mg_activation_count = 0;
586 mg->mg_initialized = B_FALSE;
587 mg->mg_no_free_space = B_TRUE;
588 refcount_create_tracked(&mg->mg_alloc_queue_depth);
589
590 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
591 maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
592
593 return (mg);
594 }
595
596 void
597 metaslab_group_destroy(metaslab_group_t *mg)
598 {
599 ASSERT(mg->mg_prev == NULL);
600 ASSERT(mg->mg_next == NULL);
601 /*
602 * We may have gone below zero with the activation count
603 * either because we never activated in the first place or
604 * because we're done, and possibly removing the vdev.
605 */
606 ASSERT(mg->mg_activation_count <= 0);
607
608 taskq_destroy(mg->mg_taskq);
609 avl_destroy(&mg->mg_metaslab_tree);
610 mutex_destroy(&mg->mg_lock);
611 refcount_destroy(&mg->mg_alloc_queue_depth);
612 kmem_free(mg, sizeof (metaslab_group_t));
613 }
614
615 void
616 metaslab_group_activate(metaslab_group_t *mg)
617 {
618 metaslab_class_t *mc = mg->mg_class;
619 metaslab_group_t *mgprev, *mgnext;
620
621 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
622
623 ASSERT(mc->mc_rotor != mg);
624 ASSERT(mg->mg_prev == NULL);
625 ASSERT(mg->mg_next == NULL);
626 ASSERT(mg->mg_activation_count <= 0);
627
628 if (++mg->mg_activation_count <= 0)
629 return;
630
631 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
632 metaslab_group_alloc_update(mg);
633
634 if ((mgprev = mc->mc_rotor) == NULL) {
635 mg->mg_prev = mg;
636 mg->mg_next = mg;
637 } else {
638 mgnext = mgprev->mg_next;
639 mg->mg_prev = mgprev;
640 mg->mg_next = mgnext;
641 mgprev->mg_next = mg;
642 mgnext->mg_prev = mg;
643 }
644 mc->mc_rotor = mg;
645 }
646
647 void
648 metaslab_group_passivate(metaslab_group_t *mg)
649 {
650 metaslab_class_t *mc = mg->mg_class;
651 metaslab_group_t *mgprev, *mgnext;
652
653 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
654
655 if (--mg->mg_activation_count != 0) {
656 ASSERT(mc->mc_rotor != mg);
657 ASSERT(mg->mg_prev == NULL);
658 ASSERT(mg->mg_next == NULL);
659 ASSERT(mg->mg_activation_count < 0);
660 return;
661 }
662
663 taskq_wait_outstanding(mg->mg_taskq, 0);
664 metaslab_group_alloc_update(mg);
665
666 mgprev = mg->mg_prev;
667 mgnext = mg->mg_next;
668
669 if (mg == mgnext) {
670 mc->mc_rotor = NULL;
671 } else {
672 mc->mc_rotor = mgnext;
673 mgprev->mg_next = mgnext;
674 mgnext->mg_prev = mgprev;
675 }
676
677 mg->mg_prev = NULL;
678 mg->mg_next = NULL;
679 }
680
681 boolean_t
682 metaslab_group_initialized(metaslab_group_t *mg)
683 {
684 vdev_t *vd = mg->mg_vd;
685 vdev_stat_t *vs = &vd->vdev_stat;
686
687 return (vs->vs_space != 0 && mg->mg_activation_count > 0);
688 }
689
690 uint64_t
691 metaslab_group_get_space(metaslab_group_t *mg)
692 {
693 return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
694 }
695
696 void
697 metaslab_group_histogram_verify(metaslab_group_t *mg)
698 {
699 uint64_t *mg_hist;
700 vdev_t *vd = mg->mg_vd;
701 uint64_t ashift = vd->vdev_ashift;
702 int i, m;
703
704 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
705 return;
706
707 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
708 KM_SLEEP);
709
710 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
711 SPACE_MAP_HISTOGRAM_SIZE + ashift);
712
713 for (m = 0; m < vd->vdev_ms_count; m++) {
714 metaslab_t *msp = vd->vdev_ms[m];
715
716 if (msp->ms_sm == NULL)
717 continue;
718
719 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
720 mg_hist[i + ashift] +=
721 msp->ms_sm->sm_phys->smp_histogram[i];
722 }
723
724 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
725 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
726
727 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
728 }
729
730 static void
731 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
732 {
733 metaslab_class_t *mc = mg->mg_class;
734 uint64_t ashift = mg->mg_vd->vdev_ashift;
735 int i;
736
737 ASSERT(MUTEX_HELD(&msp->ms_lock));
738 if (msp->ms_sm == NULL)
739 return;
740
741 mutex_enter(&mg->mg_lock);
742 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
743 mg->mg_histogram[i + ashift] +=
744 msp->ms_sm->sm_phys->smp_histogram[i];
745 mc->mc_histogram[i + ashift] +=
746 msp->ms_sm->sm_phys->smp_histogram[i];
747 }
748 mutex_exit(&mg->mg_lock);
749 }
750
751 void
752 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
753 {
754 metaslab_class_t *mc = mg->mg_class;
755 uint64_t ashift = mg->mg_vd->vdev_ashift;
756 int i;
757
758 ASSERT(MUTEX_HELD(&msp->ms_lock));
759 if (msp->ms_sm == NULL)
760 return;
761
762 mutex_enter(&mg->mg_lock);
763 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
764 ASSERT3U(mg->mg_histogram[i + ashift], >=,
765 msp->ms_sm->sm_phys->smp_histogram[i]);
766 ASSERT3U(mc->mc_histogram[i + ashift], >=,
767 msp->ms_sm->sm_phys->smp_histogram[i]);
768
769 mg->mg_histogram[i + ashift] -=
770 msp->ms_sm->sm_phys->smp_histogram[i];
771 mc->mc_histogram[i + ashift] -=
772 msp->ms_sm->sm_phys->smp_histogram[i];
773 }
774 mutex_exit(&mg->mg_lock);
775 }
776
777 static void
778 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
779 {
780 ASSERT(msp->ms_group == NULL);
781 mutex_enter(&mg->mg_lock);
782 msp->ms_group = mg;
783 msp->ms_weight = 0;
784 avl_add(&mg->mg_metaslab_tree, msp);
785 mutex_exit(&mg->mg_lock);
786
787 mutex_enter(&msp->ms_lock);
788 metaslab_group_histogram_add(mg, msp);
789 mutex_exit(&msp->ms_lock);
790 }
791
792 static void
793 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
794 {
795 mutex_enter(&msp->ms_lock);
796 metaslab_group_histogram_remove(mg, msp);
797 mutex_exit(&msp->ms_lock);
798
799 mutex_enter(&mg->mg_lock);
800 ASSERT(msp->ms_group == mg);
801 avl_remove(&mg->mg_metaslab_tree, msp);
802 msp->ms_group = NULL;
803 mutex_exit(&mg->mg_lock);
804 }
805
806 static void
807 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
808 {
809 /*
810 * Although in principle the weight can be any value, in
811 * practice we do not use values in the range [1, 511].
812 */
813 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
814 ASSERT(MUTEX_HELD(&msp->ms_lock));
815
816 mutex_enter(&mg->mg_lock);
817 ASSERT(msp->ms_group == mg);
818 avl_remove(&mg->mg_metaslab_tree, msp);
819 msp->ms_weight = weight;
820 avl_add(&mg->mg_metaslab_tree, msp);
821 mutex_exit(&mg->mg_lock);
822 }
823
824 /*
825 * Calculate the fragmentation for a given metaslab group. We can use
826 * a simple average here since all metaslabs within the group must have
827 * the same size. The return value will be a value between 0 and 100
828 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
829 * group have a fragmentation metric.
830 */
831 uint64_t
832 metaslab_group_fragmentation(metaslab_group_t *mg)
833 {
834 vdev_t *vd = mg->mg_vd;
835 uint64_t fragmentation = 0;
836 uint64_t valid_ms = 0;
837 int m;
838
839 for (m = 0; m < vd->vdev_ms_count; m++) {
840 metaslab_t *msp = vd->vdev_ms[m];
841
842 if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
843 continue;
844
845 valid_ms++;
846 fragmentation += msp->ms_fragmentation;
847 }
848
849 if (valid_ms <= vd->vdev_ms_count / 2)
850 return (ZFS_FRAG_INVALID);
851
852 fragmentation /= valid_ms;
853 ASSERT3U(fragmentation, <=, 100);
854 return (fragmentation);
855 }
856
857 /*
858 * Determine if a given metaslab group should skip allocations. A metaslab
859 * group should avoid allocations if its free capacity is less than the
860 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
861 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
862 * that can still handle allocations. If the allocation throttle is enabled
863 * then we skip allocations to devices that have reached their maximum
864 * allocation queue depth unless the selected metaslab group is the only
865 * eligible group remaining.
866 */
867 static boolean_t
868 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
869 uint64_t psize)
870 {
871 spa_t *spa = mg->mg_vd->vdev_spa;
872 metaslab_class_t *mc = mg->mg_class;
873
874 /*
875 * We can only consider skipping this metaslab group if it's
876 * in the normal metaslab class and there are other metaslab
877 * groups to select from. Otherwise, we always consider it eligible
878 * for allocations.
879 */
880 if (mc != spa_normal_class(spa) || mc->mc_groups <= 1)
881 return (B_TRUE);
882
883 /*
884 * If the metaslab group's mg_allocatable flag is set (see comments
885 * in metaslab_group_alloc_update() for more information) and
886 * the allocation throttle is disabled then allow allocations to this
887 * device. However, if the allocation throttle is enabled then
888 * check if we have reached our allocation limit (mg_alloc_queue_depth)
889 * to determine if we should allow allocations to this metaslab group.
890 * If all metaslab groups are no longer considered allocatable
891 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
892 * gang block size then we allow allocations on this metaslab group
893 * regardless of the mg_allocatable or throttle settings.
894 */
895 if (mg->mg_allocatable) {
896 metaslab_group_t *mgp;
897 int64_t qdepth;
898 uint64_t qmax = mg->mg_max_alloc_queue_depth;
899
900 if (!mc->mc_alloc_throttle_enabled)
901 return (B_TRUE);
902
903 /*
904 * If this metaslab group does not have any free space, then
905 * there is no point in looking further.
906 */
907 if (mg->mg_no_free_space)
908 return (B_FALSE);
909
910 qdepth = refcount_count(&mg->mg_alloc_queue_depth);
911
912 /*
913 * If this metaslab group is below its qmax or it's
914 * the only allocatable metaslab group, then attempt
915 * to allocate from it.
916 */
917 if (qdepth < qmax || mc->mc_alloc_groups == 1)
918 return (B_TRUE);
919 ASSERT3U(mc->mc_alloc_groups, >, 1);
920
921 /*
922 * Since this metaslab group is at or over its qmax, we
923 * need to determine if there are metaslab groups after this
924 * one that might be able to handle this allocation. This is
925 * racy since we can't hold the locks for all metaslab
926 * groups at the same time when we make this check.
927 */
928 for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
929 qmax = mgp->mg_max_alloc_queue_depth;
930
931 qdepth = refcount_count(&mgp->mg_alloc_queue_depth);
932
933 /*
934 * If there is another metaslab group that
935 * might be able to handle the allocation, then
936 * we return false so that we skip this group.
937 */
938 if (qdepth < qmax && !mgp->mg_no_free_space)
939 return (B_FALSE);
940 }
941
942 /*
943 * We didn't find another group to handle the allocation
944 * so we can't skip this metaslab group even though
945 * we are at or over our qmax.
946 */
947 return (B_TRUE);
948
949 } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
950 return (B_TRUE);
951 }
952 return (B_FALSE);
953 }
954
955 /*
956 * ==========================================================================
957 * Range tree callbacks
958 * ==========================================================================
959 */
960
961 /*
962 * Comparison function for the private size-ordered tree. Tree is sorted
963 * by size, larger sizes at the end of the tree.
964 */
965 static int
966 metaslab_rangesize_compare(const void *x1, const void *x2)
967 {
968 const range_seg_t *r1 = x1;
969 const range_seg_t *r2 = x2;
970 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
971 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
972
973 int cmp = AVL_CMP(rs_size1, rs_size2);
974 if (likely(cmp))
975 return (cmp);
976
977 return (AVL_CMP(r1->rs_start, r2->rs_start));
978 }
979
980 /*
981 * Create any block allocator specific components. The current allocators
982 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
983 */
984 static void
985 metaslab_rt_create(range_tree_t *rt, void *arg)
986 {
987 metaslab_t *msp = arg;
988
989 ASSERT3P(rt->rt_arg, ==, msp);
990 ASSERT(msp->ms_tree == NULL);
991
992 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
993 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
994 }
995
996 /*
997 * Destroy the block allocator specific components.
998 */
999 static void
1000 metaslab_rt_destroy(range_tree_t *rt, void *arg)
1001 {
1002 metaslab_t *msp = arg;
1003
1004 ASSERT3P(rt->rt_arg, ==, msp);
1005 ASSERT3P(msp->ms_tree, ==, rt);
1006 ASSERT0(avl_numnodes(&msp->ms_size_tree));
1007
1008 avl_destroy(&msp->ms_size_tree);
1009 }
1010
1011 static void
1012 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
1013 {
1014 metaslab_t *msp = arg;
1015
1016 ASSERT3P(rt->rt_arg, ==, msp);
1017 ASSERT3P(msp->ms_tree, ==, rt);
1018 VERIFY(!msp->ms_condensing);
1019 avl_add(&msp->ms_size_tree, rs);
1020 }
1021
1022 static void
1023 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
1024 {
1025 metaslab_t *msp = arg;
1026
1027 ASSERT3P(rt->rt_arg, ==, msp);
1028 ASSERT3P(msp->ms_tree, ==, rt);
1029 VERIFY(!msp->ms_condensing);
1030 avl_remove(&msp->ms_size_tree, rs);
1031 }
1032
1033 static void
1034 metaslab_rt_vacate(range_tree_t *rt, void *arg)
1035 {
1036 metaslab_t *msp = arg;
1037
1038 ASSERT3P(rt->rt_arg, ==, msp);
1039 ASSERT3P(msp->ms_tree, ==, rt);
1040
1041 /*
1042 * Normally one would walk the tree freeing nodes along the way.
1043 * Since the nodes are shared with the range trees we can avoid
1044 * walking all nodes and just reinitialize the avl tree. The nodes
1045 * will be freed by the range tree, so we don't want to free them here.
1046 */
1047 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
1048 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
1049 }
1050
1051 static range_tree_ops_t metaslab_rt_ops = {
1052 metaslab_rt_create,
1053 metaslab_rt_destroy,
1054 metaslab_rt_add,
1055 metaslab_rt_remove,
1056 metaslab_rt_vacate
1057 };
1058
1059 /*
1060 * ==========================================================================
1061 * Common allocator routines
1062 * ==========================================================================
1063 */
1064
1065 /*
1066 * Return the maximum contiguous segment within the metaslab.
1067 */
1068 uint64_t
1069 metaslab_block_maxsize(metaslab_t *msp)
1070 {
1071 avl_tree_t *t = &msp->ms_size_tree;
1072 range_seg_t *rs;
1073
1074 if (t == NULL || (rs = avl_last(t)) == NULL)
1075 return (0ULL);
1076
1077 return (rs->rs_end - rs->rs_start);
1078 }
1079
1080 static range_seg_t *
1081 metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
1082 {
1083 range_seg_t *rs, rsearch;
1084 avl_index_t where;
1085
1086 rsearch.rs_start = start;
1087 rsearch.rs_end = start + size;
1088
1089 rs = avl_find(t, &rsearch, &where);
1090 if (rs == NULL) {
1091 rs = avl_nearest(t, where, AVL_AFTER);
1092 }
1093
1094 return (rs);
1095 }
1096
1097 #if defined(WITH_FF_BLOCK_ALLOCATOR) || \
1098 defined(WITH_DF_BLOCK_ALLOCATOR) || \
1099 defined(WITH_CF_BLOCK_ALLOCATOR)
1100 /*
1101 * This is a helper function that can be used by the allocator to find
1102 * a suitable block to allocate. This will search the specified AVL
1103 * tree looking for a block that matches the specified criteria.
1104 */
1105 static uint64_t
1106 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
1107 uint64_t align)
1108 {
1109 range_seg_t *rs = metaslab_block_find(t, *cursor, size);
1110
1111 while (rs != NULL) {
1112 uint64_t offset = P2ROUNDUP(rs->rs_start, align);
1113
1114 if (offset + size <= rs->rs_end) {
1115 *cursor = offset + size;
1116 return (offset);
1117 }
1118 rs = AVL_NEXT(t, rs);
1119 }
1120
1121 /*
1122 * If we know we've searched the whole map (*cursor == 0), give up.
1123 * Otherwise, reset the cursor to the beginning and try again.
1124 */
1125 if (*cursor == 0)
1126 return (-1ULL);
1127
1128 *cursor = 0;
1129 return (metaslab_block_picker(t, cursor, size, align));
1130 }
1131 #endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */
1132
1133 #if defined(WITH_FF_BLOCK_ALLOCATOR)
1134 /*
1135 * ==========================================================================
1136 * The first-fit block allocator
1137 * ==========================================================================
1138 */
1139 static uint64_t
1140 metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
1141 {
1142 /*
1143 * Find the largest power of 2 block size that evenly divides the
1144 * requested size. This is used to try to allocate blocks with similar
1145 * alignment from the same area of the metaslab (i.e. same cursor
1146 * bucket), but it does not guarantee that allocations of other sizes
1147 * will not end up in the same region.
1148 */
1149 uint64_t align = size & -size;
1150 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1151 avl_tree_t *t = &msp->ms_tree->rt_root;
1152
1153 return (metaslab_block_picker(t, cursor, size, align));
1154 }
1155
1156 static metaslab_ops_t metaslab_ff_ops = {
1157 metaslab_ff_alloc
1158 };
1159
1160 metaslab_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
1161 #endif /* WITH_FF_BLOCK_ALLOCATOR */
1162
1163 #if defined(WITH_DF_BLOCK_ALLOCATOR)
1164 /*
1165 * ==========================================================================
1166 * Dynamic block allocator -
1167 * Uses the first fit allocation scheme until space gets low and then
1168 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
1169 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1170 * ==========================================================================
1171 */
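/*
 * With the defaults above (metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE,
 * i.e. 128K, and metaslab_df_free_pct = 4), the switch to best-fit happens
 * once the metaslab no longer has a 128K contiguous free segment or once
 * less than 4% of the metaslab is free.
 */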
1172 static uint64_t
1173 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1174 {
1175 /*
1176 * Find the largest power of 2 block size that evenly divides the
1177 * requested size. This is used to try to allocate blocks with similar
1178 * alignment from the same area of the metaslab (i.e. same cursor
1179 * bucket), but it does not guarantee that allocations of other sizes
1180 * will not end up in the same region.
1181 */
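	/*
	 * E.g. for a 24K (0x6000) request, size & -size isolates the lowest
	 * set bit, giving an 8K (0x2000) alignment and the 8K cursor bucket.
	 */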
1182 uint64_t align = size & -size;
1183 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1184 range_tree_t *rt = msp->ms_tree;
1185 avl_tree_t *t = &rt->rt_root;
1186 uint64_t max_size = metaslab_block_maxsize(msp);
1187 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1188
1189 ASSERT(MUTEX_HELD(&msp->ms_lock));
1190 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1191
1192 if (max_size < size)
1193 return (-1ULL);
1194
1195 /*
1196 * If we're running low on space switch to using the size
1197 * sorted AVL tree (best-fit).
1198 */
1199 if (max_size < metaslab_df_alloc_threshold ||
1200 free_pct < metaslab_df_free_pct) {
1201 t = &msp->ms_size_tree;
1202 *cursor = 0;
1203 }
1204
1205 return (metaslab_block_picker(t, cursor, size, 1ULL));
1206 }
1207
1208 static metaslab_ops_t metaslab_df_ops = {
1209 metaslab_df_alloc
1210 };
1211
1212 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
1213 #endif /* WITH_DF_BLOCK_ALLOCATOR */
1214
1215 #if defined(WITH_CF_BLOCK_ALLOCATOR)
1216 /*
1217 * ==========================================================================
1218 * Cursor fit block allocator -
1219 * Select the largest region in the metaslab, set the cursor to the beginning
1220 * of the range and the cursor_end to the end of the range. As allocations
1221 * are made advance the cursor. Continue allocating from the cursor until
1222 * the range is exhausted and then find a new range.
1223 * ==========================================================================
1224 */
1225 static uint64_t
1226 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1227 {
1228 range_tree_t *rt = msp->ms_tree;
1229 avl_tree_t *t = &msp->ms_size_tree;
1230 uint64_t *cursor = &msp->ms_lbas[0];
1231 uint64_t *cursor_end = &msp->ms_lbas[1];
1232 uint64_t offset = 0;
1233
1234 ASSERT(MUTEX_HELD(&msp->ms_lock));
1235 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
1236
1237 ASSERT3U(*cursor_end, >=, *cursor);
1238
1239 if ((*cursor + size) > *cursor_end) {
1240 range_seg_t *rs;
1241
1242 rs = avl_last(&msp->ms_size_tree);
1243 if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1244 return (-1ULL);
1245
1246 *cursor = rs->rs_start;
1247 *cursor_end = rs->rs_end;
1248 }
1249
1250 offset = *cursor;
1251 *cursor += size;
1252
1253 return (offset);
1254 }
1255
1256 static metaslab_ops_t metaslab_cf_ops = {
1257 metaslab_cf_alloc
1258 };
1259
1260 metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
1261 #endif /* WITH_CF_BLOCK_ALLOCATOR */
1262
1263 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
1264 /*
1265 * ==========================================================================
1266 * New dynamic fit allocator -
1267 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1268 * contiguous blocks. If no region is found then just use the largest segment
1269 * that remains.
1270 * ==========================================================================
1271 */
1272
1273 /*
1274 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1275 * to request from the allocator.
1276 */
1277 uint64_t metaslab_ndf_clump_shift = 4;
1278
1279 static uint64_t
1280 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1281 {
1282 avl_tree_t *t = &msp->ms_tree->rt_root;
1283 avl_index_t where;
1284 range_seg_t *rs, rsearch;
1285 uint64_t hbit = highbit64(size);
1286 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1287 uint64_t max_size = metaslab_block_maxsize(msp);
1288
1289 ASSERT(MUTEX_HELD(&msp->ms_lock));
1290 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1291
1292 if (max_size < size)
1293 return (-1ULL);
1294
1295 rsearch.rs_start = *cursor;
1296 rsearch.rs_end = *cursor + size;
1297
1298 rs = avl_find(t, &rsearch, &where);
1299 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
1300 t = &msp->ms_size_tree;
1301
1302 rsearch.rs_start = 0;
1303 rsearch.rs_end = MIN(max_size,
1304 1ULL << (hbit + metaslab_ndf_clump_shift));
1305 rs = avl_find(t, &rsearch, &where);
1306 if (rs == NULL)
1307 rs = avl_nearest(t, where, AVL_AFTER);
1308 ASSERT(rs != NULL);
1309 }
1310
1311 if ((rs->rs_end - rs->rs_start) >= size) {
1312 *cursor = rs->rs_start + size;
1313 return (rs->rs_start);
1314 }
1315 return (-1ULL);
1316 }
1317
1318 static metaslab_ops_t metaslab_ndf_ops = {
1319 metaslab_ndf_alloc
1320 };
1321
1322 metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
1323 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
1324
1325
1326 /*
1327 * ==========================================================================
1328 * Metaslabs
1329 * ==========================================================================
1330 */
1331
1332 /*
1333 * Wait for any in-progress metaslab loads to complete.
1334 */
1335 void
1336 metaslab_load_wait(metaslab_t *msp)
1337 {
1338 ASSERT(MUTEX_HELD(&msp->ms_lock));
1339
1340 while (msp->ms_loading) {
1341 ASSERT(!msp->ms_loaded);
1342 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1343 }
1344 }
1345
1346 int
1347 metaslab_load(metaslab_t *msp)
1348 {
1349 int error = 0;
1350 int t;
1351 boolean_t success = B_FALSE;
1352
1353 ASSERT(MUTEX_HELD(&msp->ms_lock));
1354 ASSERT(!msp->ms_loaded);
1355 ASSERT(!msp->ms_loading);
1356
1357 msp->ms_loading = B_TRUE;
1358
1359 /*
1360 * If the space map has not been allocated yet, then treat
1361 * all the space in the metaslab as free and add it to the
1362 * ms_tree.
1363 */
1364 if (msp->ms_sm != NULL)
1365 error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
1366 else
1367 range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);
1368
1369 success = (error == 0);
1370 msp->ms_loading = B_FALSE;
1371
1372 if (success) {
1373 ASSERT3P(msp->ms_group, !=, NULL);
1374 msp->ms_loaded = B_TRUE;
1375
1376 for (t = 0; t < TXG_DEFER_SIZE; t++) {
1377 range_tree_walk(msp->ms_defertree[t],
1378 range_tree_remove, msp->ms_tree);
1379 }
1380 msp->ms_max_size = metaslab_block_maxsize(msp);
1381 }
1382 cv_broadcast(&msp->ms_load_cv);
1383 return (error);
1384 }
1385
1386 void
1387 metaslab_unload(metaslab_t *msp)
1388 {
1389 ASSERT(MUTEX_HELD(&msp->ms_lock));
1390 range_tree_vacate(msp->ms_tree, NULL, NULL);
1391 msp->ms_loaded = B_FALSE;
1392 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1393 msp->ms_max_size = 0;
1394 }
1395
1396 int
1397 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
1398 metaslab_t **msp)
1399 {
1400 vdev_t *vd = mg->mg_vd;
1401 objset_t *mos = vd->vdev_spa->spa_meta_objset;
1402 metaslab_t *ms;
1403 int error;
1404
1405 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1406 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1407 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
1408 ms->ms_id = id;
1409 ms->ms_start = id << vd->vdev_ms_shift;
1410 ms->ms_size = 1ULL << vd->vdev_ms_shift;
1411
1412 /*
1413 * We only open space map objects that already exist. All others
1414 * will be opened when we finally allocate an object for them.
1415 */
1416 if (object != 0) {
1417 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
1418 ms->ms_size, vd->vdev_ashift, &ms->ms_lock);
1419
1420 if (error != 0) {
1421 kmem_free(ms, sizeof (metaslab_t));
1422 return (error);
1423 }
1424
1425 ASSERT(ms->ms_sm != NULL);
1426 }
1427
1428 /*
1429 * We create the main range tree here, but we don't create the
1430 * other range trees until metaslab_sync_done(). This serves
1431 * two purposes: it allows metaslab_sync_done() to detect the
1432 * addition of new space; and for debugging, it ensures that we'd
1433 * data fault on any attempt to use this metaslab before it's ready.
1434 */
1435 ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock);
1436 metaslab_group_add(mg, ms);
1437
1438 metaslab_set_fragmentation(ms);
1439
1440 /*
1441 * If we're opening an existing pool (txg == 0) or creating
1442 * a new one (txg == TXG_INITIAL), all space is available now.
1443 * If we're adding space to an existing pool, the new space
1444 * does not become available until after this txg has synced.
1445 * The metaslab's weight will also be initialized when we sync
1446 * out this txg. This ensures that we don't attempt to allocate
1447 * from it before we have initialized it completely.
1448 */
1449 if (txg <= TXG_INITIAL)
1450 metaslab_sync_done(ms, 0);
1451
1452 /*
1453 * If metaslab_debug_load is set and we're initializing a metaslab
1454 * that has an allocated space map object then load its space
1455 * map so that we can verify frees.
1456 */
1457 if (metaslab_debug_load && ms->ms_sm != NULL) {
1458 mutex_enter(&ms->ms_lock);
1459 VERIFY0(metaslab_load(ms));
1460 mutex_exit(&ms->ms_lock);
1461 }
1462
1463 if (txg != 0) {
1464 vdev_dirty(vd, 0, NULL, txg);
1465 vdev_dirty(vd, VDD_METASLAB, ms, txg);
1466 }
1467
1468 *msp = ms;
1469
1470 return (0);
1471 }
1472
1473 void
1474 metaslab_fini(metaslab_t *msp)
1475 {
1476 int t;
1477
1478 metaslab_group_t *mg = msp->ms_group;
1479
1480 metaslab_group_remove(mg, msp);
1481
1482 mutex_enter(&msp->ms_lock);
1483 VERIFY(msp->ms_group == NULL);
1484 vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1485 0, -msp->ms_size);
1486 space_map_close(msp->ms_sm);
1487
1488 metaslab_unload(msp);
1489 range_tree_destroy(msp->ms_tree);
1490 range_tree_destroy(msp->ms_freeingtree);
1491 range_tree_destroy(msp->ms_freedtree);
1492
1493 for (t = 0; t < TXG_SIZE; t++) {
1494 range_tree_destroy(msp->ms_alloctree[t]);
1495 }
1496
1497 for (t = 0; t < TXG_DEFER_SIZE; t++) {
1498 range_tree_destroy(msp->ms_defertree[t]);
1499 }
1500
1501 ASSERT0(msp->ms_deferspace);
1502
1503 mutex_exit(&msp->ms_lock);
1504 cv_destroy(&msp->ms_load_cv);
1505 mutex_destroy(&msp->ms_lock);
1506
1507 kmem_free(msp, sizeof (metaslab_t));
1508 }
1509
1510 #define FRAGMENTATION_TABLE_SIZE 17
1511
1512 /*
1513 * This table defines a segment size based fragmentation metric that will
1514 * allow each metaslab to derive its own fragmentation value. This is done
1515 * by calculating the space in each bucket of the spacemap histogram and
1516 * multiplying that by the fragmentation metric in this table. Doing
1517 * this for all buckets and dividing it by the total amount of free
1518 * space in this metaslab (i.e. the total free space in all buckets) gives
1519 * us the fragmentation metric. This means that a high fragmentation metric
1520 * equates to most of the free space being comprised of small segments.
1521 * Conversely, if the metric is low, then most of the free space is in
1522 * large segments. A 10% change in fragmentation equates to approximately
1523 * double the number of segments.
1524 *
1525 * This table defines 0% fragmented space using 16MB segments. Testing has
1526 * shown that segments that are greater than or equal to 16MB do not suffer
1527 * from drastic performance problems. Using this value, we derive the rest
1528 * of the table. Since the fragmentation value is never stored on disk, it
1529 * is possible to change these calculations in the future.
1530 */
1531 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
1532 100, /* 512B */
1533 100, /* 1K */
1534 98, /* 2K */
1535 95, /* 4K */
1536 90, /* 8K */
1537 80, /* 16K */
1538 70, /* 32K */
1539 60, /* 64K */
1540 50, /* 128K */
1541 40, /* 256K */
1542 30, /* 512K */
1543 20, /* 1M */
1544 15, /* 2M */
1545 10, /* 4M */
1546 5, /* 8M */
1547 0 /* 16M */
1548 };
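/*
 * For example, a metaslab whose free space is split evenly between ~8K
 * segments (table value 90) and ~1M segments (table value 20) would report
 * a fragmentation of (90 + 20) / 2 = 55.
 */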
1549
1550 /*
1551 * Calculate the metaslab's fragmentation metric and store it in
1552 * ms_fragmentation. A value of ZFS_FRAG_INVALID means that the metaslab
1553 * has not been upgraded and does not support this metric. Otherwise,
1554 * the value will be in the range [0, 100].
1555 */
1556 static void
1557 metaslab_set_fragmentation(metaslab_t *msp)
1558 {
1559 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1560 uint64_t fragmentation = 0;
1561 uint64_t total = 0;
1562 boolean_t feature_enabled = spa_feature_is_enabled(spa,
1563 SPA_FEATURE_SPACEMAP_HISTOGRAM);
1564 int i;
1565
1566 if (!feature_enabled) {
1567 msp->ms_fragmentation = ZFS_FRAG_INVALID;
1568 return;
1569 }
1570
1571 /*
1572 * A null space map means that the entire metaslab is free
1573 * and thus is not fragmented.
1574 */
1575 if (msp->ms_sm == NULL) {
1576 msp->ms_fragmentation = 0;
1577 return;
1578 }
1579
1580 /*
1581 * If this metaslab's space map has not been upgraded, flag it
1582 * so that we upgrade next time we encounter it.
1583 */
1584 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
1585 uint64_t txg = spa_syncing_txg(spa);
1586 vdev_t *vd = msp->ms_group->mg_vd;
1587
1588 /*
1589 * If we've reached the final dirty txg, then we must
1590 * be shutting down the pool. We don't want to dirty
1591 * any data past this point so skip setting the condense
1592 * flag. We can retry this action the next time the pool
1593 * is imported.
1594 */
1595 if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
1596 msp->ms_condense_wanted = B_TRUE;
1597 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1598 spa_dbgmsg(spa, "txg %llu, requesting force condense: "
1599 "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
1600 vd->vdev_id);
1601 }
1602 msp->ms_fragmentation = ZFS_FRAG_INVALID;
1603 return;
1604 }
1605
1606 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1607 uint64_t space = 0;
1608 uint8_t shift = msp->ms_sm->sm_shift;
1609
1610 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
1611 FRAGMENTATION_TABLE_SIZE - 1);
1612
1613 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1614 continue;
1615
1616 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
1617 total += space;
1618
1619 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
1620 fragmentation += space * zfs_frag_table[idx];
1621 }
1622
1623 if (total > 0)
1624 fragmentation /= total;
1625 ASSERT3U(fragmentation, <=, 100);
1626
1627 msp->ms_fragmentation = fragmentation;
1628 }
1629
1630 /*
1631 * Compute a weight -- a selection preference value -- for the given metaslab.
1632 * This is based on the amount of free space, the level of fragmentation,
1633 * the LBA range, and whether the metaslab is loaded.
1634 */
1635 static uint64_t
1636 metaslab_space_weight(metaslab_t *msp)
1637 {
1638 metaslab_group_t *mg = msp->ms_group;
1639 vdev_t *vd = mg->mg_vd;
1640 uint64_t weight, space;
1641
1642 ASSERT(MUTEX_HELD(&msp->ms_lock));
1643 ASSERT(!vd->vdev_removing);
1644
1645 /*
1646 * The baseline weight is the metaslab's free space.
1647 */
1648 space = msp->ms_size - space_map_allocated(msp->ms_sm);
1649
1650 if (metaslab_fragmentation_factor_enabled &&
1651 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
1652 /*
1653 * Use the fragmentation information to inversely scale
1654 * down the baseline weight. We need to ensure that we
1655 * don't exclude this metaslab completely when it's 100%
1656 * fragmented. To avoid this we reduce the fragmentation value
1657 * by 1.
1658 */
1659 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
1660
1661 /*
1662 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1663 * this metaslab again. The fragmentation metric may have
1664 * decreased the space to something smaller than
1665 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1666 * so that we can consume any remaining space.
1667 */
1668 if (space > 0 && space < SPA_MINBLOCKSIZE)
1669 space = SPA_MINBLOCKSIZE;
1670 }
1671 weight = space;
1672
1673 /*
1674 * Modern disks have uniform bit density and constant angular velocity.
1675 * Therefore, the outer recording zones are faster (higher bandwidth)
1676 * than the inner zones by the ratio of outer to inner track diameter,
1677 * which is typically around 2:1. We account for this by assigning
1678 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1679 * In effect, this means that we'll select the metaslab with the most
1680 * free bandwidth rather than simply the one with the most free space.
1681 */
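	/*
	 * E.g. the first metaslab on the vdev keeps the full 2x multiplier,
	 * one halfway through gets roughly 1.5x, and the last one roughly 1x.
	 */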
1682 if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
1683 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1684 ASSERT(weight >= space && weight <= 2 * space);
1685 }
1686
1687 /*
1688 * If this metaslab is one we're actively using, adjust its
1689 * weight to make it preferable to any inactive metaslab so
1690 * we'll polish it off. If the fragmentation on this metaslab
1691 * has exceeded our threshold, then don't mark it active.
1692 */
1693 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
1694 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
1695 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1696 }
1697
1698 WEIGHT_SET_SPACEBASED(weight);
1699 return (weight);
1700 }
1701
1702 /*
1703 * Return the weight of the specified metaslab, according to the segment-based
1704 * weighting algorithm. The metaslab must be loaded. This function can
1705 * be called within a sync pass since it relies only on the metaslab's
1706 * range tree which is always accurate when the metaslab is loaded.
1707 */
1708 static uint64_t
1709 metaslab_weight_from_range_tree(metaslab_t *msp)
1710 {
1711 uint64_t weight = 0;
1712 uint32_t segments = 0;
1713 int i;
1714
1715 ASSERT(msp->ms_loaded);
1716
1717 for (i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT; i--) {
1718 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
1719 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1720
1721 segments <<= 1;
1722 segments += msp->ms_tree->rt_histogram[i];
1723
1724 /*
1725 * The range tree provides more precision than the space map
1726 * and must be downgraded so that all values fit within the
1727 * space map's histogram. This allows us to compare loaded
1728 * vs. unloaded metaslabs to determine which metaslab is
1729 * considered "best".
1730 */
1731 if (i > max_idx)
1732 continue;
1733
1734 if (segments != 0) {
1735 WEIGHT_SET_COUNT(weight, segments);
1736 WEIGHT_SET_INDEX(weight, i);
1737 WEIGHT_SET_ACTIVE(weight, 0);
1738 break;
1739 }
1740 }
1741 return (weight);
1742 }
1743
1744 /*
1745 * Calculate the weight based on the on-disk histogram. This should only
1746 * be called after a sync pass has completely finished since the on-disk
1747 * information is updated in metaslab_sync().
1748 */
1749 static uint64_t
1750 metaslab_weight_from_spacemap(metaslab_t *msp)
1751 {
1752 uint64_t weight = 0;
1753 int i;
1754
1755 for (i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
1756 if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) {
1757 WEIGHT_SET_COUNT(weight,
1758 msp->ms_sm->sm_phys->smp_histogram[i]);
1759 WEIGHT_SET_INDEX(weight, i +
1760 msp->ms_sm->sm_shift);
1761 WEIGHT_SET_ACTIVE(weight, 0);
1762 break;
1763 }
1764 }
1765 return (weight);
1766 }
1767
1768 /*
1769 * Compute a segment-based weight for the specified metaslab. The weight
1770 * is determined by the highest bucket in the histogram. The information
1771 * for the highest bucket is encoded into the weight value.
1772 */
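/*
 * For example, if the largest remaining free segments of a metaslab fall in
 * the 512K (2^19) histogram bucket, the weight encodes index 19 along with a
 * count of such segments, and metaslab_should_allocate() can then reject any
 * request of 1M or larger from the weight alone.
 */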
1773 static uint64_t
1774 metaslab_segment_weight(metaslab_t *msp)
1775 {
1776 metaslab_group_t *mg = msp->ms_group;
1777 uint64_t weight = 0;
1778 uint8_t shift = mg->mg_vd->vdev_ashift;
1779
1780 ASSERT(MUTEX_HELD(&msp->ms_lock));
1781
1782 /*
1783 * The metaslab is completely free.
1784 */
1785 if (space_map_allocated(msp->ms_sm) == 0) {
1786 int idx = highbit64(msp->ms_size) - 1;
1787 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1788
1789 if (idx < max_idx) {
1790 WEIGHT_SET_COUNT(weight, 1ULL);
1791 WEIGHT_SET_INDEX(weight, idx);
1792 } else {
1793 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
1794 WEIGHT_SET_INDEX(weight, max_idx);
1795 }
1796 WEIGHT_SET_ACTIVE(weight, 0);
1797 ASSERT(!WEIGHT_IS_SPACEBASED(weight));
1798
1799 return (weight);
1800 }
1801
1802 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
1803
1804 /*
1805 * If the metaslab is fully allocated then just make the weight 0.
1806 */
1807 if (space_map_allocated(msp->ms_sm) == msp->ms_size)
1808 return (0);
1809 /*
1810 * If the metaslab is already loaded, then use the range tree to
1811 * determine the weight. Otherwise, we rely on the space map information
1812 * to generate the weight.
1813 */
1814 if (msp->ms_loaded) {
1815 weight = metaslab_weight_from_range_tree(msp);
1816 } else {
1817 weight = metaslab_weight_from_spacemap(msp);
1818 }
1819
1820 /*
1821 * If the metaslab was active the last time we calculated its weight
1822 * then keep it active. We want to consume the entire region that
1823 * is associated with this weight.
1824 */
1825 if (msp->ms_activation_weight != 0 && weight != 0)
1826 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
1827 return (weight);
1828 }
1829
1830 /*
1831 * Determine if we should attempt to allocate from this metaslab. If the
1832 * metaslab has a maximum size then we can quickly determine if the desired
1833 * allocation size can be satisfied. Otherwise, if we're using segment-based
1834 * weighting then we can determine the maximum allocation that this metaslab
1835 * can accommodate based on the index encoded in the weight. If we're using
1836 * space-based weights then rely on the entire weight (excluding the weight
1837 * type bit).
1838 */
1839 boolean_t
1840 metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
1841 {
1842 boolean_t should_allocate;
1843
1844 if (msp->ms_max_size != 0)
1845 return (msp->ms_max_size >= asize);
1846
1847 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
1848 /*
1849 * The metaslab segment weight indicates segments in the
1850 * range [2^i, 2^(i+1)), where i is the index in the weight.
1851 * Since the asize might be in the middle of the range, we
1852 * should attempt the allocation if asize < 2^(i+1).
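 * For example, if the index is 20 the weight describes free segments
 * in the range [1 MiB, 2 MiB), so any asize below 2 MiB is worth
 * attempting.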
1853 */
1854 should_allocate = (asize <
1855 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
1856 } else {
1857 should_allocate = (asize <=
1858 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
1859 }
1860 return (should_allocate);
1861 }
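
/*
 * Compute the weight for this metaslab. A metaslab on a vdev that is being
 * removed gets a weight of 0. Segment-based weighting is used when the
 * spacemap_histogram feature is enabled (and the
 * zfs_metaslab_segment_weight_enabled tunable permits it); otherwise we fall
 * back to the space-based weighting above.
 */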
1862 static uint64_t
1863 metaslab_weight(metaslab_t *msp)
1864 {
1865 vdev_t *vd = msp->ms_group->mg_vd;
1866 spa_t *spa = vd->vdev_spa;
1867 uint64_t weight;
1868
1869 ASSERT(MUTEX_HELD(&msp->ms_lock));
1870
1871 /*
1872 * This vdev is in the process of being removed so there is nothing
1873 * for us to do here.
1874 */
1875 if (vd->vdev_removing) {
1876 ASSERT0(space_map_allocated(msp->ms_sm));
1877 ASSERT0(vd->vdev_ms_shift);
1878 return (0);
1879 }
1880
1881 metaslab_set_fragmentation(msp);
1882
1883 /*
1884 * Update the maximum size if the metaslab is loaded. This will
1885 * ensure that we get an accurate maximum size if newly freed space
1886 * has been added back into the free tree.
1887 */
1888 if (msp->ms_loaded)
1889 msp->ms_max_size = metaslab_block_maxsize(msp);
1890
1891 /*
1892 * Segment-based weighting requires space map histogram support.
1893 */
1894 if (zfs_metaslab_segment_weight_enabled &&
1895 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
1896 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
1897 sizeof (space_map_phys_t))) {
1898 weight = metaslab_segment_weight(msp);
1899 } else {
1900 weight = metaslab_space_weight(msp);
1901 }
1902 return (weight);
1903 }
1904
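/*
 * Activate a metaslab for allocation. If it is not already active, wait for
 * any in-flight load, load it from disk if necessary, and re-sort it within
 * its group with the requested activation weight (primary or secondary) set.
 * Returns 0 on success or the error from metaslab_load().
 */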
1905 static int
1906 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
1907 {
1908 ASSERT(MUTEX_HELD(&msp->ms_lock));
1909
1910 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1911 metaslab_load_wait(msp);
1912 if (!msp->ms_loaded) {
1913 int error = metaslab_load(msp);
1914 if (error) {
1915 metaslab_group_sort(msp->ms_group, msp, 0);
1916 return (error);
1917 }
1918 }
1919
1920 msp->ms_activation_weight = msp->ms_weight;
1921 metaslab_group_sort(msp->ms_group, msp,
1922 msp->ms_weight | activation_weight);
1923 }
1924 ASSERT(msp->ms_loaded);
1925 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
1926
1927 return (0);
1928 }
1929
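/*
 * Drop the metaslab's active state and re-sort it within its group using the
 * supplied (inactive) weight.
 */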
1930 static void
1931 metaslab_passivate(metaslab_t *msp, uint64_t weight)
1932 {
1933 ASSERTV(uint64_t size = weight & ~METASLAB_WEIGHT_TYPE);
1934
1935 /*
1936 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1937 * this metaslab again. In that case, it had better be empty,
1938 * or we would be leaving space on the table.
1939 */
1940 ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
1941 size >= SPA_MINBLOCKSIZE ||
1942 range_tree_space(msp->ms_tree) == 0);
1943 ASSERT0(weight & METASLAB_ACTIVE_MASK);
1944
1945 msp->ms_activation_weight = 0;
1946 metaslab_group_sort(msp->ms_group, msp, weight);
1947 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
1948 }
1949
1950 /*
1951 * Segment-based metaslabs are activated once and remain active until
1952 * we either fail an allocation attempt (similar to space-based metaslabs)
1953 * or have exhausted the free space in zfs_metaslab_switch_threshold
1954 * buckets since the metaslab was activated. This function checks to see
1955 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
1956 * metaslab and passivates it proactively. This will allow us to select a
1957 * metaslab with a larger contiguous region, if any, remaining within this
1958 * metaslab group. If we're in sync pass > 1, then we continue using this
1959 * metaslab so that we don't dirty more blocks and cause more sync passes.
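 * For example, with zfs_metaslab_switch_threshold set to 2, a metaslab
 * activated at index 25 is passivated once its highest occupied bucket
 * drops to index 23 or lower.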
1960 */
1961 void
1962 metaslab_segment_may_passivate(metaslab_t *msp)
1963 {
1964 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1965 uint64_t weight;
1966 int activation_idx, current_idx;
1967
1968 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
1969 return;
1970
1971 /*
1972 * Since we are in the middle of a sync pass, the most accurate
1973 * information that is accessible to us is the in-core range tree
1974 * histogram; calculate the new weight based on that information.
1975 */
1976 weight = metaslab_weight_from_range_tree(msp);
1977 activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
1978 current_idx = WEIGHT_GET_INDEX(weight);
1979
1980 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
1981 metaslab_passivate(msp, weight);
1982 }
1983
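/*
 * Taskq callback used by metaslab_group_preload() to load a metaslab in the
 * background and record the txg in which it was selected.
 */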
1984 static void
1985 metaslab_preload(void *arg)
1986 {
1987 metaslab_t *msp = arg;
1988 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1989 fstrans_cookie_t cookie = spl_fstrans_mark();
1990
1991 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
1992
1993 mutex_enter(&msp->ms_lock);
1994 metaslab_load_wait(msp);
1995 if (!msp->ms_loaded)
1996 (void) metaslab_load(msp);
1997 msp->ms_selected_txg = spa_syncing_txg(spa);
1998 mutex_exit(&msp->ms_lock);
1999 spl_fstrans_unmark(cookie);
2000 }
2001
2002 static void
2003 metaslab_group_preload(metaslab_group_t *mg)
2004 {
2005 spa_t *spa = mg->mg_vd->vdev_spa;
2006 metaslab_t *msp;
2007 avl_tree_t *t = &mg->mg_metaslab_tree;
2008 int m = 0;
2009
2010 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
2011 taskq_wait_outstanding(mg->mg_taskq, 0);
2012 return;
2013 }
2014
2015 mutex_enter(&mg->mg_lock);
2016 /*
2017 * Load the next potential metaslabs
2018 */
2019 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
2020 /*
2021 * We preload only the maximum number of metaslabs specified
2022 * by metaslab_preload_limit. If a metaslab is being forced
2023 * to condense then we preload it too. This will ensure
2024 * that force condensing happens in the next txg.
2025 */
2026 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
2027 continue;
2028 }
2029
2030 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
2031 msp, TQ_SLEEP) != TASKQID_INVALID);
2032 }
2033 mutex_exit(&mg->mg_lock);
2034 }
2035
2036 /*
2037 * Determine if the space map's on-disk footprint is past our tolerance
2038 * for inefficiency. We would like to use the following criteria to make
2039 * our decision:
2040 *
2041 * 1. The size of the space map object should not dramatically increase as a
2042 * result of writing out the free space range tree.
2043 *
2044 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
2045 * times the size of the free space range tree representation
2046 * (i.e. if zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
2047 *
2048 * 3. The on-disk size of the space map should actually decrease.
2049 *
2050 * Checking the first condition is tricky since we don't want to walk
2051 * the entire AVL tree calculating the estimated on-disk size. Instead we
2052 * use the size-ordered range tree in the metaslab and calculate the
2053 * size required to write out the largest segment in our free tree. If the
2054 * size required to represent that segment on disk is larger than the space
2055 * map object then we avoid condensing this map.
2056 *
2057 * To determine the second criterion we use a best-case estimate and assume
2058 * each segment can be represented on-disk as a single 64-bit entry. We refer
2059 * to this best-case estimate as the space map's minimal form.
2060 *
2061 * Unfortunately, we cannot compute the on-disk size of the space map in this
2062 * context because we cannot accurately compute the effects of compression, etc.
2063 * Instead, we apply the heuristic described in the block comment for
2064 * zfs_metaslab_condense_block_threshold - we only condense if the space used
2065 * is greater than a threshold number of blocks.
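 * For example, assuming the default 4 KiB space map block size and the
 * default zfs_metaslab_condense_block_threshold of 4, the space map must
 * occupy more than 16 KiB on disk before we will consider condensing it.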
2066 */
2067 static boolean_t
2068 metaslab_should_condense(metaslab_t *msp)
2069 {
2070 space_map_t *sm = msp->ms_sm;
2071 range_seg_t *rs;
2072 uint64_t size, entries, segsz, object_size, optimal_size, record_size;
2073 dmu_object_info_t doi;
2074 uint64_t vdev_blocksize = 1ULL << msp->ms_group->mg_vd->vdev_ashift;
2075
2076 ASSERT(MUTEX_HELD(&msp->ms_lock));
2077 ASSERT(msp->ms_loaded);
2078
2079 /*
2080 * Use the ms_size_tree range tree, which is ordered by size, to
2081 * obtain the largest segment in the free tree. We always condense
2082 * metaslabs that are empty and metaslabs for which a condense
2083 * request has been made.
2084 */
2085 rs = avl_last(&msp->ms_size_tree);
2086 if (rs == NULL || msp->ms_condense_wanted)
2087 return (B_TRUE);
2088
2089 /*
2090 * Calculate the number of 64-bit entries this segment would
2091 * require when written to disk. If this single segment would be
2092 * larger on-disk than the entire current on-disk structure, then
2093 * clearly condensing will increase the on-disk structure size.
2094 */
2095 size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
2096 entries = size / (MIN(size, SM_RUN_MAX));
2097 segsz = entries * sizeof (uint64_t);
2098
2099 optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root);
2100 object_size = space_map_length(msp->ms_sm);
2101
2102 dmu_object_info_from_db(sm->sm_dbuf, &doi);
2103 record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
2104
2105 return (segsz <= object_size &&
2106 object_size >= (optimal_size * zfs_condense_pct / 100) &&
2107 object_size > zfs_metaslab_condense_block_threshold * record_size);
2108 }
2109
2110 /*
2111 * Condense the on-disk space map representation to its minimized form.
2112 * The minimized form consists of a small number of allocations followed by
2113 * the entries of the free range tree.
2114 */
2115 static void
2116 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
2117 {
2118 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2119 range_tree_t *condense_tree;
2120 space_map_t *sm = msp->ms_sm;
2121 int t;
2122
2123 ASSERT(MUTEX_HELD(&msp->ms_lock));
2124 ASSERT3U(spa_sync_pass(spa), ==, 1);
2125 ASSERT(msp->ms_loaded);
2126
2128 spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
2129 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
2130 msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
2131 msp->ms_group->mg_vd->vdev_spa->spa_name,
2132 space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root),
2133 msp->ms_condense_wanted ? "TRUE" : "FALSE");
2134
2135 msp->ms_condense_wanted = B_FALSE;
2136
2137 /*
2138 * Create a range tree that is 100% allocated. We remove segments
2139 * that have been freed in this txg, any deferred frees that exist,
2140 * and any allocation in the future. Removing segments should be
2141 * a relatively inexpensive operation since we expect these trees to
2142 * have a small number of nodes.
2143 */
2144 condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
2145 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
2146
2147 /*
2148 * Remove what's been freed in this txg from the condense_tree.
2149 * Since we're in sync_pass 1, we know that all the frees from
2150 * this txg are in the freeingtree.
2151 */
2152 range_tree_walk(msp->ms_freeingtree, range_tree_remove, condense_tree);
2153
2154 for (t = 0; t < TXG_DEFER_SIZE; t++) {
2155 range_tree_walk(msp->ms_defertree[t],
2156 range_tree_remove, condense_tree);
2157 }
2158
2159 for (t = 1; t < TXG_CONCURRENT_STATES; t++) {
2160 range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
2161 range_tree_remove, condense_tree);
2162 }
2163
2164 /*
2165 * We're about to drop the metaslab's lock thus allowing
2166 * other consumers to change its content. Set the
2167 * metaslab's ms_condensing flag to ensure that
2168 * allocations on this metaslab do not occur while we're
2169 * in the middle of committing it to disk. This is only critical
2170 * for the ms_tree as all other range trees use per txg
2171 * views of their content.
2172 */
2173 msp->ms_condensing = B_TRUE;
2174
2175 mutex_exit(&msp->ms_lock);
2176 space_map_truncate(sm, tx);
2177 mutex_enter(&msp->ms_lock);
2178
2179 /*
2180 * While we would ideally like to create a space map representation
2181 * that consists only of allocation records, doing so can be
2182 * prohibitively expensive because the in-core free tree can be
2183 * large, and therefore computationally expensive to subtract
2184 * from the condense_tree. Instead we sync out two trees, a cheap
2185 * allocation only tree followed by the in-core free tree. While not
2186 * optimal, this is typically close to optimal, and much cheaper to
2187 * compute.
2188 */
2189 space_map_write(sm, condense_tree, SM_ALLOC, tx);
2190 range_tree_vacate(condense_tree, NULL, NULL);
2191 range_tree_destroy(condense_tree);
2192
2193 space_map_write(sm, msp->ms_tree, SM_FREE, tx);
2194 msp->ms_condensing = B_FALSE;
2195 }
2196
2197 /*
2198 * Write a metaslab to disk in the context of the specified transaction group.
2199 */
2200 void
2201 metaslab_sync(metaslab_t *msp, uint64_t txg)
2202 {
2203 metaslab_group_t *mg = msp->ms_group;
2204 vdev_t *vd = mg->mg_vd;
2205 spa_t *spa = vd->vdev_spa;
2206 objset_t *mos = spa_meta_objset(spa);
2207 range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
2208 dmu_tx_t *tx;
2209 uint64_t object = space_map_object(msp->ms_sm);
2210
2211 ASSERT(!vd->vdev_ishole);
2212
2213 /*
2214 * This metaslab has just been added so there's no work to do now.
2215 */
2216 if (msp->ms_freeingtree == NULL) {
2217 ASSERT3P(alloctree, ==, NULL);
2218 return;
2219 }
2220
2221 ASSERT3P(alloctree, !=, NULL);
2222 ASSERT3P(msp->ms_freeingtree, !=, NULL);
2223 ASSERT3P(msp->ms_freedtree, !=, NULL);
2224
2225 /*
2226 * Normally, we don't want to process a metaslab if there
2227 * are no allocations or frees to perform. However, if the metaslab
2228 * is being forced to condense and it's loaded, we need to let it
2229 * through.
2230 */
2231 if (range_tree_space(alloctree) == 0 &&
2232 range_tree_space(msp->ms_freeingtree) == 0 &&
2233 !(msp->ms_loaded && msp->ms_condense_wanted))
2234 return;
2235
2237 VERIFY(txg <= spa_final_dirty_txg(spa));
2238
2239 /*
2240 * The only state that can actually be changing concurrently with
2241 * metaslab_sync() is the metaslab's ms_tree. No other thread can
2242 * be modifying this txg's alloctree, freeingtree, freedtree, or
2243 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
2244 * space map ASSERTs. We drop it whenever we call into the DMU,
2245 * because the DMU can call down to us (e.g. via zio_free()) at
2246 * any time.
2247 */
2248
2249 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
2250
2251 if (msp->ms_sm == NULL) {
2252 uint64_t new_object;
2253
2254 new_object = space_map_alloc(mos, tx);
2255 VERIFY3U(new_object, !=, 0);
2256
2257 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
2258 msp->ms_start, msp->ms_size, vd->vdev_ashift,
2259 &msp->ms_lock));
2260 ASSERT(msp->ms_sm != NULL);
2261 }
2262
2263 mutex_enter(&msp->ms_lock);
2264
2265 /*
2266 * Note: metaslab_condense() clears the space map's histogram.
2267 * Therefore we must verify and remove this histogram before
2268 * condensing.
2269 */
2270 metaslab_group_histogram_verify(mg);
2271 metaslab_class_histogram_verify(mg->mg_class);
2272 metaslab_group_histogram_remove(mg, msp);
2273
2274 if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
2275 metaslab_should_condense(msp)) {
2276 metaslab_condense(msp, txg, tx);
2277 } else {
2278 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
2279 space_map_write(msp->ms_sm, msp->ms_freeingtree, SM_FREE, tx);
2280 }
2281
2282 if (msp->ms_loaded) {
2283 int t;
2284
2285 /*
2286 * When the space map is loaded, we have an accurate
2287 * histogram in the range tree. This gives us an opportunity
2288 * to bring the space map's histogram up-to-date so we clear
2289 * it first before updating it.
2290 */
2291 space_map_histogram_clear(msp->ms_sm);
2292 space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
2293
2294 /*
2295 * Since we've cleared the histogram we need to add back
2296 * any free space that has already been processed, plus
2297 * any deferred space. This allows the on-disk histogram
2298 * to accurately reflect all free space even if some space
2299 * is not yet available for allocation (i.e. deferred).
2300 */
2301 space_map_histogram_add(msp->ms_sm, msp->ms_freedtree, tx);
2302
2303 /*
2304 * Add back any deferred free space that has not been
2305 * added back into the in-core free tree yet. This will
2306 * ensure that we don't end up with a space map histogram
2307 * that is completely empty unless the metaslab is fully
2308 * allocated.
2309 */
2310 for (t = 0; t < TXG_DEFER_SIZE; t++) {
2311 space_map_histogram_add(msp->ms_sm,
2312 msp->ms_defertree[t], tx);
2313 }
2314 }
2315
2316 /*
2317 * Always add the free space from this sync pass to the space
2318 * map histogram. We want to make sure that the on-disk histogram
2319 * accounts for all free space. If the space map is not loaded,
2320 * then we will lose some accuracy but will correct it the next
2321 * time we load the space map.
2322 */
2323 space_map_histogram_add(msp->ms_sm, msp->ms_freeingtree, tx);
2324
2325 metaslab_group_histogram_add(mg, msp);
2326 metaslab_group_histogram_verify(mg);
2327 metaslab_class_histogram_verify(mg->mg_class);
2328
2329 /*
2330 * For sync pass 1, we avoid traversing this txg's free range tree
2331 * and instead will just swap the pointers for freeingtree and
2332 * freedtree. We can safely do this since the freedtree is
2333 * guaranteed to be empty on the initial pass.
2334 */
2335 if (spa_sync_pass(spa) == 1) {
2336 range_tree_swap(&msp->ms_freeingtree, &msp->ms_freedtree);
2337 } else {
2338 range_tree_vacate(msp->ms_freeingtree,
2339 range_tree_add, msp->ms_freedtree);
2340 }
2341 range_tree_vacate(alloctree, NULL, NULL);
2342
2343 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
2344 ASSERT0(range_tree_space(msp->ms_alloctree[TXG_CLEAN(txg) & TXG_MASK]));
2345 ASSERT0(range_tree_space(msp->ms_freeingtree));
2346
2347 mutex_exit(&msp->ms_lock);
2348
2349 if (object != space_map_object(msp->ms_sm)) {
2350 object = space_map_object(msp->ms_sm);
2351 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
2352 msp->ms_id, sizeof (uint64_t), &object, tx);
2353 }
2354 dmu_tx_commit(tx);
2355 }
2356
2357 /*
2358 * Called after a transaction group has completely synced to mark
2359 * all of the metaslab's free space as usable.
2360 */
2361 void
2362 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
2363 {
2364 metaslab_group_t *mg = msp->ms_group;
2365 vdev_t *vd = mg->mg_vd;
2366 spa_t *spa = vd->vdev_spa;
2367 range_tree_t **defer_tree;
2368 int64_t alloc_delta, defer_delta;
2369 uint64_t free_space;
2370 boolean_t defer_allowed = B_TRUE;
2371 int t;
2372
2373 ASSERT(!vd->vdev_ishole);
2374
2375 mutex_enter(&msp->ms_lock);
2376
2377 /*
2378 * If this metaslab is just becoming available, initialize its
2379 * range trees and add its capacity to the vdev.
2380 */
2381 if (msp->ms_freedtree == NULL) {
2382 for (t = 0; t < TXG_SIZE; t++) {
2383 ASSERT(msp->ms_alloctree[t] == NULL);
2384
2385 msp->ms_alloctree[t] = range_tree_create(NULL, msp,
2386 &msp->ms_lock);
2387 }
2388
2389 ASSERT3P(msp->ms_freeingtree, ==, NULL);
2390 msp->ms_freeingtree = range_tree_create(NULL, msp,
2391 &msp->ms_lock);
2392
2393 ASSERT3P(msp->ms_freedtree, ==, NULL);
2394 msp->ms_freedtree = range_tree_create(NULL, msp,
2395 &msp->ms_lock);
2396
2397 for (t = 0; t < TXG_DEFER_SIZE; t++) {
2398 ASSERT(msp->ms_defertree[t] == NULL);
2399
2400 msp->ms_defertree[t] = range_tree_create(NULL, msp,
2401 &msp->ms_lock);
2402 }
2403
2404 vdev_space_update(vd, 0, 0, msp->ms_size);
2405 }
2406
2407 defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
2408
2409 free_space = metaslab_class_get_space(spa_normal_class(spa)) -
2410 metaslab_class_get_alloc(spa_normal_class(spa));
2411 if (free_space <= spa_get_slop_space(spa)) {
2412 defer_allowed = B_FALSE;
2413 }
2414
2415 defer_delta = 0;
2416 alloc_delta = space_map_alloc_delta(msp->ms_sm);
2417 if (defer_allowed) {
2418 defer_delta = range_tree_space(msp->ms_freedtree) -
2419 range_tree_space(*defer_tree);
2420 } else {
2421 defer_delta -= range_tree_space(*defer_tree);
2422 }
2423
2424 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
2425
2426 /*
2427 * If there's a metaslab_load() in progress, wait for it to complete
2428 * so that we have a consistent view of the in-core space map.
2429 */
2430 metaslab_load_wait(msp);
2431
2432 /*
2433 * Move the frees from the defer_tree back to the free
2434 * range tree (if it's loaded). Swap the freedtree and the
2435 * defer_tree -- this is safe to do because we've just emptied out
2436 * the defer_tree.
2437 */
2438 range_tree_vacate(*defer_tree,
2439 msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
2440 if (defer_allowed) {
2441 range_tree_swap(&msp->ms_freedtree, defer_tree);
2442 } else {
2443 range_tree_vacate(msp->ms_freedtree,
2444 msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
2445 }
2446
2447 space_map_update(msp->ms_sm);
2448
2449 msp->ms_deferspace += defer_delta;
2450 ASSERT3S(msp->ms_deferspace, >=, 0);
2451 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
2452 if (msp->ms_deferspace != 0) {
2453 /*
2454 * Keep syncing this metaslab until all deferred frees
2455 * are back in circulation.
2456 */
2457 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2458 }
2459
2460 /*
2461 * Calculate the new weights before unloading any metaslabs.
2462 * This will give us the most accurate weighting.
2463 */
2464 metaslab_group_sort(mg, msp, metaslab_weight(msp));
2465
2466 /*
2467 * If the metaslab is loaded and we've not tried to load or allocate
2468 * from it in 'metaslab_unload_delay' txgs, then unload it.
2469 */
2470 if (msp->ms_loaded &&
2471 msp->ms_selected_txg + metaslab_unload_delay < txg) {
2472
2473 for (t = 1; t < TXG_CONCURRENT_STATES; t++) {
2474 VERIFY0(range_tree_space(
2475 msp->ms_alloctree[(txg + t) & TXG_MASK]));
2476 }
2477
2478 if (!metaslab_debug_unload)
2479 metaslab_unload(msp);
2480 }
2481
2482 mutex_exit(&msp->ms_lock);
2483 }
2484
2485 void
2486 metaslab_sync_reassess(metaslab_group_t *mg)
2487 {
2488 metaslab_group_alloc_update(mg);
2489 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
2490
2491 /*
2492 * Preload the next potential metaslabs
2493 */
2494 metaslab_group_preload(mg);
2495 }
2496
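/*
 * Return the distance, in bytes (rounded to metaslab boundaries), between
 * the given DVA and this metaslab. DVAs on a different top-level vdev are
 * treated as maximally distant (1ULL << 63).
 */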
2497 static uint64_t
2498 metaslab_distance(metaslab_t *msp, dva_t *dva)
2499 {
2500 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
2501 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
2502 uint64_t start = msp->ms_id;
2503
2504 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
2505 return (1ULL << 63);
2506
2507 if (offset < start)
2508 return ((start - offset) << ms_shift);
2509 if (offset > start)
2510 return ((offset - start) << ms_shift);
2511 return (0);
2512 }
2513
2514 /*
2515 * ==========================================================================
2516 * Metaslab allocation tracing facility
2517 * ==========================================================================
2518 */
2519 #ifdef _METASLAB_TRACING
2520 kstat_t *metaslab_trace_ksp;
2521 kstat_named_t metaslab_trace_over_limit;
2522
2523 void
2524 metaslab_alloc_trace_init(void)
2525 {
2526 ASSERT(metaslab_alloc_trace_cache == NULL);
2527 metaslab_alloc_trace_cache = kmem_cache_create(
2528 "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
2529 0, NULL, NULL, NULL, NULL, NULL, 0);
2530 metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
2531 "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
2532 if (metaslab_trace_ksp != NULL) {
2533 metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
2534 kstat_named_init(&metaslab_trace_over_limit,
2535 "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
2536 kstat_install(metaslab_trace_ksp);
2537 }
2538 }
2539
2540 void
2541 metaslab_alloc_trace_fini(void)
2542 {
2543 if (metaslab_trace_ksp != NULL) {
2544 kstat_delete(metaslab_trace_ksp);
2545 metaslab_trace_ksp = NULL;
2546 }
2547 kmem_cache_destroy(metaslab_alloc_trace_cache);
2548 metaslab_alloc_trace_cache = NULL;
2549 }
2550
2551 /*
2552 * Add an allocation trace element to the allocation tracing list.
2553 */
2554 static void
2555 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
2556 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset)
2557 {
2558 metaslab_alloc_trace_t *mat;
2559
2560 if (!metaslab_trace_enabled)
2561 return;
2562
2563 /*
2564 * When the tracing list reaches its maximum we remove
2565 * the second element in the list before adding a new one.
2566 * By removing the second element we preserve the original
2567 * entry as a clue to what allocation steps have already been
2568 * performed.
2569 */
2570 if (zal->zal_size == metaslab_trace_max_entries) {
2571 metaslab_alloc_trace_t *mat_next;
2572 #ifdef DEBUG
2573 panic("too many entries in allocation list");
2574 #endif
2575 atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
2576 zal->zal_size--;
2577 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
2578 list_remove(&zal->zal_list, mat_next);
2579 kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
2580 }
2581
2582 mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
2583 list_link_init(&mat->mat_list_node);
2584 mat->mat_mg = mg;
2585 mat->mat_msp = msp;
2586 mat->mat_size = psize;
2587 mat->mat_dva_id = dva_id;
2588 mat->mat_offset = offset;
2589 mat->mat_weight = 0;
2590
2591 if (msp != NULL)
2592 mat->mat_weight = msp->ms_weight;
2593
2594 /*
2595 * The list is part of the zio so locking is not required. Only
2596 * a single thread will perform allocations for a given zio.
2597 */
2598 list_insert_tail(&zal->zal_list, mat);
2599 zal->zal_size++;
2600
2601 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
2602 }
2603
2604 void
2605 metaslab_trace_init(zio_alloc_list_t *zal)
2606 {
2607 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
2608 offsetof(metaslab_alloc_trace_t, mat_list_node));
2609 zal->zal_size = 0;
2610 }
2611
2612 void
2613 metaslab_trace_fini(zio_alloc_list_t *zal)
2614 {
2615 metaslab_alloc_trace_t *mat;
2616
2617 while ((mat = list_remove_head(&zal->zal_list)) != NULL)
2618 kmem_cache_free(metaslab_alloc_trace_cache, mat);
2619 list_destroy(&zal->zal_list);
2620 zal->zal_size = 0;
2621 }
2622 #else
2623
2624 #define metaslab_trace_add(zal, mg, msp, psize, id, off)
2625
2626 void
2627 metaslab_alloc_trace_init(void)
2628 {
2629 }
2630
2631 void
2632 metaslab_alloc_trace_fini(void)
2633 {
2634 }
2635
2636 void
2637 metaslab_trace_init(zio_alloc_list_t *zal)
2638 {
2639 }
2640
2641 void
2642 metaslab_trace_fini(zio_alloc_list_t *zal)
2643 {
2644 }
2645
2646 #endif /* _METASLAB_TRACING */
2647
2648 /*
2649 * ==========================================================================
2650 * Metaslab block operations
2651 * ==========================================================================
2652 */
2653
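/*
 * Track the allocation queue depth (the number of in-flight asynchronous
 * allocations) for each metaslab group. Only METASLAB_ASYNC_ALLOC requests
 * that are not flagged METASLAB_DONT_THROTTLE, and only classes with
 * allocation throttling enabled, are counted.
 */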
2654 static void
2655 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags)
2656 {
2657 metaslab_group_t *mg;
2658
2659 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2660 flags & METASLAB_DONT_THROTTLE)
2661 return;
2662
2663 mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2664 if (!mg->mg_class->mc_alloc_throttle_enabled)
2665 return;
2666
2667 (void) refcount_add(&mg->mg_alloc_queue_depth, tag);
2668 }
2669
2670 void
2671 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags)
2672 {
2673 metaslab_group_t *mg;
2674
2675 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2676 flags & METASLAB_DONT_THROTTLE)
2677 return;
2678
2679 mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2680 if (!mg->mg_class->mc_alloc_throttle_enabled)
2681 return;
2682
2683 (void) refcount_remove(&mg->mg_alloc_queue_depth, tag);
2684 }
2685
2686 void
2687 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag)
2688 {
2689 #ifdef ZFS_DEBUG
2690 const dva_t *dva = bp->blk_dva;
2691 int ndvas = BP_GET_NDVAS(bp);
2692 int d;
2693
2694 for (d = 0; d < ndvas; d++) {
2695 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
2696 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2697 VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag));
2698 }
2699 #endif
2700 }
2701
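/*
 * Allocate 'size' bytes from the metaslab using the class's allocator ops.
 * On success the range is removed from the in-core free tree and added to
 * this txg's alloctree (dirtying the metaslab if this is its first
 * allocation for the txg), and the starting offset is returned; on failure
 * -1ULL is returned.
 */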
2702 static uint64_t
2703 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
2704 {
2705 uint64_t start;
2706 range_tree_t *rt = msp->ms_tree;
2707 metaslab_class_t *mc = msp->ms_group->mg_class;
2708
2709 VERIFY(!msp->ms_condensing);
2710
2711 start = mc->mc_ops->msop_alloc(msp, size);
2712 if (start != -1ULL) {
2713 metaslab_group_t *mg = msp->ms_group;
2714 vdev_t *vd = mg->mg_vd;
2715
2716 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
2717 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2718 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
2719 range_tree_remove(rt, start, size);
2720
2721 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2722 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
2723
2724 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], start, size);
2725
2726 /* Track the last successful allocation */
2727 msp->ms_alloc_txg = txg;
2728 metaslab_verify_space(msp, txg);
2729 }
2730
2731 /*
2732 * Now that we've attempted the allocation we need to update the
2733 * metaslab's maximum block size since it may have changed.
2734 */
2735 msp->ms_max_size = metaslab_block_maxsize(msp);
2736 return (start);
2737 }
2738
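/*
 * Walk the group's metaslabs in weight order looking for one that can
 * satisfy an allocation of 'asize' bytes, activating (and therefore
 * loading) candidates as needed. Returns the allocated offset, or -1ULL if
 * no metaslab in this group could satisfy the request.
 */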
2739 static uint64_t
2740 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
2741 uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
2742 {
2743 metaslab_t *msp = NULL;
2744 metaslab_t *search;
2745 uint64_t offset = -1ULL;
2746 uint64_t activation_weight;
2747 uint64_t target_distance;
2748 int i;
2749
2750 activation_weight = METASLAB_WEIGHT_PRIMARY;
2751 for (i = 0; i < d; i++) {
2752 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
2753 activation_weight = METASLAB_WEIGHT_SECONDARY;
2754 break;
2755 }
2756 }
2757
2758 search = kmem_alloc(sizeof (*search), KM_SLEEP);
2759 search->ms_weight = UINT64_MAX;
2760 search->ms_start = 0;
2761 for (;;) {
2762 boolean_t was_active;
2763 avl_tree_t *t = &mg->mg_metaslab_tree;
2764 avl_index_t idx;
2765
2766 mutex_enter(&mg->mg_lock);
2767
2768 /*
2769 * Find the metaslab with the highest weight that is less
2770 * than what we've already tried. In the common case, this
2771 * means that we will examine each metaslab at most once.
2772 * Note that concurrent callers could reorder metaslabs
2773 * by activation/passivation once we have dropped the mg_lock.
2774 * If a metaslab is activated by another thread, and we fail
2775 * to allocate from the metaslab we have selected, we may
2776 * not try the newly-activated metaslab, and instead activate
2777 * another metaslab. This is not optimal, but generally
2778 * does not cause any problems (a possible exception being
2779 * if every metaslab is completely full except for the
2780 * newly-activated metaslab which we fail to examine).
2781 */
2782 msp = avl_find(t, search, &idx);
2783 if (msp == NULL)
2784 msp = avl_nearest(t, idx, AVL_AFTER);
2785 for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
2786
2787 if (!metaslab_should_allocate(msp, asize)) {
2788 metaslab_trace_add(zal, mg, msp, asize, d,
2789 TRACE_TOO_SMALL);
2790 continue;
2791 }
2792
2793 /*
2794 * If the selected metaslab is condensing, skip it.
2795 */
2796 if (msp->ms_condensing)
2797 continue;
2798
2799 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2800 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
2801 break;
2802
2803 target_distance = min_distance +
2804 (space_map_allocated(msp->ms_sm) != 0 ? 0 :
2805 min_distance >> 1);
2806
2807 for (i = 0; i < d; i++) {
2808 if (metaslab_distance(msp, &dva[i]) <
2809 target_distance)
2810 break;
2811 }
2812 if (i == d)
2813 break;
2814 }
2815 mutex_exit(&mg->mg_lock);
2816 if (msp == NULL) {
2817 kmem_free(search, sizeof (*search));
2818 return (-1ULL);
2819 }
2820 search->ms_weight = msp->ms_weight;
2821 search->ms_start = msp->ms_start + 1;
2822
2823 mutex_enter(&msp->ms_lock);
2824
2825 /*
2826 * Ensure that the metaslab we have selected is still
2827 * capable of handling our request. It's possible that
2828 * another thread may have changed the weight while we
2829 * were blocked on the metaslab lock. We check the
2830 * active status first to see if we need to reselect
2831 * a new metaslab.
2832 */
2833 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
2834 mutex_exit(&msp->ms_lock);
2835 continue;
2836 }
2837
2838 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
2839 activation_weight == METASLAB_WEIGHT_PRIMARY) {
2840 metaslab_passivate(msp,
2841 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
2842 mutex_exit(&msp->ms_lock);
2843 continue;
2844 }
2845
2846 if (metaslab_activate(msp, activation_weight) != 0) {
2847 mutex_exit(&msp->ms_lock);
2848 continue;
2849 }
2850 msp->ms_selected_txg = txg;
2851
2852 /*
2853 * Now that we have the lock, recheck to see if we should
2854 * continue to use this metaslab for this allocation. The
2855 * metaslab is now loaded so metaslab_should_allocate() can
2856 * accurately determine if the allocation attempt should
2857 * proceed.
2858 */
2859 if (!metaslab_should_allocate(msp, asize)) {
2860 /* Passivate this metaslab and select a new one. */
2861 metaslab_trace_add(zal, mg, msp, asize, d,
2862 TRACE_TOO_SMALL);
2863 goto next;
2864 }
2865
2867 /*
2868 * If this metaslab is currently condensing then pick again as
2869 * we can't manipulate this metaslab until it's committed
2870 * to disk.
2871 */
2872 if (msp->ms_condensing) {
2873 metaslab_trace_add(zal, mg, msp, asize, d,
2874 TRACE_CONDENSING);
2875 mutex_exit(&msp->ms_lock);
2876 continue;
2877 }
2878
2879 offset = metaslab_block_alloc(msp, asize, txg);
2880 metaslab_trace_add(zal, mg, msp, asize, d, offset);
2881
2882 if (offset != -1ULL) {
2883 /* Proactively passivate the metaslab, if needed */
2884 metaslab_segment_may_passivate(msp);
2885 break;
2886 }
2887 next:
2888 ASSERT(msp->ms_loaded);
2889
2890 /*
2891 * We were unable to allocate from this metaslab so determine
2892 * a new weight for this metaslab. Now that we have loaded
2893 * the metaslab we can provide a better hint to the metaslab
2894 * selector.
2895 *
2896 * For space-based metaslabs, we use the maximum block size.
2897 * This information is only available when the metaslab
2898 * is loaded and is more accurate than the generic free
2899 * space weight that was calculated by metaslab_weight().
2900 * This information allows us to quickly compare the maximum
2901 * available allocation in the metaslab to the allocation
2902 * size being requested.
2903 *
2904 * For segment-based metaslabs, determine the new weight
2905 * based on the highest bucket in the range tree. We
2906 * explicitly use the loaded segment weight (i.e. the range
2907 * tree histogram) since it contains the space that is
2908 * currently available for allocation and is accurate
2909 * even within a sync pass.
2910 */
2911 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
2912 uint64_t weight = metaslab_block_maxsize(msp);
2913 WEIGHT_SET_SPACEBASED(weight);
2914 metaslab_passivate(msp, weight);
2915 } else {
2916 metaslab_passivate(msp,
2917 metaslab_weight_from_range_tree(msp));
2918 }
2919
2920 /*
2921 * We have just failed an allocation attempt, check
2922 * that metaslab_should_allocate() agrees. Otherwise,
2923 * we may end up in an infinite loop retrying the same
2924 * metaslab.
2925 */
2926 ASSERT(!metaslab_should_allocate(msp, asize));
2927 mutex_exit(&msp->ms_lock);
2928 }
2929 mutex_exit(&msp->ms_lock);
2930 kmem_free(search, sizeof (*search));
2931 return (offset);
2932 }
2933
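/*
 * Wrapper around metaslab_group_alloc_normal() that updates the group's
 * allocation statistics and, if even a gang-header-sized allocation fails,
 * marks the group as out of free space so the allocation throttle can skip
 * it until space is freed.
 */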
2934 static uint64_t
2935 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
2936 uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
2937 {
2938 uint64_t offset;
2939 ASSERT(mg->mg_initialized);
2940
2941 offset = metaslab_group_alloc_normal(mg, zal, asize, txg,
2942 min_distance, dva, d);
2943
2944 mutex_enter(&mg->mg_lock);
2945 if (offset == -1ULL) {
2946 mg->mg_failed_allocations++;
2947 metaslab_trace_add(zal, mg, NULL, asize, d,
2948 TRACE_GROUP_FAILURE);
2949 if (asize == SPA_GANGBLOCKSIZE) {
2950 /*
2951 * This metaslab group was unable to allocate
2952 * the minimum gang block size so it must be out of
2953 * space. We must notify the allocation throttle
2954 * to start skipping allocation attempts to this
2955 * metaslab group until more space becomes available.
2956 * Note: this failure cannot be caused by the
2957 * allocation throttle since the allocation throttle
2958 * is only responsible for skipping devices and
2959 * not failing block allocations.
2960 */
2961 mg->mg_no_free_space = B_TRUE;
2962 }
2963 }
2964 mg->mg_allocations++;
2965 mutex_exit(&mg->mg_lock);
2966 return (offset);
2967 }
2968
2969 /*
2970 * If we have to write a ditto block (i.e. more than one DVA for a given BP)
2971 * on the same vdev as an existing DVA of this BP, then try to allocate it
2972 * at least (vdev_asize / (2 ^ ditto_same_vdev_distance_shift)) away from the
2973 * existing DVAs.
2974 */
2975 int ditto_same_vdev_distance_shift = 3;
2976
2977 /*
2978 * Allocate a block for the specified i/o.
2979 */
2980 static int
2981 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
2982 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
2983 zio_alloc_list_t *zal)
2984 {
2985 metaslab_group_t *mg, *fast_mg, *rotor;
2986 vdev_t *vd;
2987 boolean_t try_hard = B_FALSE;
2988
2989 ASSERT(!DVA_IS_VALID(&dva[d]));
2990
2991 /*
2992 * For testing, make some blocks above a certain size be gang blocks.
2993 */
2994 if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0) {
2995 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG);
2996 return (SET_ERROR(ENOSPC));
2997 }
2998
2999 /*
3000 * Start at the rotor and loop through all mgs until we find something.
3001 * Note that there's no locking on mc_rotor or mc_aliquot because
3002 * nothing actually breaks if we miss a few updates -- we just won't
3003 * allocate quite as evenly. It all balances out over time.
3004 *
3005 * If we are doing ditto or log blocks, try to spread them across
3006 * consecutive vdevs. If we're forced to reuse a vdev before we've
3007 * allocated all of our ditto blocks, then try and spread them out on
3008 * that vdev as much as possible. If it turns out to not be possible,
3009 * gradually lower our standards until anything becomes acceptable.
3010 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
3011 * gives us hope of containing our fault domains to something we're
3012 * able to reason about. Otherwise, any two top-level vdev failures
3013 * will guarantee the loss of data. With consecutive allocation,
3014 * only two adjacent top-level vdev failures will result in data loss.
3015 *
3016 * If we are doing gang blocks (hintdva is non-NULL), try to keep
3017 * ourselves on the same vdev as our gang block header. That
3018 * way, we can hope for locality in vdev_cache, plus it makes our
3019 * fault domains something tractable.
3020 */
3021 if (hintdva) {
3022 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
3023
3024 /*
3025 * It's possible the vdev we're using as the hint no
3026 * longer exists (i.e. removed). Consult the rotor when
3027 * all else fails.
3028 */
3029 if (vd != NULL) {
3030 mg = vd->vdev_mg;
3031
3032 if (flags & METASLAB_HINTBP_AVOID &&
3033 mg->mg_next != NULL)
3034 mg = mg->mg_next;
3035 } else {
3036 mg = mc->mc_rotor;
3037 }
3038 } else if (d != 0) {
3039 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
3040 mg = vd->vdev_mg->mg_next;
3041 } else if (flags & METASLAB_FASTWRITE) {
3042 mg = fast_mg = mc->mc_rotor;
3043
3044 do {
3045 if (fast_mg->mg_vd->vdev_pending_fastwrite <
3046 mg->mg_vd->vdev_pending_fastwrite)
3047 mg = fast_mg;
3048 } while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);
3049
3050 } else {
3051 mg = mc->mc_rotor;
3052 }
3053
3054 /*
3055 * If the hint put us into the wrong metaslab class, or into a
3056 * metaslab group that has been passivated, just follow the rotor.
3057 */
3058 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
3059 mg = mc->mc_rotor;
3060
3061 rotor = mg;
3062 top:
3063 do {
3064 boolean_t allocatable;
3065 uint64_t offset;
3066 uint64_t distance, asize;
3067
3068 ASSERT(mg->mg_activation_count == 1);
3069 vd = mg->mg_vd;
3070
3071 /*
3072 * Don't allocate from faulted devices.
3073 */
3074 if (try_hard) {
3075 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
3076 allocatable = vdev_allocatable(vd);
3077 spa_config_exit(spa, SCL_ZIO, FTAG);
3078 } else {
3079 allocatable = vdev_allocatable(vd);
3080 }
3081
3082 /*
3083 * Determine if the selected metaslab group is eligible
3084 * for allocations. If we're ganging then don't allow
3085 * this metaslab group to skip allocations since that would
3086 * inadvertently return ENOSPC and suspend the pool
3087 * even though space is still available.
3088 */
3089 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
3090 allocatable = metaslab_group_allocatable(mg, rotor,
3091 psize);
3092 }
3093
3094 if (!allocatable) {
3095 metaslab_trace_add(zal, mg, NULL, psize, d,
3096 TRACE_NOT_ALLOCATABLE);
3097 goto next;
3098 }
3099
3100 ASSERT(mg->mg_initialized);
3101
3102 /*
3103 * Avoid writing single-copy data to a failing,
3104 * non-redundant vdev, unless we've already tried all
3105 * other vdevs.
3106 */
3107 if ((vd->vdev_stat.vs_write_errors > 0 ||
3108 vd->vdev_state < VDEV_STATE_HEALTHY) &&
3109 d == 0 && !try_hard && vd->vdev_children == 0) {
3110 metaslab_trace_add(zal, mg, NULL, psize, d,
3111 TRACE_VDEV_ERROR);
3112 goto next;
3113 }
3114
3115 ASSERT(mg->mg_class == mc);
3116
3117 /*
3118 * If we don't need to try hard, then require that the
3119 * block be 1/8th of the device away from any other DVAs
3120 * in this BP. If we are trying hard, allow any offset
3121 * to be used (distance=0).
3122 */
3123 distance = 0;
3124 if (!try_hard) {
3125 distance = vd->vdev_asize >>
3126 ditto_same_vdev_distance_shift;
3127 if (distance <= (1ULL << vd->vdev_ms_shift))
3128 distance = 0;
3129 }
3130
3131 asize = vdev_psize_to_asize(vd, psize);
3132 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
3133
3134 offset = metaslab_group_alloc(mg, zal, asize, txg, distance,
3135 dva, d);
3136
3137 if (offset != -1ULL) {
3138 /*
3139 * If we've just selected this metaslab group,
3140 * figure out whether the corresponding vdev is
3141 * over- or under-used relative to the pool,
3142 * and set an allocation bias to even it out.
3143 *
3144 * Bias is also used to compensate for unequally
3145 * sized vdevs so that space is allocated fairly.
3146 */
3147 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
3148 vdev_stat_t *vs = &vd->vdev_stat;
3149 int64_t vs_free = vs->vs_space - vs->vs_alloc;
3150 int64_t mc_free = mc->mc_space - mc->mc_alloc;
3151 int64_t ratio;
3152
3153 /*
3154 * Calculate how much more or less we should
3155 * try to allocate from this device during
3156 * this iteration around the rotor.
3157 *
3158 * This basically introduces a zero-centered
3159 * bias towards the devices with the most
3160 * free space, while compensating for vdev
3161 * size differences.
3162 *
3163 * Examples:
3164 * vdev V1 = 16M/128M
3165 * vdev V2 = 16M/128M
3166 * ratio(V1) = 100% ratio(V2) = 100%
3167 *
3168 * vdev V1 = 16M/128M
3169 * vdev V2 = 64M/128M
3170 * ratio(V1) = 127% ratio(V2) = 72%
3171 *
3172 * vdev V1 = 16M/128M
3173 * vdev V2 = 64M/512M
3174 * ratio(V1) = 40% ratio(V2) = 160%
3175 */
3176 ratio = (vs_free * mc->mc_alloc_groups * 100) /
3177 (mc_free + 1);
3178 mg->mg_bias = ((ratio - 100) *
3179 (int64_t)mg->mg_aliquot) / 100;
3180 } else if (!metaslab_bias_enabled) {
3181 mg->mg_bias = 0;
3182 }
3183
3184 if ((flags & METASLAB_FASTWRITE) ||
3185 atomic_add_64_nv(&mc->mc_aliquot, asize) >=
3186 mg->mg_aliquot + mg->mg_bias) {
3187 mc->mc_rotor = mg->mg_next;
3188 mc->mc_aliquot = 0;
3189 }
3190
3191 DVA_SET_VDEV(&dva[d], vd->vdev_id);
3192 DVA_SET_OFFSET(&dva[d], offset);
3193 DVA_SET_GANG(&dva[d],
3194 ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
3195 DVA_SET_ASIZE(&dva[d], asize);
3196
3197 if (flags & METASLAB_FASTWRITE) {
3198 atomic_add_64(&vd->vdev_pending_fastwrite,
3199 psize);
3200 }
3201
3202 return (0);
3203 }
3204 next:
3205 mc->mc_rotor = mg->mg_next;
3206 mc->mc_aliquot = 0;
3207 } while ((mg = mg->mg_next) != rotor);
3208
3209 /*
3210 * If we haven't tried hard, do so now.
3211 */
3212 if (!try_hard) {
3213 try_hard = B_TRUE;
3214 goto top;
3215 }
3216
3217 bzero(&dva[d], sizeof (dva_t));
3218
3219 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC);
3220 return (SET_ERROR(ENOSPC));
3221 }
3222
3223 /*
3224 * Free the block represented by DVA in the context of the specified
3225 * transaction group.
3226 */
3227 static void
3228 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
3229 {
3230 uint64_t vdev = DVA_GET_VDEV(dva);
3231 uint64_t offset = DVA_GET_OFFSET(dva);
3232 uint64_t size = DVA_GET_ASIZE(dva);
3233 vdev_t *vd;
3234 metaslab_t *msp;
3235
3236 if (txg > spa_freeze_txg(spa))
3237 return;
3238
3239 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
3240 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
3241 zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
3242 (u_longlong_t)vdev, (u_longlong_t)offset,
3243 (u_longlong_t)size);
3244 return;
3245 }
3246
3247 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3248
3249 if (DVA_GET_GANG(dva))
3250 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3251
3252 mutex_enter(&msp->ms_lock);
3253
3254 if (now) {
3255 range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
3256 offset, size);
3257
3258 VERIFY(!msp->ms_condensing);
3259 VERIFY3U(offset, >=, msp->ms_start);
3260 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
3261 VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
3262 msp->ms_size);
3263 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3264 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3265 range_tree_add(msp->ms_tree, offset, size);
3266 msp->ms_max_size = metaslab_block_maxsize(msp);
3267 } else {
3268 VERIFY3U(txg, ==, spa->spa_syncing_txg);
3269 if (range_tree_space(msp->ms_freeingtree) == 0)
3270 vdev_dirty(vd, VDD_METASLAB, msp, txg);
3271 range_tree_add(msp->ms_freeingtree, offset, size);
3272 }
3273
3274 mutex_exit(&msp->ms_lock);
3275 }
3276
3277 /*
3278 * Intent log support: upon opening the pool after a crash, notify the SPA
3279 * of blocks that the intent log has allocated for immediate write, but
3280 * which are still considered free by the SPA because the last transaction
3281 * group didn't commit yet.
3282 */
3283 static int
3284 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
3285 {
3286 uint64_t vdev = DVA_GET_VDEV(dva);
3287 uint64_t offset = DVA_GET_OFFSET(dva);
3288 uint64_t size = DVA_GET_ASIZE(dva);
3289 vdev_t *vd;
3290 metaslab_t *msp;
3291 int error = 0;
3292
3293 ASSERT(DVA_IS_VALID(dva));
3294
3295 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
3296 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
3297 return (SET_ERROR(ENXIO));
3298
3299 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3300
3301 if (DVA_GET_GANG(dva))
3302 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3303
3304 mutex_enter(&msp->ms_lock);
3305
3306 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
3307 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
3308
3309 if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
3310 error = SET_ERROR(ENOENT);
3311
3312 if (error || txg == 0) { /* txg == 0 indicates dry run */
3313 mutex_exit(&msp->ms_lock);
3314 return (error);
3315 }
3316
3317 VERIFY(!msp->ms_condensing);
3318 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3319 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3320 VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
3321 range_tree_remove(msp->ms_tree, offset, size);
3322
3323 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
3324 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
3325 vdev_dirty(vd, VDD_METASLAB, msp, txg);
3326 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
3327 }
3328
3329 mutex_exit(&msp->ms_lock);
3330
3331 return (0);
3332 }
3333
3334 /*
3335 * Reserve some allocation slots. The reservation system must be called
3336 * before we call into the allocator. If there aren't any available slots
3337 * then the I/O will be throttled until an I/O completes and its slots are
3338 * freed up. The function returns true if it was successful in placing
3339 * the reservation.
3340 */
3341 boolean_t
3342 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
3343 int flags)
3344 {
3345 uint64_t available_slots = 0;
3346 uint64_t reserved_slots;
3347 boolean_t slot_reserved = B_FALSE;
3348
3349 ASSERT(mc->mc_alloc_throttle_enabled);
3350 mutex_enter(&mc->mc_lock);
3351
3352 reserved_slots = refcount_count(&mc->mc_alloc_slots);
3353 if (reserved_slots < mc->mc_alloc_max_slots)
3354 available_slots = mc->mc_alloc_max_slots - reserved_slots;
3355
3356 if (slots <= available_slots || GANG_ALLOCATION(flags)) {
3357 int d;
3358
3359 /*
3360 * We reserve the slots individually so that we can unreserve
3361 * them individually when an I/O completes.
3362 */
3363 for (d = 0; d < slots; d++) {
3364 reserved_slots = refcount_add(&mc->mc_alloc_slots, zio);
3365 }
3366 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
3367 slot_reserved = B_TRUE;
3368 }
3369
3370 mutex_exit(&mc->mc_lock);
3371 return (slot_reserved);
3372 }
3373
3374 void
3375 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio)
3376 {
3377 int d;
3378
3379 ASSERT(mc->mc_alloc_throttle_enabled);
3380 mutex_enter(&mc->mc_lock);
3381 for (d = 0; d < slots; d++) {
3382 (void) refcount_remove(&mc->mc_alloc_slots, zio);
3383 }
3384 mutex_exit(&mc->mc_lock);
3385 }
3386
3387 int
3388 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
3389 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
3390 zio_alloc_list_t *zal, zio_t *zio)
3391 {
3392 dva_t *dva = bp->blk_dva;
3393 dva_t *hintdva = hintbp->blk_dva;
3394 int d, error = 0;
3395
3396 ASSERT(bp->blk_birth == 0);
3397 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
3398
3399 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3400
3401 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
3402 spa_config_exit(spa, SCL_ALLOC, FTAG);
3403 return (SET_ERROR(ENOSPC));
3404 }
3405
3406 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
3407 ASSERT(BP_GET_NDVAS(bp) == 0);
3408 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
3409 ASSERT3P(zal, !=, NULL);
3410
3411 for (d = 0; d < ndvas; d++) {
3412 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
3413 txg, flags, zal);
3414 if (error != 0) {
3415 for (d--; d >= 0; d--) {
3416 metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
3417 metaslab_group_alloc_decrement(spa,
3418 DVA_GET_VDEV(&dva[d]), zio, flags);
3419 bzero(&dva[d], sizeof (dva_t));
3420 }
3421 spa_config_exit(spa, SCL_ALLOC, FTAG);
3422 return (error);
3423 } else {
3424 /*
3425 * Update the metaslab group's queue depth
3426 * based on the newly allocated dva.
3427 */
3428 metaslab_group_alloc_increment(spa,
3429 DVA_GET_VDEV(&dva[d]), zio, flags);
3430 }
3431
3432 }
3433 ASSERT(error == 0);
3434 ASSERT(BP_GET_NDVAS(bp) == ndvas);
3435
3436 spa_config_exit(spa, SCL_ALLOC, FTAG);
3437
3438 BP_SET_BIRTH(bp, txg, 0);
3439
3440 return (0);
3441 }
3442
3443 void
3444 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
3445 {
3446 const dva_t *dva = bp->blk_dva;
3447 int d, ndvas = BP_GET_NDVAS(bp);
3448
3449 ASSERT(!BP_IS_HOLE(bp));
3450 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
3451
3452 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
3453
3454 for (d = 0; d < ndvas; d++)
3455 metaslab_free_dva(spa, &dva[d], txg, now);
3456
3457 spa_config_exit(spa, SCL_FREE, FTAG);
3458 }
3459
3460 int
3461 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
3462 {
3463 const dva_t *dva = bp->blk_dva;
3464 int ndvas = BP_GET_NDVAS(bp);
3465 int d, error = 0;
3466
3467 ASSERT(!BP_IS_HOLE(bp));
3468
3469 if (txg != 0) {
3470 /*
3471 * First do a dry run to make sure all DVAs are claimable,
3472 * so we don't have to unwind from partial failures below.
3473 */
3474 if ((error = metaslab_claim(spa, bp, 0)) != 0)
3475 return (error);
3476 }
3477
3478 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3479
3480 for (d = 0; d < ndvas; d++)
3481 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
3482 break;
3483
3484 spa_config_exit(spa, SCL_ALLOC, FTAG);
3485
3486 ASSERT(error == 0 || txg == 0);
3487
3488 return (error);
3489 }
3490
3491 void
3492 metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
3493 {
3494 const dva_t *dva = bp->blk_dva;
3495 int ndvas = BP_GET_NDVAS(bp);
3496 uint64_t psize = BP_GET_PSIZE(bp);
3497 int d;
3498 vdev_t *vd;
3499
3500 ASSERT(!BP_IS_HOLE(bp));
3501 ASSERT(!BP_IS_EMBEDDED(bp));
3502 ASSERT(psize > 0);
3503
3504 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
3505
3506 for (d = 0; d < ndvas; d++) {
3507 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
3508 continue;
3509 atomic_add_64(&vd->vdev_pending_fastwrite, psize);
3510 }
3511
3512 spa_config_exit(spa, SCL_VDEV, FTAG);
3513 }
3514
3515 void
3516 metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
3517 {
3518 const dva_t *dva = bp->blk_dva;
3519 int ndvas = BP_GET_NDVAS(bp);
3520 uint64_t psize = BP_GET_PSIZE(bp);
3521 int d;
3522 vdev_t *vd;
3523
3524 ASSERT(!BP_IS_HOLE(bp));
3525 ASSERT(!BP_IS_EMBEDDED(bp));
3526 ASSERT(psize > 0);
3527
3528 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
3529
3530 for (d = 0; d < ndvas; d++) {
3531 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
3532 continue;
3533 ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
3534 atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
3535 }
3536
3537 spa_config_exit(spa, SCL_VDEV, FTAG);
3538 }
3539
3540 void
3541 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
3542 {
3543 int i, j;
3544
3545 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
3546 return;
3547
3548 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
3549 for (i = 0; i < BP_GET_NDVAS(bp); i++) {
3550 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
3551 vdev_t *vd = vdev_lookup_top(spa, vdev);
3552 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
3553 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
3554 metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3555
3556 if (msp->ms_loaded)
3557 range_tree_verify(msp->ms_tree, offset, size);
3558
3559 range_tree_verify(msp->ms_freeingtree, offset, size);
3560 range_tree_verify(msp->ms_freedtree, offset, size);
3561 for (j = 0; j < TXG_DEFER_SIZE; j++)
3562 range_tree_verify(msp->ms_defertree[j], offset, size);
3563 }
3564 spa_config_exit(spa, SCL_VDEV, FTAG);
3565 }
3566
3567 #if defined(_KERNEL) && defined(HAVE_SPL)
3568 /* CSTYLED */
3569 module_param(metaslab_aliquot, ulong, 0644);
3570 MODULE_PARM_DESC(metaslab_aliquot,
3571 "allocation granularity (a.k.a. stripe size)");
3572
3573 module_param(metaslab_debug_load, int, 0644);
3574 MODULE_PARM_DESC(metaslab_debug_load,
3575 "load all metaslabs when pool is first opened");
3576
3577 module_param(metaslab_debug_unload, int, 0644);
3578 MODULE_PARM_DESC(metaslab_debug_unload,
3579 "prevent metaslabs from being unloaded");
3580
3581 module_param(metaslab_preload_enabled, int, 0644);
3582 MODULE_PARM_DESC(metaslab_preload_enabled,
3583 "preload potential metaslabs during reassessment");
3584
3585 module_param(zfs_mg_noalloc_threshold, int, 0644);
3586 MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
3587 "percentage of free space for metaslab group to allow allocation");
3588
3589 module_param(zfs_mg_fragmentation_threshold, int, 0644);
3590 MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
3591 "fragmentation for metaslab group to allow allocation");
3592
3593 module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
3594 MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
3595 "fragmentation for metaslab to allow allocation");
3596
3597 module_param(metaslab_fragmentation_factor_enabled, int, 0644);
3598 MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
3599 "use the fragmentation metric to prefer less fragmented metaslabs");
3600
3601 module_param(metaslab_lba_weighting_enabled, int, 0644);
3602 MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
3603 "prefer metaslabs with lower LBAs");
3604
3605 module_param(metaslab_bias_enabled, int, 0644);
3606 MODULE_PARM_DESC(metaslab_bias_enabled,
3607 "enable metaslab group biasing");
3608
3609 module_param(zfs_metaslab_segment_weight_enabled, int, 0644);
3610 MODULE_PARM_DESC(zfs_metaslab_segment_weight_enabled,
3611 "enable segment-based metaslab selection");
3612
3613 module_param(zfs_metaslab_switch_threshold, int, 0644);
3614 MODULE_PARM_DESC(zfs_metaslab_switch_threshold,
3615 "segment-based metaslab selection maximum buckets before switching");
3616 #endif /* _KERNEL && HAVE_SPL */