1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 * Copyright (c) 2017, Intel Corporation.
26 */
27
28 #include <sys/zfs_context.h>
29 #include <sys/dmu.h>
30 #include <sys/dmu_tx.h>
31 #include <sys/space_map.h>
32 #include <sys/metaslab_impl.h>
33 #include <sys/vdev_impl.h>
34 #include <sys/zio.h>
35 #include <sys/spa_impl.h>
36 #include <sys/zfeature.h>
37 #include <sys/vdev_indirect_mapping.h>
38 #include <sys/zap.h>
39
40 #define WITH_DF_BLOCK_ALLOCATOR
41
42 #define GANG_ALLOCATION(flags) \
43 ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
44
45 /*
46 * Metaslab granularity, in bytes. This is roughly similar to what would be
47 * referred to as the "stripe size" in traditional RAID arrays. In normal
48 * operation, we will try to write this amount of data to a top-level vdev
49 * before moving on to the next one.
50 */
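/*
 * Illustrative note (not in the original source): with the default of
 * 512K and a top-level vdev built from N children, the group aliquot
 * becomes 512K * N (see metaslab_group_activate() below), so roughly
 * that much data goes to one top-level vdev before the rotor advances.
 */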
51 unsigned long metaslab_aliquot = 512 << 10;
52
53 /*
54 * For testing, make some blocks above a certain size be gang blocks.
55 */
56 unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
57
58 /*
59 * In pools where the log space map feature is not enabled we touch
60 * multiple metaslabs (and their respective space maps) with each
61 * transaction group. Thus, we benefit from having a small space map
62 * block size since it allows us to issue more I/O operations scattered
63 * around the disk. So a sane default for the space map block size
64 * is 8~16K.
65 */
66 int zfs_metaslab_sm_blksz_no_log = (1 << 14);
67
68 /*
69 * When the log space map feature is enabled, we accumulate a lot of
70 * changes per metaslab that are flushed once in a while so we benefit
71 * from a bigger block size like 128K for the metaslab space maps.
72 */
73 int zfs_metaslab_sm_blksz_with_log = (1 << 17);
74
75 /*
76 * The in-core space map representation is more compact than its on-disk form.
77 * The zfs_condense_pct determines how much more compact the in-core
78 * space map representation must be before we compact it on-disk.
79 * Values should be greater than or equal to 100.
80 */
81 int zfs_condense_pct = 200;
82
83 /*
84 * Condensing a metaslab is not guaranteed to actually reduce the amount of
85 * space used on disk. In particular, a space map uses data in increments of
86 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
87 * same number of blocks after condensing. Since the goal of condensing is to
88 * reduce the number of IOPs required to read the space map, we only want to
89 * condense when we can be sure we will reduce the number of blocks used by the
90 * space map. Unfortunately, we cannot precisely compute whether or not this is
91 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
92 * we apply the following heuristic: do not condense a spacemap unless the
93 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
94 * blocks.
95 */
96 int zfs_metaslab_condense_block_threshold = 4;
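/*
 * Worked example (illustrative, not part of the original source): with
 * the log space map feature enabled, space map space is consumed in
 * zfs_metaslab_sm_blksz_with_log (128K) increments, so a space map is
 * only worth condensing once its on-disk size exceeds roughly
 * 4 * 128K = 512K.
 */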
97
98 /*
99 * The zfs_mg_noalloc_threshold defines which metaslab groups should
100 * be eligible for allocation. The value is defined as a percentage of
101 * free space. Metaslab groups that have more free space than
102 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
103 * a metaslab group's free space is less than or equal to the
104 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
105 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
106 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
107 * groups are allowed to accept allocations. Gang blocks are always
108 * eligible to allocate on any metaslab group. The default value of 0 means
109 * no metaslab group will be excluded based on this criterion.
110 */
111 int zfs_mg_noalloc_threshold = 0;
112
113 /*
114 * Metaslab groups are considered eligible for allocations if their
115 * fragmentation metric (measured as a percentage) is less than or
116 * equal to zfs_mg_fragmentation_threshold. If a metaslab group
117 * exceeds this threshold then it will be skipped unless all metaslab
118 * groups within the metaslab class have also crossed this threshold.
119 *
120 * This tunable was introduced to avoid edge cases where we continue
121 * allocating from very fragmented disks in our pool while other, less
122 * fragmented disks exist. On the other hand, if all disks in the
123 * pool are uniformly approaching the threshold, the threshold can
124 * be a speed bump in performance, where we keep switching the disks
125 * that we allocate from (e.g. we allocate some segments from disk A,
126 * pushing it past the threshold, while frees on disk B bring its
127 * fragmentation back below the threshold).
128 *
129 * Empirically, we've seen that our vdev selection for allocations is
130 * good enough that fragmentation increases uniformly across all vdevs
131 * the majority of the time. Thus we set the threshold percentage high
132 * enough to avoid hitting the speed bump on pools that are being pushed
133 * to the edge.
134 */
135 int zfs_mg_fragmentation_threshold = 95;
136
137 /*
138 * Allow metaslabs to keep their active state as long as their fragmentation
139 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
140 * active metaslab that exceeds this threshold will no longer keep its active
141 * status allowing better metaslabs to be selected.
142 */
143 int zfs_metaslab_fragmentation_threshold = 70;
144
145 /*
146 * When set will load all metaslabs when pool is first opened.
147 */
148 int metaslab_debug_load = 0;
149
150 /*
151 * When set will prevent metaslabs from being unloaded.
152 */
153 int metaslab_debug_unload = 0;
154
155 /*
156 * Minimum size which forces the dynamic allocator to change
157 * its allocation strategy. Once the space map cannot satisfy
158 * an allocation of this size then it switches to using a more
159 * aggressive strategy (i.e. search by size rather than offset).
160 */
161 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
162
163 /*
164 * The minimum free space, in percent, which must be available
165 * in a space map to continue allocations in a first-fit fashion.
166 * Once the space map's free space drops below this level we dynamically
167 * switch to using best-fit allocations.
168 */
169 int metaslab_df_free_pct = 4;
170
171 /*
172 * Maximum distance to search forward from the last offset. Without this
173 * limit, fragmented pools can see >100,000 iterations and
174 * metaslab_block_picker() becomes the performance limiting factor on
175 * high-performance storage.
176 *
177 * With the default setting of 16MB, we typically see less than 500
178 * iterations, even with very fragmented, ashift=9 pools. The maximum number
179 * of iterations possible is:
180 * metaslab_df_max_search / (2 * (1<<ashift))
181 * With the default setting of 16MB this is 16*1024 (with ashift=9) or
182 * 2048 (with ashift=12).
183 */
184 int metaslab_df_max_search = 16 * 1024 * 1024;
185
186 /*
187 * If we are not searching forward (due to metaslab_df_max_search,
188 * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
189 * controls what segment is used. If it is set, we will use the largest free
190 * segment. If it is not set, we will use a segment of exactly the requested
191 * size (or larger).
192 */
193 int metaslab_df_use_largest_segment = B_FALSE;
194
195 /*
196 * Percentage of all cpus that can be used by the metaslab taskq.
197 */
198 int metaslab_load_pct = 50;
199
200 /*
201 * Determines how many txgs a metaslab may remain loaded without having any
202 * allocations from it. As long as a metaslab continues to be used we will
203 * keep it loaded.
204 */
205 int metaslab_unload_delay = TXG_SIZE * 2;
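/* Illustrative note: with TXG_SIZE of 4, this default works out to 8 txgs. */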
206
207 /*
208 * Max number of metaslabs per group to preload.
209 */
210 int metaslab_preload_limit = SPA_DVAS_PER_BP;
211
212 /*
213 * Enable/disable preloading of metaslabs.
214 */
215 int metaslab_preload_enabled = B_TRUE;
216
217 /*
218 * Enable/disable fragmentation weighting on metaslabs.
219 */
220 int metaslab_fragmentation_factor_enabled = B_TRUE;
221
222 /*
223 * Enable/disable lba weighting (i.e. outer tracks are given preference).
224 */
225 int metaslab_lba_weighting_enabled = B_TRUE;
226
227 /*
228 * Enable/disable metaslab group biasing.
229 */
230 int metaslab_bias_enabled = B_TRUE;
231
232 /*
233 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
234 */
235 boolean_t zfs_remap_blkptr_enable = B_TRUE;
236
237 /*
238 * Enable/disable segment-based metaslab selection.
239 */
240 int zfs_metaslab_segment_weight_enabled = B_TRUE;
241
242 /*
243 * When using segment-based metaslab selection, we will continue
244 * allocating from the active metaslab until we have exhausted
245 * zfs_metaslab_switch_threshold of its buckets.
246 */
247 int zfs_metaslab_switch_threshold = 2;
248
249 /*
250 * Internal switch to enable/disable the metaslab allocation tracing
251 * facility.
252 */
253 #ifdef _METASLAB_TRACING
254 boolean_t metaslab_trace_enabled = B_TRUE;
255 #endif
256
257 /*
258 * Maximum entries that the metaslab allocation tracing facility will keep
259 * in a given list when running in non-debug mode. We limit the number
260 * of entries in non-debug mode to prevent us from using up too much memory.
261 * The limit should be sufficiently large that we don't expect any allocation
262 * to ever exceed this value. In debug mode, the system will panic if this
263 * limit is ever reached allowing for further investigation.
264 */
265 #ifdef _METASLAB_TRACING
266 uint64_t metaslab_trace_max_entries = 5000;
267 #endif
268
269 /*
270 * Maximum number of metaslabs per group that can be disabled
271 * simultaneously.
272 */
273 int max_disabled_ms = 3;
274
275 static uint64_t metaslab_weight(metaslab_t *);
276 static void metaslab_set_fragmentation(metaslab_t *);
277 static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
278 static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
279
280 static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
281 static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
282 static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
283 #ifdef _METASLAB_TRACING
284 kmem_cache_t *metaslab_alloc_trace_cache;
285 #endif
286
287 /*
288 * ==========================================================================
289 * Metaslab classes
290 * ==========================================================================
291 */
292 metaslab_class_t *
293 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
294 {
295 metaslab_class_t *mc;
296
297 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
298
299 mc->mc_spa = spa;
300 mc->mc_rotor = NULL;
301 mc->mc_ops = ops;
302 mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
303 mc->mc_alloc_slots = kmem_zalloc(spa->spa_alloc_count *
304 sizeof (zfs_refcount_t), KM_SLEEP);
305 mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
306 sizeof (uint64_t), KM_SLEEP);
307 for (int i = 0; i < spa->spa_alloc_count; i++)
308 zfs_refcount_create_tracked(&mc->mc_alloc_slots[i]);
309
310 return (mc);
311 }
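/*
 * Illustrative usage sketch (not part of the original source): the spa
 * code typically creates one class per allocation class at activation
 * time, along the lines of
 *
 *	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
 *
 * and releases it with metaslab_class_destroy() on deactivation.
 */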
312
313 void
314 metaslab_class_destroy(metaslab_class_t *mc)
315 {
316 ASSERT(mc->mc_rotor == NULL);
317 ASSERT(mc->mc_alloc == 0);
318 ASSERT(mc->mc_deferred == 0);
319 ASSERT(mc->mc_space == 0);
320 ASSERT(mc->mc_dspace == 0);
321
322 for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
323 zfs_refcount_destroy(&mc->mc_alloc_slots[i]);
324 kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
325 sizeof (zfs_refcount_t));
326 kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
327 sizeof (uint64_t));
328 mutex_destroy(&mc->mc_lock);
329 kmem_free(mc, sizeof (metaslab_class_t));
330 }
331
332 int
333 metaslab_class_validate(metaslab_class_t *mc)
334 {
335 metaslab_group_t *mg;
336 vdev_t *vd;
337
338 /*
339 * Must hold one of the spa_config locks.
340 */
341 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
342 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
343
344 if ((mg = mc->mc_rotor) == NULL)
345 return (0);
346
347 do {
348 vd = mg->mg_vd;
349 ASSERT(vd->vdev_mg != NULL);
350 ASSERT3P(vd->vdev_top, ==, vd);
351 ASSERT3P(mg->mg_class, ==, mc);
352 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
353 } while ((mg = mg->mg_next) != mc->mc_rotor);
354
355 return (0);
356 }
357
358 static void
359 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
360 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
361 {
362 atomic_add_64(&mc->mc_alloc, alloc_delta);
363 atomic_add_64(&mc->mc_deferred, defer_delta);
364 atomic_add_64(&mc->mc_space, space_delta);
365 atomic_add_64(&mc->mc_dspace, dspace_delta);
366 }
367
368 uint64_t
369 metaslab_class_get_alloc(metaslab_class_t *mc)
370 {
371 return (mc->mc_alloc);
372 }
373
374 uint64_t
375 metaslab_class_get_deferred(metaslab_class_t *mc)
376 {
377 return (mc->mc_deferred);
378 }
379
380 uint64_t
381 metaslab_class_get_space(metaslab_class_t *mc)
382 {
383 return (mc->mc_space);
384 }
385
386 uint64_t
387 metaslab_class_get_dspace(metaslab_class_t *mc)
388 {
389 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
390 }
391
392 void
393 metaslab_class_histogram_verify(metaslab_class_t *mc)
394 {
395 spa_t *spa = mc->mc_spa;
396 vdev_t *rvd = spa->spa_root_vdev;
397 uint64_t *mc_hist;
398 int i;
399
400 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
401 return;
402
403 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
404 KM_SLEEP);
405
406 for (int c = 0; c < rvd->vdev_children; c++) {
407 vdev_t *tvd = rvd->vdev_child[c];
408 metaslab_group_t *mg = tvd->vdev_mg;
409
410 /*
411 * Skip any holes, uninitialized top-levels, or
412 * vdevs that are not in this metaslab class.
413 */
414 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
415 mg->mg_class != mc) {
416 continue;
417 }
418
419 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
420 mc_hist[i] += mg->mg_histogram[i];
421 }
422
423 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
424 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
425
426 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
427 }
428
429 /*
430 * Calculate the metaslab class's fragmentation metric. The metric
431 * is weighted based on the space contribution of each metaslab group.
432 * The return value will be a number between 0 and 100 (inclusive), or
433 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
434 * zfs_frag_table for more information about the metric.
435 */
436 uint64_t
437 metaslab_class_fragmentation(metaslab_class_t *mc)
438 {
439 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
440 uint64_t fragmentation = 0;
441
442 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
443
444 for (int c = 0; c < rvd->vdev_children; c++) {
445 vdev_t *tvd = rvd->vdev_child[c];
446 metaslab_group_t *mg = tvd->vdev_mg;
447
448 /*
449 * Skip any holes, uninitialized top-levels,
450 * or vdevs that are not in this metaslab class.
451 */
452 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
453 mg->mg_class != mc) {
454 continue;
455 }
456
457 /*
458 * If a metaslab group does not contain a fragmentation
459 * metric then just bail out.
460 */
461 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
462 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
463 return (ZFS_FRAG_INVALID);
464 }
465
466 /*
467 * Determine how much this metaslab_group is contributing
468 * to the overall pool fragmentation metric.
469 */
470 fragmentation += mg->mg_fragmentation *
471 metaslab_group_get_space(mg);
472 }
473 fragmentation /= metaslab_class_get_space(mc);
474
475 ASSERT3U(fragmentation, <=, 100);
476 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
477 return (fragmentation);
478 }
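/*
 * Worked example for the weighted average above (illustrative): a class
 * with a 1TB group at 60% fragmentation and a 3TB group at 20% yields
 * (60 * 1T + 20 * 3T) / 4T = 30% class-wide fragmentation.
 */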
479
480 /*
481 * Calculate the amount of expandable space that is available in
482 * this metaslab class. If a device is expanded then its expandable
483 * space will be the amount of allocatable space that is currently not
484 * part of this metaslab class.
485 */
486 uint64_t
487 metaslab_class_expandable_space(metaslab_class_t *mc)
488 {
489 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
490 uint64_t space = 0;
491
492 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
493 for (int c = 0; c < rvd->vdev_children; c++) {
494 vdev_t *tvd = rvd->vdev_child[c];
495 metaslab_group_t *mg = tvd->vdev_mg;
496
497 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
498 mg->mg_class != mc) {
499 continue;
500 }
501
502 /*
503 * Calculate if we have enough space to add additional
504 * metaslabs. We report the expandable space in terms
505 * of the metaslab size since that's the unit of expansion.
506 */
507 space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
508 1ULL << tvd->vdev_ms_shift);
509 }
510 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
511 return (space);
512 }
513
514 static int
515 metaslab_compare(const void *x1, const void *x2)
516 {
517 const metaslab_t *m1 = (const metaslab_t *)x1;
518 const metaslab_t *m2 = (const metaslab_t *)x2;
519
520 int sort1 = 0;
521 int sort2 = 0;
522 if (m1->ms_allocator != -1 && m1->ms_primary)
523 sort1 = 1;
524 else if (m1->ms_allocator != -1 && !m1->ms_primary)
525 sort1 = 2;
526 if (m2->ms_allocator != -1 && m2->ms_primary)
527 sort2 = 1;
528 else if (m2->ms_allocator != -1 && !m2->ms_primary)
529 sort2 = 2;
530
531 /*
532 * Sort inactive metaslabs first, then primaries, then secondaries. When
533 * selecting a metaslab to allocate from, an allocator first tries its
534 * primary, then secondary active metaslab. If it doesn't have active
535 * metaslabs, or can't allocate from them, it searches for an inactive
536 * metaslab to activate. If it can't find a suitable one, it will steal
537 * a primary or secondary metaslab from another allocator.
538 */
539 if (sort1 < sort2)
540 return (-1);
541 if (sort1 > sort2)
542 return (1);
543
544 int cmp = AVL_CMP(m2->ms_weight, m1->ms_weight);
545 if (likely(cmp))
546 return (cmp);
547
548 IMPLY(AVL_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
549
550 return (AVL_CMP(m1->ms_start, m2->ms_start));
551 }
552
553 /*
554 * ==========================================================================
555 * Metaslab groups
556 * ==========================================================================
557 */
558 /*
559 * Update the allocatable flag and the metaslab group's capacity.
560 * The allocatable flag is set to true if the free capacity is above
561 * zfs_mg_noalloc_threshold and the fragmentation value is less than
562 * or equal to zfs_mg_fragmentation_threshold. If a metaslab group
563 * transitions from allocatable to non-allocatable or vice versa then the
564 * metaslab group's class is updated to reflect the transition.
565 */
566 static void
567 metaslab_group_alloc_update(metaslab_group_t *mg)
568 {
569 vdev_t *vd = mg->mg_vd;
570 metaslab_class_t *mc = mg->mg_class;
571 vdev_stat_t *vs = &vd->vdev_stat;
572 boolean_t was_allocatable;
573 boolean_t was_initialized;
574
575 ASSERT(vd == vd->vdev_top);
576 ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
577 SCL_ALLOC);
578
579 mutex_enter(&mg->mg_lock);
580 was_allocatable = mg->mg_allocatable;
581 was_initialized = mg->mg_initialized;
582
583 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
584 (vs->vs_space + 1);
585
586 mutex_enter(&mc->mc_lock);
587
588 /*
589 * If the metaslab group was just added then it won't
590 * have any space until we finish syncing out this txg.
591 * At that point we will consider it initialized and available
592 * for allocations. We also don't consider non-activated
593 * metaslab groups (e.g. vdevs that are in the middle of being removed)
594 * to be initialized, because they can't be used for allocation.
595 */
596 mg->mg_initialized = metaslab_group_initialized(mg);
597 if (!was_initialized && mg->mg_initialized) {
598 mc->mc_groups++;
599 } else if (was_initialized && !mg->mg_initialized) {
600 ASSERT3U(mc->mc_groups, >, 0);
601 mc->mc_groups--;
602 }
603 if (mg->mg_initialized)
604 mg->mg_no_free_space = B_FALSE;
605
606 /*
607 * A metaslab group is considered allocatable if it has plenty
608 * of free space or is not heavily fragmented. We only take
609 * fragmentation into account if the metaslab group has a valid
610 * fragmentation metric (i.e. a value between 0 and 100).
611 */
612 mg->mg_allocatable = (mg->mg_activation_count > 0 &&
613 mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
614 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
615 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
616
617 /*
618 * The mc_alloc_groups maintains a count of the number of
619 * groups in this metaslab class that are still above the
620 * zfs_mg_noalloc_threshold. This is used by the allocating
621 * threads to determine if they should avoid allocations to
622 * a given group. The allocator will avoid allocations to a group
623 * if that group has reached or is below the zfs_mg_noalloc_threshold
624 * and there are still other groups that are above the threshold.
625 * When a group transitions from allocatable to non-allocatable or
626 * vice versa we update the metaslab class to reflect that change.
627 * When the mc_alloc_groups value drops to 0 that means that all
628 * groups have reached the zfs_mg_noalloc_threshold making all groups
629 * eligible for allocations. This effectively means that all devices
630 * are balanced again.
631 */
632 if (was_allocatable && !mg->mg_allocatable)
633 mc->mc_alloc_groups--;
634 else if (!was_allocatable && mg->mg_allocatable)
635 mc->mc_alloc_groups++;
636 mutex_exit(&mc->mc_lock);
637
638 mutex_exit(&mg->mg_lock);
639 }
640
641 int
642 metaslab_sort_by_flushed(const void *va, const void *vb)
643 {
644 const metaslab_t *a = va;
645 const metaslab_t *b = vb;
646
647 int cmp = AVL_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
648 if (likely(cmp))
649 return (cmp);
650
651 uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
652 uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
653 cmp = AVL_CMP(a_vdev_id, b_vdev_id);
654 if (cmp)
655 return (cmp);
656
657 return (AVL_CMP(a->ms_id, b->ms_id));
658 }
659
660 metaslab_group_t *
661 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
662 {
663 metaslab_group_t *mg;
664
665 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
666 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
667 mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
668 cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
669 mg->mg_primaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
670 KM_SLEEP);
671 mg->mg_secondaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
672 KM_SLEEP);
673 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
674 sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
675 mg->mg_vd = vd;
676 mg->mg_class = mc;
677 mg->mg_activation_count = 0;
678 mg->mg_initialized = B_FALSE;
679 mg->mg_no_free_space = B_TRUE;
680 mg->mg_allocators = allocators;
681
682 mg->mg_alloc_queue_depth = kmem_zalloc(allocators *
683 sizeof (zfs_refcount_t), KM_SLEEP);
684 mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
685 sizeof (uint64_t), KM_SLEEP);
686 for (int i = 0; i < allocators; i++) {
687 zfs_refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
688 mg->mg_cur_max_alloc_queue_depth[i] = 0;
689 }
690
691 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
692 maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
693
694 return (mg);
695 }
696
697 void
698 metaslab_group_destroy(metaslab_group_t *mg)
699 {
700 ASSERT(mg->mg_prev == NULL);
701 ASSERT(mg->mg_next == NULL);
702 /*
703 * We may have gone below zero with the activation count
704 * either because we never activated in the first place or
705 * because we're done, and possibly removing the vdev.
706 */
707 ASSERT(mg->mg_activation_count <= 0);
708
709 taskq_destroy(mg->mg_taskq);
710 avl_destroy(&mg->mg_metaslab_tree);
711 kmem_free(mg->mg_primaries, mg->mg_allocators * sizeof (metaslab_t *));
712 kmem_free(mg->mg_secondaries, mg->mg_allocators *
713 sizeof (metaslab_t *));
714 mutex_destroy(&mg->mg_lock);
715 mutex_destroy(&mg->mg_ms_disabled_lock);
716 cv_destroy(&mg->mg_ms_disabled_cv);
717
718 for (int i = 0; i < mg->mg_allocators; i++) {
719 zfs_refcount_destroy(&mg->mg_alloc_queue_depth[i]);
720 mg->mg_cur_max_alloc_queue_depth[i] = 0;
721 }
722 kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
723 sizeof (zfs_refcount_t));
724 kmem_free(mg->mg_cur_max_alloc_queue_depth, mg->mg_allocators *
725 sizeof (uint64_t));
726
727 kmem_free(mg, sizeof (metaslab_group_t));
728 }
729
730 void
731 metaslab_group_activate(metaslab_group_t *mg)
732 {
733 metaslab_class_t *mc = mg->mg_class;
734 metaslab_group_t *mgprev, *mgnext;
735
736 ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER), !=, 0);
737
738 ASSERT(mc->mc_rotor != mg);
739 ASSERT(mg->mg_prev == NULL);
740 ASSERT(mg->mg_next == NULL);
741 ASSERT(mg->mg_activation_count <= 0);
742
743 if (++mg->mg_activation_count <= 0)
744 return;
745
746 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
747 metaslab_group_alloc_update(mg);
748
749 if ((mgprev = mc->mc_rotor) == NULL) {
750 mg->mg_prev = mg;
751 mg->mg_next = mg;
752 } else {
753 mgnext = mgprev->mg_next;
754 mg->mg_prev = mgprev;
755 mg->mg_next = mgnext;
756 mgprev->mg_next = mg;
757 mgnext->mg_prev = mg;
758 }
759 mc->mc_rotor = mg;
760 }
761
762 /*
763 * Passivate a metaslab group and remove it from the allocation rotor.
764 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
765 * a metaslab group. This function will momentarily drop spa_config_locks
766 * that are lower than the SCL_ALLOC lock (see comment below).
767 */
768 void
769 metaslab_group_passivate(metaslab_group_t *mg)
770 {
771 metaslab_class_t *mc = mg->mg_class;
772 spa_t *spa = mc->mc_spa;
773 metaslab_group_t *mgprev, *mgnext;
774 int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
775
776 ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
777 (SCL_ALLOC | SCL_ZIO));
778
779 if (--mg->mg_activation_count != 0) {
780 ASSERT(mc->mc_rotor != mg);
781 ASSERT(mg->mg_prev == NULL);
782 ASSERT(mg->mg_next == NULL);
783 ASSERT(mg->mg_activation_count < 0);
784 return;
785 }
786
787 /*
788 * The spa_config_lock is an array of rwlocks, ordered as
789 * follows (from highest to lowest):
790 * SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
791 * SCL_ZIO > SCL_FREE > SCL_VDEV
792 * (For more information about the spa_config_lock see spa_misc.c)
793 * The higher the lock, the broader its coverage. When we passivate
794 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
795 * config locks. However, the metaslab group's taskq might be trying
796 * to preload metaslabs so we must drop the SCL_ZIO lock and any
797 * lower locks to allow the I/O to complete. At a minimum,
798 * we continue to hold the SCL_ALLOC lock, which prevents any future
799 * allocations from taking place and any changes to the vdev tree.
800 */
801 spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
802 taskq_wait_outstanding(mg->mg_taskq, 0);
803 spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
804 metaslab_group_alloc_update(mg);
805 for (int i = 0; i < mg->mg_allocators; i++) {
806 metaslab_t *msp = mg->mg_primaries[i];
807 if (msp != NULL) {
808 mutex_enter(&msp->ms_lock);
809 metaslab_passivate(msp,
810 metaslab_weight_from_range_tree(msp));
811 mutex_exit(&msp->ms_lock);
812 }
813 msp = mg->mg_secondaries[i];
814 if (msp != NULL) {
815 mutex_enter(&msp->ms_lock);
816 metaslab_passivate(msp,
817 metaslab_weight_from_range_tree(msp));
818 mutex_exit(&msp->ms_lock);
819 }
820 }
821
822 mgprev = mg->mg_prev;
823 mgnext = mg->mg_next;
824
825 if (mg == mgnext) {
826 mc->mc_rotor = NULL;
827 } else {
828 mc->mc_rotor = mgnext;
829 mgprev->mg_next = mgnext;
830 mgnext->mg_prev = mgprev;
831 }
832
833 mg->mg_prev = NULL;
834 mg->mg_next = NULL;
835 }
836
837 boolean_t
838 metaslab_group_initialized(metaslab_group_t *mg)
839 {
840 vdev_t *vd = mg->mg_vd;
841 vdev_stat_t *vs = &vd->vdev_stat;
842
843 return (vs->vs_space != 0 && mg->mg_activation_count > 0);
844 }
845
846 uint64_t
847 metaslab_group_get_space(metaslab_group_t *mg)
848 {
849 return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
850 }
851
852 void
853 metaslab_group_histogram_verify(metaslab_group_t *mg)
854 {
855 uint64_t *mg_hist;
856 vdev_t *vd = mg->mg_vd;
857 uint64_t ashift = vd->vdev_ashift;
858 int i;
859
860 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
861 return;
862
863 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
864 KM_SLEEP);
865
866 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
867 SPACE_MAP_HISTOGRAM_SIZE + ashift);
868
869 for (int m = 0; m < vd->vdev_ms_count; m++) {
870 metaslab_t *msp = vd->vdev_ms[m];
871
872 /* skip if not active or not a member */
873 if (msp->ms_sm == NULL || msp->ms_group != mg)
874 continue;
875
876 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
877 mg_hist[i + ashift] +=
878 msp->ms_sm->sm_phys->smp_histogram[i];
879 }
880
881 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
882 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
883
884 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
885 }
886
887 static void
888 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
889 {
890 metaslab_class_t *mc = mg->mg_class;
891 uint64_t ashift = mg->mg_vd->vdev_ashift;
892
893 ASSERT(MUTEX_HELD(&msp->ms_lock));
894 if (msp->ms_sm == NULL)
895 return;
896
897 mutex_enter(&mg->mg_lock);
898 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
899 mg->mg_histogram[i + ashift] +=
900 msp->ms_sm->sm_phys->smp_histogram[i];
901 mc->mc_histogram[i + ashift] +=
902 msp->ms_sm->sm_phys->smp_histogram[i];
903 }
904 mutex_exit(&mg->mg_lock);
905 }
906
907 void
908 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
909 {
910 metaslab_class_t *mc = mg->mg_class;
911 uint64_t ashift = mg->mg_vd->vdev_ashift;
912
913 ASSERT(MUTEX_HELD(&msp->ms_lock));
914 if (msp->ms_sm == NULL)
915 return;
916
917 mutex_enter(&mg->mg_lock);
918 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
919 ASSERT3U(mg->mg_histogram[i + ashift], >=,
920 msp->ms_sm->sm_phys->smp_histogram[i]);
921 ASSERT3U(mc->mc_histogram[i + ashift], >=,
922 msp->ms_sm->sm_phys->smp_histogram[i]);
923
924 mg->mg_histogram[i + ashift] -=
925 msp->ms_sm->sm_phys->smp_histogram[i];
926 mc->mc_histogram[i + ashift] -=
927 msp->ms_sm->sm_phys->smp_histogram[i];
928 }
929 mutex_exit(&mg->mg_lock);
930 }
931
932 static void
933 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
934 {
935 ASSERT(msp->ms_group == NULL);
936 mutex_enter(&mg->mg_lock);
937 msp->ms_group = mg;
938 msp->ms_weight = 0;
939 avl_add(&mg->mg_metaslab_tree, msp);
940 mutex_exit(&mg->mg_lock);
941
942 mutex_enter(&msp->ms_lock);
943 metaslab_group_histogram_add(mg, msp);
944 mutex_exit(&msp->ms_lock);
945 }
946
947 static void
948 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
949 {
950 mutex_enter(&msp->ms_lock);
951 metaslab_group_histogram_remove(mg, msp);
952 mutex_exit(&msp->ms_lock);
953
954 mutex_enter(&mg->mg_lock);
955 ASSERT(msp->ms_group == mg);
956 avl_remove(&mg->mg_metaslab_tree, msp);
957 msp->ms_group = NULL;
958 mutex_exit(&mg->mg_lock);
959 }
960
961 static void
962 metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
963 {
964 ASSERT(MUTEX_HELD(&msp->ms_lock));
965 ASSERT(MUTEX_HELD(&mg->mg_lock));
966 ASSERT(msp->ms_group == mg);
967
968 avl_remove(&mg->mg_metaslab_tree, msp);
969 msp->ms_weight = weight;
970 avl_add(&mg->mg_metaslab_tree, msp);
971
972 }
973
974 static void
975 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
976 {
977 /*
978 * Although in principle the weight can be any value, in
979 * practice we do not use values in the range [1, 511].
980 */
981 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
982 ASSERT(MUTEX_HELD(&msp->ms_lock));
983
984 mutex_enter(&mg->mg_lock);
985 metaslab_group_sort_impl(mg, msp, weight);
986 mutex_exit(&mg->mg_lock);
987 }
988
989 /*
990 * Calculate the fragmentation for a given metaslab group. We can use
991 * a simple average here since all metaslabs within the group must have
992 * the same size. The return value will be a value between 0 and 100
993 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
994 * group have a fragmentation metric.
995 */
996 uint64_t
997 metaslab_group_fragmentation(metaslab_group_t *mg)
998 {
999 vdev_t *vd = mg->mg_vd;
1000 uint64_t fragmentation = 0;
1001 uint64_t valid_ms = 0;
1002
1003 for (int m = 0; m < vd->vdev_ms_count; m++) {
1004 metaslab_t *msp = vd->vdev_ms[m];
1005
1006 if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
1007 continue;
1008 if (msp->ms_group != mg)
1009 continue;
1010
1011 valid_ms++;
1012 fragmentation += msp->ms_fragmentation;
1013 }
1014
1015 if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
1016 return (ZFS_FRAG_INVALID);
1017
1018 fragmentation /= valid_ms;
1019 ASSERT3U(fragmentation, <=, 100);
1020 return (fragmentation);
1021 }
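/*
 * Illustrative note: given the "more than half" rule above, a group whose
 * vdev has 200 metaslabs needs at least 101 of them reporting a valid
 * ms_fragmentation before the group-level average is considered valid.
 */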
1022
1023 /*
1024 * Determine if a given metaslab group should skip allocations. A metaslab
1025 * group should avoid allocations if its free capacity is less than the
1026 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
1027 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
1028 * that can still handle allocations. If the allocation throttle is enabled
1029 * then we skip allocations to devices that have reached their maximum
1030 * allocation queue depth unless the selected metaslab group is the only
1031 * eligible group remaining.
1032 */
1033 static boolean_t
1034 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
1035 uint64_t psize, int allocator, int d)
1036 {
1037 spa_t *spa = mg->mg_vd->vdev_spa;
1038 metaslab_class_t *mc = mg->mg_class;
1039
1040 /*
1041 * We can only consider skipping this metaslab group if it's
1042 * in the normal, special, or dedup metaslab class and there are
1043 * other metaslab groups to select from. Otherwise, we always
1044 * consider it eligible for allocations.
1045 */
1046 if ((mc != spa_normal_class(spa) &&
1047 mc != spa_special_class(spa) &&
1048 mc != spa_dedup_class(spa)) ||
1049 mc->mc_groups <= 1)
1050 return (B_TRUE);
1051
1052 /*
1053 * If the metaslab group's mg_allocatable flag is set (see comments
1054 * in metaslab_group_alloc_update() for more information) and
1055 * the allocation throttle is disabled then allow allocations to this
1056 * device. However, if the allocation throttle is enabled then
1057 * check if we have reached our allocation limit (mg_alloc_queue_depth)
1058 * to determine if we should allow allocations to this metaslab group.
1059 * If all metaslab groups are no longer considered allocatable
1060 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
1061 * gang block size then we allow allocations on this metaslab group
1062 * regardless of the mg_allocatable or throttle settings.
1063 */
1064 if (mg->mg_allocatable) {
1065 metaslab_group_t *mgp;
1066 int64_t qdepth;
1067 uint64_t qmax = mg->mg_cur_max_alloc_queue_depth[allocator];
1068
1069 if (!mc->mc_alloc_throttle_enabled)
1070 return (B_TRUE);
1071
1072 /*
1073 * If this metaslab group does not have any free space, then
1074 * there is no point in looking further.
1075 */
1076 if (mg->mg_no_free_space)
1077 return (B_FALSE);
1078
1079 /*
1080 * Relax allocation throttling for ditto blocks. Due to
1081 * random imbalances in allocation, copies tend to be pushed to
1082 * the one vdev that looks a bit better at the moment.
1083 */
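/*
 * Illustrative example: for the first DVA (d == 0) the factor is 4/4
 * and qmax is unchanged; for a third copy (d == 2) it becomes 6/4,
 * i.e. the queue depth limit is relaxed by 50%.
 */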
1084 qmax = qmax * (4 + d) / 4;
1085
1086 qdepth = zfs_refcount_count(
1087 &mg->mg_alloc_queue_depth[allocator]);
1088
1089 /*
1090 * If this metaslab group is below its qmax or it's
1091 * the only allocatable metaslab group, then attempt
1092 * to allocate from it.
1093 */
1094 if (qdepth < qmax || mc->mc_alloc_groups == 1)
1095 return (B_TRUE);
1096 ASSERT3U(mc->mc_alloc_groups, >, 1);
1097
1098 /*
1099 * Since this metaslab group is at or over its qmax, we
1100 * need to determine if there are metaslab groups after this
1101 * one that might be able to handle this allocation. This is
1102 * racy since we can't hold the locks for all metaslab
1103 * groups at the same time when we make this check.
1104 */
1105 for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
1106 qmax = mgp->mg_cur_max_alloc_queue_depth[allocator];
1107 qmax = qmax * (4 + d) / 4;
1108 qdepth = zfs_refcount_count(
1109 &mgp->mg_alloc_queue_depth[allocator]);
1110
1111 /*
1112 * If there is another metaslab group that
1113 * might be able to handle the allocation, then
1114 * we return false so that we skip this group.
1115 */
1116 if (qdepth < qmax && !mgp->mg_no_free_space)
1117 return (B_FALSE);
1118 }
1119
1120 /*
1121 * We didn't find another group to handle the allocation
1122 * so we can't skip this metaslab group even though
1123 * we are at or over our qmax.
1124 */
1125 return (B_TRUE);
1126
1127 } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
1128 return (B_TRUE);
1129 }
1130 return (B_FALSE);
1131 }
1132
1133 /*
1134 * ==========================================================================
1135 * Range tree callbacks
1136 * ==========================================================================
1137 */
1138
1139 /*
1140 * Comparison function for the private size-ordered tree. Tree is sorted
1141 * by size, larger sizes at the end of the tree.
1142 */
1143 static int
1144 metaslab_rangesize_compare(const void *x1, const void *x2)
1145 {
1146 const range_seg_t *r1 = x1;
1147 const range_seg_t *r2 = x2;
1148 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1149 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1150
1151 int cmp = AVL_CMP(rs_size1, rs_size2);
1152 if (likely(cmp))
1153 return (cmp);
1154
1155 return (AVL_CMP(r1->rs_start, r2->rs_start));
1156 }
1157
1158 /*
1159 * ==========================================================================
1160 * Common allocator routines
1161 * ==========================================================================
1162 */
1163
1164 /*
1165 * Return the maximum contiguous segment within the metaslab.
1166 */
1167 uint64_t
1168 metaslab_block_maxsize(metaslab_t *msp)
1169 {
1170 avl_tree_t *t = &msp->ms_allocatable_by_size;
1171 range_seg_t *rs;
1172
1173 if (t == NULL || (rs = avl_last(t)) == NULL)
1174 return (0ULL);
1175
1176 return (rs->rs_end - rs->rs_start);
1177 }
1178
1179 static range_seg_t *
1180 metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
1181 {
1182 range_seg_t *rs, rsearch;
1183 avl_index_t where;
1184
1185 rsearch.rs_start = start;
1186 rsearch.rs_end = start + size;
1187
1188 rs = avl_find(t, &rsearch, &where);
1189 if (rs == NULL) {
1190 rs = avl_nearest(t, where, AVL_AFTER);
1191 }
1192
1193 return (rs);
1194 }
1195
1196 #if defined(WITH_DF_BLOCK_ALLOCATOR) || \
1197 defined(WITH_CF_BLOCK_ALLOCATOR)
1198 /*
1199 * This is a helper function that can be used by the allocator to find
1200 * a suitable block to allocate. This will search the specified AVL
1201 * tree looking for a block that matches the specified criteria.
1202 */
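/*
 * Illustrative example (not part of the original source): with *cursor at
 * 0 and free segments [4K, 12K) and [20K, 64K), a 16K request skips the
 * first segment (only 8K fits from its start), returns offset 20K, and
 * advances the cursor to 36K. If the walk goes more than max_search bytes
 * past the first candidate's start without success, the cursor is reset
 * and -1ULL is returned.
 */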
1203 static uint64_t
1204 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
1205 uint64_t max_search)
1206 {
1207 range_seg_t *rs = metaslab_block_find(t, *cursor, size);
1208 uint64_t first_found;
1209
1210 if (rs != NULL)
1211 first_found = rs->rs_start;
1212
1213 while (rs != NULL && rs->rs_start - first_found <= max_search) {
1214 uint64_t offset = rs->rs_start;
1215 if (offset + size <= rs->rs_end) {
1216 *cursor = offset + size;
1217 return (offset);
1218 }
1219 rs = AVL_NEXT(t, rs);
1220 }
1221
1222 *cursor = 0;
1223 return (-1ULL);
1224 }
1225 #endif /* WITH_DF/CF_BLOCK_ALLOCATOR */
1226
1227 #if defined(WITH_DF_BLOCK_ALLOCATOR)
1228 /*
1229 * ==========================================================================
1230 * Dynamic Fit (df) block allocator
1231 *
1232 * Search for a free chunk of at least this size, starting from the last
1233 * offset (for this alignment of block) looking for up to
1234 * metaslab_df_max_search bytes (16MB). If a large enough free chunk is not
1235 * found within 16MB, then return a free chunk of exactly the requested size (or
1236 * larger).
1237 *
1238 * If it seems like searching from the last offset will be unproductive, skip
1239 * that and just return a free chunk of exactly the requested size (or larger).
1240 * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This
1241 * mechanism is probably not very useful and may be removed in the future.
1242 *
1243 * The behavior when not searching can be changed to return the largest free
1244 * chunk, instead of a free chunk of exactly the requested size, by setting
1245 * metaslab_df_use_largest_segment.
1246 * ==========================================================================
1247 */
1248 static uint64_t
1249 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1250 {
1251 /*
1252 * Find the largest power of 2 block size that evenly divides the
1253 * requested size. This is used to try to allocate blocks with similar
1254 * alignment from the same area of the metaslab (i.e. same cursor
1255 * bucket), but it does not guarantee that allocations of other
1256 * sizes will not occur in the same region.
1257 */
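/*
 * Illustrative example: for a 24K request, size & -size yields 8K
 * (the largest power of two dividing 24K), so the cursor bucket
 * used is ms_lbas[highbit64(8K) - 1] = ms_lbas[13].
 */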
1258 uint64_t align = size & -size;
1259 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1260 range_tree_t *rt = msp->ms_allocatable;
1261 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1262 uint64_t offset;
1263
1264 ASSERT(MUTEX_HELD(&msp->ms_lock));
1265 ASSERT3U(avl_numnodes(&rt->rt_root), ==,
1266 avl_numnodes(&msp->ms_allocatable_by_size));
1267
1268 /*
1269 * If we're running low on space, find a segment based on size,
1270 * rather than iterating based on offset.
1271 */
1272 if (metaslab_block_maxsize(msp) < metaslab_df_alloc_threshold ||
1273 free_pct < metaslab_df_free_pct) {
1274 offset = -1;
1275 } else {
1276 offset = metaslab_block_picker(&rt->rt_root,
1277 cursor, size, metaslab_df_max_search);
1278 }
1279
1280 if (offset == -1) {
1281 range_seg_t *rs;
1282 if (metaslab_df_use_largest_segment) {
1283 /* use largest free segment */
1284 rs = avl_last(&msp->ms_allocatable_by_size);
1285 } else {
1286 /* use segment of this size, or next largest */
1287 rs = metaslab_block_find(&msp->ms_allocatable_by_size,
1288 0, size);
1289 }
1290 if (rs != NULL && rs->rs_start + size <= rs->rs_end) {
1291 offset = rs->rs_start;
1292 *cursor = offset + size;
1293 }
1294 }
1295
1296 return (offset);
1297 }
1298
1299 static metaslab_ops_t metaslab_df_ops = {
1300 metaslab_df_alloc
1301 };
1302
1303 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
1304 #endif /* WITH_DF_BLOCK_ALLOCATOR */
1305
1306 #if defined(WITH_CF_BLOCK_ALLOCATOR)
1307 /*
1308 * ==========================================================================
1309 * Cursor fit block allocator -
1310 * Select the largest region in the metaslab, set the cursor to the beginning
1311 * of the range and the cursor_end to the end of the range. As allocations
1312 * are made advance the cursor. Continue allocating from the cursor until
1313 * the range is exhausted and then find a new range.
1314 * ==========================================================================
1315 */
1316 static uint64_t
1317 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1318 {
1319 range_tree_t *rt = msp->ms_allocatable;
1320 avl_tree_t *t = &msp->ms_allocatable_by_size;
1321 uint64_t *cursor = &msp->ms_lbas[0];
1322 uint64_t *cursor_end = &msp->ms_lbas[1];
1323 uint64_t offset = 0;
1324
1325 ASSERT(MUTEX_HELD(&msp->ms_lock));
1326 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
1327
1328 ASSERT3U(*cursor_end, >=, *cursor);
1329
1330 if ((*cursor + size) > *cursor_end) {
1331 range_seg_t *rs;
1332
1333 rs = avl_last(&msp->ms_allocatable_by_size);
1334 if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1335 return (-1ULL);
1336
1337 *cursor = rs->rs_start;
1338 *cursor_end = rs->rs_end;
1339 }
1340
1341 offset = *cursor;
1342 *cursor += size;
1343
1344 return (offset);
1345 }
1346
1347 static metaslab_ops_t metaslab_cf_ops = {
1348 metaslab_cf_alloc
1349 };
1350
1351 metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
1352 #endif /* WITH_CF_BLOCK_ALLOCATOR */
1353
1354 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
1355 /*
1356 * ==========================================================================
1357 * New dynamic fit allocator -
1358 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1359 * contiguous blocks. If no region is found then just use the largest segment
1360 * that remains.
1361 * ==========================================================================
1362 */
1363
1364 /*
1365 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1366 * to request from the allocator.
1367 */
1368 uint64_t metaslab_ndf_clump_shift = 4;
1369
1370 static uint64_t
1371 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1372 {
1373 avl_tree_t *t = &msp->ms_allocatable->rt_root;
1374 avl_index_t where;
1375 range_seg_t *rs, rsearch;
1376 uint64_t hbit = highbit64(size);
1377 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1378 uint64_t max_size = metaslab_block_maxsize(msp);
1379
1380 ASSERT(MUTEX_HELD(&msp->ms_lock));
1381 ASSERT3U(avl_numnodes(t), ==,
1382 avl_numnodes(&msp->ms_allocatable_by_size));
1383
1384 if (max_size < size)
1385 return (-1ULL);
1386
1387 rsearch.rs_start = *cursor;
1388 rsearch.rs_end = *cursor + size;
1389
1390 rs = avl_find(t, &rsearch, &where);
1391 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
1392 t = &msp->ms_allocatable_by_size;
1393
1394 rsearch.rs_start = 0;
1395 rsearch.rs_end = MIN(max_size,
1396 1ULL << (hbit + metaslab_ndf_clump_shift));
1397 rs = avl_find(t, &rsearch, &where);
1398 if (rs == NULL)
1399 rs = avl_nearest(t, where, AVL_AFTER);
1400 ASSERT(rs != NULL);
1401 }
1402
1403 if ((rs->rs_end - rs->rs_start) >= size) {
1404 *cursor = rs->rs_start + size;
1405 return (rs->rs_start);
1406 }
1407 return (-1ULL);
1408 }
1409
1410 static metaslab_ops_t metaslab_ndf_ops = {
1411 metaslab_ndf_alloc
1412 };
1413
1414 metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
1415 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
1416
1417
1418 /*
1419 * ==========================================================================
1420 * Metaslabs
1421 * ==========================================================================
1422 */
1423
1424 /*
1425 * Wait for any in-progress metaslab loads to complete.
1426 */
1427 void
1428 metaslab_load_wait(metaslab_t *msp)
1429 {
1430 ASSERT(MUTEX_HELD(&msp->ms_lock));
1431
1432 while (msp->ms_loading) {
1433 ASSERT(!msp->ms_loaded);
1434 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1435 }
1436 }
1437
1438 /*
1439 * Wait for any in-progress flushing to complete.
1440 */
1441 void
1442 metaslab_flush_wait(metaslab_t *msp)
1443 {
1444 ASSERT(MUTEX_HELD(&msp->ms_lock));
1445
1446 while (msp->ms_flushing)
1447 cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
1448 }
1449
1450 uint64_t
1451 metaslab_allocated_space(metaslab_t *msp)
1452 {
1453 return (msp->ms_allocated_space);
1454 }
1455
1456 /*
1457 * Verify that the space accounting on disk matches the in-core range_trees.
1458 */
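/*
 * Illustrative summary of the identity verified below:
 *
 *	ms_size - (sm_alloc + unflushed_allocs - unflushed_frees) ==
 *	    allocatable + allocating (next TXG_CONCURRENT_STATES txgs) +
 *	    deferred + freed
 */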
1459 static void
1460 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
1461 {
1462 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1463 uint64_t allocating = 0;
1464 uint64_t sm_free_space, msp_free_space;
1465
1466 ASSERT(MUTEX_HELD(&msp->ms_lock));
1467 ASSERT(!msp->ms_condensing);
1468
1469 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
1470 return;
1471
1472 /*
1473 * We can only verify the metaslab space when we're called
1474 * from syncing context with a loaded metaslab that has an
1475 * allocated space map. Calling this in non-syncing context
1476 * does not provide a consistent view of the metaslab since
1477 * we're performing allocations in the future.
1478 */
1479 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
1480 !msp->ms_loaded)
1481 return;
1482
1483 /*
1484 * Even though the smp_alloc field can be negative for some
1485 * space maps, that should never be the case when it comes to
1486 * a metaslab's space map.
1487 */
1488 ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
1489
1490 ASSERT3U(space_map_allocated(msp->ms_sm), >=,
1491 range_tree_space(msp->ms_unflushed_frees));
1492
1493 ASSERT3U(metaslab_allocated_space(msp), ==,
1494 space_map_allocated(msp->ms_sm) +
1495 range_tree_space(msp->ms_unflushed_allocs) -
1496 range_tree_space(msp->ms_unflushed_frees));
1497
1498 sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
1499
1500 /*
1501 * Account for future allocations since we would have
1502 * already deducted that space from the ms_allocatable.
1503 */
1504 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
1505 allocating +=
1506 range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
1507 }
1508
1509 ASSERT3U(msp->ms_deferspace, ==,
1510 range_tree_space(msp->ms_defer[0]) +
1511 range_tree_space(msp->ms_defer[1]));
1512
1513 msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
1514 msp->ms_deferspace + range_tree_space(msp->ms_freed);
1515
1516 VERIFY3U(sm_free_space, ==, msp_free_space);
1517 }
1518
1519 static void
1520 metaslab_aux_histograms_clear(metaslab_t *msp)
1521 {
1522 /*
1523 * Auxiliary histograms are only cleared when resetting them,
1524 * which can only happen while the metaslab is loaded.
1525 */
1526 ASSERT(msp->ms_loaded);
1527
1528 bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
1529 for (int t = 0; t < TXG_DEFER_SIZE; t++)
1530 bzero(msp->ms_deferhist[t], sizeof (msp->ms_deferhist[t]));
1531 }
1532
1533 static void
1534 metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
1535 range_tree_t *rt)
1536 {
1537 /*
1538 * This is modeled after space_map_histogram_add(), so refer to that
1539 * function for implementation details. We want this to work like
1540 * the space map histogram, and not the range tree histogram, as we
1541 * are essentially constructing a delta that will be later subtracted
1542 * from the space map histogram.
1543 */
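/*
 * Worked example (illustrative): with shift == 9, range tree bucket 9
 * (segments of 512B up to 1K) feeds space map bucket 0, bucket 10 feeds
 * bucket 1, and so on. Once idx saturates at SPACE_MAP_HISTOGRAM_SIZE - 1,
 * a larger range tree bucket i contributes rt_histogram[i] scaled by
 * 2^(i - idx - shift), mirroring how space_map_histogram_add() folds
 * oversized segments into its last bucket.
 */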
1544 int idx = 0;
1545 for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
1546 ASSERT3U(i, >=, idx + shift);
1547 histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
1548
1549 if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
1550 ASSERT3U(idx + shift, ==, i);
1551 idx++;
1552 ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
1553 }
1554 }
1555 }
1556
1557 /*
1558 * Called at every sync pass that the metaslab gets synced.
1559 *
1560 * The reason is that we want our auxiliary histograms to be updated
1561 * wherever the metaslab's space map histogram is updated. This way
1562 * we stay consistent on which parts of the metaslab space map's
1563 * histogram are currently not available for allocations (e.g because
1564 * they are in the defer, freed, and freeing trees).
1565 */
1566 static void
1567 metaslab_aux_histograms_update(metaslab_t *msp)
1568 {
1569 space_map_t *sm = msp->ms_sm;
1570 ASSERT(sm != NULL);
1571
1572 /*
1573 * This is similar to the metaslab's space map histogram updates
1574 * that take place in metaslab_sync(). The only difference is that
1575 * we only care about segments that haven't made it into the
1576 * ms_allocatable tree yet.
1577 */
1578 if (msp->ms_loaded) {
1579 metaslab_aux_histograms_clear(msp);
1580
1581 metaslab_aux_histogram_add(msp->ms_synchist,
1582 sm->sm_shift, msp->ms_freed);
1583
1584 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1585 metaslab_aux_histogram_add(msp->ms_deferhist[t],
1586 sm->sm_shift, msp->ms_defer[t]);
1587 }
1588 }
1589
1590 metaslab_aux_histogram_add(msp->ms_synchist,
1591 sm->sm_shift, msp->ms_freeing);
1592 }
1593
1594 /*
1595 * Called every time we are done syncing (writing to) the metaslab,
1596 * i.e. at the end of each sync pass.
1597 * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
1598 */
1599 static void
1600 metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
1601 {
1602 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1603 space_map_t *sm = msp->ms_sm;
1604
1605 if (sm == NULL) {
1606 /*
1607 * We came here from metaslab_init() when creating/opening a
1608 * pool, looking at a metaslab that hasn't had any allocations
1609 * yet.
1610 */
1611 return;
1612 }
1613
1614 /*
1615 * This is similar to the actions that we take for the ms_freed
1616 * and ms_defer trees in metaslab_sync_done().
1617 */
1618 uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
1619 if (defer_allowed) {
1620 bcopy(msp->ms_synchist, msp->ms_deferhist[hist_index],
1621 sizeof (msp->ms_synchist));
1622 } else {
1623 bzero(msp->ms_deferhist[hist_index],
1624 sizeof (msp->ms_deferhist[hist_index]));
1625 }
1626 bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
1627 }
1628
1629 /*
1630 * Ensure that the metaslab's weight and fragmentation are consistent
1631 * with the contents of the histogram (either the range tree's histogram
1632 * or the space map's depending whether the metaslab is loaded).
1633 */
1634 static void
1635 metaslab_verify_weight_and_frag(metaslab_t *msp)
1636 {
1637 ASSERT(MUTEX_HELD(&msp->ms_lock));
1638
1639 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
1640 return;
1641
1642 /* see comment in metaslab_verify_unflushed_changes() */
1643 if (msp->ms_group == NULL)
1644 return;
1645
1646 /*
1647 * Devices being removed always return a weight of 0 and leave
1648 * fragmentation and ms_max_size as is - there is nothing for
1649 * us to verify here.
1650 */
1651 vdev_t *vd = msp->ms_group->mg_vd;
1652 if (vd->vdev_removing)
1653 return;
1654
1655 /*
1656 * If the metaslab is dirty it probably means that we've done
1657 * some allocations or frees that have changed our histograms
1658 * and thus the weight.
1659 */
1660 for (int t = 0; t < TXG_SIZE; t++) {
1661 if (txg_list_member(&vd->vdev_ms_list, msp, t))
1662 return;
1663 }
1664
1665 /*
1666 * This verification checks that our in-memory state is consistent
1667 * with what's on disk. If the pool is read-only then there aren't
1668 * any changes and we just have the initially-loaded state.
1669 */
1670 if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
1671 return;
1672
1673 /* some extra verification against the in-core tree, when it is loaded */
1674 if (msp->ms_loaded) {
1675 range_tree_stat_verify(msp->ms_allocatable);
1676 VERIFY(space_map_histogram_verify(msp->ms_sm,
1677 msp->ms_allocatable));
1678 }
1679
1680 uint64_t weight = msp->ms_weight;
1681 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
1682 boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
1683 uint64_t frag = msp->ms_fragmentation;
1684 uint64_t max_segsize = msp->ms_max_size;
1685
1686 msp->ms_weight = 0;
1687 msp->ms_fragmentation = 0;
1688 msp->ms_max_size = 0;
1689
1690 /*
1691 * This function is used for verification purposes. Regardless of
1692 * whether metaslab_weight() thinks this metaslab should be active or
1693 * not, we want to ensure that the actual weight (and therefore the
1694 * value of ms_weight) would be the same if it was to be recalculated
1695 * at this point.
1696 */
1697 msp->ms_weight = metaslab_weight(msp) | was_active;
1698
1699 VERIFY3U(max_segsize, ==, msp->ms_max_size);
1700
1701 /*
1702 * If the weight type changed then there is no point in doing
1703 * verification. Revert fields to their original values.
1704 */
1705 if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
1706 (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
1707 msp->ms_fragmentation = frag;
1708 msp->ms_weight = weight;
1709 return;
1710 }
1711
1712 VERIFY3U(msp->ms_fragmentation, ==, frag);
1713 VERIFY3U(msp->ms_weight, ==, weight);
1714 }
1715
1716 static int
1717 metaslab_load_impl(metaslab_t *msp)
1718 {
1719 int error = 0;
1720
1721 ASSERT(MUTEX_HELD(&msp->ms_lock));
1722 ASSERT(msp->ms_loading);
1723 ASSERT(!msp->ms_condensing);
1724
1725 /*
1726 * We temporarily drop the lock to unblock other operations while we
1727 * are reading the space map. Therefore, metaslab_sync() and
1728 * metaslab_sync_done() can run at the same time as we do.
1729 *
1730 * If we are using the log space maps, metaslab_sync() can't write to
1731 * the metaslab's space map while we are loading as we only write to
1732 * it when we are flushing the metaslab, and that can't happen while
1733 * we are loading it.
1734 *
1735 * If we are not using log space maps though, metaslab_sync() can
1736 * append to the space map while we are loading. Therefore we load
1737 * only entries that existed when we started the load. Additionally,
1738 * metaslab_sync_done() has to wait for the load to complete because
1739 * there are potential races like metaslab_load() loading parts of the
1740 * space map that are currently being appended by metaslab_sync(). If
1741 * we didn't, the ms_allocatable would have entries that
1742 * metaslab_sync_done() would try to re-add later.
1743 *
1744 * That's why before dropping the lock we remember the synced length
1745 * of the metaslab and read up to that point of the space map,
1746 * ignoring entries appended by metaslab_sync() that happen after we
1747 * drop the lock.
1748 */
1749 uint64_t length = msp->ms_synced_length;
1750 mutex_exit(&msp->ms_lock);
1751
1752 hrtime_t load_start = gethrtime();
1753 if (msp->ms_sm != NULL) {
1754 error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
1755 SM_FREE, length);
1756 } else {
1757 /*
1758 * The space map has not been allocated yet, so treat
1759 * all the space in the metaslab as free and add it to the
1760 * ms_allocatable tree.
1761 */
1762 range_tree_add(msp->ms_allocatable,
1763 msp->ms_start, msp->ms_size);
1764
1765 if (msp->ms_freed != NULL) {
1766 /*
1767 * If the ms_sm doesn't exist, this means that this
1768 * metaslab hasn't gone through metaslab_sync() and
1769 * thus has never been dirtied. So we shouldn't
1770 * expect any unflushed allocs or frees from previous
1771 * TXGs.
1772 *
1773 * Note: ms_freed and all the other trees except for
1774 * the ms_allocatable, can be NULL at this point only
1775 * if this is a new metaslab of a vdev that just got
1776 * expanded.
1777 */
1778 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
1779 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
1780 }
1781 }
1782
1783 /*
1784 * We need to grab the ms_sync_lock to prevent metaslab_sync() from
1785 * changing the ms_sm (or log_sm) and the metaslab's range trees
1786 * while we are about to use them and populate the ms_allocatable.
1787 * The ms_lock is insufficient for this because metaslab_sync() doesn't
1788 * hold the ms_lock while writing the ms_checkpointing tree to disk.
1789 */
1790 mutex_enter(&msp->ms_sync_lock);
1791 mutex_enter(&msp->ms_lock);
1792
1793 ASSERT(!msp->ms_condensing);
1794 ASSERT(!msp->ms_flushing);
1795
1796 if (error != 0) {
1797 mutex_exit(&msp->ms_sync_lock);
1798 return (error);
1799 }
1800
1801 ASSERT3P(msp->ms_group, !=, NULL);
1802 msp->ms_loaded = B_TRUE;
1803
1804 /*
1805 * Apply all the unflushed changes to ms_allocatable right
1806 * away so any manipulations we do below have a clear view
1807 * of what is allocated and what is free.
1808 */
1809 range_tree_walk(msp->ms_unflushed_allocs,
1810 range_tree_remove, msp->ms_allocatable);
1811 range_tree_walk(msp->ms_unflushed_frees,
1812 range_tree_add, msp->ms_allocatable);
1813
1817 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1818 if (spa_syncing_log_sm(spa) != NULL) {
1819 ASSERT(spa_feature_is_enabled(spa,
1820 SPA_FEATURE_LOG_SPACEMAP));
1821
1822 /*
1823 * If we use a log space map we add all the segments
1824 * that are in ms_unflushed_frees so they are available
1825 * for allocation.
1826 *
1827 * ms_allocatable needs to contain all free segments
1828 * that are ready for allocations (thus not segments
1829 * from ms_freeing, ms_freed, and the ms_defer trees).
1830 * But if we grab the lock in this code path at a sync
1831 * pass later than 1, then it also contains the
1832 * segments of ms_freed (they were added to it earlier
1833 * in this path through ms_unflushed_frees). So we
1834 * need to remove all the segments that exist in
1835 * ms_freed from ms_allocatable as they will be added
1836 * later in metaslab_sync_done().
1837 *
1838 * When there's no log space map, the ms_allocatable
1839 * correctly doesn't contain any segments that exist
1840 * in ms_freed [see ms_synced_length].
1841 */
1842 range_tree_walk(msp->ms_freed,
1843 range_tree_remove, msp->ms_allocatable);
1844 }
1845
1846 /*
1847 * If we are not using the log space map, ms_allocatable
1848 * contains the segments that exist in the ms_defer trees
1849 * [see ms_synced_length]. Thus we need to remove them
1850 * from ms_allocatable as they will be added again in
1851 * metaslab_sync_done().
1852 *
1853 * If we are using the log space map, ms_allocatable still
1854 * contains the segments that exist in the ms_defer trees.
1855 * Not because it read them through the ms_sm, but because
1856 * these segments are part of ms_unflushed_frees, whose
1857 * segments we added to ms_allocatable earlier in this
1858 * code path.
1859 */
1860 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1861 range_tree_walk(msp->ms_defer[t],
1862 range_tree_remove, msp->ms_allocatable);
1863 }
1864
1865 /*
1866 * Call metaslab_recalculate_weight_and_sort() now that the
1867 * metaslab is loaded so we get the metaslab's real weight.
1868 *
1869 * Unless this metaslab was created with older software and
1870 * has not yet been converted to use segment-based weight, we
1871 * expect the new weight to be better or equal to the weight
1872 * that the metaslab had while it was not loaded. This is
1873 * because the old weight does not take into account the
1874 * consolidation of adjacent segments between TXGs. [see
1875 * comment for ms_synchist and ms_deferhist[] for more info]
1876 */
1877 uint64_t weight = msp->ms_weight;
1878 metaslab_recalculate_weight_and_sort(msp);
1879 if (!WEIGHT_IS_SPACEBASED(weight))
1880 ASSERT3U(weight, <=, msp->ms_weight);
1881 msp->ms_max_size = metaslab_block_maxsize(msp);
1882
1883 hrtime_t load_end = gethrtime();
1884 if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
1885 zfs_dbgmsg("loading: txg %llu, spa %s, vdev_id %llu, "
1886 "ms_id %llu, smp_length %llu, "
1887 "unflushed_allocs %llu, unflushed_frees %llu, "
1888 "freed %llu, defer %llu + %llu, "
1889 "loading_time %lld ms",
1890 spa_syncing_txg(spa), spa_name(spa),
1891 msp->ms_group->mg_vd->vdev_id, msp->ms_id,
1892 space_map_length(msp->ms_sm),
1893 range_tree_space(msp->ms_unflushed_allocs),
1894 range_tree_space(msp->ms_unflushed_frees),
1895 range_tree_space(msp->ms_freed),
1896 range_tree_space(msp->ms_defer[0]),
1897 range_tree_space(msp->ms_defer[1]),
1898 (longlong_t)((load_end - load_start) / 1000000));
1899 }
1900
1901 metaslab_verify_space(msp, spa_syncing_txg(spa));
1902 mutex_exit(&msp->ms_sync_lock);
1903 return (0);
1904 }
1905
1906 int
1907 metaslab_load(metaslab_t *msp)
1908 {
1909 ASSERT(MUTEX_HELD(&msp->ms_lock));
1910
1911 /*
1912 * There may be another thread loading the same metaslab, if that's
1913 * the case just wait until the other thread is done and return.
1914 */
1915 metaslab_load_wait(msp);
1916 if (msp->ms_loaded)
1917 return (0);
1918 VERIFY(!msp->ms_loading);
1919 ASSERT(!msp->ms_condensing);
1920
1921 /*
1922 * We set the loading flag BEFORE potentially dropping the lock to
1923 * wait for an ongoing flush (see ms_flushing below). This way other
1924 * threads know that there is already a thread that is loading this
1925 * metaslab.
1926 */
1927 msp->ms_loading = B_TRUE;
1928
1929 /*
1930 * Wait for any in-progress flushing to finish as we drop the ms_lock
1931 * both here (during space_map_load()) and in metaslab_flush() (when
1932 * we flush our changes to the ms_sm).
1933 */
1934 if (msp->ms_flushing)
1935 metaslab_flush_wait(msp);
1936
1937 /*
1938 * If we were waiting for the metaslab to be flushed (and thus
1939 * temporarily dropped the ms_lock), make sure that no one else
1940 * loaded the metaslab in the meantime.
1941 */
1942 ASSERT(!msp->ms_loaded);
1943
1944 int error = metaslab_load_impl(msp);
1945
1946 ASSERT(MUTEX_HELD(&msp->ms_lock));
1947 msp->ms_loading = B_FALSE;
1948 cv_broadcast(&msp->ms_load_cv);
1949
1950 return (error);
1951 }
1952
1953 void
1954 metaslab_unload(metaslab_t *msp)
1955 {
1956 ASSERT(MUTEX_HELD(&msp->ms_lock));
1957
1958 metaslab_verify_weight_and_frag(msp);
1959
1960 range_tree_vacate(msp->ms_allocatable, NULL, NULL);
1961 msp->ms_loaded = B_FALSE;
1962
1963 msp->ms_activation_weight = 0;
1964 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1965 msp->ms_max_size = 0;
1966
1967 /*
1968 * We explicitly recalculate the metaslab's weight based on its space
1969 * map (as it is now not loaded). We want unloaded metaslabs to always
1970 * have their weights calculated from the space map histograms, while
1971 * loaded ones have it calculated from their in-core range tree
1972 * [see metaslab_load()]. This way, the weight reflects the information
1973 * available in-core, whether it is loaded or not.
1974 *
1975 * If ms_group == NULL, it means that we came here from metaslab_fini(),
1976 * at which point it doesn't make sense for us to do the recalculation
1977 * and the sorting.
1978 */
1979 if (msp->ms_group != NULL)
1980 metaslab_recalculate_weight_and_sort(msp);
1981 }
1982
1983 void
1984 metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
1985 int64_t defer_delta, int64_t space_delta)
1986 {
1987 vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
1988
1989 ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
1990 ASSERT(vd->vdev_ms_count != 0);
1991
1992 metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
1993 vdev_deflated_space(vd, space_delta));
1994 }
1995
1996 int
1997 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
1998 uint64_t txg, metaslab_t **msp)
1999 {
2000 vdev_t *vd = mg->mg_vd;
2001 spa_t *spa = vd->vdev_spa;
2002 objset_t *mos = spa->spa_meta_objset;
2003 metaslab_t *ms;
2004 int error;
2005
2006 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
2007 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
2008 mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
2009 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
2010 cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
2011
2012 ms->ms_id = id;
2013 ms->ms_start = id << vd->vdev_ms_shift;
2014 ms->ms_size = 1ULL << vd->vdev_ms_shift;
2015 ms->ms_allocator = -1;
2016 ms->ms_new = B_TRUE;
2017
2018 /*
2019 * We only open space map objects that already exist. All others
2020 * will be opened when we finally allocate an object for it.
2021 *
2022 * Note:
2023 * When called from vdev_expand(), we can't call into the DMU as
2024 * we are holding the spa_config_lock as a writer and we would
2025 * deadlock [see relevant comment in vdev_metaslab_init()]. In
2026 * that case, the object parameter is zero though, so we won't
2027 * call into the DMU.
2028 */
2029 if (object != 0) {
2030 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
2031 ms->ms_size, vd->vdev_ashift);
2032
2033 if (error != 0) {
2034 kmem_free(ms, sizeof (metaslab_t));
2035 return (error);
2036 }
2037
2038 ASSERT(ms->ms_sm != NULL);
2039 ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
2040 }
2041
2042 /*
2043 * We create the ms_allocatable here, but we don't create the
2044 * other range trees until metaslab_sync_done(). This serves
2045 * two purposes: it allows metaslab_sync_done() to detect the
2046 * addition of new space; and for debugging, it ensures that
2047 * we'd data fault on any attempt to use this metaslab before
2048 * it's ready.
2049 */
2050 ms->ms_allocatable = range_tree_create_impl(&rt_avl_ops,
2051 &ms->ms_allocatable_by_size, metaslab_rangesize_compare, 0);
2052
2053 ms->ms_trim = range_tree_create(NULL, NULL);
2054
2055 metaslab_group_add(mg, ms);
2056 metaslab_set_fragmentation(ms);
2057
2058 /*
2059 * If we're opening an existing pool (txg == 0) or creating
2060 * a new one (txg == TXG_INITIAL), all space is available now.
2061 * If we're adding space to an existing pool, the new space
2062 * does not become available until after this txg has synced.
2063 * The metaslab's weight will also be initialized when we sync
2064 * out this txg. This ensures that we don't attempt to allocate
2065 * from it before we have initialized it completely.
2066 */
2067 if (txg <= TXG_INITIAL) {
2068 metaslab_sync_done(ms, 0);
2069 metaslab_space_update(vd, mg->mg_class,
2070 metaslab_allocated_space(ms), 0, 0);
2071 }
2072
2073 if (txg != 0) {
2074 vdev_dirty(vd, 0, NULL, txg);
2075 vdev_dirty(vd, VDD_METASLAB, ms, txg);
2076 }
2077
2078 *msp = ms;
2079
2080 return (0);
2081 }
2082
2083 static void
2084 metaslab_fini_flush_data(metaslab_t *msp)
2085 {
2086 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2087
2088 if (metaslab_unflushed_txg(msp) == 0) {
2089 ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
2090 ==, NULL);
2091 return;
2092 }
2093 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
2094
2095 mutex_enter(&spa->spa_flushed_ms_lock);
2096 avl_remove(&spa->spa_metaslabs_by_flushed, msp);
2097 mutex_exit(&spa->spa_flushed_ms_lock);
2098
2099 spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
2100 spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp));
2101 }
2102
2103 uint64_t
2104 metaslab_unflushed_changes_memused(metaslab_t *ms)
2105 {
2106 return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
2107 range_tree_numsegs(ms->ms_unflushed_frees)) *
2108 sizeof (range_seg_t));
2109 }
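
/*
 * Illustrative example (not part of the upstream logic, numbers assumed): a
 * metaslab with 1000 segments in ms_unflushed_allocs and 500 in
 * ms_unflushed_frees is charged 1500 * sizeof (range_seg_t) bytes against
 * spa_unflushed_stats.sus_memused (72,000 bytes assuming a 48-byte
 * range_seg_t).
 */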
2110
2111 void
2112 metaslab_fini(metaslab_t *msp)
2113 {
2114 metaslab_group_t *mg = msp->ms_group;
2115 vdev_t *vd = mg->mg_vd;
2116 spa_t *spa = vd->vdev_spa;
2117
2118 metaslab_fini_flush_data(msp);
2119
2120 metaslab_group_remove(mg, msp);
2121
2122 mutex_enter(&msp->ms_lock);
2123 VERIFY(msp->ms_group == NULL);
2124 metaslab_space_update(vd, mg->mg_class,
2125 -metaslab_allocated_space(msp), 0, -msp->ms_size);
2126
2127 space_map_close(msp->ms_sm);
2128 msp->ms_sm = NULL;
2129
2130 metaslab_unload(msp);
2131 range_tree_destroy(msp->ms_allocatable);
2132 range_tree_destroy(msp->ms_freeing);
2133 range_tree_destroy(msp->ms_freed);
2134
2135 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
2136 metaslab_unflushed_changes_memused(msp));
2137 spa->spa_unflushed_stats.sus_memused -=
2138 metaslab_unflushed_changes_memused(msp);
2139 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
2140 range_tree_destroy(msp->ms_unflushed_allocs);
2141 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
2142 range_tree_destroy(msp->ms_unflushed_frees);
2143
2144 for (int t = 0; t < TXG_SIZE; t++) {
2145 range_tree_destroy(msp->ms_allocating[t]);
2146 }
2147
2148 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2149 range_tree_destroy(msp->ms_defer[t]);
2150 }
2151 ASSERT0(msp->ms_deferspace);
2152
2153 range_tree_destroy(msp->ms_checkpointing);
2154
2155 for (int t = 0; t < TXG_SIZE; t++)
2156 ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
2157
2158 range_tree_vacate(msp->ms_trim, NULL, NULL);
2159 range_tree_destroy(msp->ms_trim);
2160
2161 mutex_exit(&msp->ms_lock);
2162 cv_destroy(&msp->ms_load_cv);
2163 cv_destroy(&msp->ms_flush_cv);
2164 mutex_destroy(&msp->ms_lock);
2165 mutex_destroy(&msp->ms_sync_lock);
2166 ASSERT3U(msp->ms_allocator, ==, -1);
2167
2168 kmem_free(msp, sizeof (metaslab_t));
2169 }
2170
2171 #define FRAGMENTATION_TABLE_SIZE 17
2172
2173 /*
2174 * This table defines a segment size based fragmentation metric that will
2175 * allow each metaslab to derive its own fragmentation value. This is done
2176 * by calculating the space in each bucket of the spacemap histogram and
2177 * multiplying that by the fragmentation metric in this table. Doing
2178 * this for all buckets and dividing it by the total amount of free
2179 * space in this metaslab (i.e. the total free space in all buckets) gives
2180 * us the fragmentation metric. This means that a high fragmentation metric
2181 * equates to most of the free space being comprised of small segments.
2182 * Conversely, if the metric is low, then most of the free space is in
2183 * large segments. A 10% change in fragmentation equates to approximately
2184 * double the number of segments.
2185 *
2186 * This table defines 0% fragmented space using 16MB segments. Testing has
2187 * shown that segments that are greater than or equal to 16MB do not suffer
2188 * from drastic performance problems. Using this value, we derive the rest
2189 * of the table. Since the fragmentation value is never stored on disk, it
2190 * is possible to change these calculations in the future.
2191 */
2192 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
2193 100, /* 512B */
2194 100, /* 1K */
2195 98, /* 2K */
2196 95, /* 4K */
2197 90, /* 8K */
2198 80, /* 16K */
2199 70, /* 32K */
2200 60, /* 64K */
2201 50, /* 128K */
2202 40, /* 256K */
2203 30, /* 512K */
2204 20, /* 1M */
2205 15, /* 2M */
2206 10, /* 4M */
2207 5, /* 8M */
2208 0 /* 16M */
2209 };
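
/*
 * Worked example for the table above (illustrative numbers only): a metaslab
 * whose free space consists of 64MB worth of 8K segments and 192MB worth of
 * 512K segments would compute
 *
 *	fragmentation = (64M * 90 + 192M * 30) / 256M = 45
 *
 * i.e. free space dominated by small segments pushes the metric toward 100,
 * while free space held in large segments pulls it toward 0.
 */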
2210
2211 /*
2212 * Calculate the metaslab's fragmentation metric and set ms_fragmentation.
2213 * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
2214 * been upgraded and does not support this metric. Otherwise, the return
2215 * value should be in the range [0, 100].
2216 */
2217 static void
2218 metaslab_set_fragmentation(metaslab_t *msp)
2219 {
2220 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2221 uint64_t fragmentation = 0;
2222 uint64_t total = 0;
2223 boolean_t feature_enabled = spa_feature_is_enabled(spa,
2224 SPA_FEATURE_SPACEMAP_HISTOGRAM);
2225
2226 if (!feature_enabled) {
2227 msp->ms_fragmentation = ZFS_FRAG_INVALID;
2228 return;
2229 }
2230
2231 /*
2232 * A null space map means that the entire metaslab is free
2233 * and thus is not fragmented.
2234 */
2235 if (msp->ms_sm == NULL) {
2236 msp->ms_fragmentation = 0;
2237 return;
2238 }
2239
2240 /*
2241 * If this metaslab's space map has not been upgraded, flag it
2242 * so that we upgrade next time we encounter it.
2243 */
2244 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
2245 uint64_t txg = spa_syncing_txg(spa);
2246 vdev_t *vd = msp->ms_group->mg_vd;
2247
2248 /*
2249 * If we've reached the final dirty txg, then we must
2250 * be shutting down the pool. We don't want to dirty
2251 * any data past this point so skip setting the condense
2252 * flag. We can retry this action the next time the pool
2253 * is imported.
2254 */
2255 if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
2256 msp->ms_condense_wanted = B_TRUE;
2257 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2258 zfs_dbgmsg("txg %llu, requesting force condense: "
2259 "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
2260 vd->vdev_id);
2261 }
2262 msp->ms_fragmentation = ZFS_FRAG_INVALID;
2263 return;
2264 }
2265
2266 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
2267 uint64_t space = 0;
2268 uint8_t shift = msp->ms_sm->sm_shift;
2269
2270 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
2271 FRAGMENTATION_TABLE_SIZE - 1);
2272
2273 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
2274 continue;
2275
2276 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
2277 total += space;
2278
2279 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
2280 fragmentation += space * zfs_frag_table[idx];
2281 }
2282
2283 if (total > 0)
2284 fragmentation /= total;
2285 ASSERT3U(fragmentation, <=, 100);
2286
2287 msp->ms_fragmentation = fragmentation;
2288 }
2289
2290 /*
2291 * Compute a weight -- a selection preference value -- for the given metaslab.
2292 * This is based on the amount of free space, the level of fragmentation,
2293 * the LBA range, and whether the metaslab is loaded.
2294 */
2295 static uint64_t
2296 metaslab_space_weight(metaslab_t *msp)
2297 {
2298 metaslab_group_t *mg = msp->ms_group;
2299 vdev_t *vd = mg->mg_vd;
2300 uint64_t weight, space;
2301
2302 ASSERT(MUTEX_HELD(&msp->ms_lock));
2303 ASSERT(!vd->vdev_removing);
2304
2305 /*
2306 * The baseline weight is the metaslab's free space.
2307 */
2308 space = msp->ms_size - metaslab_allocated_space(msp);
2309
2310 if (metaslab_fragmentation_factor_enabled &&
2311 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
2312 /*
2313 * Use the fragmentation information to inversely scale
2314 * down the baseline weight. We need to ensure that we
2315 * don't exclude this metaslab completely when it's 100%
2316 * fragmented. To avoid this we reduce the fragmented value
2317 * by 1.
2318 */
2319 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
2320
2321 /*
2322 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
2323 * this metaslab again. The fragmentation metric may have
2324 * decreased the space to something smaller than
2325 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
2326 * so that we can consume any remaining space.
2327 */
2328 if (space > 0 && space < SPA_MINBLOCKSIZE)
2329 space = SPA_MINBLOCKSIZE;
2330 }
2331 weight = space;
2332
2333 /*
2334 * Modern disks have uniform bit density and constant angular velocity.
2335 * Therefore, the outer recording zones are faster (higher bandwidth)
2336 * than the inner zones by the ratio of outer to inner track diameter,
2337 * which is typically around 2:1. We account for this by assigning
2338 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
2339 * In effect, this means that we'll select the metaslab with the most
2340 * free bandwidth rather than simply the one with the most free space.
2341 */
2342 if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
2343 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
2344 ASSERT(weight >= space && weight <= 2 * space);
2345 }
2346
2347 /*
2348 * If this metaslab is one we're actively using, adjust its
2349 * weight to make it preferable to any inactive metaslab so
2350 * we'll polish it off. If the fragmentation on this metaslab
2351 * has exceeded our threshold, then don't mark it active.
2352 */
2353 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
2354 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
2355 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
2356 }
2357
2358 WEIGHT_SET_SPACEBASED(weight);
2359 return (weight);
2360 }
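
/*
 * Illustrative effect of the LBA weighting above: on a rotational vdev with
 * 100 metaslabs, metaslab 0 keeps weight = 2 * space, metaslab 50 gets about
 * 1.5 * space, and metaslab 99 ends up just above 1 * space, biasing
 * allocations toward the faster outer zones of the disk.
 */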
2361
2362 /*
2363 * Return the weight of the specified metaslab, according to the segment-based
2364 * weighting algorithm. The metaslab must be loaded. This function can
2365 * be called within a sync pass since it relies only on the metaslab's
2366 * range tree which is always accurate when the metaslab is loaded.
2367 */
2368 static uint64_t
2369 metaslab_weight_from_range_tree(metaslab_t *msp)
2370 {
2371 uint64_t weight = 0;
2372 uint32_t segments = 0;
2373
2374 ASSERT(msp->ms_loaded);
2375
2376 for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
2377 i--) {
2378 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
2379 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
2380
2381 segments <<= 1;
2382 segments += msp->ms_allocatable->rt_histogram[i];
2383
2384 /*
2385 * The range tree provides more precision than the space map
2386 * and must be downgraded so that all values fit within the
2387 * space map's histogram. This allows us to compare loaded
2388 * vs. unloaded metaslabs to determine which metaslab is
2389 * considered "best".
2390 */
2391 if (i > max_idx)
2392 continue;
2393
2394 if (segments != 0) {
2395 WEIGHT_SET_COUNT(weight, segments);
2396 WEIGHT_SET_INDEX(weight, i);
2397 WEIGHT_SET_ACTIVE(weight, 0);
2398 break;
2399 }
2400 }
2401 return (weight);
2402 }
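
/*
 * Illustrative example of the downgrade above, assuming ashift = 12 and
 * SPACE_MAP_HISTOGRAM_SIZE = 32 (so max_idx = 43): one free segment in range
 * tree bucket 45 is folded into index 43 as a count of 4, since the running
 * count is doubled for every bucket it is shifted down. This keeps loaded
 * and unloaded metaslabs comparable on the same scale.
 */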
2403
2404 /*
2405 * Calculate the weight based on the on-disk histogram. Should be applied
2406 * only to unloaded metaslabs (i.e no incoming allocations) in-order to
2407 * give results consistent with the on-disk state
2408 */
2409 static uint64_t
2410 metaslab_weight_from_spacemap(metaslab_t *msp)
2411 {
2412 space_map_t *sm = msp->ms_sm;
2413 ASSERT(!msp->ms_loaded);
2414 ASSERT(sm != NULL);
2415 ASSERT3U(space_map_object(sm), !=, 0);
2416 ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
2417
2418 /*
2419 * Create a joint histogram from all the segments that have made
2420 * it to the metaslab's space map histogram but are not yet
2421 * available for allocation because they are still in the freeing
2422 * pipeline (e.g. freeing, freed, and defer trees). Then subtract
2423 * these segments from the space map's histogram to get a more
2424 * accurate weight.
2425 */
2426 uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
2427 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
2428 deferspace_histogram[i] += msp->ms_synchist[i];
2429 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2430 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
2431 deferspace_histogram[i] += msp->ms_deferhist[t][i];
2432 }
2433 }
2434
2435 uint64_t weight = 0;
2436 for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
2437 ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
2438 deferspace_histogram[i]);
2439 uint64_t count =
2440 sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
2441 if (count != 0) {
2442 WEIGHT_SET_COUNT(weight, count);
2443 WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
2444 WEIGHT_SET_ACTIVE(weight, 0);
2445 break;
2446 }
2447 }
2448 return (weight);
2449 }
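
/*
 * Illustrative example: if smp_histogram[20] reports 10 segments but 4 of
 * them are still accounted for in ms_synchist/ms_deferhist (i.e. not yet
 * allocatable), the weight is built from a count of 6 at index
 * 20 + sm_shift.
 */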
2450
2451 /*
2452 * Compute a segment-based weight for the specified metaslab. The weight
2453 * is determined by highest bucket in the histogram. The information
2454 * for the highest bucket is encoded into the weight value.
2455 */
2456 static uint64_t
2457 metaslab_segment_weight(metaslab_t *msp)
2458 {
2459 metaslab_group_t *mg = msp->ms_group;
2460 uint64_t weight = 0;
2461 uint8_t shift = mg->mg_vd->vdev_ashift;
2462
2463 ASSERT(MUTEX_HELD(&msp->ms_lock));
2464
2465 /*
2466 * The metaslab is completely free.
2467 */
2468 if (metaslab_allocated_space(msp) == 0) {
2469 int idx = highbit64(msp->ms_size) - 1;
2470 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
2471
2472 if (idx < max_idx) {
2473 WEIGHT_SET_COUNT(weight, 1ULL);
2474 WEIGHT_SET_INDEX(weight, idx);
2475 } else {
2476 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
2477 WEIGHT_SET_INDEX(weight, max_idx);
2478 }
2479 WEIGHT_SET_ACTIVE(weight, 0);
2480 ASSERT(!WEIGHT_IS_SPACEBASED(weight));
2481 return (weight);
2482 }
2483
2484 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
2485
2486 /*
2487 * If the metaslab is fully allocated then just make the weight 0.
2488 */
2489 if (metaslab_allocated_space(msp) == msp->ms_size)
2490 return (0);
2491 /*
2492 * If the metaslab is already loaded, then use the range tree to
2493 * determine the weight. Otherwise, we rely on the space map information
2494 * to generate the weight.
2495 */
2496 if (msp->ms_loaded) {
2497 weight = metaslab_weight_from_range_tree(msp);
2498 } else {
2499 weight = metaslab_weight_from_spacemap(msp);
2500 }
2501
2502 /*
2503 * If the metaslab was active the last time we calculated its weight
2504 * then keep it active. We want to consume the entire region that
2505 * is associated with this weight.
2506 */
2507 if (msp->ms_activation_weight != 0 && weight != 0)
2508 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
2509 return (weight);
2510 }
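
/*
 * Illustrative example: a completely free 8GB metaslab on a vdev with
 * ashift = 9 yields idx = highbit64(8G) - 1 = 33 and, assuming
 * SPACE_MAP_HISTOGRAM_SIZE = 32, max_idx = 40; since idx < max_idx the
 * weight encodes a single segment at index 33, i.e. one maximally sized
 * free region.
 */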
2511
2512 /*
2513 * Determine if we should attempt to allocate from this metaslab. If the
2514 * metaslab is loaded, then we can determine if the desired allocation
2515 * can be satisfied by looking at the size of the maximum free segment
2516 * on that metaslab. Otherwise, we make our decision based on the metaslab's
2517 * weight. For segment-based weighting we can determine the maximum
2518 * allocation based on the index encoded in its value. For space-based
2519 * weights we rely on the entire weight (excluding the weight-type bit).
2520 */
2521 boolean_t
2522 metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
2523 {
2524 if (msp->ms_loaded) {
2525 return (msp->ms_max_size >= asize);
2526 } else {
2527 ASSERT0(msp->ms_max_size);
2528 }
2529
2530 boolean_t should_allocate;
2531 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
2532 /*
2533 * The metaslab segment weight indicates segments in the
2534 * range [2^i, 2^(i+1)), where i is the index in the weight.
2535 * Since the asize might be in the middle of the range, we
2536 * should attempt the allocation if asize < 2^(i+1).
2537 */
2538 should_allocate = (asize <
2539 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
2540 } else {
2541 should_allocate = (asize <=
2542 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
2543 }
2544
2545 return (should_allocate);
2546 }
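
/*
 * Example for metaslab_should_allocate() above (illustrative): an unloaded
 * metaslab whose segment-based weight has index 17 advertises free segments
 * in the range [128K, 256K), so a 200K allocation is worth attempting while
 * a 300K one is not. A loaded metaslab is judged purely by ms_max_size.
 */
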
2547 static uint64_t
2548 metaslab_weight(metaslab_t *msp)
2549 {
2550 vdev_t *vd = msp->ms_group->mg_vd;
2551 spa_t *spa = vd->vdev_spa;
2552 uint64_t weight;
2553
2554 ASSERT(MUTEX_HELD(&msp->ms_lock));
2555
2556 /*
2557 * If this vdev is in the process of being removed, there is nothing
2558 * for us to do here.
2559 */
2560 if (vd->vdev_removing)
2561 return (0);
2562
2563 metaslab_set_fragmentation(msp);
2564
2565 /*
2566 * Update the maximum size if the metaslab is loaded. This will
2567 * ensure that we get an accurate maximum size if newly freed space
2568 * has been added back into the free tree.
2569 */
2570 if (msp->ms_loaded)
2571 msp->ms_max_size = metaslab_block_maxsize(msp);
2572 else
2573 ASSERT0(msp->ms_max_size);
2574
2575 /*
2576 * Segment-based weighting requires space map histogram support.
2577 */
2578 if (zfs_metaslab_segment_weight_enabled &&
2579 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
2580 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
2581 sizeof (space_map_phys_t))) {
2582 weight = metaslab_segment_weight(msp);
2583 } else {
2584 weight = metaslab_space_weight(msp);
2585 }
2586 return (weight);
2587 }
2588
2589 void
2590 metaslab_recalculate_weight_and_sort(metaslab_t *msp)
2591 {
2592 ASSERT(MUTEX_HELD(&msp->ms_lock));
2593
2594 /* note: we preserve the mask (e.g. indication of primary, etc..) */
2595 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2596 metaslab_group_sort(msp->ms_group, msp,
2597 metaslab_weight(msp) | was_active);
2598 }
2599
2600 static int
2601 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
2602 int allocator, uint64_t activation_weight)
2603 {
2604 ASSERT(MUTEX_HELD(&msp->ms_lock));
2605
2606 /*
2607 * If we're activating for the claim code, we don't want to actually
2608 * set the metaslab up for a specific allocator.
2609 */
2610 if (activation_weight == METASLAB_WEIGHT_CLAIM)
2611 return (0);
2612
2613 metaslab_t **arr = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
2614 mg->mg_primaries : mg->mg_secondaries);
2615
2616 mutex_enter(&mg->mg_lock);
2617 if (arr[allocator] != NULL) {
2618 mutex_exit(&mg->mg_lock);
2619 return (EEXIST);
2620 }
2621
2622 arr[allocator] = msp;
2623 ASSERT3S(msp->ms_allocator, ==, -1);
2624 msp->ms_allocator = allocator;
2625 msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
2626 mutex_exit(&mg->mg_lock);
2627
2628 return (0);
2629 }
2630
2631 static int
2632 metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
2633 {
2634 ASSERT(MUTEX_HELD(&msp->ms_lock));
2635
2636 /*
2637 * The current metaslab is already activated for us so there
2638 * is nothing to do. Being activated, though, does not mean that
2639 * this metaslab is activated for our allocator or with our
2640 * requested activation weight. The metaslab could have started
2641 * as an active one for our allocator but changed allocators
2642 * while we were waiting to grab its ms_lock or we stole it
2643 * [see find_valid_metaslab()]. This means that there is a
2644 * possibility of passivating a metaslab of another allocator
2645 * or from a different activation mask, from this thread.
2646 */
2647 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
2648 ASSERT(msp->ms_loaded);
2649 return (0);
2650 }
2651
2652 int error = metaslab_load(msp);
2653 if (error != 0) {
2654 metaslab_group_sort(msp->ms_group, msp, 0);
2655 return (error);
2656 }
2657
2658 /*
2659 * When entering metaslab_load() we may have dropped the
2660 * ms_lock because we were loading this metaslab, or we
2661 * were waiting for another thread to load it for us. In
2662 * that scenario, we recheck the weight of the metaslab
2663 * to see if it was activated by another thread.
2664 *
2665 * If the metaslab was activated for another allocator or
2666 * it was activated with a different activation weight (e.g.
2667 * we wanted to make it a primary but it was activated as
2668 * secondary) we return error (EBUSY).
2669 *
2670 * If the metaslab was activated for the same allocator
2671 * and requested activation mask, skip activating it.
2672 */
2673 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
2674 if (msp->ms_allocator != allocator)
2675 return (EBUSY);
2676
2677 if ((msp->ms_weight & activation_weight) == 0)
2678 return (SET_ERROR(EBUSY));
2679
2680 EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
2681 msp->ms_primary);
2682 return (0);
2683 }
2684
2685 /*
2686 * If the metaslab has literally 0 space, it will have weight 0. In
2687 * that case, don't bother activating it. This can happen if the
2688 * metaslab had space during find_valid_metaslab, but another thread
2689 * loaded it and used all that space while we were waiting to grab the
2690 * lock.
2691 */
2692 if (msp->ms_weight == 0) {
2693 ASSERT0(range_tree_space(msp->ms_allocatable));
2694 return (SET_ERROR(ENOSPC));
2695 }
2696
2697 if ((error = metaslab_activate_allocator(msp->ms_group, msp,
2698 allocator, activation_weight)) != 0) {
2699 return (error);
2700 }
2701
2702 ASSERT0(msp->ms_activation_weight);
2703 msp->ms_activation_weight = msp->ms_weight;
2704 metaslab_group_sort(msp->ms_group, msp,
2705 msp->ms_weight | activation_weight);
2706
2707 ASSERT(msp->ms_loaded);
2708 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
2709
2710 return (0);
2711 }
2712
2713 static void
2714 metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
2715 uint64_t weight)
2716 {
2717 ASSERT(MUTEX_HELD(&msp->ms_lock));
2718 ASSERT(msp->ms_loaded);
2719
2720 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
2721 metaslab_group_sort(mg, msp, weight);
2722 return;
2723 }
2724
2725 mutex_enter(&mg->mg_lock);
2726 ASSERT3P(msp->ms_group, ==, mg);
2727 ASSERT3S(0, <=, msp->ms_allocator);
2728 ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
2729
2730 if (msp->ms_primary) {
2731 ASSERT3P(mg->mg_primaries[msp->ms_allocator], ==, msp);
2732 ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
2733 mg->mg_primaries[msp->ms_allocator] = NULL;
2734 } else {
2735 ASSERT3P(mg->mg_secondaries[msp->ms_allocator], ==, msp);
2736 ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
2737 mg->mg_secondaries[msp->ms_allocator] = NULL;
2738 }
2739 msp->ms_allocator = -1;
2740 metaslab_group_sort_impl(mg, msp, weight);
2741 mutex_exit(&mg->mg_lock);
2742 }
2743
2744 static void
2745 metaslab_passivate(metaslab_t *msp, uint64_t weight)
2746 {
2747 ASSERTV(uint64_t size = weight & ~METASLAB_WEIGHT_TYPE);
2748
2749 /*
2750 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
2751 * this metaslab again. In that case, it had better be empty,
2752 * or we would be leaving space on the table.
2753 */
2754 ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
2755 size >= SPA_MINBLOCKSIZE ||
2756 range_tree_space(msp->ms_allocatable) == 0);
2757 ASSERT0(weight & METASLAB_ACTIVE_MASK);
2758
2759 ASSERT(msp->ms_activation_weight != 0);
2760 msp->ms_activation_weight = 0;
2761 metaslab_passivate_allocator(msp->ms_group, msp, weight);
2762 ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
2763 }
2764
2765 /*
2766 * Segment-based metaslabs are activated once and remain active until
2767 * we either fail an allocation attempt (similar to space-based metaslabs)
2768 * or have exhausted the free space in zfs_metaslab_switch_threshold
2769 * buckets since the metaslab was activated. This function checks to see
2770 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
2771 * metaslab and passivates it proactively. This will allow us to select a
2772 * metaslab with a larger contiguous region, if any, remaining within this
2773 * metaslab group. If we're in sync pass > 1, then we continue using this
2774 * metaslab so that we don't dirty more blocks and cause more sync passes.
2775 */
2776 void
2777 metaslab_segment_may_passivate(metaslab_t *msp)
2778 {
2779 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2780
2781 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
2782 return;
2783
2784 /*
2785 * Since we are in the middle of a sync pass, the most accurate
2786 * information that is accessible to us is the in-core range tree
2787 * histogram; calculate the new weight based on that information.
2788 */
2789 uint64_t weight = metaslab_weight_from_range_tree(msp);
2790 int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
2791 int current_idx = WEIGHT_GET_INDEX(weight);
2792
2793 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
2794 metaslab_passivate(msp, weight);
2795 }
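
/*
 * Illustrative example: with zfs_metaslab_switch_threshold = 2 and an
 * activation weight index of 20 (largest free segments in [1M, 2M)), the
 * metaslab is passivated once its recomputed index drops to 18
 * ([256K, 512K)) or lower.
 */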
2796
2797 static void
2798 metaslab_preload(void *arg)
2799 {
2800 metaslab_t *msp = arg;
2801 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2802 fstrans_cookie_t cookie = spl_fstrans_mark();
2803
2804 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
2805
2806 mutex_enter(&msp->ms_lock);
2807 (void) metaslab_load(msp);
2808 msp->ms_selected_txg = spa_syncing_txg(spa);
2809 mutex_exit(&msp->ms_lock);
2810 spl_fstrans_unmark(cookie);
2811 }
2812
2813 static void
2814 metaslab_group_preload(metaslab_group_t *mg)
2815 {
2816 spa_t *spa = mg->mg_vd->vdev_spa;
2817 metaslab_t *msp;
2818 avl_tree_t *t = &mg->mg_metaslab_tree;
2819 int m = 0;
2820
2821 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
2822 taskq_wait_outstanding(mg->mg_taskq, 0);
2823 return;
2824 }
2825
2826 mutex_enter(&mg->mg_lock);
2827
2828 /*
2829 * Load the next potential metaslabs
2830 */
2831 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
2832 ASSERT3P(msp->ms_group, ==, mg);
2833
2834 /*
2835 * We preload only the maximum number of metaslabs specified
2836 * by metaslab_preload_limit. If a metaslab is being forced
2837 * to condense then we preload it too. This will ensure
2838 * that force condensing happens in the next txg.
2839 */
2840 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
2841 continue;
2842 }
2843
2844 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
2845 msp, TQ_SLEEP) != TASKQID_INVALID);
2846 }
2847 mutex_exit(&mg->mg_lock);
2848 }
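
/*
 * For example, with metaslab_preload_limit set to 3, only the three
 * highest-weight metaslabs of the group are dispatched to mg_taskq above,
 * plus any metaslab that has ms_condense_wanted set.
 */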
2849
2850 /*
2851 * Determine if the space map's on-disk footprint is past our tolerance for
2852 * inefficiency. We would like to use the following criteria to make our
2853 * decision:
2854 *
2855 * 1. Do not condense if the size of the space map object would dramatically
2856 * increase as a result of writing out the free space range tree.
2857 *
2858 * 2. Condense if the on-disk space map representation is at least
2859 * zfs_condense_pct/100 times the size of the optimal representation
2860 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, optimal = 1.1MB).
2861 *
2862 * 3. Do not condense if the on-disk size of the space map does not actually
2863 * decrease.
2864 *
2865 * Unfortunately, we cannot compute the on-disk size of the space map in this
2866 * context because we cannot accurately compute the effects of compression, etc.
2867 * Instead, we apply the heuristic described in the block comment for
2868 * zfs_metaslab_condense_block_threshold - we only condense if the space used
2869 * is greater than a threshold number of blocks.
2870 */
2871 static boolean_t
2872 metaslab_should_condense(metaslab_t *msp)
2873 {
2874 space_map_t *sm = msp->ms_sm;
2875 vdev_t *vd = msp->ms_group->mg_vd;
2876 uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
2877
2878 ASSERT(MUTEX_HELD(&msp->ms_lock));
2879 ASSERT(msp->ms_loaded);
2880 ASSERT(sm != NULL);
2881 ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
2882
2883 /*
2884 * We always condense metaslabs that are empty and metaslabs for
2885 * which a condense request has been made.
2886 */
2887 if (avl_is_empty(&msp->ms_allocatable_by_size) ||
2888 msp->ms_condense_wanted)
2889 return (B_TRUE);
2890
2891 uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
2892 uint64_t object_size = space_map_length(sm);
2893 uint64_t optimal_size = space_map_estimate_optimal_size(sm,
2894 msp->ms_allocatable, SM_NO_VDEVID);
2895
2896 return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
2897 object_size > zfs_metaslab_condense_block_threshold * record_size);
2898 }
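
/*
 * Illustrative numbers for the check above: with zfs_condense_pct = 200, a
 * 16K record_size, and a block threshold of 4, a space map whose on-disk
 * length is 2.5MB against an estimated optimal size of 1MB is condensed
 * (2.5MB >= 2 * 1MB and 2.5MB > 4 * 16K), while the same map at 1.5MB would
 * be left alone.
 */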
2899
2900 /*
2901 * Condense the on-disk space map representation to its minimized form.
2902 * The minimized form consists of a small number of allocations followed
2903 * by the entries of the free range tree (ms_allocatable). The condensed
2904 * spacemap contains all the entries of previous TXGs (including those in
2905 * the pool-wide log spacemaps; thus this is effectively a superset of
2906 * metaslab_flush()), but this TXG's entries still need to be written.
2907 */
2908 static void
2909 metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
2910 {
2911 range_tree_t *condense_tree;
2912 space_map_t *sm = msp->ms_sm;
2913 uint64_t txg = dmu_tx_get_txg(tx);
2914 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2915
2916 ASSERT(MUTEX_HELD(&msp->ms_lock));
2917 ASSERT(msp->ms_loaded);
2918 ASSERT(msp->ms_sm != NULL);
2919
2920 /*
2921 * In order to condense the space map, we need to change it so it
2922 * only describes which segments are currently allocated and free.
2923 *
2924 * All the current free space resides in the ms_allocatable, all
2925 * the ms_defer trees, and all the ms_allocating trees. We ignore
2926 * ms_freed because it is empty because we're in sync pass 1. We
2927 * ignore ms_freeing because these changes are not yet reflected
2928 * in the spacemap (they will be written later this txg).
2929 *
2930 * So to truncate the space map to represent all the entries of
2931 * previous TXGs we do the following:
2932 *
2933 * 1] We create a range tree (condense tree) that is 100% allocated.
2934 * 2] We remove from it all segments found in the ms_defer trees
2935 * as those segments are marked as free in the original space
2936 * map. We do the same with the ms_allocating trees for the same
2937 * reason. Removing these segments should be a relatively
2938 * inexpensive operation since we expect these trees to have a
2939 * small number of nodes.
2940 * 3] We vacate any unflushed allocs as they should already exist
2941 * in the condense tree. Then we vacate any unflushed frees as
2942 * they should already be part of ms_allocatable.
2943 * 4] At this point, we would ideally like to remove all segments
2944 * in the ms_allocatable tree from the condense tree. This way
2945 * we would write all the entries of the condense tree as the
2946 * condensed space map, which would only contain allocated
2947 * segments with everything else assumed to be freed.
2948 *
2949 * Doing so can be prohibitively expensive as ms_allocatable can
2950 * be large, and therefore computationally expensive to subtract
2951 * from the condense_tree. Instead we first sync out the
2952 * condense_tree and then the ms_allocatable, in the condensed
2953 * space map. While this is not optimal, it is typically close to
2954 * optimal and more importantly much cheaper to compute.
2955 *
2956 * 5] Finally, as both of the unflushed trees were written to our
2957 * new and condensed metaslab space map, we basically flushed
2958 * all the unflushed changes to disk, thus we call
2959 * metaslab_flush_update().
2960 */
2961 ASSERT3U(spa_sync_pass(spa), ==, 1);
2962 ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
2963
2964 zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
2965 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
2966 msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
2967 spa->spa_name, space_map_length(msp->ms_sm),
2968 avl_numnodes(&msp->ms_allocatable->rt_root),
2969 msp->ms_condense_wanted ? "TRUE" : "FALSE");
2970
2971 msp->ms_condense_wanted = B_FALSE;
2972
2973 condense_tree = range_tree_create(NULL, NULL);
2974 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
2975
2976 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2977 range_tree_walk(msp->ms_defer[t],
2978 range_tree_remove, condense_tree);
2979 }
2980
2981 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
2982 range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
2983 range_tree_remove, condense_tree);
2984 }
2985
2986 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
2987 metaslab_unflushed_changes_memused(msp));
2988 spa->spa_unflushed_stats.sus_memused -=
2989 metaslab_unflushed_changes_memused(msp);
2990 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
2991 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
2992
2993 /*
2994 * We're about to drop the metaslab's lock thus allowing other
2995 * consumers to change its content. Set the metaslab's ms_condensing
2996 * flag to ensure that allocations on this metaslab do not occur
2997 * while we're in the middle of committing it to disk. This is only
2998 * critical for ms_allocatable as all other range trees use per TXG
2999 * views of their content.
3000 */
3001 msp->ms_condensing = B_TRUE;
3002
3003 mutex_exit(&msp->ms_lock);
3004 uint64_t object = space_map_object(msp->ms_sm);
3005 space_map_truncate(sm,
3006 spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3007 zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
3008
3009 /*
3010 * space_map_truncate() may have reallocated the spacemap object.
3011 * If so, update the vdev_ms_array.
3012 */
3013 if (space_map_object(msp->ms_sm) != object) {
3014 object = space_map_object(msp->ms_sm);
3015 dmu_write(spa->spa_meta_objset,
3016 msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
3017 msp->ms_id, sizeof (uint64_t), &object, tx);
3018 }
3019
3020 /*
3021 * Note:
3022 * When the log space map feature is enabled, each space map will
3023 * always have ALLOCS followed by FREES for each sync pass. This is
3024 * typically true even when the log space map feature is disabled,
3025 * except from the case where a metaslab goes through metaslab_sync()
3026 * and gets condensed. In that case the metaslab's space map will have
3027 * ALLOCS followed by FREES (due to condensing) followed by ALLOCS
3028 * followed by FREES (due to space_map_write() in metaslab_sync()) for
3029 * sync pass 1.
3030 */
3031 space_map_write(sm, condense_tree, SM_ALLOC, SM_NO_VDEVID, tx);
3032 space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
3033
3034 range_tree_vacate(condense_tree, NULL, NULL);
3035 range_tree_destroy(condense_tree);
3036 mutex_enter(&msp->ms_lock);
3037
3038 msp->ms_condensing = B_FALSE;
3039 metaslab_flush_update(msp, tx);
3040 }
3041
3042 /*
3043 * Called when the metaslab has been flushed (its own spacemap now reflects
3044 * all the contents of the pool-wide spacemap log). Updates the metaslab's
3045 * metadata and any pool-wide related log space map data (e.g. summary,
3046 * obsolete logs, etc..) to reflect that.
3047 */
3048 static void
3049 metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
3050 {
3051 metaslab_group_t *mg = msp->ms_group;
3052 spa_t *spa = mg->mg_vd->vdev_spa;
3053
3054 ASSERT(MUTEX_HELD(&msp->ms_lock));
3055
3056 ASSERT3U(spa_sync_pass(spa), ==, 1);
3057 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3058 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3059
3060 /*
3061 * Just because a metaslab got flushed, that doesn't mean that
3062 * it will pass through metaslab_sync_done(). Thus, make sure to
3063 * update ms_synced_length here in case it doesn't.
3064 */
3065 msp->ms_synced_length = space_map_length(msp->ms_sm);
3066
3067 /*
3068 * We may end up here from metaslab_condense() without the
3069 * feature being active. In that case this is a no-op.
3070 */
3071 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
3072 return;
3073
3074 ASSERT(spa_syncing_log_sm(spa) != NULL);
3075 ASSERT(msp->ms_sm != NULL);
3076 ASSERT(metaslab_unflushed_txg(msp) != 0);
3077 ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
3078
3079 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
3080
3081 /* update metaslab's position in our flushing tree */
3082 uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
3083 mutex_enter(&spa->spa_flushed_ms_lock);
3084 avl_remove(&spa->spa_metaslabs_by_flushed, msp);
3085 metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3086 avl_add(&spa->spa_metaslabs_by_flushed, msp);
3087 mutex_exit(&spa->spa_flushed_ms_lock);
3088
3089 /* update metaslab counts of spa_log_sm_t nodes */
3090 spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
3091 spa_log_sm_increment_current_mscount(spa);
3092
3093 /* cleanup obsolete logs if any */
3094 uint64_t log_blocks_before = spa_log_sm_nblocks(spa);
3095 spa_cleanup_old_sm_logs(spa, tx);
3096 uint64_t log_blocks_after = spa_log_sm_nblocks(spa);
3097 VERIFY3U(log_blocks_after, <=, log_blocks_before);
3098
3099 /* update log space map summary */
3100 uint64_t blocks_gone = log_blocks_before - log_blocks_after;
3101 spa_log_summary_add_flushed_metaslab(spa);
3102 spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg);
3103 spa_log_summary_decrement_blkcount(spa, blocks_gone);
3104 }
3105
3106 boolean_t
3107 metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
3108 {
3109 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3110
3111 ASSERT(MUTEX_HELD(&msp->ms_lock));
3112 ASSERT3U(spa_sync_pass(spa), ==, 1);
3113 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
3114
3115 ASSERT(msp->ms_sm != NULL);
3116 ASSERT(metaslab_unflushed_txg(msp) != 0);
3117 ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
3118
3119 /*
3120 * There is nothing wrong with flushing the same metaslab twice, as
3121 * this codepath should work in that case. However, the current
3122 * flushing scheme makes sure to avoid this situation as we would be
3123 * making all these calls without having anything meaningful to write
3124 * to disk. We assert this behavior here.
3125 */
3126 ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
3127
3128 /*
3129 * We can not flush while loading, because then we would
3130 * not load the ms_unflushed_{allocs,frees}.
3131 */
3132 if (msp->ms_loading)
3133 return (B_FALSE);
3134
3135 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3136 metaslab_verify_weight_and_frag(msp);
3137
3138 /*
3139 * Metaslab condensing is effectively flushing. Therefore if the
3140 * metaslab can be condensed we can just condense it instead of
3141 * flushing it.
3142 *
3143 * Note that metaslab_condense() does call metaslab_flush_update()
3144 * so we can just return immediately after condensing. We also
3145 * don't need to care about setting ms_flushing or broadcasting
3146 * ms_flush_cv, even if we temporarily drop the ms_lock in
3147 * metaslab_condense(), as the metaslab is already loaded.
3148 */
3149 if (msp->ms_loaded && metaslab_should_condense(msp)) {
3150 metaslab_group_t *mg = msp->ms_group;
3151
3152 /*
3153 * For all histogram operations below refer to the
3154 * comments of metaslab_sync() where we follow a
3155 * similar procedure.
3156 */
3157 metaslab_group_histogram_verify(mg);
3158 metaslab_class_histogram_verify(mg->mg_class);
3159 metaslab_group_histogram_remove(mg, msp);
3160
3161 metaslab_condense(msp, tx);
3162
3163 space_map_histogram_clear(msp->ms_sm);
3164 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
3165 ASSERT(range_tree_is_empty(msp->ms_freed));
3166 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3167 space_map_histogram_add(msp->ms_sm,
3168 msp->ms_defer[t], tx);
3169 }
3170 metaslab_aux_histograms_update(msp);
3171
3172 metaslab_group_histogram_add(mg, msp);
3173 metaslab_group_histogram_verify(mg);
3174 metaslab_class_histogram_verify(mg->mg_class);
3175
3176 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3177
3178 /*
3179 * Since we recreated the histogram (and potentially
3180 * the ms_sm too while condensing) ensure that the
3181 * weight is updated too because we are not guaranteed
3182 * that this metaslab is dirty and will go through
3183 * metaslab_sync_done().
3184 */
3185 metaslab_recalculate_weight_and_sort(msp);
3186 return (B_TRUE);
3187 }
3188
3189 msp->ms_flushing = B_TRUE;
3190 uint64_t sm_len_before = space_map_length(msp->ms_sm);
3191
3192 mutex_exit(&msp->ms_lock);
3193 space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
3194 SM_NO_VDEVID, tx);
3195 space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
3196 SM_NO_VDEVID, tx);
3197 mutex_enter(&msp->ms_lock);
3198
3199 uint64_t sm_len_after = space_map_length(msp->ms_sm);
3200 if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
3201 zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
3202 "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
3203 "appended %llu bytes", dmu_tx_get_txg(tx), spa_name(spa),
3204 msp->ms_group->mg_vd->vdev_id, msp->ms_id,
3205 range_tree_space(msp->ms_unflushed_allocs),
3206 range_tree_space(msp->ms_unflushed_frees),
3207 (sm_len_after - sm_len_before));
3208 }
3209
3210 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3211 metaslab_unflushed_changes_memused(msp));
3212 spa->spa_unflushed_stats.sus_memused -=
3213 metaslab_unflushed_changes_memused(msp);
3214 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3215 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3216
3217 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3218 metaslab_verify_weight_and_frag(msp);
3219
3220 metaslab_flush_update(msp, tx);
3221
3222 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3223 metaslab_verify_weight_and_frag(msp);
3224
3225 msp->ms_flushing = B_FALSE;
3226 cv_broadcast(&msp->ms_flush_cv);
3227 return (B_TRUE);
3228 }
3229
3230 /*
3231 * Write a metaslab to disk in the context of the specified transaction group.
3232 */
3233 void
3234 metaslab_sync(metaslab_t *msp, uint64_t txg)
3235 {
3236 metaslab_group_t *mg = msp->ms_group;
3237 vdev_t *vd = mg->mg_vd;
3238 spa_t *spa = vd->vdev_spa;
3239 objset_t *mos = spa_meta_objset(spa);
3240 range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
3241 dmu_tx_t *tx;
3242
3243 ASSERT(!vd->vdev_ishole);
3244
3245 /*
3246 * This metaslab has just been added so there's no work to do now.
3247 */
3248 if (msp->ms_freeing == NULL) {
3249 ASSERT3P(alloctree, ==, NULL);
3250 return;
3251 }
3252
3253 ASSERT3P(alloctree, !=, NULL);
3254 ASSERT3P(msp->ms_freeing, !=, NULL);
3255 ASSERT3P(msp->ms_freed, !=, NULL);
3256 ASSERT3P(msp->ms_checkpointing, !=, NULL);
3257 ASSERT3P(msp->ms_trim, !=, NULL);
3258
3259 /*
3260 * Normally, we don't want to process a metaslab if there are no
3261 * allocations or frees to perform. However, if the metaslab is being
3262 * forced to condense and it's loaded, we need to let it through.
3263 */
3264 if (range_tree_is_empty(alloctree) &&
3265 range_tree_is_empty(msp->ms_freeing) &&
3266 range_tree_is_empty(msp->ms_checkpointing) &&
3267 !(msp->ms_loaded && msp->ms_condense_wanted))
3268 return;
3269
3271 VERIFY(txg <= spa_final_dirty_txg(spa));
3272
3273 /*
3274 * The only state that can actually be changing concurrently
3275 * with metaslab_sync() is the metaslab's ms_allocatable. No
3276 * other thread can be modifying this txg's alloc, freeing,
3277 * freed, or space_map_phys_t. We drop ms_lock whenever we
3278 * could call into the DMU, because the DMU can call down to
3279 * us (e.g. via zio_free()) at any time.
3280 *
3281 * The spa_vdev_remove_thread() can be reading metaslab state
3282 * concurrently, and it is locked out by the ms_sync_lock.
3283 * Note that the ms_lock is insufficient for this, because it
3284 * is dropped by space_map_write().
3285 */
3286 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
3287
3288 /*
3289 * Generate a log space map if one doesn't exist already.
3290 */
3291 spa_generate_syncing_log_sm(spa, tx);
3292
3293 if (msp->ms_sm == NULL) {
3294 uint64_t new_object = space_map_alloc(mos,
3295 spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3296 zfs_metaslab_sm_blksz_with_log :
3297 zfs_metaslab_sm_blksz_no_log, tx);
3298 VERIFY3U(new_object, !=, 0);
3299
3300 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
3301 msp->ms_id, sizeof (uint64_t), &new_object, tx);
3302
3303 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
3304 msp->ms_start, msp->ms_size, vd->vdev_ashift));
3305 ASSERT(msp->ms_sm != NULL);
3306
3307 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3308 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3309 ASSERT0(metaslab_allocated_space(msp));
3310 }
3311
3312 if (metaslab_unflushed_txg(msp) == 0 &&
3313 spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
3314 ASSERT(spa_syncing_log_sm(spa) != NULL);
3315
3316 metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3317 spa_log_sm_increment_current_mscount(spa);
3318 spa_log_summary_add_flushed_metaslab(spa);
3319
3320 ASSERT(msp->ms_sm != NULL);
3321 mutex_enter(&spa->spa_flushed_ms_lock);
3322 avl_add(&spa->spa_metaslabs_by_flushed, msp);
3323 mutex_exit(&spa->spa_flushed_ms_lock);
3324
3325 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3326 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3327 }
3328
3329 if (!range_tree_is_empty(msp->ms_checkpointing) &&
3330 vd->vdev_checkpoint_sm == NULL) {
3331 ASSERT(spa_has_checkpoint(spa));
3332
3333 uint64_t new_object = space_map_alloc(mos,
3334 zfs_vdev_standard_sm_blksz, tx);
3335 VERIFY3U(new_object, !=, 0);
3336
3337 VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
3338 mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
3339 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
3340
3341 /*
3342 * We save the space map object as an entry in vdev_top_zap
3343 * so it can be retrieved when the pool is reopened after an
3344 * export or through zdb.
3345 */
3346 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
3347 vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
3348 sizeof (new_object), 1, &new_object, tx));
3349 }
3350
3351 mutex_enter(&msp->ms_sync_lock);
3352 mutex_enter(&msp->ms_lock);
3353
3354 /*
3355 * Note: metaslab_condense() clears the space map's histogram.
3356 * Therefore we must verify and remove this histogram before
3357 * condensing.
3358 */
3359 metaslab_group_histogram_verify(mg);
3360 metaslab_class_histogram_verify(mg->mg_class);
3361 metaslab_group_histogram_remove(mg, msp);
3362
3363 if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
3364 metaslab_should_condense(msp))
3365 metaslab_condense(msp, tx);
3366
3367 /*
3368 * We'll be going to disk to sync our space accounting, thus we
3369 * drop the ms_lock during that time so allocations coming from
3370 * open-context (ZIL) for future TXGs do not block.
3371 */
3372 mutex_exit(&msp->ms_lock);
3373 space_map_t *log_sm = spa_syncing_log_sm(spa);
3374 if (log_sm != NULL) {
3375 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
3376
3377 space_map_write(log_sm, alloctree, SM_ALLOC,
3378 vd->vdev_id, tx);
3379 space_map_write(log_sm, msp->ms_freeing, SM_FREE,
3380 vd->vdev_id, tx);
3381 mutex_enter(&msp->ms_lock);
3382
3383 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3384 metaslab_unflushed_changes_memused(msp));
3385 spa->spa_unflushed_stats.sus_memused -=
3386 metaslab_unflushed_changes_memused(msp);
3387 range_tree_remove_xor_add(alloctree,
3388 msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
3389 range_tree_remove_xor_add(msp->ms_freeing,
3390 msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
3391 spa->spa_unflushed_stats.sus_memused +=
3392 metaslab_unflushed_changes_memused(msp);
3393 } else {
3394 ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
3395
3396 space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
3397 SM_NO_VDEVID, tx);
3398 space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
3399 SM_NO_VDEVID, tx);
3400 mutex_enter(&msp->ms_lock);
3401 }
3402
3403 msp->ms_allocated_space += range_tree_space(alloctree);
3404 ASSERT3U(msp->ms_allocated_space, >=,
3405 range_tree_space(msp->ms_freeing));
3406 msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);
3407
3408 if (!range_tree_is_empty(msp->ms_checkpointing)) {
3409 ASSERT(spa_has_checkpoint(spa));
3410 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
3411
3412 /*
3413 * Since we are doing writes to disk and the ms_checkpointing
3414 * tree won't be changing during that time, we drop the
3415 * ms_lock while writing to the checkpoint space map, for the
3416 * same reason mentioned above.
3417 */
3418 mutex_exit(&msp->ms_lock);
3419 space_map_write(vd->vdev_checkpoint_sm,
3420 msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
3421 mutex_enter(&msp->ms_lock);
3422
3423 spa->spa_checkpoint_info.sci_dspace +=
3424 range_tree_space(msp->ms_checkpointing);
3425 vd->vdev_stat.vs_checkpoint_space +=
3426 range_tree_space(msp->ms_checkpointing);
3427 ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
3428 -space_map_allocated(vd->vdev_checkpoint_sm));
3429
3430 range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
3431 }
3432
3433 if (msp->ms_loaded) {
3434 /*
3435 * When the space map is loaded, we have an accurate
3436 * histogram in the range tree. This gives us an opportunity
3437 * to bring the space map's histogram up-to-date so we clear
3438 * it first before updating it.
3439 */
3440 space_map_histogram_clear(msp->ms_sm);
3441 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
3442
3443 /*
3444 * Since we've cleared the histogram we need to add back
3445 * any free space that has already been processed, plus
3446 * any deferred space. This allows the on-disk histogram
3447 * to accurately reflect all free space even if some space
3448 * is not yet available for allocation (i.e. deferred).
3449 */
3450 space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
3451
3452 /*
3453 * Add back any deferred free space that has not been
3454 * added back into the in-core free tree yet. This will
3455 * ensure that we don't end up with a space map histogram
3456 * that is completely empty unless the metaslab is fully
3457 * allocated.
3458 */
3459 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3460 space_map_histogram_add(msp->ms_sm,
3461 msp->ms_defer[t], tx);
3462 }
3463 }
3464
3465 /*
3466 * Always add the free space from this sync pass to the space
3467 * map histogram. We want to make sure that the on-disk histogram
3468 * accounts for all free space. If the space map is not loaded,
3469 * then we will lose some accuracy but will correct it the next
3470 * time we load the space map.
3471 */
3472 space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
3473 metaslab_aux_histograms_update(msp);
3474
3475 metaslab_group_histogram_add(mg, msp);
3476 metaslab_group_histogram_verify(mg);
3477 metaslab_class_histogram_verify(mg->mg_class);
3478
3479 /*
3480 * For sync pass 1, we avoid traversing this txg's free range tree
3481 * and instead will just swap the pointers for freeing and freed.
3482 * We can safely do this since the freed_tree is guaranteed to be
3483 * empty on the initial pass.
3484 *
3485 * Keep in mind that even if we are currently using a log spacemap
3486 * we want current frees to end up in the ms_allocatable (but not
3487 * get appended to the ms_sm) so their ranges can be reused as usual.
3488 */
3489 if (spa_sync_pass(spa) == 1) {
3490 range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
3491 ASSERT0(msp->ms_allocated_this_txg);
3492 } else {
3493 range_tree_vacate(msp->ms_freeing,
3494 range_tree_add, msp->ms_freed);
3495 }
3496 msp->ms_allocated_this_txg += range_tree_space(alloctree);
3497 range_tree_vacate(alloctree, NULL, NULL);
3498
3499 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
3500 ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
3501 & TXG_MASK]));
3502 ASSERT0(range_tree_space(msp->ms_freeing));
3503 ASSERT0(range_tree_space(msp->ms_checkpointing));
3504
3505 mutex_exit(&msp->ms_lock);
3506
3507 /*
3508 * Verify that the space map object ID has been recorded in the
3509 * vdev_ms_array.
3510 */
3511 uint64_t object;
3512 VERIFY0(dmu_read(mos, vd->vdev_ms_array,
3513 msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
3514 VERIFY3U(object, ==, space_map_object(msp->ms_sm));
3515
3516 mutex_exit(&msp->ms_sync_lock);
3517 dmu_tx_commit(tx);
3518 }
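/*
 * Illustrative sketch, assuming TXG_SIZE == 4 (so TXG_MASK == 3): the
 * per-txg trees used by metaslab_sync() above are selected by masking the
 * txg with TXG_MASK, so txg 1000 maps to slot 0, txg 1001 to slot 1, and
 * so on; because at most TXG_CONCURRENT_STATES txgs are in flight, slots
 * are recycled safely. A hypothetical helper (not an existing function)
 * making the indexing explicit:
 */
#if 0
static range_tree_t *
metaslab_alloctree_for_txg(metaslab_t *msp, uint64_t txg)
{
	/* e.g. txg 1000 -> ms_allocating[0], txg 1003 -> ms_allocating[3] */
	return (msp->ms_allocating[txg & TXG_MASK]);
}
#endif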
3519
3520 void
3521 metaslab_potentially_unload(metaslab_t *msp, uint64_t txg)
3522 {
3523 /*
3524 * If the metaslab is loaded and we've not tried to load or allocate
3525 * from it in 'metaslab_unload_delay' txgs, then unload it.
3526 */
3527 if (msp->ms_loaded &&
3528 msp->ms_disabled == 0 &&
3529 msp->ms_selected_txg + metaslab_unload_delay < txg) {
3530 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
3531 VERIFY0(range_tree_space(
3532 msp->ms_allocating[(txg + t) & TXG_MASK]));
3533 }
3534 if (msp->ms_allocator != -1) {
3535 metaslab_passivate(msp, msp->ms_weight &
3536 ~METASLAB_ACTIVE_MASK);
3537 }
3538
3539 if (!metaslab_debug_unload)
3540 metaslab_unload(msp);
3541 }
3542 }
3543
3544 /*
3545 * Called after a transaction group has completely synced to mark
3546 * all of the metaslab's free space as usable.
3547 */
3548 void
3549 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
3550 {
3551 metaslab_group_t *mg = msp->ms_group;
3552 vdev_t *vd = mg->mg_vd;
3553 spa_t *spa = vd->vdev_spa;
3554 range_tree_t **defer_tree;
3555 int64_t alloc_delta, defer_delta;
3556 boolean_t defer_allowed = B_TRUE;
3557
3558 ASSERT(!vd->vdev_ishole);
3559
3560 mutex_enter(&msp->ms_lock);
3561
3562 /*
3563 * If this metaslab is just becoming available, initialize its
3564 * range trees and add its capacity to the vdev.
3565 */
3566 if (msp->ms_freed == NULL) {
3567 for (int t = 0; t < TXG_SIZE; t++) {
3568 ASSERT(msp->ms_allocating[t] == NULL);
3569
3570 msp->ms_allocating[t] = range_tree_create(NULL, NULL);
3571 }
3572
3573 ASSERT3P(msp->ms_freeing, ==, NULL);
3574 msp->ms_freeing = range_tree_create(NULL, NULL);
3575
3576 ASSERT3P(msp->ms_freed, ==, NULL);
3577 msp->ms_freed = range_tree_create(NULL, NULL);
3578
3579 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3580 ASSERT3P(msp->ms_defer[t], ==, NULL);
3581 msp->ms_defer[t] = range_tree_create(NULL, NULL);
3582 }
3583
3584 ASSERT3P(msp->ms_checkpointing, ==, NULL);
3585 msp->ms_checkpointing = range_tree_create(NULL, NULL);
3586
3587 ASSERT3P(msp->ms_unflushed_allocs, ==, NULL);
3588 msp->ms_unflushed_allocs = range_tree_create(NULL, NULL);
3589 ASSERT3P(msp->ms_unflushed_frees, ==, NULL);
3590 msp->ms_unflushed_frees = range_tree_create(NULL, NULL);
3591
3592 metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
3593 }
3594 ASSERT0(range_tree_space(msp->ms_freeing));
3595 ASSERT0(range_tree_space(msp->ms_checkpointing));
3596
3597 defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
3598
3599 uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
3600 metaslab_class_get_alloc(spa_normal_class(spa));
3601 if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
3602 defer_allowed = B_FALSE;
3603 }
3604
3605 defer_delta = 0;
3606 alloc_delta = msp->ms_allocated_this_txg -
3607 range_tree_space(msp->ms_freed);
3608
3609 if (defer_allowed) {
3610 defer_delta = range_tree_space(msp->ms_freed) -
3611 range_tree_space(*defer_tree);
3612 } else {
3613 defer_delta -= range_tree_space(*defer_tree);
3614 }
3615 metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
3616 defer_delta, 0);
3617
3618 if (spa_syncing_log_sm(spa) == NULL) {
3619 /*
3620 * If there's a metaslab_load() in progress and we don't have
3621 * a log space map, it means that we probably wrote to the
3622 * metaslab's space map. If this is the case, we need to
3623 * make sure that we wait for the load to complete so that we
3624 * have a consistent view of the in-core side of the metaslab.
3625 */
3626 metaslab_load_wait(msp);
3627 } else {
3628 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
3629 }
3630
3631 /*
3632 * When auto-trimming is enabled, free ranges which are added to
3633 * ms_allocatable are also added to ms_trim. The ms_trim tree is
3634 * periodically consumed by the vdev_autotrim_thread() which issues
3635 * trims for all ranges and then vacates the tree. The ms_trim tree
3636 * can be discarded at any time with the sole consequence of recent
3637 * frees not being trimmed.
3638 */
3639 if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
3640 range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
3641 if (!defer_allowed) {
3642 range_tree_walk(msp->ms_freed, range_tree_add,
3643 msp->ms_trim);
3644 }
3645 } else {
3646 range_tree_vacate(msp->ms_trim, NULL, NULL);
3647 }
3648
3649 /*
3650 * Move the frees from the defer_tree back to the free
3651 * range tree (if it's loaded). Swap the freed_tree and
3652 * the defer_tree -- this is safe to do because we've
3653 * just emptied out the defer_tree.
3654 */
3655 range_tree_vacate(*defer_tree,
3656 msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
3657 if (defer_allowed) {
3658 range_tree_swap(&msp->ms_freed, defer_tree);
3659 } else {
3660 range_tree_vacate(msp->ms_freed,
3661 msp->ms_loaded ? range_tree_add : NULL,
3662 msp->ms_allocatable);
3663 }
3664
3665 msp->ms_synced_length = space_map_length(msp->ms_sm);
3666
3667 msp->ms_deferspace += defer_delta;
3668 ASSERT3S(msp->ms_deferspace, >=, 0);
3669 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
3670 if (msp->ms_deferspace != 0) {
3671 /*
3672 * Keep syncing this metaslab until all deferred frees
3673 * are back in circulation.
3674 */
3675 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
3676 }
3677 metaslab_aux_histograms_update_done(msp, defer_allowed);
3678
3679 if (msp->ms_new) {
3680 msp->ms_new = B_FALSE;
3681 mutex_enter(&mg->mg_lock);
3682 mg->mg_ms_ready++;
3683 mutex_exit(&mg->mg_lock);
3684 }
3685
3686 /*
3687 * Re-sort metaslab within its group now that we've adjusted
3688 * its allocatable space.
3689 */
3690 metaslab_recalculate_weight_and_sort(msp);
3691
3692 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
3693 ASSERT0(range_tree_space(msp->ms_freeing));
3694 ASSERT0(range_tree_space(msp->ms_freed));
3695 ASSERT0(range_tree_space(msp->ms_checkpointing));
3696
3697 msp->ms_allocated_this_txg = 0;
3698 mutex_exit(&msp->ms_lock);
3699 }
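/*
 * Illustrative note, assuming TXG_DEFER_SIZE == 2: a segment freed in
 * txg N is swapped from ms_freed into ms_defer[N % 2] by the
 * metaslab_sync_done() call for txg N, and is only vacated back into
 * ms_allocatable when metaslab_sync_done() runs for txg N + 2 and reuses
 * that same defer slot. For example, frees from txg 100 land in
 * ms_defer[0] and become allocatable again once txg 102 has fully synced.
 */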
3700
3701 void
3702 metaslab_sync_reassess(metaslab_group_t *mg)
3703 {
3704 spa_t *spa = mg->mg_class->mc_spa;
3705
3706 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3707 metaslab_group_alloc_update(mg);
3708 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
3709
3710 /*
3711 * Preload the next potential metaslabs but only on active
3712 * metaslab groups. We can get into a state where the metaslab
3713 * is no longer active since we dirty metaslabs as we remove
3714 * a device, thus potentially making the metaslab group eligible
3715 * for preloading.
3716 */
3717 if (mg->mg_activation_count > 0) {
3718 metaslab_group_preload(mg);
3719 }
3720 spa_config_exit(spa, SCL_ALLOC, FTAG);
3721 }
3722
3723 /*
3724 * When writing a ditto block (i.e. more than one DVA for a given BP) on
3725 * the same vdev as an existing DVA of this BP, then try to allocate it
3726 * on a different metaslab than existing DVAs (i.e. a unique metaslab).
3727 */
3728 static boolean_t
3729 metaslab_is_unique(metaslab_t *msp, dva_t *dva)
3730 {
3731 uint64_t dva_ms_id;
3732
3733 if (DVA_GET_ASIZE(dva) == 0)
3734 return (B_TRUE);
3735
3736 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
3737 return (B_TRUE);
3738
3739 dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
3740
3741 return (msp->ms_id != dva_ms_id);
3742 }
3743
3744 /*
3745 * ==========================================================================
3746 * Metaslab allocation tracing facility
3747 * ==========================================================================
3748 */
3749 #ifdef _METASLAB_TRACING
3750 kstat_t *metaslab_trace_ksp;
3751 kstat_named_t metaslab_trace_over_limit;
3752
3753 void
3754 metaslab_alloc_trace_init(void)
3755 {
3756 ASSERT(metaslab_alloc_trace_cache == NULL);
3757 metaslab_alloc_trace_cache = kmem_cache_create(
3758 "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
3759 0, NULL, NULL, NULL, NULL, NULL, 0);
3760 metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
3761 "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
3762 if (metaslab_trace_ksp != NULL) {
3763 metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
3764 kstat_named_init(&metaslab_trace_over_limit,
3765 "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
3766 kstat_install(metaslab_trace_ksp);
3767 }
3768 }
3769
3770 void
3771 metaslab_alloc_trace_fini(void)
3772 {
3773 if (metaslab_trace_ksp != NULL) {
3774 kstat_delete(metaslab_trace_ksp);
3775 metaslab_trace_ksp = NULL;
3776 }
3777 kmem_cache_destroy(metaslab_alloc_trace_cache);
3778 metaslab_alloc_trace_cache = NULL;
3779 }
3780
3781 /*
3782 * Add an allocation trace element to the allocation tracing list.
3783 */
3784 static void
3785 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
3786 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
3787 int allocator)
3788 {
3789 metaslab_alloc_trace_t *mat;
3790
3791 if (!metaslab_trace_enabled)
3792 return;
3793
3794 /*
3795 * When the tracing list reaches its maximum we remove
3796 * the second element in the list before adding a new one.
3797 * By removing the second element we preserve the original
3798 * entry as a clue to what allocation steps have already been
3799 * performed.
3800 */
3801 if (zal->zal_size == metaslab_trace_max_entries) {
3802 metaslab_alloc_trace_t *mat_next;
3803 #ifdef DEBUG
3804 panic("too many entries in allocation list");
3805 #endif
3806 atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
3807 zal->zal_size--;
3808 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
3809 list_remove(&zal->zal_list, mat_next);
3810 kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
3811 }
3812
3813 mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
3814 list_link_init(&mat->mat_list_node);
3815 mat->mat_mg = mg;
3816 mat->mat_msp = msp;
3817 mat->mat_size = psize;
3818 mat->mat_dva_id = dva_id;
3819 mat->mat_offset = offset;
3820 mat->mat_weight = 0;
3821 mat->mat_allocator = allocator;
3822
3823 if (msp != NULL)
3824 mat->mat_weight = msp->ms_weight;
3825
3826 /*
3827 * The list is part of the zio so locking is not required. Only
3828 * a single thread will perform allocations for a given zio.
3829 */
3830 list_insert_tail(&zal->zal_list, mat);
3831 zal->zal_size++;
3832
3833 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
3834 }
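/*
 * Worked example, with a hypothetical metaslab_trace_max_entries of 3:
 * tracing entries E1, E2, E3, E4 in order leaves the list as
 * [E1, E3, E4] on a non-DEBUG build -- the head entry (E1) is preserved
 * as described above, and the second-oldest entry is the one discarded
 * each time the limit is hit (a DEBUG build panics instead).
 */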
3835
3836 void
3837 metaslab_trace_init(zio_alloc_list_t *zal)
3838 {
3839 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
3840 offsetof(metaslab_alloc_trace_t, mat_list_node));
3841 zal->zal_size = 0;
3842 }
3843
3844 void
3845 metaslab_trace_fini(zio_alloc_list_t *zal)
3846 {
3847 metaslab_alloc_trace_t *mat;
3848
3849 while ((mat = list_remove_head(&zal->zal_list)) != NULL)
3850 kmem_cache_free(metaslab_alloc_trace_cache, mat);
3851 list_destroy(&zal->zal_list);
3852 zal->zal_size = 0;
3853 }
3854 #else
3855
3856 #define metaslab_trace_add(zal, mg, msp, psize, id, off, alloc)
3857
3858 void
3859 metaslab_alloc_trace_init(void)
3860 {
3861 }
3862
3863 void
3864 metaslab_alloc_trace_fini(void)
3865 {
3866 }
3867
3868 void
3869 metaslab_trace_init(zio_alloc_list_t *zal)
3870 {
3871 }
3872
3873 void
3874 metaslab_trace_fini(zio_alloc_list_t *zal)
3875 {
3876 }
3877
3878 #endif /* _METASLAB_TRACING */
3879
3880 /*
3881 * ==========================================================================
3882 * Metaslab block operations
3883 * ==========================================================================
3884 */
3885
3886 static void
3887 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
3888 int allocator)
3889 {
3890 if (!(flags & METASLAB_ASYNC_ALLOC) ||
3891 (flags & METASLAB_DONT_THROTTLE))
3892 return;
3893
3894 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3895 if (!mg->mg_class->mc_alloc_throttle_enabled)
3896 return;
3897
3898 (void) zfs_refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
3899 }
3900
3901 static void
3902 metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
3903 {
3904 uint64_t max = mg->mg_max_alloc_queue_depth;
3905 uint64_t cur = mg->mg_cur_max_alloc_queue_depth[allocator];
3906 while (cur < max) {
3907 if (atomic_cas_64(&mg->mg_cur_max_alloc_queue_depth[allocator],
3908 cur, cur + 1) == cur) {
3909 atomic_inc_64(
3910 &mg->mg_class->mc_alloc_max_slots[allocator]);
3911 return;
3912 }
3913 cur = mg->mg_cur_max_alloc_queue_depth[allocator];
3914 }
3915 }
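/*
 * Note: the loop above is a lock-free "increment, but never past the cap"
 * pattern. atomic_cas_64() only stores cur + 1 when the queue depth still
 * equals cur; if another thread raced in, the CAS fails, cur is re-read,
 * and the loop retries until either the increment lands or cur reaches
 * mg_max_alloc_queue_depth.
 */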
3916
3917 void
3918 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
3919 int allocator, boolean_t io_complete)
3920 {
3921 if (!(flags & METASLAB_ASYNC_ALLOC) ||
3922 (flags & METASLAB_DONT_THROTTLE))
3923 return;
3924
3925 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3926 if (!mg->mg_class->mc_alloc_throttle_enabled)
3927 return;
3928
3929 (void) zfs_refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
3930 if (io_complete)
3931 metaslab_group_increment_qdepth(mg, allocator);
3932 }
3933
3934 void
3935 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
3936 int allocator)
3937 {
3938 #ifdef ZFS_DEBUG
3939 const dva_t *dva = bp->blk_dva;
3940 int ndvas = BP_GET_NDVAS(bp);
3941
3942 for (int d = 0; d < ndvas; d++) {
3943 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
3944 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3945 VERIFY(zfs_refcount_not_held(
3946 &mg->mg_alloc_queue_depth[allocator], tag));
3947 }
3948 #endif
3949 }
3950
3951 static uint64_t
3952 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
3953 {
3954 uint64_t start;
3955 range_tree_t *rt = msp->ms_allocatable;
3956 metaslab_class_t *mc = msp->ms_group->mg_class;
3957
3958 ASSERT(MUTEX_HELD(&msp->ms_lock));
3959 VERIFY(!msp->ms_condensing);
3960 VERIFY0(msp->ms_disabled);
3961
3962 start = mc->mc_ops->msop_alloc(msp, size);
3963 if (start != -1ULL) {
3964 metaslab_group_t *mg = msp->ms_group;
3965 vdev_t *vd = mg->mg_vd;
3966
3967 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
3968 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3969 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
3970 range_tree_remove(rt, start, size);
3971 range_tree_clear(msp->ms_trim, start, size);
3972
3973 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
3974 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
3975
3976 range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
3977
3978 /* Track the last successful allocation */
3979 msp->ms_alloc_txg = txg;
3980 metaslab_verify_space(msp, txg);
3981 }
3982
3983 /*
3984 * Now that we've attempted the allocation we need to update the
3985 * metaslab's maximum block size since it may have changed.
3986 */
3987 msp->ms_max_size = metaslab_block_maxsize(msp);
3988 return (start);
3989 }
3990
3991 /*
3992 * Find the metaslab with the highest weight that is less than what we've
3993 * already tried. In the common case, this means that we will examine each
3994 * metaslab at most once. Note that concurrent callers could reorder metaslabs
3995 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
3996 * activated by another thread, and we fail to allocate from the metaslab we
3997 * have selected, we may not try the newly-activated metaslab, and instead
3998 * activate another metaslab. This is not optimal, but generally does not cause
3999 * any problems (a possible exception being if every metaslab is completely full
4000 * except for the newly-activated metaslab which we fail to examine).
4001 */
4002 static metaslab_t *
4003 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
4004 dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
4005 zio_alloc_list_t *zal, metaslab_t *search, boolean_t *was_active)
4006 {
4007 avl_index_t idx;
4008 avl_tree_t *t = &mg->mg_metaslab_tree;
4009 metaslab_t *msp = avl_find(t, search, &idx);
4010 if (msp == NULL)
4011 msp = avl_nearest(t, idx, AVL_AFTER);
4012
4013 for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
4014 int i;
4015 if (!metaslab_should_allocate(msp, asize)) {
4016 metaslab_trace_add(zal, mg, msp, asize, d,
4017 TRACE_TOO_SMALL, allocator);
4018 continue;
4019 }
4020
4021 /*
4022 * If the selected metaslab is condensing or disabled,
4023 * skip it.
4024 */
4025 if (msp->ms_condensing || msp->ms_disabled > 0)
4026 continue;
4027
4028 *was_active = msp->ms_allocator != -1;
4029 /*
4030 * If we're activating as primary, this is our first allocation
4031 * from this disk, so we don't need to check how close we are.
4032 * If the metaslab under consideration was already active,
4033 * we're getting desperate enough to steal another allocator's
4034 * metaslab, so we still don't care about distances.
4035 */
4036 if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
4037 break;
4038
4039 for (i = 0; i < d; i++) {
4040 if (want_unique &&
4041 !metaslab_is_unique(msp, &dva[i]))
4042 break; /* try another metaslab */
4043 }
4044 if (i == d)
4045 break;
4046 }
4047
4048 if (msp != NULL) {
4049 search->ms_weight = msp->ms_weight;
4050 search->ms_start = msp->ms_start + 1;
4051 search->ms_allocator = msp->ms_allocator;
4052 search->ms_primary = msp->ms_primary;
4053 }
4054 return (msp);
4055 }
4056
4057 void
4058 metaslab_active_mask_verify(metaslab_t *msp)
4059 {
4060 ASSERT(MUTEX_HELD(&msp->ms_lock));
4061
4062 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
4063 return;
4064
4065 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
4066 return;
4067
4068 if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
4069 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4070 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4071 VERIFY3S(msp->ms_allocator, !=, -1);
4072 VERIFY(msp->ms_primary);
4073 return;
4074 }
4075
4076 if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
4077 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4078 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4079 VERIFY3S(msp->ms_allocator, !=, -1);
4080 VERIFY(!msp->ms_primary);
4081 return;
4082 }
4083
4084 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
4085 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4086 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4087 VERIFY3S(msp->ms_allocator, ==, -1);
4088 return;
4089 }
4090 }
4091
4092 /* ARGSUSED */
4093 static uint64_t
4094 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
4095 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva,
4096 int d, int allocator)
4097 {
4098 metaslab_t *msp = NULL;
4099 uint64_t offset = -1ULL;
4100
4101 uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
4102 for (int i = 0; i < d; i++) {
4103 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4104 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4105 activation_weight = METASLAB_WEIGHT_SECONDARY;
4106 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4107 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4108 activation_weight = METASLAB_WEIGHT_CLAIM;
4109 break;
4110 }
4111 }
4112
4113 /*
4114 * If we don't have enough metaslabs active to fill the entire array, we
4115 * just use the 0th slot.
4116 */
4117 if (mg->mg_ms_ready < mg->mg_allocators * 3)
4118 allocator = 0;
4119
4120 ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
4121
4122 metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
4123 search->ms_weight = UINT64_MAX;
4124 search->ms_start = 0;
4125 /*
4126 * At the end of the metaslab tree are the already-active metaslabs,
4127 * first the primaries, then the secondaries. When we resume searching
4128 * through the tree, we need to consider ms_allocator and ms_primary so
4129 * we start in the location right after where we left off, and don't
4130 * accidentally loop forever considering the same metaslabs.
4131 */
4132 search->ms_allocator = -1;
4133 search->ms_primary = B_TRUE;
4134 for (;;) {
4135 boolean_t was_active = B_FALSE;
4136
4137 mutex_enter(&mg->mg_lock);
4138
4139 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4140 mg->mg_primaries[allocator] != NULL) {
4141 msp = mg->mg_primaries[allocator];
4142
4143 /*
4144 * Even though we don't hold the ms_lock for the
4145 * primary metaslab, those fields should not
4146 * change while we hold the mg_lock. Thus it is
4147 * safe to make assertions on them.
4148 */
4149 ASSERT(msp->ms_primary);
4150 ASSERT3S(msp->ms_allocator, ==, allocator);
4151 ASSERT(msp->ms_loaded);
4152
4153 was_active = B_TRUE;
4154 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4155 mg->mg_secondaries[allocator] != NULL) {
4156 msp = mg->mg_secondaries[allocator];
4157
4158 /*
4159 * See comment above about the similar assertions
4160 * for the primary metaslab.
4161 */
4162 ASSERT(!msp->ms_primary);
4163 ASSERT3S(msp->ms_allocator, ==, allocator);
4164 ASSERT(msp->ms_loaded);
4165
4166 was_active = B_TRUE;
4167 } else {
4168 msp = find_valid_metaslab(mg, activation_weight, dva, d,
4169 want_unique, asize, allocator, zal, search,
4170 &was_active);
4171 }
4172
4173 mutex_exit(&mg->mg_lock);
4174 if (msp == NULL) {
4175 kmem_free(search, sizeof (*search));
4176 return (-1ULL);
4177 }
4178 mutex_enter(&msp->ms_lock);
4179
4180 metaslab_active_mask_verify(msp);
4181
4182 /*
4183 * This code is disabled because of issues with
4184 * tracepoints in non-GPL kernel modules.
4185 */
4186 #if 0
4187 DTRACE_PROBE3(ms__activation__attempt,
4188 metaslab_t *, msp, uint64_t, activation_weight,
4189 boolean_t, was_active);
4190 #endif
4191
4192 /*
4193 * Ensure that the metaslab we have selected is still
4194 * capable of handling our request. It's possible that
4195 * another thread may have changed the weight while we
4196 * were blocked on the metaslab lock. We check the
4197 * active status first to see if we need to reselect
4198 * a new metaslab.
4199 */
4200 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
4201 ASSERT3S(msp->ms_allocator, ==, -1);
4202 mutex_exit(&msp->ms_lock);
4203 continue;
4204 }
4205
4206 /*
4207 * If the metaslab was activated for another allocator
4208 * while we were waiting on the ms_lock above, or it's
4209 * a primary and we're seeking a secondary (or vice versa),
4210 * we go back and select a new metaslab.
4211 */
4212 if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
4213 (msp->ms_allocator != -1) &&
4214 (msp->ms_allocator != allocator || ((activation_weight ==
4215 METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
4216 ASSERT(msp->ms_loaded);
4217 ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
4218 msp->ms_allocator != -1);
4219 mutex_exit(&msp->ms_lock);
4220 continue;
4221 }
4222
4223 /*
4224 * This metaslab was used for claiming regions allocated
4225 * by the ZIL during pool import. Once these regions are
4226 * claimed we don't need to keep the CLAIM bit set
4227 * anymore. Passivate this metaslab to zero its activation
4228 * mask.
4229 */
4230 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
4231 activation_weight != METASLAB_WEIGHT_CLAIM) {
4232 ASSERT(msp->ms_loaded);
4233 ASSERT3S(msp->ms_allocator, ==, -1);
4234 metaslab_passivate(msp, msp->ms_weight &
4235 ~METASLAB_WEIGHT_CLAIM);
4236 mutex_exit(&msp->ms_lock);
4237 continue;
4238 }
4239
4240 msp->ms_selected_txg = txg;
4241
4242 int activation_error =
4243 metaslab_activate(msp, allocator, activation_weight);
4244 metaslab_active_mask_verify(msp);
4245
4246 /*
4247 * If the metaslab was activated by another thread for
4248 * another allocator or activation_weight (EBUSY), or it
4249 * failed because another metaslab was assigned as primary
4250 * for this allocator (EEXIST) we continue using this
4251 * metaslab for our allocation, rather than going on to a
4252 * worse metaslab (we waited for that metaslab to be loaded
4253 * after all).
4254 *
4255 * If the activation failed due to an I/O error or ENOSPC we
4256 * skip to the next metaslab.
4257 */
4258 boolean_t activated;
4259 if (activation_error == 0) {
4260 activated = B_TRUE;
4261 } else if (activation_error == EBUSY ||
4262 activation_error == EEXIST) {
4263 activated = B_FALSE;
4264 } else {
4265 mutex_exit(&msp->ms_lock);
4266 continue;
4267 }
4268 ASSERT(msp->ms_loaded);
4269
4270 /*
4271 * Now that we have the lock, recheck to see if we should
4272 * continue to use this metaslab for this allocation. The
4273 * metaslab is now loaded so metaslab_should_allocate()
4274 * can accurately determine if the allocation attempt should
4275 * proceed.
4276 */
4277 if (!metaslab_should_allocate(msp, asize)) {
4278 /* Passivate this metaslab and select a new one. */
4279 metaslab_trace_add(zal, mg, msp, asize, d,
4280 TRACE_TOO_SMALL, allocator);
4281 goto next;
4282 }
4283
4284 /*
4285 * If this metaslab is currently condensing then pick again
4286 * as we can't manipulate this metaslab until it's committed
4287 * to disk. If this metaslab is being initialized, we shouldn't
4288 * allocate from it since the allocated region might be
4289 * overwritten after allocation.
4290 */
4291 if (msp->ms_condensing) {
4292 metaslab_trace_add(zal, mg, msp, asize, d,
4293 TRACE_CONDENSING, allocator);
4294 if (activated) {
4295 metaslab_passivate(msp, msp->ms_weight &
4296 ~METASLAB_ACTIVE_MASK);
4297 }
4298 mutex_exit(&msp->ms_lock);
4299 continue;
4300 } else if (msp->ms_disabled > 0) {
4301 metaslab_trace_add(zal, mg, msp, asize, d,
4302 TRACE_DISABLED, allocator);
4303 if (activated) {
4304 metaslab_passivate(msp, msp->ms_weight &
4305 ~METASLAB_ACTIVE_MASK);
4306 }
4307 mutex_exit(&msp->ms_lock);
4308 continue;
4309 }
4310
4311 offset = metaslab_block_alloc(msp, asize, txg);
4312 metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
4313
4314 if (offset != -1ULL) {
4315 /* Proactively passivate the metaslab, if needed */
4316 if (activated)
4317 metaslab_segment_may_passivate(msp);
4318 break;
4319 }
4320 next:
4321 ASSERT(msp->ms_loaded);
4322
4323 /*
4324 * This code is disabled because of issues with
4325 * tracepoints in non-GPL kernel modules.
4326 */
4327 #if 0
4328 DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
4329 uint64_t, asize);
4330 #endif
4331
4332 /*
4333 * We were unable to allocate from this metaslab so determine
4334 * a new weight for this metaslab. Now that we have loaded
4335 * the metaslab we can provide a better hint to the metaslab
4336 * selector.
4337 *
4338 * For space-based metaslabs, we use the maximum block size.
4339 * This information is only available when the metaslab
4340 * is loaded and is more accurate than the generic free
4341 * space weight that was calculated by metaslab_weight().
4342 * This information allows us to quickly compare the maximum
4343 * available allocation in the metaslab to the allocation
4344 * size being requested.
4345 *
4346 * For segment-based metaslabs, determine the new weight
4347 * based on the highest bucket in the range tree. We
4348 * explicitly use the loaded segment weight (i.e. the range
4349 * tree histogram) since it contains the space that is
4350 * currently available for allocation and is accurate
4351 * even within a sync pass.
4352 */
4353 uint64_t weight;
4354 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
4355 weight = metaslab_block_maxsize(msp);
4356 WEIGHT_SET_SPACEBASED(weight);
4357 } else {
4358 weight = metaslab_weight_from_range_tree(msp);
4359 }
4360
4361 if (activated) {
4362 metaslab_passivate(msp, weight);
4363 } else {
4364 /*
4365 * For the case where we use the metaslab that is
4366 * active for another allocator we want to make
4367 * sure that we retain the activation mask.
4368 *
4369 * Note that we could attempt to use something like
4370 * metaslab_recalculate_weight_and_sort() that
4371 * retains the activation mask here. That function
4372 * uses metaslab_weight() to set the weight though
4373 * which is not as accurate as the calculations
4374 * above.
4375 */
4376 weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
4377 metaslab_group_sort(mg, msp, weight);
4378 }
4379 metaslab_active_mask_verify(msp);
4380
4381 /*
4382 * We have just failed an allocation attempt, check
4383 * that metaslab_should_allocate() agrees. Otherwise,
4384 * we may end up in an infinite loop retrying the same
4385 * metaslab.
4386 */
4387 ASSERT(!metaslab_should_allocate(msp, asize));
4388
4389 mutex_exit(&msp->ms_lock);
4390 }
4391 mutex_exit(&msp->ms_lock);
4392 kmem_free(search, sizeof (*search));
4393 return (offset);
4394 }
4395
4396 static uint64_t
4397 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
4398 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva,
4399 int d, int allocator)
4400 {
4401 uint64_t offset;
4402 ASSERT(mg->mg_initialized);
4403
4404 offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
4405 dva, d, allocator);
4406
4407 mutex_enter(&mg->mg_lock);
4408 if (offset == -1ULL) {
4409 mg->mg_failed_allocations++;
4410 metaslab_trace_add(zal, mg, NULL, asize, d,
4411 TRACE_GROUP_FAILURE, allocator);
4412 if (asize == SPA_GANGBLOCKSIZE) {
4413 /*
4414 * This metaslab group was unable to allocate
4415 * the minimum gang block size so it must be out of
4416 * space. We must notify the allocation throttle
4417 * to start skipping allocation attempts to this
4418 * metaslab group until more space becomes available.
4419 * Note: this failure cannot be caused by the
4420 * allocation throttle since the allocation throttle
4421 * is only responsible for skipping devices and
4422 * not failing block allocations.
4423 */
4424 mg->mg_no_free_space = B_TRUE;
4425 }
4426 }
4427 mg->mg_allocations++;
4428 mutex_exit(&mg->mg_lock);
4429 return (offset);
4430 }
4431
4432 /*
4433 * Allocate a block for the specified i/o.
4434 */
4435 int
4436 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
4437 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
4438 zio_alloc_list_t *zal, int allocator)
4439 {
4440 metaslab_group_t *mg, *fast_mg, *rotor;
4441 vdev_t *vd;
4442 boolean_t try_hard = B_FALSE;
4443
4444 ASSERT(!DVA_IS_VALID(&dva[d]));
4445
4446 /*
4447 * For testing, make some blocks above a certain size be gang blocks.
4448 * This will result in more split blocks when using device removal,
4449 * and a large number of split blocks coupled with ztest-induced
4450 * damage can result in extremely long reconstruction times. This
4451 * will also test spilling from special to normal.
4452 */
4453 if (psize >= metaslab_force_ganging && (spa_get_random(100) < 3)) {
4454 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
4455 allocator);
4456 return (SET_ERROR(ENOSPC));
4457 }
4458
4459 /*
4460 * Start at the rotor and loop through all mgs until we find something.
4461 * Note that there's no locking on mc_rotor or mc_aliquot because
4462 * nothing actually breaks if we miss a few updates -- we just won't
4463 * allocate quite as evenly. It all balances out over time.
4464 *
4465 * If we are doing ditto or log blocks, try to spread them across
4466 * consecutive vdevs. If we're forced to reuse a vdev before we've
4467 * allocated all of our ditto blocks, then try and spread them out on
4468 * that vdev as much as possible. If it turns out to not be possible,
4469 * gradually lower our standards until anything becomes acceptable.
4470 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
4471 * gives us hope of containing our fault domains to something we're
4472 * able to reason about. Otherwise, any two top-level vdev failures
4473 * will guarantee the loss of data. With consecutive allocation,
4474 * only two adjacent top-level vdev failures will result in data loss.
4475 *
4476 * If we are doing gang blocks (hintdva is non-NULL), try to keep
4477 * ourselves on the same vdev as our gang block header. That
4478 * way, we can hope for locality in vdev_cache, plus it makes our
4479 * fault domains something tractable.
4480 */
4481 if (hintdva) {
4482 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
4483
4484 /*
4485 * It's possible the vdev we're using as the hint no
4486 * longer exists or its mg has been closed (e.g. by
4487 * device removal). Consult the rotor when
4488 * all else fails.
4489 */
4490 if (vd != NULL && vd->vdev_mg != NULL) {
4491 mg = vd->vdev_mg;
4492
4493 if (flags & METASLAB_HINTBP_AVOID &&
4494 mg->mg_next != NULL)
4495 mg = mg->mg_next;
4496 } else {
4497 mg = mc->mc_rotor;
4498 }
4499 } else if (d != 0) {
4500 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
4501 mg = vd->vdev_mg->mg_next;
4502 } else if (flags & METASLAB_FASTWRITE) {
4503 mg = fast_mg = mc->mc_rotor;
4504
4505 do {
4506 if (fast_mg->mg_vd->vdev_pending_fastwrite <
4507 mg->mg_vd->vdev_pending_fastwrite)
4508 mg = fast_mg;
4509 } while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);
4510
4511 } else {
4512 ASSERT(mc->mc_rotor != NULL);
4513 mg = mc->mc_rotor;
4514 }
4515
4516 /*
4517 * If the hint put us into the wrong metaslab class, or into a
4518 * metaslab group that has been passivated, just follow the rotor.
4519 */
4520 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
4521 mg = mc->mc_rotor;
4522
4523 rotor = mg;
4524 top:
4525 do {
4526 boolean_t allocatable;
4527
4528 ASSERT(mg->mg_activation_count == 1);
4529 vd = mg->mg_vd;
4530
4531 /*
4532 * Don't allocate from faulted devices.
4533 */
4534 if (try_hard) {
4535 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
4536 allocatable = vdev_allocatable(vd);
4537 spa_config_exit(spa, SCL_ZIO, FTAG);
4538 } else {
4539 allocatable = vdev_allocatable(vd);
4540 }
4541
4542 /*
4543 * Determine if the selected metaslab group is eligible
4544 * for allocations. If we're ganging then don't allow
4545 * this metaslab group to skip allocations since that would
4546 * inadvertently return ENOSPC and suspend the pool
4547 * even though space is still available.
4548 */
4549 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
4550 allocatable = metaslab_group_allocatable(mg, rotor,
4551 psize, allocator, d);
4552 }
4553
4554 if (!allocatable) {
4555 metaslab_trace_add(zal, mg, NULL, psize, d,
4556 TRACE_NOT_ALLOCATABLE, allocator);
4557 goto next;
4558 }
4559
4560 ASSERT(mg->mg_initialized);
4561
4562 /*
4563 * Avoid writing single-copy data to a failing,
4564 * non-redundant vdev, unless we've already tried all
4565 * other vdevs.
4566 */
4567 if ((vd->vdev_stat.vs_write_errors > 0 ||
4568 vd->vdev_state < VDEV_STATE_HEALTHY) &&
4569 d == 0 && !try_hard && vd->vdev_children == 0) {
4570 metaslab_trace_add(zal, mg, NULL, psize, d,
4571 TRACE_VDEV_ERROR, allocator);
4572 goto next;
4573 }
4574
4575 ASSERT(mg->mg_class == mc);
4576
4577 uint64_t asize = vdev_psize_to_asize(vd, psize);
4578 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
4579
4580 /*
4581 * If we don't need to try hard, then require that the
4582 * block be on a different metaslab from any other DVAs
4583 * in this BP (unique=true). If we are trying hard, then
4584 * allow any metaslab to be used (unique=false).
4585 */
4586 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
4587 !try_hard, dva, d, allocator);
4588
4589 if (offset != -1ULL) {
4590 /*
4591 * If we've just selected this metaslab group,
4592 * figure out whether the corresponding vdev is
4593 * over- or under-used relative to the pool,
4594 * and set an allocation bias to even it out.
4595 *
4596 * Bias is also used to compensate for unequally
4597 * sized vdevs so that space is allocated fairly.
4598 */
4599 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
4600 vdev_stat_t *vs = &vd->vdev_stat;
4601 int64_t vs_free = vs->vs_space - vs->vs_alloc;
4602 int64_t mc_free = mc->mc_space - mc->mc_alloc;
4603 int64_t ratio;
4604
4605 /*
4606 * Calculate how much more or less we should
4607 * try to allocate from this device during
4608 * this iteration around the rotor.
4609 *
4610 * This basically introduces a zero-centered
4611 * bias towards the devices with the most
4612 * free space, while compensating for vdev
4613 * size differences.
4614 *
4615 * Examples:
4616 * vdev V1 = 16M/128M
4617 * vdev V2 = 16M/128M
4618 * ratio(V1) = 100% ratio(V2) = 100%
4619 *
4620 * vdev V1 = 16M/128M
4621 * vdev V2 = 64M/128M
4622 * ratio(V1) = 127% ratio(V2) = 72%
4623 *
4624 * vdev V1 = 16M/128M
4625 * vdev V2 = 64M/512M
4626 * ratio(V1) = 40% ratio(V2) = 160%
4627 */
4628 ratio = (vs_free * mc->mc_alloc_groups * 100) /
4629 (mc_free + 1);
4630 mg->mg_bias = ((ratio - 100) *
4631 (int64_t)mg->mg_aliquot) / 100;
4632 } else if (!metaslab_bias_enabled) {
4633 mg->mg_bias = 0;
4634 }
4635
4636 if ((flags & METASLAB_FASTWRITE) ||
4637 atomic_add_64_nv(&mc->mc_aliquot, asize) >=
4638 mg->mg_aliquot + mg->mg_bias) {
4639 mc->mc_rotor = mg->mg_next;
4640 mc->mc_aliquot = 0;
4641 }
4642
4643 DVA_SET_VDEV(&dva[d], vd->vdev_id);
4644 DVA_SET_OFFSET(&dva[d], offset);
4645 DVA_SET_GANG(&dva[d],
4646 ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
4647 DVA_SET_ASIZE(&dva[d], asize);
4648
4649 if (flags & METASLAB_FASTWRITE) {
4650 atomic_add_64(&vd->vdev_pending_fastwrite,
4651 psize);
4652 }
4653
4654 return (0);
4655 }
4656 next:
4657 mc->mc_rotor = mg->mg_next;
4658 mc->mc_aliquot = 0;
4659 } while ((mg = mg->mg_next) != rotor);
4660
4661 /*
4662 * If we haven't tried hard, do so now.
4663 */
4664 if (!try_hard) {
4665 try_hard = B_TRUE;
4666 goto top;
4667 }
4668
4669 bzero(&dva[d], sizeof (dva_t));
4670
4671 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
4672 return (SET_ERROR(ENOSPC));
4673 }
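/*
 * Worked check of the bias examples in metaslab_alloc_dva() above: for
 * V1 = 16M/128M and V2 = 64M/128M, vs_free(V1) = 112M, vs_free(V2) = 64M,
 * mc_free = 176M and mc_alloc_groups = 2, giving
 * ratio(V1) = 112 * 2 * 100 / 176 ~= 127% and
 * ratio(V2) = 64 * 2 * 100 / 176 ~= 72%, matching the comment (the "+ 1"
 * in the divisor is negligible at these sizes).
 */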
4674
4675 void
4676 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
4677 boolean_t checkpoint)
4678 {
4679 metaslab_t *msp;
4680 spa_t *spa = vd->vdev_spa;
4681
4682 ASSERT(vdev_is_concrete(vd));
4683 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
4684 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
4685
4686 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
4687
4688 VERIFY(!msp->ms_condensing);
4689 VERIFY3U(offset, >=, msp->ms_start);
4690 VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
4691 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
4692 VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
4693
4694 metaslab_check_free_impl(vd, offset, asize);
4695
4696 mutex_enter(&msp->ms_lock);
4697 if (range_tree_is_empty(msp->ms_freeing) &&
4698 range_tree_is_empty(msp->ms_checkpointing)) {
4699 vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
4700 }
4701
4702 if (checkpoint) {
4703 ASSERT(spa_has_checkpoint(spa));
4704 range_tree_add(msp->ms_checkpointing, offset, asize);
4705 } else {
4706 range_tree_add(msp->ms_freeing, offset, asize);
4707 }
4708 mutex_exit(&msp->ms_lock);
4709 }
4710
4711 /* ARGSUSED */
4712 void
4713 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
4714 uint64_t size, void *arg)
4715 {
4716 boolean_t *checkpoint = arg;
4717
4718 ASSERT3P(checkpoint, !=, NULL);
4719
4720 if (vd->vdev_ops->vdev_op_remap != NULL)
4721 vdev_indirect_mark_obsolete(vd, offset, size);
4722 else
4723 metaslab_free_impl(vd, offset, size, *checkpoint);
4724 }
4725
4726 static void
4727 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
4728 boolean_t checkpoint)
4729 {
4730 spa_t *spa = vd->vdev_spa;
4731
4732 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
4733
4734 if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
4735 return;
4736
4737 if (spa->spa_vdev_removal != NULL &&
4738 spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
4739 vdev_is_concrete(vd)) {
4740 /*
4741 * Note: we check if the vdev is concrete because when
4742 * we complete the removal, we first change the vdev to be
4743 * an indirect vdev (in open context), and then (in syncing
4744 * context) clear spa_vdev_removal.
4745 */
4746 free_from_removing_vdev(vd, offset, size);
4747 } else if (vd->vdev_ops->vdev_op_remap != NULL) {
4748 vdev_indirect_mark_obsolete(vd, offset, size);
4749 vd->vdev_ops->vdev_op_remap(vd, offset, size,
4750 metaslab_free_impl_cb, &checkpoint);
4751 } else {
4752 metaslab_free_concrete(vd, offset, size, checkpoint);
4753 }
4754 }
4755
4756 typedef struct remap_blkptr_cb_arg {
4757 blkptr_t *rbca_bp;
4758 spa_remap_cb_t rbca_cb;
4759 vdev_t *rbca_remap_vd;
4760 uint64_t rbca_remap_offset;
4761 void *rbca_cb_arg;
4762 } remap_blkptr_cb_arg_t;
4763
4764 void
4765 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
4766 uint64_t size, void *arg)
4767 {
4768 remap_blkptr_cb_arg_t *rbca = arg;
4769 blkptr_t *bp = rbca->rbca_bp;
4770
4771 /* We can not remap split blocks. */
4772 if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
4773 return;
4774 ASSERT0(inner_offset);
4775
4776 if (rbca->rbca_cb != NULL) {
4777 /*
4778 * At this point we know that we are not handling split
4779 * blocks and we invoke the callback on the previous
4780 * vdev which must be indirect.
4781 */
4782 ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
4783
4784 rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
4785 rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
4786
4787 /* set up remap_blkptr_cb_arg for the next call */
4788 rbca->rbca_remap_vd = vd;
4789 rbca->rbca_remap_offset = offset;
4790 }
4791
4792 /*
4793 * The phys birth time is that of dva[0]. This ensures that we know
4794 * when each dva was written, so that resilver can determine which
4795 * blocks need to be scrubbed (i.e. those written during the time
4796 * the vdev was offline). It also ensures that the key used in
4797 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If
4798 * we didn't change the phys_birth, a lookup in the ARC for a
4799 * remapped BP could find the data that was previously stored at
4800 * this vdev + offset.
4801 */
4802 vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
4803 DVA_GET_VDEV(&bp->blk_dva[0]));
4804 vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
4805 bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
4806 DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
4807
4808 DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
4809 DVA_SET_OFFSET(&bp->blk_dva[0], offset);
4810 }
4811
4812 /*
4813 * If the block pointer contains any indirect DVAs, modify them to refer to
4814 * concrete DVAs. Note that this will sometimes not be possible, leaving
4815 * the indirect DVA in place. This happens if the indirect DVA spans multiple
4816 * segments in the mapping (i.e. it is a "split block").
4817 *
4818 * If the BP was remapped, calls the callback on the original dva (note the
4819 * callback can be called multiple times if the original indirect DVA refers
4820 * to another indirect DVA, etc).
4821 *
4822 * Returns TRUE if the BP was remapped.
4823 */
4824 boolean_t
4825 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
4826 {
4827 remap_blkptr_cb_arg_t rbca;
4828
4829 if (!zfs_remap_blkptr_enable)
4830 return (B_FALSE);
4831
4832 if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
4833 return (B_FALSE);
4834
4835 /*
4836 * Dedup BP's can not be remapped, because ddt_phys_select() depends
4837 * on DVA[0] being the same in the BP as in the DDT (dedup table).
4838 */
4839 if (BP_GET_DEDUP(bp))
4840 return (B_FALSE);
4841
4842 /*
4843 * Gang blocks can not be remapped, because
4844 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
4845 * the BP used to read the gang block header (GBH) being the same
4846 * as the DVA[0] that we allocated for the GBH.
4847 */
4848 if (BP_IS_GANG(bp))
4849 return (B_FALSE);
4850
4851 /*
4852 * Embedded BP's have no DVA to remap.
4853 */
4854 if (BP_GET_NDVAS(bp) < 1)
4855 return (B_FALSE);
4856
4857 /*
4858 * Note: we only remap dva[0]. If we remapped other dvas, we
4859 * would no longer know what their phys birth txg is.
4860 */
4861 dva_t *dva = &bp->blk_dva[0];
4862
4863 uint64_t offset = DVA_GET_OFFSET(dva);
4864 uint64_t size = DVA_GET_ASIZE(dva);
4865 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
4866
4867 if (vd->vdev_ops->vdev_op_remap == NULL)
4868 return (B_FALSE);
4869
4870 rbca.rbca_bp = bp;
4871 rbca.rbca_cb = callback;
4872 rbca.rbca_remap_vd = vd;
4873 rbca.rbca_remap_offset = offset;
4874 rbca.rbca_cb_arg = arg;
4875
4876 /*
4877 * remap_blkptr_cb() will be called in order for each level of
4878 * indirection, until a concrete vdev is reached or a split block is
4879 * encountered. rbca_remap_vd and rbca_remap_offset are updated within
4880 * the callback as we go from one indirect vdev to the next one (either
4881 * concrete or indirect again) in that order.
4882 */
4883 vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
4884
4885 /* Check if the DVA wasn't remapped because it is a split block */
4886 if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
4887 return (B_FALSE);
4888
4889 return (B_TRUE);
4890 }
4891
4892 /*
4893 * Undo the allocation of a DVA which happened in the given transaction group.
4894 */
4895 void
4896 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
4897 {
4898 metaslab_t *msp;
4899 vdev_t *vd;
4900 uint64_t vdev = DVA_GET_VDEV(dva);
4901 uint64_t offset = DVA_GET_OFFSET(dva);
4902 uint64_t size = DVA_GET_ASIZE(dva);
4903
4904 ASSERT(DVA_IS_VALID(dva));
4905 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
4906
4907 if (txg > spa_freeze_txg(spa))
4908 return;
4909
4910 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
4911 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
4912 zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
4913 (u_longlong_t)vdev, (u_longlong_t)offset,
4914 (u_longlong_t)size);
4915 return;
4916 }
4917
4918 ASSERT(!vd->vdev_removing);
4919 ASSERT(vdev_is_concrete(vd));
4920 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
4921 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
4922
4923 if (DVA_GET_GANG(dva))
4924 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
4925
4926 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
4927
4928 mutex_enter(&msp->ms_lock);
4929 range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
4930 offset, size);
4931
4932 VERIFY(!msp->ms_condensing);
4933 VERIFY3U(offset, >=, msp->ms_start);
4934 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
4935 VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
4936 msp->ms_size);
4937 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
4938 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
4939 range_tree_add(msp->ms_allocatable, offset, size);
4940 mutex_exit(&msp->ms_lock);
4941 }
4942
4943 /*
4944 * Free the block represented by the given DVA.
4945 */
4946 void
4947 metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
4948 {
4949 uint64_t vdev = DVA_GET_VDEV(dva);
4950 uint64_t offset = DVA_GET_OFFSET(dva);
4951 uint64_t size = DVA_GET_ASIZE(dva);
4952 vdev_t *vd = vdev_lookup_top(spa, vdev);
4953
4954 ASSERT(DVA_IS_VALID(dva));
4955 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
4956
4957 if (DVA_GET_GANG(dva)) {
4958 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
4959 }
4960
4961 metaslab_free_impl(vd, offset, size, checkpoint);
4962 }
4963
4964 /*
4965 * Reserve some allocation slots. The reservation system must be called
4966 * before we call into the allocator. If there aren't any available slots
4967 * then the I/O will be throttled until an I/O completes and its slots are
4968 * freed up. The function returns true if it was successful in placing
4969 * the reservation.
4970 */
4971 boolean_t
4972 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
4973 zio_t *zio, int flags)
4974 {
4975 uint64_t available_slots = 0;
4976 boolean_t slot_reserved = B_FALSE;
4977 uint64_t max = mc->mc_alloc_max_slots[allocator];
4978
4979 ASSERT(mc->mc_alloc_throttle_enabled);
4980 mutex_enter(&mc->mc_lock);
4981
4982 uint64_t reserved_slots =
4983 zfs_refcount_count(&mc->mc_alloc_slots[allocator]);
4984 if (reserved_slots < max)
4985 available_slots = max - reserved_slots;
4986
4987 if (slots <= available_slots || GANG_ALLOCATION(flags) ||
4988 flags & METASLAB_MUST_RESERVE) {
4989 /*
4990 * We reserve the slots individually so that we can unreserve
4991 * them individually when an I/O completes.
4992 */
4993 for (int d = 0; d < slots; d++) {
4994 reserved_slots =
4995 zfs_refcount_add(&mc->mc_alloc_slots[allocator],
4996 zio);
4997 }
4998 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
4999 slot_reserved = B_TRUE;
5000 }
5001
5002 mutex_exit(&mc->mc_lock);
5003 return (slot_reserved);
5004 }
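/*
 * Note: every successful reservation made by
 * metaslab_class_throttle_reserve() is expected to be paired with a later
 * call to metaslab_class_throttle_unreserve() below, using the same slot
 * count, allocator, and zio, once the I/O completes or the allocation is
 * abandoned; otherwise the refcount-based slot accounting would leak and
 * eventually throttle all allocations.
 */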
5005
5006 void
5007 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
5008 int allocator, zio_t *zio)
5009 {
5010 ASSERT(mc->mc_alloc_throttle_enabled);
5011 mutex_enter(&mc->mc_lock);
5012 for (int d = 0; d < slots; d++) {
5013 (void) zfs_refcount_remove(&mc->mc_alloc_slots[allocator],
5014 zio);
5015 }
5016 mutex_exit(&mc->mc_lock);
5017 }
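
/*
 * Illustrative sketch (not compiled): the reserve/allocate/unreserve
 * pattern a throttled caller is expected to follow.  The helper below is
 * hypothetical; in practice the ZIO pipeline performs these steps across
 * separate pipeline stages.
 */
#if 0
static int
example_throttled_alloc(spa_t *spa, metaslab_class_t *mc, zio_t *zio,
    int allocator)
{
	int slots = zio->io_prop.zp_copies;	/* one slot per requested DVA */
	int error;

	if (!metaslab_class_throttle_reserve(mc, slots, allocator, zio, 0))
		return (SET_ERROR(EAGAIN));	/* caller re-queues the I/O */

	error = metaslab_alloc(spa, mc, zio->io_size, zio->io_bp, slots,
	    zio->io_txg, NULL, 0, &zio->io_alloc_list, zio, allocator);

	/* On failure, hand the slots back right away. */
	if (error != 0)
		metaslab_class_throttle_unreserve(mc, slots, allocator, zio);

	return (error);
}
#endif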
5018
5019 static int
5020 metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
5021 uint64_t txg)
5022 {
5023 metaslab_t *msp;
5024 spa_t *spa = vd->vdev_spa;
5025 int error = 0;
5026
5027 if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
5028 return (SET_ERROR(ENXIO));
5029
5030 ASSERT3P(vd->vdev_ms, !=, NULL);
5031 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5032
5033 mutex_enter(&msp->ms_lock);
5034
5035 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
5036 error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
5037 if (error == EBUSY) {
5038 ASSERT(msp->ms_loaded);
5039 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
5040 error = 0;
5041 }
5042 }
5043
5044 if (error == 0 &&
5045 !range_tree_contains(msp->ms_allocatable, offset, size))
5046 error = SET_ERROR(ENOENT);
5047
5048 if (error || txg == 0) { /* txg == 0 indicates dry run */
5049 mutex_exit(&msp->ms_lock);
5050 return (error);
5051 }
5052
5053 VERIFY(!msp->ms_condensing);
5054 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5055 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5056 VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
5057 msp->ms_size);
5058 range_tree_remove(msp->ms_allocatable, offset, size);
5059 range_tree_clear(msp->ms_trim, offset, size);
5060
5061 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
5062 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
5063 vdev_dirty(vd, VDD_METASLAB, msp, txg);
5064 range_tree_add(msp->ms_allocating[txg & TXG_MASK],
5065 offset, size);
5066 }
5067
5068 mutex_exit(&msp->ms_lock);
5069
5070 return (0);
5071 }
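
/*
 * Usage note: a claim with txg == 0 is a dry run that only verifies the
 * extent is still present in ms_allocatable.  A claim with a real txg
 * additionally removes the extent from ms_allocatable and ms_trim and, on
 * a writeable pool, records it in ms_allocating[txg] and dirties the vdev
 * so the allocation is persisted when that txg syncs.
 */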
5072
5073 typedef struct metaslab_claim_cb_arg_t {
5074 uint64_t mcca_txg;
5075 int mcca_error;
5076 } metaslab_claim_cb_arg_t;
5077
5078 /* ARGSUSED */
5079 static void
5080 metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5081 uint64_t size, void *arg)
5082 {
5083 metaslab_claim_cb_arg_t *mcca_arg = arg;
5084
5085 if (mcca_arg->mcca_error == 0) {
5086 mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
5087 size, mcca_arg->mcca_txg);
5088 }
5089 }
5090
5091 int
5092 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
5093 {
5094 if (vd->vdev_ops->vdev_op_remap != NULL) {
5095 metaslab_claim_cb_arg_t arg;
5096
5097 /*
5098 * Only zdb(1M) can claim on indirect vdevs. This is used
5099 * to detect leaks of mapped space (that are not accounted
5100 * for in the obsolete counts, spacemap, or bpobj).
5101 */
5102 ASSERT(!spa_writeable(vd->vdev_spa));
5103 arg.mcca_error = 0;
5104 arg.mcca_txg = txg;
5105
5106 vd->vdev_ops->vdev_op_remap(vd, offset, size,
5107 metaslab_claim_impl_cb, &arg);
5108
5109 if (arg.mcca_error == 0) {
5110 arg.mcca_error = metaslab_claim_concrete(vd,
5111 offset, size, txg);
5112 }
5113 return (arg.mcca_error);
5114 } else {
5115 return (metaslab_claim_concrete(vd, offset, size, txg));
5116 }
5117 }
5118
5119 /*
5120 * Intent log support: upon opening the pool after a crash, notify the SPA
5121 * of blocks that the intent log has allocated for immediate write, but
5122 * which are still considered free by the SPA because the last transaction
5123 * group didn't commit yet.
5124 */
5125 static int
5126 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5127 {
5128 uint64_t vdev = DVA_GET_VDEV(dva);
5129 uint64_t offset = DVA_GET_OFFSET(dva);
5130 uint64_t size = DVA_GET_ASIZE(dva);
5131 vdev_t *vd;
5132
5133 if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
5134 return (SET_ERROR(ENXIO));
5135 }
5136
5137 ASSERT(DVA_IS_VALID(dva));
5138
5139 if (DVA_GET_GANG(dva))
5140 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
5141
5142 return (metaslab_claim_impl(vd, offset, size, txg));
5143 }
5144
5145 int
5146 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
5147 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
5148 zio_alloc_list_t *zal, zio_t *zio, int allocator)
5149 {
5150 dva_t *dva = bp->blk_dva;
5151 dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
5152 int error = 0;
5153
5154 ASSERT(bp->blk_birth == 0);
5155 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
5156
5157 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5158
5159 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
5160 spa_config_exit(spa, SCL_ALLOC, FTAG);
5161 return (SET_ERROR(ENOSPC));
5162 }
5163
5164 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
5165 ASSERT(BP_GET_NDVAS(bp) == 0);
5166 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
5167 ASSERT3P(zal, !=, NULL);
5168
5169 for (int d = 0; d < ndvas; d++) {
5170 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
5171 txg, flags, zal, allocator);
5172 if (error != 0) {
5173 for (d--; d >= 0; d--) {
5174 metaslab_unalloc_dva(spa, &dva[d], txg);
5175 metaslab_group_alloc_decrement(spa,
5176 DVA_GET_VDEV(&dva[d]), zio, flags,
5177 allocator, B_FALSE);
5178 bzero(&dva[d], sizeof (dva_t));
5179 }
5180 spa_config_exit(spa, SCL_ALLOC, FTAG);
5181 return (error);
5182 } else {
5183 /*
5184 * Update the metaslab group's queue depth
5185 * based on the newly allocated dva.
5186 */
5187 metaslab_group_alloc_increment(spa,
5188 DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
5189 }
5190
5191 }
5192 ASSERT(error == 0);
5193 ASSERT(BP_GET_NDVAS(bp) == ndvas);
5194
5195 spa_config_exit(spa, SCL_ALLOC, FTAG);
5196
5197 BP_SET_BIRTH(bp, txg, 0);
5198
5199 return (0);
5200 }
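
/*
 * Illustrative sketch (not compiled): a minimal allocation round trip
 * through metaslab_alloc().  The helper and its constants are hypothetical;
 * real callers (the ZIO pipeline, the ZIL) also pass allocation hints,
 * flags, and a throttled zio.
 */
#if 0
static int
example_alloc_block(spa_t *spa, uint64_t psize, uint64_t txg, blkptr_t *bp)
{
	zio_alloc_list_t zal;
	int error;

	metaslab_trace_init(&zal);
	BP_ZERO(bp);

	/* Ask for two copies from the normal class, with no hint block. */
	error = metaslab_alloc(spa, spa_normal_class(spa), psize, bp, 2,
	    txg, NULL, 0, &zal, NULL, 0);
	metaslab_trace_fini(&zal);

	/*
	 * A caller that abandons the block in the same txg would undo the
	 * allocation with metaslab_free(spa, bp, txg, B_TRUE).
	 */
	return (error);
}
#endif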
5201
5202 void
5203 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
5204 {
5205 const dva_t *dva = bp->blk_dva;
5206 int ndvas = BP_GET_NDVAS(bp);
5207
5208 ASSERT(!BP_IS_HOLE(bp));
5209 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
5210
5211 /*
5212 * If we have a checkpoint for the pool we need to make sure that
5213 * the blocks that we free that are part of the checkpoint won't be
5214 * reused until the checkpoint is discarded or we revert to it.
5215 *
5216 * The checkpoint flag is passed down the metaslab_free code path
5217 * and is set whenever we want to add a block to the checkpoint's
5218 * accounting. That is, we "checkpoint" blocks that existed at the
5219 * time the checkpoint was created and are therefore referenced by
5220 * the checkpointed uberblock.
5221 *
5222 * Note that we don't checkpoint any blocks if the current
5223 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
5224 * normally as they will be referenced by the checkpointed uberblock.
5225 */
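/*
 * For example (hypothetical txgs): with spa_checkpoint_txg == 100, a block
 * born in txg 90 that is freed while syncing txg 120 is added to the
 * checkpoint's accounting (checkpoint is set to B_TRUE below), whereas a
 * block born in txg 110 is freed normally, since the checkpointed
 * uberblock cannot reference it.
 */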
5226 boolean_t checkpoint = B_FALSE;
5227 if (bp->blk_birth <= spa->spa_checkpoint_txg &&
5228 spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
5229 /*
5230 * At this point, if the block is part of the checkpoint
5231 * there is no way it was created in the current txg.
5232 */
5233 ASSERT(!now);
5234 ASSERT3U(spa_syncing_txg(spa), ==, txg);
5235 checkpoint = B_TRUE;
5236 }
5237
5238 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
5239
5240 for (int d = 0; d < ndvas; d++) {
5241 if (now) {
5242 metaslab_unalloc_dva(spa, &dva[d], txg);
5243 } else {
5244 ASSERT3U(txg, ==, spa_syncing_txg(spa));
5245 metaslab_free_dva(spa, &dva[d], checkpoint);
5246 }
5247 }
5248
5249 spa_config_exit(spa, SCL_FREE, FTAG);
5250 }
5251
5252 int
5253 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
5254 {
5255 const dva_t *dva = bp->blk_dva;
5256 int ndvas = BP_GET_NDVAS(bp);
5257 int error = 0;
5258
5259 ASSERT(!BP_IS_HOLE(bp));
5260
5261 if (txg != 0) {
5262 /*
5263 * First do a dry run to make sure all DVAs are claimable,
5264 * so we don't have to unwind from partial failures below.
5265 */
5266 if ((error = metaslab_claim(spa, bp, 0)) != 0)
5267 return (error);
5268 }
5269
5270 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5271
5272 for (int d = 0; d < ndvas; d++) {
5273 error = metaslab_claim_dva(spa, &dva[d], txg);
5274 if (error != 0)
5275 break;
5276 }
5277
5278 spa_config_exit(spa, SCL_ALLOC, FTAG);
5279
5280 ASSERT(error == 0 || txg == 0);
5281
5282 return (error);
5283 }
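
/*
 * Usage note (illustrative): at pool open, an intent-log-style caller
 * re-claims the blocks it wrote before the crash with something like
 *
 *	error = metaslab_claim(spa, bp, spa_first_txg(spa));
 *
 * where a nonzero txg both verifies the DVAs and re-marks them as allocated
 * in that txg; a txg of 0 (used for the dry run above) only verifies them.
 */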
5284
5285 void
5286 metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
5287 {
5288 const dva_t *dva = bp->blk_dva;
5289 int ndvas = BP_GET_NDVAS(bp);
5290 uint64_t psize = BP_GET_PSIZE(bp);
5291 int d;
5292 vdev_t *vd;
5293
5294 ASSERT(!BP_IS_HOLE(bp));
5295 ASSERT(!BP_IS_EMBEDDED(bp));
5296 ASSERT(psize > 0);
5297
5298 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
5299
5300 for (d = 0; d < ndvas; d++) {
5301 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
5302 continue;
5303 atomic_add_64(&vd->vdev_pending_fastwrite, psize);
5304 }
5305
5306 spa_config_exit(spa, SCL_VDEV, FTAG);
5307 }
5308
5309 void
5310 metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
5311 {
5312 const dva_t *dva = bp->blk_dva;
5313 int ndvas = BP_GET_NDVAS(bp);
5314 uint64_t psize = BP_GET_PSIZE(bp);
5315 int d;
5316 vdev_t *vd;
5317
5318 ASSERT(!BP_IS_HOLE(bp));
5319 ASSERT(!BP_IS_EMBEDDED(bp));
5320 ASSERT(psize > 0);
5321
5322 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
5323
5324 for (d = 0; d < ndvas; d++) {
5325 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
5326 continue;
5327 ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
5328 atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
5329 }
5330
5331 spa_config_exit(spa, SCL_VDEV, FTAG);
5332 }
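
/*
 * The two functions above maintain vdev_pending_fastwrite, a per-top-level-
 * vdev count of the physical bytes of in-flight ZIL ("fastwrite") blocks.
 * The DVA allocator consults this counter when placing fastwrite
 * allocations, biasing new log blocks toward the vdev with the least
 * pending fastwrite data.
 */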
5333
5334 /* ARGSUSED */
5335 static void
5336 metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
5337 uint64_t size, void *arg)
5338 {
5339 if (vd->vdev_ops == &vdev_indirect_ops)
5340 return;
5341
5342 metaslab_check_free_impl(vd, offset, size);
5343 }
5344
5345 static void
5346 metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
5347 {
5348 metaslab_t *msp;
5349 ASSERTV(spa_t *spa = vd->vdev_spa);
5350
5351 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
5352 return;
5353
5354 if (vd->vdev_ops->vdev_op_remap != NULL) {
5355 vd->vdev_ops->vdev_op_remap(vd, offset, size,
5356 metaslab_check_free_impl_cb, NULL);
5357 return;
5358 }
5359
5360 ASSERT(vdev_is_concrete(vd));
5361 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
5362 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5363
5364 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5365
5366 mutex_enter(&msp->ms_lock);
5367 if (msp->ms_loaded) {
5368 range_tree_verify_not_present(msp->ms_allocatable,
5369 offset, size);
5370 }
5371
5372 /*
5373 * Check all segments that currently exist in the freeing pipeline.
5374 *
5375 * It would intuitively make sense to also check the current allocating
5376 * tree since metaslab_unalloc_dva() exists for extents that are
5377 * allocated and freed in the same sync pass within the same txg.
5378 * Unfortunately there are places (e.g. the ZIL) where we allocate a
5379 * segment but then we free part of it within the same txg
5380 * [see zil_sync()]. Thus, we don't call range_tree_verify_not_present()
5381 * on the current allocating tree.
5382 */
5383 range_tree_verify_not_present(msp->ms_freeing, offset, size);
5384 range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
5385 range_tree_verify_not_present(msp->ms_freed, offset, size);
5386 for (int j = 0; j < TXG_DEFER_SIZE; j++)
5387 range_tree_verify_not_present(msp->ms_defer[j], offset, size);
5388 range_tree_verify_not_present(msp->ms_trim, offset, size);
5389 mutex_exit(&msp->ms_lock);
5390 }
5391
5392 void
5393 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
5394 {
5395 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
5396 return;
5397
5398 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
5399 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
5400 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
5401 vdev_t *vd = vdev_lookup_top(spa, vdev);
5402 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
5403 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
5404
5405 if (DVA_GET_GANG(&bp->blk_dva[i]))
5406 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
5407
5408 ASSERT3P(vd, !=, NULL);
5409
5410 metaslab_check_free_impl(vd, offset, size);
5411 }
5412 spa_config_exit(spa, SCL_VDEV, FTAG);
5413 }
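
/*
 * Note that these checks are debug-only: they run when ZFS_DEBUG_ZIO_FREE
 * is set in zfs_flags and verify that a block being freed is not already
 * present in ms_allocatable or in any of the free-side trees checked above,
 * which would indicate a double free.
 */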
5414
5415 static void
5416 metaslab_group_disable_wait(metaslab_group_t *mg)
5417 {
5418 ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
5419 while (mg->mg_disabled_updating) {
5420 cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
5421 }
5422 }
5423
5424 static void
5425 metaslab_group_disabled_increment(metaslab_group_t *mg)
5426 {
5427 ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
5428 ASSERT(mg->mg_disabled_updating);
5429
5430 while (mg->mg_ms_disabled >= max_disabled_ms) {
5431 cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
5432 }
5433 mg->mg_ms_disabled++;
5434 ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
5435 }
5436
5437 /*
5438 * Mark the metaslab as disabled to prevent any allocations on this metaslab.
5439 * We must also track how many metaslabs are currently disabled within a
5440 * metaslab group and limit them to prevent allocation failures from
5441 * occurring because all metaslabs are disabled.
5442 */
5443 void
5444 metaslab_disable(metaslab_t *msp)
5445 {
5446 ASSERT(!MUTEX_HELD(&msp->ms_lock));
5447 metaslab_group_t *mg = msp->ms_group;
5448
5449 mutex_enter(&mg->mg_ms_disabled_lock);
5450
5451 /*
5452 * To keep an accurate count of how many threads have disabled
5453 * a specific metaslab group, we only allow one thread to mark
5454 * the metaslab group at a time. This ensures that the value of
5455 * ms_disabled will be accurate when we decide to mark a metaslab
5456 * group as disabled. To do this we force all other threads
5457 * to wait until the metaslab group's mg_disabled_updating flag
5458 * is no longer set.
5459 */
5460 metaslab_group_disable_wait(mg);
5461 mg->mg_disabled_updating = B_TRUE;
5462 if (msp->ms_disabled == 0) {
5463 metaslab_group_disabled_increment(mg);
5464 }
5465 mutex_enter(&msp->ms_lock);
5466 msp->ms_disabled++;
5467 mutex_exit(&msp->ms_lock);
5468
5469 mg->mg_disabled_updating = B_FALSE;
5470 cv_broadcast(&mg->mg_ms_disabled_cv);
5471 mutex_exit(&mg->mg_ms_disabled_lock);
5472 }
5473
5474 void
5475 metaslab_enable(metaslab_t *msp, boolean_t sync)
5476 {
5477 metaslab_group_t *mg = msp->ms_group;
5478 spa_t *spa = mg->mg_vd->vdev_spa;
5479
5480 /*
5481 * Wait for the outstanding I/O to be synced to prevent newly
5482 * allocated blocks from being overwritten. This is used by
5483 * initialize and TRIM, which modify unallocated space.
5484 */
5485 if (sync)
5486 txg_wait_synced(spa_get_dsl(spa), 0);
5487
5488 mutex_enter(&mg->mg_ms_disabled_lock);
5489 mutex_enter(&msp->ms_lock);
5490 if (--msp->ms_disabled == 0) {
5491 mg->mg_ms_disabled--;
5492 cv_broadcast(&mg->mg_ms_disabled_cv);
5493 }
5494 mutex_exit(&msp->ms_lock);
5495 mutex_exit(&mg->mg_ms_disabled_lock);
5496 }
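
/*
 * Illustrative sketch (not compiled): the disable/enable pattern used by
 * consumers such as TRIM and initialize, which must keep the allocator away
 * from a metaslab while they operate on its unallocated space.  The work in
 * the middle is hypothetical.
 */
#if 0
	metaslab_disable(msp);

	/* ... issue TRIM or initialize I/O against free ranges of msp ... */

	metaslab_enable(msp, B_TRUE);	/* wait for outstanding I/O to sync */
#endif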
5497
5498 static void
5499 metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
5500 {
5501 vdev_t *vd = ms->ms_group->mg_vd;
5502 spa_t *spa = vd->vdev_spa;
5503 objset_t *mos = spa_meta_objset(spa);
5504
5505 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
5506
5507 metaslab_unflushed_phys_t entry = {
5508 .msp_unflushed_txg = metaslab_unflushed_txg(ms),
5509 };
5510 uint64_t entry_size = sizeof (entry);
5511 uint64_t entry_offset = ms->ms_id * entry_size;
5512
5513 uint64_t object = 0;
5514 int err = zap_lookup(mos, vd->vdev_top_zap,
5515 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
5516 &object);
5517 if (err == ENOENT) {
5518 object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
5519 SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
5520 VERIFY0(zap_add(mos, vd->vdev_top_zap,
5521 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
5522 &object, tx));
5523 } else {
5524 VERIFY0(err);
5525 }
5526
5527 dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
5528 &entry, tx);
5529 }
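
/*
 * The object written above holds one metaslab_unflushed_phys_t entry per
 * metaslab, indexed by ms_id, and is located through the
 * VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS entry in the top-level vdev's ZAP.
 * It is created lazily the first time any metaslab of that vdev records an
 * unflushed txg.
 */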
5530
5531 void
5532 metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
5533 {
5534 spa_t *spa = ms->ms_group->mg_vd->vdev_spa;
5535
5536 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
5537 return;
5538
5539 ms->ms_unflushed_txg = txg;
5540 metaslab_update_ondisk_flush_data(ms, tx);
5541 }
5542
5543 uint64_t
5544 metaslab_unflushed_txg(metaslab_t *ms)
5545 {
5546 return (ms->ms_unflushed_txg);
5547 }
5548
5549 #if defined(_KERNEL)
5550 /* BEGIN CSTYLED */
5551 module_param(metaslab_aliquot, ulong, 0644);
5552 MODULE_PARM_DESC(metaslab_aliquot,
5553 "allocation granularity (a.k.a. stripe size)");
5554
5555 module_param(metaslab_debug_load, int, 0644);
5556 MODULE_PARM_DESC(metaslab_debug_load,
5557 "load all metaslabs when pool is first opened");
5558
5559 module_param(metaslab_debug_unload, int, 0644);
5560 MODULE_PARM_DESC(metaslab_debug_unload,
5561 "prevent metaslabs from being unloaded");
5562
5563 module_param(metaslab_preload_enabled, int, 0644);
5564 MODULE_PARM_DESC(metaslab_preload_enabled,
5565 "preload potential metaslabs during reassessment");
5566
5567 module_param(zfs_mg_noalloc_threshold, int, 0644);
5568 MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
5569 "percentage of free space for metaslab group to allow allocation");
5570
5571 module_param(zfs_mg_fragmentation_threshold, int, 0644);
5572 MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
5573 "fragmentation for metaslab group to allow allocation");
5574
5575 module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
5576 MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
5577 "fragmentation for metaslab to allow allocation");
5578
5579 module_param(metaslab_fragmentation_factor_enabled, int, 0644);
5580 MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
5581 "use the fragmentation metric to prefer less fragmented metaslabs");
5582
5583 module_param(metaslab_lba_weighting_enabled, int, 0644);
5584 MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
5585 "prefer metaslabs with lower LBAs");
5586
5587 module_param(metaslab_bias_enabled, int, 0644);
5588 MODULE_PARM_DESC(metaslab_bias_enabled,
5589 "enable metaslab group biasing");
5590
5591 module_param(zfs_metaslab_segment_weight_enabled, int, 0644);
5592 MODULE_PARM_DESC(zfs_metaslab_segment_weight_enabled,
5593 "enable segment-based metaslab selection");
5594
5595 module_param(zfs_metaslab_switch_threshold, int, 0644);
5596 MODULE_PARM_DESC(zfs_metaslab_switch_threshold,
5597 "segment-based metaslab selection maximum buckets before switching");
5598
5599 module_param(metaslab_force_ganging, ulong, 0644);
5600 MODULE_PARM_DESC(metaslab_force_ganging,
5601 "blocks larger than this size are forced to be gang blocks");
5602
5603 module_param(metaslab_df_max_search, int, 0644);
5604 MODULE_PARM_DESC(metaslab_df_max_search,
5605 "max distance (bytes) to search forward before using size tree");
5606
5607 module_param(metaslab_df_use_largest_segment, int, 0644);
5608 MODULE_PARM_DESC(metaslab_df_use_largest_segment,
5609 "when looking in size tree, use largest segment instead of exact fit");
5610 /* END CSTYLED */
5611
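/*
 * Usage note (illustrative): parameters declared with module_param(..., 0644)
 * above can typically be inspected and tuned at runtime through sysfs, e.g.
 *
 *	cat /sys/module/zfs/parameters/metaslab_aliquot
 *	echo 1048576 > /sys/module/zfs/parameters/metaslab_aliquot
 *
 * or set persistently with a modprobe option such as
 * "options zfs metaslab_aliquot=1048576".  The example value is arbitrary.
 */
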
5612 #endif