/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zap.h>
#include <sys/btree.h>

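/*
 * Evaluates to nonzero when the allocation is for any level of a gang
 * block, i.e. either a gang header or one of its children.
 */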
#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))

/*
 * Metaslab granularity, in bytes. This is roughly similar to what would be
 * referred to as the "stripe size" in traditional RAID arrays. In normal
 * operation, we will try to write this amount of data to each disk before
 * moving on to the next top-level vdev.
 */
static uint64_t metaslab_aliquot = 1024 * 1024;
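/*
 * Note that metaslab_group_activate() below scales this per group by the
 * number of data disks in the top-level vdev (see mg_aliquot).
 */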

/*
 * For testing, make some blocks above a certain size be gang blocks.
 */
uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;

/*
 * Of blocks of size >= metaslab_force_ganging, actually gang them this often.
 */
uint_t metaslab_force_ganging_pct = 3;
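
/*
 * A compiled-out sketch of how an allocation path might combine the two
 * tunables above: gang only blocks at or over the size cutoff, and only
 * metaslab_force_ganging_pct percent of the time. random_in_range() is
 * assumed here to return a uniform value in [0, range); the real check
 * lives at the allocation call site, not in this file.
 */
#if 0
static boolean_t
metaslab_example_force_gang(uint64_t psize)
{
	return (psize >= metaslab_force_ganging &&
	    metaslab_force_ganging_pct > 0 &&
	    random_in_range(100) < MIN(metaslab_force_ganging_pct, 100));
}
#endif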

/*
 * In pools where the log space map feature is not enabled we touch
 * multiple metaslabs (and their respective space maps) with each
 * transaction group. Thus, we benefit from having a small space map
 * block size since it allows us to issue more I/O operations scattered
 * around the disk. So a sane default for the space map block size
 * is 8K-16K.
 */
int zfs_metaslab_sm_blksz_no_log = (1 << 14);

/*
 * When the log space map feature is enabled, we accumulate a lot of
 * changes per metaslab that are flushed once in a while so we benefit
 * from a bigger block size like 128K for the metaslab space maps.
 */
int zfs_metaslab_sm_blksz_with_log = (1 << 17);

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
uint_t zfs_condense_pct = 200;

/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
static const int zfs_metaslab_condense_block_threshold = 4;
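
/*
 * A compiled-out sketch of the resulting heuristic, with hypothetical
 * arguments standing in for the space map's current on-disk length, the
 * length a condensed representation would need, and the space map block
 * size:
 */
#if 0
static boolean_t
metaslab_example_should_condense(uint64_t actual_len, uint64_t optimal_len,
    uint64_t sm_blksz)
{
	/* Too small for condensing to save whole blocks. */
	if (actual_len <= zfs_metaslab_condense_block_threshold * sm_blksz)
		return (B_FALSE);

	/* Condense once the on-disk form is zfs_condense_pct% of optimal. */
	return (actual_len > optimal_len * zfs_condense_pct / 100);
}
#endif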

/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
static uint_t zfs_mg_noalloc_threshold = 0;

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or
 * equal to zfs_mg_fragmentation_threshold. If a metaslab group
 * exceeds this threshold then it will be skipped unless all metaslab
 * groups within the metaslab class have also crossed this threshold.
 *
 * This tunable was introduced to avoid edge cases where we continue
 * allocating from very fragmented disks in our pool while other, less
 * fragmented disks exist. On the other hand, if all disks in the
 * pool are uniformly approaching the threshold, the threshold can
 * be a speed bump in performance, where we keep switching the disks
 * that we allocate from (e.g. we allocate some segments from disk A,
 * pushing it past the threshold, while freeing segments from disk
 * B brings its fragmentation below the threshold).
 *
 * Empirically, we've seen that our vdev selection for allocations is
 * good enough that fragmentation increases uniformly across all vdevs
 * the majority of the time. Thus we set the threshold percentage high
 * enough to avoid hitting the speed bump on pools that are being pushed
 * to the edge.
 */
static uint_t zfs_mg_fragmentation_threshold = 95;
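/*
 * Taken together, the two thresholds above gate a group's eligibility
 * roughly as computed in metaslab_group_alloc_update() below:
 *
 *	allocatable = free_capacity > zfs_mg_noalloc_threshold &&
 *	    (fragmentation == ZFS_FRAG_INVALID ||
 *	    fragmentation <= zfs_mg_fragmentation_threshold);
 */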

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
static uint_t zfs_metaslab_fragmentation_threshold = 70;

/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = B_FALSE;

/*
 * When set will prevent metaslabs from being unloaded.
 */
static int metaslab_debug_unload = B_FALSE;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
uint_t metaslab_df_free_pct = 4;

/*
 * Maximum distance to search forward from the last offset. Without this
 * limit, fragmented pools can see >100,000 iterations and
 * metaslab_block_picker() becomes the performance limiting factor on
 * high-performance storage.
 *
 * With the default setting of 16MB, we typically see less than 500
 * iterations, even with very fragmented, ashift=9 pools. The maximum number
 * of iterations possible is:
 *	metaslab_df_max_search / (2 * (1<<ashift))
 * With the default setting of 16MB this is 16*1024 (with ashift=9) or
 * 2048 (with ashift=12).
 */
static uint_t metaslab_df_max_search = 16 * 1024 * 1024;

/*
 * Forces the metaslab_block_picker function to search for at least this many
 * segments forwards until giving up on finding a segment that the allocation
 * will fit into.
 */
static const uint32_t metaslab_min_search_count = 100;

/*
 * If we are not searching forward (due to metaslab_df_max_search,
 * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
 * controls what segment is used. If it is set, we will use the largest free
 * segment. If it is not set, we will use a segment of exactly the requested
 * size (or larger).
 */
static int metaslab_df_use_largest_segment = B_FALSE;

/*
 * These tunables control how long a metaslab will remain loaded after the
 * last allocation from it. A metaslab can't be unloaded until at least
 * metaslab_unload_delay TXGs and metaslab_unload_delay_ms milliseconds
 * have elapsed. However, zfs_metaslab_mem_limit may cause it to be
 * unloaded sooner. These settings are intended to be generous -- to keep
 * metaslabs loaded for a long time, reducing the rate of metaslab loading.
 */
static uint_t metaslab_unload_delay = 32;
static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
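/*
 * Both delays must expire before eviction: metaslab_class_evict_old()
 * below only calls metaslab_evict() once the current txg is past
 * ms_selected_txg + metaslab_unload_delay and gethrtime() is past
 * ms_selected_time + MSEC2NSEC(metaslab_unload_delay_ms).
 */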

/*
 * Max number of metaslabs per group to preload.
 */
uint_t metaslab_preload_limit = 10;

/*
 * Enable/disable preloading of metaslabs.
 */
static int metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
static int metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
static int metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
static int metaslab_bias_enabled = B_TRUE;

/*
 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
 */
static const boolean_t zfs_remap_blkptr_enable = B_TRUE;

/*
 * Enable/disable segment-based metaslab selection.
 */
static int zfs_metaslab_segment_weight_enabled = B_TRUE;

/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
static int zfs_metaslab_switch_threshold = 2;

/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
static const boolean_t metaslab_trace_enabled = B_FALSE;

/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached allowing for further investigation.
 */
static const uint64_t metaslab_trace_max_entries = 5000;

/*
 * Maximum number of metaslabs per group that can be disabled
 * simultaneously.
 */
static const int max_disabled_ms = 3;

/*
 * Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
 * To avoid 64-bit overflow, don't set above UINT32_MAX.
 */
static uint64_t zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */
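/*
 * (The overflow note above presumably refers to converting this value to
 * nanoseconds for comparison against gethrtime(): UINT32_MAX seconds is
 * roughly 4.3e18 ns, which still fits in a uint64_t, while much larger
 * values would wrap.)
 */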

/*
 * Maximum percentage of memory to use on storing loaded metaslabs. If loading
 * a metaslab would take it over this percentage, the oldest selected metaslab
 * is automatically unloaded.
 */
static uint_t zfs_metaslab_mem_limit = 25;

/*
 * Force the per-metaslab range trees to use 64-bit integers to store
 * segments. Used for debugging purposes.
 */
static const boolean_t zfs_metaslab_force_large_segs = B_FALSE;

/*
 * By default we only store segments over a certain size in the size-sorted
 * metaslab trees (ms_allocatable_by_size and
 * ms_unflushed_frees_by_size). This dramatically reduces memory usage and
 * improves load and unload times at the cost of causing us to use slightly
 * larger segments than we would otherwise in some cases.
 */
static const uint32_t metaslab_by_size_min_shift = 14;

/*
 * If not set, we will first try normal allocation. If that fails then
 * we will do a gang allocation. If that fails then we will do a "try hard"
 * gang allocation. If that fails then we will have a multi-layer gang
 * block.
 *
 * If set, we will first try normal allocation. If that fails then
 * we will do a "try hard" allocation. If that fails we will do a gang
 * allocation. If that fails we will do a "try hard" gang allocation. If
 * that fails then we will have a multi-layer gang block.
 */
static int zfs_metaslab_try_hard_before_gang = B_FALSE;

/*
 * When not trying hard, we only consider the best zfs_metaslab_find_max_tries
 * metaslabs. This improves performance, especially when there are many
 * metaslabs per vdev and the allocation can't actually be satisfied (so we
 * would otherwise iterate all the metaslabs). If there is a metaslab with a
 * worse weight but it can actually satisfy the allocation, we won't find it
 * until trying hard. This may happen if the worse metaslab is not loaded
 * (and the true weight is better than we have calculated), or due to weight
 * bucketization. E.g. we are looking for a 60K segment, and the best
 * metaslabs all have free segments in the 32-63K bucket, but the best
 * zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
 * subsequent metaslab has ms_max_size >60KB (but fewer segments in this
 * bucket, and therefore a lower weight).
 */
static uint_t zfs_metaslab_find_max_tries = 100;

static uint64_t metaslab_weight(metaslab_t *, boolean_t);
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);

static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
static unsigned int metaslab_idx_func(multilist_t *, void *);
static void metaslab_evict(metaslab_t *, uint64_t);
static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);
kmem_cache_t *metaslab_alloc_trace_cache;

typedef struct metaslab_stats {
	kstat_named_t metaslabstat_trace_over_limit;
	kstat_named_t metaslabstat_reload_tree;
	kstat_named_t metaslabstat_too_many_tries;
	kstat_named_t metaslabstat_try_hard;
} metaslab_stats_t;

static metaslab_stats_t metaslab_stats = {
	{ "trace_over_limit",	KSTAT_DATA_UINT64 },
	{ "reload_tree",	KSTAT_DATA_UINT64 },
	{ "too_many_tries",	KSTAT_DATA_UINT64 },
	{ "try_hard",		KSTAT_DATA_UINT64 },
};

#define	METASLABSTAT_BUMP(stat) \
	atomic_inc_64(&metaslab_stats.stat.value.ui64);
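/*
 * Atomically bump one of the counters above; note that the macro body
 * already ends in a semicolon.
 */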


static kstat_t *metaslab_ksp;

void
metaslab_stat_init(void)
{
	ASSERT(metaslab_alloc_trace_cache == NULL);
	metaslab_alloc_trace_cache = kmem_cache_create(
	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
	    "misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (metaslab_ksp != NULL) {
		metaslab_ksp->ks_data = &metaslab_stats;
		kstat_install(metaslab_ksp);
	}
}

void
metaslab_stat_fini(void)
{
	if (metaslab_ksp != NULL) {
		kstat_delete(metaslab_ksp);
		metaslab_ksp = NULL;
	}

	kmem_cache_destroy(metaslab_alloc_trace_cache);
	metaslab_alloc_trace_cache = NULL;
}

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(offsetof(metaslab_class_t,
	    mc_allocator[spa->spa_alloc_count]), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
	multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
	    offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
		mca->mca_rotor = NULL;
		zfs_refcount_create_tracked(&mca->mca_alloc_slots);
	}

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;

	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
		ASSERT(mca->mca_rotor == NULL);
		zfs_refcount_destroy(&mca->mca_alloc_slots);
	}
	mutex_destroy(&mc->mc_lock);
	multilist_destroy(&mc->mc_metaslab_txg_list);
	kmem_free(mc, offsetof(metaslab_class_t,
	    mc_allocator[spa->spa_alloc_count]));
}

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);

	return (0);
}

static void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	mutex_enter(&mc->mc_lock);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = vdev_get_mg(tvd, mc);

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));

		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
	}

	mutex_exit(&mc->mc_lock);
	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels,
		 * or vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}

/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * Calculate if we have enough space to add additional
		 * metaslabs. We report the expandable space in terms
		 * of the metaslab size since that's the unit of expansion.
		 */
		space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
		    1ULL << tvd->vdev_ms_shift);
	}
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (space);
}

void
metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
{
	multilist_t *ml = &mc->mc_metaslab_txg_list;
	for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
		multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
		metaslab_t *msp = multilist_sublist_head(mls);
		multilist_sublist_unlock(mls);
		while (msp != NULL) {
			mutex_enter(&msp->ms_lock);

			/*
			 * If the metaslab has been removed from the list
			 * (which could happen if we were at the memory limit
			 * and it was evicted during this loop), then we can't
			 * proceed and we should restart the sublist.
			 */
			if (!multilist_link_active(&msp->ms_class_txg_node)) {
				mutex_exit(&msp->ms_lock);
				i--;
				break;
			}
			mls = multilist_sublist_lock(ml, i);
			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
			multilist_sublist_unlock(mls);
			if (txg >
			    msp->ms_selected_txg + metaslab_unload_delay &&
			    gethrtime() > msp->ms_selected_time +
			    (uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) {
				metaslab_evict(msp, txg);
			} else {
				/*
				 * Once we've hit a metaslab selected too
				 * recently to evict, we're done evicting for
				 * now.
				 */
				mutex_exit(&msp->ms_lock);
				break;
			}
			mutex_exit(&msp->ms_lock);
			msp = next_msp;
		}
	}
}

static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = (const metaslab_t *)x1;
	const metaslab_t *m2 = (const metaslab_t *)x2;

	int sort1 = 0;
	int sort2 = 0;
	if (m1->ms_allocator != -1 && m1->ms_primary)
		sort1 = 1;
	else if (m1->ms_allocator != -1 && !m1->ms_primary)
		sort1 = 2;
	if (m2->ms_allocator != -1 && m2->ms_primary)
		sort2 = 1;
	else if (m2->ms_allocator != -1 && !m2->ms_primary)
		sort2 = 2;

	/*
	 * Sort inactive metaslabs first, then primaries, then secondaries.
	 * When selecting a metaslab to allocate from, an allocator first
	 * tries its primary, then secondary active metaslab. If it doesn't
	 * have active metaslabs, or can't allocate from them, it searches
	 * for an inactive metaslab to activate. If it can't find a suitable
	 * one, it will steal a primary or secondary metaslab from another
	 * allocator.
	 */
	if (sort1 < sort2)
		return (-1);
	if (sort1 > sort2)
		return (1);

	int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
	if (likely(cmp))
		return (cmp);

	IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);

	return (TREE_CMP(m1->ms_start, m2->ms_start));
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is cleared if the free capacity falls to or below
 * the zfs_mg_noalloc_threshold, or if the fragmentation value is
 * greater than zfs_mg_fragmentation_threshold. If a metaslab group
 * transitions from allocatable to non-allocatable or vice versa then the
 * metaslab group's class is updated to reflect the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;
	boolean_t was_initialized;

	ASSERT(vd == vd->vdev_top);
	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
	    SCL_ALLOC);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;
	was_initialized = mg->mg_initialized;

	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);

	mutex_enter(&mc->mc_lock);

	/*
	 * If the metaslab group was just added then it won't
	 * have any space until we finish syncing out this txg.
	 * At that point we will consider it initialized and available
	 * for allocations. We also don't consider non-activated
	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
	 * to be initialized, because they can't be used for allocation.
	 */
	mg->mg_initialized = metaslab_group_initialized(mg);
	if (!was_initialized && mg->mg_initialized) {
		mc->mc_groups++;
	} else if (was_initialized && !mg->mg_initialized) {
		ASSERT3U(mc->mc_groups, >, 0);
		mc->mc_groups--;
	}
	if (mg->mg_initialized)
		mg->mg_no_free_space = B_FALSE;

	/*
	 * A metaslab group is considered allocatable if it has plenty
	 * of free space or is not heavily fragmented. We only take
	 * fragmentation into account if the metaslab group has a valid
	 * fragmentation metric (i.e. a value between 0 and 100).
	 */
	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

	/*
	 * The mc_alloc_groups maintains a count of the number of
	 * groups in this metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;
	mutex_exit(&mc->mc_lock);

	mutex_exit(&mg->mg_lock);
}

int
metaslab_sort_by_flushed(const void *va, const void *vb)
{
	const metaslab_t *a = va;
	const metaslab_t *b = vb;

	int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
	if (likely(cmp))
		return (cmp);

	uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
	uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
	cmp = TREE_CMP(a_vdev_id, b_vdev_id);
	if (cmp)
		return (cmp);

	return (TREE_CMP(a->ms_id, b->ms_id));
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(offsetof(metaslab_group_t,
	    mg_allocator[allocators]), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;
	mg->mg_initialized = B_FALSE;
	mg->mg_no_free_space = B_TRUE;
	mg->mg_allocators = allocators;

	for (int i = 0; i < allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
	}

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	mutex_destroy(&mg->mg_ms_disabled_lock);
	cv_destroy(&mg->mg_ms_disabled_cv);

	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
	}
	kmem_free(mg, offsetof(metaslab_group_t,
	    mg_allocator[mg->mg_allocators]));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);

	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1,
	    vdev_get_ndisks(mg->mg_vd) - vdev_get_nparity(mg->mg_vd));
	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mc->mc_allocator[i].mca_rotor = mg;
		mg = mg->mg_next;
	}
}

/*
 * Passivate a metaslab group and remove it from the allocation rotor.
 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
 * a metaslab group. This function will momentarily drop spa_config_locks
 * that are lower than the SCL_ALLOC lock (see comment below).
 */
void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;
	int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);

	ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
	    (SCL_ALLOC | SCL_ZIO));

	if (--mg->mg_activation_count != 0) {
		for (int i = 0; i < spa->spa_alloc_count; i++)
			ASSERT(mc->mc_allocator[i].mca_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	/*
	 * The spa_config_lock is an array of rwlocks, ordered as
	 * follows (from highest to lowest):
	 *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
	 *	SCL_ZIO > SCL_FREE > SCL_VDEV
	 * (For more information about the spa_config_lock see spa_misc.c)
	 * The higher the lock, the broader its coverage. When we passivate
	 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
	 * config locks. However, the metaslab group's taskq might be trying
	 * to preload metaslabs so we must drop the SCL_ZIO lock and any
	 * lower locks to allow the I/O to complete. At a minimum,
	 * we continue to hold the SCL_ALLOC lock, which prevents any future
	 * allocations from taking place and any changes to the vdev tree.
	 */
	spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
	taskq_wait_outstanding(spa->spa_metaslab_taskq, 0);
	spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
	metaslab_group_alloc_update(mg);
	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		metaslab_t *msp = mga->mga_primary;
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
		msp = mga->mga_secondary;
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
	}

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mgnext = NULL;
	} else {
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		if (mc->mc_allocator[i].mca_rotor == mg)
			mc->mc_allocator[i].mca_rotor = mgnext;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}

boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	vdev_stat_t *vs = &vd->vdev_stat;

	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}

uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
	/*
	 * Note that the number of nodes in mg_metaslab_tree may be one less
	 * than vdev_ms_count, due to the embedded log metaslab.
	 */
	mutex_enter(&mg->mg_lock);
	uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
	mutex_exit(&mg->mg_lock);
	return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
}

void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
	uint64_t *mg_hist;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
	    SPACE_MAP_HISTOGRAM_SIZE + ashift);

	mutex_enter(&mg->mg_lock);
	for (metaslab_t *msp = avl_first(t);
	    msp != NULL; msp = AVL_NEXT(t, msp)) {
		VERIFY3P(msp->ms_group, ==, mg);
		/* skip if not active */
		if (msp->ms_sm == NULL)
			continue;

		for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
			mg_hist[i + ashift] +=
			    msp->ms_sm->sm_phys->smp_histogram[i];
		}
	}

	for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);

	mutex_exit(&mg->mg_lock);

	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	mutex_enter(&mc->mc_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
		mg->mg_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mc->mc_lock);
	mutex_exit(&mg->mg_lock);
}

void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	mutex_enter(&mc->mc_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		ASSERT3U(mg->mg_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		ASSERT3U(mc->mc_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));

		mg->mg_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mc->mc_lock);
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	ASSERT(msp->ms_group == NULL);
	mutex_enter(&mg->mg_lock);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);

	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_add(mg, msp);
	mutex_exit(&msp->ms_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_remove(mg, msp);
	mutex_exit(&msp->ms_lock);

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);

	metaslab_class_t *mc = msp->ms_group->mg_class;
	multilist_sublist_t *mls =
	    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
	if (multilist_link_active(&msp->ms_class_txg_node))
		multilist_sublist_remove(mls, msp);
	multilist_sublist_unlock(mls);

	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(MUTEX_HELD(&mg->mg_lock));
	ASSERT(msp->ms_group == mg);

	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 511].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	metaslab_group_sort_impl(mg, msp, weight);
	mutex_exit(&mg->mg_lock);
}

/*
 * Calculate the fragmentation for a given metaslab group. We can use
 * a simple average here since all metaslabs within the group must have
 * the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in
 * this group have a fragmentation metric.
 */
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	uint64_t fragmentation = 0;
	uint64_t valid_ms = 0;

	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
			continue;
		if (msp->ms_group != mg)
			continue;

		valid_ms++;
		fragmentation += msp->ms_fragmentation;
	}

	if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
		return (ZFS_FRAG_INVALID);

	fragmentation /= valid_ms;
	ASSERT3U(fragmentation, <=, 100);
	return (fragmentation);
}

/*
 * Determine if a given metaslab group should skip allocations. A metaslab
 * group should avoid allocations if its free capacity is less than the
 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
 * that can still handle allocations. If the allocation throttle is enabled
 * then we skip allocations to devices that have reached their maximum
 * allocation queue depth unless the selected metaslab group is the only
 * eligible group remaining.
 */
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
    int flags, uint64_t psize, int allocator, int d)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_class_t *mc = mg->mg_class;

	/*
	 * We can only consider skipping this metaslab group if it's
	 * in the normal metaslab class and there are other metaslab
	 * groups to select from. Otherwise, we always consider it eligible
	 * for allocations.
	 */
	if ((mc != spa_normal_class(spa) &&
	    mc != spa_special_class(spa) &&
	    mc != spa_dedup_class(spa)) ||
	    mc->mc_groups <= 1)
		return (B_TRUE);

	/*
	 * If the metaslab group's mg_allocatable flag is set (see comments
	 * in metaslab_group_alloc_update() for more information) and
	 * the allocation throttle is disabled then allow allocations to this
	 * device. However, if the allocation throttle is enabled then
	 * check if we have reached our allocation limit
	 * (mga_alloc_queue_depth) to determine if we should allow allocations
	 * to this metaslab group.
	 * If all metaslab groups are no longer considered allocatable
	 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
	 * gang block size then we allow allocations on this metaslab group
	 * regardless of the mg_allocatable or throttle settings.
	 */
	if (mg->mg_allocatable) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
		int64_t qdepth;
		uint64_t qmax = mga->mga_cur_max_alloc_queue_depth;

		if (!mc->mc_alloc_throttle_enabled)
			return (B_TRUE);

		/*
		 * If this metaslab group does not have any free space, then
		 * there is no point in looking further.
		 */
		if (mg->mg_no_free_space)
			return (B_FALSE);

		/*
		 * Some allocations (e.g., those coming from device removal
		 * where the allocations are not even counted in the
		 * metaslab allocation queues) are allowed to bypass
		 * the throttle.
		 */
		if (flags & METASLAB_DONT_THROTTLE)
			return (B_TRUE);

		/*
		 * Relax allocation throttling for ditto blocks. Due to
		 * random imbalances in allocation it tends to push copies
		 * to one vdev, that looks a bit better at the moment.
		 */
		qmax = qmax * (4 + d) / 4;

		qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth);

		/*
		 * If this metaslab group is below its qmax or it's
		 * the only allocatable metaslab group, then attempt
		 * to allocate from it.
		 */
		if (qdepth < qmax || mc->mc_alloc_groups == 1)
			return (B_TRUE);
		ASSERT3U(mc->mc_alloc_groups, >, 1);

		/*
		 * Since this metaslab group is at or over its qmax, we
		 * need to determine if there are metaslab groups after this
		 * one that might be able to handle this allocation. This is
		 * racy since we can't hold the locks for all metaslab
		 * groups at the same time when we make this check.
		 */
		for (metaslab_group_t *mgp = mg->mg_next;
		    mgp != rotor; mgp = mgp->mg_next) {
			metaslab_group_allocator_t *mgap =
			    &mgp->mg_allocator[allocator];
			qmax = mgap->mga_cur_max_alloc_queue_depth;
			qmax = qmax * (4 + d) / 4;
			qdepth =
			    zfs_refcount_count(&mgap->mga_alloc_queue_depth);

			/*
			 * If there is another metaslab group that
			 * might be able to handle the allocation, then
			 * we return false so that we skip this group.
			 */
			if (qdepth < qmax && !mgp->mg_no_free_space)
				return (B_FALSE);
		}

		/*
		 * We didn't find another group to handle the allocation
		 * so we can't skip this metaslab group even though
		 * we are at or over our qmax.
		 */
		return (B_TRUE);

	} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * ==========================================================================
 * Range tree callbacks
 * ==========================================================================
 */

/*
 * Comparison function for the private size-ordered tree using 32-bit
 * ranges. Tree is sorted by size, larger sizes at the end of the tree.
 */
__attribute__((always_inline)) inline
static int
metaslab_rangesize32_compare(const void *x1, const void *x2)
{
	const range_seg32_t *r1 = x1;
	const range_seg32_t *r2 = x2;

	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
	uint64_t rs_size2 = r2->rs_end - r2->rs_start;

	int cmp = TREE_CMP(rs_size1, rs_size2);

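	/*
	 * Branch-free tie-break: if the sizes differ, !cmp is 0 and the
	 * size comparison wins; on a size tie the start offsets decide,
	 * keeping equal-size segments in a deterministic order.
	 */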
	return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
}

/*
 * Comparison function for the private size-ordered tree using 64-bit
 * ranges. Tree is sorted by size, larger sizes at the end of the tree.
 */
__attribute__((always_inline)) inline
static int
metaslab_rangesize64_compare(const void *x1, const void *x2)
{
	const range_seg64_t *r1 = x1;
	const range_seg64_t *r2 = x2;

	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
	uint64_t rs_size2 = r2->rs_end - r2->rs_start;

	int cmp = TREE_CMP(rs_size1, rs_size2);

	return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
}

typedef struct metaslab_rt_arg {
	zfs_btree_t *mra_bt;
	uint32_t mra_floor_shift;
} metaslab_rt_arg_t;

struct mssa_arg {
	range_tree_t *rt;
	metaslab_rt_arg_t *mra;
};

static void
metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
{
	struct mssa_arg *mssap = arg;
	range_tree_t *rt = mssap->rt;
	metaslab_rt_arg_t *mrap = mssap->mra;
	range_seg_max_t seg = {0};
	rs_set_start(&seg, rt, start);
	rs_set_end(&seg, rt, start + size);
	metaslab_rt_add(rt, &seg, mrap);
}

static void
metaslab_size_tree_full_load(range_tree_t *rt)
{
	metaslab_rt_arg_t *mrap = rt->rt_arg;
	METASLABSTAT_BUMP(metaslabstat_reload_tree);
	ASSERT0(zfs_btree_numnodes(mrap->mra_bt));
	mrap->mra_floor_shift = 0;
	struct mssa_arg arg = {0};
	arg.rt = rt;
	arg.mra = mrap;
	range_tree_walk(rt, metaslab_size_sorted_add, &arg);
}


ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize32_in_buf,
    range_seg32_t, metaslab_rangesize32_compare)

ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize64_in_buf,
    range_seg64_t, metaslab_rangesize64_compare)

/*
 * Create any block allocator specific components. The current allocators
 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
 */
static void
metaslab_rt_create(range_tree_t *rt, void *arg)
{
	metaslab_rt_arg_t *mrap = arg;
	zfs_btree_t *size_tree = mrap->mra_bt;

	size_t size;
	int (*compare) (const void *, const void *);
	bt_find_in_buf_f bt_find;
	switch (rt->rt_type) {
	case RANGE_SEG32:
		size = sizeof (range_seg32_t);
		compare = metaslab_rangesize32_compare;
		bt_find = metaslab_rt_find_rangesize32_in_buf;
		break;
	case RANGE_SEG64:
		size = sizeof (range_seg64_t);
		compare = metaslab_rangesize64_compare;
		bt_find = metaslab_rt_find_rangesize64_in_buf;
		break;
	default:
		panic("Invalid range seg type %d", rt->rt_type);
	}
	zfs_btree_create(size_tree, compare, bt_find, size);
	mrap->mra_floor_shift = metaslab_by_size_min_shift;
}

static void
metaslab_rt_destroy(range_tree_t *rt, void *arg)
{
	(void) rt;
	metaslab_rt_arg_t *mrap = arg;
	zfs_btree_t *size_tree = mrap->mra_bt;

	zfs_btree_destroy(size_tree);
	kmem_free(mrap, sizeof (*mrap));
}

static void
metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	metaslab_rt_arg_t *mrap = arg;
	zfs_btree_t *size_tree = mrap->mra_bt;

	if (rs_get_end(rs, rt) - rs_get_start(rs, rt) <
	    (1ULL << mrap->mra_floor_shift))
		return;

	zfs_btree_add(size_tree, rs);
}

static void
metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	metaslab_rt_arg_t *mrap = arg;
	zfs_btree_t *size_tree = mrap->mra_bt;

	if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1ULL <<
	    mrap->mra_floor_shift))
		return;

	zfs_btree_remove(size_tree, rs);
}

static void
metaslab_rt_vacate(range_tree_t *rt, void *arg)
{
	metaslab_rt_arg_t *mrap = arg;
	zfs_btree_t *size_tree = mrap->mra_bt;
	zfs_btree_clear(size_tree);
	zfs_btree_destroy(size_tree);

	metaslab_rt_create(rt, arg);
}

static const range_tree_ops_t metaslab_rt_ops = {
	.rtop_create = metaslab_rt_create,
	.rtop_destroy = metaslab_rt_destroy,
	.rtop_add = metaslab_rt_add,
	.rtop_remove = metaslab_rt_remove,
	.rtop_vacate = metaslab_rt_vacate
};

/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */

/*
 * Return the maximum contiguous segment within the metaslab.
 */
uint64_t
metaslab_largest_allocatable(metaslab_t *msp)
{
	zfs_btree_t *t = &msp->ms_allocatable_by_size;
	range_seg_t *rs;

	if (t == NULL)
		return (0);
	if (zfs_btree_numnodes(t) == 0)
		metaslab_size_tree_full_load(msp->ms_allocatable);

	rs = zfs_btree_last(t, NULL);
	if (rs == NULL)
		return (0);

	return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs,
	    msp->ms_allocatable));
}

/*
 * Return the maximum contiguous segment within the unflushed frees of this
 * metaslab.
 */
static uint64_t
metaslab_largest_unflushed_free(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if (msp->ms_unflushed_frees == NULL)
		return (0);

	if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0)
		metaslab_size_tree_full_load(msp->ms_unflushed_frees);
	range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
	    NULL);
	if (rs == NULL)
		return (0);

	/*
	 * When a range is freed from the metaslab, that range is added to
	 * both the unflushed frees and the deferred frees. While the block
	 * will eventually be usable, if the metaslab were loaded the range
	 * would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
	 * txgs had passed. As a result, when attempting to estimate an upper
	 * bound for the largest currently-usable free segment in the
	 * metaslab, we need to not consider any ranges currently in the defer
	 * trees. This algorithm approximates the largest available chunk in
	 * the largest range in the unflushed_frees tree by taking the first
	 * chunk. While this may be a poor estimate, it should only remain so
	 * briefly and should eventually self-correct as frees are no longer
	 * deferred. Similar logic applies to the ms_freed tree. See
	 * metaslab_load() for more details.
	 *
	 * There are two primary sources of inaccuracy in this estimate. Both
	 * are tolerated for performance reasons. The first source is that we
	 * only check the largest segment for overlaps. Smaller segments may
	 * have more favorable overlaps with the other trees, resulting in
	 * larger usable chunks. Second, we only look at the first chunk in
	 * the largest segment; there may be other usable chunks in the
	 * largest segment, but we ignore them.
	 */
	uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees);
	uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
		uint64_t start = 0;
		uint64_t size = 0;
		boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart,
		    rsize, &start, &size);
		if (found) {
			if (rstart == start)
				return (0);
			rsize = start - rstart;
		}
	}

	uint64_t start = 0;
	uint64_t size = 0;
	boolean_t found = range_tree_find_in(msp->ms_freed, rstart,
	    rsize, &start, &size);
	if (found)
		rsize = start - rstart;

	return (rsize);
}

static range_seg_t *
metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start,
    uint64_t size, zfs_btree_index_t *where)
{
	range_seg_t *rs;
	range_seg_max_t rsearch;

	rs_set_start(&rsearch, rt, start);
	rs_set_end(&rsearch, rt, start + size);

	rs = zfs_btree_find(t, &rsearch, where);
	if (rs == NULL) {
		rs = zfs_btree_next(t, where, where);
	}

	return (rs);
}

/*
 * This is a helper function that can be used by the allocator to find a
 * suitable block to allocate. This will search the specified B-tree looking
 * for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size,
    uint64_t max_search)
{
	if (*cursor == 0)
		*cursor = rt->rt_start;
	zfs_btree_t *bt = &rt->rt_root;
	zfs_btree_index_t where;
	range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where);
	uint64_t first_found;
	int count_searched = 0;

	if (rs != NULL)
		first_found = rs_get_start(rs, rt);

	while (rs != NULL && (rs_get_start(rs, rt) - first_found <=
	    max_search || count_searched < metaslab_min_search_count)) {
		uint64_t offset = rs_get_start(rs, rt);
		if (offset + size <= rs_get_end(rs, rt)) {
			*cursor = offset + size;
			return (offset);
		}
		rs = zfs_btree_next(bt, &where, &where);
		count_searched++;
	}

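	/*
	 * No suitable segment within the search window; reset the cursor
	 * so the next attempt starts from the beginning of the tree, and
	 * report failure with -1ULL.
	 */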
1645 *cursor = 0;
1646 return (-1ULL);
1647 }
1648
1649 static uint64_t metaslab_df_alloc(metaslab_t *msp, uint64_t size);
1650 static uint64_t metaslab_cf_alloc(metaslab_t *msp, uint64_t size);
1651 static uint64_t metaslab_ndf_alloc(metaslab_t *msp, uint64_t size);
1652 metaslab_ops_t *metaslab_allocator(spa_t *spa);
1653
1654 static metaslab_ops_t metaslab_allocators[] = {
1655 { "dynamic", metaslab_df_alloc },
1656 { "cursor", metaslab_cf_alloc },
1657 { "new-dynamic", metaslab_ndf_alloc },
1658 };
1659
1660 static int
1661 spa_find_allocator_byname(const char *val)
1662 {
1663 int a = ARRAY_SIZE(metaslab_allocators) - 1;
1664 if (strcmp("new-dynamic", val) == 0)
1665 return (-1); /* remove when ndf is working */
1666 for (; a >= 0; a--) {
1667 if (strcmp(val, metaslab_allocators[a].msop_name) == 0)
1668 return (a);
1669 }
1670 return (-1);
1671 }
1672
1673 void
1674 spa_set_allocator(spa_t *spa, const char *allocator)
1675 {
1676 int a = spa_find_allocator_byname(allocator);
1677 if (a < 0) a = 0;
1678 spa->spa_active_allocator = a;
1679 zfs_dbgmsg("spa allocator: %s\n", metaslab_allocators[a].msop_name);
1680 }
1681
1682 int
1683 spa_get_allocator(spa_t *spa)
1684 {
1685 return (spa->spa_active_allocator);
1686 }
1687
1688 #if defined(_KERNEL)
1689 int
1690 param_set_active_allocator_common(const char *val)
1691 {
1692 char *p;
1693
1694 if (val == NULL)
1695 return (SET_ERROR(EINVAL));
1696
1697 if ((p = strchr(val, '\n')) != NULL)
1698 *p = '\0';
1699
1700 int a = spa_find_allocator_byname(val);
1701 if (a < 0)
1702 return (SET_ERROR(EINVAL));
1703
1704 zfs_active_allocator = metaslab_allocators[a].msop_name;
1705 return (0);
1706 }
1707 #endif
1708
1709 metaslab_ops_t *
1710 metaslab_allocator(spa_t *spa)
1711 {
1712 int allocator = spa_get_allocator(spa);
1713 return (&metaslab_allocators[allocator]);
1714 }
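/*
 * Usage note (illustrative): in the kernel the active allocator is
 * selected through the zfs_active_allocator module parameter, which
 * param_set_active_allocator_common() above validates. On Linux this
 * would typically look like:
 *
 *	echo dynamic > /sys/module/zfs/parameters/zfs_active_allocator
 *
 * Unknown names (including "new-dynamic", for now) are rejected with
 * EINVAL.
 */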
1715
1716 /*
1717 * ==========================================================================
1718 * Dynamic Fit (df) block allocator
1719 *
1720 * Search for a free chunk of at least this size, starting from the last
1721 * offset (for this alignment of block) looking for up to
1722 * metaslab_df_max_search bytes (16MB). If a large enough free chunk is not
1723 * found within 16MB, then return a free chunk of exactly the requested size (or
1724 * larger).
1725 *
1726 * If it seems like searching from the last offset will be unproductive, skip
1727 * that and just return a free chunk of exactly the requested size (or larger).
1728 * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This
1729 * mechanism is probably not very useful and may be removed in the future.
1730 *
1731 * The behavior when not searching can be changed to return the largest free
1732 * chunk, instead of a free chunk of exactly the requested size, by setting
1733 * metaslab_df_use_largest_segment.
1734 * ==========================================================================
1735 */
1736 static uint64_t
1737 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1738 {
1739 /*
1740 * Find the largest power of 2 block size that evenly divides the
1741 * requested size. This is used to try to allocate blocks with similar
1742 * alignment from the same area of the metaslab (i.e. same cursor
1743 * bucket), but it does not guarantee that allocations of other
1744 * sizes will not exist in the same region.
1745 */
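/*
 * For example (illustrative values): size = 0x6000 (24K) gives
 * align = 0x6000 & -0x6000 = 0x2000 (8K), so this allocation shares
 * the cursor bucket ms_lbas[highbit64(0x2000) - 1] = ms_lbas[13]
 * with every other allocation whose size is an odd multiple of 8K.
 */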
1746 uint64_t align = size & -size;
1747 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1748 range_tree_t *rt = msp->ms_allocatable;
1749 uint_t free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1750 uint64_t offset;
1751
1752 ASSERT(MUTEX_HELD(&msp->ms_lock));
1753
1754 /*
1755 * If we're running low on space, find a segment based on size,
1756 * rather than iterating based on offset.
1757 */
1758 if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold ||
1759 free_pct < metaslab_df_free_pct) {
1760 offset = -1;
1761 } else {
1762 offset = metaslab_block_picker(rt,
1763 cursor, size, metaslab_df_max_search);
1764 }
1765
1766 if (offset == -1) {
1767 range_seg_t *rs;
1768 if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0)
1769 metaslab_size_tree_full_load(msp->ms_allocatable);
1770
1771 if (metaslab_df_use_largest_segment) {
1772 /* use largest free segment */
1773 rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL);
1774 } else {
1775 zfs_btree_index_t where;
1776 /* use segment of this size, or next largest */
1777 rs = metaslab_block_find(&msp->ms_allocatable_by_size,
1778 rt, msp->ms_start, size, &where);
1779 }
1780 if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs,
1781 rt)) {
1782 offset = rs_get_start(rs, rt);
1783 *cursor = offset + size;
1784 }
1785 }
1786
1787 return (offset);
1788 }
1789
1790 /*
1791 * ==========================================================================
1792 * Cursor fit block allocator -
1793 * Select the largest region in the metaslab, set the cursor to the beginning
1794 * of the range and the cursor_end to the end of the range. As allocations
1795 * are made advance the cursor. Continue allocating from the cursor until
1796 * the range is exhausted and then find a new range.
1797 * ==========================================================================
1798 */
1799 static uint64_t
1800 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1801 {
1802 range_tree_t *rt = msp->ms_allocatable;
1803 zfs_btree_t *t = &msp->ms_allocatable_by_size;
1804 uint64_t *cursor = &msp->ms_lbas[0];
1805 uint64_t *cursor_end = &msp->ms_lbas[1];
1806 uint64_t offset = 0;
1807
1808 ASSERT(MUTEX_HELD(&msp->ms_lock));
1809
1810 ASSERT3U(*cursor_end, >=, *cursor);
1811
1812 if ((*cursor + size) > *cursor_end) {
1813 range_seg_t *rs;
1814
1815 if (zfs_btree_numnodes(t) == 0)
1816 metaslab_size_tree_full_load(msp->ms_allocatable);
1817 rs = zfs_btree_last(t, NULL);
1818 if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) <
1819 size)
1820 return (-1ULL);
1821
1822 *cursor = rs_get_start(rs, rt);
1823 *cursor_end = rs_get_end(rs, rt);
1824 }
1825
1826 offset = *cursor;
1827 *cursor += size;
1828
1829 return (offset);
1830 }
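/*
 * Illustrative example (hypothetical values): if the largest free
 * segment is [0x40000, 0x80000), the first allocation sets
 * *cursor = 0x40000 and *cursor_end = 0x80000. Subsequent allocations
 * simply bump the cursor (0x40000, 0x42000, ... for 8K requests) until
 * a request no longer fits before cursor_end, at which point the
 * then-largest segment is selected as the new range.
 */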
1831
1832 /*
1833 * ==========================================================================
1834 * New dynamic fit allocator -
1835 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1836 * contiguous blocks. If no region is found then just use the largest segment
1837 * that remains.
1838 * ==========================================================================
1839 */
1840
1841 /*
1842 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1843 * to request from the allocator.
1844 */
1845 uint64_t metaslab_ndf_clump_shift = 4;
1846
1847 static uint64_t
1848 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1849 {
1850 zfs_btree_t *t = &msp->ms_allocatable->rt_root;
1851 range_tree_t *rt = msp->ms_allocatable;
1852 zfs_btree_index_t where;
1853 range_seg_t *rs;
1854 range_seg_max_t rsearch;
1855 uint64_t hbit = highbit64(size);
1856 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1857 uint64_t max_size = metaslab_largest_allocatable(msp);
1858
1859 ASSERT(MUTEX_HELD(&msp->ms_lock));
1860
1861 if (max_size < size)
1862 return (-1ULL);
1863
1864 rs_set_start(&rsearch, rt, *cursor);
1865 rs_set_end(&rsearch, rt, *cursor + size);
1866
1867 rs = zfs_btree_find(t, &rsearch, &where);
1868 if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) {
1869 t = &msp->ms_allocatable_by_size;
1870
1871 rs_set_start(&rsearch, rt, 0);
1872 rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit +
1873 metaslab_ndf_clump_shift)));
1874
1875 rs = zfs_btree_find(t, &rsearch, &where);
1876 if (rs == NULL)
1877 rs = zfs_btree_next(t, &where, &where);
1878 ASSERT(rs != NULL);
1879 }
1880
1881 if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) {
1882 *cursor = rs_get_start(rs, rt) + size;
1883 return (rs_get_start(rs, rt));
1884 }
1885 return (-1ULL);
1886 }
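/*
 * Illustrative example (hypothetical values): for an 8K request,
 * hbit = highbit64(8192) = 14, so with the default
 * metaslab_ndf_clump_shift of 4 the fallback search looks for a
 * segment of up to MIN(max_size, 1 << 18) = 256K, i.e. room for about
 * 2^4 = 16 more allocations of this size near each other.
 */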
1887
1888 /*
1889 * ==========================================================================
1890 * Metaslabs
1891 * ==========================================================================
1892 */
1893
1894 /*
1895 * Wait for any in-progress metaslab loads to complete.
1896 */
1897 static void
1898 metaslab_load_wait(metaslab_t *msp)
1899 {
1900 ASSERT(MUTEX_HELD(&msp->ms_lock));
1901
1902 while (msp->ms_loading) {
1903 ASSERT(!msp->ms_loaded);
1904 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1905 }
1906 }
1907
1908 /*
1909 * Wait for any in-progress flushing to complete.
1910 */
1911 static void
1912 metaslab_flush_wait(metaslab_t *msp)
1913 {
1914 ASSERT(MUTEX_HELD(&msp->ms_lock));
1915
1916 while (msp->ms_flushing)
1917 cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
1918 }
1919
1920 static unsigned int
1921 metaslab_idx_func(multilist_t *ml, void *arg)
1922 {
1923 metaslab_t *msp = arg;
1924
1925 /*
1926 * ms_id values are allocated sequentially, so a full 64-bit
1927 * division would be a waste of time; limit it to 32 bits.
1928 */
1929 return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml));
1930 }
1931
1932 uint64_t
1933 metaslab_allocated_space(metaslab_t *msp)
1934 {
1935 return (msp->ms_allocated_space);
1936 }
1937
1938 /*
1939 * Verify that the space accounting on disk matches the in-core range_trees.
1940 */
1941 static void
1942 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
1943 {
1944 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1945 uint64_t allocating = 0;
1946 uint64_t sm_free_space, msp_free_space;
1947
1948 ASSERT(MUTEX_HELD(&msp->ms_lock));
1949 ASSERT(!msp->ms_condensing);
1950
1951 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
1952 return;
1953
1954 /*
1955 * We can only verify the metaslab space when we're called
1956 * from syncing context with a loaded metaslab that has an
1957 * allocated space map. Calling this in non-syncing context
1958 * does not provide a consistent view of the metaslab since
1959 * we're performing allocations in the future.
1960 */
1961 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
1962 !msp->ms_loaded)
1963 return;
1964
1965 /*
1966 * Although the smp_alloc field can go negative in general,
1967 * it should never be negative when it comes to a metaslab's
1968 * space map.
1969 */
1970 ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
1971
1972 ASSERT3U(space_map_allocated(msp->ms_sm), >=,
1973 range_tree_space(msp->ms_unflushed_frees));
1974
1975 ASSERT3U(metaslab_allocated_space(msp), ==,
1976 space_map_allocated(msp->ms_sm) +
1977 range_tree_space(msp->ms_unflushed_allocs) -
1978 range_tree_space(msp->ms_unflushed_frees));
1979
1980 sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
1981
1982 /*
1983 * Account for future allocations since we would have
1984 * already deducted that space from the ms_allocatable.
1985 */
1986 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
1987 allocating +=
1988 range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
1989 }
1990 ASSERT3U(allocating + msp->ms_allocated_this_txg, ==,
1991 msp->ms_allocating_total);
1992
1993 ASSERT3U(msp->ms_deferspace, ==,
1994 range_tree_space(msp->ms_defer[0]) +
1995 range_tree_space(msp->ms_defer[1]));
1996
1997 msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
1998 msp->ms_deferspace + range_tree_space(msp->ms_freed);
1999
2000 VERIFY3U(sm_free_space, ==, msp_free_space);
2001 }
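/*
 * In summary, the invariant verified above is (all quantities in
 * bytes):
 *
 *	ms_size - allocated_space ==
 *	    allocatable + allocating + deferspace + freed
 *
 * where allocated_space itself is sm_alloc + unflushed_allocs -
 * unflushed_frees, so a mismatch on either side points at a
 * divergence between the range trees and the space map accounting.
 */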
2002
2003 static void
2004 metaslab_aux_histograms_clear(metaslab_t *msp)
2005 {
2006 /*
2007 * Auxiliary histograms are only cleared when resetting them,
2008 * which can only happen while the metaslab is loaded.
2009 */
2010 ASSERT(msp->ms_loaded);
2011
2012 memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
2013 for (int t = 0; t < TXG_DEFER_SIZE; t++)
2014 memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t]));
2015 }
2016
2017 static void
2018 metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
2019 range_tree_t *rt)
2020 {
2021 /*
2022 * This is modeled after space_map_histogram_add(), so refer to that
2023 * function for implementation details. We want this to work like
2024 * the space map histogram, and not the range tree histogram, as we
2025 * are essentially constructing a delta that will be later subtracted
2026 * from the space map histogram.
2027 */
2028 int idx = 0;
2029 for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
2030 ASSERT3U(i, >=, idx + shift);
2031 histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
2032
2033 if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
2034 ASSERT3U(idx + shift, ==, i);
2035 idx++;
2036 ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
2037 }
2038 }
2039 }
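/*
 * Worked example (hypothetical values): with shift = 9 (a 512B
 * sm_shift), a range-tree bucket i = 12 (segments in [4K, 8K)) lands
 * in histogram[idx = 3], since 12 - 9 = 3 and the scaling factor
 * 1 << (i - idx - shift) is 1 while idx still tracks i. Range-tree
 * buckets beyond the space map histogram's reach all collapse into
 * the last index, with the factor growing so that the accumulated
 * counts stay comparable to space map units.
 */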
2040
2041 /*
2042 * Called at every sync pass that the metaslab gets synced.
2043 *
2044 * The reason is that we want our auxiliary histograms to be updated
2045 * whenever the metaslab's space map histogram is updated. This way
2046 * we stay consistent on which parts of the metaslab space map's
2047 * histogram are currently not available for allocations (e.g. because
2048 * they are in the defer, freed, and freeing trees).
2049 */
2050 static void
2051 metaslab_aux_histograms_update(metaslab_t *msp)
2052 {
2053 space_map_t *sm = msp->ms_sm;
2054 ASSERT(sm != NULL);
2055
2056 /*
2057 * This is similar to the metaslab's space map histogram updates
2058 * that take place in metaslab_sync(). The only difference is that
2059 * we only care about segments that haven't made it into the
2060 * ms_allocatable tree yet.
2061 */
2062 if (msp->ms_loaded) {
2063 metaslab_aux_histograms_clear(msp);
2064
2065 metaslab_aux_histogram_add(msp->ms_synchist,
2066 sm->sm_shift, msp->ms_freed);
2067
2068 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2069 metaslab_aux_histogram_add(msp->ms_deferhist[t],
2070 sm->sm_shift, msp->ms_defer[t]);
2071 }
2072 }
2073
2074 metaslab_aux_histogram_add(msp->ms_synchist,
2075 sm->sm_shift, msp->ms_freeing);
2076 }
2077
2078 /*
2079 * Called every time we are done syncing (writing to) the metaslab,
2080 * i.e. at the end of each sync pass.
2081 * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
2082 */
2083 static void
2084 metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
2085 {
2086 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2087 space_map_t *sm = msp->ms_sm;
2088
2089 if (sm == NULL) {
2090 /*
2091 * We came here from metaslab_init() when creating/opening a
2092 * pool, looking at a metaslab that hasn't had any allocations
2093 * yet.
2094 */
2095 return;
2096 }
2097
2098 /*
2099 * This is similar to the actions that we take for the ms_freed
2100 * and ms_defer trees in metaslab_sync_done().
2101 */
2102 uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
2103 if (defer_allowed) {
2104 memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist,
2105 sizeof (msp->ms_synchist));
2106 } else {
2107 memset(msp->ms_deferhist[hist_index], 0,
2108 sizeof (msp->ms_deferhist[hist_index]));
2109 }
2110 memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
2111 }
2112
2113 /*
2114 * Ensure that the metaslab's weight and fragmentation are consistent
2115 * with the contents of the histogram (either the range tree's histogram
2116 * or the space map's depending whether the metaslab is loaded).
2117 */
2118 static void
2119 metaslab_verify_weight_and_frag(metaslab_t *msp)
2120 {
2121 ASSERT(MUTEX_HELD(&msp->ms_lock));
2122
2123 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
2124 return;
2125
2126 /*
2127 * We can end up here from vdev_remove_complete(), in which case we
2128 * cannot do these assertions because we hold spa config locks and
2129 * thus we are not allowed to read from the DMU.
2130 *
2131 * We check if the metaslab group has been removed and if that's
2132 * the case we return immediately as that would mean that we are
2133 * here from the aforementioned code path.
2134 */
2135 if (msp->ms_group == NULL)
2136 return;
2137
2138 /*
2139 * Devices being removed always return a weight of 0 and leave
2140 * fragmentation and ms_max_size as is - there is nothing for
2141 * us to verify here.
2142 */
2143 vdev_t *vd = msp->ms_group->mg_vd;
2144 if (vd->vdev_removing)
2145 return;
2146
2147 /*
2148 * If the metaslab is dirty it probably means that we've done
2149 * some allocations or frees that have changed our histograms
2150 * and thus the weight.
2151 */
2152 for (int t = 0; t < TXG_SIZE; t++) {
2153 if (txg_list_member(&vd->vdev_ms_list, msp, t))
2154 return;
2155 }
2156
2157 /*
2158 * This verification checks that our in-memory state is consistent
2159 * with what's on disk. If the pool is read-only then there aren't
2160 * any changes and we just have the initially-loaded state.
2161 */
2162 if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
2163 return;
2164
2165 /* Perform some extra verification on the in-core tree when possible. */
2166 if (msp->ms_loaded) {
2167 range_tree_stat_verify(msp->ms_allocatable);
2168 VERIFY(space_map_histogram_verify(msp->ms_sm,
2169 msp->ms_allocatable));
2170 }
2171
2172 uint64_t weight = msp->ms_weight;
2173 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2174 boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
2175 uint64_t frag = msp->ms_fragmentation;
2176 uint64_t max_segsize = msp->ms_max_size;
2177
2178 msp->ms_weight = 0;
2179 msp->ms_fragmentation = 0;
2180
2181 /*
2182 * This function is used for verification purposes and thus should
2183 * not introduce any side-effects/mutations on the system's state.
2184 *
2185 * Regardless of whether metaslab_weight() thinks this metaslab
2186 * should be active or not, we want to ensure that the actual weight
2187 * (and therefore the value of ms_weight) would be the same if it
2188 * was to be recalculated at this point.
2189 *
2190 * In addition we set the nodirty flag so metaslab_weight() does
2191 * not dirty the metaslab for future TXGs (e.g. when trying to
2192 * force condensing to upgrade the metaslab spacemaps).
2193 */
2194 msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active;
2195
2196 VERIFY3U(max_segsize, ==, msp->ms_max_size);
2197
2198 /*
2199 * If the weight type changed then there is no point in doing
2200 * verification. Revert fields to their original values.
2201 */
2202 if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
2203 (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
2204 msp->ms_fragmentation = frag;
2205 msp->ms_weight = weight;
2206 return;
2207 }
2208
2209 VERIFY3U(msp->ms_fragmentation, ==, frag);
2210 VERIFY3U(msp->ms_weight, ==, weight);
2211 }
2212
2213 /*
2214 * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
2215 * this class that was used longest ago, and attempt to unload it. To avoid
2216 * performance degradation we don't want to spend too much time in this
2217 * loop, and we expect that most of the time this operation will
2218 * succeed. Between that and the normal unloading processing during txg sync,
2219 * we expect this to keep the metaslab memory usage under control.
2220 */
2221 static void
2222 metaslab_potentially_evict(metaslab_class_t *mc)
2223 {
2224 #ifdef _KERNEL
2225 uint64_t allmem = arc_all_memory();
2226 uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2227 uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
2228 uint_t tries = 0;
2229 for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
2230 tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2;
2231 tries++) {
2232 unsigned int idx = multilist_get_random_index(
2233 &mc->mc_metaslab_txg_list);
2234 multilist_sublist_t *mls =
2235 multilist_sublist_lock(&mc->mc_metaslab_txg_list, idx);
2236 metaslab_t *msp = multilist_sublist_head(mls);
2237 multilist_sublist_unlock(mls);
2238 while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
2239 inuse * size) {
2240 VERIFY3P(mls, ==, multilist_sublist_lock(
2241 &mc->mc_metaslab_txg_list, idx));
2242 ASSERT3U(idx, ==,
2243 metaslab_idx_func(&mc->mc_metaslab_txg_list, msp));
2244
2245 if (!multilist_link_active(&msp->ms_class_txg_node)) {
2246 multilist_sublist_unlock(mls);
2247 break;
2248 }
2249 metaslab_t *next_msp = multilist_sublist_next(mls, msp);
2250 multilist_sublist_unlock(mls);
2251 /*
2252 * If the metaslab is currently loading there are two
2253 * cases. If it's the metaslab we're evicting, we
2254 * can't continue on or we'll panic when we attempt to
2255 * recursively lock the mutex. If it's another
2256 * metaslab that's loading, it can be safely skipped,
2257 * since we know it's very new and therefore not a
2258 * good eviction candidate. We check later once the
2259 * lock is held that the metaslab is fully loaded
2260 * before actually unloading it.
2261 */
2262 if (msp->ms_loading) {
2263 msp = next_msp;
2264 inuse =
2265 spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2266 continue;
2267 }
2268 /*
2269 * We can't unload metaslabs with no spacemap because
2270 * they're not ready to be unloaded yet. We can't
2271 * unload metaslabs with outstanding allocations
2272 * because doing so could cause the metaslab's weight
2273 * to decrease while it's unloaded, which violates an
2274 * invariant that we use to prevent unnecessary
2275 * loading. We also don't unload metaslabs that are
2276 * currently active because they are high-weight
2277 * metaslabs that are likely to be used in the near
2278 * future.
2279 */
2280 mutex_enter(&msp->ms_lock);
2281 if (msp->ms_allocator == -1 && msp->ms_sm != NULL &&
2282 msp->ms_allocating_total == 0) {
2283 metaslab_unload(msp);
2284 }
2285 mutex_exit(&msp->ms_lock);
2286 msp = next_msp;
2287 inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2288 }
2289 }
2290 #else
2291 (void) mc, (void) zfs_metaslab_mem_limit;
2292 #endif
2293 }
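/*
 * Budget arithmetic, for illustration (hypothetical values): with
 * allmem = 16GiB and zfs_metaslab_mem_limit = 25, eviction proceeds
 * while the btree leaf cache holds more than 4GiB, i.e. while
 * allmem * zfs_metaslab_mem_limit / 100 < inuse * size. The tries
 * bound (twice the number of sublists) keeps the loop from spinning
 * when every candidate is loading or otherwise unevictable.
 */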
2294
2295 static int
2296 metaslab_load_impl(metaslab_t *msp)
2297 {
2298 int error = 0;
2299
2300 ASSERT(MUTEX_HELD(&msp->ms_lock));
2301 ASSERT(msp->ms_loading);
2302 ASSERT(!msp->ms_condensing);
2303
2304 /*
2305 * We temporarily drop the lock to unblock other operations while we
2306 * are reading the space map. Therefore, metaslab_sync() and
2307 * metaslab_sync_done() can run at the same time as we do.
2308 *
2309 * If we are using the log space maps, metaslab_sync() can't write to
2310 * the metaslab's space map while we are loading as we only write to
2311 * it when we are flushing the metaslab, and that can't happen while
2312 * we are loading it.
2313 *
2314 * If we are not using log space maps though, metaslab_sync() can
2315 * append to the space map while we are loading. Therefore we load
2316 * only entries that existed when we started the load. Additionally,
2317 * metaslab_sync_done() has to wait for the load to complete because
2318 * there are potential races like metaslab_load() loading parts of the
2319 * space map that are currently being appended by metaslab_sync(). If
2320 * we didn't, the ms_allocatable would have entries that
2321 * metaslab_sync_done() would try to re-add later.
2322 *
2323 * That's why before dropping the lock we remember the synced length
2324 * of the metaslab and read up to that point of the space map,
2325 * ignoring entries appended by metaslab_sync() that happen after we
2326 * drop the lock.
2327 */
2328 uint64_t length = msp->ms_synced_length;
2329 mutex_exit(&msp->ms_lock);
2330
2331 hrtime_t load_start = gethrtime();
2332 metaslab_rt_arg_t *mrap;
2333 if (msp->ms_allocatable->rt_arg == NULL) {
2334 mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2335 } else {
2336 mrap = msp->ms_allocatable->rt_arg;
2337 msp->ms_allocatable->rt_ops = NULL;
2338 msp->ms_allocatable->rt_arg = NULL;
2339 }
2340 mrap->mra_bt = &msp->ms_allocatable_by_size;
2341 mrap->mra_floor_shift = metaslab_by_size_min_shift;
2342
2343 if (msp->ms_sm != NULL) {
2344 error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
2345 SM_FREE, length);
2346
2347 /* Now, populate the size-sorted tree. */
2348 metaslab_rt_create(msp->ms_allocatable, mrap);
2349 msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2350 msp->ms_allocatable->rt_arg = mrap;
2351
2352 struct mssa_arg arg = {0};
2353 arg.rt = msp->ms_allocatable;
2354 arg.mra = mrap;
2355 range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add,
2356 &arg);
2357 } else {
2358 /*
2359 * Add the size-sorted tree first, since we don't need to load
2360 * the metaslab from the spacemap.
2361 */
2362 metaslab_rt_create(msp->ms_allocatable, mrap);
2363 msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2364 msp->ms_allocatable->rt_arg = mrap;
2365 /*
2366 * The space map has not been allocated yet, so treat
2367 * all the space in the metaslab as free and add it to the
2368 * ms_allocatable tree.
2369 */
2370 range_tree_add(msp->ms_allocatable,
2371 msp->ms_start, msp->ms_size);
2372
2373 if (msp->ms_new) {
2374 /*
2375 * If the ms_sm doesn't exist, this means that this
2376 * metaslab hasn't gone through metaslab_sync() and
2377 * thus has never been dirtied. So we shouldn't
2378 * expect any unflushed allocs or frees from previous
2379 * TXGs.
2380 */
2381 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
2382 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
2383 }
2384 }
2385
2386 /*
2387 * We need to grab the ms_sync_lock to prevent metaslab_sync() from
2388 * changing the ms_sm (or log_sm) and the metaslab's range trees
2389 * while we are about to use them and populate the ms_allocatable.
2390 * The ms_lock is insufficient for this because metaslab_sync() doesn't
2391 * hold the ms_lock while writing the ms_checkpointing tree to disk.
2392 */
2393 mutex_enter(&msp->ms_sync_lock);
2394 mutex_enter(&msp->ms_lock);
2395
2396 ASSERT(!msp->ms_condensing);
2397 ASSERT(!msp->ms_flushing);
2398
2399 if (error != 0) {
2400 mutex_exit(&msp->ms_sync_lock);
2401 return (error);
2402 }
2403
2404 ASSERT3P(msp->ms_group, !=, NULL);
2405 msp->ms_loaded = B_TRUE;
2406
2407 /*
2408 * Apply all the unflushed changes to ms_allocatable right
2409 * away so any manipulations we do below have a clear view
2410 * of what is allocated and what is free.
2411 */
2412 range_tree_walk(msp->ms_unflushed_allocs,
2413 range_tree_remove, msp->ms_allocatable);
2414 range_tree_walk(msp->ms_unflushed_frees,
2415 range_tree_add, msp->ms_allocatable);
2416
2417 ASSERT3P(msp->ms_group, !=, NULL);
2418 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2419 if (spa_syncing_log_sm(spa) != NULL) {
2420 ASSERT(spa_feature_is_enabled(spa,
2421 SPA_FEATURE_LOG_SPACEMAP));
2422
2423 /*
2424 * If we use a log space map we add all the segments
2425 * that are in ms_unflushed_frees so they are available
2426 * for allocation.
2427 *
2428 * ms_allocatable needs to contain all free segments
2429 * that are ready for allocations (thus not segments
2430 * from ms_freeing, ms_freed, and the ms_defer trees).
2431 * But if we grab the lock in this code path at a sync
2432 * pass later than 1, then it also contains the
2433 * segments of ms_freed (they were added to it earlier
2434 * in this path through ms_unflushed_frees). So we
2435 * need to remove all the segments that exist in
2436 * ms_freed from ms_allocatable as they will be added
2437 * later in metaslab_sync_done().
2438 *
2439 * When there's no log space map, the ms_allocatable
2440 * correctly doesn't contain any segments that exist
2441 * in ms_freed [see ms_synced_length].
2442 */
2443 range_tree_walk(msp->ms_freed,
2444 range_tree_remove, msp->ms_allocatable);
2445 }
2446
2447 /*
2448 * If we are not using the log space map, ms_allocatable
2449 * contains the segments that exist in the ms_defer trees
2450 * [see ms_synced_length]. Thus we need to remove them
2451 * from ms_allocatable as they will be added again in
2452 * metaslab_sync_done().
2453 *
2454 * If we are using the log space map, ms_allocatable still
2455 * contains the segments that exist in the ms_defer trees.
2456 * This is not because it read them through the ms_sm, but
2457 * because these segments are part of ms_unflushed_frees,
2458 * whose contents we added to ms_allocatable earlier in this
2459 * code path.
2460 */
2461 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2462 range_tree_walk(msp->ms_defer[t],
2463 range_tree_remove, msp->ms_allocatable);
2464 }
2465
2466 /*
2467 * Call metaslab_recalculate_weight_and_sort() now that the
2468 * metaslab is loaded so we get the metaslab's real weight.
2469 *
2470 * Unless this metaslab was created with older software and
2471 * has not yet been converted to use segment-based weight, we
2472 * expect the new weight to be better or equal to the weight
2473 * that the metaslab had while it was not loaded. This is
2474 * because the old weight does not take into account the
2475 * consolidation of adjacent segments between TXGs. [see
2476 * comment for ms_synchist and ms_deferhist[] for more info]
2477 */
2478 uint64_t weight = msp->ms_weight;
2479 uint64_t max_size = msp->ms_max_size;
2480 metaslab_recalculate_weight_and_sort(msp);
2481 if (!WEIGHT_IS_SPACEBASED(weight))
2482 ASSERT3U(weight, <=, msp->ms_weight);
2483 msp->ms_max_size = metaslab_largest_allocatable(msp);
2484 ASSERT3U(max_size, <=, msp->ms_max_size);
2485 hrtime_t load_end = gethrtime();
2486 msp->ms_load_time = load_end;
2487 zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
2488 "ms_id %llu, smp_length %llu, "
2489 "unflushed_allocs %llu, unflushed_frees %llu, "
2490 "freed %llu, defer %llu + %llu, unloaded time %llu ms, "
2491 "loading_time %lld ms, ms_max_size %llu, "
2492 "max size error %lld, "
2493 "old_weight %llx, new_weight %llx",
2494 (u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
2495 (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
2496 (u_longlong_t)msp->ms_id,
2497 (u_longlong_t)space_map_length(msp->ms_sm),
2498 (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
2499 (u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
2500 (u_longlong_t)range_tree_space(msp->ms_freed),
2501 (u_longlong_t)range_tree_space(msp->ms_defer[0]),
2502 (u_longlong_t)range_tree_space(msp->ms_defer[1]),
2503 (longlong_t)((load_start - msp->ms_unload_time) / 1000000),
2504 (longlong_t)((load_end - load_start) / 1000000),
2505 (u_longlong_t)msp->ms_max_size,
2506 (u_longlong_t)msp->ms_max_size - max_size,
2507 (u_longlong_t)weight, (u_longlong_t)msp->ms_weight);
2508
2509 metaslab_verify_space(msp, spa_syncing_txg(spa));
2510 mutex_exit(&msp->ms_sync_lock);
2511 return (0);
2512 }
2513
2514 int
2515 metaslab_load(metaslab_t *msp)
2516 {
2517 ASSERT(MUTEX_HELD(&msp->ms_lock));
2518
2519 /*
2520 * There may be another thread loading the same metaslab, if that's
2521 * the case just wait until the other thread is done and return.
2522 */
2523 metaslab_load_wait(msp);
2524 if (msp->ms_loaded)
2525 return (0);
2526 VERIFY(!msp->ms_loading);
2527 ASSERT(!msp->ms_condensing);
2528
2529 /*
2530 * We set the loading flag BEFORE potentially dropping the lock to
2531 * wait for an ongoing flush (see ms_flushing below). This way other
2532 * threads know that there is already a thread that is loading this
2533 * metaslab.
2534 */
2535 msp->ms_loading = B_TRUE;
2536
2537 /*
2538 * Wait for any in-progress flushing to finish as we drop the ms_lock
2539 * both here (during space_map_load()) and in metaslab_flush() (when
2540 * we flush our changes to the ms_sm).
2541 */
2542 if (msp->ms_flushing)
2543 metaslab_flush_wait(msp);
2544
2545 /*
2546 * In case we were waiting for the metaslab to be
2547 * flushed (where we temporarily dropped the ms_lock), ensure that
2548 * no one else loaded the metaslab somehow.
2549 */
2550 ASSERT(!msp->ms_loaded);
2551
2552 /*
2553 * If we're loading a metaslab in the normal class, consider evicting
2554 * another one to keep our memory usage under the limit defined by the
2555 * zfs_metaslab_mem_limit tunable.
2556 */
2557 if (spa_normal_class(msp->ms_group->mg_class->mc_spa) ==
2558 msp->ms_group->mg_class) {
2559 metaslab_potentially_evict(msp->ms_group->mg_class);
2560 }
2561
2562 int error = metaslab_load_impl(msp);
2563
2564 ASSERT(MUTEX_HELD(&msp->ms_lock));
2565 msp->ms_loading = B_FALSE;
2566 cv_broadcast(&msp->ms_load_cv);
2567
2568 return (error);
2569 }
2570
2571 void
2572 metaslab_unload(metaslab_t *msp)
2573 {
2574 ASSERT(MUTEX_HELD(&msp->ms_lock));
2575
2576 /*
2577 * This can happen if a metaslab is selected for eviction (in
2578 * metaslab_potentially_evict) and then unloaded during spa_sync (via
2579 * metaslab_class_evict_old).
2580 */
2581 if (!msp->ms_loaded)
2582 return;
2583
2584 range_tree_vacate(msp->ms_allocatable, NULL, NULL);
2585 msp->ms_loaded = B_FALSE;
2586 msp->ms_unload_time = gethrtime();
2587
2588 msp->ms_activation_weight = 0;
2589 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
2590
2591 if (msp->ms_group != NULL) {
2592 metaslab_class_t *mc = msp->ms_group->mg_class;
2593 multilist_sublist_t *mls =
2594 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
2595 if (multilist_link_active(&msp->ms_class_txg_node))
2596 multilist_sublist_remove(mls, msp);
2597 multilist_sublist_unlock(mls);
2598
2599 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2600 zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
2601 "ms_id %llu, weight %llx, "
2602 "selected txg %llu (%llu ms ago), alloc_txg %llu, "
2603 "loaded %llu ms ago, max_size %llu",
2604 (u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
2605 (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
2606 (u_longlong_t)msp->ms_id,
2607 (u_longlong_t)msp->ms_weight,
2608 (u_longlong_t)msp->ms_selected_txg,
2609 (u_longlong_t)(msp->ms_unload_time -
2610 msp->ms_selected_time) / 1000 / 1000,
2611 (u_longlong_t)msp->ms_alloc_txg,
2612 (u_longlong_t)(msp->ms_unload_time -
2613 msp->ms_load_time) / 1000 / 1000,
2614 (u_longlong_t)msp->ms_max_size);
2615 }
2616
2617 /*
2618 * We explicitly recalculate the metaslab's weight based on its space
2619 * map (as it is now not loaded). We want unloaded metaslabs to always
2620 * have their weights calculated from the space map histograms, while
2621 * loaded ones have it calculated from their in-core range tree
2622 * [see metaslab_load()]. This way, the weight reflects the information
2623 * available in-core, whether it is loaded or not.
2624 *
2625 * If ms_group == NULL, it means that we came here from metaslab_fini(),
2626 * at which point it doesn't make sense for us to do the recalculation
2627 * and the sorting.
2628 */
2629 if (msp->ms_group != NULL)
2630 metaslab_recalculate_weight_and_sort(msp);
2631 }
2632
2633 /*
2634 * We want to optimize the memory use of the per-metaslab range
2635 * trees. To do this, we store the segments in the range trees in
2636 * units of sectors, zero-indexing from the start of the metaslab. If
2637 * vdev_ms_shift - vdev_ashift is less than 32, we can store
2638 * the ranges using two uint32_ts, rather than two uint64_ts.
2639 */
2640 range_seg_type_t
2641 metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp,
2642 uint64_t *start, uint64_t *shift)
2643 {
2644 if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 &&
2645 !zfs_metaslab_force_large_segs) {
2646 *shift = vdev->vdev_ashift;
2647 *start = msp->ms_start;
2648 return (RANGE_SEG32);
2649 } else {
2650 *shift = 0;
2651 *start = 0;
2652 return (RANGE_SEG64);
2653 }
2654 }
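/*
 * Example (typical values): a vdev with vdev_ms_shift = 29 (512MB
 * metaslabs) and vdev_ashift = 12 (4K sectors) has 29 - 12 = 17 < 32,
 * so every offset within the metaslab fits in a 32-bit sector count
 * relative to ms_start and RANGE_SEG32 is used, halving the
 * per-segment memory footprint compared to RANGE_SEG64.
 */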
2655
2656 void
2657 metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
2658 {
2659 ASSERT(MUTEX_HELD(&msp->ms_lock));
2660 metaslab_class_t *mc = msp->ms_group->mg_class;
2661 multilist_sublist_t *mls =
2662 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
2663 if (multilist_link_active(&msp->ms_class_txg_node))
2664 multilist_sublist_remove(mls, msp);
2665 msp->ms_selected_txg = txg;
2666 msp->ms_selected_time = gethrtime();
2667 multilist_sublist_insert_tail(mls, msp);
2668 multilist_sublist_unlock(mls);
2669 }
2670
2671 void
2672 metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
2673 int64_t defer_delta, int64_t space_delta)
2674 {
2675 vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
2676
2677 ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
2678 ASSERT(vd->vdev_ms_count != 0);
2679
2680 metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
2681 vdev_deflated_space(vd, space_delta));
2682 }
2683
2684 int
2685 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
2686 uint64_t txg, metaslab_t **msp)
2687 {
2688 vdev_t *vd = mg->mg_vd;
2689 spa_t *spa = vd->vdev_spa;
2690 objset_t *mos = spa->spa_meta_objset;
2691 metaslab_t *ms;
2692 int error;
2693
2694 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
2695 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
2696 mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
2697 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
2698 cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
2699 multilist_link_init(&ms->ms_class_txg_node);
2700
2701 ms->ms_id = id;
2702 ms->ms_start = id << vd->vdev_ms_shift;
2703 ms->ms_size = 1ULL << vd->vdev_ms_shift;
2704 ms->ms_allocator = -1;
2705 ms->ms_new = B_TRUE;
2706
2707 vdev_ops_t *ops = vd->vdev_ops;
2708 if (ops->vdev_op_metaslab_init != NULL)
2709 ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size);
2710
2711 /*
2712 * We only open space map objects that already exist. All others
2713 * will be opened when we finally allocate an object for them. For
2714 * readonly pools there is no need to open the space map object.
2715 *
2716 * Note:
2717 * When called from vdev_expand(), we can't call into the DMU as
2718 * we are holding the spa_config_lock as a writer and we would
2719 * deadlock [see relevant comment in vdev_metaslab_init()]. In
2720 * that case, the object parameter is zero though, so we won't
2721 * call into the DMU.
2722 */
2723 if (object != 0 && !(spa->spa_mode == SPA_MODE_READ &&
2724 !spa->spa_read_spacemaps)) {
2725 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
2726 ms->ms_size, vd->vdev_ashift);
2727
2728 if (error != 0) {
2729 kmem_free(ms, sizeof (metaslab_t));
2730 return (error);
2731 }
2732
2733 ASSERT(ms->ms_sm != NULL);
2734 ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
2735 }
2736
2737 uint64_t shift, start;
2738 range_seg_type_t type =
2739 metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
2740
2741 ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift);
2742 for (int t = 0; t < TXG_SIZE; t++) {
2743 ms->ms_allocating[t] = range_tree_create(NULL, type,
2744 NULL, start, shift);
2745 }
2746 ms->ms_freeing = range_tree_create(NULL, type, NULL, start, shift);
2747 ms->ms_freed = range_tree_create(NULL, type, NULL, start, shift);
2748 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2749 ms->ms_defer[t] = range_tree_create(NULL, type, NULL,
2750 start, shift);
2751 }
2752 ms->ms_checkpointing =
2753 range_tree_create(NULL, type, NULL, start, shift);
2754 ms->ms_unflushed_allocs =
2755 range_tree_create(NULL, type, NULL, start, shift);
2756
2757 metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2758 mrap->mra_bt = &ms->ms_unflushed_frees_by_size;
2759 mrap->mra_floor_shift = metaslab_by_size_min_shift;
2760 ms->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops,
2761 type, mrap, start, shift);
2762
2763 ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift);
2764
2765 metaslab_group_add(mg, ms);
2766 metaslab_set_fragmentation(ms, B_FALSE);
2767
2768 /*
2769 * If we're opening an existing pool (txg == 0) or creating
2770 * a new one (txg == TXG_INITIAL), all space is available now.
2771 * If we're adding space to an existing pool, the new space
2772 * does not become available until after this txg has synced.
2773 * The metaslab's weight will also be initialized when we sync
2774 * out this txg. This ensures that we don't attempt to allocate
2775 * from it before we have initialized it completely.
2776 */
2777 if (txg <= TXG_INITIAL) {
2778 metaslab_sync_done(ms, 0);
2779 metaslab_space_update(vd, mg->mg_class,
2780 metaslab_allocated_space(ms), 0, 0);
2781 }
2782
2783 if (txg != 0) {
2784 vdev_dirty(vd, 0, NULL, txg);
2785 vdev_dirty(vd, VDD_METASLAB, ms, txg);
2786 }
2787
2788 *msp = ms;
2789
2790 return (0);
2791 }
2792
2793 static void
2794 metaslab_fini_flush_data(metaslab_t *msp)
2795 {
2796 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2797
2798 if (metaslab_unflushed_txg(msp) == 0) {
2799 ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
2800 ==, NULL);
2801 return;
2802 }
2803 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
2804
2805 mutex_enter(&spa->spa_flushed_ms_lock);
2806 avl_remove(&spa->spa_metaslabs_by_flushed, msp);
2807 mutex_exit(&spa->spa_flushed_ms_lock);
2808
2809 spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
2810 spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp),
2811 metaslab_unflushed_dirty(msp));
2812 }
2813
2814 uint64_t
2815 metaslab_unflushed_changes_memused(metaslab_t *ms)
2816 {
2817 return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
2818 range_tree_numsegs(ms->ms_unflushed_frees)) *
2819 ms->ms_unflushed_allocs->rt_root.bt_elem_size);
2820 }
2821
2822 void
2823 metaslab_fini(metaslab_t *msp)
2824 {
2825 metaslab_group_t *mg = msp->ms_group;
2826 vdev_t *vd = mg->mg_vd;
2827 spa_t *spa = vd->vdev_spa;
2828
2829 metaslab_fini_flush_data(msp);
2830
2831 metaslab_group_remove(mg, msp);
2832
2833 mutex_enter(&msp->ms_lock);
2834 VERIFY(msp->ms_group == NULL);
2835
2836 /*
2837 * If this metaslab hasn't been through metaslab_sync_done() yet its
2838 * space hasn't been accounted for in its vdev and doesn't need to be
2839 * subtracted.
2840 */
2841 if (!msp->ms_new) {
2842 metaslab_space_update(vd, mg->mg_class,
2843 -metaslab_allocated_space(msp), 0, -msp->ms_size);
2844
2845 }
2846 space_map_close(msp->ms_sm);
2847 msp->ms_sm = NULL;
2848
2849 metaslab_unload(msp);
2850
2851 range_tree_destroy(msp->ms_allocatable);
2852 range_tree_destroy(msp->ms_freeing);
2853 range_tree_destroy(msp->ms_freed);
2854
2855 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
2856 metaslab_unflushed_changes_memused(msp));
2857 spa->spa_unflushed_stats.sus_memused -=
2858 metaslab_unflushed_changes_memused(msp);
2859 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
2860 range_tree_destroy(msp->ms_unflushed_allocs);
2861 range_tree_destroy(msp->ms_checkpointing);
2862 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
2863 range_tree_destroy(msp->ms_unflushed_frees);
2864
2865 for (int t = 0; t < TXG_SIZE; t++) {
2866 range_tree_destroy(msp->ms_allocating[t]);
2867 }
2868 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2869 range_tree_destroy(msp->ms_defer[t]);
2870 }
2871 ASSERT0(msp->ms_deferspace);
2872
2873 for (int t = 0; t < TXG_SIZE; t++)
2874 ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
2875
2876 range_tree_vacate(msp->ms_trim, NULL, NULL);
2877 range_tree_destroy(msp->ms_trim);
2878
2879 mutex_exit(&msp->ms_lock);
2880 cv_destroy(&msp->ms_load_cv);
2881 cv_destroy(&msp->ms_flush_cv);
2882 mutex_destroy(&msp->ms_lock);
2883 mutex_destroy(&msp->ms_sync_lock);
2884 ASSERT3U(msp->ms_allocator, ==, -1);
2885
2886 kmem_free(msp, sizeof (metaslab_t));
2887 }
2888
2889 #define FRAGMENTATION_TABLE_SIZE 17
2890
2891 /*
2892 * This table defines a segment size based fragmentation metric that will
2893 * allow each metaslab to derive its own fragmentation value. This is done
2894 * by calculating the space in each bucket of the spacemap histogram and
2895 * multiplying that by the fragmentation metric in this table. Doing
2896 * this for all buckets and dividing it by the total amount of free
2897 * space in this metaslab (i.e. the total free space in all buckets) gives
2898 * us the fragmentation metric. This means that a high fragmentation metric
2899 * equates to most of the free space being comprised of small segments.
2900 * Conversely, if the metric is low, then most of the free space is in
2901 * large segments. A 10% change in fragmentation equates to approximately
2902 * double the number of segments.
2903 *
2904 * This table defines 0% fragmented space using 16MB segments. Testing has
2905 * shown that segments that are greater than or equal to 16MB do not suffer
2906 * from drastic performance problems. Using this value, we derive the rest
2907 * of the table. Since the fragmentation value is never stored on disk, it
2908 * is possible to change these calculations in the future.
2909 */
2910 static const int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
2911 100, /* 512B */
2912 100, /* 1K */
2913 98, /* 2K */
2914 95, /* 4K */
2915 90, /* 8K */
2916 80, /* 16K */
2917 70, /* 32K */
2918 60, /* 64K */
2919 50, /* 128K */
2920 40, /* 256K */
2921 30, /* 512K */
2922 20, /* 1M */
2923 15, /* 2M */
2924 10, /* 4M */
2925 5, /* 8M */
2926 0 /* 16M */
2927 };
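/*
 * Worked example (hypothetical histogram): if half of a metaslab's
 * free space sits in 8K segments (factor 90) and the other half in
 * 1M segments (factor 20), the resulting metric is
 * (0.5 * 90) + (0.5 * 20) = 55% fragmented. All free space in
 * segments of 16M or larger yields 0.
 */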
2928
2929 /*
2930 * Calculate the metaslab's fragmentation metric and set ms_fragmentation.
2931 * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
2932 * been upgraded and does not support this metric. Otherwise, the
2933 * value set here should be in the range [0, 100].
2934 */
2935 static void
2936 metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
2937 {
2938 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2939 uint64_t fragmentation = 0;
2940 uint64_t total = 0;
2941 boolean_t feature_enabled = spa_feature_is_enabled(spa,
2942 SPA_FEATURE_SPACEMAP_HISTOGRAM);
2943
2944 if (!feature_enabled) {
2945 msp->ms_fragmentation = ZFS_FRAG_INVALID;
2946 return;
2947 }
2948
2949 /*
2950 * A null space map means that the entire metaslab is free
2951 * and thus is not fragmented.
2952 */
2953 if (msp->ms_sm == NULL) {
2954 msp->ms_fragmentation = 0;
2955 return;
2956 }
2957
2958 /*
2959 * If this metaslab's space map has not been upgraded, flag it
2960 * so that we upgrade next time we encounter it.
2961 */
2962 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
2963 uint64_t txg = spa_syncing_txg(spa);
2964 vdev_t *vd = msp->ms_group->mg_vd;
2965
2966 /*
2967 * If we've reached the final dirty txg, then we must
2968 * be shutting down the pool. We don't want to dirty
2969 * any data past this point so skip setting the condense
2970 * flag. We can retry this action the next time the pool
2971 * is imported. We also skip marking this metaslab for
2972 * condensing if the caller has explicitly set nodirty.
2973 */
2974 if (!nodirty &&
2975 spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
2976 msp->ms_condense_wanted = B_TRUE;
2977 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2978 zfs_dbgmsg("txg %llu, requesting force condense: "
2979 "ms_id %llu, vdev_id %llu", (u_longlong_t)txg,
2980 (u_longlong_t)msp->ms_id,
2981 (u_longlong_t)vd->vdev_id);
2982 }
2983 msp->ms_fragmentation = ZFS_FRAG_INVALID;
2984 return;
2985 }
2986
2987 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
2988 uint64_t space = 0;
2989 uint8_t shift = msp->ms_sm->sm_shift;
2990
2991 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
2992 FRAGMENTATION_TABLE_SIZE - 1);
2993
2994 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
2995 continue;
2996
2997 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
2998 total += space;
2999
3000 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
3001 fragmentation += space * zfs_frag_table[idx];
3002 }
3003
3004 if (total > 0)
3005 fragmentation /= total;
3006 ASSERT3U(fragmentation, <=, 100);
3007
3008 msp->ms_fragmentation = fragmentation;
3009 }
3010
3011 /*
3012 * Compute a weight -- a selection preference value -- for the given metaslab.
3013 * This is based on the amount of free space, the level of fragmentation,
3014 * the LBA range, and whether the metaslab is loaded.
3015 */
3016 static uint64_t
3017 metaslab_space_weight(metaslab_t *msp)
3018 {
3019 metaslab_group_t *mg = msp->ms_group;
3020 vdev_t *vd = mg->mg_vd;
3021 uint64_t weight, space;
3022
3023 ASSERT(MUTEX_HELD(&msp->ms_lock));
3024
3025 /*
3026 * The baseline weight is the metaslab's free space.
3027 */
3028 space = msp->ms_size - metaslab_allocated_space(msp);
3029
3030 if (metaslab_fragmentation_factor_enabled &&
3031 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
3032 /*
3033 * Use the fragmentation information to inversely scale
3034 * down the baseline weight. We need to ensure that we
3035 * don't exclude this metaslab completely when it's 100%
3036 * fragmented. To avoid this we reduce the fragmented value
3037 * by 1.
3038 */
3039 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
3040
3041 /*
3042 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
3043 * this metaslab again. The fragmentation metric may have
3044 * decreased the space to something smaller than
3045 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
3046 * so that we can consume any remaining space.
3047 */
3048 if (space > 0 && space < SPA_MINBLOCKSIZE)
3049 space = SPA_MINBLOCKSIZE;
3050 }
3051 weight = space;
3052
3053 /*
3054 * Modern disks have uniform bit density and constant angular velocity.
3055 * Therefore, the outer recording zones are faster (higher bandwidth)
3056 * than the inner zones by the ratio of outer to inner track diameter,
3057 * which is typically around 2:1. We account for this by assigning
3058 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
3059 * In effect, this means that we'll select the metaslab with the most
3060 * free bandwidth rather than simply the one with the most free space.
3061 */
3062 if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
3063 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
3064 ASSERT(weight >= space && weight <= 2 * space);
3065 }
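/*
 * Numerically (illustrative): with vdev_ms_count = 200, metaslab 0
 * gets 2 * weight - 0 = 2x, metaslab 100 gets
 * 2 * weight - weight / 2 = 1.5x, and metaslab 199 gets just over 1x,
 * matching the ~2:1 outer-to-inner bandwidth ratio noted above.
 */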
3066
3067 /*
3068 * If this metaslab is one we're actively using, adjust its
3069 * weight to make it preferable to any inactive metaslab so
3070 * we'll polish it off. If the fragmentation on this metaslab
3071 * has exceeded our threshold, then don't mark it active.
3072 */
3073 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
3074 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
3075 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
3076 }
3077
3078 WEIGHT_SET_SPACEBASED(weight);
3079 return (weight);
3080 }
3081
3082 /*
3083 * Return the weight of the specified metaslab, according to the segment-based
3084 * weighting algorithm. The metaslab must be loaded. This function can
3085 * be called within a sync pass since it relies only on the metaslab's
3086 * range tree which is always accurate when the metaslab is loaded.
3087 */
3088 static uint64_t
3089 metaslab_weight_from_range_tree(metaslab_t *msp)
3090 {
3091 uint64_t weight = 0;
3092 uint32_t segments = 0;
3093
3094 ASSERT(msp->ms_loaded);
3095
3096 for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
3097 i--) {
3098 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
3099 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3100
3101 segments <<= 1;
3102 segments += msp->ms_allocatable->rt_histogram[i];
3103
3104 /*
3105 * The range tree provides more precision than the space map
3106 * and must be downgraded so that all values fit within the
3107 * space map's histogram. This allows us to compare loaded
3108 * vs. unloaded metaslabs to determine which metaslab is
3109 * considered "best".
3110 */
3111 if (i > max_idx)
3112 continue;
3113
3114 if (segments != 0) {
3115 WEIGHT_SET_COUNT(weight, segments);
3116 WEIGHT_SET_INDEX(weight, i);
3117 WEIGHT_SET_ACTIVE(weight, 0);
3118 break;
3119 }
3120 }
3121 return (weight);
3122 }
3123
3124 /*
3125 * Calculate the weight based on the on-disk histogram. Should be applied
3126 * only to unloaded metaslabs (i.e. no incoming allocations), in order to
3127 * give results consistent with the on-disk state.
3128 */
3129 static uint64_t
3130 metaslab_weight_from_spacemap(metaslab_t *msp)
3131 {
3132 space_map_t *sm = msp->ms_sm;
3133 ASSERT(!msp->ms_loaded);
3134 ASSERT(sm != NULL);
3135 ASSERT3U(space_map_object(sm), !=, 0);
3136 ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3137
3138 /*
3139 * Create a joint histogram from all the segments that have made
3140 * it to the metaslab's space map histogram, that are not yet
3141 * available for allocation because they are still in the freeing
3142 * pipeline (e.g. freeing, freed, and defer trees). Then subtract
3143 * these segments from the space map's histogram to get a more
3144 * accurate weight.
3145 */
3146 uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
3147 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
3148 deferspace_histogram[i] += msp->ms_synchist[i];
3149 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3150 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
3151 deferspace_histogram[i] += msp->ms_deferhist[t][i];
3152 }
3153 }
3154
3155 uint64_t weight = 0;
3156 for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
3157 ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
3158 deferspace_histogram[i]);
3159 uint64_t count =
3160 sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
3161 if (count != 0) {
3162 WEIGHT_SET_COUNT(weight, count);
3163 WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
3164 WEIGHT_SET_ACTIVE(weight, 0);
3165 break;
3166 }
3167 }
3168 return (weight);
3169 }
3170
3171 /*
3172 * Compute a segment-based weight for the specified metaslab. The weight
3173 * is determined by the highest bucket in the histogram. The information
3174 * for the highest bucket is encoded into the weight value.
3175 */
3176 static uint64_t
3177 metaslab_segment_weight(metaslab_t *msp)
3178 {
3179 metaslab_group_t *mg = msp->ms_group;
3180 uint64_t weight = 0;
3181 uint8_t shift = mg->mg_vd->vdev_ashift;
3182
3183 ASSERT(MUTEX_HELD(&msp->ms_lock));
3184
3185 /*
3186 * The metaslab is completely free.
3187 */
3188 if (metaslab_allocated_space(msp) == 0) {
3189 int idx = highbit64(msp->ms_size) - 1;
3190 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3191
3192 if (idx < max_idx) {
3193 WEIGHT_SET_COUNT(weight, 1ULL);
3194 WEIGHT_SET_INDEX(weight, idx);
3195 } else {
3196 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
3197 WEIGHT_SET_INDEX(weight, max_idx);
3198 }
3199 WEIGHT_SET_ACTIVE(weight, 0);
3200 ASSERT(!WEIGHT_IS_SPACEBASED(weight));
3201 return (weight);
3202 }
3203
3204 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3205
3206 /*
3207 * If the metaslab is fully allocated then just make the weight 0.
3208 */
3209 if (metaslab_allocated_space(msp) == msp->ms_size)
3210 return (0);
3211 /*
3212 * If the metaslab is already loaded, then use the range tree to
3213 * determine the weight. Otherwise, we rely on the space map information
3214 * to generate the weight.
3215 */
3216 if (msp->ms_loaded) {
3217 weight = metaslab_weight_from_range_tree(msp);
3218 } else {
3219 weight = metaslab_weight_from_spacemap(msp);
3220 }
3221
3222 /*
3223 * If the metaslab was active the last time we calculated its weight
3224 * then keep it active. We want to consume the entire region that
3225 * is associated with this weight.
3226 */
3227 if (msp->ms_activation_weight != 0 && weight != 0)
3228 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
3229 return (weight);
3230 }
3231
3232 /*
3233 * Determine if we should attempt to allocate from this metaslab. If the
3234 * metaslab is loaded, then we can determine if the desired allocation
3235 * can be satisfied by looking at the size of the maximum free segment
3236 * on that metaslab. Otherwise, we make our decision based on the metaslab's
3237 * weight. For segment-based weighting we can determine the maximum
3238 * allocation based on the index encoded in its value. For space-based
3239 * weights we rely on the entire weight (excluding the weight-type bit).
3240 */
3241 static boolean_t
3242 metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
3243 {
3244 /*
3245 * This case will usually but not always get caught by the checks below;
3246 * metaslabs can be loaded by various means, including the trim and
3247 * initialize code. Once that happens, without this check they are
3248 * allocatable even before they finish their first txg sync.
3249 */
3250 if (unlikely(msp->ms_new))
3251 return (B_FALSE);
3252
3253 /*
3254 * If the metaslab is loaded, ms_max_size is definitive and we can use
3255 * the fast check. If it's not, the ms_max_size is a lower bound (once
3256 * set), and we should use the fast check as long as we're not in
3257 * try_hard and it's been less than zfs_metaslab_max_size_cache_sec
3258 * seconds since the metaslab was unloaded.
3259 */
3260 if (msp->ms_loaded ||
3261 (msp->ms_max_size != 0 && !try_hard && gethrtime() <
3262 msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec)))
3263 return (msp->ms_max_size >= asize);
3264
3265 boolean_t should_allocate;
3266 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
3267 /*
3268 * The metaslab segment weight indicates segments in the
3269 * range [2^i, 2^(i+1)), where i is the index in the weight.
3270 * Since the asize might be in the middle of the range, we
3271 * should attempt the allocation if asize < 2^(i+1).
3272 */
3273 should_allocate = (asize <
3274 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
3275 } else {
3276 should_allocate = (asize <=
3277 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
3278 }
3279
3280 return (should_allocate);
3281 }
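/*
 * Example of the segment-weight check (hypothetical values): a weight
 * index of 20 means the largest known free segments fall in [1M, 2M).
 * A 1.5M asize passes (1.5M < 1 << 21) and the allocation is
 * attempted, even though it may still fail if every such segment is
 * between 1M and 1.5M; a 3M asize is rejected outright.
 */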
3282
3283 static uint64_t
3284 metaslab_weight(metaslab_t *msp, boolean_t nodirty)
3285 {
3286 vdev_t *vd = msp->ms_group->mg_vd;
3287 spa_t *spa = vd->vdev_spa;
3288 uint64_t weight;
3289
3290 ASSERT(MUTEX_HELD(&msp->ms_lock));
3291
3292 metaslab_set_fragmentation(msp, nodirty);
3293
3294 /*
3295 * Update the maximum size. If the metaslab is loaded, this will
3296 * ensure that we get an accurate maximum size if newly freed space
3297 * has been added back into the free tree. If the metaslab is
3298 * unloaded, we check if there's a larger free segment in the
3299 * unflushed frees. This is a lower bound on the largest allocatable
3300 * segment size. Coalescing of adjacent entries may reveal larger
3301 * allocatable segments, but we aren't aware of those until loading
3302 * the space map into a range tree.
3303 */
3304 if (msp->ms_loaded) {
3305 msp->ms_max_size = metaslab_largest_allocatable(msp);
3306 } else {
3307 msp->ms_max_size = MAX(msp->ms_max_size,
3308 metaslab_largest_unflushed_free(msp));
3309 }
3310
3311 /*
3312 * Segment-based weighting requires space map histogram support.
3313 */
3314 if (zfs_metaslab_segment_weight_enabled &&
3315 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
3316 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
3317 sizeof (space_map_phys_t))) {
3318 weight = metaslab_segment_weight(msp);
3319 } else {
3320 weight = metaslab_space_weight(msp);
3321 }
3322 return (weight);
3323 }
3324
3325 void
3326 metaslab_recalculate_weight_and_sort(metaslab_t *msp)
3327 {
3328 ASSERT(MUTEX_HELD(&msp->ms_lock));
3329
3330 /* note: we preserve the mask (e.g. indication of primary, etc.) */
3331 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
3332 metaslab_group_sort(msp->ms_group, msp,
3333 metaslab_weight(msp, B_FALSE) | was_active);
3334 }
3335
3336 static int
3337 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3338 int allocator, uint64_t activation_weight)
3339 {
3340 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
3341 ASSERT(MUTEX_HELD(&msp->ms_lock));
3342
3343 /*
3344 * If we're activating for the claim code, we don't want to actually
3345 * set the metaslab up for a specific allocator.
3346 */
3347 if (activation_weight == METASLAB_WEIGHT_CLAIM) {
3348 ASSERT0(msp->ms_activation_weight);
3349 msp->ms_activation_weight = msp->ms_weight;
3350 metaslab_group_sort(mg, msp, msp->ms_weight |
3351 activation_weight);
3352 return (0);
3353 }
3354
3355 metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
3356 &mga->mga_primary : &mga->mga_secondary);
3357
3358 mutex_enter(&mg->mg_lock);
3359 if (*mspp != NULL) {
3360 mutex_exit(&mg->mg_lock);
3361 return (EEXIST);
3362 }
3363
3364 *mspp = msp;
3365 ASSERT3S(msp->ms_allocator, ==, -1);
3366 msp->ms_allocator = allocator;
3367 msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
3368
3369 ASSERT0(msp->ms_activation_weight);
3370 msp->ms_activation_weight = msp->ms_weight;
3371 metaslab_group_sort_impl(mg, msp,
3372 msp->ms_weight | activation_weight);
3373 mutex_exit(&mg->mg_lock);
3374
3375 return (0);
3376 }
3377
3378 static int
3379 metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
3380 {
3381 ASSERT(MUTEX_HELD(&msp->ms_lock));
3382
3383 /*
3384 * The current metaslab is already activated for us so there
3385 * is nothing to do. Being activated, though, does not mean
3386 * that this metaslab is activated for our allocator or at our
3387 * requested activation weight. The metaslab could have started
3388 * as an active one for our allocator but changed allocators
3389 * while we were waiting to grab its ms_lock or we stole it
3390 * [see find_valid_metaslab()]. This means that this thread
3391 * may end up passivating a metaslab that belongs to another
3392 * allocator or that carries a different activation mask.
3393 */
3394 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3395 ASSERT(msp->ms_loaded);
3396 return (0);
3397 }
3398
3399 int error = metaslab_load(msp);
3400 if (error != 0) {
3401 metaslab_group_sort(msp->ms_group, msp, 0);
3402 return (error);
3403 }
3404
3405 /*
3406 * When entering metaslab_load() we may have dropped the
3407 * ms_lock because we were loading this metaslab, or we
3408 * were waiting for another thread to load it for us. In
3409 * that scenario, we recheck the weight of the metaslab
3410 * to see if it was activated by another thread.
3411 *
3412 * If the metaslab was activated for another allocator or
3413 * it was activated with a different activation weight (e.g.
3414 * we wanted to make it a primary but it was activated as
3415 * secondary) we return EBUSY.
3416 *
3417 * If the metaslab was activated for the same allocator
3418 * and requested activation mask, skip activating it.
3419 */
3420 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3421 if (msp->ms_allocator != allocator)
3422 return (EBUSY);
3423
3424 if ((msp->ms_weight & activation_weight) == 0)
3425 return (SET_ERROR(EBUSY));
3426
3427 EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
3428 msp->ms_primary);
3429 return (0);
3430 }
3431
3432 /*
3433 * If the metaslab has literally 0 space, it will have weight 0. In
3434 * that case, don't bother activating it. This can happen if the
3435 * metaslab had space during find_valid_metaslab, but another thread
3436 * loaded it and used all that space while we were waiting to grab the
3437 * lock.
3438 */
3439 if (msp->ms_weight == 0) {
3440 ASSERT0(range_tree_space(msp->ms_allocatable));
3441 return (SET_ERROR(ENOSPC));
3442 }
3443
3444 if ((error = metaslab_activate_allocator(msp->ms_group, msp,
3445 allocator, activation_weight)) != 0) {
3446 return (error);
3447 }
3448
3449 ASSERT(msp->ms_loaded);
3450 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
3451
3452 return (0);
3453 }
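/*
 * Minimal sketch of the caller pattern (error handling elided; see
 * metaslab_group_alloc_normal() below for the real logic):
 *
 *	mutex_enter(&msp->ms_lock);
 *	if (metaslab_activate(msp, allocator,
 *	    METASLAB_WEIGHT_PRIMARY) == 0) {
 *		uint64_t off = metaslab_block_alloc(msp, asize, txg);
 *		...
 *	}
 *	mutex_exit(&msp->ms_lock);
 *
 * EBUSY and EEXIST mean another thread won an activation race; callers
 * typically keep using the (already loaded) metaslab in that case.
 */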
3454
3455 static void
3456 metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3457 uint64_t weight)
3458 {
3459 ASSERT(MUTEX_HELD(&msp->ms_lock));
3460 ASSERT(msp->ms_loaded);
3461
3462 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
3463 metaslab_group_sort(mg, msp, weight);
3464 return;
3465 }
3466
3467 mutex_enter(&mg->mg_lock);
3468 ASSERT3P(msp->ms_group, ==, mg);
3469 ASSERT3S(0, <=, msp->ms_allocator);
3470 ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
3471
3472 metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator];
3473 if (msp->ms_primary) {
3474 ASSERT3P(mga->mga_primary, ==, msp);
3475 ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
3476 mga->mga_primary = NULL;
3477 } else {
3478 ASSERT3P(mga->mga_secondary, ==, msp);
3479 ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
3480 mga->mga_secondary = NULL;
3481 }
3482 msp->ms_allocator = -1;
3483 metaslab_group_sort_impl(mg, msp, weight);
3484 mutex_exit(&mg->mg_lock);
3485 }
3486
3487 static void
3488 metaslab_passivate(metaslab_t *msp, uint64_t weight)
3489 {
3490 uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE;
3491
3492 /*
3493 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
3494 * this metaslab again. In that case, it had better be empty,
3495 * or we would be leaving space on the table.
3496 */
3497 ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
3498 size >= SPA_MINBLOCKSIZE ||
3499 range_tree_space(msp->ms_allocatable) == 0);
3500 ASSERT0(weight & METASLAB_ACTIVE_MASK);
3501
3502 ASSERT(msp->ms_activation_weight != 0);
3503 msp->ms_activation_weight = 0;
3504 metaslab_passivate_allocator(msp->ms_group, msp, weight);
3505 ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
3506 }
3507
3508 /*
3509 * Segment-based metaslabs are activated once and remain active until
3510 * we either fail an allocation attempt (similar to space-based metaslabs)
3511 * or have exhausted the free space in zfs_metaslab_switch_threshold
3512 * buckets since the metaslab was activated. This function checks to see
3513 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
3514 * metaslab and passivates it proactively. This will allow us to select a
3515 * metaslab with a larger contiguous region, if any, remaining within this
3516 * metaslab group. If we're in sync pass > 1, then we continue using this
3517 * metaslab so that we don't dirty more blocks and cause more sync passes.
3518 */
3519 static void
3520 metaslab_segment_may_passivate(metaslab_t *msp)
3521 {
3522 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3523
3524 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
3525 return;
3526
3527 /*
3528 * Since we are in the middle of a sync pass, the most accurate
3529 * information that is accessible to us is the in-core range tree
3530 * histogram; calculate the new weight based on that information.
3531 */
3532 uint64_t weight = metaslab_weight_from_range_tree(msp);
3533 int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
3534 int current_idx = WEIGHT_GET_INDEX(weight);
3535
3536 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
3537 metaslab_passivate(msp, weight);
3538 }
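/*
 * Worked example (hypothetical indices): a metaslab activated at weight
 * index 20 with zfs_metaslab_switch_threshold = 2 is proactively
 * passivated once its recalculated index drops to 18 or lower, i.e.
 * once its largest remaining free segments have shrunk by two
 * power-of-two buckets:
 *
 *	current_idx (18) <= activation_idx (20) - threshold (2)
 */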
3539
3540 static void
3541 metaslab_preload(void *arg)
3542 {
3543 metaslab_t *msp = arg;
3544 metaslab_class_t *mc = msp->ms_group->mg_class;
3545 spa_t *spa = mc->mc_spa;
3546 fstrans_cookie_t cookie = spl_fstrans_mark();
3547
3548 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
3549
3550 mutex_enter(&msp->ms_lock);
3551 (void) metaslab_load(msp);
3552 metaslab_set_selected_txg(msp, spa_syncing_txg(spa));
3553 mutex_exit(&msp->ms_lock);
3554 spl_fstrans_unmark(cookie);
3555 }
3556
3557 static void
3558 metaslab_group_preload(metaslab_group_t *mg)
3559 {
3560 spa_t *spa = mg->mg_vd->vdev_spa;
3561 metaslab_t *msp;
3562 avl_tree_t *t = &mg->mg_metaslab_tree;
3563 int m = 0;
3564
3565 if (spa_shutting_down(spa) || !metaslab_preload_enabled)
3566 return;
3567
3568 mutex_enter(&mg->mg_lock);
3569
3570 /*
3571 * Load the next potential metaslabs
3572 */
3573 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
3574 ASSERT3P(msp->ms_group, ==, mg);
3575
3576 /*
3577 * We preload only the maximum number of metaslabs specified
3578 * by metaslab_preload_limit. If a metaslab is being forced
3579 * to condense then we preload it too. This will ensure
3580 * that force condensing happens in the next txg.
3581 */
3582 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
3583 continue;
3584 }
3585
3586 VERIFY(taskq_dispatch(spa->spa_metaslab_taskq, metaslab_preload,
3587 msp, TQ_SLEEP | (m <= mg->mg_allocators ? TQ_FRONT : 0))
3588 != TASKQID_INVALID);
3589 }
3590 mutex_exit(&mg->mg_lock);
3591 }
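/*
 * Note on the dispatch above: the first mg_allocators metaslabs in the
 * tree (the likeliest next primaries) are queued with TQ_FRONT so they
 * are loaded ahead of the rest of the preload batch.
 */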
3592
3593 /*
3594 * Determine if the space map's on-disk footprint is past our tolerance for
3595 * inefficiency. We would like to use the following criteria to make our
3596 * decision:
3597 *
3598 * 1. Do not condense if the size of the space map object would dramatically
3599 * increase as a result of writing out the free space range tree.
3600 *
3601 * 2. Condense if the on-disk space map representation is at least
3602 * zfs_condense_pct/100 times the size of the optimal representation
3603 * (i.e. zfs_condense_pct = 110, optimal = 1MB: condense at >= 1.1MB on disk).
3604 *
3605 * 3. Do not condense if the on-disk size of the space map does not actually
3606 * decrease.
3607 *
3608 * Unfortunately, we cannot compute the on-disk size of the space map in this
3609 * context because we cannot accurately compute the effects of compression, etc.
3610 * Instead, we apply the heuristic described in the block comment for
3611 * zfs_metaslab_condense_block_threshold - we only condense if the space used
3612 * is greater than a threshold number of blocks.
3613 */
3614 static boolean_t
3615 metaslab_should_condense(metaslab_t *msp)
3616 {
3617 space_map_t *sm = msp->ms_sm;
3618 vdev_t *vd = msp->ms_group->mg_vd;
3619 uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift;
3620
3621 ASSERT(MUTEX_HELD(&msp->ms_lock));
3622 ASSERT(msp->ms_loaded);
3623 ASSERT(sm != NULL);
3624 ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
3625
3626 /*
3627 * We always condense metaslabs that are empty and metaslabs for
3628 * which a condense request has been made.
3629 */
3630 if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
3631 msp->ms_condense_wanted)
3632 return (B_TRUE);
3633
3634 uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
3635 uint64_t object_size = space_map_length(sm);
3636 uint64_t optimal_size = space_map_estimate_optimal_size(sm,
3637 msp->ms_allocatable, SM_NO_VDEVID);
3638
3639 return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
3640 object_size > zfs_metaslab_condense_block_threshold * record_size);
3641 }
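/*
 * Worked arithmetic for the check above (hypothetical sizes): with
 * zfs_condense_pct at its default of 200, an estimated optimal size of
 * 1MB, and a 128K record size, we condense only when both
 *
 *	object_size >= 1MB * 200 / 100 = 2MB, and
 *	object_size > zfs_metaslab_condense_block_threshold * 128K
 *
 * hold, so small space maps are never condensed merely because their
 * optimal form is tiny.
 */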
3642
3643 /*
3644 * Condense the on-disk space map representation to its minimized form.
3645 * The minimized form consists of a small number of allocations followed
3646 * by the entries of the free range tree (ms_allocatable). The condensed
3647 * spacemap contains all the entries of previous TXGs (including those in
3648 * the pool-wide log spacemaps; thus this is effectively a superset of
3649 * metaslab_flush()), but this TXG's entries still need to be written.
3650 */
3651 static void
3652 metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
3653 {
3654 range_tree_t *condense_tree;
3655 space_map_t *sm = msp->ms_sm;
3656 uint64_t txg = dmu_tx_get_txg(tx);
3657 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3658
3659 ASSERT(MUTEX_HELD(&msp->ms_lock));
3660 ASSERT(msp->ms_loaded);
3661 ASSERT(msp->ms_sm != NULL);
3662
3663 /*
3664 * In order to condense the space map, we need to change it so it
3665 * only describes which segments are currently allocated and free.
3666 *
3667 * All the current free space resides in the ms_allocatable, all
3668 * the ms_defer trees, and all the ms_allocating trees. We ignore
3669 * ms_freed because it is empty because we're in sync pass 1. We
3670 * ignore ms_freeing because these changes are not yet reflected
3671 * in the spacemap (they will be written later this txg).
3672 *
3673 * So to truncate the space map to represent all the entries of
3674 * previous TXGs we do the following:
3675 *
3676 * 1] We create a range tree (condense tree) that is 100% empty.
3677 * 2] We add to it all segments found in the ms_defer trees
3678 * as those segments are marked as free in the original space
3679 * map. We do the same with the ms_allocating trees for the same
3680 * reason. Adding these segments should be a relatively
3681 * inexpensive operation since we expect these trees to have a
3682 * small number of nodes.
3683 * 3] We vacate any unflushed allocs, since they are not frees we
3684 * need to add to the condense tree. Then we vacate any
3685 * unflushed frees as they should already be part of ms_allocatable.
3686 * 4] At this point, we would ideally like to add all segments
3687 * in the ms_allocatable tree to the condense tree. This way
3688 * we would write all the entries of the condense tree as the
3689 * condensed space map, which would only contain freed
3690 * segments with everything else assumed to be allocated.
3691 *
3692 * Doing so can be prohibitively expensive as ms_allocatable can
3693 * be large, and therefore computationally expensive to add to
3694 * the condense_tree. Instead we first sync out an entry marking
3695 * everything as allocated, then the condense_tree and then the
3696 * ms_allocatable, in the condensed space map. While this is not
3697 * optimal, it is typically close to optimal and more importantly
3698 * much cheaper to compute.
3699 *
3700 * 5] Finally, as both of the unflushed trees were written to our
3701 * new and condensed metaslab space map, we basically flushed
3702 * all the unflushed changes to disk, thus we call
3703 * metaslab_flush_update().
3704 */
3705 ASSERT3U(spa_sync_pass(spa), ==, 1);
3706 ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
3707
3708 zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
3709 "spa %s, smp size %llu, segments %llu, forcing condense=%s",
3710 (u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
3711 (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
3712 spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
3713 (u_longlong_t)range_tree_numsegs(msp->ms_allocatable),
3714 msp->ms_condense_wanted ? "TRUE" : "FALSE");
3715
3716 msp->ms_condense_wanted = B_FALSE;
3717
3718 range_seg_type_t type;
3719 uint64_t shift, start;
3720 type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
3721 &start, &shift);
3722
3723 condense_tree = range_tree_create(NULL, type, NULL, start, shift);
3724
3725 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3726 range_tree_walk(msp->ms_defer[t],
3727 range_tree_add, condense_tree);
3728 }
3729
3730 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
3731 range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
3732 range_tree_add, condense_tree);
3733 }
3734
3735 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3736 metaslab_unflushed_changes_memused(msp));
3737 spa->spa_unflushed_stats.sus_memused -=
3738 metaslab_unflushed_changes_memused(msp);
3739 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3740 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3741
3742 /*
3743 * We're about to drop the metaslab's lock thus allowing other
3744 * consumers to change its content. Set the metaslab's ms_condensing
3745 * flag to ensure that allocations on this metaslab do not occur
3746 * while we're in the middle of committing it to disk. This is only
3747 * critical for ms_allocatable as all other range trees use per TXG
3748 * views of their content.
3749 */
3750 msp->ms_condensing = B_TRUE;
3751
3752 mutex_exit(&msp->ms_lock);
3753 uint64_t object = space_map_object(msp->ms_sm);
3754 space_map_truncate(sm,
3755 spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3756 zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
3757
3758 /*
3759 * space_map_truncate() may have reallocated the spacemap object.
3760 * If so, update the vdev_ms_array.
3761 */
3762 if (space_map_object(msp->ms_sm) != object) {
3763 object = space_map_object(msp->ms_sm);
3764 dmu_write(spa->spa_meta_objset,
3765 msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
3766 msp->ms_id, sizeof (uint64_t), &object, tx);
3767 }
3768
3769 /*
3770 * Note:
3771 * When the log space map feature is enabled, each space map will
3772 * always have ALLOCS followed by FREES for each sync pass. This is
3773 * typically true even when the log space map feature is disabled,
3774 * except for the case where a metaslab goes through metaslab_sync()
3775 * and gets condensed. In that case the metaslab's space map will have
3776 * ALLOCS followed by FREES (due to condensing) followed by ALLOCS
3777 * followed by FREES (due to space_map_write() in metaslab_sync()) for
3778 * sync pass 1.
3779 */
3780 range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start,
3781 shift);
3782 range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
3783 space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
3784 space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
3785 space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);
3786
3787 range_tree_vacate(condense_tree, NULL, NULL);
3788 range_tree_destroy(condense_tree);
3789 range_tree_vacate(tmp_tree, NULL, NULL);
3790 range_tree_destroy(tmp_tree);
3791 mutex_enter(&msp->ms_lock);
3792
3793 msp->ms_condensing = B_FALSE;
3794 metaslab_flush_update(msp, tx);
3795 }
3796
3797 static void
3798 metaslab_unflushed_add(metaslab_t *msp, dmu_tx_t *tx)
3799 {
3800 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3801 ASSERT(spa_syncing_log_sm(spa) != NULL);
3802 ASSERT(msp->ms_sm != NULL);
3803 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3804 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3805
3806 mutex_enter(&spa->spa_flushed_ms_lock);
3807 metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3808 metaslab_set_unflushed_dirty(msp, B_TRUE);
3809 avl_add(&spa->spa_metaslabs_by_flushed, msp);
3810 mutex_exit(&spa->spa_flushed_ms_lock);
3811
3812 spa_log_sm_increment_current_mscount(spa);
3813 spa_log_summary_add_flushed_metaslab(spa, B_TRUE);
3814 }
3815
3816 void
3817 metaslab_unflushed_bump(metaslab_t *msp, dmu_tx_t *tx, boolean_t dirty)
3818 {
3819 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3820 ASSERT(spa_syncing_log_sm(spa) != NULL);
3821 ASSERT(msp->ms_sm != NULL);
3822 ASSERT(metaslab_unflushed_txg(msp) != 0);
3823 ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
3824 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3825 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3826
3827 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
3828
3829 /* update metaslab's position in our flushing tree */
3830 uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
3831 boolean_t ms_prev_flushed_dirty = metaslab_unflushed_dirty(msp);
3832 mutex_enter(&spa->spa_flushed_ms_lock);
3833 avl_remove(&spa->spa_metaslabs_by_flushed, msp);
3834 metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3835 metaslab_set_unflushed_dirty(msp, dirty);
3836 avl_add(&spa->spa_metaslabs_by_flushed, msp);
3837 mutex_exit(&spa->spa_flushed_ms_lock);
3838
3839 /* update metaslab counts of spa_log_sm_t nodes */
3840 spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
3841 spa_log_sm_increment_current_mscount(spa);
3842
3843 /* update log space map summary */
3844 spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg,
3845 ms_prev_flushed_dirty);
3846 spa_log_summary_add_flushed_metaslab(spa, dirty);
3847
3848 /* cleanup obsolete logs if any */
3849 spa_cleanup_old_sm_logs(spa, tx);
3850 }
3851
3852 /*
3853 * Called when the metaslab has been flushed (its own spacemap now reflects
3854 * all the contents of the pool-wide spacemap log). Updates the metaslab's
3855 * metadata and any pool-wide related log space map data (e.g. summary,
3856 * obsolete logs, etc.) to reflect that.
3857 */
3858 static void
3859 metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
3860 {
3861 metaslab_group_t *mg = msp->ms_group;
3862 spa_t *spa = mg->mg_vd->vdev_spa;
3863
3864 ASSERT(MUTEX_HELD(&msp->ms_lock));
3865
3866 ASSERT3U(spa_sync_pass(spa), ==, 1);
3867
3868 /*
3869 * Just because a metaslab got flushed, that doesn't mean that
3870 * it will pass through metaslab_sync_done(). Thus, make sure to
3871 * update ms_synced_length here in case it doesn't.
3872 */
3873 msp->ms_synced_length = space_map_length(msp->ms_sm);
3874
3875 /*
3876 * We may end up here from metaslab_condense() without the
3877 * feature being active. In that case this is a no-op.
3878 */
3879 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP) ||
3880 metaslab_unflushed_txg(msp) == 0)
3881 return;
3882
3883 metaslab_unflushed_bump(msp, tx, B_FALSE);
3884 }
3885
3886 boolean_t
3887 metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
3888 {
3889 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3890
3891 ASSERT(MUTEX_HELD(&msp->ms_lock));
3892 ASSERT3U(spa_sync_pass(spa), ==, 1);
3893 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
3894
3895 ASSERT(msp->ms_sm != NULL);
3896 ASSERT(metaslab_unflushed_txg(msp) != 0);
3897 ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
3898
3899 /*
3900 * There is nothing wrong with flushing the same metaslab twice, as
3901 * this codepath can handle that case. However, the current
3902 * flushing scheme makes sure to avoid this situation as we would be
3903 * making all these calls without having anything meaningful to write
3904 * to disk. We assert this behavior here.
3905 */
3906 ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
3907
3908 /*
3909 * We cannot flush while loading, because then we would
3910 * not load the ms_unflushed_{allocs,frees}.
3911 */
3912 if (msp->ms_loading)
3913 return (B_FALSE);
3914
3915 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3916 metaslab_verify_weight_and_frag(msp);
3917
3918 /*
3919 * Metaslab condensing is effectively flushing. Therefore if the
3920 * metaslab can be condensed we can just condense it instead of
3921 * flushing it.
3922 *
3923 * Note that metaslab_condense() does call metaslab_flush_update()
3924 * so we can just return immediately after condensing. We also
3925 * don't need to care about setting ms_flushing or broadcasting
3926 * ms_flush_cv, even if we temporarily drop the ms_lock in
3927 * metaslab_condense(), as the metaslab is already loaded.
3928 */
3929 if (msp->ms_loaded && metaslab_should_condense(msp)) {
3930 metaslab_group_t *mg = msp->ms_group;
3931
3932 /*
3933 * For all histogram operations below refer to the
3934 * comments of metaslab_sync() where we follow a
3935 * similar procedure.
3936 */
3937 metaslab_group_histogram_verify(mg);
3938 metaslab_class_histogram_verify(mg->mg_class);
3939 metaslab_group_histogram_remove(mg, msp);
3940
3941 metaslab_condense(msp, tx);
3942
3943 space_map_histogram_clear(msp->ms_sm);
3944 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
3945 ASSERT(range_tree_is_empty(msp->ms_freed));
3946 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3947 space_map_histogram_add(msp->ms_sm,
3948 msp->ms_defer[t], tx);
3949 }
3950 metaslab_aux_histograms_update(msp);
3951
3952 metaslab_group_histogram_add(mg, msp);
3953 metaslab_group_histogram_verify(mg);
3954 metaslab_class_histogram_verify(mg->mg_class);
3955
3956 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3957
3958 /*
3959 * Since we recreated the histogram (and potentially
3960 * the ms_sm too while condensing) ensure that the
3961 * weight is updated too because we are not guaranteed
3962 * that this metaslab is dirty and will go through
3963 * metaslab_sync_done().
3964 */
3965 metaslab_recalculate_weight_and_sort(msp);
3966 return (B_TRUE);
3967 }
3968
3969 msp->ms_flushing = B_TRUE;
3970 uint64_t sm_len_before = space_map_length(msp->ms_sm);
3971
3972 mutex_exit(&msp->ms_lock);
3973 space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
3974 SM_NO_VDEVID, tx);
3975 space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
3976 SM_NO_VDEVID, tx);
3977 mutex_enter(&msp->ms_lock);
3978
3979 uint64_t sm_len_after = space_map_length(msp->ms_sm);
3980 if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
3981 zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
3982 "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
3983 "appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx),
3984 spa_name(spa),
3985 (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
3986 (u_longlong_t)msp->ms_id,
3987 (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
3988 (u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
3989 (u_longlong_t)(sm_len_after - sm_len_before));
3990 }
3991
3992 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3993 metaslab_unflushed_changes_memused(msp));
3994 spa->spa_unflushed_stats.sus_memused -=
3995 metaslab_unflushed_changes_memused(msp);
3996 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3997 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3998
3999 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
4000 metaslab_verify_weight_and_frag(msp);
4001
4002 metaslab_flush_update(msp, tx);
4003
4004 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
4005 metaslab_verify_weight_and_frag(msp);
4006
4007 msp->ms_flushing = B_FALSE;
4008 cv_broadcast(&msp->ms_flush_cv);
4009 return (B_TRUE);
4010 }
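/*
 * In outline, a successful flush does one of two things: if the loaded
 * metaslab is worth condensing, condense it (condensing is a superset
 * of flushing); otherwise append ms_unflushed_allocs as ALLOC records
 * and ms_unflushed_frees as FREE records to the metaslab's own space
 * map, vacate both trees, and advance the metaslab's position in the
 * flushing order via metaslab_flush_update().
 */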
4011
4012 /*
4013 * Write a metaslab to disk in the context of the specified transaction group.
4014 */
4015 void
4016 metaslab_sync(metaslab_t *msp, uint64_t txg)
4017 {
4018 metaslab_group_t *mg = msp->ms_group;
4019 vdev_t *vd = mg->mg_vd;
4020 spa_t *spa = vd->vdev_spa;
4021 objset_t *mos = spa_meta_objset(spa);
4022 range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
4023 dmu_tx_t *tx;
4024
4025 ASSERT(!vd->vdev_ishole);
4026
4027 /*
4028 * This metaslab has just been added so there's no work to do now.
4029 */
4030 if (msp->ms_new) {
4031 ASSERT0(range_tree_space(alloctree));
4032 ASSERT0(range_tree_space(msp->ms_freeing));
4033 ASSERT0(range_tree_space(msp->ms_freed));
4034 ASSERT0(range_tree_space(msp->ms_checkpointing));
4035 ASSERT0(range_tree_space(msp->ms_trim));
4036 return;
4037 }
4038
4039 /*
4040 * Normally, we don't want to process a metaslab if there are no
4041 * allocations or frees to perform. However, if the metaslab is being
4042 * forced to condense, it's loaded and we're not beyond the final
4043 * dirty txg, we need to let it through. Not condensing beyond the
4044 * final dirty txg prevents an issue where metaslabs that need to be
4045 * condensed but were loaded for other reasons could cause a panic
4046 * here. By only checking the txg in that branch of the conditional,
4047 * we preserve the utility of the VERIFY statements in all other
4048 * cases.
4049 */
4050 if (range_tree_is_empty(alloctree) &&
4051 range_tree_is_empty(msp->ms_freeing) &&
4052 range_tree_is_empty(msp->ms_checkpointing) &&
4053 !(msp->ms_loaded && msp->ms_condense_wanted &&
4054 txg <= spa_final_dirty_txg(spa)))
4055 return;
4056
4057
4058 VERIFY3U(txg, <=, spa_final_dirty_txg(spa));
4059
4060 /*
4061 * The only state that can actually be changing concurrently
4062 * with metaslab_sync() is the metaslab's ms_allocatable. No
4063 * other thread can be modifying this txg's alloc, freeing,
4064 * freed, or space_map_phys_t. We drop ms_lock whenever we
4065 * could call into the DMU, because the DMU can call down to
4066 * us (e.g. via zio_free()) at any time.
4067 *
4068 * The spa_vdev_remove_thread() can be reading metaslab state
4069 * concurrently, and it is locked out by the ms_sync_lock.
4070 * Note that the ms_lock is insufficient for this, because it
4071 * is dropped by space_map_write().
4072 */
4073 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
4074
4075 /*
4076 * Generate a log space map if one doesn't exist already.
4077 */
4078 spa_generate_syncing_log_sm(spa, tx);
4079
4080 if (msp->ms_sm == NULL) {
4081 uint64_t new_object = space_map_alloc(mos,
4082 spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
4083 zfs_metaslab_sm_blksz_with_log :
4084 zfs_metaslab_sm_blksz_no_log, tx);
4085 VERIFY3U(new_object, !=, 0);
4086
4087 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
4088 msp->ms_id, sizeof (uint64_t), &new_object, tx);
4089
4090 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
4091 msp->ms_start, msp->ms_size, vd->vdev_ashift));
4092 ASSERT(msp->ms_sm != NULL);
4093
4094 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
4095 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
4096 ASSERT0(metaslab_allocated_space(msp));
4097 }
4098
4099 if (!range_tree_is_empty(msp->ms_checkpointing) &&
4100 vd->vdev_checkpoint_sm == NULL) {
4101 ASSERT(spa_has_checkpoint(spa));
4102
4103 uint64_t new_object = space_map_alloc(mos,
4104 zfs_vdev_standard_sm_blksz, tx);
4105 VERIFY3U(new_object, !=, 0);
4106
4107 VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
4108 mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
4109 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
4110
4111 /*
4112 * We save the space map object as an entry in vdev_top_zap
4113 * so it can be retrieved when the pool is reopened after an
4114 * export or through zdb.
4115 */
4116 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
4117 vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
4118 sizeof (new_object), 1, &new_object, tx));
4119 }
4120
4121 mutex_enter(&msp->ms_sync_lock);
4122 mutex_enter(&msp->ms_lock);
4123
4124 /*
4125 * Note: metaslab_condense() clears the space map's histogram.
4126 * Therefore we must verify and remove this histogram before
4127 * condensing.
4128 */
4129 metaslab_group_histogram_verify(mg);
4130 metaslab_class_histogram_verify(mg->mg_class);
4131 metaslab_group_histogram_remove(mg, msp);
4132
4133 if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
4134 metaslab_should_condense(msp))
4135 metaslab_condense(msp, tx);
4136
4137 /*
4138 * We'll be going to disk to sync our space accounting, thus we
4139 * drop the ms_lock during that time so allocations coming from
4140 * open-context (ZIL) for future TXGs do not block.
4141 */
4142 mutex_exit(&msp->ms_lock);
4143 space_map_t *log_sm = spa_syncing_log_sm(spa);
4144 if (log_sm != NULL) {
4145 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4146 if (metaslab_unflushed_txg(msp) == 0)
4147 metaslab_unflushed_add(msp, tx);
4148 else if (!metaslab_unflushed_dirty(msp))
4149 metaslab_unflushed_bump(msp, tx, B_TRUE);
4150
4151 space_map_write(log_sm, alloctree, SM_ALLOC,
4152 vd->vdev_id, tx);
4153 space_map_write(log_sm, msp->ms_freeing, SM_FREE,
4154 vd->vdev_id, tx);
4155 mutex_enter(&msp->ms_lock);
4156
4157 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
4158 metaslab_unflushed_changes_memused(msp));
4159 spa->spa_unflushed_stats.sus_memused -=
4160 metaslab_unflushed_changes_memused(msp);
4161 range_tree_remove_xor_add(alloctree,
4162 msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
4163 range_tree_remove_xor_add(msp->ms_freeing,
4164 msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
4165 spa->spa_unflushed_stats.sus_memused +=
4166 metaslab_unflushed_changes_memused(msp);
4167 } else {
4168 ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4169
4170 space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
4171 SM_NO_VDEVID, tx);
4172 space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
4173 SM_NO_VDEVID, tx);
4174 mutex_enter(&msp->ms_lock);
4175 }
4176
4177 msp->ms_allocated_space += range_tree_space(alloctree);
4178 ASSERT3U(msp->ms_allocated_space, >=,
4179 range_tree_space(msp->ms_freeing));
4180 msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);
4181
4182 if (!range_tree_is_empty(msp->ms_checkpointing)) {
4183 ASSERT(spa_has_checkpoint(spa));
4184 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
4185
4186 /*
4187 * Since we are doing writes to disk and the ms_checkpointing
4188 * tree won't be changing during that time, we drop the
4189 * ms_lock while writing to the checkpoint space map, for the
4190 * same reason mentioned above.
4191 */
4192 mutex_exit(&msp->ms_lock);
4193 space_map_write(vd->vdev_checkpoint_sm,
4194 msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
4195 mutex_enter(&msp->ms_lock);
4196
4197 spa->spa_checkpoint_info.sci_dspace +=
4198 range_tree_space(msp->ms_checkpointing);
4199 vd->vdev_stat.vs_checkpoint_space +=
4200 range_tree_space(msp->ms_checkpointing);
4201 ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
4202 -space_map_allocated(vd->vdev_checkpoint_sm));
4203
4204 range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
4205 }
4206
4207 if (msp->ms_loaded) {
4208 /*
4209 * When the space map is loaded, we have an accurate
4210 * histogram in the range tree. This gives us an opportunity
4211 * to bring the space map's histogram up-to-date so we clear
4212 * it first before updating it.
4213 */
4214 space_map_histogram_clear(msp->ms_sm);
4215 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
4216
4217 /*
4218 * Since we've cleared the histogram we need to add back
4219 * any free space that has already been processed, plus
4220 * any deferred space. This allows the on-disk histogram
4221 * to accurately reflect all free space even if some space
4222 * is not yet available for allocation (i.e. deferred).
4223 */
4224 space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
4225
4226 /*
4227 * Add back any deferred free space that has not been
4228 * added back into the in-core free tree yet. This will
4229 * ensure that we don't end up with a space map histogram
4230 * that is completely empty unless the metaslab is fully
4231 * allocated.
4232 */
4233 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
4234 space_map_histogram_add(msp->ms_sm,
4235 msp->ms_defer[t], tx);
4236 }
4237 }
4238
4239 /*
4240 * Always add the free space from this sync pass to the space
4241 * map histogram. We want to make sure that the on-disk histogram
4242 * accounts for all free space. If the space map is not loaded,
4243 * then we will lose some accuracy but will correct it the next
4244 * time we load the space map.
4245 */
4246 space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
4247 metaslab_aux_histograms_update(msp);
4248
4249 metaslab_group_histogram_add(mg, msp);
4250 metaslab_group_histogram_verify(mg);
4251 metaslab_class_histogram_verify(mg->mg_class);
4252
4253 /*
4254 * For sync pass 1, we avoid traversing this txg's free range tree
4255 * and instead will just swap the pointers for freeing and freed.
4256 * We can safely do this since the freed_tree is guaranteed to be
4257 * empty on the initial pass.
4258 *
4259 * Keep in mind that even if we are currently using a log spacemap
4260 * we want current frees to end up in the ms_allocatable (but not
4261 * get appended to the ms_sm) so their ranges can be reused as usual.
4262 */
4263 if (spa_sync_pass(spa) == 1) {
4264 range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
4265 ASSERT0(msp->ms_allocated_this_txg);
4266 } else {
4267 range_tree_vacate(msp->ms_freeing,
4268 range_tree_add, msp->ms_freed);
4269 }
4270 msp->ms_allocated_this_txg += range_tree_space(alloctree);
4271 range_tree_vacate(alloctree, NULL, NULL);
4272
4273 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4274 ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
4275 & TXG_MASK]));
4276 ASSERT0(range_tree_space(msp->ms_freeing));
4277 ASSERT0(range_tree_space(msp->ms_checkpointing));
4278
4279 mutex_exit(&msp->ms_lock);
4280
4281 /*
4282 * Verify that the space map object ID has been recorded in the
4283 * vdev_ms_array.
4284 */
4285 uint64_t object;
4286 VERIFY0(dmu_read(mos, vd->vdev_ms_array,
4287 msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
4288 VERIFY3U(object, ==, space_map_object(msp->ms_sm));
4289
4290 mutex_exit(&msp->ms_sync_lock);
4291 dmu_tx_commit(tx);
4292 }
4293
4294 static void
4295 metaslab_evict(metaslab_t *msp, uint64_t txg)
4296 {
4297 if (!msp->ms_loaded || msp->ms_disabled != 0)
4298 return;
4299
4300 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
4301 VERIFY0(range_tree_space(
4302 msp->ms_allocating[(txg + t) & TXG_MASK]));
4303 }
4304 if (msp->ms_allocator != -1)
4305 metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);
4306
4307 if (!metaslab_debug_unload)
4308 metaslab_unload(msp);
4309 }
4310
4311 /*
4312 * Called after a transaction group has completely synced to mark
4313 * all of the metaslab's free space as usable.
4314 */
4315 void
4316 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
4317 {
4318 metaslab_group_t *mg = msp->ms_group;
4319 vdev_t *vd = mg->mg_vd;
4320 spa_t *spa = vd->vdev_spa;
4321 range_tree_t **defer_tree;
4322 int64_t alloc_delta, defer_delta;
4323 boolean_t defer_allowed = B_TRUE;
4324
4325 ASSERT(!vd->vdev_ishole);
4326
4327 mutex_enter(&msp->ms_lock);
4328
4329 if (msp->ms_new) {
4330 /* this is a new metaslab, add its capacity to the vdev */
4331 metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
4332
4333 /* there should be no allocations nor frees at this point */
4334 VERIFY0(msp->ms_allocated_this_txg);
4335 VERIFY0(range_tree_space(msp->ms_freed));
4336 }
4337
4338 ASSERT0(range_tree_space(msp->ms_freeing));
4339 ASSERT0(range_tree_space(msp->ms_checkpointing));
4340
4341 defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
4342
4343 uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
4344 metaslab_class_get_alloc(spa_normal_class(spa));
4345 if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing ||
4346 vd->vdev_rz_expanding) {
4347 defer_allowed = B_FALSE;
4348 }
4349
4350 defer_delta = 0;
4351 alloc_delta = msp->ms_allocated_this_txg -
4352 range_tree_space(msp->ms_freed);
4353
4354 if (defer_allowed) {
4355 defer_delta = range_tree_space(msp->ms_freed) -
4356 range_tree_space(*defer_tree);
4357 } else {
4358 defer_delta -= range_tree_space(*defer_tree);
4359 }
4360 metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
4361 defer_delta, 0);
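	/*
	 * Worked example with made-up sizes: if this txg allocated 10M,
	 * freed 6M, and the defer bucket being recycled holds 4M, then
	 * alloc_delta = 10M - 6M = 4M and, with deferral allowed,
	 * defer_delta = 6M - 4M = 2M.  Allocated space thus rises by
	 * alloc_delta + defer_delta = 6M (the new 10M minus the 4M of
	 * old deferred frees returning to circulation), while the 6M
	 * freed this txg stays deferred until its bucket is recycled
	 * TXG_DEFER_SIZE txgs from now.
	 */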
4362
4363 if (spa_syncing_log_sm(spa) == NULL) {
4364 /*
4365 * If there's a metaslab_load() in progress and we don't have
4366 * a log space map, it means that we probably wrote to the
4367 * metaslab's space map. If this is the case, we need to
4368 * make sure that we wait for the load to complete so that we
4369 * have a consistent view of the in-core state of the metaslab.
4370 */
4371 metaslab_load_wait(msp);
4372 } else {
4373 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
4374 }
4375
4376 /*
4377 * When auto-trimming is enabled, free ranges which are added to
4378 * ms_allocatable are also added to ms_trim. The ms_trim tree is
4379 * periodically consumed by the vdev_autotrim_thread() which issues
4380 * trims for all ranges and then vacates the tree. The ms_trim tree
4381 * can be discarded at any time with the sole consequence of recent
4382 * frees not being trimmed.
4383 */
4384 if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
4385 range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
4386 if (!defer_allowed) {
4387 range_tree_walk(msp->ms_freed, range_tree_add,
4388 msp->ms_trim);
4389 }
4390 } else {
4391 range_tree_vacate(msp->ms_trim, NULL, NULL);
4392 }
4393
4394 /*
4395 * Move the frees from the defer_tree back to the free
4396 * range tree (if it's loaded). Swap the freed_tree and
4397 * the defer_tree -- this is safe to do because we've
4398 * just emptied out the defer_tree.
4399 */
4400 range_tree_vacate(*defer_tree,
4401 msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
4402 if (defer_allowed) {
4403 range_tree_swap(&msp->ms_freed, defer_tree);
4404 } else {
4405 range_tree_vacate(msp->ms_freed,
4406 msp->ms_loaded ? range_tree_add : NULL,
4407 msp->ms_allocatable);
4408 }
4409
4410 msp->ms_synced_length = space_map_length(msp->ms_sm);
4411
4412 msp->ms_deferspace += defer_delta;
4413 ASSERT3S(msp->ms_deferspace, >=, 0);
4414 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
4415 if (msp->ms_deferspace != 0) {
4416 /*
4417 * Keep syncing this metaslab until all deferred frees
4418 * are back in circulation.
4419 */
4420 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
4421 }
4422 metaslab_aux_histograms_update_done(msp, defer_allowed);
4423
4424 if (msp->ms_new) {
4425 msp->ms_new = B_FALSE;
4426 mutex_enter(&mg->mg_lock);
4427 mg->mg_ms_ready++;
4428 mutex_exit(&mg->mg_lock);
4429 }
4430
4431 /*
4432 * Re-sort metaslab within its group now that we've adjusted
4433 * its allocatable space.
4434 */
4435 metaslab_recalculate_weight_and_sort(msp);
4436
4437 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4438 ASSERT0(range_tree_space(msp->ms_freeing));
4439 ASSERT0(range_tree_space(msp->ms_freed));
4440 ASSERT0(range_tree_space(msp->ms_checkpointing));
4441 msp->ms_allocating_total -= msp->ms_allocated_this_txg;
4442 msp->ms_allocated_this_txg = 0;
4443 mutex_exit(&msp->ms_lock);
4444 }
4445
4446 void
4447 metaslab_sync_reassess(metaslab_group_t *mg)
4448 {
4449 spa_t *spa = mg->mg_class->mc_spa;
4450
4451 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4452 metaslab_group_alloc_update(mg);
4453 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
4454
4455 /*
4456 * Preload the next potential metaslabs but only on active
4457 * metaslab groups. We can get into a state where the metaslab
4458 * is no longer active since we dirty metaslabs as we remove a
4459 * a device, thus potentially making the metaslab group eligible
4460 * for preloading.
4461 */
4462 if (mg->mg_activation_count > 0) {
4463 metaslab_group_preload(mg);
4464 }
4465 spa_config_exit(spa, SCL_ALLOC, FTAG);
4466 }
4467
4468 /*
4469 * When writing a ditto block (i.e. more than one DVA for a given BP) on
4470 * the same vdev as an existing DVA of this BP, then try to allocate it
4471 * on a different metaslab than existing DVAs (i.e. a unique metaslab).
4472 */
4473 static boolean_t
4474 metaslab_is_unique(metaslab_t *msp, dva_t *dva)
4475 {
4476 uint64_t dva_ms_id;
4477
4478 if (DVA_GET_ASIZE(dva) == 0)
4479 return (B_TRUE);
4480
4481 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
4482 return (B_TRUE);
4483
4484 dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
4485
4486 return (msp->ms_id != dva_ms_id);
4487 }
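/*
 * Example with a hypothetical DVA: on a top-level vdev with
 * vdev_ms_shift = 34 (16G metaslabs), a DVA at offset 0x900000000 maps
 * to metaslab id 0x900000000 >> 34 = 2, so a new DVA is considered
 * unique unless it lands on that same vdev inside metaslab 2.
 */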
4488
4489 /*
4490 * ==========================================================================
4491 * Metaslab allocation tracing facility
4492 * ==========================================================================
4493 */
4494
4495 /*
4496 * Add an allocation trace element to the allocation tracing list.
4497 */
4498 static void
4499 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
4500 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
4501 int allocator)
4502 {
4503 metaslab_alloc_trace_t *mat;
4504
4505 if (!metaslab_trace_enabled)
4506 return;
4507
4508 /*
4509 * When the tracing list reaches its maximum we remove
4510 * the second element in the list before adding a new one.
4511 * By removing the second element we preserve the original
4512 * entry as a clue to what allocation steps have already been
4513 * performed.
4514 */
4515 if (zal->zal_size == metaslab_trace_max_entries) {
4516 metaslab_alloc_trace_t *mat_next;
4517 #ifdef ZFS_DEBUG
4518 panic("too many entries in allocation list");
4519 #endif
4520 METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
4521 zal->zal_size--;
4522 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
4523 list_remove(&zal->zal_list, mat_next);
4524 kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
4525 }
4526
4527 mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
4528 list_link_init(&mat->mat_list_node);
4529 mat->mat_mg = mg;
4530 mat->mat_msp = msp;
4531 mat->mat_size = psize;
4532 mat->mat_dva_id = dva_id;
4533 mat->mat_offset = offset;
4534 mat->mat_weight = 0;
4535 mat->mat_allocator = allocator;
4536
4537 if (msp != NULL)
4538 mat->mat_weight = msp->ms_weight;
4539
4540 /*
4541 * The list is part of the zio so locking is not required. Only
4542 * a single thread will perform allocations for a given zio.
4543 */
4544 list_insert_tail(&zal->zal_list, mat);
4545 zal->zal_size++;
4546
4547 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
4548 }
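/*
 * Sketch of the trace lifecycle (the zio layer drives this for real
 * allocations):
 *
 *	zio_alloc_list_t zal;
 *	metaslab_trace_init(&zal);
 *	... each allocation attempt calls metaslab_trace_add() ...
 *	metaslab_trace_fini(&zal);
 *
 * metaslab_trace_fini() frees every entry still on the list.
 */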
4549
4550 void
4551 metaslab_trace_init(zio_alloc_list_t *zal)
4552 {
4553 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
4554 offsetof(metaslab_alloc_trace_t, mat_list_node));
4555 zal->zal_size = 0;
4556 }
4557
4558 void
4559 metaslab_trace_fini(zio_alloc_list_t *zal)
4560 {
4561 metaslab_alloc_trace_t *mat;
4562
4563 while ((mat = list_remove_head(&zal->zal_list)) != NULL)
4564 kmem_cache_free(metaslab_alloc_trace_cache, mat);
4565 list_destroy(&zal->zal_list);
4566 zal->zal_size = 0;
4567 }
4568
4569 /*
4570 * ==========================================================================
4571 * Metaslab block operations
4572 * ==========================================================================
4573 */
4574
4575 static void
4576 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, const void *tag,
4577 int flags, int allocator)
4578 {
4579 if (!(flags & METASLAB_ASYNC_ALLOC) ||
4580 (flags & METASLAB_DONT_THROTTLE))
4581 return;
4582
4583 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4584 if (!mg->mg_class->mc_alloc_throttle_enabled)
4585 return;
4586
4587 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4588 (void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag);
4589 }
4590
4591 static void
4592 metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
4593 {
4594 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4595 metaslab_class_allocator_t *mca =
4596 &mg->mg_class->mc_allocator[allocator];
4597 uint64_t max = mg->mg_max_alloc_queue_depth;
4598 uint64_t cur = mga->mga_cur_max_alloc_queue_depth;
4599 while (cur < max) {
4600 if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth,
4601 cur, cur + 1) == cur) {
4602 atomic_inc_64(&mca->mca_alloc_max_slots);
4603 return;
4604 }
4605 cur = mga->mga_cur_max_alloc_queue_depth;
4606 }
4607 }
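/*
 * The loop above is the usual lock-free "increment, but never past the
 * max" idiom: atomic_cas_64() installs cur + 1 only if the slot still
 * holds cur; on a lost race we reread and retry, and we return without
 * touching mca_alloc_max_slots once other threads have already raised
 * the queue depth to mg_max_alloc_queue_depth.
 */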
4608
4609 void
4610 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, const void *tag,
4611 int flags, int allocator, boolean_t io_complete)
4612 {
4613 if (!(flags & METASLAB_ASYNC_ALLOC) ||
4614 (flags & METASLAB_DONT_THROTTLE))
4615 return;
4616
4617 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4618 if (!mg->mg_class->mc_alloc_throttle_enabled)
4619 return;
4620
4621 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4622 (void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag);
4623 if (io_complete)
4624 metaslab_group_increment_qdepth(mg, allocator);
4625 }
4626
4627 void
4628 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, const void *tag,
4629 int allocator)
4630 {
4631 #ifdef ZFS_DEBUG
4632 const dva_t *dva = bp->blk_dva;
4633 int ndvas = BP_GET_NDVAS(bp);
4634
4635 for (int d = 0; d < ndvas; d++) {
4636 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
4637 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4638 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4639 VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag));
4640 }
4641 #endif
4642 }
4643
4644 static uint64_t
4645 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
4646 {
4647 uint64_t start;
4648 range_tree_t *rt = msp->ms_allocatable;
4649 metaslab_class_t *mc = msp->ms_group->mg_class;
4650
4651 ASSERT(MUTEX_HELD(&msp->ms_lock));
4652 VERIFY(!msp->ms_condensing);
4653 VERIFY0(msp->ms_disabled);
4654 VERIFY0(msp->ms_new);
4655
4656 start = mc->mc_ops->msop_alloc(msp, size);
4657 if (start != -1ULL) {
4658 metaslab_group_t *mg = msp->ms_group;
4659 vdev_t *vd = mg->mg_vd;
4660
4661 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
4662 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
4663 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
4664 range_tree_remove(rt, start, size);
4665 range_tree_clear(msp->ms_trim, start, size);
4666
4667 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
4668 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
4669
4670 range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
4671 msp->ms_allocating_total += size;
4672
4673 /* Track the last successful allocation */
4674 msp->ms_alloc_txg = txg;
4675 metaslab_verify_space(msp, txg);
4676 }
4677
4678 /*
4679 * Now that we've attempted the allocation we need to update the
4680 * metaslab's maximum block size since it may have changed.
4681 */
4682 msp->ms_max_size = metaslab_largest_allocatable(msp);
4683 return (start);
4684 }
4685
4686 /*
4687 * Find the metaslab with the highest weight that is less than what we've
4688 * already tried. In the common case, this means that we will examine each
4689 * metaslab at most once. Note that concurrent callers could reorder metaslabs
4690 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
4691 * activated by another thread, and we fail to allocate from the metaslab we
4692 * have selected, we may not try the newly-activated metaslab, and instead
4693 * activate another metaslab. This is not optimal, but generally does not cause
4694 * any problems (a possible exception being if every metaslab is completely full
4695 * except for the newly-activated metaslab which we fail to examine).
4696 */
4697 static metaslab_t *
4698 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
4699 dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
4700 boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search,
4701 boolean_t *was_active)
4702 {
4703 avl_index_t idx;
4704 avl_tree_t *t = &mg->mg_metaslab_tree;
4705 metaslab_t *msp = avl_find(t, search, &idx);
4706 if (msp == NULL)
4707 msp = avl_nearest(t, idx, AVL_AFTER);
4708
4709 uint_t tries = 0;
4710 for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
4711 int i;
4712
4713 if (!try_hard && tries > zfs_metaslab_find_max_tries) {
4714 METASLABSTAT_BUMP(metaslabstat_too_many_tries);
4715 return (NULL);
4716 }
4717 tries++;
4718
4719 if (!metaslab_should_allocate(msp, asize, try_hard)) {
4720 metaslab_trace_add(zal, mg, msp, asize, d,
4721 TRACE_TOO_SMALL, allocator);
4722 continue;
4723 }
4724
4725 /*
4726 * If the selected metaslab is condensing or disabled, or
4727 * hasn't gone through a metaslab_sync_done(), then skip it.
4728 */
4729 if (msp->ms_condensing || msp->ms_disabled > 0 || msp->ms_new)
4730 continue;
4731
4732 *was_active = msp->ms_allocator != -1;
4733 /*
4734 * If we're activating as primary, this is our first allocation
4735 * from this disk, so we don't need to check how close we are.
4736 * If the metaslab under consideration was already active,
4737 * we're getting desperate enough to steal another allocator's
4738 * metaslab, so we still don't care about distances.
4739 */
4740 if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
4741 break;
4742
4743 for (i = 0; i < d; i++) {
4744 if (want_unique &&
4745 !metaslab_is_unique(msp, &dva[i]))
4746 break; /* try another metaslab */
4747 }
4748 if (i == d)
4749 break;
4750 }
4751
4752 if (msp != NULL) {
4753 search->ms_weight = msp->ms_weight;
4754 search->ms_start = msp->ms_start + 1;
4755 search->ms_allocator = msp->ms_allocator;
4756 search->ms_primary = msp->ms_primary;
4757 }
4758 return (msp);
4759 }
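/*
 * The resume trick above: before returning, the caller's dummy "search"
 * metaslab is stamped with this metaslab's sort key (weight, allocator,
 * primary) and ms_start + 1, so the next avl_find() resumes immediately
 * after the metaslab we just tried and the walk never revisits it.
 */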
4760
4761 static void
4762 metaslab_active_mask_verify(metaslab_t *msp)
4763 {
4764 ASSERT(MUTEX_HELD(&msp->ms_lock));
4765
4766 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
4767 return;
4768
4769 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
4770 return;
4771
4772 if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
4773 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4774 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4775 VERIFY3S(msp->ms_allocator, !=, -1);
4776 VERIFY(msp->ms_primary);
4777 return;
4778 }
4779
4780 if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
4781 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4782 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4783 VERIFY3S(msp->ms_allocator, !=, -1);
4784 VERIFY(!msp->ms_primary);
4785 return;
4786 }
4787
4788 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
4789 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4790 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4791 VERIFY3S(msp->ms_allocator, ==, -1);
4792 return;
4793 }
4794 }
4795
4796 static uint64_t
4797 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
4798 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
4799 int allocator, boolean_t try_hard)
4800 {
4801 metaslab_t *msp = NULL;
4802 uint64_t offset = -1ULL;
4803
4804 uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
4805 for (int i = 0; i < d; i++) {
4806 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4807 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4808 activation_weight = METASLAB_WEIGHT_SECONDARY;
4809 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4810 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4811 activation_weight = METASLAB_WEIGHT_CLAIM;
4812 break;
4813 }
4814 }
4815
4816 /*
4817 * If we don't have enough metaslabs active to fill the entire array, we
4818 * just use the 0th slot.
4819 */
4820 if (mg->mg_ms_ready < mg->mg_allocators * 3)
4821 allocator = 0;
4822 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4823
4824 ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
4825
4826 metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
4827 search->ms_weight = UINT64_MAX;
4828 search->ms_start = 0;
4829 /*
4830 * At the end of the metaslab tree are the already-active metaslabs,
4831 * first the primaries, then the secondaries. When we resume searching
4832 * through the tree, we need to consider ms_allocator and ms_primary so
4833 * we start in the location right after where we left off, and don't
4834 * accidentally loop forever considering the same metaslabs.
4835 */
4836 search->ms_allocator = -1;
4837 search->ms_primary = B_TRUE;
4838 for (;;) {
4839 boolean_t was_active = B_FALSE;
4840
4841 mutex_enter(&mg->mg_lock);
4842
4843 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4844 mga->mga_primary != NULL) {
4845 msp = mga->mga_primary;
4846
4847 /*
4848 * Even though we don't hold the ms_lock for the
4849 * primary metaslab, those fields should not
4850 * change while we hold the mg_lock. Thus it is
4851 * safe to make assertions on them.
4852 */
4853 ASSERT(msp->ms_primary);
4854 ASSERT3S(msp->ms_allocator, ==, allocator);
4855 ASSERT(msp->ms_loaded);
4856
4857 was_active = B_TRUE;
4858 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4859 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4860 mga->mga_secondary != NULL) {
4861 msp = mga->mga_secondary;
4862
4863 /*
4864 * See comment above about the similar assertions
4865 * for the primary metaslab.
4866 */
4867 ASSERT(!msp->ms_primary);
4868 ASSERT3S(msp->ms_allocator, ==, allocator);
4869 ASSERT(msp->ms_loaded);
4870
4871 was_active = B_TRUE;
4872 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4873 } else {
4874 msp = find_valid_metaslab(mg, activation_weight, dva, d,
4875 want_unique, asize, allocator, try_hard, zal,
4876 search, &was_active);
4877 }
4878
4879 mutex_exit(&mg->mg_lock);
4880 if (msp == NULL) {
4881 kmem_free(search, sizeof (*search));
4882 return (-1ULL);
4883 }
4884 mutex_enter(&msp->ms_lock);
4885
4886 metaslab_active_mask_verify(msp);
4887
4888 /*
4889 * This code is disabled because of issues with
4890 * tracepoints in non-GPL kernel modules.
4891 */
4892 #if 0
4893 DTRACE_PROBE3(ms__activation__attempt,
4894 metaslab_t *, msp, uint64_t, activation_weight,
4895 boolean_t, was_active);
4896 #endif
4897
4898 /*
4899 * Ensure that the metaslab we have selected is still
4900 * capable of handling our request. It's possible that
4901 * another thread may have changed the weight while we
4902 * were blocked on the metaslab lock. We check the
4903 * active status first to see if we need to go back and
4904 * select a new metaslab.
4905 */
4906 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
4907 ASSERT3S(msp->ms_allocator, ==, -1);
4908 mutex_exit(&msp->ms_lock);
4909 continue;
4910 }
4911
4912 /*
4913 * If the metaslab was activated for another allocator
4914 * while we were waiting in the ms_lock above, or it's
4915 * a primary and we're seeking a secondary (or vice versa),
4916 * we go back and select a new metaslab.
4917 */
4918 if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
4919 (msp->ms_allocator != -1) &&
4920 (msp->ms_allocator != allocator || ((activation_weight ==
4921 METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
4922 ASSERT(msp->ms_loaded);
4923 ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
4924 msp->ms_allocator != -1);
4925 mutex_exit(&msp->ms_lock);
4926 continue;
4927 }
4928
4929 /*
4930 * This metaslab was used for claiming regions allocated
4931 * by the ZIL during pool import. Once these regions are
4932 * claimed we don't need to keep the CLAIM bit set
4933 * anymore. Passivate this metaslab to zero its activation
4934 * mask.
4935 */
4936 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
4937 activation_weight != METASLAB_WEIGHT_CLAIM) {
4938 ASSERT(msp->ms_loaded);
4939 ASSERT3S(msp->ms_allocator, ==, -1);
4940 metaslab_passivate(msp, msp->ms_weight &
4941 ~METASLAB_WEIGHT_CLAIM);
4942 mutex_exit(&msp->ms_lock);
4943 continue;
4944 }
4945
4946 metaslab_set_selected_txg(msp, txg);
4947
4948 int activation_error =
4949 metaslab_activate(msp, allocator, activation_weight);
4950 metaslab_active_mask_verify(msp);
4951
4952 /*
4953 * If the metaslab was activated by another thread for
4954 * another allocator or activation_weight (EBUSY), or it
4955 * failed because another metaslab was assigned as primary
4956 * for this allocator (EEXIST) we continue using this
4957 * metaslab for our allocation, rather than going on to a
4958 * worse metaslab (we waited for that metaslab to be loaded
4959 * after all).
4960 *
4961 * If the activation failed due to an I/O error or ENOSPC we
4962 * skip to the next metaslab.
4963 */
4964 boolean_t activated;
4965 if (activation_error == 0) {
4966 activated = B_TRUE;
4967 } else if (activation_error == EBUSY ||
4968 activation_error == EEXIST) {
4969 activated = B_FALSE;
4970 } else {
4971 mutex_exit(&msp->ms_lock);
4972 continue;
4973 }
4974 ASSERT(msp->ms_loaded);
4975
4976 /*
4977 * Now that we have the lock, recheck to see if we should
4978 * continue to use this metaslab for this allocation. The
4979 * metaslab is now loaded so metaslab_should_allocate()
4980 * can accurately determine if the allocation attempt should
4981 * proceed.
4982 */
4983 if (!metaslab_should_allocate(msp, asize, try_hard)) {
4984 /* Passivate this metaslab and select a new one. */
4985 metaslab_trace_add(zal, mg, msp, asize, d,
4986 TRACE_TOO_SMALL, allocator);
4987 goto next;
4988 }
4989
4990 /*
4991 * If this metaslab is currently condensing then pick again
4992 * as we can't manipulate this metaslab until it's committed
4993 * to disk. If this metaslab is being initialized, we shouldn't
4994 * allocate from it since the allocated region might be
4995 * overwritten after allocation.
4996 */
4997 if (msp->ms_condensing) {
4998 metaslab_trace_add(zal, mg, msp, asize, d,
4999 TRACE_CONDENSING, allocator);
5000 if (activated) {
5001 metaslab_passivate(msp, msp->ms_weight &
5002 ~METASLAB_ACTIVE_MASK);
5003 }
5004 mutex_exit(&msp->ms_lock);
5005 continue;
5006 } else if (msp->ms_disabled > 0) {
5007 metaslab_trace_add(zal, mg, msp, asize, d,
5008 TRACE_DISABLED, allocator);
5009 if (activated) {
5010 metaslab_passivate(msp, msp->ms_weight &
5011 ~METASLAB_ACTIVE_MASK);
5012 }
5013 mutex_exit(&msp->ms_lock);
5014 continue;
5015 }
5016
5017 offset = metaslab_block_alloc(msp, asize, txg);
5018 metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
5019
5020 if (offset != -1ULL) {
5021 /* Proactively passivate the metaslab, if needed */
5022 if (activated)
5023 metaslab_segment_may_passivate(msp);
5024 break;
5025 }
5026 next:
5027 ASSERT(msp->ms_loaded);
5028
5029 /*
5030 * This code is disabled because of issues with
5031 * tracepoints in non-GPL kernel modules.
5032 */
5033 #if 0
5034 DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
5035 uint64_t, asize);
5036 #endif
5037
5038 /*
5039 * We were unable to allocate from this metaslab so determine
5040 * a new weight for this metaslab. Now that we have loaded
5041 * the metaslab we can provide a better hint to the metaslab
5042 * selector.
5043 *
5044 * For space-based metaslabs, we use the maximum block size.
5045 * This information is only available when the metaslab
5046 * is loaded and is more accurate than the generic free
5047 * space weight that was calculated by metaslab_weight().
5048 * This information allows us to quickly compare the maximum
5049 * available allocation in the metaslab to the allocation
5050 * size being requested.
5051 *
5052 * For segment-based metaslabs, determine the new weight
5053 * based on the highest bucket in the range tree. We
5054 * explicitly use the loaded segment weight (i.e. the range
5055 * tree histogram) since it contains the space that is
5056 * currently available for allocation and is accurate
5057 * even within a sync pass.
5058 */
5059 uint64_t weight;
5060 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
5061 weight = metaslab_largest_allocatable(msp);
5062 WEIGHT_SET_SPACEBASED(weight);
5063 } else {
5064 weight = metaslab_weight_from_range_tree(msp);
5065 }
5066
5067 if (activated) {
5068 metaslab_passivate(msp, weight);
5069 } else {
5070 /*
5071 * For the case where we use the metaslab that is
5072 * active for another allocator we want to make
5073 * sure that we retain the activation mask.
5074 *
5075 * Note that we could attempt to use something like
5076 * metaslab_recalculate_weight_and_sort() that
5077 * retains the activation mask here. That function
5078 * uses metaslab_weight() to set the weight though
5079 * which is not as accurate as the calculations
5080 * above.
5081 */
5082 weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
5083 metaslab_group_sort(mg, msp, weight);
5084 }
5085 metaslab_active_mask_verify(msp);
5086
5087 /*
5088 * We have just failed an allocation attempt, check
5089 * that metaslab_should_allocate() agrees. Otherwise,
5090 * we may end up in an infinite loop retrying the same
5091 * metaslab.
5092 */
5093 ASSERT(!metaslab_should_allocate(msp, asize, try_hard));
5094
5095 mutex_exit(&msp->ms_lock);
5096 }
5097 mutex_exit(&msp->ms_lock);
5098 kmem_free(search, sizeof (*search));
5099 return (offset);
5100 }
5101
5102 static uint64_t
5103 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
5104 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
5105 int allocator, boolean_t try_hard)
5106 {
5107 uint64_t offset;
5108 ASSERT(mg->mg_initialized);
5109
5110 offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
5111 dva, d, allocator, try_hard);
5112
5113 mutex_enter(&mg->mg_lock);
5114 if (offset == -1ULL) {
5115 mg->mg_failed_allocations++;
5116 metaslab_trace_add(zal, mg, NULL, asize, d,
5117 TRACE_GROUP_FAILURE, allocator);
5118 if (asize == SPA_GANGBLOCKSIZE) {
5119 /*
5120 * This metaslab group was unable to allocate
5121 * the minimum gang block size so it must be out of
5122 * space. We must notify the allocation throttle
5123 * to start skipping allocation attempts to this
5124 * metaslab group until more space becomes available.
5125 * Note: this failure cannot be caused by the
5126 * allocation throttle since the allocation throttle
5127 * is only responsible for skipping devices and
5128 * not failing block allocations.
5129 */
5130 mg->mg_no_free_space = B_TRUE;
5131 }
5132 }
5133 mg->mg_allocations++;
5134 mutex_exit(&mg->mg_lock);
5135 return (offset);
5136 }
5137
5138 /*
5139 * Allocate a block for the specified i/o.
5140 */
5141 int
5142 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
5143 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
5144 zio_alloc_list_t *zal, int allocator)
5145 {
5146 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5147 metaslab_group_t *mg, *rotor;
5148 vdev_t *vd;
5149 boolean_t try_hard = B_FALSE;
5150
5151 ASSERT(!DVA_IS_VALID(&dva[d]));
5152
5153 /*
5154 * For testing, make some blocks above a certain size be gang blocks.
5155 * This will result in more split blocks when using device removal,
5156 * and a large number of split blocks coupled with ztest-induced
5157 * damage can result in extremely long reconstruction times. This
5158 * will also test spilling from special to normal.
5159 */
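/*
 * For example, with metaslab_force_ganging_pct = 3, roughly 3% of
 * allocations at or above metaslab_force_ganging in size take the
 * ENOSPC path below, and the caller then typically falls back to a
 * gang allocation.
 */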
5160 if (psize >= metaslab_force_ganging &&
5161 metaslab_force_ganging_pct > 0 &&
5162 (random_in_range(100) < MIN(metaslab_force_ganging_pct, 100))) {
5163 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
5164 allocator);
5165 return (SET_ERROR(ENOSPC));
5166 }
5167
5168 /*
5169 * Start at the rotor and loop through all mgs until we find something.
5170 * Note that there's no locking on mca_rotor or mca_aliquot because
5171 * nothing actually breaks if we miss a few updates -- we just won't
5172 * allocate quite as evenly. It all balances out over time.
5173 *
5174 * If we are doing ditto or log blocks, try to spread them across
5175 * consecutive vdevs. If we're forced to reuse a vdev before we've
5176 * allocated all of our ditto blocks, then try to spread them out on
5177 * that vdev as much as possible. If it turns out to not be possible,
5178 * gradually lower our standards until anything becomes acceptable.
5179 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
5180 * gives us hope of containing our fault domains to something we're
5181 * able to reason about. Otherwise, any two top-level vdev failures
5182 * will guarantee the loss of data. With consecutive allocation,
5183 * only two adjacent top-level vdev failures will result in data loss.
5184 *
5185 * If we are doing gang blocks (hintdva is non-NULL), try to keep
5186 * ourselves on the same vdev as our gang block header. That
5187 * way, we can hope for locality in vdev_cache, plus it makes our
5188 * fault domains something tractable.
5189 */
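/*
 * To illustrate the fault-domain point with hypothetical numbers:
 * with five top-level vdevs and two copies placed on consecutive
 * vdevs, only the five adjacent pairs (out of the ten possible vdev
 * pairs) can cause data loss, rather than any two of the five vdevs.
 */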
5190 if (hintdva) {
5191 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
5192
5193 /*
5194 * It's possible the vdev we're using as the hint no
5195 * longer exists or its mg has been closed (e.g. by
5196 * device removal). Consult the rotor when
5197 * all else fails.
5198 */
5199 if (vd != NULL && vd->vdev_mg != NULL) {
5200 mg = vdev_get_mg(vd, mc);
5201
5202 if (flags & METASLAB_HINTBP_AVOID)
5203 mg = mg->mg_next;
5204 } else {
5205 mg = mca->mca_rotor;
5206 }
5207 } else if (d != 0) {
5208 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
5209 mg = vd->vdev_mg->mg_next;
5210 } else {
5211 ASSERT(mca->mca_rotor != NULL);
5212 mg = mca->mca_rotor;
5213 }
5214
5215 /*
5216 * If the hint put us into the wrong metaslab class, or into a
5217 * metaslab group that has been passivated, just follow the rotor.
5218 */
5219 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
5220 mg = mca->mca_rotor;
5221
5222 rotor = mg;
5223 top:
5224 do {
5225 boolean_t allocatable;
5226
5227 ASSERT(mg->mg_activation_count == 1);
5228 vd = mg->mg_vd;
5229
5230 /*
5231 * Don't allocate from faulted devices.
5232 */
5233 if (try_hard) {
5234 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
5235 allocatable = vdev_allocatable(vd);
5236 spa_config_exit(spa, SCL_ZIO, FTAG);
5237 } else {
5238 allocatable = vdev_allocatable(vd);
5239 }
5240
5241 /*
5242 * Determine if the selected metaslab group is eligible
5243 * for allocations. If we're ganging then don't allow
5244 * this metaslab group to skip allocations since that would
5245 * inadvertently return ENOSPC and suspend the pool
5246 * even though space is still available.
5247 */
5248 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
5249 allocatable = metaslab_group_allocatable(mg, rotor,
5250 flags, psize, allocator, d);
5251 }
5252
5253 if (!allocatable) {
5254 metaslab_trace_add(zal, mg, NULL, psize, d,
5255 TRACE_NOT_ALLOCATABLE, allocator);
5256 goto next;
5257 }
5258
5259 ASSERT(mg->mg_initialized);
5260
5261 /*
5262 * Avoid writing single-copy data to an unhealthy,
5263 * non-redundant vdev, unless we've already tried all
5264 * other vdevs.
5265 */
5266 if (vd->vdev_state < VDEV_STATE_HEALTHY &&
5267 d == 0 && !try_hard && vd->vdev_children == 0) {
5268 metaslab_trace_add(zal, mg, NULL, psize, d,
5269 TRACE_VDEV_ERROR, allocator);
5270 goto next;
5271 }
5272
5273 ASSERT(mg->mg_class == mc);
5274
5275 uint64_t asize = vdev_psize_to_asize_txg(vd, psize, txg);
5276 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
5277
5278 /*
5279 * If we don't need to try hard, then require that the
5280 * block be on a different metaslab from any other DVAs
5281 * in this BP (unique=true). If we are trying hard, then
5282 * allow any metaslab to be used (unique=false).
5283 */
5284 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
5285 !try_hard, dva, d, allocator, try_hard);
5286
5287 if (offset != -1ULL) {
5288 /*
5289 * If we've just selected this metaslab group,
5290 * figure out whether the corresponding vdev is
5291 * over- or under-used relative to the pool,
5292 * and set an allocation bias to even it out.
5293 *
5294 * Bias is also used to compensate for unequally
5295 * sized vdevs so that space is allocated fairly.
5296 */
5297 if (mca->mca_aliquot == 0 && metaslab_bias_enabled) {
5298 vdev_stat_t *vs = &vd->vdev_stat;
5299 int64_t vs_free = vs->vs_space - vs->vs_alloc;
5300 int64_t mc_free = mc->mc_space - mc->mc_alloc;
5301 int64_t ratio;
5302
5303 /*
5304 * Calculate how much more or less we should
5305 * try to allocate from this device during
5306 * this iteration around the rotor.
5307 *
5308 * This basically introduces a zero-centered
5309 * bias towards the devices with the most
5310 * free space, while compensating for vdev
5311 * size differences.
5312 *
5313 * Examples:
5314 * vdev V1 = 16M/128M
5315 * vdev V2 = 16M/128M
5316 * ratio(V1) = 100% ratio(V2) = 100%
5317 *
5318 * vdev V1 = 16M/128M
5319 * vdev V2 = 64M/128M
5320 * ratio(V1) = 127% ratio(V2) = 72%
5321 *
5322 * vdev V1 = 16M/128M
5323 * vdev V2 = 64M/512M
5324 * ratio(V1) = 40% ratio(V2) = 160%
5325 */
5326 ratio = (vs_free * mc->mc_alloc_groups * 100) /
5327 (mc_free + 1);
5328 mg->mg_bias = ((ratio - 100) *
5329 (int64_t)mg->mg_aliquot) / 100;
5330 } else if (!metaslab_bias_enabled) {
5331 mg->mg_bias = 0;
5332 }
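/*
 * Continuing the example above with hypothetical numbers: if
 * mg_aliquot is 1M, ratio(V2) = 160% gives mg_bias =
 * ((160 - 100) * 1M) / 100 = 0.6M, so about 1.6M is written to
 * V2 per pass around the rotor, while ratio(V1) = 40% yields a
 * bias of -0.6M and only 0.4M for V1.
 */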
5333
5334 if ((flags & METASLAB_ZIL) ||
5335 atomic_add_64_nv(&mca->mca_aliquot, asize) >=
5336 mg->mg_aliquot + mg->mg_bias) {
5337 mca->mca_rotor = mg->mg_next;
5338 mca->mca_aliquot = 0;
5339 }
5340
5341 DVA_SET_VDEV(&dva[d], vd->vdev_id);
5342 DVA_SET_OFFSET(&dva[d], offset);
5343 DVA_SET_GANG(&dva[d],
5344 ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
5345 DVA_SET_ASIZE(&dva[d], asize);
5346
5347 return (0);
5348 }
5349 next:
5350 mca->mca_rotor = mg->mg_next;
5351 mca->mca_aliquot = 0;
5352 } while ((mg = mg->mg_next) != rotor);
5353
5354 /*
5355 * If we haven't tried hard, perhaps do so now.
5356 */
5357 if (!try_hard && (zfs_metaslab_try_hard_before_gang ||
5358 GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 ||
5359 psize <= 1 << spa->spa_min_ashift)) {
5360 METASLABSTAT_BUMP(metaslabstat_try_hard);
5361 try_hard = B_TRUE;
5362 goto top;
5363 }
5364
5365 memset(&dva[d], 0, sizeof (dva_t));
5366
5367 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
5368 return (SET_ERROR(ENOSPC));
5369 }
5370
5371 void
5372 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
5373 boolean_t checkpoint)
5374 {
5375 metaslab_t *msp;
5376 spa_t *spa = vd->vdev_spa;
5377
5378 ASSERT(vdev_is_concrete(vd));
5379 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5380 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
5381
5382 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5383
5384 VERIFY(!msp->ms_condensing);
5385 VERIFY3U(offset, >=, msp->ms_start);
5386 VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
5387 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5388 VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
5389
5390 metaslab_check_free_impl(vd, offset, asize);
5391
5392 mutex_enter(&msp->ms_lock);
5393 if (range_tree_is_empty(msp->ms_freeing) &&
5394 range_tree_is_empty(msp->ms_checkpointing)) {
5395 vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
5396 }
5397
5398 if (checkpoint) {
5399 ASSERT(spa_has_checkpoint(spa));
5400 range_tree_add(msp->ms_checkpointing, offset, asize);
5401 } else {
5402 range_tree_add(msp->ms_freeing, offset, asize);
5403 }
5404 mutex_exit(&msp->ms_lock);
5405 }
5406
5407 void
5408 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5409 uint64_t size, void *arg)
5410 {
5411 (void) inner_offset;
5412 boolean_t *checkpoint = arg;
5413
5414 ASSERT3P(checkpoint, !=, NULL);
5415
5416 if (vd->vdev_ops->vdev_op_remap != NULL)
5417 vdev_indirect_mark_obsolete(vd, offset, size);
5418 else
5419 metaslab_free_impl(vd, offset, size, *checkpoint);
5420 }
5421
5422 static void
5423 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
5424 boolean_t checkpoint)
5425 {
5426 spa_t *spa = vd->vdev_spa;
5427
5428 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5429
5430 if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
5431 return;
5432
5433 if (spa->spa_vdev_removal != NULL &&
5434 spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
5435 vdev_is_concrete(vd)) {
5436 /*
5437 * Note: we check if the vdev is concrete because when
5438 * we complete the removal, we first change the vdev to be
5439 * an indirect vdev (in open context), and then (in syncing
5440 * context) clear spa_vdev_removal.
5441 */
5442 free_from_removing_vdev(vd, offset, size);
5443 } else if (vd->vdev_ops->vdev_op_remap != NULL) {
5444 vdev_indirect_mark_obsolete(vd, offset, size);
5445 vd->vdev_ops->vdev_op_remap(vd, offset, size,
5446 metaslab_free_impl_cb, &checkpoint);
5447 } else {
5448 metaslab_free_concrete(vd, offset, size, checkpoint);
5449 }
5450 }
5451
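/*
 * State threaded through remap_blkptr_cb(): rbca_bp is the BP being
 * remapped; rbca_remap_vd/rbca_remap_offset remember the previous
 * (indirect) mapping so that rbca_cb can be invoked on it at the
 * next level of indirection.
 */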
5452 typedef struct remap_blkptr_cb_arg {
5453 blkptr_t *rbca_bp;
5454 spa_remap_cb_t rbca_cb;
5455 vdev_t *rbca_remap_vd;
5456 uint64_t rbca_remap_offset;
5457 void *rbca_cb_arg;
5458 } remap_blkptr_cb_arg_t;
5459
5460 static void
5461 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5462 uint64_t size, void *arg)
5463 {
5464 remap_blkptr_cb_arg_t *rbca = arg;
5465 blkptr_t *bp = rbca->rbca_bp;
5466
5467 /* We cannot remap split blocks. */
5468 if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
5469 return;
5470 ASSERT0(inner_offset);
5471
5472 if (rbca->rbca_cb != NULL) {
5473 /*
5474 * At this point we know that we are not handling split
5475 * blocks and we invoke the callback on the previous
5476 * vdev which must be indirect.
5477 */
5478 ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
5479
5480 rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
5481 rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
5482
5483 /* set up remap_blkptr_cb_arg for the next call */
5484 rbca->rbca_remap_vd = vd;
5485 rbca->rbca_remap_offset = offset;
5486 }
5487
5488 /*
5489 * The phys birth time is that of dva[0]. This ensures that we know
5490 * when each dva was written, so that resilver can determine which
5491 * blocks need to be scrubbed (i.e. those written during the time
5492 * the vdev was offline). It also ensures that the key used in
5493 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If
5494 * we didn't change the phys_birth, a lookup in the ARC for a
5495 * remapped BP could find the data that was previously stored at
5496 * this vdev + offset.
5497 */
5498 vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
5499 DVA_GET_VDEV(&bp->blk_dva[0]));
5500 vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
5501 bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
5502 DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
5503
5504 DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
5505 DVA_SET_OFFSET(&bp->blk_dva[0], offset);
5506 }
5507
5508 /*
5509 * If the block pointer contains any indirect DVAs, modify them to refer to
5510 * concrete DVAs. Note that this will sometimes not be possible, leaving
5511 * the indirect DVA in place. This happens if the indirect DVA spans multiple
5512 * segments in the mapping (i.e. it is a "split block").
5513 *
5514 * If the BP was remapped, calls the callback on the original dva (note the
5515 * callback can be called multiple times if the original indirect DVA refers
5516 * to another indirect DVA, etc).
5517 *
5518 * Returns TRUE if the BP was remapped.
5519 */
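/*
 * Hypothetical usage sketch: a caller that only wants the remap side
 * effect may pass a NULL callback, since remap_blkptr_cb() invokes it
 * only when non-NULL:
 *
 *	if (spa_remap_blkptr(spa, bp, NULL, NULL))
 *		... bp->blk_dva[0] has been rewritten ...
 */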
5520 boolean_t
5521 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
5522 {
5523 remap_blkptr_cb_arg_t rbca;
5524
5525 if (!zfs_remap_blkptr_enable)
5526 return (B_FALSE);
5527
5528 if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
5529 return (B_FALSE);
5530
5531 /*
5532 * Dedup BP's cannot be remapped, because ddt_phys_select() depends
5533 * on DVA[0] being the same in the BP as in the DDT (dedup table).
5534 */
5535 if (BP_GET_DEDUP(bp))
5536 return (B_FALSE);
5537
5538 /*
5539 * Gang blocks cannot be remapped, because
5540 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
5541 * the BP used to read the gang block header (GBH) being the same
5542 * as the DVA[0] that we allocated for the GBH.
5543 */
5544 if (BP_IS_GANG(bp))
5545 return (B_FALSE);
5546
5547 /*
5548 * Embedded BP's have no DVA to remap.
5549 */
5550 if (BP_GET_NDVAS(bp) < 1)
5551 return (B_FALSE);
5552
5553 /*
5554 * Note: we only remap dva[0]. If we remapped other dvas, we
5555 * would no longer know what their phys birth txg is.
5556 */
5557 dva_t *dva = &bp->blk_dva[0];
5558
5559 uint64_t offset = DVA_GET_OFFSET(dva);
5560 uint64_t size = DVA_GET_ASIZE(dva);
5561 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
5562
5563 if (vd->vdev_ops->vdev_op_remap == NULL)
5564 return (B_FALSE);
5565
5566 rbca.rbca_bp = bp;
5567 rbca.rbca_cb = callback;
5568 rbca.rbca_remap_vd = vd;
5569 rbca.rbca_remap_offset = offset;
5570 rbca.rbca_cb_arg = arg;
5571
5572 /*
5573 * remap_blkptr_cb() will be called in order for each level of
5574 * indirection, until a concrete vdev is reached or a split block is
5575 * encountered. rbca_remap_vd and rbca_remap_offset are updated within
5576 * the callback as we go from one indirect vdev to the next (either
5577 * concrete or indirect again) in that order.
5578 */
5579 vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
5580
5581 /* Check if the DVA wasn't remapped because it is a split block */
5582 if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
5583 return (B_FALSE);
5584
5585 return (B_TRUE);
5586 }
5587
5588 /*
5589 * Undo the allocation of a DVA which happened in the given transaction group.
5590 */
5591 void
5592 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5593 {
5594 metaslab_t *msp;
5595 vdev_t *vd;
5596 uint64_t vdev = DVA_GET_VDEV(dva);
5597 uint64_t offset = DVA_GET_OFFSET(dva);
5598 uint64_t size = DVA_GET_ASIZE(dva);
5599
5600 ASSERT(DVA_IS_VALID(dva));
5601 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5602
5603 if (txg > spa_freeze_txg(spa))
5604 return;
5605
5606 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
5607 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
5608 zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
5609 (u_longlong_t)vdev, (u_longlong_t)offset,
5610 (u_longlong_t)size);
5611 return;
5612 }
5613
5614 ASSERT(!vd->vdev_removing);
5615 ASSERT(vdev_is_concrete(vd));
5616 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
5617 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
5618
5619 if (DVA_GET_GANG(dva))
5620 size = vdev_gang_header_asize(vd);
5621
5622 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5623
5624 mutex_enter(&msp->ms_lock);
5625 range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
5626 offset, size);
5627 msp->ms_allocating_total -= size;
5628
5629 VERIFY(!msp->ms_condensing);
5630 VERIFY3U(offset, >=, msp->ms_start);
5631 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
5632 VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
5633 msp->ms_size);
5634 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5635 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5636 range_tree_add(msp->ms_allocatable, offset, size);
5637 mutex_exit(&msp->ms_lock);
5638 }
5639
5640 /*
5641 * Free the block represented by the given DVA.
5642 */
5643 void
5644 metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
5645 {
5646 uint64_t vdev = DVA_GET_VDEV(dva);
5647 uint64_t offset = DVA_GET_OFFSET(dva);
5648 uint64_t size = DVA_GET_ASIZE(dva);
5649 vdev_t *vd = vdev_lookup_top(spa, vdev);
5650
5651 ASSERT(DVA_IS_VALID(dva));
5652 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5653
5654 if (DVA_GET_GANG(dva)) {
5655 size = vdev_gang_header_asize(vd);
5656 }
5657
5658 metaslab_free_impl(vd, offset, size, checkpoint);
5659 }
5660
5661 /*
5662 * Reserve some allocation slots. The reservation system must be called
5663 * before we call into the allocator. If there aren't any available slots
5664 * then the I/O will be throttled until an I/O completes and its slots are
5665 * freed up. The function returns true if it was successful in placing
5666 * the reservation.
5667 */
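/*
 * Hypothetical caller pattern (reserve/unreserve calls must be
 * balanced per I/O):
 *
 *	if (metaslab_class_throttle_reserve(mc, ndvas, allocator, zio, 0)) {
 *		... proceed to allocate ...
 *	} else {
 *		... defer the zio until slots are unreserved ...
 *	}
 */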
5668 boolean_t
5669 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
5670 zio_t *zio, int flags)
5671 {
5672 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5673 uint64_t max = mca->mca_alloc_max_slots;
5674
5675 ASSERT(mc->mc_alloc_throttle_enabled);
5676 if (GANG_ALLOCATION(flags) || (flags & METASLAB_MUST_RESERVE) ||
5677 zfs_refcount_count(&mca->mca_alloc_slots) + slots <= max) {
5678 /*
5679 * The potential race between _count() and _add() is covered
5680 * by the allocator lock in most cases, or is irrelevant in others
5681 * because GANG_ALLOCATION() or METASLAB_MUST_RESERVE is set.
5682 * But even if we assume some other unforeseen scenario, the
5683 * worst that can happen is that a few more I/Os get to
5684 * allocation earlier, which is not a problem.
5685 *
5686 * We reserve the slots individually so that we can unreserve
5687 * them individually when an I/O completes.
5688 */
5689 zfs_refcount_add_few(&mca->mca_alloc_slots, slots, zio);
5690 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
5691 return (B_TRUE);
5692 }
5693 return (B_FALSE);
5694 }
5695
5696 void
5697 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
5698 int allocator, zio_t *zio)
5699 {
5700 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5701
5702 ASSERT(mc->mc_alloc_throttle_enabled);
5703 zfs_refcount_remove_few(&mca->mca_alloc_slots, slots, zio);
5704 }
5705
5706 static int
5707 metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
5708 uint64_t txg)
5709 {
5710 metaslab_t *msp;
5711 spa_t *spa = vd->vdev_spa;
5712 int error = 0;
5713
5714 if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
5715 return (SET_ERROR(ENXIO));
5716
5717 ASSERT3P(vd->vdev_ms, !=, NULL);
5718 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5719
5720 mutex_enter(&msp->ms_lock);
5721
5722 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
5723 error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
5724 if (error == EBUSY) {
5725 ASSERT(msp->ms_loaded);
5726 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
5727 error = 0;
5728 }
5729 }
5730
5731 if (error == 0 &&
5732 !range_tree_contains(msp->ms_allocatable, offset, size))
5733 error = SET_ERROR(ENOENT);
5734
5735 if (error || txg == 0) { /* txg == 0 indicates dry run */
5736 mutex_exit(&msp->ms_lock);
5737 return (error);
5738 }
5739
5740 VERIFY(!msp->ms_condensing);
5741 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5742 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5743 VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
5744 msp->ms_size);
5745 range_tree_remove(msp->ms_allocatable, offset, size);
5746 range_tree_clear(msp->ms_trim, offset, size);
5747
5748 if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */
5749 metaslab_class_t *mc = msp->ms_group->mg_class;
5750 multilist_sublist_t *mls =
5751 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
5752 if (!multilist_link_active(&msp->ms_class_txg_node)) {
5753 msp->ms_selected_txg = txg;
5754 multilist_sublist_insert_head(mls, msp);
5755 }
5756 multilist_sublist_unlock(mls);
5757
5758 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
5759 vdev_dirty(vd, VDD_METASLAB, msp, txg);
5760 range_tree_add(msp->ms_allocating[txg & TXG_MASK],
5761 offset, size);
5762 msp->ms_allocating_total += size;
5763 }
5764
5765 mutex_exit(&msp->ms_lock);
5766
5767 return (0);
5768 }
5769
5770 typedef struct metaslab_claim_cb_arg_t {
5771 uint64_t mcca_txg;
5772 int mcca_error;
5773 } metaslab_claim_cb_arg_t;
5774
5775 static void
5776 metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5777 uint64_t size, void *arg)
5778 {
5779 (void) inner_offset;
5780 metaslab_claim_cb_arg_t *mcca_arg = arg;
5781
5782 if (mcca_arg->mcca_error == 0) {
5783 mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
5784 size, mcca_arg->mcca_txg);
5785 }
5786 }
5787
5788 int
5789 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
5790 {
5791 if (vd->vdev_ops->vdev_op_remap != NULL) {
5792 metaslab_claim_cb_arg_t arg;
5793
5794 /*
5795 * Only zdb(8) can claim on indirect vdevs. This is used
5796 * to detect leaks of mapped space (that are not accounted
5797 * for in the obsolete counts, spacemap, or bpobj).
5798 */
5799 ASSERT(!spa_writeable(vd->vdev_spa));
5800 arg.mcca_error = 0;
5801 arg.mcca_txg = txg;
5802
5803 vd->vdev_ops->vdev_op_remap(vd, offset, size,
5804 metaslab_claim_impl_cb, &arg);
5805
5806 if (arg.mcca_error == 0) {
5807 arg.mcca_error = metaslab_claim_concrete(vd,
5808 offset, size, txg);
5809 }
5810 return (arg.mcca_error);
5811 } else {
5812 return (metaslab_claim_concrete(vd, offset, size, txg));
5813 }
5814 }
5815
5816 /*
5817 * Intent log support: upon opening the pool after a crash, notify the SPA
5818 * of blocks that the intent log has allocated for immediate write, but
5819 * which are still considered free by the SPA because the last transaction
5820 * group didn't commit yet.
5821 */
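/*
 * For example, the ZIL claim code path during pool import ends up
 * here (via metaslab_claim()) for each log block, moving the segment
 * from ms_allocatable back into ms_allocating for the claiming txg.
 */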
5822 static int
5823 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5824 {
5825 uint64_t vdev = DVA_GET_VDEV(dva);
5826 uint64_t offset = DVA_GET_OFFSET(dva);
5827 uint64_t size = DVA_GET_ASIZE(dva);
5828 vdev_t *vd;
5829
5830 if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
5831 return (SET_ERROR(ENXIO));
5832 }
5833
5834 ASSERT(DVA_IS_VALID(dva));
5835
5836 if (DVA_GET_GANG(dva))
5837 size = vdev_gang_header_asize(vd);
5838
5839 return (metaslab_claim_impl(vd, offset, size, txg));
5840 }
5841
5842 int
5843 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
5844 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
5845 zio_alloc_list_t *zal, zio_t *zio, int allocator)
5846 {
5847 dva_t *dva = bp->blk_dva;
5848 dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
5849 int error = 0;
5850
5851 ASSERT(bp->blk_birth == 0);
5852 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
5853
5854 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5855
5856 if (mc->mc_allocator[allocator].mca_rotor == NULL) {
5857 /* no vdevs in this class */
5858 spa_config_exit(spa, SCL_ALLOC, FTAG);
5859 return (SET_ERROR(ENOSPC));
5860 }
5861
5862 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
5863 ASSERT(BP_GET_NDVAS(bp) == 0);
5864 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
5865 ASSERT3P(zal, !=, NULL);
5866
5867 for (int d = 0; d < ndvas; d++) {
5868 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
5869 txg, flags, zal, allocator);
5870 if (error != 0) {
5871 for (d--; d >= 0; d--) {
5872 metaslab_unalloc_dva(spa, &dva[d], txg);
5873 metaslab_group_alloc_decrement(spa,
5874 DVA_GET_VDEV(&dva[d]), zio, flags,
5875 allocator, B_FALSE);
5876 memset(&dva[d], 0, sizeof (dva_t));
5877 }
5878 spa_config_exit(spa, SCL_ALLOC, FTAG);
5879 return (error);
5880 } else {
5881 /*
5882 * Update the metaslab group's queue depth
5883 * based on the newly allocated dva.
5884 */
5885 metaslab_group_alloc_increment(spa,
5886 DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
5887 }
5888 }
5889 ASSERT(error == 0);
5890 ASSERT(BP_GET_NDVAS(bp) == ndvas);
5891
5892 spa_config_exit(spa, SCL_ALLOC, FTAG);
5893
5894 BP_SET_BIRTH(bp, txg, 0);
5895
5896 return (0);
5897 }
5898
5899 void
5900 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
5901 {
5902 const dva_t *dva = bp->blk_dva;
5903 int ndvas = BP_GET_NDVAS(bp);
5904
5905 ASSERT(!BP_IS_HOLE(bp));
5906 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
5907
5908 /*
5909 * If we have a checkpoint for the pool we need to make sure that
5910 * the blocks that we free that are part of the checkpoint won't be
5911 * reused until the checkpoint is discarded or we revert to it.
5912 *
5913 * The checkpoint flag is passed down the metaslab_free code path
5914 * and is set whenever we want to add a block to the checkpoint's
5915 * accounting. That is, we "checkpoint" blocks that existed at the
5916 * time the checkpoint was created and are therefore referenced by
5917 * the checkpointed uberblock.
5918 *
5919 * Note that we don't checkpoint any blocks if the current
5920 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
5921 * normally as they will be referenced by the checkpointed uberblock.
5922 */
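/*
 * For example (hypothetical txgs): with spa_checkpoint_txg == 100 and
 * syncing txg 120, a block born in txg 90 is added to
 * ms_checkpointing below, while a block born in txg 110 is freed
 * normally since the checkpointed uberblock cannot reference it.
 */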
5923 boolean_t checkpoint = B_FALSE;
5924 if (bp->blk_birth <= spa->spa_checkpoint_txg &&
5925 spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
5926 /*
5927 * At this point, if the block is part of the checkpoint
5928 * there is no way it was created in the current txg.
5929 */
5930 ASSERT(!now);
5931 ASSERT3U(spa_syncing_txg(spa), ==, txg);
5932 checkpoint = B_TRUE;
5933 }
5934
5935 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
5936
5937 for (int d = 0; d < ndvas; d++) {
5938 if (now) {
5939 metaslab_unalloc_dva(spa, &dva[d], txg);
5940 } else {
5941 ASSERT3U(txg, ==, spa_syncing_txg(spa));
5942 metaslab_free_dva(spa, &dva[d], checkpoint);
5943 }
5944 }
5945
5946 spa_config_exit(spa, SCL_FREE, FTAG);
5947 }
5948
5949 int
5950 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
5951 {
5952 const dva_t *dva = bp->blk_dva;
5953 int ndvas = BP_GET_NDVAS(bp);
5954 int error = 0;
5955
5956 ASSERT(!BP_IS_HOLE(bp));
5957
5958 if (txg != 0) {
5959 /*
5960 * First do a dry run to make sure all DVAs are claimable,
5961 * so we don't have to unwind from partial failures below.
5962 */
5963 if ((error = metaslab_claim(spa, bp, 0)) != 0)
5964 return (error);
5965 }
5966
5967 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5968
5969 for (int d = 0; d < ndvas; d++) {
5970 error = metaslab_claim_dva(spa, &dva[d], txg);
5971 if (error != 0)
5972 break;
5973 }
5974
5975 spa_config_exit(spa, SCL_ALLOC, FTAG);
5976
5977 ASSERT(error == 0 || txg == 0);
5978
5979 return (error);
5980 }
5981
5982 static void
5983 metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
5984 uint64_t size, void *arg)
5985 {
5986 (void) inner, (void) arg;
5987
5988 if (vd->vdev_ops == &vdev_indirect_ops)
5989 return;
5990
5991 metaslab_check_free_impl(vd, offset, size);
5992 }
5993
5994 static void
5995 metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
5996 {
5997 metaslab_t *msp;
5998 spa_t *spa __maybe_unused = vd->vdev_spa;
5999
6000 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
6001 return;
6002
6003 if (vd->vdev_ops->vdev_op_remap != NULL) {
6004 vd->vdev_ops->vdev_op_remap(vd, offset, size,
6005 metaslab_check_free_impl_cb, NULL);
6006 return;
6007 }
6008
6009 ASSERT(vdev_is_concrete(vd));
6010 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
6011 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
6012
6013 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
6014
6015 mutex_enter(&msp->ms_lock);
6016 if (msp->ms_loaded) {
6017 range_tree_verify_not_present(msp->ms_allocatable,
6018 offset, size);
6019 }
6020
6021 /*
6022 * Check all segments that currently exist in the freeing pipeline.
6023 *
6024 * It would intuitively make sense to also check the current allocating
6025 * tree since metaslab_unalloc_dva() exists for extents that are
6026 * allocated and freed in the same sync pass within the same txg.
6027 * Unfortunately there are places (e.g. the ZIL) where we allocate a
6028 * segment but then we free part of it within the same txg
6029 * [see zil_sync()]. Thus, we don't call range_tree_verify() in the
6030 * current allocating tree.
6031 */
6032 range_tree_verify_not_present(msp->ms_freeing, offset, size);
6033 range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
6034 range_tree_verify_not_present(msp->ms_freed, offset, size);
6035 for (int j = 0; j < TXG_DEFER_SIZE; j++)
6036 range_tree_verify_not_present(msp->ms_defer[j], offset, size);
6037 range_tree_verify_not_present(msp->ms_trim, offset, size);
6038 mutex_exit(&msp->ms_lock);
6039 }
6040
6041 void
6042 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
6043 {
6044 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
6045 return;
6046
6047 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
6048 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
6049 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
6050 vdev_t *vd = vdev_lookup_top(spa, vdev);
6051 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
6052 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
6053
6054 if (DVA_GET_GANG(&bp->blk_dva[i]))
6055 size = vdev_gang_header_asize(vd);
6056
6057 ASSERT3P(vd, !=, NULL);
6058
6059 metaslab_check_free_impl(vd, offset, size);
6060 }
6061 spa_config_exit(spa, SCL_VDEV, FTAG);
6062 }
6063
6064 static void
6065 metaslab_group_disable_wait(metaslab_group_t *mg)
6066 {
6067 ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6068 while (mg->mg_disabled_updating) {
6069 cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6070 }
6071 }
6072
6073 static void
6074 metaslab_group_disabled_increment(metaslab_group_t *mg)
6075 {
6076 ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6077 ASSERT(mg->mg_disabled_updating);
6078
6079 while (mg->mg_ms_disabled >= max_disabled_ms) {
6080 cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6081 }
6082 mg->mg_ms_disabled++;
6083 ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
6084 }
6085
6086 /*
6087 * Mark the metaslab as disabled to prevent any allocations on this metaslab.
6088 * We must also track how many metaslabs are currently disabled within a
6089 * metaslab group and limit them to prevent allocation failures from
6090 * occurring because all metaslabs are disabled.
6091 */
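/*
 * Hypothetical usage by initialize/TRIM-style consumers:
 *
 *	metaslab_disable(msp);
 *	... operate on unallocated space ...
 *	metaslab_enable(msp, B_TRUE, B_FALSE);
 */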
6092 void
6093 metaslab_disable(metaslab_t *msp)
6094 {
6095 ASSERT(!MUTEX_HELD(&msp->ms_lock));
6096 metaslab_group_t *mg = msp->ms_group;
6097
6098 mutex_enter(&mg->mg_ms_disabled_lock);
6099
6100 /*
6101 * To keep an accurate count of how many threads have disabled
6102 * a specific metaslab group, we only allow one thread to mark
6103 * the metaslab group at a time. This ensures that the value of
6104 * ms_disabled will be accurate when we decide to mark a metaslab
6105 * group as disabled. To do this we force all other threads
6106 * to wait until the metaslab group's mg_disabled_updating flag is no
6107 * longer set.
6108 */
6109 metaslab_group_disable_wait(mg);
6110 mg->mg_disabled_updating = B_TRUE;
6111 if (msp->ms_disabled == 0) {
6112 metaslab_group_disabled_increment(mg);
6113 }
6114 mutex_enter(&msp->ms_lock);
6115 msp->ms_disabled++;
6116 mutex_exit(&msp->ms_lock);
6117
6118 mg->mg_disabled_updating = B_FALSE;
6119 cv_broadcast(&mg->mg_ms_disabled_cv);
6120 mutex_exit(&mg->mg_ms_disabled_lock);
6121 }
6122
6123 void
6124 metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
6125 {
6126 metaslab_group_t *mg = msp->ms_group;
6127 spa_t *spa = mg->mg_vd->vdev_spa;
6128
6129 /*
6130 * Wait for the outstanding IO to be synced to prevent newly
6131 * allocated blocks from being overwritten. This is used by
6132 * initialize and TRIM, which modify unallocated space.
6133 */
6134 if (sync)
6135 txg_wait_synced(spa_get_dsl(spa), 0);
6136
6137 mutex_enter(&mg->mg_ms_disabled_lock);
6138 mutex_enter(&msp->ms_lock);
6139 if (--msp->ms_disabled == 0) {
6140 mg->mg_ms_disabled--;
6141 cv_broadcast(&mg->mg_ms_disabled_cv);
6142 if (unload)
6143 metaslab_unload(msp);
6144 }
6145 mutex_exit(&msp->ms_lock);
6146 mutex_exit(&mg->mg_ms_disabled_lock);
6147 }
6148
6149 void
6150 metaslab_set_unflushed_dirty(metaslab_t *ms, boolean_t dirty)
6151 {
6152 ms->ms_unflushed_dirty = dirty;
6153 }
6154
6155 static void
6156 metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
6157 {
6158 vdev_t *vd = ms->ms_group->mg_vd;
6159 spa_t *spa = vd->vdev_spa;
6160 objset_t *mos = spa_meta_objset(spa);
6161
6162 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
6163
6164 metaslab_unflushed_phys_t entry = {
6165 .msp_unflushed_txg = metaslab_unflushed_txg(ms),
6166 };
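/*
 * Entries live in a flat array indexed by metaslab id; for example,
 * with an 8-byte entry, metaslab 7 is stored at byte offset 56
 * within the object.
 */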
6167 uint64_t entry_size = sizeof (entry);
6168 uint64_t entry_offset = ms->ms_id * entry_size;
6169
6170 uint64_t object = 0;
6171 int err = zap_lookup(mos, vd->vdev_top_zap,
6172 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
6173 &object);
6174 if (err == ENOENT) {
6175 object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
6176 SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
6177 VERIFY0(zap_add(mos, vd->vdev_top_zap,
6178 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
6179 &object, tx));
6180 } else {
6181 VERIFY0(err);
6182 }
6183
6184 dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
6185 &entry, tx);
6186 }
6187
6188 void
6189 metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
6190 {
6191 ms->ms_unflushed_txg = txg;
6192 metaslab_update_ondisk_flush_data(ms, tx);
6193 }
6194
6195 boolean_t
6196 metaslab_unflushed_dirty(metaslab_t *ms)
6197 {
6198 return (ms->ms_unflushed_dirty);
6199 }
6200
6201 uint64_t
6202 metaslab_unflushed_txg(metaslab_t *ms)
6203 {
6204 return (ms->ms_unflushed_txg);
6205 }
6206
6207 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, U64, ZMOD_RW,
6208 "Allocation granularity (a.k.a. stripe size)");
6209
6210 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW,
6211 "Load all metaslabs when pool is first opened");
6212
6213 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
6214 "Prevent metaslabs from being unloaded");
6215
6216 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
6217 "Preload potential metaslabs during reassessment");
6218
6219 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_limit, UINT, ZMOD_RW,
6220 "Max number of metaslabs per group to preload");
6221
6222 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW,
6223 "Delay in txgs after metaslab was last used before unloading");
6224
6225 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW,
6226 "Delay in milliseconds after metaslab was last used before unloading");
6227
6228 /* BEGIN CSTYLED */
6229 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW,
6230 "Percentage of metaslab group size that should be free to make it "
6231 "eligible for allocation");
6232
6233 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW,
6234 "Percentage of metaslab group size that should be considered eligible "
6235 "for allocations unless all metaslab groups within the metaslab class "
6236 "have also crossed this threshold");
6237
6238 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT,
6239 ZMOD_RW,
6240 "Use the fragmentation metric to prefer less fragmented metaslabs");
6241 /* END CSTYLED */
6242
6243 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT,
6244 ZMOD_RW, "Fragmentation for metaslab to allow allocation");
6245
6246 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
6247 "Prefer metaslabs with lower LBAs");
6248
6249 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW,
6250 "Enable metaslab group biasing");
6251
6252 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT,
6253 ZMOD_RW, "Enable segment-based metaslab selection");
6254
6255 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
6256 "Segment-based metaslab selection maximum buckets before switching");
6257
6258 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, U64, ZMOD_RW,
6259 "Blocks larger than this size are sometimes forced to be gang blocks");
6260
6261 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging_pct, UINT, ZMOD_RW,
6262 "Percentage of large blocks that will be forced to be gang blocks");
6263
6264 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW,
6265 "Max distance (bytes) to search forward before using size tree");
6266
6267 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
6268 "When looking in size tree, use largest segment instead of exact fit");
6269
6270 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, U64,
6271 ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
6272
6273 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW,
6274 "Percentage of memory that can be used to store metaslab range trees");
6275
6276 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
6277 ZMOD_RW, "Try hard to allocate before ganging");
6278
6279 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW,
6280 "Normally only consider this many of the best metaslabs in each vdev");
6281
6282 /* BEGIN CSTYLED */
6283 ZFS_MODULE_PARAM_CALL(zfs, zfs_, active_allocator,
6284 param_set_active_allocator, param_get_charp, ZMOD_RW,
6285 "SPA active allocator");
6286 /* END CSTYLED */