/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zap.h>
#include <sys/btree.h>

#define	WITH_DF_BLOCK_ALLOCATOR

#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))

/*
 * Metaslab granularity, in bytes. This is roughly similar to what would be
 * referred to as the "stripe size" in traditional RAID arrays. In normal
 * operation, we will try to write this amount of data to each disk before
 * moving on to the next top-level vdev.
 */
static uint64_t metaslab_aliquot = 1024 * 1024;
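/*
 * For example: metaslab_group_activate() below scales this value by the
 * number of data disks in the top-level vdev, so a 10-wide raidz2 group
 * ends up with an aliquot of roughly 8 * 1MB = 8MB.
 */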

/*
 * For testing, make some blocks above a certain size be gang blocks.
 */
uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;

/*
 * Of blocks of size >= metaslab_force_ganging, actually gang them this often.
 */
uint_t metaslab_force_ganging_pct = 3;
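/*
 * For example: with the default metaslab_force_ganging of
 * SPA_MAXBLOCKSIZE + 1 no allocation can qualify, so forced ganging is
 * effectively off; lowering it (e.g. to 128K in a test pool) would gang
 * roughly 3% of the allocations at or above that size.
 */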

/*
 * In pools where the log space map feature is not enabled we touch
 * multiple metaslabs (and their respective space maps) with each
 * transaction group. Thus, we benefit from having a small space map
 * block size since it allows us to issue more I/O operations scattered
 * around the disk. So a sane default for the space map block size
 * is 8~16K.
 */
int zfs_metaslab_sm_blksz_no_log = (1 << 14);

/*
 * When the log space map feature is enabled, we accumulate a lot of
 * changes per metaslab that are flushed once in a while so we benefit
 * from a bigger block size like 128K for the metaslab space maps.
 */
int zfs_metaslab_sm_blksz_with_log = (1 << 17);

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
uint_t zfs_condense_pct = 200;
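/*
 * For example: with the default of 200, we wait until the on-disk space
 * map is roughly twice the size of its condensed in-core representation
 * before rewriting it.
 */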

/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
static const int zfs_metaslab_condense_block_threshold = 4;
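/*
 * For example: with ashift=12 and a 128K space map block size, space is
 * consumed in 128K increments, so we only condense once the uncondensed
 * space map spans more than 4 such blocks (512K).
 */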

/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
static uint_t zfs_mg_noalloc_threshold = 0;

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or
 * equal to zfs_mg_fragmentation_threshold. If a metaslab group
 * exceeds this threshold then it will be skipped unless all metaslab
 * groups within the metaslab class have also crossed this threshold.
 *
 * This tunable was introduced to avoid edge cases where we continue
 * allocating from very fragmented disks in our pool while other, less
 * fragmented disks exist. On the other hand, if all disks in the
 * pool are uniformly approaching the threshold, the threshold can
 * become a speed bump in performance, where we keep switching the disks
 * that we allocate from (e.g. we allocate some segments from disk A,
 * pushing it past the threshold, while freeing segments from disk B
 * brings its fragmentation back below the threshold).
 *
 * Empirically, we've seen that our vdev selection for allocations is
 * good enough that fragmentation increases uniformly across all vdevs
 * the majority of the time. Thus we set the threshold percentage high
 * enough to avoid hitting the speed bump on pools that are being pushed
 * to the edge.
 */
static uint_t zfs_mg_fragmentation_threshold = 95;

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
static uint_t zfs_metaslab_fragmentation_threshold = 70;

/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = B_FALSE;

/*
 * When set will prevent metaslabs from being unloaded.
 */
static int metaslab_debug_unload = B_FALSE;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
uint_t metaslab_df_free_pct = 4;

/*
 * Maximum distance to search forward from the last offset. Without this
 * limit, fragmented pools can see >100,000 iterations and
 * metaslab_block_picker() becomes the performance limiting factor on
 * high-performance storage.
 *
 * With the default setting of 16MB, we typically see less than 500
 * iterations, even with very fragmented, ashift=9 pools. The maximum number
 * of iterations possible is:
 *	metaslab_df_max_search / (2 * (1<<ashift))
 * With the default setting of 16MB this is 16*1024 (with ashift=9) or
 * 2048 (with ashift=12).
 */
static uint_t metaslab_df_max_search = 16 * 1024 * 1024;

/*
 * Forces the metaslab_block_picker function to search for at least this many
 * segments forwards until giving up on finding a segment that the allocation
 * will fit into.
 */
static const uint32_t metaslab_min_search_count = 100;

/*
 * If we are not searching forward (due to metaslab_df_max_search,
 * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
 * controls what segment is used. If it is set, we will use the largest free
 * segment. If it is not set, we will use a segment of exactly the requested
 * size (or larger).
 */
static int metaslab_df_use_largest_segment = B_FALSE;

/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;

/*
 * These tunables control how long a metaslab will remain loaded after the
 * last allocation from it. A metaslab can't be unloaded until at least
 * metaslab_unload_delay TXGs and metaslab_unload_delay_ms milliseconds
 * have elapsed. However, zfs_metaslab_mem_limit may cause it to be
 * unloaded sooner. These settings are intended to be generous -- to keep
 * metaslabs loaded for a long time, reducing the rate of metaslab loading.
 */
static uint_t metaslab_unload_delay = 32;
static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
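/*
 * Note that both conditions must hold before eviction; see
 * metaslab_class_evict_old(), which checks the TXG delta and the
 * wall-clock delta together.
 */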

/*
 * Max number of metaslabs per group to preload.
 */
uint_t metaslab_preload_limit = 10;

/*
 * Enable/disable preloading of metaslab.
 */
static int metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
static int metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
static int metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
static int metaslab_bias_enabled = B_TRUE;

/*
 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
 */
static const boolean_t zfs_remap_blkptr_enable = B_TRUE;

/*
 * Enable/disable segment-based metaslab selection.
 */
static int zfs_metaslab_segment_weight_enabled = B_TRUE;

/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
static int zfs_metaslab_switch_threshold = 2;

/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
static const boolean_t metaslab_trace_enabled = B_FALSE;

/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached, allowing for further investigation.
 */
static const uint64_t metaslab_trace_max_entries = 5000;

/*
 * Maximum number of metaslabs per group that can be disabled
 * simultaneously.
 */
static const int max_disabled_ms = 3;

/*
 * Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
 * To avoid 64-bit overflow, don't set above UINT32_MAX.
 */
static uint64_t zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */

/*
 * Maximum percentage of memory to use on storing loaded metaslabs. If loading
 * a metaslab would take it over this percentage, the oldest selected metaslab
 * is automatically unloaded.
 */
static uint_t zfs_metaslab_mem_limit = 25;

/*
 * Force the per-metaslab range trees to use 64-bit integers to store
 * segments. Used for debugging purposes.
 */
static const boolean_t zfs_metaslab_force_large_segs = B_FALSE;

/*
 * By default we only store segments over a certain size in the size-sorted
 * metaslab trees (ms_allocatable_by_size and
 * ms_unflushed_frees_by_size). This dramatically reduces memory usage and
 * improves load and unload times at the cost of causing us to use slightly
 * larger segments than we would otherwise in some cases.
 */
static const uint32_t metaslab_by_size_min_shift = 14;

/*
 * If not set, we will first try normal allocation. If that fails then
 * we will do a gang allocation. If that fails then we will do a "try hard"
 * gang allocation. If that fails then we will have a multi-layer gang
 * block.
 *
 * If set, we will first try normal allocation. If that fails then
 * we will do a "try hard" allocation. If that fails we will do a gang
 * allocation. If that fails we will do a "try hard" gang allocation. If
 * that fails then we will have a multi-layer gang block.
 */
static int zfs_metaslab_try_hard_before_gang = B_FALSE;
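/*
 * In short:
 *   unset: normal -> gang -> try-hard gang -> multi-layer gang
 *   set:   normal -> try-hard -> gang -> try-hard gang -> multi-layer gang
 */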

/*
 * When not trying hard, we only consider the best zfs_metaslab_find_max_tries
 * metaslabs. This improves performance, especially when there are many
 * metaslabs per vdev and the allocation can't actually be satisfied (so we
 * would otherwise iterate all the metaslabs). If there is a metaslab with a
 * worse weight but it can actually satisfy the allocation, we won't find it
 * until trying hard. This may happen if the worse metaslab is not loaded
 * (and the true weight is better than we have calculated), or due to weight
 * bucketization. E.g. we are looking for a 60K segment, and the best
 * metaslabs all have free segments in the 32-63K bucket, but the best
 * zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
 * subsequent metaslab has ms_max_size >60KB (but fewer segments in this
 * bucket, and therefore a lower weight).
 */
static uint_t zfs_metaslab_find_max_tries = 100;

static uint64_t metaslab_weight(metaslab_t *, boolean_t);
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);

static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
static unsigned int metaslab_idx_func(multilist_t *, void *);
static void metaslab_evict(metaslab_t *, uint64_t);
static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);
kmem_cache_t *metaslab_alloc_trace_cache;

typedef struct metaslab_stats {
	kstat_named_t metaslabstat_trace_over_limit;
	kstat_named_t metaslabstat_reload_tree;
	kstat_named_t metaslabstat_too_many_tries;
	kstat_named_t metaslabstat_try_hard;
} metaslab_stats_t;

static metaslab_stats_t metaslab_stats = {
	{ "trace_over_limit",	KSTAT_DATA_UINT64 },
	{ "reload_tree",	KSTAT_DATA_UINT64 },
	{ "too_many_tries",	KSTAT_DATA_UINT64 },
	{ "try_hard",		KSTAT_DATA_UINT64 },
};

#define	METASLABSTAT_BUMP(stat) \
	atomic_inc_64(&metaslab_stats.stat.value.ui64);
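/*
 * Usage example: METASLABSTAT_BUMP(metaslabstat_reload_tree) atomically
 * increments the corresponding kstat counter defined above.
 */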

static kstat_t *metaslab_ksp;

void
metaslab_stat_init(void)
{
	ASSERT(metaslab_alloc_trace_cache == NULL);
	metaslab_alloc_trace_cache = kmem_cache_create(
	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
	    "misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (metaslab_ksp != NULL) {
		metaslab_ksp->ks_data = &metaslab_stats;
		kstat_install(metaslab_ksp);
	}
}

void
metaslab_stat_fini(void)
{
	if (metaslab_ksp != NULL) {
		kstat_delete(metaslab_ksp);
		metaslab_ksp = NULL;
	}

	kmem_cache_destroy(metaslab_alloc_trace_cache);
	metaslab_alloc_trace_cache = NULL;
}

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(offsetof(metaslab_class_t,
	    mc_allocator[spa->spa_alloc_count]), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
	multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
	    offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
		mca->mca_rotor = NULL;
		zfs_refcount_create_tracked(&mca->mca_alloc_slots);
	}

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;

	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
		ASSERT(mca->mca_rotor == NULL);
		zfs_refcount_destroy(&mca->mca_alloc_slots);
	}
	mutex_destroy(&mc->mc_lock);
	multilist_destroy(&mc->mc_metaslab_txg_list);
	kmem_free(mc, offsetof(metaslab_class_t,
	    mc_allocator[spa->spa_alloc_count]));
}

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);

	return (0);
}

static void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	mutex_enter(&mc->mc_lock);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = vdev_get_mg(tvd, mc);

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));

		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
	}

	mutex_exit(&mc->mc_lock);
	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels,
		 * or vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
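	/*
	 * Space-weighted average: the sum of (group fragmentation * group
	 * space) divided by the total space in the class.
	 */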
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}

/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * Calculate if we have enough space to add additional
		 * metaslabs. We report the expandable space in terms
		 * of the metaslab size since that's the unit of expansion.
		 */
		space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
		    1ULL << tvd->vdev_ms_shift);
	}
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (space);
}

void
metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
{
	multilist_t *ml = &mc->mc_metaslab_txg_list;
	for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
		multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
		metaslab_t *msp = multilist_sublist_head(mls);
		multilist_sublist_unlock(mls);
		while (msp != NULL) {
			mutex_enter(&msp->ms_lock);

			/*
			 * If the metaslab has been removed from the list
			 * (which could happen if we were at the memory limit
			 * and it was evicted during this loop), then we can't
			 * proceed and we should restart the sublist.
			 */
			if (!multilist_link_active(&msp->ms_class_txg_node)) {
				mutex_exit(&msp->ms_lock);
				i--;
				break;
			}
			mls = multilist_sublist_lock(ml, i);
			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
			multilist_sublist_unlock(mls);
			if (txg >
			    msp->ms_selected_txg + metaslab_unload_delay &&
			    gethrtime() > msp->ms_selected_time +
			    (uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) {
				metaslab_evict(msp, txg);
			} else {
				/*
				 * Once we've hit a metaslab selected too
				 * recently to evict, we're done evicting for
				 * now.
				 */
				mutex_exit(&msp->ms_lock);
				break;
			}
			mutex_exit(&msp->ms_lock);
			msp = next_msp;
		}
	}
}

static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = (const metaslab_t *)x1;
	const metaslab_t *m2 = (const metaslab_t *)x2;

	int sort1 = 0;
	int sort2 = 0;
	if (m1->ms_allocator != -1 && m1->ms_primary)
		sort1 = 1;
	else if (m1->ms_allocator != -1 && !m1->ms_primary)
		sort1 = 2;
	if (m2->ms_allocator != -1 && m2->ms_primary)
		sort2 = 1;
	else if (m2->ms_allocator != -1 && !m2->ms_primary)
		sort2 = 2;

	/*
	 * Sort inactive metaslabs first, then primaries, then secondaries. When
	 * selecting a metaslab to allocate from, an allocator first tries its
	 * primary, then secondary active metaslab. If it doesn't have active
	 * metaslabs, or can't allocate from them, it searches for an inactive
	 * metaslab to activate. If it can't find a suitable one, it will steal
	 * a primary or secondary metaslab from another allocator.
	 */
	if (sort1 < sort2)
		return (-1);
	if (sort1 > sort2)
		return (1);

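	/*
	 * Within the same activation state, ties are broken by weight
	 * (descending -- note the reversed arguments below) and finally by
	 * start offset, so the ordering is total.
	 */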
	int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
	if (likely(cmp))
		return (cmp);

	IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);

	return (TREE_CMP(m1->ms_start, m2->ms_start));
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the capacity is above
 * the zfs_mg_noalloc_threshold and the fragmentation value is not
 * greater than zfs_mg_fragmentation_threshold. If a metaslab group
 * transitions from allocatable to non-allocatable or vice versa then the
 * metaslab group's class is updated to reflect the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;
	boolean_t was_initialized;

	ASSERT(vd == vd->vdev_top);
	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
	    SCL_ALLOC);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;
	was_initialized = mg->mg_initialized;

	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);

	mutex_enter(&mc->mc_lock);

	/*
	 * If the metaslab group was just added then it won't
	 * have any space until we finish syncing out this txg.
	 * At that point we will consider it initialized and available
	 * for allocations. We also don't consider non-activated
	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
	 * to be initialized, because they can't be used for allocation.
	 */
	mg->mg_initialized = metaslab_group_initialized(mg);
	if (!was_initialized && mg->mg_initialized) {
		mc->mc_groups++;
	} else if (was_initialized && !mg->mg_initialized) {
		ASSERT3U(mc->mc_groups, >, 0);
		mc->mc_groups--;
	}
	if (mg->mg_initialized)
		mg->mg_no_free_space = B_FALSE;

	/*
	 * A metaslab group is considered allocatable if it has plenty
	 * of free space or is not heavily fragmented. We only take
	 * fragmentation into account if the metaslab group has a valid
	 * fragmentation metric (i.e. a value between 0 and 100).
	 */
	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

	/*
	 * The mc_alloc_groups maintains a count of the number of
	 * groups in this metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;
	mutex_exit(&mc->mc_lock);

	mutex_exit(&mg->mg_lock);
}

int
metaslab_sort_by_flushed(const void *va, const void *vb)
{
	const metaslab_t *a = va;
	const metaslab_t *b = vb;

	int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
	if (likely(cmp))
		return (cmp);

	uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
	uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
	cmp = TREE_CMP(a_vdev_id, b_vdev_id);
	if (cmp)
		return (cmp);

	return (TREE_CMP(a->ms_id, b->ms_id));
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(offsetof(metaslab_group_t,
	    mg_allocator[allocators]), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;
	mg->mg_initialized = B_FALSE;
	mg->mg_no_free_space = B_TRUE;
	mg->mg_allocators = allocators;

	for (int i = 0; i < allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
	}

	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
	    maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	taskq_destroy(mg->mg_taskq);
	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	mutex_destroy(&mg->mg_ms_disabled_lock);
	cv_destroy(&mg->mg_ms_disabled_cv);

	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
	}
	kmem_free(mg, offsetof(metaslab_group_t,
	    mg_allocator[mg->mg_allocators]));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);

	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1,
	    vdev_get_ndisks(mg->mg_vd) - vdev_get_nparity(mg->mg_vd));
	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mc->mc_allocator[i].mca_rotor = mg;
		mg = mg->mg_next;
	}
}

/*
 * Passivate a metaslab group and remove it from the allocation rotor.
 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
 * a metaslab group. This function will momentarily drop spa_config_locks
 * that are lower than the SCL_ALLOC lock (see comment below).
 */
void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;
	int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);

	ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
	    (SCL_ALLOC | SCL_ZIO));

	if (--mg->mg_activation_count != 0) {
		for (int i = 0; i < spa->spa_alloc_count; i++)
			ASSERT(mc->mc_allocator[i].mca_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	/*
	 * The spa_config_lock is an array of rwlocks, ordered as
	 * follows (from highest to lowest):
	 *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
	 *	SCL_ZIO > SCL_FREE > SCL_VDEV
	 * (For more information about the spa_config_lock see spa_misc.c)
	 * The higher the lock, the broader its coverage. When we passivate
	 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
	 * config locks. However, the metaslab group's taskq might be trying
	 * to preload metaslabs so we must drop the SCL_ZIO lock and any
	 * lower locks to allow the I/O to complete. At a minimum,
	 * we continue to hold the SCL_ALLOC lock, which prevents any future
	 * allocations from taking place and any changes to the vdev tree.
	 */
	spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
	taskq_wait_outstanding(mg->mg_taskq, 0);
	spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
	metaslab_group_alloc_update(mg);
	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		metaslab_t *msp = mga->mga_primary;
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
		msp = mga->mga_secondary;
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
	}

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mgnext = NULL;
	} else {
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		if (mc->mc_allocator[i].mca_rotor == mg)
			mc->mc_allocator[i].mca_rotor = mgnext;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}

boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	vdev_stat_t *vs = &vd->vdev_stat;

	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}

uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
	/*
	 * Note that the number of nodes in mg_metaslab_tree may be one less
	 * than vdev_ms_count, due to the embedded log metaslab.
	 */
	mutex_enter(&mg->mg_lock);
	uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
	mutex_exit(&mg->mg_lock);
	return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
}

void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
	uint64_t *mg_hist;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
	    SPACE_MAP_HISTOGRAM_SIZE + ashift);

	mutex_enter(&mg->mg_lock);
	for (metaslab_t *msp = avl_first(t);
	    msp != NULL; msp = AVL_NEXT(t, msp)) {
		VERIFY3P(msp->ms_group, ==, mg);
		/* skip if not active */
		if (msp->ms_sm == NULL)
			continue;

		for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
			mg_hist[i + ashift] +=
			    msp->ms_sm->sm_phys->smp_histogram[i];
		}
	}

	for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);

	mutex_exit(&mg->mg_lock);

	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	mutex_enter(&mc->mc_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
		mg->mg_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mc->mc_lock);
	mutex_exit(&mg->mg_lock);
}

void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	mutex_enter(&mc->mc_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		ASSERT3U(mg->mg_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		ASSERT3U(mc->mc_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));

		mg->mg_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mc->mc_lock);
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	ASSERT(msp->ms_group == NULL);
	mutex_enter(&mg->mg_lock);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);

	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_add(mg, msp);
	mutex_exit(&msp->ms_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_remove(mg, msp);
	mutex_exit(&msp->ms_lock);

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);

	metaslab_class_t *mc = msp->ms_group->mg_class;
	multilist_sublist_t *mls =
	    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
	if (multilist_link_active(&msp->ms_class_txg_node))
		multilist_sublist_remove(mls, msp);
	multilist_sublist_unlock(mls);

	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(MUTEX_HELD(&mg->mg_lock));
	ASSERT(msp->ms_group == mg);

	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 511].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	metaslab_group_sort_impl(mg, msp, weight);
	mutex_exit(&mg->mg_lock);
}

/*
 * Calculate the fragmentation for a given metaslab group. We can use
 * a simple average here since all metaslabs within the group must have
 * the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in
 * this group have a fragmentation metric.
 */
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	uint64_t fragmentation = 0;
	uint64_t valid_ms = 0;

	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
			continue;
		if (msp->ms_group != mg)
			continue;

		valid_ms++;
		fragmentation += msp->ms_fragmentation;
	}

	if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
		return (ZFS_FRAG_INVALID);

	fragmentation /= valid_ms;
	ASSERT3U(fragmentation, <=, 100);
	return (fragmentation);
}

/*
 * Determine if a given metaslab group should skip allocations. A metaslab
 * group should avoid allocations if its free capacity is less than the
 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
 * that can still handle allocations. If the allocation throttle is enabled
 * then we skip allocations to devices that have reached their maximum
 * allocation queue depth unless the selected metaslab group is the only
 * eligible group remaining.
 */
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
    int flags, uint64_t psize, int allocator, int d)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_class_t *mc = mg->mg_class;

	/*
	 * We can only consider skipping this metaslab group if it's
	 * in the normal metaslab class and there are other metaslab
	 * groups to select from. Otherwise, we always consider it eligible
	 * for allocations.
	 */
	if ((mc != spa_normal_class(spa) &&
	    mc != spa_special_class(spa) &&
	    mc != spa_dedup_class(spa)) ||
	    mc->mc_groups <= 1)
		return (B_TRUE);

	/*
	 * If the metaslab group's mg_allocatable flag is set (see comments
	 * in metaslab_group_alloc_update() for more information) and
	 * the allocation throttle is disabled then allow allocations to this
	 * device. However, if the allocation throttle is enabled then
	 * check if we have reached our allocation limit (mga_alloc_queue_depth)
	 * to determine if we should allow allocations to this metaslab group.
	 * If all metaslab groups are no longer considered allocatable
	 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
	 * gang block size then we allow allocations on this metaslab group
	 * regardless of the mg_allocatable or throttle settings.
	 */
	if (mg->mg_allocatable) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
		int64_t qdepth;
		uint64_t qmax = mga->mga_cur_max_alloc_queue_depth;

		if (!mc->mc_alloc_throttle_enabled)
			return (B_TRUE);

		/*
		 * If this metaslab group does not have any free space, then
		 * there is no point in looking further.
		 */
		if (mg->mg_no_free_space)
			return (B_FALSE);

		/*
		 * Some allocations (e.g., those coming from device removal,
		 * where the allocations are not even counted in the metaslab
		 * allocation queues) are allowed to bypass the throttle.
		 */
1281 | if (flags & METASLAB_DONT_THROTTLE) | |
1282 | return (B_TRUE); | |
1283 | ||
c197a77c | 1284 | /* |
1285 | * Relax allocation throttling for ditto blocks. Due to | |
1286 | * random imbalances in allocation, copies tend to get pushed
1287 | * to whichever vdev looks a bit better at the moment.
1288 | */ | |
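	/*
	 * Illustrative note (added; not part of the original source):
	 * assuming d counts the copy (DVA) being allocated, the first
	 * copy (d == 0) keeps qmax unchanged, the second (d == 1) gets
	 * 1.25x, and the third (d == 2) gets 1.5x.
	 */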
1289 | qmax = qmax * (4 + d) / 4; | |
1290 | ||
32d805c3 | 1291 | qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth); |
3dfb57a3 DB |
1292 | |
1293 | /* | |
1294 | * If this metaslab group is below its qmax or it's | |
1295 | * the only allocatable metaslab group, then attempt
1296 | * to allocate from it. | |
1297 | */ | |
1298 | if (qdepth < qmax || mc->mc_alloc_groups == 1) | |
1299 | return (B_TRUE); | |
1300 | ASSERT3U(mc->mc_alloc_groups, >, 1); | |
1301 | ||
1302 | /* | |
1303 | * Since this metaslab group is at or over its qmax, we | |
1304 | * need to determine if there are metaslab groups after this | |
1305 | * one that might be able to handle this allocation. This is | |
1306 | * racy since we can't hold the locks for all metaslab | |
1307 | * groups at the same time when we make this check. | |
1308 | */ | |
32d805c3 MA |
1309 | for (metaslab_group_t *mgp = mg->mg_next; |
1310 | mgp != rotor; mgp = mgp->mg_next) { | |
1311 | metaslab_group_allocator_t *mgap = | |
1312 | &mgp->mg_allocator[allocator]; | |
1313 | qmax = mgap->mga_cur_max_alloc_queue_depth; | |
c197a77c | 1314 | qmax = qmax * (4 + d) / 4; |
32d805c3 MA |
1315 | qdepth = |
1316 | zfs_refcount_count(&mgap->mga_alloc_queue_depth); | |
3dfb57a3 DB |
1317 | |
1318 | /* | |
1319 | * If there is another metaslab group that | |
1320 | * might be able to handle the allocation, then | |
1321 | * we return false so that we skip this group. | |
1322 | */ | |
1323 | if (qdepth < qmax && !mgp->mg_no_free_space) | |
1324 | return (B_FALSE); | |
1325 | } | |
1326 | ||
1327 | /* | |
1328 | * We didn't find another group to handle the allocation | |
1329 | * so we can't skip this metaslab group even though | |
1330 | * we are at or over our qmax. | |
1331 | */ | |
1332 | return (B_TRUE); | |
1333 | ||
1334 | } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) { | |
1335 | return (B_TRUE); | |
1336 | } | |
1337 | return (B_FALSE); | |
ac72fac3 GW |
1338 | } |
1339 | ||
428870ff BB |
1340 | /* |
1341 | * ========================================================================== | |
93cf2076 | 1342 | * Range tree callbacks |
428870ff BB |
1343 | * ========================================================================== |
1344 | */ | |
93cf2076 GW |
1345 | |
1346 | /* | |
ca577779 PD |
1347 | * Comparison function for the private size-ordered tree using 32-bit |
1348 | * ranges. Tree is sorted by size, larger sizes at the end of the tree. | |
93cf2076 | 1349 | */ |
677c6f84 | 1350 | __attribute__((always_inline)) inline |
428870ff | 1351 | static int |
ca577779 | 1352 | metaslab_rangesize32_compare(const void *x1, const void *x2) |
428870ff | 1353 | { |
ca577779 PD |
1354 | const range_seg32_t *r1 = x1; |
1355 | const range_seg32_t *r2 = x2; | |
1356 | ||
93cf2076 GW |
1357 | uint64_t rs_size1 = r1->rs_end - r1->rs_start; |
1358 | uint64_t rs_size2 = r2->rs_end - r2->rs_start; | |
428870ff | 1359 | |
ca577779 | 1360 | int cmp = TREE_CMP(rs_size1, rs_size2); |
428870ff | 1361 | |
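	/* Sort primarily by segment size; equal sizes fall back to start offset. */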
677c6f84 | 1362 | return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start)); |
428870ff BB |
1363 | } |
1364 | ||
ca577779 PD |
1365 | /* |
1366 | * Comparison function for the private size-ordered tree using 64-bit | |
1367 | * ranges. Tree is sorted by size, larger sizes at the end of the tree. | |
1368 | */ | |
677c6f84 | 1369 | __attribute__((always_inline)) inline |
ca577779 PD |
1370 | static int |
1371 | metaslab_rangesize64_compare(const void *x1, const void *x2) | |
1372 | { | |
1373 | const range_seg64_t *r1 = x1; | |
1374 | const range_seg64_t *r2 = x2; | |
1375 | ||
1376 | uint64_t rs_size1 = r1->rs_end - r1->rs_start; | |
1377 | uint64_t rs_size2 = r2->rs_end - r2->rs_start; | |
1378 | ||
1379 | int cmp = TREE_CMP(rs_size1, rs_size2); | |
ca577779 | 1380 | |
677c6f84 | 1381 | return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start)); |
ca577779 | 1382 | } |
677c6f84 | 1383 | |
ca577779 PD |
1384 | typedef struct metaslab_rt_arg { |
1385 | zfs_btree_t *mra_bt; | |
1386 | uint32_t mra_floor_shift; | |
1387 | } metaslab_rt_arg_t; | |
1388 | ||
1389 | struct mssa_arg { | |
1390 | range_tree_t *rt; | |
1391 | metaslab_rt_arg_t *mra; | |
1392 | }; | |
1393 | ||
1394 | static void | |
1395 | metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size) | |
1396 | { | |
1397 | struct mssa_arg *mssap = arg; | |
1398 | range_tree_t *rt = mssap->rt; | |
1399 | metaslab_rt_arg_t *mrap = mssap->mra; | |
1400 | range_seg_max_t seg = {0}; | |
1401 | rs_set_start(&seg, rt, start); | |
1402 | rs_set_end(&seg, rt, start + size); | |
1403 | metaslab_rt_add(rt, &seg, mrap); | |
1404 | } | |
1405 | ||
1406 | static void | |
1407 | metaslab_size_tree_full_load(range_tree_t *rt) | |
1408 | { | |
1409 | metaslab_rt_arg_t *mrap = rt->rt_arg; | |
ca577779 | 1410 | METASLABSTAT_BUMP(metaslabstat_reload_tree); |
ca577779 PD |
1411 | ASSERT0(zfs_btree_numnodes(mrap->mra_bt)); |
1412 | mrap->mra_floor_shift = 0; | |
1413 | struct mssa_arg arg = {0}; | |
1414 | arg.rt = rt; | |
1415 | arg.mra = mrap; | |
1416 | range_tree_walk(rt, metaslab_size_sorted_add, &arg); | |
1417 | } | |
1418 | ||
677c6f84 RY |
1419 | |
1420 | ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize32_in_buf, | |
1421 | range_seg32_t, metaslab_rangesize32_compare) | |
1422 | ||
1423 | ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize64_in_buf, | |
1424 | range_seg64_t, metaslab_rangesize64_compare) | |
1425 | ||
ca577779 PD |
1426 | /* |
1427 | * Create any block allocator specific components. The current allocators | |
1428 | * rely on using both a size-ordered range_tree_t and an array of uint64_t's. | |
1429 | */ | |
ca577779 PD |
1430 | static void |
1431 | metaslab_rt_create(range_tree_t *rt, void *arg) | |
1432 | { | |
1433 | metaslab_rt_arg_t *mrap = arg; | |
1434 | zfs_btree_t *size_tree = mrap->mra_bt; | |
1435 | ||
1436 | size_t size; | |
1437 | int (*compare) (const void *, const void *); | |
677c6f84 | 1438 | bt_find_in_buf_f bt_find; |
ca577779 PD |
1439 | switch (rt->rt_type) { |
1440 | case RANGE_SEG32: | |
1441 | size = sizeof (range_seg32_t); | |
1442 | compare = metaslab_rangesize32_compare; | |
677c6f84 | 1443 | bt_find = metaslab_rt_find_rangesize32_in_buf; |
ca577779 PD |
1444 | break; |
1445 | case RANGE_SEG64: | |
1446 | size = sizeof (range_seg64_t); | |
1447 | compare = metaslab_rangesize64_compare; | |
677c6f84 | 1448 | bt_find = metaslab_rt_find_rangesize64_in_buf; |
ca577779 PD |
1449 | break; |
1450 | default: | |
1451 | panic("Invalid range seg type %d", rt->rt_type); | |
1452 | } | |
677c6f84 | 1453 | zfs_btree_create(size_tree, compare, bt_find, size); |
ca577779 PD |
1454 | mrap->mra_floor_shift = metaslab_by_size_min_shift; |
1455 | } | |
1456 | ||
ca577779 PD |
1457 | static void |
1458 | metaslab_rt_destroy(range_tree_t *rt, void *arg) | |
1459 | { | |
14e4e3cb | 1460 | (void) rt; |
ca577779 PD |
1461 | metaslab_rt_arg_t *mrap = arg; |
1462 | zfs_btree_t *size_tree = mrap->mra_bt; | |
1463 | ||
1464 | zfs_btree_destroy(size_tree); | |
1465 | kmem_free(mrap, sizeof (*mrap)); | |
1466 | } | |
1467 | ||
ca577779 PD |
1468 | static void |
1469 | metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg) | |
1470 | { | |
1471 | metaslab_rt_arg_t *mrap = arg; | |
1472 | zfs_btree_t *size_tree = mrap->mra_bt; | |
1473 | ||
1474 | if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < | |
e506a0ce | 1475 | (1ULL << mrap->mra_floor_shift)) |
ca577779 PD |
1476 | return; |
1477 | ||
1478 | zfs_btree_add(size_tree, rs); | |
1479 | } | |
1480 | ||
ca577779 PD |
1481 | static void |
1482 | metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg) | |
1483 | { | |
1484 | metaslab_rt_arg_t *mrap = arg; | |
1485 | zfs_btree_t *size_tree = mrap->mra_bt; | |
1486 | ||
e506a0ce | 1487 | if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1ULL << |
ca577779 PD |
1488 | mrap->mra_floor_shift)) |
1489 | return; | |
1490 | ||
1491 | zfs_btree_remove(size_tree, rs); | |
1492 | } | |
1493 | ||
ca577779 PD |
1494 | static void |
1495 | metaslab_rt_vacate(range_tree_t *rt, void *arg) | |
1496 | { | |
1497 | metaslab_rt_arg_t *mrap = arg; | |
1498 | zfs_btree_t *size_tree = mrap->mra_bt; | |
1499 | zfs_btree_clear(size_tree); | |
1500 | zfs_btree_destroy(size_tree); | |
1501 | ||
1502 | metaslab_rt_create(rt, arg); | |
1503 | } | |
1504 | ||
18168da7 | 1505 | static const range_tree_ops_t metaslab_rt_ops = { |
ca577779 PD |
1506 | .rtop_create = metaslab_rt_create, |
1507 | .rtop_destroy = metaslab_rt_destroy, | |
1508 | .rtop_add = metaslab_rt_add, | |
1509 | .rtop_remove = metaslab_rt_remove, | |
1510 | .rtop_vacate = metaslab_rt_vacate | |
1511 | }; | |
1512 | ||
93cf2076 GW |
1513 | /* |
1514 | * ========================================================================== | |
4e21fd06 | 1515 | * Common allocator routines |
93cf2076 GW |
1516 | * ========================================================================== |
1517 | */ | |
1518 | ||
9babb374 | 1519 | /* |
428870ff | 1520 | * Return the maximum contiguous segment within the metaslab. |
9babb374 | 1521 | */ |
9babb374 | 1522 | uint64_t |
c81f1790 | 1523 | metaslab_largest_allocatable(metaslab_t *msp) |
9babb374 | 1524 | { |
ca577779 | 1525 | zfs_btree_t *t = &msp->ms_allocatable_by_size; |
93cf2076 | 1526 | range_seg_t *rs; |
9babb374 | 1527 | |
c81f1790 PD |
1528 | if (t == NULL) |
1529 | return (0); | |
ca577779 PD |
1530 | if (zfs_btree_numnodes(t) == 0) |
1531 | metaslab_size_tree_full_load(msp->ms_allocatable); | |
1532 | ||
1533 | rs = zfs_btree_last(t, NULL); | |
c81f1790 PD |
1534 | if (rs == NULL) |
1535 | return (0); | |
9babb374 | 1536 | |
ca577779 PD |
1537 | return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs, |
1538 | msp->ms_allocatable)); | |
93cf2076 GW |
1539 | } |
1540 | ||
c81f1790 PD |
1541 | /* |
1542 | * Return the maximum contiguous segment within the unflushed frees of this | |
1543 | * metaslab. | |
1544 | */ | |
65c7cc49 | 1545 | static uint64_t |
c81f1790 PD |
1546 | metaslab_largest_unflushed_free(metaslab_t *msp) |
1547 | { | |
1548 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
1549 | ||
1550 | if (msp->ms_unflushed_frees == NULL) | |
1551 | return (0); | |
1552 | ||
ca577779 PD |
1553 | if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0) |
1554 | metaslab_size_tree_full_load(msp->ms_unflushed_frees); | |
1555 | range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size, | |
1556 | NULL); | |
c81f1790 PD |
1557 | if (rs == NULL) |
1558 | return (0); | |
1559 | ||
1560 | /* | |
1561 | * When a range is freed from the metaslab, that range is added to | |
1562 | * both the unflushed frees and the deferred frees. While the block | |
1563 | * will eventually be usable, if the metaslab were loaded the range | |
1564 | * would not be added to the ms_allocatable tree until TXG_DEFER_SIZE | |
1565 | * txgs had passed. As a result, when attempting to estimate an upper | |
1566 | * bound for the largest currently-usable free segment in the | |
1567 | * metaslab, we need to not consider any ranges currently in the defer | |
1568 | * trees. This algorithm approximates the largest available chunk in | |
1569 | * the largest range in the unflushed_frees tree by taking the first | |
1570 | * chunk. While this may be a poor estimate, it should only remain so | |
1571 | * briefly and should eventually self-correct as frees are no longer | |
1572 | * deferred. Similar logic applies to the ms_freed tree. See | |
1573 | * metaslab_load() for more details. | |
1574 | * | |
e1cfd73f | 1575 | * There are two primary sources of inaccuracy in this estimate. Both |
c81f1790 PD |
1576 | * are tolerated for performance reasons. The first source is that we |
1577 | * only check the largest segment for overlaps. Smaller segments may | |
1578 | * have more favorable overlaps with the other trees, resulting in | |
1579 | * larger usable chunks. Second, we only look at the first chunk in | |
1580 | * the largest segment; there may be other usable chunks in the | |
1581 | * largest segment, but we ignore them. | |
1582 | */ | |
ca577779 PD |
1583 | uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees); |
1584 | uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart; | |
c81f1790 PD |
1585 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { |
1586 | uint64_t start = 0; | |
1587 | uint64_t size = 0; | |
1588 | boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart, | |
1589 | rsize, &start, &size); | |
1590 | if (found) { | |
1591 | if (rstart == start) | |
1592 | return (0); | |
1593 | rsize = start - rstart; | |
1594 | } | |
1595 | } | |
1596 | ||
1597 | uint64_t start = 0; | |
1598 | uint64_t size = 0; | |
1599 | boolean_t found = range_tree_find_in(msp->ms_freed, rstart, | |
1600 | rsize, &start, &size); | |
1601 | if (found) | |
1602 | rsize = start - rstart; | |
1603 | ||
1604 | return (rsize); | |
1605 | } | |
1606 | ||
4e21fd06 | 1607 | static range_seg_t * |
ca577779 PD |
1608 | metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start, |
1609 | uint64_t size, zfs_btree_index_t *where) | |
93cf2076 | 1610 | { |
ca577779 PD |
1611 | range_seg_t *rs; |
1612 | range_seg_max_t rsearch; | |
93cf2076 | 1613 | |
ca577779 PD |
1614 | rs_set_start(&rsearch, rt, start); |
1615 | rs_set_end(&rsearch, rt, start + size); | |
93cf2076 | 1616 | |
ca577779 | 1617 | rs = zfs_btree_find(t, &rsearch, where); |
4e21fd06 | 1618 | if (rs == NULL) { |
ca577779 | 1619 | rs = zfs_btree_next(t, where, where); |
93cf2076 | 1620 | } |
93cf2076 | 1621 | |
4e21fd06 DB |
1622 | return (rs); |
1623 | } | |
93cf2076 | 1624 | |
d3230d76 | 1625 | #if defined(WITH_DF_BLOCK_ALLOCATOR) || \ |
93cf2076 | 1626 | defined(WITH_CF_BLOCK_ALLOCATOR) |
b2255edc | 1627 | |
93cf2076 | 1628 | /* |
ca577779 PD |
1629 | * This is a helper function that can be used by the allocator to find a |
1630 | * suitable block to allocate. This will search the specified B-tree looking | |
1631 | * for a block that matches the specified criteria. | |
93cf2076 GW |
1632 | */ |
1633 | static uint64_t | |
ca577779 | 1634 | metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size, |
d3230d76 | 1635 | uint64_t max_search) |
93cf2076 | 1636 | { |
ca577779 PD |
1637 | if (*cursor == 0) |
1638 | *cursor = rt->rt_start; | |
1639 | zfs_btree_t *bt = &rt->rt_root; | |
1640 | zfs_btree_index_t where; | |
1641 | range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where); | |
d3230d76 | 1642 | uint64_t first_found; |
ca577779 | 1643 | int count_searched = 0; |
93cf2076 | 1644 | |
d3230d76 | 1645 | if (rs != NULL) |
ca577779 | 1646 | first_found = rs_get_start(rs, rt); |
93cf2076 | 1647 | |
ca577779 PD |
1648 | while (rs != NULL && (rs_get_start(rs, rt) - first_found <= |
1649 | max_search || count_searched < metaslab_min_search_count)) { | |
1650 | uint64_t offset = rs_get_start(rs, rt); | |
1651 | if (offset + size <= rs_get_end(rs, rt)) { | |
93cf2076 GW |
1652 | *cursor = offset + size; |
1653 | return (offset); | |
1654 | } | |
ca577779 PD |
1655 | rs = zfs_btree_next(bt, &where, &where); |
1656 | count_searched++; | |
93cf2076 GW |
1657 | } |
1658 | ||
93cf2076 | 1659 | *cursor = 0; |
d3230d76 | 1660 | return (-1ULL); |
9babb374 | 1661 | } |
d3230d76 | 1662 | #endif /* WITH_DF/CF_BLOCK_ALLOCATOR */ |
22c81dd8 BB |
1663 | |
1664 | #if defined(WITH_DF_BLOCK_ALLOCATOR) | |
428870ff BB |
1665 | /* |
1666 | * ========================================================================== | |
d3230d76 MA |
1667 | * Dynamic Fit (df) block allocator |
1668 | * | |
1669 | * Search for a free chunk of at least this size, starting from the last | |
1670 | * offset (for this alignment of block) looking for up to | |
1671 | * metaslab_df_max_search bytes (16MB). If a large enough free chunk is not | |
1672 | * found within 16MB, then return a free chunk of exactly the requested size (or | |
1673 | * larger). | |
1674 | * | |
1675 | * If it seems like searching from the last offset will be unproductive, skip | |
1676 | * that and just return a free chunk of exactly the requested size (or larger). | |
1677 | * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This | |
1678 | * mechanism is probably not very useful and may be removed in the future. | |
1679 | * | |
1680 | * The behavior when not searching can be changed to return the largest free | |
1681 | * chunk, instead of a free chunk of exactly the requested size, by setting | |
1682 | * metaslab_df_use_largest_segment. | |
428870ff BB |
1683 | * ========================================================================== |
1684 | */ | |
9babb374 | 1685 | static uint64_t |
93cf2076 | 1686 | metaslab_df_alloc(metaslab_t *msp, uint64_t size) |
9babb374 | 1687 | { |
93cf2076 GW |
1688 | /* |
1689 | * Find the largest power of 2 block size that evenly divides the | |
1690 | * requested size. This is used to try to allocate blocks with similar | |
1691 | * alignment from the same area of the metaslab (i.e. same cursor | |
1692 | * bucket), though allocations of other sizes may still exist
1693 | * in the same region.
1694 | */ | |
9babb374 | 1695 | uint64_t align = size & -size; |
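	/*
	 * Illustrative note (added): size & -size isolates the lowest set
	 * bit, e.g. a 24K (0x6000) request yields align == 8K (0x2000),
	 * so it shares the 8K-alignment cursor bucket.
	 */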
9bd274dd | 1696 | uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1]; |
d2734cce | 1697 | range_tree_t *rt = msp->ms_allocatable; |
fdc2d303 | 1698 | uint_t free_pct = range_tree_space(rt) * 100 / msp->ms_size; |
d3230d76 | 1699 | uint64_t offset; |
9babb374 | 1700 | |
93cf2076 | 1701 | ASSERT(MUTEX_HELD(&msp->ms_lock)); |
9babb374 | 1702 | |
9babb374 | 1703 | /* |
d3230d76 MA |
1704 | * If we're running low on space, find a segment based on size, |
1705 | * rather than iterating based on offset. | |
9babb374 | 1706 | */ |
c81f1790 | 1707 | if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold || |
9babb374 | 1708 | free_pct < metaslab_df_free_pct) { |
d3230d76 MA |
1709 | offset = -1; |
1710 | } else { | |
ca577779 | 1711 | offset = metaslab_block_picker(rt, |
d3230d76 | 1712 | cursor, size, metaslab_df_max_search); |
9babb374 BB |
1713 | } |
1714 | ||
d3230d76 MA |
1715 | if (offset == -1) { |
1716 | range_seg_t *rs; | |
ca577779 PD |
1717 | if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0) |
1718 | metaslab_size_tree_full_load(msp->ms_allocatable); | |
b2255edc | 1719 | |
d3230d76 MA |
1720 | if (metaslab_df_use_largest_segment) { |
1721 | /* use largest free segment */ | |
ca577779 | 1722 | rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL); |
d3230d76 | 1723 | } else { |
ca577779 | 1724 | zfs_btree_index_t where; |
d3230d76 MA |
1725 | /* use segment of this size, or next largest */ |
1726 | rs = metaslab_block_find(&msp->ms_allocatable_by_size, | |
ca577779 | 1727 | rt, msp->ms_start, size, &where); |
d3230d76 | 1728 | } |
ca577779 PD |
1729 | if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs, |
1730 | rt)) { | |
1731 | offset = rs_get_start(rs, rt); | |
d3230d76 MA |
1732 | *cursor = offset + size; |
1733 | } | |
1734 | } | |
1735 | ||
1736 | return (offset); | |
9babb374 BB |
1737 | } |
1738 | ||
18168da7 | 1739 | const metaslab_ops_t zfs_metaslab_ops = { |
f3a7f661 | 1740 | metaslab_df_alloc |
34dc7c2f | 1741 | }; |
22c81dd8 BB |
1742 | #endif /* WITH_DF_BLOCK_ALLOCATOR */ |
1743 | ||
93cf2076 | 1744 | #if defined(WITH_CF_BLOCK_ALLOCATOR) |
428870ff BB |
1745 | /* |
1746 | * ========================================================================== | |
93cf2076 GW |
1747 | * Cursor fit block allocator - |
1748 | * Select the largest region in the metaslab, set the cursor to the beginning | |
1749 | * of the range and the cursor_end to the end of the range. As allocations | |
1750 | * are made advance the cursor. Continue allocating from the cursor until | |
1751 | * the range is exhausted and then find a new range. | |
428870ff BB |
1752 | * ========================================================================== |
1753 | */ | |
1754 | static uint64_t | |
93cf2076 | 1755 | metaslab_cf_alloc(metaslab_t *msp, uint64_t size) |
428870ff | 1756 | { |
d2734cce | 1757 | range_tree_t *rt = msp->ms_allocatable; |
ca577779 | 1758 | zfs_btree_t *t = &msp->ms_allocatable_by_size; |
93cf2076 GW |
1759 | uint64_t *cursor = &msp->ms_lbas[0]; |
1760 | uint64_t *cursor_end = &msp->ms_lbas[1]; | |
428870ff BB |
1761 | uint64_t offset = 0; |
1762 | ||
93cf2076 | 1763 | ASSERT(MUTEX_HELD(&msp->ms_lock)); |
428870ff | 1764 | |
93cf2076 | 1765 | ASSERT3U(*cursor_end, >=, *cursor); |
428870ff | 1766 | |
93cf2076 GW |
1767 | if ((*cursor + size) > *cursor_end) { |
1768 | range_seg_t *rs; | |
428870ff | 1769 | |
ca577779 PD |
1770 | if (zfs_btree_numnodes(t) == 0) |
1771 | metaslab_size_tree_full_load(msp->ms_allocatable); | |
1772 | rs = zfs_btree_last(t, NULL); | |
1773 | if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < | |
1774 | size) | |
93cf2076 | 1775 | return (-1ULL); |
428870ff | 1776 | |
ca577779 PD |
1777 | *cursor = rs_get_start(rs, rt); |
1778 | *cursor_end = rs_get_end(rs, rt); | |
428870ff | 1779 | } |
93cf2076 GW |
1780 | |
1781 | offset = *cursor; | |
1782 | *cursor += size; | |
1783 | ||
428870ff BB |
1784 | return (offset); |
1785 | } | |
1786 | ||
18168da7 | 1787 | const metaslab_ops_t zfs_metaslab_ops = { |
f3a7f661 | 1788 | metaslab_cf_alloc |
428870ff | 1789 | }; |
93cf2076 | 1790 | #endif /* WITH_CF_BLOCK_ALLOCATOR */ |
22c81dd8 BB |
1791 | |
1792 | #if defined(WITH_NDF_BLOCK_ALLOCATOR) | |
93cf2076 GW |
1793 | /* |
1794 | * ========================================================================== | |
1795 | * New dynamic fit allocator - | |
1796 | * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift | |
1797 | * contiguous blocks. If no region is found then just use the largest segment | |
1798 | * that remains. | |
1799 | * ========================================================================== | |
1800 | */ | |
1801 | ||
1802 | /* | |
1803 | * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift) | |
1804 | * to request from the allocator. | |
1805 | */ | |
428870ff BB |
1806 | uint64_t metaslab_ndf_clump_shift = 4; |
1807 | ||
1808 | static uint64_t | |
93cf2076 | 1809 | metaslab_ndf_alloc(metaslab_t *msp, uint64_t size) |
428870ff | 1810 | { |
ca577779 PD |
1811 | zfs_btree_t *t = &msp->ms_allocatable->rt_root; |
1812 | range_tree_t *rt = msp->ms_allocatable; | |
1813 | zfs_btree_index_t where; | |
1814 | range_seg_t *rs; | |
1815 | range_seg_max_t rsearch; | |
9bd274dd | 1816 | uint64_t hbit = highbit64(size); |
93cf2076 | 1817 | uint64_t *cursor = &msp->ms_lbas[hbit - 1]; |
c81f1790 | 1818 | uint64_t max_size = metaslab_largest_allocatable(msp); |
428870ff | 1819 | |
93cf2076 | 1820 | ASSERT(MUTEX_HELD(&msp->ms_lock)); |
428870ff BB |
1821 | |
1822 | if (max_size < size) | |
1823 | return (-1ULL); | |
1824 | ||
ca577779 PD |
1825 | rs_set_start(&rsearch, rt, *cursor); |
1826 | rs_set_end(&rsearch, rt, *cursor + size); | |
428870ff | 1827 | |
ca577779 PD |
1828 | rs = zfs_btree_find(t, &rsearch, &where); |
1829 | if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) { | |
d2734cce | 1830 | t = &msp->ms_allocatable_by_size; |
428870ff | 1831 | |
ca577779 PD |
1832 | rs_set_start(&rsearch, rt, 0); |
1833 | rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit + | |
1834 | metaslab_ndf_clump_shift))); | |
1835 | ||
1836 | rs = zfs_btree_find(t, &rsearch, &where); | |
93cf2076 | 1837 | if (rs == NULL) |
ca577779 | 1838 | rs = zfs_btree_next(t, &where, &where); |
93cf2076 | 1839 | ASSERT(rs != NULL); |
428870ff BB |
1840 | } |
1841 | ||
ca577779 PD |
1842 | if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) { |
1843 | *cursor = rs_get_start(rs, rt) + size; | |
1844 | return (rs_get_start(rs, rt)); | |
428870ff BB |
1845 | } |
1846 | return (-1ULL); | |
1847 | } | |
1848 | ||
18168da7 | 1849 | const metaslab_ops_t zfs_metaslab_ops = { |
f3a7f661 | 1850 | metaslab_ndf_alloc |
428870ff | 1851 | }; |
22c81dd8 | 1852 | #endif /* WITH_NDF_BLOCK_ALLOCATOR */ |
9babb374 | 1853 | |
93cf2076 | 1854 | |
34dc7c2f BB |
1855 | /* |
1856 | * ========================================================================== | |
1857 | * Metaslabs | |
1858 | * ========================================================================== | |
1859 | */ | |
93cf2076 | 1860 | |
93e28d66 SD |
1861 | /* |
1862 | * Wait for any in-progress metaslab loads to complete. | |
1863 | */ | |
65c7cc49 | 1864 | static void |
93e28d66 SD |
1865 | metaslab_load_wait(metaslab_t *msp) |
1866 | { | |
1867 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
1868 | ||
1869 | while (msp->ms_loading) { | |
1870 | ASSERT(!msp->ms_loaded); | |
1871 | cv_wait(&msp->ms_load_cv, &msp->ms_lock); | |
1872 | } | |
1873 | } | |
1874 | ||
1875 | /* | |
1876 | * Wait for any in-progress flushing to complete. | |
1877 | */ | |
65c7cc49 | 1878 | static void |
93e28d66 SD |
1879 | metaslab_flush_wait(metaslab_t *msp) |
1880 | { | |
1881 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
1882 | ||
1883 | while (msp->ms_flushing) | |
1884 | cv_wait(&msp->ms_flush_cv, &msp->ms_lock); | |
1885 | } | |
1886 | ||
f09fda50 PD |
1887 | static unsigned int |
1888 | metaslab_idx_func(multilist_t *ml, void *arg) | |
1889 | { | |
1890 | metaslab_t *msp = arg; | |
5b7053a9 AM |
1891 | |
1892 | /* | |
1893 | * ms_id values are allocated sequentially, so full 64bit | |
1894 | * division would be a waste of time; limit it to 32 bits.
1895 | */ | |
1896 | return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml)); | |
f09fda50 PD |
1897 | } |
1898 | ||
93e28d66 SD |
1899 | uint64_t |
1900 | metaslab_allocated_space(metaslab_t *msp) | |
1901 | { | |
1902 | return (msp->ms_allocated_space); | |
1903 | } | |
1904 | ||
1905 | /* | |
1906 | * Verify that the space accounting on disk matches the in-core range_trees. | |
1907 | */ | |
1908 | static void | |
1909 | metaslab_verify_space(metaslab_t *msp, uint64_t txg) | |
1910 | { | |
1911 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
1912 | uint64_t allocating = 0; | |
1913 | uint64_t sm_free_space, msp_free_space; | |
1914 | ||
1915 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
1916 | ASSERT(!msp->ms_condensing); | |
1917 | ||
1918 | if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) | |
1919 | return; | |
1920 | ||
1921 | /* | |
1922 | * We can only verify the metaslab space when we're called | |
1923 | * from syncing context with a loaded metaslab that has an | |
1924 | * allocated space map. Calling this in non-syncing context | |
1925 | * does not provide a consistent view of the metaslab since | |
1926 | * we're performing allocations in the future. | |
1927 | */ | |
1928 | if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL || | |
1929 | !msp->ms_loaded) | |
1930 | return; | |
1931 | ||
1932 | /* | |
1933 | * Even though the smp_alloc field can get negative, | |
1934 | * when it comes to a metaslab's space map, that should | |
1935 | * never be the case. | |
1936 | */ | |
1937 | ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0); | |
1938 | ||
1939 | ASSERT3U(space_map_allocated(msp->ms_sm), >=, | |
1940 | range_tree_space(msp->ms_unflushed_frees)); | |
1941 | ||
1942 | ASSERT3U(metaslab_allocated_space(msp), ==, | |
1943 | space_map_allocated(msp->ms_sm) + | |
1944 | range_tree_space(msp->ms_unflushed_allocs) - | |
1945 | range_tree_space(msp->ms_unflushed_frees)); | |
1946 | ||
1947 | sm_free_space = msp->ms_size - metaslab_allocated_space(msp); | |
1948 | ||
1949 | /* | |
1950 | * Account for future allocations since we would have | |
1951 | * already deducted that space from the ms_allocatable. | |
1952 | */ | |
1953 | for (int t = 0; t < TXG_CONCURRENT_STATES; t++) { | |
1954 | allocating += | |
1955 | range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]); | |
1956 | } | |
f09fda50 PD |
1957 | ASSERT3U(allocating + msp->ms_allocated_this_txg, ==, |
1958 | msp->ms_allocating_total); | |
93e28d66 SD |
1959 | |
1960 | ASSERT3U(msp->ms_deferspace, ==, | |
1961 | range_tree_space(msp->ms_defer[0]) + | |
1962 | range_tree_space(msp->ms_defer[1])); | |
1963 | ||
1964 | msp_free_space = range_tree_space(msp->ms_allocatable) + allocating + | |
1965 | msp->ms_deferspace + range_tree_space(msp->ms_freed); | |
1966 | ||
1967 | VERIFY3U(sm_free_space, ==, msp_free_space); | |
1968 | } | |
1969 | ||
928e8ad4 SD |
1970 | static void |
1971 | metaslab_aux_histograms_clear(metaslab_t *msp) | |
1972 | { | |
1973 | /* | |
1974 | * Auxiliary histograms are only cleared when resetting them, | |
1975 | * which can only happen while the metaslab is loaded. | |
1976 | */ | |
1977 | ASSERT(msp->ms_loaded); | |
1978 | ||
861166b0 | 1979 | memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist)); |
928e8ad4 | 1980 | for (int t = 0; t < TXG_DEFER_SIZE; t++) |
861166b0 | 1981 | memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t])); |
928e8ad4 SD |
1982 | } |
1983 | ||
1984 | static void | |
1985 | metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift, | |
1986 | range_tree_t *rt) | |
1987 | { | |
1988 | /* | |
1989 | * This is modeled after space_map_histogram_add(), so refer to that | |
1990 | * function for implementation details. We want this to work like | |
1991 | * the space map histogram, and not the range tree histogram, as we | |
1992 | * are essentially constructing a delta that will be later subtracted | |
1993 | * from the space map histogram. | |
1994 | */ | |
1995 | int idx = 0; | |
1996 | for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) { | |
1997 | ASSERT3U(i, >=, idx + shift); | |
1998 | histogram[idx] += rt->rt_histogram[i] << (i - idx - shift); | |
1999 | ||
2000 | if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) { | |
2001 | ASSERT3U(idx + shift, ==, i); | |
2002 | idx++; | |
2003 | ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE); | |
2004 | } | |
2005 | } | |
2006 | } | |
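/*
 * Illustrative note (added; not part of the original source): while idx can
 * still advance, bucket rt_histogram[i] maps one-to-one onto
 * histogram[i - shift]; once idx is pinned at SPACE_MAP_HISTOGRAM_SIZE - 1,
 * larger segments are folded into that last bucket with weight
 * 1 << (i - idx - shift), i.e. as the equivalent number of top-bucket-sized
 * segments.
 */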
2007 | ||
2008 | /* | |
2009 | * Called at every sync pass that the metaslab gets synced. | |
2010 | * | |
2011 | * The reason is that we want our auxiliary histograms to be updated | |
2012 | * wherever the metaslab's space map histogram is updated. This way | |
2013 | * we stay consistent on which parts of the metaslab space map's | |
2014 | * histogram are currently not available for allocations (e.g. because
2015 | * they are in the defer, freed, and freeing trees). | |
2016 | */ | |
2017 | static void | |
2018 | metaslab_aux_histograms_update(metaslab_t *msp) | |
2019 | { | |
2020 | space_map_t *sm = msp->ms_sm; | |
2021 | ASSERT(sm != NULL); | |
2022 | ||
2023 | /* | |
2024 | * This is similar to the metaslab's space map histogram updates | |
2025 | * that take place in metaslab_sync(). The only difference is that | |
2026 | * we only care about segments that haven't made it into the | |
2027 | * ms_allocatable tree yet. | |
2028 | */ | |
2029 | if (msp->ms_loaded) { | |
2030 | metaslab_aux_histograms_clear(msp); | |
2031 | ||
2032 | metaslab_aux_histogram_add(msp->ms_synchist, | |
2033 | sm->sm_shift, msp->ms_freed); | |
2034 | ||
2035 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { | |
2036 | metaslab_aux_histogram_add(msp->ms_deferhist[t], | |
2037 | sm->sm_shift, msp->ms_defer[t]); | |
2038 | } | |
2039 | } | |
2040 | ||
2041 | metaslab_aux_histogram_add(msp->ms_synchist, | |
2042 | sm->sm_shift, msp->ms_freeing); | |
2043 | } | |
2044 | ||
2045 | /* | |
2046 | * Called every time we are done syncing (writing to) the metaslab, | |
2047 | * i.e. at the end of each sync pass. | |
2048 | * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist] | |
2049 | */ | |
2050 | static void | |
2051 | metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed) | |
2052 | { | |
2053 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
2054 | space_map_t *sm = msp->ms_sm; | |
2055 | ||
2056 | if (sm == NULL) { | |
2057 | /* | |
2058 | * We came here from metaslab_init() when creating/opening a | |
2059 | * pool, looking at a metaslab that hasn't had any allocations | |
2060 | * yet. | |
2061 | */ | |
2062 | return; | |
2063 | } | |
2064 | ||
2065 | /* | |
2066 | * This is similar to the actions that we take for the ms_freed | |
2067 | * and ms_defer trees in metaslab_sync_done(). | |
2068 | */ | |
2069 | uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE; | |
2070 | if (defer_allowed) { | |
861166b0 | 2071 | memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist, |
928e8ad4 SD |
2072 | sizeof (msp->ms_synchist)); |
2073 | } else { | |
861166b0 | 2074 | memset(msp->ms_deferhist[hist_index], 0, |
928e8ad4 SD |
2075 | sizeof (msp->ms_deferhist[hist_index])); |
2076 | } | |
861166b0 | 2077 | memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist)); |
928e8ad4 SD |
2078 | } |
2079 | ||
2080 | /* | |
2081 | * Ensure that the metaslab's weight and fragmentation are consistent | |
2082 | * with the contents of the histogram (either the range tree's histogram | |
2083 | * or the space map's, depending on whether the metaslab is loaded).
2084 | */ | |
2085 | static void | |
2086 | metaslab_verify_weight_and_frag(metaslab_t *msp) | |
2087 | { | |
2088 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
2089 | ||
2090 | if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) | |
2091 | return; | |
2092 | ||
2fcf4481 SD |
2093 | /* |
2094 | * We can end up here from vdev_remove_complete(), in which case we | |
2095 | * cannot do these assertions because we hold spa config locks and | |
2096 | * thus we are not allowed to read from the DMU. | |
2097 | * | |
2098 | * We check if the metaslab group has been removed and if that's | |
2099 | * the case we return immediately as that would mean that we are | |
2100 | * here from the aforementioned code path. | |
2101 | */ | |
928e8ad4 SD |
2102 | if (msp->ms_group == NULL) |
2103 | return; | |
2104 | ||
2105 | /* | |
2106 | * Devices being removed always return a weight of 0 and leave | |
2107 | * fragmentation and ms_max_size as is - there is nothing for | |
2108 | * us to verify here. | |
2109 | */ | |
2110 | vdev_t *vd = msp->ms_group->mg_vd; | |
2111 | if (vd->vdev_removing) | |
2112 | return; | |
2113 | ||
2114 | /* | |
2115 | * If the metaslab is dirty it probably means that we've done | |
2116 | * some allocations or frees that have changed our histograms | |
2117 | * and thus the weight. | |
2118 | */ | |
2119 | for (int t = 0; t < TXG_SIZE; t++) { | |
2120 | if (txg_list_member(&vd->vdev_ms_list, msp, t)) | |
2121 | return; | |
2122 | } | |
2123 | ||
2124 | /* | |
2125 | * This verification checks that our in-memory state is consistent | |
2126 | * with what's on disk. If the pool is read-only then there aren't | |
2127 | * any changes and we just have the initially-loaded state. | |
2128 | */ | |
2129 | if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa)) | |
2130 | return; | |
2131 | ||
2132 | /* some extra verification for in-core tree if you can */ | |
2133 | if (msp->ms_loaded) { | |
2134 | range_tree_stat_verify(msp->ms_allocatable); | |
2135 | VERIFY(space_map_histogram_verify(msp->ms_sm, | |
2136 | msp->ms_allocatable)); | |
2137 | } | |
2138 | ||
2139 | uint64_t weight = msp->ms_weight; | |
2140 | uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; | |
2141 | boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight); | |
2142 | uint64_t frag = msp->ms_fragmentation; | |
2143 | uint64_t max_segsize = msp->ms_max_size; | |
2144 | ||
2145 | msp->ms_weight = 0; | |
2146 | msp->ms_fragmentation = 0; | |
928e8ad4 SD |
2147 | |
2148 | /* | |
65a91b16 SD |
2149 | * This function is used for verification purposes and thus should |
2150 | * not introduce any side-effects/mutations on the system's state. | |
2151 | * | |
2152 | * Regardless of whether metaslab_weight() thinks this metaslab | |
2153 | * should be active or not, we want to ensure that the actual weight | |
2154 | * (and therefore the value of ms_weight) would be the same if it | |
2155 | * was to be recalculated at this point. | |
2156 | * | |
2157 | * In addition we set the nodirty flag so metaslab_weight() does | |
2158 | * not dirty the metaslab for future TXGs (e.g. when trying to | |
2159 | * force condensing to upgrade the metaslab spacemaps). | |
928e8ad4 | 2160 | */ |
65a91b16 | 2161 | msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active; |
928e8ad4 SD |
2162 | |
2163 | VERIFY3U(max_segsize, ==, msp->ms_max_size); | |
2164 | ||
2165 | /* | |
2166 | * If the weight type changed then there is no point in doing | |
2167 | * verification. Revert fields to their original values. | |
2168 | */ | |
2169 | if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) || | |
2170 | (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) { | |
2171 | msp->ms_fragmentation = frag; | |
2172 | msp->ms_weight = weight; | |
2173 | return; | |
2174 | } | |
2175 | ||
2176 | VERIFY3U(msp->ms_fragmentation, ==, frag); | |
2177 | VERIFY3U(msp->ms_weight, ==, weight); | |
2178 | } | |
2179 | ||
f09fda50 PD |
2180 | /* |
2181 | * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from | |
2182 | * this class that was used longest ago, and attempt to unload it. We don't | |
2183 | * want to spend too much time in this loop to prevent performance | |
e1cfd73f | 2184 | * degradation, and we expect that most of the time this operation will |
f09fda50 PD |
2185 | * succeed. Between that and the normal unloading processing during txg sync, |
2186 | * we expect this to keep the metaslab memory usage under control. | |
2187 | */ | |
2188 | static void | |
2189 | metaslab_potentially_evict(metaslab_class_t *mc) | |
2190 | { | |
2191 | #ifdef _KERNEL | |
2192 | uint64_t allmem = arc_all_memory(); | |
65019062 MM |
2193 | uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache); |
2194 | uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache); | |
fdc2d303 | 2195 | uint_t tries = 0; |
f09fda50 | 2196 | for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size && |
ffdf019c | 2197 | tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2; |
f09fda50 PD |
2198 | tries++) { |
2199 | unsigned int idx = multilist_get_random_index( | |
ffdf019c | 2200 | &mc->mc_metaslab_txg_list); |
f09fda50 | 2201 | multilist_sublist_t *mls = |
ffdf019c | 2202 | multilist_sublist_lock(&mc->mc_metaslab_txg_list, idx); |
f09fda50 PD |
2203 | metaslab_t *msp = multilist_sublist_head(mls); |
2204 | multilist_sublist_unlock(mls); | |
2205 | while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 < | |
2206 | inuse * size) { | |
2207 | VERIFY3P(mls, ==, multilist_sublist_lock( | |
ffdf019c | 2208 | &mc->mc_metaslab_txg_list, idx)); |
f09fda50 | 2209 | ASSERT3U(idx, ==, |
ffdf019c | 2210 | metaslab_idx_func(&mc->mc_metaslab_txg_list, msp)); |
f09fda50 PD |
2211 | |
2212 | if (!multilist_link_active(&msp->ms_class_txg_node)) { | |
2213 | multilist_sublist_unlock(mls); | |
2214 | break; | |
2215 | } | |
2216 | metaslab_t *next_msp = multilist_sublist_next(mls, msp); | |
2217 | multilist_sublist_unlock(mls); | |
2218 | /* | |
2219 | * If the metaslab is currently loading there are two | |
2220 | * cases. If it's the metaslab we're evicting, we | |
2221 | * can't continue on or we'll panic when we attempt to | |
2222 | * recursively lock the mutex. If it's another | |
2223 | * metaslab that's loading, it can be safely skipped, | |
2224 | * since we know it's very new and therefore not a | |
2225 | * good eviction candidate. We check later once the | |
2226 | * lock is held that the metaslab is fully loaded | |
2227 | * before actually unloading it. | |
2228 | */ | |
2229 | if (msp->ms_loading) { | |
2230 | msp = next_msp; | |
65019062 MM |
2231 | inuse = |
2232 | spl_kmem_cache_inuse(zfs_btree_leaf_cache); | |
f09fda50 PD |
2233 | continue; |
2234 | } | |
2235 | /* | |
2236 | * We can't unload metaslabs with no spacemap because | |
2237 | * they're not ready to be unloaded yet. We can't | |
2238 | * unload metaslabs with outstanding allocations | |
2239 | * because doing so could cause the metaslab's weight | |
2240 | * to decrease while it's unloaded, which violates an | |
2241 | * invariant that we use to prevent unnecessary | |
2242 | * loading. We also don't unload metaslabs that are | |
2243 | * currently active because they are high-weight | |
2244 | * metaslabs that are likely to be used in the near | |
2245 | * future. | |
2246 | */ | |
2247 | mutex_enter(&msp->ms_lock); | |
2248 | if (msp->ms_allocator == -1 && msp->ms_sm != NULL && | |
2249 | msp->ms_allocating_total == 0) { | |
2250 | metaslab_unload(msp); | |
2251 | } | |
2252 | mutex_exit(&msp->ms_lock); | |
2253 | msp = next_msp; | |
65019062 | 2254 | inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache); |
f09fda50 PD |
2255 | } |
2256 | } | |
14e4e3cb | 2257 | #else |
18168da7 | 2258 | (void) mc, (void) zfs_metaslab_mem_limit; |
f09fda50 PD |
2259 | #endif |
2260 | } | |
2261 | ||
b194fab0 SD |
2262 | static int |
2263 | metaslab_load_impl(metaslab_t *msp) | |
93cf2076 GW |
2264 | { |
2265 | int error = 0; | |
93cf2076 GW |
2266 | |
2267 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
b194fab0 | 2268 | ASSERT(msp->ms_loading); |
425d3237 | 2269 | ASSERT(!msp->ms_condensing); |
93cf2076 | 2270 | |
a1d477c2 | 2271 | /* |
425d3237 SD |
2272 | * We temporarily drop the lock to unblock other operations while we |
2273 | * are reading the space map. Therefore, metaslab_sync() and | |
2274 | * metaslab_sync_done() can run at the same time as we do. | |
2275 | * | |
93e28d66 SD |
2276 | * If we are using the log space maps, metaslab_sync() can't write to |
2277 | * the metaslab's space map while we are loading as we only write to | |
2278 | * it when we are flushing the metaslab, and that can't happen while | |
2279 | * we are loading it. | |
2280 | * | |
2281 | * If we are not using log space maps though, metaslab_sync() can | |
2282 | * append to the space map while we are loading. Therefore we load | |
2283 | * only entries that existed when we started the load. Additionally, | |
2284 | * metaslab_sync_done() has to wait for the load to complete because | |
2285 | * there are potential races like metaslab_load() loading parts of the | |
2286 | * space map that are currently being appended by metaslab_sync(). If | |
2287 | * we didn't, the ms_allocatable would have entries that | |
2288 | * metaslab_sync_done() would try to re-add later. | |
425d3237 SD |
2289 | * |
2290 | * That's why before dropping the lock we remember the synced length | |
2291 | * of the metaslab and read up to that point of the space map, | |
2292 | * ignoring entries appended by metaslab_sync() that happen after we | |
2293 | * drop the lock. | |
a1d477c2 | 2294 | */ |
425d3237 | 2295 | uint64_t length = msp->ms_synced_length; |
a1d477c2 | 2296 | mutex_exit(&msp->ms_lock); |
93cf2076 | 2297 | |
93e28d66 | 2298 | hrtime_t load_start = gethrtime(); |
ca577779 PD |
2299 | metaslab_rt_arg_t *mrap; |
2300 | if (msp->ms_allocatable->rt_arg == NULL) { | |
2301 | mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP); | |
2302 | } else { | |
2303 | mrap = msp->ms_allocatable->rt_arg; | |
2304 | msp->ms_allocatable->rt_ops = NULL; | |
2305 | msp->ms_allocatable->rt_arg = NULL; | |
2306 | } | |
2307 | mrap->mra_bt = &msp->ms_allocatable_by_size; | |
2308 | mrap->mra_floor_shift = metaslab_by_size_min_shift; | |
2309 | ||
d2734cce | 2310 | if (msp->ms_sm != NULL) { |
425d3237 SD |
2311 | error = space_map_load_length(msp->ms_sm, msp->ms_allocatable, |
2312 | SM_FREE, length); | |
ca577779 PD |
2313 | |
2314 | /* Now, populate the size-sorted tree. */ | |
2315 | metaslab_rt_create(msp->ms_allocatable, mrap); | |
2316 | msp->ms_allocatable->rt_ops = &metaslab_rt_ops; | |
2317 | msp->ms_allocatable->rt_arg = mrap; | |
2318 | ||
2319 | struct mssa_arg arg = {0}; | |
2320 | arg.rt = msp->ms_allocatable; | |
2321 | arg.mra = mrap; | |
2322 | range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add, | |
2323 | &arg); | |
d2734cce | 2324 | } else { |
ca577779 PD |
2325 | /* |
2326 | * Add the size-sorted tree first, since we don't need to load | |
2327 | * the metaslab from the spacemap. | |
2328 | */ | |
2329 | metaslab_rt_create(msp->ms_allocatable, mrap); | |
2330 | msp->ms_allocatable->rt_ops = &metaslab_rt_ops; | |
2331 | msp->ms_allocatable->rt_arg = mrap; | |
425d3237 SD |
2332 | /* |
2333 | * The space map has not been allocated yet, so treat | |
2334 | * all the space in the metaslab as free and add it to the | |
2335 | * ms_allocatable tree. | |
2336 | */ | |
d2734cce SD |
2337 | range_tree_add(msp->ms_allocatable, |
2338 | msp->ms_start, msp->ms_size); | |
93e28d66 | 2339 | |
793c958f | 2340 | if (msp->ms_new) { |
93e28d66 SD |
2341 | /* |
2342 | * If the ms_sm doesn't exist, this means that this | |
2343 | * metaslab hasn't gone through metaslab_sync() and | |
2344 | * thus has never been dirtied. So we shouldn't | |
2345 | * expect any unflushed allocs or frees from previous | |
2346 | * TXGs. | |
93e28d66 SD |
2347 | */ |
2348 | ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); | |
2349 | ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); | |
2350 | } | |
d2734cce | 2351 | } |
93cf2076 | 2352 | |
425d3237 SD |
2353 | /* |
2354 | * We need to grab the ms_sync_lock to prevent metaslab_sync() from | |
93e28d66 SD |
2355 | * changing the ms_sm (or log_sm) and the metaslab's range trees |
2356 | * while we are about to use them and populate the ms_allocatable. | |
2357 | * The ms_lock is insufficient for this because metaslab_sync() doesn't | |
2358 | * hold the ms_lock while writing the ms_checkpointing tree to disk. | |
425d3237 SD |
2359 | */ |
2360 | mutex_enter(&msp->ms_sync_lock); | |
a1d477c2 | 2361 | mutex_enter(&msp->ms_lock); |
93e28d66 | 2362 | |
425d3237 | 2363 | ASSERT(!msp->ms_condensing); |
93e28d66 | 2364 | ASSERT(!msp->ms_flushing); |
93cf2076 | 2365 | |
8eef9976 SD |
2366 | if (error != 0) { |
2367 | mutex_exit(&msp->ms_sync_lock); | |
b194fab0 | 2368 | return (error); |
8eef9976 | 2369 | } |
4e21fd06 | 2370 | |
b194fab0 SD |
2371 | ASSERT3P(msp->ms_group, !=, NULL); |
2372 | msp->ms_loaded = B_TRUE; | |
2373 | ||
2374 | /* | |
93e28d66 SD |
2375 | * Apply all the unflushed changes to ms_allocatable right |
2376 | * away so any manipulations we do below have a clear view | |
2377 | * of what is allocated and what is free. | |
2378 | */ | |
2379 | range_tree_walk(msp->ms_unflushed_allocs, | |
2380 | range_tree_remove, msp->ms_allocatable); | |
2381 | range_tree_walk(msp->ms_unflushed_frees, | |
2382 | range_tree_add, msp->ms_allocatable); | |
2383 | ||
93e28d66 SD |
2384 | ASSERT3P(msp->ms_group, !=, NULL); |
2385 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
2386 | if (spa_syncing_log_sm(spa) != NULL) { | |
2387 | ASSERT(spa_feature_is_enabled(spa, | |
2388 | SPA_FEATURE_LOG_SPACEMAP)); | |
2389 | ||
2390 | /* | |
2391 | * If we use a log space map we add all the segments | |
2392 | * that are in ms_unflushed_frees so they are available | |
2393 | * for allocation. | |
2394 | * | |
2395 | * ms_allocatable needs to contain all free segments | |
2396 | * that are ready for allocations (thus not segments | |
2397 | * from ms_freeing, ms_freed, and the ms_defer trees). | |
2398 | * But if we grab the lock in this code path at a sync | |
2399 | * pass later than 1, then it also contains the
2400 | * segments of ms_freed (they were added to it earlier | |
2401 | * in this path through ms_unflushed_frees). So we | |
2402 | * need to remove all the segments that exist in | |
2403 | * ms_freed from ms_allocatable as they will be added | |
2404 | * later in metaslab_sync_done(). | |
2405 | * | |
2406 | * When there's no log space map, the ms_allocatable | |
2407 | * correctly doesn't contain any segments that exist | |
2408 | * in ms_freed [see ms_synced_length]. | |
2409 | */ | |
2410 | range_tree_walk(msp->ms_freed, | |
2411 | range_tree_remove, msp->ms_allocatable); | |
2412 | } | |
2413 | ||
2414 | /* | |
2415 | * If we are not using the log space map, ms_allocatable | |
2416 | * contains the segments that exist in the ms_defer trees | |
2417 | * [see ms_synced_length]. Thus we need to remove them | |
2418 | * from ms_allocatable as they will be added again in | |
425d3237 | 2419 | * metaslab_sync_done(). |
93e28d66 SD |
2420 | * |
2421 | * If we are using the log space map, ms_allocatable still | |
2422 | * contains the segments that exist in the ms_defer trees. | |
2423 | * Not because it read them through the ms_sm though. But | |
2424 | * because these segments are part of ms_unflushed_frees | |
2425 | * whose segments we add to ms_allocatable earlier in this | |
2426 | * code path. | |
b194fab0 | 2427 | */ |
425d3237 SD |
2428 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { |
2429 | range_tree_walk(msp->ms_defer[t], | |
2430 | range_tree_remove, msp->ms_allocatable); | |
93cf2076 | 2431 | } |
425d3237 | 2432 | |
928e8ad4 SD |
2433 | /* |
2434 | * Call metaslab_recalculate_weight_and_sort() now that the | |
2435 | * metaslab is loaded so we get the metaslab's real weight. | |
2436 | * | |
2437 | * Unless this metaslab was created with older software and | |
2438 | * has not yet been converted to use segment-based weight, we | |
2439 | * expect the new weight to be better or equal to the weight | |
2440 | * that the metaslab had while it was not loaded. This is | |
2441 | * because the old weight does not take into account the | |
2442 | * consolidation of adjacent segments between TXGs. [see | |
2443 | * comment for ms_synchist and ms_deferhist[] for more info] | |
2444 | */ | |
2445 | uint64_t weight = msp->ms_weight; | |
c81f1790 | 2446 | uint64_t max_size = msp->ms_max_size; |
928e8ad4 SD |
2447 | metaslab_recalculate_weight_and_sort(msp); |
2448 | if (!WEIGHT_IS_SPACEBASED(weight)) | |
2449 | ASSERT3U(weight, <=, msp->ms_weight); | |
c81f1790 PD |
2450 | msp->ms_max_size = metaslab_largest_allocatable(msp); |
2451 | ASSERT3U(max_size, <=, msp->ms_max_size); | |
93e28d66 | 2452 | hrtime_t load_end = gethrtime(); |
d64c6a2e MA |
2453 | msp->ms_load_time = load_end; |
2454 | zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, " | |
2455 | "ms_id %llu, smp_length %llu, " | |
2456 | "unflushed_allocs %llu, unflushed_frees %llu, " | |
2457 | "freed %llu, defer %llu + %llu, unloaded time %llu ms, " | |
2458 | "loading_time %lld ms, ms_max_size %llu, " | |
2459 | "max size error %lld, " | |
2460 | "old_weight %llx, new_weight %llx", | |
8e739b2c RE |
2461 | (u_longlong_t)spa_syncing_txg(spa), spa_name(spa), |
2462 | (u_longlong_t)msp->ms_group->mg_vd->vdev_id, | |
2463 | (u_longlong_t)msp->ms_id, | |
2464 | (u_longlong_t)space_map_length(msp->ms_sm), | |
2465 | (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs), | |
2466 | (u_longlong_t)range_tree_space(msp->ms_unflushed_frees), | |
2467 | (u_longlong_t)range_tree_space(msp->ms_freed), | |
2468 | (u_longlong_t)range_tree_space(msp->ms_defer[0]), | |
2469 | (u_longlong_t)range_tree_space(msp->ms_defer[1]), | |
d64c6a2e MA |
2470 | (longlong_t)((load_start - msp->ms_unload_time) / 1000000), |
2471 | (longlong_t)((load_end - load_start) / 1000000), | |
8e739b2c RE |
2472 | (u_longlong_t)msp->ms_max_size, |
2473 | (u_longlong_t)msp->ms_max_size - max_size, | |
2474 | (u_longlong_t)weight, (u_longlong_t)msp->ms_weight); | |
93e28d66 | 2475 | |
425d3237 SD |
2476 | metaslab_verify_space(msp, spa_syncing_txg(spa)); |
2477 | mutex_exit(&msp->ms_sync_lock); | |
b194fab0 SD |
2478 | return (0); |
2479 | } | |
2480 | ||
2481 | int | |
2482 | metaslab_load(metaslab_t *msp) | |
2483 | { | |
2484 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
2485 | ||
2486 | /* | |
2487 | * There may be another thread loading the same metaslab; if that's
2488 | * the case, just wait until the other thread is done and return.
2489 | */ | |
2490 | metaslab_load_wait(msp); | |
2491 | if (msp->ms_loaded) | |
2492 | return (0); | |
2493 | VERIFY(!msp->ms_loading); | |
425d3237 | 2494 | ASSERT(!msp->ms_condensing); |
b194fab0 | 2495 | |
93e28d66 SD |
2496 | /* |
2497 | * We set the loading flag BEFORE potentially dropping the lock to | |
2498 | * wait for an ongoing flush (see ms_flushing below). This way other | |
2499 | * threads know that there is already a thread that is loading this | |
2500 | * metaslab. | |
2501 | */ | |
b194fab0 | 2502 | msp->ms_loading = B_TRUE; |
93e28d66 SD |
2503 | |
2504 | /* | |
2505 | * Wait for any in-progress flushing to finish as we drop the ms_lock | |
2506 | * both here (during space_map_load()) and in metaslab_flush() (when | |
2507 | * we flush our changes to the ms_sm). | |
2508 | */ | |
2509 | if (msp->ms_flushing) | |
2510 | metaslab_flush_wait(msp); | |
2511 | ||
2512 | /* | |
2513 | * In case we were waiting for the metaslab to be flushed (and
2514 | * temporarily dropped the ms_lock), ensure that
2515 | * no one else loaded the metaslab somehow. | |
2516 | */ | |
2517 | ASSERT(!msp->ms_loaded); | |
2518 | ||
f09fda50 PD |
2519 | /* |
2520 | * If we're loading a metaslab in the normal class, consider evicting | |
2521 | * another one to keep our memory usage under the limit defined by the | |
2522 | * zfs_metaslab_mem_limit tunable. | |
2523 | */ | |
2524 | if (spa_normal_class(msp->ms_group->mg_class->mc_spa) == | |
2525 | msp->ms_group->mg_class) { | |
2526 | metaslab_potentially_evict(msp->ms_group->mg_class); | |
2527 | } | |
2528 | ||
b194fab0 | 2529 | int error = metaslab_load_impl(msp); |
93e28d66 SD |
2530 | |
2531 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
b194fab0 | 2532 | msp->ms_loading = B_FALSE; |
93cf2076 | 2533 | cv_broadcast(&msp->ms_load_cv); |
b194fab0 | 2534 | |
93cf2076 GW |
2535 | return (error); |
2536 | } | |
2537 | ||
2538 | void | |
2539 | metaslab_unload(metaslab_t *msp) | |
2540 | { | |
2541 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
928e8ad4 | 2542 | |
f09fda50 PD |
2543 | /* |
2544 | * This can happen if a metaslab is selected for eviction (in | |
2545 | * metaslab_potentially_evict) and then unloaded during spa_sync (via | |
2546 | * metaslab_class_evict_old). | |
2547 | */ | |
2548 | if (!msp->ms_loaded) | |
2549 | return; | |
928e8ad4 | 2550 | |
d2734cce | 2551 | range_tree_vacate(msp->ms_allocatable, NULL, NULL); |
93cf2076 | 2552 | msp->ms_loaded = B_FALSE; |
c81f1790 | 2553 | msp->ms_unload_time = gethrtime(); |
928e8ad4 | 2554 | |
679b0f2a | 2555 | msp->ms_activation_weight = 0; |
93cf2076 | 2556 | msp->ms_weight &= ~METASLAB_ACTIVE_MASK; |
928e8ad4 | 2557 | |
f09fda50 PD |
2558 | if (msp->ms_group != NULL) { |
2559 | metaslab_class_t *mc = msp->ms_group->mg_class; | |
2560 | multilist_sublist_t *mls = | |
ffdf019c | 2561 | multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); |
f09fda50 PD |
2562 | if (multilist_link_active(&msp->ms_class_txg_node)) |
2563 | multilist_sublist_remove(mls, msp); | |
2564 | multilist_sublist_unlock(mls); | |
d64c6a2e MA |
2565 | |
2566 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
2567 | zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, " | |
2568 | "ms_id %llu, weight %llx, " | |
2569 | "selected txg %llu (%llu ms ago), alloc_txg %llu, " | |
2570 | "loaded %llu ms ago, max_size %llu", | |
8e739b2c RE |
2571 | (u_longlong_t)spa_syncing_txg(spa), spa_name(spa), |
2572 | (u_longlong_t)msp->ms_group->mg_vd->vdev_id, | |
2573 | (u_longlong_t)msp->ms_id, | |
2574 | (u_longlong_t)msp->ms_weight, | |
2575 | (u_longlong_t)msp->ms_selected_txg, | |
2576 | (u_longlong_t)(msp->ms_unload_time - | |
2577 | msp->ms_selected_time) / 1000 / 1000, | |
2578 | (u_longlong_t)msp->ms_alloc_txg, | |
2579 | (u_longlong_t)(msp->ms_unload_time - | |
2580 | msp->ms_load_time) / 1000 / 1000, | |
2581 | (u_longlong_t)msp->ms_max_size); | |
f09fda50 PD |
2582 | } |
2583 | ||
928e8ad4 SD |
2584 | /* |
2585 | * We explicitly recalculate the metaslab's weight based on its space | |
2586 | * map (as it is now not loaded). We want unloaded metaslabs to always
2587 | * have their weights calculated from the space map histograms, while | |
2588 | * loaded ones have it calculated from their in-core range tree | |
2589 | * [see metaslab_load()]. This way, the weight reflects the information | |
93e28d66 | 2590 | * available in-core, whether it is loaded or not. |
928e8ad4 SD |
2591 | * |
2592 | * If ms_group == NULL, it means that we came here from metaslab_fini(),
2593 | * at which point it doesn't make sense for us to do the recalculation | |
2594 | * and the sorting. | |
2595 | */ | |
2596 | if (msp->ms_group != NULL) | |
2597 | metaslab_recalculate_weight_and_sort(msp); | |
93cf2076 GW |
2598 | } |
2599 | ||
ca577779 PD |
2600 | /* |
2601 | * We want to optimize the memory use of the per-metaslab range | |
2602 | * trees. To do this, we store the segments in the range trees in | |
2603 | * units of sectors, zero-indexing from the start of the metaslab. If | |
2604 | * vdev_ms_shift - vdev_ashift is less than 32, we can store
2605 | * the ranges using two uint32_ts, rather than two uint64_ts. | |
2606 | */ | |
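/*
 * Illustrative sizing example (hypothetical values, not taken from this
 * file): with ashift = 12 (4 KiB sectors) and vdev_ms_shift = 34 (16 GiB
 * metaslabs), offsets within a metaslab span only 2^22 sectors, which
 * easily fits in a uint32_t, so RANGE_SEG32 is chosen below; RANGE_SEG64
 * is only needed for unusually large metaslabs or when
 * zfs_metaslab_force_large_segs is set.
 */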
6774931d | 2607 | range_seg_type_t |
ca577779 PD |
2608 | metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp, |
2609 | uint64_t *start, uint64_t *shift) | |
2610 | { | |
2611 | if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 && | |
2612 | !zfs_metaslab_force_large_segs) { | |
2613 | *shift = vdev->vdev_ashift; | |
2614 | *start = msp->ms_start; | |
2615 | return (RANGE_SEG32); | |
2616 | } else { | |
2617 | *shift = 0; | |
2618 | *start = 0; | |
2619 | return (RANGE_SEG64); | |
2620 | } | |
2621 | } | |
2622 | ||
f09fda50 PD |
2623 | void |
2624 | metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg) | |
2625 | { | |
2626 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
2627 | metaslab_class_t *mc = msp->ms_group->mg_class; | |
2628 | multilist_sublist_t *mls = | |
ffdf019c | 2629 | multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); |
f09fda50 PD |
2630 | if (multilist_link_active(&msp->ms_class_txg_node)) |
2631 | multilist_sublist_remove(mls, msp); | |
2632 | msp->ms_selected_txg = txg; | |
eef0f4d8 | 2633 | msp->ms_selected_time = gethrtime(); |
f09fda50 PD |
2634 | multilist_sublist_insert_tail(mls, msp); |
2635 | multilist_sublist_unlock(mls); | |
2636 | } | |
2637 | ||
93e28d66 | 2638 | void |
cc99f275 DB |
2639 | metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta, |
2640 | int64_t defer_delta, int64_t space_delta) | |
2641 | { | |
2642 | vdev_space_update(vd, alloc_delta, defer_delta, space_delta); | |
2643 | ||
2644 | ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent); | |
2645 | ASSERT(vd->vdev_ms_count != 0); | |
2646 | ||
2647 | metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta, | |
2648 | vdev_deflated_space(vd, space_delta)); | |
2649 | } | |
2650 | ||
fb42a493 | 2651 | int |
93e28d66 SD |
2652 | metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, |
2653 | uint64_t txg, metaslab_t **msp) | |
34dc7c2f BB |
2654 | { |
2655 | vdev_t *vd = mg->mg_vd; | |
cc99f275 DB |
2656 | spa_t *spa = vd->vdev_spa; |
2657 | objset_t *mos = spa->spa_meta_objset; | |
fb42a493 PS |
2658 | metaslab_t *ms; |
2659 | int error; | |
34dc7c2f | 2660 | |
79c76d5b | 2661 | ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP); |
fb42a493 | 2662 | mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL); |
a1d477c2 | 2663 | mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL); |
fb42a493 | 2664 | cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL); |
93e28d66 | 2665 | cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL); |
f09fda50 | 2666 | multilist_link_init(&ms->ms_class_txg_node); |
619f0976 | 2667 | |
fb42a493 PS |
2668 | ms->ms_id = id; |
2669 | ms->ms_start = id << vd->vdev_ms_shift; | |
2670 | ms->ms_size = 1ULL << vd->vdev_ms_shift; | |
492f64e9 PD |
2671 | ms->ms_allocator = -1; |
2672 | ms->ms_new = B_TRUE; | |
34dc7c2f | 2673 | |
b2255edc BB |
2674 | vdev_ops_t *ops = vd->vdev_ops; |
2675 | if (ops->vdev_op_metaslab_init != NULL) | |
2676 | ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size); | |
2677 | ||
93cf2076 GW |
2678 | /* |
2679 | * We only open space map objects that already exist. All others | |
e39fe05b FU |
2680 | * will be opened when we finally allocate an object for them. For
2681 | * readonly pools there is no need to open the space map object. | |
425d3237 SD |
2682 | * |
2683 | * Note: | |
2684 | * When called from vdev_expand(), we can't call into the DMU as | |
2685 | * we are holding the spa_config_lock as a writer and we would | |
2686 | * deadlock [see relevant comment in vdev_metaslab_init()]. In
2687 | * that case, the object parameter is zero though, so we won't | |
2688 | * call into the DMU. | |
93cf2076 | 2689 | */ |
e39fe05b FU |
2690 | if (object != 0 && !(spa->spa_mode == SPA_MODE_READ && |
2691 | !spa->spa_read_spacemaps)) { | |
fb42a493 | 2692 | error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start, |
a1d477c2 | 2693 | ms->ms_size, vd->vdev_ashift); |
fb42a493 PS |
2694 | |
2695 | if (error != 0) { | |
2696 | kmem_free(ms, sizeof (metaslab_t)); | |
2697 | return (error); | |
2698 | } | |
2699 | ||
2700 | ASSERT(ms->ms_sm != NULL); | |
425d3237 | 2701 | ms->ms_allocated_space = space_map_allocated(ms->ms_sm); |
93cf2076 | 2702 | } |
34dc7c2f | 2703 | |
ca577779 | 2704 | uint64_t shift, start; |
793c958f SD |
2705 | range_seg_type_t type = |
2706 | metaslab_calculate_range_tree_type(vd, ms, &start, &shift); | |
ca577779 | 2707 | |
ca577779 | 2708 | ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift); |
793c958f SD |
2709 | for (int t = 0; t < TXG_SIZE; t++) { |
2710 | ms->ms_allocating[t] = range_tree_create(NULL, type, | |
2711 | NULL, start, shift); | |
2712 | } | |
2713 | ms->ms_freeing = range_tree_create(NULL, type, NULL, start, shift); | |
2714 | ms->ms_freed = range_tree_create(NULL, type, NULL, start, shift); | |
2715 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { | |
2716 | ms->ms_defer[t] = range_tree_create(NULL, type, NULL, | |
2717 | start, shift); | |
2718 | } | |
2719 | ms->ms_checkpointing = | |
2720 | range_tree_create(NULL, type, NULL, start, shift); | |
2721 | ms->ms_unflushed_allocs = | |
2722 | range_tree_create(NULL, type, NULL, start, shift); | |
2723 | ||
2724 | metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP); | |
2725 | mrap->mra_bt = &ms->ms_unflushed_frees_by_size; | |
2726 | mrap->mra_floor_shift = metaslab_by_size_min_shift; | |
2727 | ms->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops, | |
2728 | type, mrap, start, shift); | |
34dc7c2f | 2729 | |
ca577779 | 2730 | ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift); |
1b939560 BB |
2731 | |
2732 | metaslab_group_add(mg, ms); | |
65a91b16 | 2733 | metaslab_set_fragmentation(ms, B_FALSE); |
428870ff | 2734 | |
34dc7c2f BB |
2735 | /* |
2736 | * If we're opening an existing pool (txg == 0) or creating | |
2737 | * a new one (txg == TXG_INITIAL), all space is available now. | |
2738 | * If we're adding space to an existing pool, the new space | |
2739 | * does not become available until after this txg has synced. | |
4e21fd06 DB |
2740 | * The metaslab's weight will also be initialized when we sync |
2741 | * out this txg. This ensures that we don't attempt to allocate | |
2742 | * from it before we have initialized it completely. | |
34dc7c2f | 2743 | */ |
425d3237 | 2744 | if (txg <= TXG_INITIAL) { |
fb42a493 | 2745 | metaslab_sync_done(ms, 0); |
425d3237 SD |
2746 | metaslab_space_update(vd, mg->mg_class, |
2747 | metaslab_allocated_space(ms), 0, 0); | |
2748 | } | |
34dc7c2f BB |
2749 | |
2750 | if (txg != 0) { | |
34dc7c2f | 2751 | vdev_dirty(vd, 0, NULL, txg); |
fb42a493 | 2752 | vdev_dirty(vd, VDD_METASLAB, ms, txg); |
34dc7c2f BB |
2753 | } |
2754 | ||
fb42a493 PS |
2755 | *msp = ms; |
2756 | ||
2757 | return (0); | |
34dc7c2f BB |
2758 | } |
2759 | ||
93e28d66 SD |
2760 | static void |
2761 | metaslab_fini_flush_data(metaslab_t *msp) | |
2762 | { | |
2763 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
2764 | ||
2765 | if (metaslab_unflushed_txg(msp) == 0) { | |
2766 | ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), | |
2767 | ==, NULL); | |
2768 | return; | |
2769 | } | |
2770 | ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); | |
2771 | ||
2772 | mutex_enter(&spa->spa_flushed_ms_lock); | |
2773 | avl_remove(&spa->spa_metaslabs_by_flushed, msp); | |
2774 | mutex_exit(&spa->spa_flushed_ms_lock); | |
2775 | ||
2776 | spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp)); | |
600a02b8 AM |
2777 | spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp), |
2778 | metaslab_unflushed_dirty(msp)); | |
93e28d66 SD |
2779 | } |
2780 | ||
2781 | uint64_t | |
2782 | metaslab_unflushed_changes_memused(metaslab_t *ms) | |
2783 | { | |
2784 | return ((range_tree_numsegs(ms->ms_unflushed_allocs) + | |
2785 | range_tree_numsegs(ms->ms_unflushed_frees)) * | |
ca577779 | 2786 | ms->ms_unflushed_allocs->rt_root.bt_elem_size); |
93e28d66 SD |
2787 | } |
2788 | ||
34dc7c2f BB |
2789 | void |
2790 | metaslab_fini(metaslab_t *msp) | |
2791 | { | |
93cf2076 | 2792 | metaslab_group_t *mg = msp->ms_group; |
cc99f275 | 2793 | vdev_t *vd = mg->mg_vd; |
93e28d66 SD |
2794 | spa_t *spa = vd->vdev_spa; |
2795 | ||
2796 | metaslab_fini_flush_data(msp); | |
34dc7c2f BB |
2797 | |
2798 | metaslab_group_remove(mg, msp); | |
2799 | ||
2800 | mutex_enter(&msp->ms_lock); | |
93cf2076 | 2801 | VERIFY(msp->ms_group == NULL); |
793c958f | 2802 | |
aa755b35 | 2803 | /* |
793c958f | 2804 | * If this metaslab hasn't been through metaslab_sync_done() yet, its
aa755b35 MA |
2805 | * space hasn't been accounted for in its vdev and doesn't need to be |
2806 | * subtracted. | |
2807 | */ | |
793c958f | 2808 | if (!msp->ms_new) { |
aa755b35 MA |
2809 | metaslab_space_update(vd, mg->mg_class, |
2810 | -metaslab_allocated_space(msp), 0, -msp->ms_size); | |
cc99f275 | 2811 | |
aa755b35 | 2812 | } |
93cf2076 | 2813 | space_map_close(msp->ms_sm); |
93e28d66 | 2814 | msp->ms_sm = NULL; |
93cf2076 GW |
2815 | |
2816 | metaslab_unload(msp); | |
aa755b35 | 2817 | |
d2734cce | 2818 | range_tree_destroy(msp->ms_allocatable); |
793c958f SD |
2819 | range_tree_destroy(msp->ms_freeing); |
2820 | range_tree_destroy(msp->ms_freed); | |
34dc7c2f | 2821 | |
793c958f SD |
2822 | ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, |
2823 | metaslab_unflushed_changes_memused(msp)); | |
2824 | spa->spa_unflushed_stats.sus_memused -= | |
2825 | metaslab_unflushed_changes_memused(msp); | |
2826 | range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL); | |
2827 | range_tree_destroy(msp->ms_unflushed_allocs); | |
2828 | range_tree_destroy(msp->ms_checkpointing); | |
2829 | range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL); | |
2830 | range_tree_destroy(msp->ms_unflushed_frees); | |
93e28d66 | 2831 | |
793c958f SD |
2832 | for (int t = 0; t < TXG_SIZE; t++) { |
2833 | range_tree_destroy(msp->ms_allocating[t]); | |
2834 | } | |
2835 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { | |
2836 | range_tree_destroy(msp->ms_defer[t]); | |
e51be066 | 2837 | } |
c99c9001 | 2838 | ASSERT0(msp->ms_deferspace); |
428870ff | 2839 | |
928e8ad4 SD |
2840 | for (int t = 0; t < TXG_SIZE; t++) |
2841 | ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t)); | |
2842 | ||
1b939560 BB |
2843 | range_tree_vacate(msp->ms_trim, NULL, NULL); |
2844 | range_tree_destroy(msp->ms_trim); | |
2845 | ||
34dc7c2f | 2846 | mutex_exit(&msp->ms_lock); |
93cf2076 | 2847 | cv_destroy(&msp->ms_load_cv); |
93e28d66 | 2848 | cv_destroy(&msp->ms_flush_cv); |
34dc7c2f | 2849 | mutex_destroy(&msp->ms_lock); |
a1d477c2 | 2850 | mutex_destroy(&msp->ms_sync_lock); |
492f64e9 | 2851 | ASSERT3U(msp->ms_allocator, ==, -1); |
34dc7c2f BB |
2852 | |
2853 | kmem_free(msp, sizeof (metaslab_t)); | |
2854 | } | |
2855 | ||
f3a7f661 GW |
2856 | #define FRAGMENTATION_TABLE_SIZE 17 |
2857 | ||
93cf2076 | 2858 | /* |
f3a7f661 GW |
2859 | * This table defines a segment-size-based fragmentation metric that will
2860 | * allow each metaslab to derive its own fragmentation value. This is done | |
2861 | * by calculating the space in each bucket of the spacemap histogram and | |
928e8ad4 | 2862 | * multiplying that by the fragmentation metric in this table. Doing |
f3a7f661 GW |
2863 | * this for all buckets and dividing it by the total amount of free |
2864 | * space in this metaslab (i.e. the total free space in all buckets) gives | |
2865 | * us the fragmentation metric. This means that a high fragmentation metric | |
2866 | * equates to most of the free space being comprised of small segments. | |
2867 | * Conversely, if the metric is low, then most of the free space is in | |
2868 | * large segments. A 10% change in fragmentation equates to approximately | |
2869 | * double the number of segments. | |
93cf2076 | 2870 | * |
f3a7f661 GW |
2871 | * This table defines 0% fragmented space using 16MB segments. Testing has |
2872 | * shown that segments that are greater than or equal to 16MB do not suffer | |
2873 | * from drastic performance problems. Using this value, we derive the rest | |
2874 | * of the table. Since the fragmentation value is never stored on disk, it | |
2875 | * is possible to change these calculations in the future. | |
2876 | */ | |
18168da7 | 2877 | static const int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = { |
f3a7f661 GW |
2878 | 100, /* 512B */ |
2879 | 100, /* 1K */ | |
2880 | 98, /* 2K */ | |
2881 | 95, /* 4K */ | |
2882 | 90, /* 8K */ | |
2883 | 80, /* 16K */ | |
2884 | 70, /* 32K */ | |
2885 | 60, /* 64K */ | |
2886 | 50, /* 128K */ | |
2887 | 40, /* 256K */ | |
2888 | 30, /* 512K */ | |
2889 | 20, /* 1M */ | |
2890 | 15, /* 2M */ | |
2891 | 10, /* 4M */ | |
2892 | 5, /* 8M */ | |
2893 | 0 /* 16M */ | |
2894 | }; | |
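/*
 * Worked example (hypothetical numbers): a metaslab whose free space is
 * 100 segments of 128K (table value 50) plus 10 segments of 4M (table
 * value 10) has 12.5M + 40M = 52.5M free; the weighted sum is
 * 12.5M * 50 + 40M * 10 = 1025M, giving a fragmentation of roughly
 * 1025 / 52.5 ~= 20%.
 */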
2895 | ||
2896 | /* | |
425d3237 SD |
2897 | * Calculate the metaslab's fragmentation metric and set ms_fragmentation. |
2898 | * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not | |
2899 | * been upgraded and does not support this metric. Otherwise, the computed
2900 | * value should be in the range [0, 100].
93cf2076 | 2901 | */ |
4e21fd06 | 2902 | static void |
65a91b16 | 2903 | metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty) |
93cf2076 | 2904 | { |
f3a7f661 GW |
2905 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; |
2906 | uint64_t fragmentation = 0; | |
2907 | uint64_t total = 0; | |
2908 | boolean_t feature_enabled = spa_feature_is_enabled(spa, | |
2909 | SPA_FEATURE_SPACEMAP_HISTOGRAM); | |
93cf2076 | 2910 | |
4e21fd06 DB |
2911 | if (!feature_enabled) { |
2912 | msp->ms_fragmentation = ZFS_FRAG_INVALID; | |
2913 | return; | |
2914 | } | |
f3a7f661 | 2915 | |
93cf2076 | 2916 | /* |
f3a7f661 GW |
2917 | * A null space map means that the entire metaslab is free |
2918 | * and thus is not fragmented. | |
93cf2076 | 2919 | */ |
4e21fd06 DB |
2920 | if (msp->ms_sm == NULL) { |
2921 | msp->ms_fragmentation = 0; | |
2922 | return; | |
2923 | } | |
f3a7f661 GW |
2924 | |
2925 | /* | |
4e21fd06 | 2926 | * If this metaslab's space map has not been upgraded, flag it |
f3a7f661 GW |
2927 | * so that we upgrade next time we encounter it. |
2928 | */ | |
2929 | if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) { | |
3b7f360c | 2930 | uint64_t txg = spa_syncing_txg(spa); |
93cf2076 GW |
2931 | vdev_t *vd = msp->ms_group->mg_vd; |
2932 | ||
3b7f360c GW |
2933 | /* |
2934 | * If we've reached the final dirty txg, then we must | |
2935 | * be shutting down the pool. We don't want to dirty | |
2936 | * any data past this point so skip setting the condense | |
2937 | * flag. We can retry this action the next time the pool | |
65a91b16 SD |
2938 | * is imported. We also skip marking this metaslab for |
2939 | * condensing if the caller has explicitly set nodirty. | |
3b7f360c | 2940 | */ |
65a91b16 SD |
2941 | if (!nodirty && |
2942 | spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) { | |
8b0a0840 TC |
2943 | msp->ms_condense_wanted = B_TRUE; |
2944 | vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); | |
964c2d69 | 2945 | zfs_dbgmsg("txg %llu, requesting force condense: " |
8e739b2c RE |
2946 | "ms_id %llu, vdev_id %llu", (u_longlong_t)txg, |
2947 | (u_longlong_t)msp->ms_id, | |
2948 | (u_longlong_t)vd->vdev_id); | |
8b0a0840 | 2949 | } |
4e21fd06 DB |
2950 | msp->ms_fragmentation = ZFS_FRAG_INVALID; |
2951 | return; | |
93cf2076 GW |
2952 | } |
2953 | ||
1c27024e | 2954 | for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { |
f3a7f661 GW |
2955 | uint64_t space = 0; |
2956 | uint8_t shift = msp->ms_sm->sm_shift; | |
4e21fd06 | 2957 | |
f3a7f661 GW |
2958 | int idx = MIN(shift - SPA_MINBLOCKSHIFT + i, |
2959 | FRAGMENTATION_TABLE_SIZE - 1); | |
93cf2076 | 2960 | |
93cf2076 GW |
2961 | if (msp->ms_sm->sm_phys->smp_histogram[i] == 0) |
2962 | continue; | |
2963 | ||
f3a7f661 GW |
2964 | space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift); |
2965 | total += space; | |
2966 | ||
2967 | ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE); | |
2968 | fragmentation += space * zfs_frag_table[idx]; | |
93cf2076 | 2969 | } |
f3a7f661 GW |
2970 | |
2971 | if (total > 0) | |
2972 | fragmentation /= total; | |
2973 | ASSERT3U(fragmentation, <=, 100); | |
4e21fd06 DB |
2974 | |
2975 | msp->ms_fragmentation = fragmentation; | |
93cf2076 | 2976 | } |
34dc7c2f | 2977 | |
f3a7f661 GW |
2978 | /* |
2979 | * Compute a weight -- a selection preference value -- for the given metaslab. | |
2980 | * This is based on the amount of free space, the level of fragmentation, | |
2981 | * the LBA range, and whether the metaslab is loaded. | |
2982 | */ | |
34dc7c2f | 2983 | static uint64_t |
4e21fd06 | 2984 | metaslab_space_weight(metaslab_t *msp) |
34dc7c2f BB |
2985 | { |
2986 | metaslab_group_t *mg = msp->ms_group; | |
34dc7c2f BB |
2987 | vdev_t *vd = mg->mg_vd; |
2988 | uint64_t weight, space; | |
2989 | ||
2990 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
c2e42f9d | 2991 | |
34dc7c2f BB |
2992 | /* |
2993 | * The baseline weight is the metaslab's free space. | |
2994 | */ | |
425d3237 | 2995 | space = msp->ms_size - metaslab_allocated_space(msp); |
f3a7f661 | 2996 | |
f3a7f661 GW |
2997 | if (metaslab_fragmentation_factor_enabled && |
2998 | msp->ms_fragmentation != ZFS_FRAG_INVALID) { | |
2999 | /* | |
3000 | * Use the fragmentation information to inversely scale | |
3001 | * down the baseline weight. We need to ensure that we | |
3002 | * don't exclude this metaslab completely when it's 100% | |
3003 | * fragmented. To avoid this we reduce the fragmented value | |
3004 | * by 1. | |
3005 | */ | |
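/* E.g. (illustrative): at 30% fragmentation this keeps (100 - 29)% = 71% of the space. */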
3006 | space = (space * (100 - (msp->ms_fragmentation - 1))) / 100; | |
3007 | ||
3008 | /* | |
3009 | * If space < SPA_MINBLOCKSIZE, then we will not allocate from | |
3010 | * this metaslab again. The fragmentation metric may have | |
3011 | * decreased the space to something smaller than | |
3012 | * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE | |
3013 | * so that we can consume any remaining space. | |
3014 | */ | |
3015 | if (space > 0 && space < SPA_MINBLOCKSIZE) | |
3016 | space = SPA_MINBLOCKSIZE; | |
3017 | } | |
34dc7c2f BB |
3018 | weight = space; |
3019 | ||
3020 | /* | |
3021 | * Modern disks have uniform bit density and constant angular velocity. | |
3022 | * Therefore, the outer recording zones are faster (higher bandwidth) | |
3023 | * than the inner zones by the ratio of outer to inner track diameter, | |
3024 | * which is typically around 2:1. We account for this by assigning | |
3025 | * higher weight to lower metaslabs (multiplier ranging from 2x to 1x). | |
3026 | * In effect, this means that we'll select the metaslab with the most | |
3027 | * free bandwidth rather than simply the one with the most free space. | |
3028 | */ | |
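/*
 * Illustrative example (hypothetical numbers): on a vdev with 200
 * metaslabs, ms_id 0 keeps 2x its space weight, ms_id 100 roughly 1.5x,
 * and the last metaslab roughly 1x.
 */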
fb40095f | 3029 | if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) { |
f3a7f661 GW |
3030 | weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count; |
3031 | ASSERT(weight >= space && weight <= 2 * space); | |
3032 | } | |
428870ff | 3033 | |
f3a7f661 GW |
3034 | /* |
3035 | * If this metaslab is one we're actively using, adjust its | |
3036 | * weight to make it preferable to any inactive metaslab so | |
3037 | * we'll polish it off. If the fragmentation on this metaslab | |
3038 | * has exceeded our threshold, then don't mark it active.
3039 | */ | |
3040 | if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID && | |
3041 | msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) { | |
428870ff BB |
3042 | weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK); |
3043 | } | |
34dc7c2f | 3044 | |
4e21fd06 DB |
3045 | WEIGHT_SET_SPACEBASED(weight); |
3046 | return (weight); | |
3047 | } | |
3048 | ||
3049 | /* | |
3050 | * Return the weight of the specified metaslab, according to the segment-based | |
3051 | * weighting algorithm. The metaslab must be loaded. This function can | |
3052 | * be called within a sync pass since it relies only on the metaslab's | |
3053 | * range tree which is always accurate when the metaslab is loaded. | |
3054 | */ | |
3055 | static uint64_t | |
3056 | metaslab_weight_from_range_tree(metaslab_t *msp) | |
3057 | { | |
3058 | uint64_t weight = 0; | |
3059 | uint32_t segments = 0; | |
4e21fd06 DB |
3060 | |
3061 | ASSERT(msp->ms_loaded); | |
3062 | ||
1c27024e DB |
3063 | for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT; |
3064 | i--) { | |
4e21fd06 DB |
3065 | uint8_t shift = msp->ms_group->mg_vd->vdev_ashift; |
3066 | int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; | |
3067 | ||
3068 | segments <<= 1; | |
d2734cce | 3069 | segments += msp->ms_allocatable->rt_histogram[i]; |
4e21fd06 DB |
3070 | |
3071 | /* | |
3072 | * The range tree provides more precision than the space map | |
3073 | * and must be downgraded so that all values fit within the | |
3074 | * space map's histogram. This allows us to compare loaded | |
3075 | * vs. unloaded metaslabs to determine which metaslab is | |
3076 | * considered "best". | |
3077 | */ | |
3078 | if (i > max_idx) | |
3079 | continue; | |
3080 | ||
3081 | if (segments != 0) { | |
3082 | WEIGHT_SET_COUNT(weight, segments); | |
3083 | WEIGHT_SET_INDEX(weight, i); | |
3084 | WEIGHT_SET_ACTIVE(weight, 0); | |
3085 | break; | |
3086 | } | |
3087 | } | |
3088 | return (weight); | |
3089 | } | |
3090 | ||
3091 | /* | |
93e28d66 SD |
3092 | * Calculate the weight based on the on-disk histogram. Should be applied |
3093 | * only to unloaded metaslabs (i.e. no incoming allocations) in order to
3094 | * give results consistent with the on-disk state.
4e21fd06 DB |
3095 | */ |
3096 | static uint64_t | |
3097 | metaslab_weight_from_spacemap(metaslab_t *msp) | |
3098 | { | |
928e8ad4 SD |
3099 | space_map_t *sm = msp->ms_sm; |
3100 | ASSERT(!msp->ms_loaded); | |
3101 | ASSERT(sm != NULL); | |
3102 | ASSERT3U(space_map_object(sm), !=, 0); | |
3103 | ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); | |
4e21fd06 | 3104 | |
928e8ad4 SD |
3105 | /* |
3106 | * Create a joint histogram from all the segments that have made | |
3107 | * it to the metaslab's space map histogram, that are not yet | |
3108 | * available for allocation because they are still in the freeing | |
3109 | * pipeline (e.g. freeing, freed, and defer trees). Then subtract | |
3110 | * these segments from the space map's histogram to get a more | |
3111 | * accurate weight. | |
3112 | */ | |
3113 | uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0}; | |
3114 | for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) | |
3115 | deferspace_histogram[i] += msp->ms_synchist[i]; | |
3116 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { | |
3117 | for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { | |
3118 | deferspace_histogram[i] += msp->ms_deferhist[t][i]; | |
3119 | } | |
3120 | } | |
3121 | ||
3122 | uint64_t weight = 0; | |
1c27024e | 3123 | for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) { |
928e8ad4 SD |
3124 | ASSERT3U(sm->sm_phys->smp_histogram[i], >=, |
3125 | deferspace_histogram[i]); | |
3126 | uint64_t count = | |
3127 | sm->sm_phys->smp_histogram[i] - deferspace_histogram[i]; | |
3128 | if (count != 0) { | |
3129 | WEIGHT_SET_COUNT(weight, count); | |
3130 | WEIGHT_SET_INDEX(weight, i + sm->sm_shift); | |
4e21fd06 DB |
3131 | WEIGHT_SET_ACTIVE(weight, 0); |
3132 | break; | |
3133 | } | |
3134 | } | |
3135 | return (weight); | |
3136 | } | |
3137 | ||
3138 | /* | |
3139 | * Compute a segment-based weight for the specified metaslab. The weight | |
3140 | * is determined by the highest bucket in the histogram. The information
3141 | * for the highest bucket is encoded into the weight value. | |
3142 | */ | |
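/*
 * Illustrative example (hypothetical numbers): a metaslab whose largest
 * free segments fall in the 1M histogram bucket (index 20), with 12 such
 * segments, encodes index 20 and count 12 into its weight via
 * WEIGHT_SET_INDEX() and WEIGHT_SET_COUNT().
 */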
3143 | static uint64_t | |
3144 | metaslab_segment_weight(metaslab_t *msp) | |
3145 | { | |
3146 | metaslab_group_t *mg = msp->ms_group; | |
3147 | uint64_t weight = 0; | |
3148 | uint8_t shift = mg->mg_vd->vdev_ashift; | |
3149 | ||
3150 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
3151 | ||
3152 | /* | |
3153 | * The metaslab is completely free. | |
3154 | */ | |
425d3237 | 3155 | if (metaslab_allocated_space(msp) == 0) { |
4e21fd06 DB |
3156 | int idx = highbit64(msp->ms_size) - 1; |
3157 | int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; | |
3158 | ||
3159 | if (idx < max_idx) { | |
3160 | WEIGHT_SET_COUNT(weight, 1ULL); | |
3161 | WEIGHT_SET_INDEX(weight, idx); | |
3162 | } else { | |
3163 | WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx)); | |
3164 | WEIGHT_SET_INDEX(weight, max_idx); | |
3165 | } | |
3166 | WEIGHT_SET_ACTIVE(weight, 0); | |
3167 | ASSERT(!WEIGHT_IS_SPACEBASED(weight)); | |
4e21fd06 DB |
3168 | return (weight); |
3169 | } | |
3170 | ||
3171 | ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); | |
3172 | ||
3173 | /* | |
3174 | * If the metaslab is fully allocated then just make the weight 0. | |
3175 | */ | |
425d3237 | 3176 | if (metaslab_allocated_space(msp) == msp->ms_size) |
4e21fd06 DB |
3177 | return (0); |
3178 | /* | |
3179 | * If the metaslab is already loaded, then use the range tree to | |
3180 | * determine the weight. Otherwise, we rely on the space map information | |
3181 | * to generate the weight. | |
3182 | */ | |
3183 | if (msp->ms_loaded) { | |
3184 | weight = metaslab_weight_from_range_tree(msp); | |
3185 | } else { | |
3186 | weight = metaslab_weight_from_spacemap(msp); | |
3187 | } | |
3188 | ||
3189 | /* | |
3190 | * If the metaslab was active the last time we calculated its weight | |
3191 | * then keep it active. We want to consume the entire region that | |
3192 | * is associated with this weight. | |
3193 | */ | |
3194 | if (msp->ms_activation_weight != 0 && weight != 0) | |
3195 | WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight)); | |
3196 | return (weight); | |
3197 | } | |
3198 | ||
3199 | /* | |
3200 | * Determine if we should attempt to allocate from this metaslab. If the | |
7f319089 SD |
3201 | * metaslab is loaded, then we can determine if the desired allocation |
3202 | * can be satisfied by looking at the size of the maximum free segment | |
3203 | * on that metaslab. Otherwise, we make our decision based on the metaslab's | |
3204 | * weight. For segment-based weighting we can determine the maximum | |
3205 | * allocation based on the index encoded in its value. For space-based | |
3206 | * weights we rely on the entire weight (excluding the weight-type bit). | |
4e21fd06 | 3207 | */ |
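/*
 * Illustrative example (hypothetical numbers): a segment-based weight with
 * index 17 advertises free segments somewhere in [128K, 256K), so an
 * allocation of anything smaller than 256K is worth attempting even though
 * the exact maximum free segment size is unknown.
 */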
65c7cc49 | 3208 | static boolean_t |
c81f1790 | 3209 | metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard) |
4e21fd06 | 3210 | { |
c81f1790 PD |
3211 | /* |
3212 | * If the metaslab is loaded, ms_max_size is definitive and we can use | |
3213 | * the fast check. If it's not, the ms_max_size is a lower bound (once | |
3214 | * set), and we should use the fast check as long as we're not in | |
3215 | * try_hard and it's been less than zfs_metaslab_max_size_cache_sec | |
3216 | * seconds since the metaslab was unloaded. | |
3217 | */ | |
3218 | if (msp->ms_loaded || | |
3219 | (msp->ms_max_size != 0 && !try_hard && gethrtime() < | |
3220 | msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec))) | |
4e21fd06 DB |
3221 | return (msp->ms_max_size >= asize); |
3222 | ||
679b0f2a | 3223 | boolean_t should_allocate; |
4e21fd06 DB |
3224 | if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) { |
3225 | /* | |
3226 | * The metaslab segment weight indicates segments in the | |
3227 | * range [2^i, 2^(i+1)), where i is the index in the weight. | |
3228 | * Since the asize might be in the middle of the range, we | |
3229 | * should attempt the allocation if asize < 2^(i+1). | |
3230 | */ | |
3231 | should_allocate = (asize < | |
3232 | 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1)); | |
3233 | } else { | |
3234 | should_allocate = (asize <= | |
3235 | (msp->ms_weight & ~METASLAB_WEIGHT_TYPE)); | |
3236 | } | |
679b0f2a | 3237 | |
4e21fd06 DB |
3238 | return (should_allocate); |
3239 | } | |
65a91b16 | 3240 | |
4e21fd06 | 3241 | static uint64_t |
65a91b16 | 3242 | metaslab_weight(metaslab_t *msp, boolean_t nodirty) |
4e21fd06 DB |
3243 | { |
3244 | vdev_t *vd = msp->ms_group->mg_vd; | |
3245 | spa_t *spa = vd->vdev_spa; | |
3246 | uint64_t weight; | |
3247 | ||
3248 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
3249 | ||
65a91b16 | 3250 | metaslab_set_fragmentation(msp, nodirty); |
4e21fd06 DB |
3251 | |
3252 | /* | |
c81f1790 | 3253 | * Update the maximum size. If the metaslab is loaded, this will |
4e21fd06 | 3254 | * ensure that we get an accurate maximum size if newly freed space |
c81f1790 PD |
3255 | * has been added back into the free tree. If the metaslab is |
3256 | * unloaded, we check if there's a larger free segment in the | |
3257 | * unflushed frees. This is a lower bound on the largest allocatable | |
3258 | * segment size. Coalescing of adjacent entries may reveal larger | |
3259 | * allocatable segments, but we aren't aware of those until loading | |
3260 | * the space map into a range tree. | |
4e21fd06 | 3261 | */ |
c81f1790 PD |
3262 | if (msp->ms_loaded) { |
3263 | msp->ms_max_size = metaslab_largest_allocatable(msp); | |
3264 | } else { | |
3265 | msp->ms_max_size = MAX(msp->ms_max_size, | |
3266 | metaslab_largest_unflushed_free(msp)); | |
3267 | } | |
4e21fd06 DB |
3268 | |
3269 | /* | |
3270 | * Segment-based weighting requires space map histogram support. | |
3271 | */ | |
3272 | if (zfs_metaslab_segment_weight_enabled && | |
3273 | spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) && | |
3274 | (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size == | |
3275 | sizeof (space_map_phys_t))) { | |
3276 | weight = metaslab_segment_weight(msp); | |
3277 | } else { | |
3278 | weight = metaslab_space_weight(msp); | |
3279 | } | |
93cf2076 | 3280 | return (weight); |
34dc7c2f BB |
3281 | } |
3282 | ||
928e8ad4 SD |
3283 | void |
3284 | metaslab_recalculate_weight_and_sort(metaslab_t *msp) | |
3285 | { | |
679b0f2a PD |
3286 | ASSERT(MUTEX_HELD(&msp->ms_lock)); |
3287 | ||
928e8ad4 SD |
3288 | /* note: we preserve the mask (e.g. indication of primary, etc..) */ |
3289 | uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; | |
3290 | metaslab_group_sort(msp->ms_group, msp, | |
65a91b16 | 3291 | metaslab_weight(msp, B_FALSE) | was_active); |
928e8ad4 SD |
3292 | } |
3293 | ||
34dc7c2f | 3294 | static int |
492f64e9 PD |
3295 | metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp, |
3296 | int allocator, uint64_t activation_weight) | |
3297 | { | |
32d805c3 | 3298 | metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; |
679b0f2a PD |
3299 | ASSERT(MUTEX_HELD(&msp->ms_lock)); |
3300 | ||
492f64e9 PD |
3301 | /* |
3302 | * If we're activating for the claim code, we don't want to actually | |
3303 | * set the metaslab up for a specific allocator. | |
3304 | */ | |
f09fda50 PD |
3305 | if (activation_weight == METASLAB_WEIGHT_CLAIM) { |
3306 | ASSERT0(msp->ms_activation_weight); | |
3307 | msp->ms_activation_weight = msp->ms_weight; | |
3308 | metaslab_group_sort(mg, msp, msp->ms_weight | | |
3309 | activation_weight); | |
492f64e9 | 3310 | return (0); |
f09fda50 | 3311 | } |
679b0f2a | 3312 | |
32d805c3 MA |
3313 | metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ? |
3314 | &mga->mga_primary : &mga->mga_secondary); | |
492f64e9 | 3315 | |
492f64e9 | 3316 | mutex_enter(&mg->mg_lock); |
32d805c3 | 3317 | if (*mspp != NULL) { |
492f64e9 PD |
3318 | mutex_exit(&mg->mg_lock); |
3319 | return (EEXIST); | |
3320 | } | |
3321 | ||
32d805c3 | 3322 | *mspp = msp; |
492f64e9 PD |
3323 | ASSERT3S(msp->ms_allocator, ==, -1); |
3324 | msp->ms_allocator = allocator; | |
3325 | msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY); | |
f09fda50 PD |
3326 | |
3327 | ASSERT0(msp->ms_activation_weight); | |
3328 | msp->ms_activation_weight = msp->ms_weight; | |
3329 | metaslab_group_sort_impl(mg, msp, | |
3330 | msp->ms_weight | activation_weight); | |
492f64e9 PD |
3331 | mutex_exit(&mg->mg_lock); |
3332 | ||
3333 | return (0); | |
3334 | } | |
3335 | ||
3336 | static int | |
3337 | metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight) | |
34dc7c2f | 3338 | { |
34dc7c2f BB |
3339 | ASSERT(MUTEX_HELD(&msp->ms_lock)); |
3340 | ||
679b0f2a PD |
3341 | /* |
3342 | * The current metaslab is already activated for us so there | |
3343 | * is nothing to do. Already activated though, doesn't mean | |
3344 | * that this metaslab is activated for our allocator nor our | |
3345 | * requested activation weight. The metaslab could have started | |
3346 | * as an active one for our allocator but changed allocators | |
3347 | * while we were waiting to grab its ms_lock or we stole it | |
3348 | * [see find_valid_metaslab()]. This means that there is a | |
3349 | * possibility of passivating a metaslab of another allocator | |
3350 | * or from a different activation mask, from this thread. | |
3351 | */ | |
3352 | if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) { | |
3353 | ASSERT(msp->ms_loaded); | |
3354 | return (0); | |
3355 | } | |
3356 | ||
3357 | int error = metaslab_load(msp); | |
3358 | if (error != 0) { | |
3359 | metaslab_group_sort(msp->ms_group, msp, 0); | |
3360 | return (error); | |
3361 | } | |
3362 | ||
3363 | /* | |
3364 | * When entering metaslab_load() we may have dropped the | |
3365 | * ms_lock because we were loading this metaslab, or we | |
3366 | * were waiting for another thread to load it for us. In | |
3367 | * that scenario, we recheck the weight of the metaslab | |
3368 | * to see if it was activated by another thread. | |
3369 | * | |
3370 | * If the metaslab was activated for another allocator or | |
3371 | * it was activated with a different activation weight (e.g. | |
3372 | * we wanted to make it a primary but it was activated as | |
3373 | * secondary) we return error (EBUSY). | |
3374 | * | |
3375 | * If the metaslab was activated for the same allocator | |
3376 | * and requested activation mask, skip activating it. | |
3377 | */ | |
3378 | if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) { | |
3379 | if (msp->ms_allocator != allocator) | |
3380 | return (EBUSY); | |
3381 | ||
3382 | if ((msp->ms_weight & activation_weight) == 0) | |
7ab96299 | 3383 | return (SET_ERROR(EBUSY)); |
9babb374 | 3384 | |
679b0f2a PD |
3385 | EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY), |
3386 | msp->ms_primary); | |
3387 | return (0); | |
34dc7c2f | 3388 | } |
679b0f2a | 3389 | |
fe0ea848 PD |
3390 | /* |
3391 | * If the metaslab has literally 0 space, it will have weight 0. In | |
3392 | * that case, don't bother activating it. This can happen if the | |
3393 | * metaslab had space during find_valid_metaslab, but another thread | |
3394 | * loaded it and used all that space while we were waiting to grab the | |
3395 | * lock. | |
3396 | */ | |
3397 | if (msp->ms_weight == 0) { | |
3398 | ASSERT0(range_tree_space(msp->ms_allocatable)); | |
3399 | return (SET_ERROR(ENOSPC)); | |
3400 | } | |
3401 | ||
679b0f2a PD |
3402 | if ((error = metaslab_activate_allocator(msp->ms_group, msp, |
3403 | allocator, activation_weight)) != 0) { | |
3404 | return (error); | |
3405 | } | |
3406 | ||
93cf2076 | 3407 | ASSERT(msp->ms_loaded); |
34dc7c2f BB |
3408 | ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); |
3409 | ||
3410 | return (0); | |
3411 | } | |
3412 | ||
492f64e9 PD |
3413 | static void |
3414 | metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp, | |
3415 | uint64_t weight) | |
3416 | { | |
3417 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
679b0f2a PD |
3418 | ASSERT(msp->ms_loaded); |
3419 | ||
492f64e9 PD |
3420 | if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) { |
3421 | metaslab_group_sort(mg, msp, weight); | |
3422 | return; | |
3423 | } | |
3424 | ||
3425 | mutex_enter(&mg->mg_lock); | |
3426 | ASSERT3P(msp->ms_group, ==, mg); | |
679b0f2a PD |
3427 | ASSERT3S(0, <=, msp->ms_allocator); |
3428 | ASSERT3U(msp->ms_allocator, <, mg->mg_allocators); | |
3429 | ||
32d805c3 | 3430 | metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator]; |
492f64e9 | 3431 | if (msp->ms_primary) { |
32d805c3 | 3432 | ASSERT3P(mga->mga_primary, ==, msp); |
492f64e9 | 3433 | ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); |
32d805c3 | 3434 | mga->mga_primary = NULL; |
492f64e9 | 3435 | } else { |
32d805c3 | 3436 | ASSERT3P(mga->mga_secondary, ==, msp); |
679b0f2a | 3437 | ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); |
32d805c3 | 3438 | mga->mga_secondary = NULL; |
492f64e9 PD |
3439 | } |
3440 | msp->ms_allocator = -1; | |
3441 | metaslab_group_sort_impl(mg, msp, weight); | |
3442 | mutex_exit(&mg->mg_lock); | |
3443 | } | |
3444 | ||
34dc7c2f | 3445 | static void |
4e21fd06 | 3446 | metaslab_passivate(metaslab_t *msp, uint64_t weight) |
34dc7c2f | 3447 | { |
2a8ba608 | 3448 | uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE; |
4e21fd06 | 3449 | |
34dc7c2f BB |
3450 | /* |
3451 | * If size < SPA_MINBLOCKSIZE, then we will not allocate from | |
3452 | * this metaslab again. In that case, it had better be empty, | |
3453 | * or we would be leaving space on the table. | |
3454 | */ | |
94d49e8f TC |
3455 | ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) || |
3456 | size >= SPA_MINBLOCKSIZE || | |
d2734cce | 3457 | range_tree_space(msp->ms_allocatable) == 0); |
4e21fd06 DB |
3458 | ASSERT0(weight & METASLAB_ACTIVE_MASK); |
3459 | ||
679b0f2a | 3460 | ASSERT(msp->ms_activation_weight != 0); |
4e21fd06 | 3461 | msp->ms_activation_weight = 0; |
492f64e9 | 3462 | metaslab_passivate_allocator(msp->ms_group, msp, weight); |
679b0f2a | 3463 | ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK); |
34dc7c2f BB |
3464 | } |
3465 | ||
4e21fd06 DB |
3466 | /* |
3467 | * Segment-based metaslabs are activated once and remain active until | |
3468 | * we either fail an allocation attempt (similar to space-based metaslabs) | |
3469 | * or have exhausted the free space in zfs_metaslab_switch_threshold | |
3470 | * buckets since the metaslab was activated. This function checks to see | |
e1cfd73f | 3471 | * if we've exhausted the zfs_metaslab_switch_threshold buckets in the |
4e21fd06 DB |
3472 | * metaslab and passivates it proactively. This will allow us to select a |
3473 | * metaslab with a larger contiguous region, if any, remaining within this | |
3474 | * metaslab group. If we're in sync pass > 1, then we continue using this | |
3475 | * metaslab so that we don't dirty more blocks and cause more sync passes.
3476 | */ | |
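/*
 * Illustrative example (hypothetical numbers): with
 * zfs_metaslab_switch_threshold = 2, a metaslab activated at weight
 * index 16 (64K-128K segments) is passivated once the largest free
 * segments it can report drop to index 14 (16K-32K) or below.
 */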
65c7cc49 | 3477 | static void |
4e21fd06 DB |
3478 | metaslab_segment_may_passivate(metaslab_t *msp) |
3479 | { | |
3480 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
4e21fd06 DB |
3481 | |
3482 | if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1) | |
3483 | return; | |
3484 | ||
3485 | /* | |
3486 | * Since we are in the middle of a sync pass, the most accurate | |
3487 | * information that is accessible to us is the in-core range tree | |
3488 | * histogram; calculate the new weight based on that information. | |
3489 | */ | |
1c27024e DB |
3490 | uint64_t weight = metaslab_weight_from_range_tree(msp); |
3491 | int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight); | |
3492 | int current_idx = WEIGHT_GET_INDEX(weight); | |
4e21fd06 DB |
3493 | |
3494 | if (current_idx <= activation_idx - zfs_metaslab_switch_threshold) | |
3495 | metaslab_passivate(msp, weight); | |
3496 | } | |
3497 | ||
93cf2076 GW |
3498 | static void |
3499 | metaslab_preload(void *arg) | |
3500 | { | |
3501 | metaslab_t *msp = arg; | |
f09fda50 PD |
3502 | metaslab_class_t *mc = msp->ms_group->mg_class; |
3503 | spa_t *spa = mc->mc_spa; | |
1cd77734 | 3504 | fstrans_cookie_t cookie = spl_fstrans_mark(); |
93cf2076 | 3505 | |
080b3100 GW |
3506 | ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock)); |
3507 | ||
93cf2076 | 3508 | mutex_enter(&msp->ms_lock); |
b194fab0 | 3509 | (void) metaslab_load(msp); |
f09fda50 | 3510 | metaslab_set_selected_txg(msp, spa_syncing_txg(spa)); |
93cf2076 | 3511 | mutex_exit(&msp->ms_lock); |
1cd77734 | 3512 | spl_fstrans_unmark(cookie); |
93cf2076 GW |
3513 | } |
3514 | ||
3515 | static void | |
3516 | metaslab_group_preload(metaslab_group_t *mg) | |
3517 | { | |
3518 | spa_t *spa = mg->mg_vd->vdev_spa; | |
3519 | metaslab_t *msp; | |
3520 | avl_tree_t *t = &mg->mg_metaslab_tree; | |
3521 | int m = 0; | |
3522 | ||
3523 | if (spa_shutting_down(spa) || !metaslab_preload_enabled) { | |
c5528b9b | 3524 | taskq_wait_outstanding(mg->mg_taskq, 0); |
93cf2076 GW |
3525 | return; |
3526 | } | |
93cf2076 | 3527 | |
080b3100 | 3528 | mutex_enter(&mg->mg_lock); |
a1d477c2 | 3529 | |
93cf2076 | 3530 | /* |
080b3100 | 3531 | * Load the next potential metaslabs |
93cf2076 | 3532 | */ |
4e21fd06 | 3533 | for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) { |
a1d477c2 MA |
3534 | ASSERT3P(msp->ms_group, ==, mg); |
3535 | ||
f3a7f661 GW |
3536 | /* |
3537 | * We preload only the maximum number of metaslabs specified | |
3538 | * by metaslab_preload_limit. If a metaslab is being forced | |
3539 | * to condense then we preload it too. This will ensure | |
3540 | * that force condensing happens in the next txg. | |
3541 | */ | |
3542 | if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) { | |
f3a7f661 GW |
3543 | continue; |
3544 | } | |
93cf2076 GW |
3545 | |
3546 | VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload, | |
48d3eb40 | 3547 | msp, TQ_SLEEP) != TASKQID_INVALID); |
93cf2076 GW |
3548 | } |
3549 | mutex_exit(&mg->mg_lock); | |
3550 | } | |
3551 | ||
e51be066 | 3552 | /* |
93e28d66 SD |
3553 | * Determine if the space map's on-disk footprint is past our tolerance for |
3554 | * inefficiency. We would like to use the following criteria to make our | |
3555 | * decision: | |
e51be066 | 3556 | * |
93e28d66 SD |
3557 | * 1. Do not condense if the size of the space map object would dramatically |
3558 | * increase as a result of writing out the free space range tree. | |
e51be066 | 3559 | * |
93e28d66 SD |
3560 | * 2. Condense if the on-disk space map representation is at least
3561 | * zfs_condense_pct/100 times the size of the optimal representation | |
3562 | * (e.g. with zfs_condense_pct = 110 and an optimal size of 1MB, condense once the on-disk size reaches 1.1MB).
e51be066 | 3563 | * |
93e28d66 SD |
3564 | * 3. Do not condense if the on-disk size of the space map does not actually |
3565 | * decrease. | |
b02fe35d | 3566 | * |
b02fe35d AR |
3567 | * Unfortunately, we cannot compute the on-disk size of the space map in this |
3568 | * context because we cannot accurately compute the effects of compression, etc. | |
3569 | * Instead, we apply the heuristic described in the block comment for | |
3570 | * zfs_metaslab_condense_block_threshold - we only condense if the space used | |
3571 | * is greater than a threshold number of blocks. | |
e51be066 GW |
3572 | */ |
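/*
 * Worked example (hypothetical numbers): with zfs_condense_pct = 110, a
 * space map whose on-disk length is 2MB while its estimated optimal
 * representation is 1MB meets criterion 2 (2MB >= 1.1MB); it is then
 * condensed only if 2MB also exceeds zfs_metaslab_condense_block_threshold
 * times the larger of the space map block size and the vdev block size.
 */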
3573 | static boolean_t | |
3574 | metaslab_should_condense(metaslab_t *msp) | |
3575 | { | |
93cf2076 | 3576 | space_map_t *sm = msp->ms_sm; |
d2734cce | 3577 | vdev_t *vd = msp->ms_group->mg_vd; |
e506a0ce | 3578 | uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift; |
e51be066 GW |
3579 | |
3580 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
93cf2076 | 3581 | ASSERT(msp->ms_loaded); |
93e28d66 SD |
3582 | ASSERT(sm != NULL); |
3583 | ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1); | |
d2734cce SD |
3584 | |
3585 | /* | |
4d044c4c SD |
3586 | * We always condense metaslabs that are empty and metaslabs for |
3587 | * which a condense request has been made. | |
e51be066 | 3588 | */ |
ca577779 | 3589 | if (range_tree_numsegs(msp->ms_allocatable) == 0 || |
4d044c4c | 3590 | msp->ms_condense_wanted) |
e51be066 GW |
3591 | return (B_TRUE); |
3592 | ||
93e28d66 SD |
3593 | uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize); |
3594 | uint64_t object_size = space_map_length(sm); | |
4d044c4c SD |
3595 | uint64_t optimal_size = space_map_estimate_optimal_size(sm, |
3596 | msp->ms_allocatable, SM_NO_VDEVID); | |
b02fe35d | 3597 | |
4d044c4c | 3598 | return (object_size >= (optimal_size * zfs_condense_pct / 100) && |
b02fe35d | 3599 | object_size > zfs_metaslab_condense_block_threshold * record_size); |
e51be066 GW |
3600 | } |
3601 | ||
3602 | /* | |
3603 | * Condense the on-disk space map representation to its minimized form. | |
93e28d66 SD |
3604 | * The minimized form consists of a small number of allocations followed |
3605 | * by the entries of the free range tree (ms_allocatable). The condensed | |
3606 | * spacemap contains all the entries of previous TXGs (including those in | |
3607 | * the pool-wide log spacemaps; thus this is effectively a superset of | |
3608 | * metaslab_flush()), but this TXG's entries still need to be written. | |
e51be066 GW |
3609 | */ |
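/*
 * Sketch of the resulting on-disk record stream (illustrative): a single
 * ALLOC record covering the whole metaslab, followed by FREE records for
 * every segment in ms_allocatable and in the condense tree built below.
 */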
3610 | static void | |
93e28d66 | 3611 | metaslab_condense(metaslab_t *msp, dmu_tx_t *tx) |
e51be066 | 3612 | { |
93cf2076 GW |
3613 | range_tree_t *condense_tree; |
3614 | space_map_t *sm = msp->ms_sm; | |
93e28d66 SD |
3615 | uint64_t txg = dmu_tx_get_txg(tx); |
3616 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
e51be066 GW |
3617 | |
3618 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
93cf2076 | 3619 | ASSERT(msp->ms_loaded); |
93e28d66 | 3620 | ASSERT(msp->ms_sm != NULL); |
e51be066 | 3621 | |
93e28d66 SD |
3622 | /* |
3623 | * In order to condense the space map, we need to change it so it | |
3624 | * only describes which segments are currently allocated and free. | |
3625 | * | |
3626 | * All the current free space resides in the ms_allocatable, all | |
3627 | * the ms_defer trees, and all the ms_allocating trees. We ignore | |
3628 | * ms_freed because it is empty, as we're in sync pass 1. We
3629 | * ignore ms_freeing because these changes are not yet reflected | |
3630 | * in the spacemap (they will be written later this txg). | |
3631 | * | |
3632 | * So to truncate the space map to represent all the entries of | |
3633 | * previous TXGs we do the following: | |
3634 | * | |
ca577779 PD |
3635 | * 1] We create a range tree (condense tree) that is 100% empty. |
3636 | * 2] We add to it all segments found in the ms_defer trees | |
93e28d66 SD |
3637 | * as those segments are marked as free in the original space |
3638 | * map. We do the same with the ms_allocating trees for the same | |
ca577779 | 3639 | * reason. Adding these segments should be a relatively |
93e28d66 SD |
3640 | * inexpensive operation since we expect these trees to have a |
3641 | * small number of nodes. | |
ca577779 PD |
3642 | * 3] We vacate any unflushed allocs, since they are not frees we |
3643 | * need to add to the condense tree. Then we vacate any | |
3644 | * unflushed frees as they should already be part of ms_allocatable. | |
3645 | * 4] At this point, we would ideally like to add all segments | |
93e28d66 SD |
3646 | * in the ms_allocatable tree from the condense tree. This way |
3647 | * we would write all the entries of the condense tree as the | |
dd4bc569 | 3648 | * condensed space map, which would only contain freed |
ca577779 | 3649 | * segments with everything else assumed to be allocated. |
93e28d66 SD |
3650 | * |
3651 | * Doing so can be prohibitively expensive as ms_allocatable can | |
ca577779 PD |
3652 | * be large, and therefore computationally expensive to add to |
3653 | * the condense_tree. Instead we first sync out an entry marking | |
3654 | * everything as allocated, then the condense_tree and then the | |
3655 | * ms_allocatable, in the condensed space map. While this is not | |
3656 | * optimal, it is typically close to optimal and more importantly | |
3657 | * much cheaper to compute. | |
93e28d66 SD |
3658 | * |
3659 | * 5] Finally, as both of the unflushed trees were written to our | |
3660 | * new and condensed metaslab space map, we basically flushed | |
3661 | * all the unflushed changes to disk, thus we call | |
3662 | * metaslab_flush_update(). | |
3663 | */ | |
3664 | ASSERT3U(spa_sync_pass(spa), ==, 1); | |
3665 | ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */ | |
f3a7f661 | 3666 | |
a887d653 | 3667 | zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, " |
8e739b2c RE |
3668 | "spa %s, smp size %llu, segments %llu, forcing condense=%s", |
3669 | (u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp, | |
3670 | (u_longlong_t)msp->ms_group->mg_vd->vdev_id, | |
3671 | spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm), | |
3672 | (u_longlong_t)range_tree_numsegs(msp->ms_allocatable), | |
f3a7f661 GW |
3673 | msp->ms_condense_wanted ? "TRUE" : "FALSE"); |
3674 | ||
3675 | msp->ms_condense_wanted = B_FALSE; | |
e51be066 | 3676 | |
ca577779 PD |
3677 | range_seg_type_t type; |
3678 | uint64_t shift, start; | |
3679 | type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp, | |
3680 | &start, &shift); | |
3681 | ||
3682 | condense_tree = range_tree_create(NULL, type, NULL, start, shift); | |
e51be066 | 3683 | |
1c27024e | 3684 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { |
d2734cce | 3685 | range_tree_walk(msp->ms_defer[t], |
ca577779 | 3686 | range_tree_add, condense_tree); |
93cf2076 | 3687 | } |
e51be066 | 3688 | |
93e28d66 | 3689 | for (int t = 0; t < TXG_CONCURRENT_STATES; t++) { |
d2734cce | 3690 | range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK], |
ca577779 | 3691 | range_tree_add, condense_tree); |
93cf2076 | 3692 | } |
e51be066 | 3693 | |
93e28d66 SD |
3694 | ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, |
3695 | metaslab_unflushed_changes_memused(msp)); | |
3696 | spa->spa_unflushed_stats.sus_memused -= | |
3697 | metaslab_unflushed_changes_memused(msp); | |
3698 | range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL); | |
3699 | range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL); | |
3700 | ||
e51be066 | 3701 | /* |
93e28d66 SD |
3702 | * We're about to drop the metaslab's lock thus allowing other |
3703 | * consumers to change its contents. Set the metaslab's ms_condensing
3704 | * flag to ensure that allocations on this metaslab do not occur | |
3705 | * while we're in the middle of committing it to disk. This is only | |
3706 | * critical for ms_allocatable as all other range trees use per TXG | |
e51be066 GW |
3707 | * views of their content. |
3708 | */ | |
93cf2076 | 3709 | msp->ms_condensing = B_TRUE; |
e51be066 GW |
3710 | |
3711 | mutex_exit(&msp->ms_lock); | |
93e28d66 SD |
3712 | uint64_t object = space_map_object(msp->ms_sm); |
3713 | space_map_truncate(sm, | |
3714 | spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ? | |
3715 | zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx); | |
3716 | ||
3717 | /* | |
3718 | * space_map_truncate() may have reallocated the spacemap object. | |
3719 | * If so, update the vdev_ms_array. | |
3720 | */ | |
3721 | if (space_map_object(msp->ms_sm) != object) { | |
3722 | object = space_map_object(msp->ms_sm); | |
3723 | dmu_write(spa->spa_meta_objset, | |
3724 | msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) * | |
3725 | msp->ms_id, sizeof (uint64_t), &object, tx); | |
3726 | } | |
e51be066 GW |
3727 | |
3728 | /* | |
93e28d66 SD |
3729 | * Note: |
3730 | * When the log space map feature is enabled, each space map will | |
3731 | * always have ALLOCS followed by FREES for each sync pass. This is | |
3732 | * typically true even when the log space map feature is disabled, | |
3733 | * except from the case where a metaslab goes through metaslab_sync() | |
3734 | * and gets condensed. In that case the metaslab's space map will have | |
3735 | * ALLOCS followed by FREES (due to condensing) followed by ALLOCS | |
3736 | * followed by FREES (due to space_map_write() in metaslab_sync()) for | |
3737 | * sync pass 1. | |
e51be066 | 3738 | */ |
ca577779 PD |
3739 | range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start, |
3740 | shift); | |
3741 | range_tree_add(tmp_tree, msp->ms_start, msp->ms_size); | |
3742 | space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx); | |
93e28d66 | 3743 | space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx); |
ca577779 | 3744 | space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx); |
93e28d66 | 3745 | |
93cf2076 GW |
3746 | range_tree_vacate(condense_tree, NULL, NULL); |
3747 | range_tree_destroy(condense_tree); | |
ca577779 PD |
3748 | range_tree_vacate(tmp_tree, NULL, NULL); |
3749 | range_tree_destroy(tmp_tree); | |
a1d477c2 | 3750 | mutex_enter(&msp->ms_lock); |
93e28d66 | 3751 | |
93cf2076 | 3752 | msp->ms_condensing = B_FALSE; |
93e28d66 SD |
3753 | metaslab_flush_update(msp, tx); |
3754 | } | |
3755 | ||
93e28d66 | 3756 | static void |
600a02b8 | 3757 | metaslab_unflushed_add(metaslab_t *msp, dmu_tx_t *tx) |
93e28d66 | 3758 | { |
600a02b8 AM |
3759 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; |
3760 | ASSERT(spa_syncing_log_sm(spa) != NULL); | |
3761 | ASSERT(msp->ms_sm != NULL); | |
93e28d66 SD |
3762 | ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); |
3763 | ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); | |
3764 | ||
600a02b8 AM |
3765 | mutex_enter(&spa->spa_flushed_ms_lock); |
3766 | metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx); | |
3767 | metaslab_set_unflushed_dirty(msp, B_TRUE); | |
3768 | avl_add(&spa->spa_metaslabs_by_flushed, msp); | |
3769 | mutex_exit(&spa->spa_flushed_ms_lock); | |
93e28d66 | 3770 | |
600a02b8 AM |
3771 | spa_log_sm_increment_current_mscount(spa); |
3772 | spa_log_summary_add_flushed_metaslab(spa, B_TRUE); | |
3773 | } | |
93e28d66 | 3774 | |
600a02b8 AM |
3775 | void |
3776 | metaslab_unflushed_bump(metaslab_t *msp, dmu_tx_t *tx, boolean_t dirty) | |
3777 | { | |
3778 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
93e28d66 SD |
3779 | ASSERT(spa_syncing_log_sm(spa) != NULL); |
3780 | ASSERT(msp->ms_sm != NULL); | |
3781 | ASSERT(metaslab_unflushed_txg(msp) != 0); | |
3782 | ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp); | |
600a02b8 AM |
3783 | ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); |
3784 | ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); | |
93e28d66 SD |
3785 | |
3786 | VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa)); | |
3787 | ||
3788 | /* update metaslab's position in our flushing tree */ | |
3789 | uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp); | |
600a02b8 | 3790 | boolean_t ms_prev_flushed_dirty = metaslab_unflushed_dirty(msp); |
93e28d66 SD |
3791 | mutex_enter(&spa->spa_flushed_ms_lock); |
3792 | avl_remove(&spa->spa_metaslabs_by_flushed, msp); | |
3793 | metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx); | |
600a02b8 | 3794 | metaslab_set_unflushed_dirty(msp, dirty); |
93e28d66 SD |
3795 | avl_add(&spa->spa_metaslabs_by_flushed, msp); |
3796 | mutex_exit(&spa->spa_flushed_ms_lock); | |
3797 | ||
3798 | /* update metaslab counts of spa_log_sm_t nodes */ | |
3799 | spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg); | |
3800 | spa_log_sm_increment_current_mscount(spa); | |
3801 | ||
600a02b8 AM |
3802 | /* update log space map summary */ |
3803 | spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg, | |
3804 | ms_prev_flushed_dirty); | |
3805 | spa_log_summary_add_flushed_metaslab(spa, dirty); | |
3806 | ||
93e28d66 | 3807 | /* cleanup obsolete logs if any */ |
93e28d66 | 3808 | spa_cleanup_old_sm_logs(spa, tx); |
600a02b8 | 3809 | } |
93e28d66 | 3810 | |
600a02b8 AM |
3811 | /* |
3812 | * Called when the metaslab has been flushed (its own spacemap now reflects | |
3813 | * all the contents of the pool-wide spacemap log). Updates the metaslab's | |
3814 | * metadata and any pool-wide related log space map data (e.g. summary, | |
3815 | * obsolete logs, etc..) to reflect that. | |
3816 | */ | |
3817 | static void | |
3818 | metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx) | |
3819 | { | |
3820 | metaslab_group_t *mg = msp->ms_group; | |
3821 | spa_t *spa = mg->mg_vd->vdev_spa; | |
3822 | ||
3823 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
3824 | ||
3825 | ASSERT3U(spa_sync_pass(spa), ==, 1); | |
3826 | ||
3827 | /* | |
3828 | * Just because a metaslab got flushed, that doesn't mean that | |
3829 | * it will pass through metaslab_sync_done(). Thus, make sure to | |
3830 | * update ms_synced_length here in case it doesn't. | |
3831 | */ | |
3832 | msp->ms_synced_length = space_map_length(msp->ms_sm); | |
3833 | ||
3834 | /* | |
3835 | * We may end up here from metaslab_condense() without the | |
3836 | * feature being active. In that case this is a no-op. | |
3837 | */ | |
3838 | if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP) || | |
3839 | metaslab_unflushed_txg(msp) == 0) | |
3840 | return; | |
3841 | ||
3842 | metaslab_unflushed_bump(msp, tx, B_FALSE); | |
93e28d66 SD |
3843 | } |
3844 | ||
3845 | boolean_t | |
3846 | metaslab_flush(metaslab_t *msp, dmu_tx_t *tx) | |
3847 | { | |
3848 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
3849 | ||
3850 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
3851 | ASSERT3U(spa_sync_pass(spa), ==, 1); | |
3852 | ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); | |
3853 | ||
3854 | ASSERT(msp->ms_sm != NULL); | |
3855 | ASSERT(metaslab_unflushed_txg(msp) != 0); | |
3856 | ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL); | |
3857 | ||
3858 | /* | |
3859 | * There is nothing wrong with flushing the same metaslab twice, as | |
3860 | * this codepath should work in that case. However, the current | |
3861 | * flushing scheme makes sure to avoid this situation as we would be | |
3862 | * making all these calls without having anything meaningful to write | |
3863 | * to disk. We assert this behavior here. | |
3864 | */ | |
3865 | ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx)); | |
3866 | ||
3867 | /* | |
3868 | * We cannot flush while loading, because then we would | |
3869 | * not load the ms_unflushed_{allocs,frees}. | |
3870 | */ | |
3871 | if (msp->ms_loading) | |
3872 | return (B_FALSE); | |
3873 | ||
3874 | metaslab_verify_space(msp, dmu_tx_get_txg(tx)); | |
3875 | metaslab_verify_weight_and_frag(msp); | |
3876 | ||
3877 | /* | |
3878 | * Metaslab condensing is effectively flushing. Therefore if the | |
3879 | * metaslab can be condensed we can just condense it instead of | |
3880 | * flushing it. | |
3881 | * | |
3882 | * Note that metaslab_condense() does call metaslab_flush_update() | |
3883 | * so we can just return immediately after condensing. We also | |
3884 | * don't need to care about setting ms_flushing or broadcasting | |
3885 | * ms_flush_cv, even if we temporarily drop the ms_lock in | |
3886 | * metaslab_condense(), as the metaslab is already loaded. | |
3887 | */ | |
3888 | if (msp->ms_loaded && metaslab_should_condense(msp)) { | |
3889 | metaslab_group_t *mg = msp->ms_group; | |
3890 | ||
3891 | /* | |
3892 | * For all histogram operations below refer to the | |
3893 | * comments of metaslab_sync() where we follow a | |
3894 | * similar procedure. | |
3895 | */ | |
3896 | metaslab_group_histogram_verify(mg); | |
3897 | metaslab_class_histogram_verify(mg->mg_class); | |
3898 | metaslab_group_histogram_remove(mg, msp); | |
3899 | ||
3900 | metaslab_condense(msp, tx); | |
3901 | ||
3902 | space_map_histogram_clear(msp->ms_sm); | |
3903 | space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx); | |
3904 | ASSERT(range_tree_is_empty(msp->ms_freed)); | |
3905 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { | |
3906 | space_map_histogram_add(msp->ms_sm, | |
3907 | msp->ms_defer[t], tx); | |
3908 | } | |
3909 | metaslab_aux_histograms_update(msp); | |
3910 | ||
3911 | metaslab_group_histogram_add(mg, msp); | |
3912 | metaslab_group_histogram_verify(mg); | |
3913 | metaslab_class_histogram_verify(mg->mg_class); | |
3914 | ||
3915 | metaslab_verify_space(msp, dmu_tx_get_txg(tx)); | |
3916 | ||
3917 | /* | |
3918 | * Since we recreated the histogram (and potentially | |
3919 | * the ms_sm too while condensing) ensure that the | |
3920 | * weight is updated too because we are not guaranteed | |
3921 | * that this metaslab is dirty and will go through | |
3922 | * metaslab_sync_done(). | |
3923 | */ | |
3924 | metaslab_recalculate_weight_and_sort(msp); | |
3925 | return (B_TRUE); | |
3926 | } | |
3927 | ||
3928 | msp->ms_flushing = B_TRUE; | |
3929 | uint64_t sm_len_before = space_map_length(msp->ms_sm); | |
3930 | ||
3931 | mutex_exit(&msp->ms_lock); | |
3932 | space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC, | |
3933 | SM_NO_VDEVID, tx); | |
3934 | space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE, | |
3935 | SM_NO_VDEVID, tx); | |
3936 | mutex_enter(&msp->ms_lock); | |
3937 | ||
3938 | uint64_t sm_len_after = space_map_length(msp->ms_sm); | |
3939 | if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) { | |
3940 | zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, " | |
3941 | "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, " | |
8e739b2c RE |
3942 | "appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx), |
3943 | spa_name(spa), | |
3944 | (u_longlong_t)msp->ms_group->mg_vd->vdev_id, | |
3945 | (u_longlong_t)msp->ms_id, | |
3946 | (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs), | |
3947 | (u_longlong_t)range_tree_space(msp->ms_unflushed_frees), | |
3948 | (u_longlong_t)(sm_len_after - sm_len_before)); | |
93e28d66 SD |
3949 | } |
3950 | ||
3951 | ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, | |
3952 | metaslab_unflushed_changes_memused(msp)); | |
3953 | spa->spa_unflushed_stats.sus_memused -= | |
3954 | metaslab_unflushed_changes_memused(msp); | |
3955 | range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL); | |
3956 | range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL); | |
3957 | ||
3958 | metaslab_verify_space(msp, dmu_tx_get_txg(tx)); | |
3959 | metaslab_verify_weight_and_frag(msp); | |
3960 | ||
3961 | metaslab_flush_update(msp, tx); | |
3962 | ||
3963 | metaslab_verify_space(msp, dmu_tx_get_txg(tx)); | |
3964 | metaslab_verify_weight_and_frag(msp); | |
3965 | ||
3966 | msp->ms_flushing = B_FALSE; | |
3967 | cv_broadcast(&msp->ms_flush_cv); | |
3968 | return (B_TRUE); | |
e51be066 GW |
3969 | } |
3970 | ||
34dc7c2f BB |
3971 | /* |
3972 | * Write a metaslab to disk in the context of the specified transaction group. | |
3973 | */ | |
3974 | void | |
3975 | metaslab_sync(metaslab_t *msp, uint64_t txg) | |
3976 | { | |
93cf2076 GW |
3977 | metaslab_group_t *mg = msp->ms_group; |
3978 | vdev_t *vd = mg->mg_vd; | |
34dc7c2f | 3979 | spa_t *spa = vd->vdev_spa; |
428870ff | 3980 | objset_t *mos = spa_meta_objset(spa); |
d2734cce | 3981 | range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK]; |
34dc7c2f | 3982 | dmu_tx_t *tx; |
34dc7c2f | 3983 | |
428870ff BB |
3984 | ASSERT(!vd->vdev_ishole); |
3985 | ||
e51be066 GW |
3986 | /* |
3987 | * This metaslab has just been added so there's no work to do now. | |
3988 | */ | |
793c958f SD |
3989 | if (msp->ms_new) { |
3990 | ASSERT0(range_tree_space(alloctree)); | |
3991 | ASSERT0(range_tree_space(msp->ms_freeing)); | |
3992 | ASSERT0(range_tree_space(msp->ms_freed)); | |
3993 | ASSERT0(range_tree_space(msp->ms_checkpointing)); | |
3994 | ASSERT0(range_tree_space(msp->ms_trim)); | |
e51be066 GW |
3995 | return; |
3996 | } | |
3997 | ||
f3a7f661 | 3998 | /* |
d2734cce SD |
3999 | * Normally, we don't want to process a metaslab if there are no |
4000 | * allocations or frees to perform. However, if the metaslab is being | |
475aa97c PD |
4001 | * forced to condense, is loaded, and we're not beyond the final | |
4002 | * dirty txg, we need to let it through. Not condensing beyond the | |
4003 | * final dirty txg prevents an issue where metaslabs that need to be | |
4004 | * condensed but were loaded for other reasons could cause a panic | |
4005 | * here. By only checking the txg in that branch of the conditional, | |
4006 | * we preserve the utility of the VERIFY statements in all other | |
4007 | * cases. | |
f3a7f661 | 4008 | */ |
d2734cce SD |
4009 | if (range_tree_is_empty(alloctree) && |
4010 | range_tree_is_empty(msp->ms_freeing) && | |
4011 | range_tree_is_empty(msp->ms_checkpointing) && | |
475aa97c PD |
4012 | !(msp->ms_loaded && msp->ms_condense_wanted && |
4013 | txg <= spa_final_dirty_txg(spa))) | |
428870ff | 4014 | return; |
34dc7c2f | 4015 | |
3b7f360c | 4016 | |
ca577779 | 4017 | VERIFY3U(txg, <=, spa_final_dirty_txg(spa)); |
3b7f360c | 4018 | |
34dc7c2f | 4019 | /* |
425d3237 SD |
4020 | * The only state that can actually be changing concurrently |
4021 | * with metaslab_sync() is the metaslab's ms_allocatable. No | |
4022 | * other thread can be modifying this txg's alloc, freeing, | |
d2734cce | 4023 | * freed, or space_map_phys_t. We drop ms_lock whenever we |
425d3237 SD |
4024 | * could call into the DMU, because the DMU can call down to |
4025 | * us (e.g. via zio_free()) at any time. | |
a1d477c2 MA |
4026 | * |
4027 | * The spa_vdev_remove_thread() can be reading metaslab state | |
425d3237 SD |
4028 | * concurrently, and it is locked out by the ms_sync_lock. |
4029 | * Note that the ms_lock is insufficient for this, because it | |
4030 | * is dropped by space_map_write(). | |
34dc7c2f | 4031 | */ |
428870ff | 4032 | tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); |
34dc7c2f | 4033 | |
93e28d66 SD |
4034 | /* |
4035 | * Generate a log space map if one doesn't exist already. | |
4036 | */ | |
4037 | spa_generate_syncing_log_sm(spa, tx); | |
93cf2076 | 4038 | |
93e28d66 SD |
4039 | if (msp->ms_sm == NULL) { |
4040 | uint64_t new_object = space_map_alloc(mos, | |
4041 | spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ? | |
4042 | zfs_metaslab_sm_blksz_with_log : | |
4043 | zfs_metaslab_sm_blksz_no_log, tx); | |
93cf2076 GW |
4044 | VERIFY3U(new_object, !=, 0); |
4045 | ||
93e28d66 SD |
4046 | dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) * |
4047 | msp->ms_id, sizeof (uint64_t), &new_object, tx); | |
4048 | ||
93cf2076 | 4049 | VERIFY0(space_map_open(&msp->ms_sm, mos, new_object, |
a1d477c2 | 4050 | msp->ms_start, msp->ms_size, vd->vdev_ashift)); |
93cf2076 | 4051 | ASSERT(msp->ms_sm != NULL); |
93e28d66 SD |
4052 | |
4053 | ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); | |
4054 | ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); | |
425d3237 | 4055 | ASSERT0(metaslab_allocated_space(msp)); |
34dc7c2f BB |
4056 | } |
4057 | ||
d2734cce SD |
4058 | if (!range_tree_is_empty(msp->ms_checkpointing) && |
4059 | vd->vdev_checkpoint_sm == NULL) { | |
4060 | ASSERT(spa_has_checkpoint(spa)); | |
4061 | ||
4062 | uint64_t new_object = space_map_alloc(mos, | |
93e28d66 | 4063 | zfs_vdev_standard_sm_blksz, tx); |
d2734cce SD |
4064 | VERIFY3U(new_object, !=, 0); |
4065 | ||
4066 | VERIFY0(space_map_open(&vd->vdev_checkpoint_sm, | |
4067 | mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift)); | |
4068 | ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); | |
4069 | ||
4070 | /* | |
4071 | * We save the space map object as an entry in vdev_top_zap | |
4072 | * so it can be retrieved when the pool is reopened after an | |
4073 | * export or through zdb. | |
4074 | */ | |
4075 | VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, | |
4076 | vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, | |
4077 | sizeof (new_object), 1, &new_object, tx)); | |
4078 | } | |
4079 | ||
a1d477c2 | 4080 | mutex_enter(&msp->ms_sync_lock); |
428870ff BB |
4081 | mutex_enter(&msp->ms_lock); |
4082 | ||
96358617 | 4083 | /* |
4e21fd06 DB |
4084 | * Note: metaslab_condense() clears the space map's histogram. |
4085 | * Therefore we must verify and remove this histogram before | |
96358617 MA |
4086 | * condensing. |
4087 | */ | |
4088 | metaslab_group_histogram_verify(mg); | |
4089 | metaslab_class_histogram_verify(mg->mg_class); | |
4090 | metaslab_group_histogram_remove(mg, msp); | |
4091 | ||
93e28d66 SD |
4092 | if (spa->spa_sync_pass == 1 && msp->ms_loaded && |
4093 | metaslab_should_condense(msp)) | |
4094 | metaslab_condense(msp, tx); | |
4095 | ||
4096 | /* | |
4097 | * We'll be going to disk to sync our space accounting, thus we | |
4098 | * drop the ms_lock during that time so allocations coming from | |
4099 | * open-context (ZIL) for future TXGs do not block. | |
4100 | */ | |
4101 | mutex_exit(&msp->ms_lock); | |
4102 | space_map_t *log_sm = spa_syncing_log_sm(spa); | |
4103 | if (log_sm != NULL) { | |
4104 | ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP)); | |
600a02b8 AM |
4105 | if (metaslab_unflushed_txg(msp) == 0) |
4106 | metaslab_unflushed_add(msp, tx); | |
4107 | else if (!metaslab_unflushed_dirty(msp)) | |
4108 | metaslab_unflushed_bump(msp, tx, B_TRUE); | |
93e28d66 SD |
4109 | |
4110 | space_map_write(log_sm, alloctree, SM_ALLOC, | |
4111 | vd->vdev_id, tx); | |
4112 | space_map_write(log_sm, msp->ms_freeing, SM_FREE, | |
4113 | vd->vdev_id, tx); | |
4114 | mutex_enter(&msp->ms_lock); | |
4115 | ||
4116 | ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, | |
4117 | metaslab_unflushed_changes_memused(msp)); | |
4118 | spa->spa_unflushed_stats.sus_memused -= | |
4119 | metaslab_unflushed_changes_memused(msp); | |
4120 | range_tree_remove_xor_add(alloctree, | |
4121 | msp->ms_unflushed_frees, msp->ms_unflushed_allocs); | |
4122 | range_tree_remove_xor_add(msp->ms_freeing, | |
4123 | msp->ms_unflushed_allocs, msp->ms_unflushed_frees); | |
4124 | spa->spa_unflushed_stats.sus_memused += | |
4125 | metaslab_unflushed_changes_memused(msp); | |
e51be066 | 4126 | } else { |
93e28d66 SD |
4127 | ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP)); |
4128 | ||
4d044c4c SD |
4129 | space_map_write(msp->ms_sm, alloctree, SM_ALLOC, |
4130 | SM_NO_VDEVID, tx); | |
4131 | space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE, | |
4132 | SM_NO_VDEVID, tx); | |
a1d477c2 | 4133 | mutex_enter(&msp->ms_lock); |
e51be066 | 4134 | } |
428870ff | 4135 | |
425d3237 SD |
4136 | msp->ms_allocated_space += range_tree_space(alloctree); |
4137 | ASSERT3U(msp->ms_allocated_space, >=, | |
4138 | range_tree_space(msp->ms_freeing)); | |
4139 | msp->ms_allocated_space -= range_tree_space(msp->ms_freeing); | |
4140 | ||
d2734cce SD |
4141 | if (!range_tree_is_empty(msp->ms_checkpointing)) { |
4142 | ASSERT(spa_has_checkpoint(spa)); | |
4143 | ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); | |
4144 | ||
4145 | /* | |
4146 | * Since we are doing writes to disk and the ms_checkpointing | |
4147 | * tree won't be changing during that time, we drop the | |
93e28d66 SD |
4148 | * ms_lock while writing to the checkpoint space map, for the |
4149 | * same reason mentioned above. | |
d2734cce SD |
4150 | */ |
4151 | mutex_exit(&msp->ms_lock); | |
4152 | space_map_write(vd->vdev_checkpoint_sm, | |
4d044c4c | 4153 | msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx); |
d2734cce | 4154 | mutex_enter(&msp->ms_lock); |
d2734cce SD |
4155 | |
4156 | spa->spa_checkpoint_info.sci_dspace += | |
4157 | range_tree_space(msp->ms_checkpointing); | |
4158 | vd->vdev_stat.vs_checkpoint_space += | |
4159 | range_tree_space(msp->ms_checkpointing); | |
4160 | ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==, | |
425d3237 | 4161 | -space_map_allocated(vd->vdev_checkpoint_sm)); |
d2734cce SD |
4162 | |
4163 | range_tree_vacate(msp->ms_checkpointing, NULL, NULL); | |
4164 | } | |
4165 | ||
93cf2076 GW |
4166 | if (msp->ms_loaded) { |
4167 | /* | |
a1d477c2 | 4168 | * When the space map is loaded, we have an accurate |
93cf2076 GW |
4169 | * histogram in the range tree. This gives us an opportunity |
4170 | * to bring the space map's histogram up-to-date so we clear | |
4171 | * it first before updating it. | |
4172 | */ | |
4173 | space_map_histogram_clear(msp->ms_sm); | |
d2734cce | 4174 | space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx); |
4e21fd06 DB |
4175 | |
4176 | /* | |
4177 | * Since we've cleared the histogram we need to add back | |
4178 | * any free space that has already been processed, plus | |
4179 | * any deferred space. This allows the on-disk histogram | |
4180 | * to accurately reflect all free space even if some space | |
4181 | * is not yet available for allocation (i.e. deferred). | |
4182 | */ | |
d2734cce | 4183 | space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx); |
4e21fd06 | 4184 | |
93cf2076 | 4185 | /* |
4e21fd06 DB |
4186 | * Add back any deferred free space that has not been |
4187 | * added back into the in-core free tree yet. This will | |
4188 | * ensure that we don't end up with a space map histogram | |
4189 | * that is completely empty unless the metaslab is fully | |
4190 | * allocated. | |
93cf2076 | 4191 | */ |
1c27024e | 4192 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { |
4e21fd06 | 4193 | space_map_histogram_add(msp->ms_sm, |
d2734cce | 4194 | msp->ms_defer[t], tx); |
4e21fd06 | 4195 | } |
93cf2076 | 4196 | } |
4e21fd06 DB |
4197 | |
4198 | /* | |
4199 | * Always add the free space from this sync pass to the space | |
4200 | * map histogram. We want to make sure that the on-disk histogram | |
4201 | * accounts for all free space. If the space map is not loaded, | |
4202 | * then we will lose some accuracy but will correct it the next | |
4203 | * time we load the space map. | |
4204 | */ | |
d2734cce | 4205 | space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx); |
928e8ad4 | 4206 | metaslab_aux_histograms_update(msp); |
4e21fd06 | 4207 | |
f3a7f661 GW |
4208 | metaslab_group_histogram_add(mg, msp); |
4209 | metaslab_group_histogram_verify(mg); | |
4210 | metaslab_class_histogram_verify(mg->mg_class); | |
34dc7c2f | 4211 | |
e51be066 | 4212 | /* |
93cf2076 | 4213 | * For sync pass 1, we avoid traversing this txg's free range tree |
425d3237 SD |
4214 | * and instead will just swap the pointers for freeing and freed. |
4215 | * We can safely do this since the freed_tree is guaranteed to be | |
4216 | * empty on the initial pass. | |
93e28d66 SD |
4217 | * |
4218 | * Keep in mind that even if we are currently using a log spacemap | |
4219 | * we want current frees to end up in the ms_allocatable (but not | |
4220 | * get appended to the ms_sm) so their ranges can be reused as usual. | |
e51be066 GW |
4221 | */ |
4222 | if (spa_sync_pass(spa) == 1) { | |
d2734cce | 4223 | range_tree_swap(&msp->ms_freeing, &msp->ms_freed); |
425d3237 | 4224 | ASSERT0(msp->ms_allocated_this_txg); |
e51be066 | 4225 | } else { |
d2734cce SD |
4226 | range_tree_vacate(msp->ms_freeing, |
4227 | range_tree_add, msp->ms_freed); | |
34dc7c2f | 4228 | } |
425d3237 | 4229 | msp->ms_allocated_this_txg += range_tree_space(alloctree); |
f3a7f661 | 4230 | range_tree_vacate(alloctree, NULL, NULL); |
34dc7c2f | 4231 | |
d2734cce SD |
4232 | ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK])); |
4233 | ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg) | |
4234 | & TXG_MASK])); | |
4235 | ASSERT0(range_tree_space(msp->ms_freeing)); | |
4236 | ASSERT0(range_tree_space(msp->ms_checkpointing)); | |
34dc7c2f BB |
4237 | |
4238 | mutex_exit(&msp->ms_lock); | |
4239 | ||
93e28d66 SD |
4240 | /* |
4241 | * Verify that the space map object ID has been recorded in the | |
4242 | * vdev_ms_array. | |
4243 | */ | |
4244 | uint64_t object; | |
4245 | VERIFY0(dmu_read(mos, vd->vdev_ms_array, | |
4246 | msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0)); | |
4247 | VERIFY3U(object, ==, space_map_object(msp->ms_sm)); | |
4248 | ||
a1d477c2 | 4249 | mutex_exit(&msp->ms_sync_lock); |
34dc7c2f BB |
4250 | dmu_tx_commit(tx); |
4251 | } | |
4252 | ||
f09fda50 PD |
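/*
 * Evict an idle metaslab from memory: verify that no allocations are
 * pending for upcoming txgs, passivate the metaslab if it is still
 * active for an allocator, and unload it unless the
 * metaslab_debug_unload tunable keeps metaslabs resident. Metaslabs
 * that are not loaded or are currently disabled are skipped.
 */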
4253 | static void |
4254 | metaslab_evict(metaslab_t *msp, uint64_t txg) | |
893a6d62 | 4255 | { |
f09fda50 PD |
4256 | if (!msp->ms_loaded || msp->ms_disabled != 0) |
4257 | return; | |
893a6d62 | 4258 | |
f09fda50 PD |
4259 | for (int t = 1; t < TXG_CONCURRENT_STATES; t++) { |
4260 | VERIFY0(range_tree_space( | |
4261 | msp->ms_allocating[(txg + t) & TXG_MASK])); | |
893a6d62 | 4262 | } |
f09fda50 PD |
4263 | if (msp->ms_allocator != -1) |
4264 | metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK); | |
4265 | ||
4266 | if (!metaslab_debug_unload) | |
4267 | metaslab_unload(msp); | |
893a6d62 PD |
4268 | } |
4269 | ||
34dc7c2f BB |
4270 | /* |
4271 | * Called after a transaction group has completely synced to mark | |
4272 | * all of the metaslab's free space as usable. | |
4273 | */ | |
4274 | void | |
4275 | metaslab_sync_done(metaslab_t *msp, uint64_t txg) | |
4276 | { | |
34dc7c2f BB |
4277 | metaslab_group_t *mg = msp->ms_group; |
4278 | vdev_t *vd = mg->mg_vd; | |
4e21fd06 | 4279 | spa_t *spa = vd->vdev_spa; |
93cf2076 | 4280 | range_tree_t **defer_tree; |
428870ff | 4281 | int64_t alloc_delta, defer_delta; |
4e21fd06 | 4282 | boolean_t defer_allowed = B_TRUE; |
428870ff BB |
4283 | |
4284 | ASSERT(!vd->vdev_ishole); | |
34dc7c2f BB |
4285 | |
4286 | mutex_enter(&msp->ms_lock); | |
4287 | ||
793c958f SD |
4288 | if (msp->ms_new) { |
4289 | /* this is a new metaslab, add its capacity to the vdev */ | |
cc99f275 | 4290 | metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size); |
793c958f SD |
4291 | |
4292 | /* there should be no allocations nor frees at this point */ | |
4293 | VERIFY0(msp->ms_allocated_this_txg); | |
4294 | VERIFY0(range_tree_space(msp->ms_freed)); | |
34dc7c2f | 4295 | } |
793c958f | 4296 | |
d2734cce SD |
4297 | ASSERT0(range_tree_space(msp->ms_freeing)); |
4298 | ASSERT0(range_tree_space(msp->ms_checkpointing)); | |
34dc7c2f | 4299 | |
d2734cce | 4300 | defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE]; |
93cf2076 | 4301 | |
1c27024e | 4302 | uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) - |
4e21fd06 | 4303 | metaslab_class_get_alloc(spa_normal_class(spa)); |
a1d477c2 | 4304 | if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) { |
4e21fd06 DB |
4305 | defer_allowed = B_FALSE; |
4306 | } | |
4307 | ||
4308 | defer_delta = 0; | |
425d3237 SD |
4309 | alloc_delta = msp->ms_allocated_this_txg - |
4310 | range_tree_space(msp->ms_freed); | |
93e28d66 | 4311 | |
4e21fd06 | 4312 | if (defer_allowed) { |
d2734cce | 4313 | defer_delta = range_tree_space(msp->ms_freed) - |
4e21fd06 DB |
4314 | range_tree_space(*defer_tree); |
4315 | } else { | |
4316 | defer_delta -= range_tree_space(*defer_tree); | |
4317 | } | |
cc99f275 DB |
4318 | metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta, |
4319 | defer_delta, 0); | |
34dc7c2f | 4320 | |
93e28d66 SD |
4321 | if (spa_syncing_log_sm(spa) == NULL) { |
4322 | /* | |
4323 | * If there's a metaslab_load() in progress and we don't have | |
4324 | * a log space map, it means that we probably wrote to the | |
4325 | * metaslab's space map. If this is the case, we need to | |
4326 | * make sure that we wait for the load to complete so that we | |
4327 | * have a consistent view of the in-core state of the metaslab. | |
4328 | */ | |
4329 | metaslab_load_wait(msp); | |
4330 | } else { | |
4331 | ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); | |
4332 | } | |
c2e42f9d | 4333 | |
1b939560 BB |
4334 | /* |
4335 | * When auto-trimming is enabled, free ranges which are added to | |
4336 | * ms_allocatable are also added to ms_trim. The ms_trim tree is | |
4337 | * periodically consumed by the vdev_autotrim_thread() which issues | |
4338 | * trims for all ranges and then vacates the tree. The ms_trim tree | |
4339 | * can be discarded at any time with the sole consequence of recent | |
4340 | * frees not being trimmed. | |
4341 | */ | |
4342 | if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) { | |
4343 | range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim); | |
4344 | if (!defer_allowed) { | |
4345 | range_tree_walk(msp->ms_freed, range_tree_add, | |
4346 | msp->ms_trim); | |
4347 | } | |
4348 | } else { | |
4349 | range_tree_vacate(msp->ms_trim, NULL, NULL); | |
4350 | } | |
4351 | ||
c2e42f9d | 4352 | /* |
93cf2076 | 4353 | * Move the frees from the defer_tree back to the free |
d2734cce SD |
4354 | * range tree (if it's loaded). Swap the freed_tree and |
4355 | * the defer_tree -- this is safe to do because we've | |
4356 | * just emptied out the defer_tree. | |
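* (As a result, space freed in txg T only becomes allocatable again
* once txg T + TXG_DEFER_SIZE has synced, assuming deferral was
* allowed.)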
c2e42f9d | 4357 | */ |
93cf2076 | 4358 | range_tree_vacate(*defer_tree, |
d2734cce | 4359 | msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable); |
4e21fd06 | 4360 | if (defer_allowed) { |
d2734cce | 4361 | range_tree_swap(&msp->ms_freed, defer_tree); |
4e21fd06 | 4362 | } else { |
d2734cce SD |
4363 | range_tree_vacate(msp->ms_freed, |
4364 | msp->ms_loaded ? range_tree_add : NULL, | |
4365 | msp->ms_allocatable); | |
4e21fd06 | 4366 | } |
425d3237 SD |
4367 | |
4368 | msp->ms_synced_length = space_map_length(msp->ms_sm); | |
34dc7c2f | 4369 | |
428870ff BB |
4370 | msp->ms_deferspace += defer_delta; |
4371 | ASSERT3S(msp->ms_deferspace, >=, 0); | |
93cf2076 | 4372 | ASSERT3S(msp->ms_deferspace, <=, msp->ms_size); |
428870ff BB |
4373 | if (msp->ms_deferspace != 0) { |
4374 | /* | |
4375 | * Keep syncing this metaslab until all deferred frees | |
4376 | * are back in circulation. | |
4377 | */ | |
4378 | vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); | |
4379 | } | |
928e8ad4 | 4380 | metaslab_aux_histograms_update_done(msp, defer_allowed); |
428870ff | 4381 | |
492f64e9 PD |
4382 | if (msp->ms_new) { |
4383 | msp->ms_new = B_FALSE; | |
4384 | mutex_enter(&mg->mg_lock); | |
4385 | mg->mg_ms_ready++; | |
4386 | mutex_exit(&mg->mg_lock); | |
4387 | } | |
928e8ad4 | 4388 | |
4e21fd06 | 4389 | /* |
928e8ad4 SD |
4390 | * Re-sort metaslab within its group now that we've adjusted |
4391 | * its allocatable space. | |
4e21fd06 | 4392 | */ |
928e8ad4 | 4393 | metaslab_recalculate_weight_and_sort(msp); |
4e21fd06 | 4394 | |
d2734cce SD |
4395 | ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK])); |
4396 | ASSERT0(range_tree_space(msp->ms_freeing)); | |
4397 | ASSERT0(range_tree_space(msp->ms_freed)); | |
4398 | ASSERT0(range_tree_space(msp->ms_checkpointing)); | |
f09fda50 | 4399 | msp->ms_allocating_total -= msp->ms_allocated_this_txg; |
425d3237 | 4400 | msp->ms_allocated_this_txg = 0; |
34dc7c2f BB |
4401 | mutex_exit(&msp->ms_lock); |
4402 | } | |
4403 | ||
428870ff BB |
4404 | void |
4405 | metaslab_sync_reassess(metaslab_group_t *mg) | |
4406 | { | |
a1d477c2 MA |
4407 | spa_t *spa = mg->mg_class->mc_spa; |
4408 | ||
4409 | spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); | |
1be627f5 | 4410 | metaslab_group_alloc_update(mg); |
f3a7f661 | 4411 | mg->mg_fragmentation = metaslab_group_fragmentation(mg); |
6d974228 | 4412 | |
428870ff | 4413 | /* |
a1d477c2 MA |
4414 | * Preload the next potential metaslabs but only on active |
4415 | * metaslab groups. We can get into a state where the metaslab | |
4416 | * is no longer active since we dirty metaslabs as we remove a | |
4417 | * device, thus potentially making the metaslab group eligible | |
4418 | * for preloading. | |
428870ff | 4419 | */ |
a1d477c2 MA |
4420 | if (mg->mg_activation_count > 0) { |
4421 | metaslab_group_preload(mg); | |
4422 | } | |
4423 | spa_config_exit(spa, SCL_ALLOC, FTAG); | |
428870ff BB |
4424 | } |
4425 | ||
cc99f275 DB |
4426 | /* |
4427 | * When writing a ditto block (i.e. more than one DVA for a given BP) on | |
4428 | * the same vdev as an existing DVA of this BP, then try to allocate it | |
4429 | * on a different metaslab than existing DVAs (i.e. a unique metaslab). | |
4430 | */ | |
4431 | static boolean_t | |
4432 | metaslab_is_unique(metaslab_t *msp, dva_t *dva) | |
34dc7c2f | 4433 | { |
cc99f275 DB |
4434 | uint64_t dva_ms_id; |
4435 | ||
4436 | if (DVA_GET_ASIZE(dva) == 0) | |
4437 | return (B_TRUE); | |
34dc7c2f BB |
4438 | |
4439 | if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva)) | |
cc99f275 | 4440 | return (B_TRUE); |
34dc7c2f | 4441 | |
cc99f275 DB |
4442 | dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift; |
4443 | ||
4444 | return (msp->ms_id != dva_ms_id); | |
34dc7c2f BB |
4445 | } |
4446 | ||
4e21fd06 DB |
4447 | /* |
4448 | * ========================================================================== | |
4449 | * Metaslab allocation tracing facility | |
4450 | * ========================================================================== | |
4451 | */ | |
4e21fd06 DB |
4452 | |
4453 | /* | |
4454 | * Add an allocation trace element to the allocation tracing list. | |
4455 | */ | |
4456 | static void | |
4457 | metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg, | |
492f64e9 PD |
4458 | metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset, |
4459 | int allocator) | |
4e21fd06 DB |
4460 | { |
4461 | metaslab_alloc_trace_t *mat; | |
4462 | ||
4463 | if (!metaslab_trace_enabled) | |
4464 | return; | |
4465 | ||
4466 | /* | |
4467 | * When the tracing list reaches its maximum we remove | |
4468 | * the second element in the list before adding a new one. | |
4469 | * By removing the second element we preserve the original | |
4470 | * entry as a clue to what allocation steps have already been | |
4471 | * performed. | |
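* For example, with a limit of four entries, adding E to the list
* [A, B, C, D] yields [A, C, D, E]; the oldest entry (A) is always
* retained.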
4472 | */ | |
4473 | if (zal->zal_size == metaslab_trace_max_entries) { | |
4474 | metaslab_alloc_trace_t *mat_next; | |
6d8da841 | 4475 | #ifdef ZFS_DEBUG |
4e21fd06 DB |
4476 | panic("too many entries in allocation list"); |
4477 | #endif | |
ca577779 | 4478 | METASLABSTAT_BUMP(metaslabstat_trace_over_limit); |
4e21fd06 DB |
4479 | zal->zal_size--; |
4480 | mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list)); | |
4481 | list_remove(&zal->zal_list, mat_next); | |
4482 | kmem_cache_free(metaslab_alloc_trace_cache, mat_next); | |
4483 | } | |
4484 | ||
4485 | mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP); | |
4486 | list_link_init(&mat->mat_list_node); | |
4487 | mat->mat_mg = mg; | |
4488 | mat->mat_msp = msp; | |
4489 | mat->mat_size = psize; | |
4490 | mat->mat_dva_id = dva_id; | |
4491 | mat->mat_offset = offset; | |
4492 | mat->mat_weight = 0; | |
492f64e9 | 4493 | mat->mat_allocator = allocator; |
4e21fd06 DB |
4494 | |
4495 | if (msp != NULL) | |
4496 | mat->mat_weight = msp->ms_weight; | |
4497 | ||
4498 | /* | |
4499 | * The list is part of the zio so locking is not required. Only | |
4500 | * a single thread will perform allocations for a given zio. | |
4501 | */ | |
4502 | list_insert_tail(&zal->zal_list, mat); | |
4503 | zal->zal_size++; | |
4504 | ||
4505 | ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries); | |
4506 | } | |
4507 | ||
4508 | void | |
4509 | metaslab_trace_init(zio_alloc_list_t *zal) | |
4510 | { | |
4511 | list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t), | |
4512 | offsetof(metaslab_alloc_trace_t, mat_list_node)); | |
4513 | zal->zal_size = 0; | |
4514 | } | |
4515 | ||
4516 | void | |
4517 | metaslab_trace_fini(zio_alloc_list_t *zal) | |
4518 | { | |
4519 | metaslab_alloc_trace_t *mat; | |
4520 | ||
4521 | while ((mat = list_remove_head(&zal->zal_list)) != NULL) | |
4522 | kmem_cache_free(metaslab_alloc_trace_cache, mat); | |
4523 | list_destroy(&zal->zal_list); | |
4524 | zal->zal_size = 0; | |
4525 | } | |
4e21fd06 | 4526 | |
3dfb57a3 DB |
4527 | /* |
4528 | * ========================================================================== | |
4529 | * Metaslab block operations | |
4530 | * ========================================================================== | |
4531 | */ | |
4532 | ||
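/*
 * Account for one more queued asynchronous allocation against this
 * vdev's metaslab group on behalf of the given allocator. This is a
 * no-op unless the allocation is asynchronous and the class's
 * allocation throttle is enabled.
 */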
4533 | static void | |
dd66857d AZ |
4534 | metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, const void *tag, |
4535 | int flags, int allocator) | |
3dfb57a3 | 4536 | { |
3dfb57a3 | 4537 | if (!(flags & METASLAB_ASYNC_ALLOC) || |
492f64e9 | 4538 | (flags & METASLAB_DONT_THROTTLE)) |
3dfb57a3 DB |
4539 | return; |
4540 | ||
1c27024e | 4541 | metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; |
3dfb57a3 DB |
4542 | if (!mg->mg_class->mc_alloc_throttle_enabled) |
4543 | return; | |
4544 | ||
32d805c3 MA |
4545 | metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; |
4546 | (void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag); | |
492f64e9 PD |
4547 | } |
4548 | ||
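/*
 * Each successful compare-and-swap below raises this allocator's
 * effective queue depth limit by one, up to mg_max_alloc_queue_depth,
 * and grants the owning metaslab class one additional allocation
 * slot. The loop only retries when another thread raced us on the
 * same counter.
 */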
4549 | static void | |
4550 | metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator) | |
4551 | { | |
32d805c3 | 4552 | metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; |
f8020c93 AM |
4553 | metaslab_class_allocator_t *mca = |
4554 | &mg->mg_class->mc_allocator[allocator]; | |
492f64e9 | 4555 | uint64_t max = mg->mg_max_alloc_queue_depth; |
32d805c3 | 4556 | uint64_t cur = mga->mga_cur_max_alloc_queue_depth; |
492f64e9 | 4557 | while (cur < max) { |
32d805c3 | 4558 | if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth, |
492f64e9 | 4559 | cur, cur + 1) == cur) { |
f8020c93 | 4560 | atomic_inc_64(&mca->mca_alloc_max_slots); |
492f64e9 PD |
4561 | return; |
4562 | } | |
32d805c3 | 4563 | cur = mga->mga_cur_max_alloc_queue_depth; |
492f64e9 | 4564 | } |
3dfb57a3 DB |
4565 | } |
4566 | ||
4567 | void | |
dd66857d AZ |
4568 | metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, const void *tag, |
4569 | int flags, int allocator, boolean_t io_complete) | |
3dfb57a3 | 4570 | { |
3dfb57a3 | 4571 | if (!(flags & METASLAB_ASYNC_ALLOC) || |
492f64e9 | 4572 | (flags & METASLAB_DONT_THROTTLE)) |
3dfb57a3 DB |
4573 | return; |
4574 | ||
1c27024e | 4575 | metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; |
3dfb57a3 DB |
4576 | if (!mg->mg_class->mc_alloc_throttle_enabled) |
4577 | return; | |
4578 | ||
32d805c3 MA |
4579 | metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; |
4580 | (void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag); | |
492f64e9 PD |
4581 | if (io_complete) |
4582 | metaslab_group_increment_qdepth(mg, allocator); | |
3dfb57a3 DB |
4583 | } |
4584 | ||
4585 | void | |
dd66857d | 4586 | metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, const void *tag, |
492f64e9 | 4587 | int allocator) |
3dfb57a3 DB |
4588 | { |
4589 | #ifdef ZFS_DEBUG | |
4590 | const dva_t *dva = bp->blk_dva; | |
4591 | int ndvas = BP_GET_NDVAS(bp); | |
3dfb57a3 | 4592 | |
1c27024e | 4593 | for (int d = 0; d < ndvas; d++) { |
3dfb57a3 DB |
4594 | uint64_t vdev = DVA_GET_VDEV(&dva[d]); |
4595 | metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; | |
32d805c3 MA |
4596 | metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; |
4597 | VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag)); | |
3dfb57a3 DB |
4598 | } |
4599 | #endif | |
4600 | } | |
4601 | ||
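/*
 * Carve `size' bytes out of a loaded metaslab using the class's
 * allocator op. On success the range is removed from ms_allocatable,
 * cleared from ms_trim, and recorded in this txg's ms_allocating
 * tree; the metaslab is dirtied if this is its first allocation for
 * the txg, and ms_max_size is refreshed either way. Returns the start
 * offset, or -1ULL if no suitable segment was found.
 */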
34dc7c2f | 4602 | static uint64_t |
4e21fd06 DB |
4603 | metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg) |
4604 | { | |
4605 | uint64_t start; | |
d2734cce | 4606 | range_tree_t *rt = msp->ms_allocatable; |
4e21fd06 DB |
4607 | metaslab_class_t *mc = msp->ms_group->mg_class; |
4608 | ||
93e28d66 | 4609 | ASSERT(MUTEX_HELD(&msp->ms_lock)); |
4e21fd06 | 4610 | VERIFY(!msp->ms_condensing); |
1b939560 | 4611 | VERIFY0(msp->ms_disabled); |
4e21fd06 DB |
4612 | |
4613 | start = mc->mc_ops->msop_alloc(msp, size); | |
4614 | if (start != -1ULL) { | |
4615 | metaslab_group_t *mg = msp->ms_group; | |
4616 | vdev_t *vd = mg->mg_vd; | |
4617 | ||
4618 | VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift)); | |
4619 | VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); | |
4620 | VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size); | |
4621 | range_tree_remove(rt, start, size); | |
1b939560 | 4622 | range_tree_clear(msp->ms_trim, start, size); |
4e21fd06 | 4623 | |
d2734cce | 4624 | if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) |
4e21fd06 DB |
4625 | vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg); |
4626 | ||
d2734cce | 4627 | range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size); |
f09fda50 | 4628 | msp->ms_allocating_total += size; |
4e21fd06 DB |
4629 | |
4630 | /* Track the last successful allocation */ | |
4631 | msp->ms_alloc_txg = txg; | |
4632 | metaslab_verify_space(msp, txg); | |
4633 | } | |
4634 | ||
4635 | /* | |
4636 | * Now that we've attempted the allocation we need to update the | |
4637 | * metaslab's maximum block size since it may have changed. | |
4638 | */ | |
c81f1790 | 4639 | msp->ms_max_size = metaslab_largest_allocatable(msp); |
4e21fd06 DB |
4640 | return (start); |
4641 | } | |
4642 | ||
492f64e9 PD |
4643 | /* |
4644 | * Find the metaslab with the highest weight that is less than what we've | |
4645 | * already tried. In the common case, this means that we will examine each | |
4646 | * metaslab at most once. Note that concurrent callers could reorder metaslabs | |
4647 | * by activation/passivation once we have dropped the mg_lock. If a metaslab is | |
4648 | * activated by another thread, and we fail to allocate from the metaslab we | |
4649 | * have selected, we may not try the newly-activated metaslab, and instead | |
4650 | * activate another metaslab. This is not optimal, but generally does not cause | |
4651 | * any problems (a possible exception being if every metaslab is completely full | |
e1cfd73f | 4652 | * except for the newly-activated metaslab which we fail to examine). |
492f64e9 PD |
4653 | */ |
4654 | static metaslab_t * | |
4655 | find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight, | |
cc99f275 | 4656 | dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator, |
c81f1790 PD |
4657 | boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search, |
4658 | boolean_t *was_active) | |
492f64e9 PD |
4659 | { |
4660 | avl_index_t idx; | |
4661 | avl_tree_t *t = &mg->mg_metaslab_tree; | |
4662 | metaslab_t *msp = avl_find(t, search, &idx); | |
4663 | if (msp == NULL) | |
4664 | msp = avl_nearest(t, idx, AVL_AFTER); | |
4665 | ||
fdc2d303 | 4666 | uint_t tries = 0; |
492f64e9 PD |
4667 | for (; msp != NULL; msp = AVL_NEXT(t, msp)) { |
4668 | int i; | |
be5c6d96 MA |
4669 | |
4670 | if (!try_hard && tries > zfs_metaslab_find_max_tries) { | |
4671 | METASLABSTAT_BUMP(metaslabstat_too_many_tries); | |
4672 | return (NULL); | |
4673 | } | |
4674 | tries++; | |
4675 | ||
c81f1790 | 4676 | if (!metaslab_should_allocate(msp, asize, try_hard)) { |
492f64e9 PD |
4677 | metaslab_trace_add(zal, mg, msp, asize, d, |
4678 | TRACE_TOO_SMALL, allocator); | |
4679 | continue; | |
4680 | } | |
4681 | ||
4682 | /* | |
1b939560 BB |
4683 | * If the selected metaslab is condensing or disabled, |
4684 | * skip it. | |
492f64e9 | 4685 | */ |
1b939560 | 4686 | if (msp->ms_condensing || msp->ms_disabled > 0) |
492f64e9 PD |
4687 | continue; |
4688 | ||
4689 | *was_active = msp->ms_allocator != -1; | |
4690 | /* | |
4691 | * If we're activating as primary, this is our first allocation | |
4692 | * from this disk, so we don't need to check how close we are. | |
4693 | * If the metaslab under consideration was already active, | |
4694 | * we're getting desperate enough to steal another allocator's | |
4695 | * metaslab, so we still don't care about distances. | |
4696 | */ | |
4697 | if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active) | |
4698 | break; | |
4699 | ||
492f64e9 | 4700 | for (i = 0; i < d; i++) { |
cc99f275 DB |
4701 | if (want_unique && |
4702 | !metaslab_is_unique(msp, &dva[i])) | |
4703 | break; /* try another metaslab */ | |
492f64e9 PD |
4704 | } |
4705 | if (i == d) | |
4706 | break; | |
4707 | } | |
4708 | ||
4709 | if (msp != NULL) { | |
4710 | search->ms_weight = msp->ms_weight; | |
4711 | search->ms_start = msp->ms_start + 1; | |
4712 | search->ms_allocator = msp->ms_allocator; | |
4713 | search->ms_primary = msp->ms_primary; | |
4714 | } | |
4715 | return (msp); | |
4716 | } | |
4717 | ||
65c7cc49 | 4718 | static void |
679b0f2a PD |
4719 | metaslab_active_mask_verify(metaslab_t *msp) |
4720 | { | |
4721 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
4722 | ||
4723 | if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) | |
4724 | return; | |
4725 | ||
4726 | if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) | |
4727 | return; | |
4728 | ||
4729 | if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) { | |
4730 | VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); | |
4731 | VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM); | |
4732 | VERIFY3S(msp->ms_allocator, !=, -1); | |
4733 | VERIFY(msp->ms_primary); | |
4734 | return; | |
4735 | } | |
4736 | ||
4737 | if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) { | |
4738 | VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); | |
4739 | VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM); | |
4740 | VERIFY3S(msp->ms_allocator, !=, -1); | |
4741 | VERIFY(!msp->ms_primary); | |
4742 | return; | |
4743 | } | |
4744 | ||
4745 | if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) { | |
4746 | VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); | |
4747 | VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); | |
4748 | VERIFY3S(msp->ms_allocator, ==, -1); | |
4749 | return; | |
4750 | } | |
4751 | } | |
4752 | ||
4e21fd06 DB |
4753 | static uint64_t |
4754 | metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, | |
c81f1790 PD |
4755 | uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d, |
4756 | int allocator, boolean_t try_hard) | |
34dc7c2f BB |
4757 | { |
4758 | metaslab_t *msp = NULL; | |
4759 | uint64_t offset = -1ULL; | |
34dc7c2f | 4760 | |
679b0f2a | 4761 | uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY; |
492f64e9 PD |
4762 | for (int i = 0; i < d; i++) { |
4763 | if (activation_weight == METASLAB_WEIGHT_PRIMARY && | |
4764 | DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { | |
34dc7c2f | 4765 | activation_weight = METASLAB_WEIGHT_SECONDARY; |
492f64e9 PD |
4766 | } else if (activation_weight == METASLAB_WEIGHT_SECONDARY && |
4767 | DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { | |
e38afd34 | 4768 | activation_weight = METASLAB_WEIGHT_CLAIM; |
9babb374 BB |
4769 | break; |
4770 | } | |
4771 | } | |
34dc7c2f | 4772 | |
492f64e9 PD |
4773 | /* |
4774 | * If we don't have enough metaslabs active to fill the entire array, we | |
4775 | * just use the 0th slot. | |
4776 | */ | |
e38afd34 | 4777 | if (mg->mg_ms_ready < mg->mg_allocators * 3) |
492f64e9 | 4778 | allocator = 0; |
32d805c3 | 4779 | metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; |
492f64e9 PD |
4780 | |
4781 | ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2); | |
4782 | ||
1c27024e | 4783 | metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP); |
4e21fd06 DB |
4784 | search->ms_weight = UINT64_MAX; |
4785 | search->ms_start = 0; | |
492f64e9 PD |
4786 | /* |
4787 | * At the end of the metaslab tree are the already-active metaslabs, | |
4788 | * first the primaries, then the secondaries. When we resume searching | |
4789 | * through the tree, we need to consider ms_allocator and ms_primary so | |
4790 | * we start in the location right after where we left off, and don't | |
4791 | * accidentally loop forever considering the same metaslabs. | |
4792 | */ | |
4793 | search->ms_allocator = -1; | |
4794 | search->ms_primary = B_TRUE; | |
34dc7c2f | 4795 | for (;;) { |
492f64e9 | 4796 | boolean_t was_active = B_FALSE; |
9babb374 | 4797 | |
34dc7c2f | 4798 | mutex_enter(&mg->mg_lock); |
4e21fd06 | 4799 | |
492f64e9 | 4800 | if (activation_weight == METASLAB_WEIGHT_PRIMARY && |
32d805c3 MA |
4801 | mga->mga_primary != NULL) { |
4802 | msp = mga->mga_primary; | |
679b0f2a PD |
4803 | |
4804 | /* | |
4805 | * Even though we don't hold the ms_lock for the | |
4806 | * primary metaslab, those fields should not | |
e1cfd73f | 4807 | * change while we hold the mg_lock. Thus it is |
679b0f2a PD |
4808 | * safe to make assertions on them. |
4809 | */ | |
4810 | ASSERT(msp->ms_primary); | |
4811 | ASSERT3S(msp->ms_allocator, ==, allocator); | |
4812 | ASSERT(msp->ms_loaded); | |
4813 | ||
492f64e9 | 4814 | was_active = B_TRUE; |
f09fda50 | 4815 | ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); |
492f64e9 | 4816 | } else if (activation_weight == METASLAB_WEIGHT_SECONDARY && |
32d805c3 MA |
4817 | mga->mga_secondary != NULL) { |
4818 | msp = mga->mga_secondary; | |
679b0f2a PD |
4819 | |
4820 | /* | |
4821 | * See comment above about the similar assertions | |
4822 | * for the primary metaslab. | |
4823 | */ | |
4824 | ASSERT(!msp->ms_primary); | |
4825 | ASSERT3S(msp->ms_allocator, ==, allocator); | |
4826 | ASSERT(msp->ms_loaded); | |
4827 | ||
492f64e9 | 4828 | was_active = B_TRUE; |
f09fda50 | 4829 | ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); |
492f64e9 PD |
4830 | } else { |
4831 | msp = find_valid_metaslab(mg, activation_weight, dva, d, | |
c81f1790 PD |
4832 | want_unique, asize, allocator, try_hard, zal, |
4833 | search, &was_active); | |
34dc7c2f | 4834 | } |
492f64e9 | 4835 | |
34dc7c2f | 4836 | mutex_exit(&mg->mg_lock); |
4e21fd06 DB |
4837 | if (msp == NULL) { |
4838 | kmem_free(search, sizeof (*search)); | |
34dc7c2f | 4839 | return (-1ULL); |
4e21fd06 | 4840 | } |
ac72fac3 | 4841 | mutex_enter(&msp->ms_lock); |
679b0f2a PD |
4842 | |
4843 | metaslab_active_mask_verify(msp); | |
4844 | ||
4845 | /* | |
4846 | * This code is disabled because of issues with | |
4847 | * tracepoints in non-gpl kernel modules. | |
4848 | */ | |
4849 | #if 0 | |
4850 | DTRACE_PROBE3(ms__activation__attempt, | |
4851 | metaslab_t *, msp, uint64_t, activation_weight, | |
4852 | boolean_t, was_active); | |
4853 | #endif | |
4854 | ||
34dc7c2f BB |
4855 | /* |
4856 | * Ensure that the metaslab we have selected is still | |
4857 | * capable of handling our request. It's possible that | |
4858 | * another thread may have changed the weight while we | |
4e21fd06 | 4859 | * were blocked on the metaslab lock. We check the |
f09fda50 | 4860 | * active status first to see if we need to set_selected_txg on |
4e21fd06 | 4861 | * a new metaslab. |
34dc7c2f | 4862 | */ |
4e21fd06 | 4863 | if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) { |
679b0f2a | 4864 | ASSERT3S(msp->ms_allocator, ==, -1); |
34dc7c2f BB |
4865 | mutex_exit(&msp->ms_lock); |
4866 | continue; | |
4867 | } | |
4868 | ||
492f64e9 | 4869 | /* |
679b0f2a PD |
4870 | * If the metaslab was activated for another allocator |
4871 | * while we were waiting in the ms_lock above, or it's | |
4872 | * a primary and we're seeking a secondary (or vice versa), | |
4873 | * we go back and select a new metaslab. | |
492f64e9 PD |
4874 | */ |
4875 | if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) && | |
4876 | (msp->ms_allocator != -1) && | |
4877 | (msp->ms_allocator != allocator || ((activation_weight == | |
4878 | METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) { | |
679b0f2a PD |
4879 | ASSERT(msp->ms_loaded); |
4880 | ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) || | |
4881 | msp->ms_allocator != -1); | |
492f64e9 PD |
4882 | mutex_exit(&msp->ms_lock); |
4883 | continue; | |
4884 | } | |
4885 | ||
679b0f2a PD |
4886 | /* |
4887 | * This metaslab was used for claiming regions allocated | |
4888 | * by the ZIL during pool import. Once these regions are | |
4889 | * claimed we don't need to keep the CLAIM bit set | |
4890 | * anymore. Passivate this metaslab to zero its activation | |
4891 | * mask. | |
4892 | */ | |
e38afd34 | 4893 | if (msp->ms_weight & METASLAB_WEIGHT_CLAIM && |
4894 | activation_weight != METASLAB_WEIGHT_CLAIM) { | |
679b0f2a PD |
4895 | ASSERT(msp->ms_loaded); |
4896 | ASSERT3S(msp->ms_allocator, ==, -1); | |
492f64e9 PD |
4897 | metaslab_passivate(msp, msp->ms_weight & |
4898 | ~METASLAB_WEIGHT_CLAIM); | |
34dc7c2f BB |
4899 | mutex_exit(&msp->ms_lock); |
4900 | continue; | |
4901 | } | |
4902 | ||
f09fda50 | 4903 | metaslab_set_selected_txg(msp, txg); |
679b0f2a PD |
4904 | |
4905 | int activation_error = | |
4906 | metaslab_activate(msp, allocator, activation_weight); | |
4907 | metaslab_active_mask_verify(msp); | |
4908 | ||
4909 | /* | |
4910 | * If the metaslab was activated by another thread for | |
4911 | * another allocator or activation_weight (EBUSY), or it | |
4912 | * failed because another metaslab was assigned as primary | |
4913 | * for this allocator (EEXIST) we continue using this | |
4914 | * metaslab for our allocation, rather than going on to a | |
4915 | * worse metaslab (we waited for that metaslab to be loaded | |
4916 | * after all). | |
4917 | * | |
fe0ea848 PD |
4918 | * If the activation failed due to an I/O error or ENOSPC we |
4919 | * skip to the next metaslab. | |
679b0f2a PD |
4920 | */ |
4921 | boolean_t activated; | |
4922 | if (activation_error == 0) { | |
4923 | activated = B_TRUE; | |
4924 | } else if (activation_error == EBUSY || | |
4925 | activation_error == EEXIST) { | |
4926 | activated = B_FALSE; | |
4927 | } else { | |
34dc7c2f BB |
4928 | mutex_exit(&msp->ms_lock); |
4929 | continue; | |
4930 | } | |
679b0f2a | 4931 | ASSERT(msp->ms_loaded); |
4e21fd06 DB |
4932 | |
4933 | /* | |
4934 | * Now that we have the lock, recheck to see if we should | |
4935 | * continue to use this metaslab for this allocation. The | |
679b0f2a PD |
4936 | * metaslab is now loaded so metaslab_should_allocate() | |
4937 | * can accurately determine if the allocation attempt should | |
4e21fd06 DB |
4938 | * proceed. |
4939 | */ | |
c81f1790 | 4940 | if (!metaslab_should_allocate(msp, asize, try_hard)) { |
4e21fd06 DB |
4941 | /* Passivate this metaslab and select a new one. */ |
4942 | metaslab_trace_add(zal, mg, msp, asize, d, | |
492f64e9 | 4943 | TRACE_TOO_SMALL, allocator); |
4e21fd06 DB |
4944 | goto next; |
4945 | } | |
4946 | ||
7a614407 | 4947 | /* |
679b0f2a PD |
4948 | * If this metaslab is currently condensing then pick again |
4949 | * as we can't manipulate this metaslab until it's committed | |
619f0976 GW |
4950 | * to disk. If this metaslab is being initialized, we shouldn't |
4951 | * allocate from it since the allocated region might be | |
4952 | * overwritten after allocation. | |
7a614407 | 4953 | */ |
93cf2076 | 4954 | if (msp->ms_condensing) { |
4e21fd06 | 4955 | metaslab_trace_add(zal, mg, msp, asize, d, |
492f64e9 | 4956 | TRACE_CONDENSING, allocator); |
679b0f2a PD |
4957 | if (activated) { |
4958 | metaslab_passivate(msp, msp->ms_weight & | |
4959 | ~METASLAB_ACTIVE_MASK); | |
4960 | } | |
7a614407 GW |
4961 | mutex_exit(&msp->ms_lock); |
4962 | continue; | |
1b939560 | 4963 | } else if (msp->ms_disabled > 0) { |
619f0976 | 4964 | metaslab_trace_add(zal, mg, msp, asize, d, |
1b939560 | 4965 | TRACE_DISABLED, allocator); |
679b0f2a PD |
4966 | if (activated) { |
4967 | metaslab_passivate(msp, msp->ms_weight & | |
4968 | ~METASLAB_ACTIVE_MASK); | |
4969 | } | |
619f0976 GW |
4970 | mutex_exit(&msp->ms_lock); |
4971 | continue; | |
7a614407 GW |
4972 | } |
4973 | ||
4e21fd06 | 4974 | offset = metaslab_block_alloc(msp, asize, txg); |
492f64e9 | 4975 | metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator); |
4e21fd06 DB |
4976 | |
4977 | if (offset != -1ULL) { | |
4978 | /* Proactively passivate the metaslab, if needed */ | |
679b0f2a PD |
4979 | if (activated) |
4980 | metaslab_segment_may_passivate(msp); | |
34dc7c2f | 4981 | break; |
4e21fd06 DB |
4982 | } |
4983 | next: | |
4984 | ASSERT(msp->ms_loaded); | |
4985 | ||
679b0f2a PD |
4986 | /* |
4987 | * This code is disabled because of issues with | |
4988 | * tracepoints in non-gpl kernel modules. | |
4989 | */ | |
4990 | #if 0 | |
4991 | DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp, | |
4992 | uint64_t, asize); | |
4993 | #endif | |
4994 | ||
4e21fd06 DB |
4995 | /* |
4996 | * We were unable to allocate from this metaslab so determine | |
4997 | * a new weight for this metaslab. Now that we have loaded | |
4998 | * the metaslab we can provide a better hint to the metaslab | |
4999 | * selector. | |
5000 | * | |
5001 | * For space-based metaslabs, we use the maximum block size. | |
5002 | * This information is only available when the metaslab | |
5003 | * is loaded and is more accurate than the generic free | |
5004 | * space weight that was calculated by metaslab_weight(). | |
5005 | * This information allows us to quickly compare the maximum | |
5006 | * available allocation in the metaslab to the allocation | |
5007 | * size being requested. | |
5008 | * | |
5009 | * For segment-based metaslabs, determine the new weight | |
5010 | * based on the highest bucket in the range tree. We | |
5011 | * explicitly use the loaded segment weight (i.e. the range | |
5012 | * tree histogram) since it contains the space that is | |
5013 | * currently available for allocation and is accurate | |
5014 | * even within a sync pass. | |
5015 | */ | |
679b0f2a | 5016 | uint64_t weight; |
4e21fd06 | 5017 | if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) { |
c81f1790 | 5018 | weight = metaslab_largest_allocatable(msp); |
4e21fd06 | 5019 | WEIGHT_SET_SPACEBASED(weight); |
679b0f2a PD |
5020 | } else { |
5021 | weight = metaslab_weight_from_range_tree(msp); | |
5022 | } | |
5023 | ||
5024 | if (activated) { | |
4e21fd06 DB |
5025 | metaslab_passivate(msp, weight); |
5026 | } else { | |
679b0f2a PD |
5027 | /* |
5028 | * For the case where we use the metaslab that is | |
5029 | * active for another allocator we want to make | |
5030 | * sure that we retain the activation mask. | |
5031 | * | |
5032 | * Note that we could attempt to use something like | |
5033 | * metaslab_recalculate_weight_and_sort() that | |
5034 | * retains the activation mask here. That function | |
5035 | * uses metaslab_weight() to set the weight though | |
5036 | * which is not as accurate as the calculations | |
5037 | * above. | |
5038 | */ | |
5039 | weight |= msp->ms_weight & METASLAB_ACTIVE_MASK; | |
5040 | metaslab_group_sort(mg, msp, weight); | |
4e21fd06 | 5041 | } |
679b0f2a | 5042 | metaslab_active_mask_verify(msp); |
34dc7c2f | 5043 | |
4e21fd06 DB |
5044 | /* |
5045 | * We have just failed an allocation attempt, check | |
5046 | * that metaslab_should_allocate() agrees. Otherwise, | |
5047 | * we may end up in an infinite loop retrying the same | |
5048 | * metaslab. | |
5049 | */ | |
c81f1790 | 5050 | ASSERT(!metaslab_should_allocate(msp, asize, try_hard)); |
cc99f275 | 5051 | |
34dc7c2f BB |
5052 | mutex_exit(&msp->ms_lock); |
5053 | } | |
4e21fd06 DB |
5054 | mutex_exit(&msp->ms_lock); |
5055 | kmem_free(search, sizeof (*search)); | |
5056 | return (offset); | |
5057 | } | |
34dc7c2f | 5058 | |
4e21fd06 DB |
5059 | static uint64_t |
5060 | metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal, | |
c81f1790 PD |
5061 | uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d, |
5062 | int allocator, boolean_t try_hard) | |
4e21fd06 DB |
5063 | { |
5064 | uint64_t offset; | |
5065 | ASSERT(mg->mg_initialized); | |
34dc7c2f | 5066 | |
cc99f275 | 5067 | offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique, |
c81f1790 | 5068 | dva, d, allocator, try_hard); |
34dc7c2f | 5069 | |
4e21fd06 DB |
5070 | mutex_enter(&mg->mg_lock); |
5071 | if (offset == -1ULL) { | |
5072 | mg->mg_failed_allocations++; | |
5073 | metaslab_trace_add(zal, mg, NULL, asize, d, | |
492f64e9 | 5074 | TRACE_GROUP_FAILURE, allocator); |
4e21fd06 DB |
5075 | if (asize == SPA_GANGBLOCKSIZE) { |
5076 | /* | |
5077 | * This metaslab group was unable to allocate | |
5078 | * the minimum gang block size so it must be out of | |
5079 | * space. We must notify the allocation throttle | |
5080 | * to start skipping allocation attempts to this | |
5081 | * metaslab group until more space becomes available. | |
5082 | * Note: this failure cannot be caused by the | |
5083 | * allocation throttle since the allocation throttle | |
5084 | * is only responsible for skipping devices and | |
5085 | * not failing block allocations. | |
5086 | */ | |
5087 | mg->mg_no_free_space = B_TRUE; | |
5088 | } | |
5089 | } | |
5090 | mg->mg_allocations++; | |
5091 | mutex_exit(&mg->mg_lock); | |
34dc7c2f BB |
5092 | return (offset); |
5093 | } | |
5094 | ||
5095 | /* | |
5096 | * Allocate a block for the specified i/o. | |
5097 | */ | |
a1d477c2 | 5098 | int |
34dc7c2f | 5099 | metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, |
4e21fd06 | 5100 | dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags, |
492f64e9 | 5101 | zio_alloc_list_t *zal, int allocator) |
34dc7c2f | 5102 | { |
f8020c93 | 5103 | metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; |
920dd524 | 5104 | metaslab_group_t *mg, *fast_mg, *rotor; |
34dc7c2f | 5105 | vdev_t *vd; |
4e21fd06 | 5106 | boolean_t try_hard = B_FALSE; |
34dc7c2f BB |
5107 | |
5108 | ASSERT(!DVA_IS_VALID(&dva[d])); | |
5109 | ||
5110 | /* | |
5111 | * For testing, make some blocks above a certain size be gang blocks. | |
09b85f2d BB |
5112 | * This will result in more split blocks when using device removal, |
5113 | * and a large number of split blocks coupled with ztest-induced | |
5114 | * damage can result in extremely long reconstruction times. This | |
5115 | * will also test spilling from special to normal. | |
34dc7c2f | 5116 | */ |
46adb282 RN |
5117 | if (psize >= metaslab_force_ganging && |
5118 | metaslab_force_ganging_pct > 0 && | |
5119 | (random_in_range(100) < MIN(metaslab_force_ganging_pct, 100))) { | |
492f64e9 PD |
5120 | metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG, |
5121 | allocator); | |
2e528b49 | 5122 | return (SET_ERROR(ENOSPC)); |
4e21fd06 | 5123 | } |
34dc7c2f BB |
5124 | |
5125 | /* | |
5126 | * Start at the rotor and loop through all mgs until we find something. | |
f8020c93 | 5127 | * Note that there's no locking on mca_rotor or mca_aliquot because |
34dc7c2f BB |
5128 | * nothing actually breaks if we miss a few updates -- we just won't |
5129 | * allocate quite as evenly. It all balances out over time. | |
5130 | * | |
5131 | * If we are doing ditto or log blocks, try to spread them across | |
5132 | * consecutive vdevs. If we're forced to reuse a vdev before we've | |
5133 | * allocated all of our ditto blocks, then try and spread them out on | |
5134 | * that vdev as much as possible. If it turns out to not be possible, | |
5135 | * gradually lower our standards until anything becomes acceptable. | |
5136 | * Also, allocating on consecutive vdevs (as opposed to random vdevs) | |
5137 | * gives us hope of containing our fault domains to something we're | |
5138 | * able to reason about. Otherwise, any two top-level vdev failures | |
5139 | * will guarantee the loss of data. With consecutive allocation, | |
5140 | * only two adjacent top-level vdev failures will result in data loss. | |
5141 | * | |
5142 | * If we are doing gang blocks (hintdva is non-NULL), try to keep | |
5143 | * ourselves on the same vdev as our gang block header. That | |
5144 | * way, we can hope for locality in vdev_cache, plus it makes our | |
5145 | * fault domains something tractable. | |
5146 | */ | |
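/*
 * Illustrative example (hypothetical pool, not from this code): with four
 * top-level vdevs A, B, C and D, a two-copy (ditto) block whose first DVA
 * lands on B starts the search for its second DVA at C (for d != 0 the
 * code below begins at the group after the vdev holding dva[d - 1]), so
 * that particular block is lost only if the adjacent pair B and C both
 * fail.
 */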
5147 | if (hintdva) { | |
5148 | vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d])); | |
428870ff BB |
5149 | |
5150 | /* | |
5151 | * It's possible the vdev we're using as the hint no | |
a1d477c2 MA |
5152 | * longer exists or its mg has been closed (e.g. by |
5153 | * device removal). Consult the rotor when | |
428870ff BB |
5154 | * all else fails. |
5155 | */ | |
a1d477c2 | 5156 | if (vd != NULL && vd->vdev_mg != NULL) { |
aa755b35 | 5157 | mg = vdev_get_mg(vd, mc); |
428870ff | 5158 | |
ef55679a | 5159 | if (flags & METASLAB_HINTBP_AVOID) |
428870ff BB |
5160 | mg = mg->mg_next; |
5161 | } else { | |
f8020c93 | 5162 | mg = mca->mca_rotor; |
428870ff | 5163 | } |
34dc7c2f BB |
5164 | } else if (d != 0) { |
5165 | vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1])); | |
5166 | mg = vd->vdev_mg->mg_next; | |
920dd524 | 5167 | } else if (flags & METASLAB_FASTWRITE) { |
f8020c93 | 5168 | mg = fast_mg = mca->mca_rotor; |
920dd524 ED |
5169 | |
5170 | do { | |
5171 | if (fast_mg->mg_vd->vdev_pending_fastwrite < | |
5172 | mg->mg_vd->vdev_pending_fastwrite) | |
5173 | mg = fast_mg; | |
f8020c93 | 5174 | } while ((fast_mg = fast_mg->mg_next) != mca->mca_rotor); |
920dd524 | 5175 | |
34dc7c2f | 5176 | } else { |
f8020c93 AM |
5177 | ASSERT(mca->mca_rotor != NULL); |
5178 | mg = mca->mca_rotor; | |
34dc7c2f BB |
5179 | } |
5180 | ||
5181 | /* | |
428870ff BB |
5182 | * If the hint put us into the wrong metaslab class, or into a |
5183 | * metaslab group that has been passivated, just follow the rotor. | |
34dc7c2f | 5184 | */ |
428870ff | 5185 | if (mg->mg_class != mc || mg->mg_activation_count <= 0) |
f8020c93 | 5186 | mg = mca->mca_rotor; |
34dc7c2f BB |
5187 | |
5188 | rotor = mg; | |
5189 | top: | |
34dc7c2f | 5190 | do { |
4e21fd06 | 5191 | boolean_t allocatable; |
428870ff | 5192 | |
3dfb57a3 | 5193 | ASSERT(mg->mg_activation_count == 1); |
34dc7c2f | 5194 | vd = mg->mg_vd; |
fb5f0bc8 | 5195 | |
34dc7c2f | 5196 | /* |
b128c09f | 5197 | * Don't allocate from faulted devices. |
34dc7c2f | 5198 | */ |
4e21fd06 | 5199 | if (try_hard) { |
fb5f0bc8 BB |
5200 | spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER); |
5201 | allocatable = vdev_allocatable(vd); | |
5202 | spa_config_exit(spa, SCL_ZIO, FTAG); | |
5203 | } else { | |
5204 | allocatable = vdev_allocatable(vd); | |
5205 | } | |
ac72fac3 GW |
5206 | |
5207 | /* | |
5208 | * Determine if the selected metaslab group is eligible | |
3dfb57a3 DB |
5209 | * for allocations. If we're ganging then don't allow |
5210 | * this metaslab group to skip allocations since that would | |
5211 | * inadvertently return ENOSPC and suspend the pool | |
ac72fac3 GW |
5212 | * even though space is still available. |
5213 | */ | |
4e21fd06 | 5214 | if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) { |
3dfb57a3 | 5215 | allocatable = metaslab_group_allocatable(mg, rotor, |
7bf4c97a | 5216 | flags, psize, allocator, d); |
3dfb57a3 | 5217 | } |
ac72fac3 | 5218 | |
4e21fd06 DB |
5219 | if (!allocatable) { |
5220 | metaslab_trace_add(zal, mg, NULL, psize, d, | |
492f64e9 | 5221 | TRACE_NOT_ALLOCATABLE, allocator); |
34dc7c2f | 5222 | goto next; |
4e21fd06 | 5223 | } |
fb5f0bc8 | 5224 | |
3dfb57a3 DB |
5225 | ASSERT(mg->mg_initialized); |
5226 | ||
34dc7c2f | 5227 | /* |
4dcc2bde | 5228 | * Avoid writing single-copy data to an unhealthy, |
4e21fd06 DB |
5229 | * non-redundant vdev, unless we've already tried all |
5230 | * other vdevs. | |
34dc7c2f | 5231 | */ |
4dcc2bde | 5232 | if (vd->vdev_state < VDEV_STATE_HEALTHY && |
4e21fd06 DB |
5233 | d == 0 && !try_hard && vd->vdev_children == 0) { |
5234 | metaslab_trace_add(zal, mg, NULL, psize, d, | |
492f64e9 | 5235 | TRACE_VDEV_ERROR, allocator); |
34dc7c2f BB |
5236 | goto next; |
5237 | } | |
5238 | ||
5239 | ASSERT(mg->mg_class == mc); | |
5240 | ||
1c27024e | 5241 | uint64_t asize = vdev_psize_to_asize(vd, psize); |
34dc7c2f BB |
5242 | ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0); |
5243 | ||
cc99f275 DB |
5244 | /* |
5245 | * If we don't need to try hard, then require that the | |
e1cfd73f | 5246 | * block be on a different metaslab from any other DVAs |
cc99f275 DB |
5247 | * in this BP (unique=true). If we are trying hard, then |
5248 | * allow any metaslab to be used (unique=false). | |
5249 | */ | |
1c27024e | 5250 | uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg, |
c81f1790 | 5251 | !try_hard, dva, d, allocator, try_hard); |
3dfb57a3 | 5252 | |
34dc7c2f BB |
5253 | if (offset != -1ULL) { |
5254 | /* | |
5255 | * If we've just selected this metaslab group, | |
5256 | * figure out whether the corresponding vdev is | |
5257 | * over- or under-used relative to the pool, | |
5258 | * and set an allocation bias to even it out. | |
bb3250d0 ED |
5259 | * |
5260 | * Bias is also used to compensate for unequally | |
5261 | * sized vdevs so that space is allocated fairly. | |
34dc7c2f | 5262 | */ |
f8020c93 | 5263 | if (mca->mca_aliquot == 0 && metaslab_bias_enabled) { |
34dc7c2f | 5264 | vdev_stat_t *vs = &vd->vdev_stat; |
bb3250d0 ED |
5265 | int64_t vs_free = vs->vs_space - vs->vs_alloc; |
5266 | int64_t mc_free = mc->mc_space - mc->mc_alloc; | |
5267 | int64_t ratio; | |
34dc7c2f BB |
5268 | |
5269 | /* | |
6d974228 GW |
5270 | * Calculate how much more or less we should |
5271 | * try to allocate from this device during | |
5272 | * this iteration around the rotor. | |
6d974228 | 5273 | * |
bb3250d0 ED |
5274 | * This basically introduces a zero-centered |
5275 | * bias towards the devices with the most | |
5276 | * free space, while compensating for vdev | |
5277 | * size differences. | |
5278 | * | |
5279 | * Examples: | |
5280 | * vdev V1 = 16M/128M | |
5281 | * vdev V2 = 16M/128M | |
5282 | * ratio(V1) = 100% ratio(V2) = 100% | |
5283 | * | |
5284 | * vdev V1 = 16M/128M | |
5285 | * vdev V2 = 64M/128M | |
5286 | * ratio(V1) = 127% ratio(V2) = 72% | |
6d974228 | 5287 | * |
bb3250d0 ED |
5288 | * vdev V1 = 16M/128M |
5289 | * vdev V2 = 64M/512M | |
5290 | * ratio(V1) = 40% ratio(V2) = 160% | |
34dc7c2f | 5291 | */ |
bb3250d0 ED |
5292 | ratio = (vs_free * mc->mc_alloc_groups * 100) / |
5293 | (mc_free + 1); | |
5294 | mg->mg_bias = ((ratio - 100) * | |
6d974228 | 5295 | (int64_t)mg->mg_aliquot) / 100; |
f3a7f661 GW |
5296 | } else if (!metaslab_bias_enabled) { |
5297 | mg->mg_bias = 0; | |
34dc7c2f BB |
5298 | } |
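/*
 * Continuing the third example above (illustrative numbers only): with
 * two allocation groups, vs_free(V1) = 112M and mc_free = 560M give
 * ratio = (112M * 2 * 100) / (560M + 1) ~= 40, so
 * mg_bias = ((40 - 100) * mg_aliquot) / 100 = -60% of mg_aliquot.
 * V1 therefore receives roughly 40% of a normal aliquot before the
 * rotor advances, while V2 (ratio ~160) receives about 160% of one.
 */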
5299 | ||
920dd524 | 5300 | if ((flags & METASLAB_FASTWRITE) || |
f8020c93 | 5301 | atomic_add_64_nv(&mca->mca_aliquot, asize) >= |
34dc7c2f | 5302 | mg->mg_aliquot + mg->mg_bias) { |
f8020c93 AM |
5303 | mca->mca_rotor = mg->mg_next; |
5304 | mca->mca_aliquot = 0; | |
34dc7c2f BB |
5305 | } |
5306 | ||
5307 | DVA_SET_VDEV(&dva[d], vd->vdev_id); | |
5308 | DVA_SET_OFFSET(&dva[d], offset); | |
e3e7cf60 D |
5309 | DVA_SET_GANG(&dva[d], |
5310 | ((flags & METASLAB_GANG_HEADER) ? 1 : 0)); | |
34dc7c2f BB |
5311 | DVA_SET_ASIZE(&dva[d], asize); |
5312 | ||
920dd524 ED |
5313 | if (flags & METASLAB_FASTWRITE) { |
5314 | atomic_add_64(&vd->vdev_pending_fastwrite, | |
5315 | psize); | |
920dd524 ED |
5316 | } |
5317 | ||
34dc7c2f BB |
5318 | return (0); |
5319 | } | |
5320 | next: | |
f8020c93 AM |
5321 | mca->mca_rotor = mg->mg_next; |
5322 | mca->mca_aliquot = 0; | |
34dc7c2f BB |
5323 | } while ((mg = mg->mg_next) != rotor); |
5324 | ||
4e21fd06 | 5325 | /* |
be5c6d96 | 5326 | * If we haven't tried hard, perhaps do so now. |
4e21fd06 | 5327 | */ |
be5c6d96 MA |
5328 | if (!try_hard && (zfs_metaslab_try_hard_before_gang || |
5329 | GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 || | |
5330 | psize <= 1 << spa->spa_min_ashift)) { | |
5331 | METASLABSTAT_BUMP(metaslabstat_try_hard); | |
4e21fd06 | 5332 | try_hard = B_TRUE; |
fb5f0bc8 BB |
5333 | goto top; |
5334 | } | |
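/*
 * Summary of the retry (no new behavior): a second pass with
 * try_hard = B_TRUE is attempted before failing whenever the allocation
 * is already a gang allocation, carries METASLAB_ZIL, is no larger than
 * one 2^spa_min_ashift block, or zfs_metaslab_try_hard_before_gang is
 * set; that pass skips the group-eligibility check, tolerates degraded
 * non-redundant vdevs, and drops the unique-metaslab requirement.
 */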
5335 | ||
861166b0 | 5336 | memset(&dva[d], 0, sizeof (dva_t)); |
34dc7c2f | 5337 | |
492f64e9 | 5338 | metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator); |
2e528b49 | 5339 | return (SET_ERROR(ENOSPC)); |
34dc7c2f BB |
5340 | } |
5341 | ||
a1d477c2 MA |
5342 | void |
5343 | metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize, | |
d2734cce | 5344 | boolean_t checkpoint) |
a1d477c2 MA |
5345 | { |
5346 | metaslab_t *msp; | |
d2734cce | 5347 | spa_t *spa = vd->vdev_spa; |
a1d477c2 | 5348 | |
a1d477c2 MA |
5349 | ASSERT(vdev_is_concrete(vd)); |
5350 | ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); | |
5351 | ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count); | |
5352 | ||
5353 | msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; | |
5354 | ||
5355 | VERIFY(!msp->ms_condensing); | |
5356 | VERIFY3U(offset, >=, msp->ms_start); | |
5357 | VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size); | |
5358 | VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); | |
5359 | VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift)); | |
5360 | ||
5361 | metaslab_check_free_impl(vd, offset, asize); | |
d2734cce | 5362 | |
a1d477c2 | 5363 | mutex_enter(&msp->ms_lock); |
d2734cce SD |
5364 | if (range_tree_is_empty(msp->ms_freeing) && |
5365 | range_tree_is_empty(msp->ms_checkpointing)) { | |
5366 | vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa)); | |
5367 | } | |
5368 | ||
5369 | if (checkpoint) { | |
5370 | ASSERT(spa_has_checkpoint(spa)); | |
5371 | range_tree_add(msp->ms_checkpointing, offset, asize); | |
5372 | } else { | |
5373 | range_tree_add(msp->ms_freeing, offset, asize); | |
a1d477c2 | 5374 | } |
a1d477c2 MA |
5375 | mutex_exit(&msp->ms_lock); |
5376 | } | |
5377 | ||
a1d477c2 MA |
5378 | void |
5379 | metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, | |
5380 | uint64_t size, void *arg) | |
5381 | { | |
14e4e3cb | 5382 | (void) inner_offset; |
d2734cce SD |
5383 | boolean_t *checkpoint = arg; |
5384 | ||
5385 | ASSERT3P(checkpoint, !=, NULL); | |
a1d477c2 MA |
5386 | |
5387 | if (vd->vdev_ops->vdev_op_remap != NULL) | |
d2734cce | 5388 | vdev_indirect_mark_obsolete(vd, offset, size); |
a1d477c2 | 5389 | else |
d2734cce | 5390 | metaslab_free_impl(vd, offset, size, *checkpoint); |
a1d477c2 MA |
5391 | } |
5392 | ||
5393 | static void | |
5394 | metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size, | |
d2734cce | 5395 | boolean_t checkpoint) |
a1d477c2 MA |
5396 | { |
5397 | spa_t *spa = vd->vdev_spa; | |
5398 | ||
5399 | ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); | |
5400 | ||
d2734cce | 5401 | if (spa_syncing_txg(spa) > spa_freeze_txg(spa)) |
a1d477c2 MA |
5402 | return; |
5403 | ||
5404 | if (spa->spa_vdev_removal != NULL && | |
9e052db4 | 5405 | spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id && |
a1d477c2 MA |
5406 | vdev_is_concrete(vd)) { |
5407 | /* | |
5408 | * Note: we check if the vdev is concrete because when | |
5409 | * we complete the removal, we first change the vdev to be | |
5410 | * an indirect vdev (in open context), and then (in syncing | |
5411 | * context) clear spa_vdev_removal. | |
5412 | */ | |
d2734cce | 5413 | free_from_removing_vdev(vd, offset, size); |
a1d477c2 | 5414 | } else if (vd->vdev_ops->vdev_op_remap != NULL) { |
d2734cce | 5415 | vdev_indirect_mark_obsolete(vd, offset, size); |
a1d477c2 | 5416 | vd->vdev_ops->vdev_op_remap(vd, offset, size, |
d2734cce | 5417 | metaslab_free_impl_cb, &checkpoint); |
a1d477c2 | 5418 | } else { |
d2734cce | 5419 | metaslab_free_concrete(vd, offset, size, checkpoint); |
a1d477c2 MA |
5420 | } |
5421 | } | |
5422 | ||
5423 | typedef struct remap_blkptr_cb_arg { | |
5424 | blkptr_t *rbca_bp; | |
5425 | spa_remap_cb_t rbca_cb; | |
5426 | vdev_t *rbca_remap_vd; | |
5427 | uint64_t rbca_remap_offset; | |
5428 | void *rbca_cb_arg; | |
5429 | } remap_blkptr_cb_arg_t; | |
5430 | ||
65c7cc49 | 5431 | static void |
a1d477c2 MA |
5432 | remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, |
5433 | uint64_t size, void *arg) | |
5434 | { | |
5435 | remap_blkptr_cb_arg_t *rbca = arg; | |
5436 | blkptr_t *bp = rbca->rbca_bp; | |
5437 | ||
5438 | /* We cannot remap split blocks. */ |
5439 | if (size != DVA_GET_ASIZE(&bp->blk_dva[0])) | |
5440 | return; | |
5441 | ASSERT0(inner_offset); | |
5442 | ||
5443 | if (rbca->rbca_cb != NULL) { | |
5444 | /* | |
5445 | * At this point we know that we are not handling split | |
5446 | * blocks and we invoke the callback on the previous | |
5447 | * vdev which must be indirect. | |
5448 | */ | |
5449 | ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops); | |
5450 | ||
5451 | rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id, | |
5452 | rbca->rbca_remap_offset, size, rbca->rbca_cb_arg); | |
5453 | ||
5454 | /* set up remap_blkptr_cb_arg for the next call */ | |
5455 | rbca->rbca_remap_vd = vd; | |
5456 | rbca->rbca_remap_offset = offset; | |
5457 | } | |
5458 | ||
5459 | /* | |
5460 | * The phys birth time is that of dva[0]. This ensures that we know | |
5461 | * when each dva was written, so that resilver can determine which | |
5462 | * blocks need to be scrubbed (i.e. those written during the time | |
5463 | * the vdev was offline). It also ensures that the key used in | |
5464 | * the ARC hash table is unique (i.e. dva[0] + phys_birth). If | |
5465 | * we didn't change the phys_birth, a lookup in the ARC for a | |
5466 | * remapped BP could find the data that was previously stored at | |
5467 | * this vdev + offset. | |
5468 | */ | |
5469 | vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa, | |
5470 | DVA_GET_VDEV(&bp->blk_dva[0])); | |
5471 | vdev_indirect_births_t *vib = oldvd->vdev_indirect_births; | |
5472 | bp->blk_phys_birth = vdev_indirect_births_physbirth(vib, | |
5473 | DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0])); | |
5474 | ||
5475 | DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id); | |
5476 | DVA_SET_OFFSET(&bp->blk_dva[0], offset); | |
5477 | } | |
5478 | ||
34dc7c2f | 5479 | /* |
a1d477c2 MA |
5480 | * If the block pointer contains any indirect DVAs, modify them to refer to |
5481 | * concrete DVAs. Note that this will sometimes not be possible, leaving | |
5482 | * the indirect DVA in place. This happens if the indirect DVA spans multiple | |
5483 | * segments in the mapping (i.e. it is a "split block"). | |
5484 | * | |
5485 | * If the BP was remapped, calls the callback on the original dva (note the | |
5486 | * callback can be called multiple times if the original indirect DVA refers | |
5487 | * to another indirect DVA, etc). | |
5488 | * | |
5489 | * Returns TRUE if the BP was remapped. | |
34dc7c2f | 5490 | */ |
a1d477c2 MA |
5491 | boolean_t |
5492 | spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg) | |
34dc7c2f | 5493 | { |
a1d477c2 MA |
5494 | remap_blkptr_cb_arg_t rbca; |
5495 | ||
5496 | if (!zfs_remap_blkptr_enable) | |
5497 | return (B_FALSE); | |
5498 | ||
5499 | if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) | |
5500 | return (B_FALSE); | |
5501 | ||
5502 | /* | |
5503 | * Dedup BPs cannot be remapped, because ddt_phys_select() depends |
5504 | * on DVA[0] being the same in the BP as in the DDT (dedup table). | |
5505 | */ | |
5506 | if (BP_GET_DEDUP(bp)) | |
5507 | return (B_FALSE); | |
5508 | ||
5509 | /* | |
5510 | * Gang blocks can not be remapped, because | |
5511 | * zio_checksum_gang_verifier() depends on the DVA[0] that's in | |
5512 | * the BP used to read the gang block header (GBH) being the same | |
5513 | * as the DVA[0] that we allocated for the GBH. | |
5514 | */ | |
5515 | if (BP_IS_GANG(bp)) | |
5516 | return (B_FALSE); | |
5517 | ||
5518 | /* | |
5519 | * Embedded BP's have no DVA to remap. | |
5520 | */ | |
5521 | if (BP_GET_NDVAS(bp) < 1) | |
5522 | return (B_FALSE); | |
5523 | ||
5524 | /* | |
5525 | * Note: we only remap dva[0]. If we remapped other dvas, we | |
5526 | * would no longer know what their phys birth txg is. | |
5527 | */ | |
5528 | dva_t *dva = &bp->blk_dva[0]; | |
5529 | ||
34dc7c2f BB |
5530 | uint64_t offset = DVA_GET_OFFSET(dva); |
5531 | uint64_t size = DVA_GET_ASIZE(dva); | |
a1d477c2 MA |
5532 | vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); |
5533 | ||
5534 | if (vd->vdev_ops->vdev_op_remap == NULL) | |
5535 | return (B_FALSE); | |
5536 | ||
5537 | rbca.rbca_bp = bp; | |
5538 | rbca.rbca_cb = callback; | |
5539 | rbca.rbca_remap_vd = vd; | |
5540 | rbca.rbca_remap_offset = offset; | |
5541 | rbca.rbca_cb_arg = arg; | |
5542 | ||
5543 | /* | |
5544 | * remap_blkptr_cb() will be called in order for each level of | |
5545 | * indirection, until a concrete vdev is reached or a split block is | |
5546 | * encountered. old_vd and old_offset are updated within the callback | |
5547 | * as we go from the one indirect vdev to the next one (either concrete | |
5548 | * or indirect again) in that order. | |
5549 | */ | |
5550 | vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca); | |
5551 | ||
5552 | /* Check if the DVA wasn't remapped because it is a split block */ | |
5553 | if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id) | |
5554 | return (B_FALSE); | |
5555 | ||
5556 | return (B_TRUE); | |
5557 | } | |
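/*
 * Hedged usage sketch (hypothetical caller, simplified): a scanner that
 * wants to account for a block's previous location can pass a
 * spa_remap_cb_t, which is handed the old (vdev id, offset, size) plus
 * the caller's arg once per level of indirection, and should treat the
 * blkptr as rewritten only when the function returns B_TRUE:
 *
 *	if (spa_remap_blkptr(spa, &bp_copy, note_old_location, state))
 *		dva[0] of bp_copy now names a concrete vdev and offset
 *
 * note_old_location and state are placeholders for the caller's own
 * callback and bookkeeping.
 */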
5558 | ||
5559 | /* | |
5560 | * Undo the allocation of a DVA which happened in the given transaction group. | |
5561 | */ | |
5562 | void | |
5563 | metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg) | |
5564 | { | |
34dc7c2f | 5565 | metaslab_t *msp; |
a1d477c2 MA |
5566 | vdev_t *vd; |
5567 | uint64_t vdev = DVA_GET_VDEV(dva); | |
5568 | uint64_t offset = DVA_GET_OFFSET(dva); | |
5569 | uint64_t size = DVA_GET_ASIZE(dva); | |
5570 | ||
5571 | ASSERT(DVA_IS_VALID(dva)); | |
5572 | ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); | |
34dc7c2f | 5573 | |
34dc7c2f BB |
5574 | if (txg > spa_freeze_txg(spa)) |
5575 | return; | |
5576 | ||
7d2868d5 | 5577 | if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) || |
34dc7c2f | 5578 | (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) { |
7d2868d5 BB |
5579 | zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu", |
5580 | (u_longlong_t)vdev, (u_longlong_t)offset, | |
5581 | (u_longlong_t)size); | |
34dc7c2f BB |
5582 | return; |
5583 | } | |
5584 | ||
a1d477c2 MA |
5585 | ASSERT(!vd->vdev_removing); |
5586 | ASSERT(vdev_is_concrete(vd)); | |
5587 | ASSERT0(vd->vdev_indirect_config.vic_mapping_object); | |
5588 | ASSERT3P(vd->vdev_indirect_mapping, ==, NULL); | |
34dc7c2f BB |
5589 | |
5590 | if (DVA_GET_GANG(dva)) | |
2b56a634 | 5591 | size = vdev_gang_header_asize(vd); |
34dc7c2f | 5592 | |
a1d477c2 | 5593 | msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; |
93cf2076 | 5594 | |
a1d477c2 | 5595 | mutex_enter(&msp->ms_lock); |
d2734cce | 5596 | range_tree_remove(msp->ms_allocating[txg & TXG_MASK], |
a1d477c2 | 5597 | offset, size); |
f09fda50 | 5598 | msp->ms_allocating_total -= size; |
34dc7c2f | 5599 | |
a1d477c2 MA |
5600 | VERIFY(!msp->ms_condensing); |
5601 | VERIFY3U(offset, >=, msp->ms_start); | |
5602 | VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size); | |
d2734cce | 5603 | VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=, |
a1d477c2 MA |
5604 | msp->ms_size); |
5605 | VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); | |
5606 | VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); | |
d2734cce | 5607 | range_tree_add(msp->ms_allocatable, offset, size); |
34dc7c2f BB |
5608 | mutex_exit(&msp->ms_lock); |
5609 | } | |
5610 | ||
5611 | /* | |
d2734cce | 5612 | * Free the block represented by the given DVA. |
34dc7c2f | 5613 | */ |
a1d477c2 | 5614 | void |
d2734cce | 5615 | metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint) |
34dc7c2f BB |
5616 | { |
5617 | uint64_t vdev = DVA_GET_VDEV(dva); | |
5618 | uint64_t offset = DVA_GET_OFFSET(dva); | |
5619 | uint64_t size = DVA_GET_ASIZE(dva); | |
a1d477c2 | 5620 | vdev_t *vd = vdev_lookup_top(spa, vdev); |
34dc7c2f BB |
5621 | |
5622 | ASSERT(DVA_IS_VALID(dva)); | |
a1d477c2 | 5623 | ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); |
34dc7c2f | 5624 | |
a1d477c2 | 5625 | if (DVA_GET_GANG(dva)) { |
2b56a634 | 5626 | size = vdev_gang_header_asize(vd); |
34dc7c2f BB |
5627 | } |
5628 | ||
d2734cce | 5629 | metaslab_free_impl(vd, offset, size, checkpoint); |
34dc7c2f BB |
5630 | } |
5631 | ||
3dfb57a3 DB |
5632 | /* |
5633 | * Reserve some allocation slots. The reservation system must be called | |
5634 | * before we call into the allocator. If there aren't any available slots | |
5635 | * then the I/O will be throttled until an I/O completes and its slots are | |
5636 | * freed up. The function returns true if it was successful in placing | |
5637 | * the reservation. | |
5638 | */ | |
5639 | boolean_t | |
492f64e9 PD |
5640 | metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator, |
5641 | zio_t *zio, int flags) | |
3dfb57a3 | 5642 | { |
f8020c93 | 5643 | metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; |
f8020c93 | 5644 | uint64_t max = mca->mca_alloc_max_slots; |
3dfb57a3 DB |
5645 | |
5646 | ASSERT(mc->mc_alloc_throttle_enabled); | |
1b50749c AM |
5647 | if (GANG_ALLOCATION(flags) || (flags & METASLAB_MUST_RESERVE) || |
5648 | zfs_refcount_count(&mca->mca_alloc_slots) + slots <= max) { | |
3dfb57a3 | 5649 | /* |
dd3bda39 AM |
5650 | * The potential race between _count() and _add() is covered |
5651 | * by the allocator lock in most cases, or irrelevant due to | |
5652 | * GANG_ALLOCATION() or METASLAB_MUST_RESERVE set in others. | |
5653 | * But even if we assume some other non-existing scenario, the | |
5654 | * worst that can happen is few more I/Os get to allocation | |
5655 | * earlier, that is not a problem. | |
5656 | * | |
3dfb57a3 DB |
5657 | * We reserve the slots individually so that we can unreserve |
5658 | * them individually when an I/O completes. | |
5659 | */ | |
5ba4025a | 5660 | zfs_refcount_add_few(&mca->mca_alloc_slots, slots, zio); |
3dfb57a3 | 5661 | zio->io_flags |= ZIO_FLAG_IO_ALLOCATING; |
1b50749c | 5662 | return (B_TRUE); |
3dfb57a3 | 5663 | } |
1b50749c | 5664 | return (B_FALSE); |
3dfb57a3 DB |
5665 | } |
5666 | ||
5667 | void | |
492f64e9 PD |
5668 | metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, |
5669 | int allocator, zio_t *zio) | |
3dfb57a3 | 5670 | { |
f8020c93 AM |
5671 | metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; |
5672 | ||
3dfb57a3 | 5673 | ASSERT(mc->mc_alloc_throttle_enabled); |
5ba4025a | 5674 | zfs_refcount_remove_few(&mca->mca_alloc_slots, slots, zio); |
3dfb57a3 DB |
5675 | } |
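/*
 * Hedged usage sketch (hypothetical caller, simplified): reserve and
 * unreserve are meant to bracket the allocation, one slot per DVA:
 *
 *	if (metaslab_class_throttle_reserve(mc, ndvas, alloc, zio, 0)) {
 *		... allocate and issue the zio ...
 *		metaslab_class_throttle_unreserve(mc, ndvas, alloc, zio);
 *	} else {
 *		... defer the zio until other allocating I/Os complete ...
 *	}
 */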
5676 | ||
a1d477c2 MA |
5677 | static int |
5678 | metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size, | |
5679 | uint64_t txg) | |
5680 | { | |
5681 | metaslab_t *msp; | |
5682 | spa_t *spa = vd->vdev_spa; | |
5683 | int error = 0; | |
5684 | ||
5685 | if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count) | |
7ab96299 | 5686 | return (SET_ERROR(ENXIO)); |
a1d477c2 MA |
5687 | |
5688 | ASSERT3P(vd->vdev_ms, !=, NULL); | |
5689 | msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; | |
5690 | ||
5691 | mutex_enter(&msp->ms_lock); | |
5692 | ||
7ab96299 | 5693 | if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) { |
492f64e9 | 5694 | error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM); |
7ab96299 TC |
5695 | if (error == EBUSY) { |
5696 | ASSERT(msp->ms_loaded); | |
5697 | ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); | |
5698 | error = 0; | |
5699 | } | |
5700 | } | |
a1d477c2 | 5701 | |
d2734cce SD |
5702 | if (error == 0 && |
5703 | !range_tree_contains(msp->ms_allocatable, offset, size)) | |
a1d477c2 MA |
5704 | error = SET_ERROR(ENOENT); |
5705 | ||
5706 | if (error || txg == 0) { /* txg == 0 indicates dry run */ | |
5707 | mutex_exit(&msp->ms_lock); | |
5708 | return (error); | |
5709 | } | |
5710 | ||
5711 | VERIFY(!msp->ms_condensing); | |
5712 | VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); | |
5713 | VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); | |
d2734cce SD |
5714 | VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=, |
5715 | msp->ms_size); | |
5716 | range_tree_remove(msp->ms_allocatable, offset, size); | |
1b939560 | 5717 | range_tree_clear(msp->ms_trim, offset, size); |
a1d477c2 | 5718 | |
76d04993 | 5719 | if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */ |
f09fda50 PD |
5720 | metaslab_class_t *mc = msp->ms_group->mg_class; |
5721 | multilist_sublist_t *mls = | |
ffdf019c | 5722 | multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); |
f09fda50 PD |
5723 | if (!multilist_link_active(&msp->ms_class_txg_node)) { |
5724 | msp->ms_selected_txg = txg; | |
5725 | multilist_sublist_insert_head(mls, msp); | |
5726 | } | |
5727 | multilist_sublist_unlock(mls); | |
5728 | ||
d2734cce | 5729 | if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) |
a1d477c2 | 5730 | vdev_dirty(vd, VDD_METASLAB, msp, txg); |
d2734cce SD |
5731 | range_tree_add(msp->ms_allocating[txg & TXG_MASK], |
5732 | offset, size); | |
f09fda50 | 5733 | msp->ms_allocating_total += size; |
a1d477c2 MA |
5734 | } |
5735 | ||
5736 | mutex_exit(&msp->ms_lock); | |
5737 | ||
5738 | return (0); | |
5739 | } | |
5740 | ||
5741 | typedef struct metaslab_claim_cb_arg_t { | |
5742 | uint64_t mcca_txg; | |
5743 | int mcca_error; | |
5744 | } metaslab_claim_cb_arg_t; | |
5745 | ||
a1d477c2 MA |
5746 | static void |
5747 | metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, | |
5748 | uint64_t size, void *arg) | |
5749 | { | |
14e4e3cb | 5750 | (void) inner_offset; |
a1d477c2 MA |
5751 | metaslab_claim_cb_arg_t *mcca_arg = arg; |
5752 | ||
5753 | if (mcca_arg->mcca_error == 0) { | |
5754 | mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset, | |
5755 | size, mcca_arg->mcca_txg); | |
5756 | } | |
5757 | } | |
5758 | ||
5759 | int | |
5760 | metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg) | |
5761 | { | |
5762 | if (vd->vdev_ops->vdev_op_remap != NULL) { | |
5763 | metaslab_claim_cb_arg_t arg; | |
5764 | ||
5765 | /* | |
76d04993 | 5766 | * Only zdb(8) can claim on indirect vdevs. This is used |
a1d477c2 MA |
5767 | * to detect leaks of mapped space (that are not accounted |
5768 | * for in the obsolete counts, spacemap, or bpobj). | |
5769 | */ | |
5770 | ASSERT(!spa_writeable(vd->vdev_spa)); | |
5771 | arg.mcca_error = 0; | |
5772 | arg.mcca_txg = txg; | |
5773 | ||
5774 | vd->vdev_ops->vdev_op_remap(vd, offset, size, | |
5775 | metaslab_claim_impl_cb, &arg); | |
5776 | ||
5777 | if (arg.mcca_error == 0) { | |
5778 | arg.mcca_error = metaslab_claim_concrete(vd, | |
5779 | offset, size, txg); | |
5780 | } | |
5781 | return (arg.mcca_error); | |
5782 | } else { | |
5783 | return (metaslab_claim_concrete(vd, offset, size, txg)); | |
5784 | } | |
5785 | } | |
5786 | ||
5787 | /* | |
5788 | * Intent log support: upon opening the pool after a crash, notify the SPA | |
5789 | * of blocks that the intent log has allocated for immediate write, but | |
5790 | * which are still considered free by the SPA because the last transaction | |
5791 | * group didn't commit yet. | |
5792 | */ | |
5793 | static int | |
5794 | metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) | |
5795 | { | |
5796 | uint64_t vdev = DVA_GET_VDEV(dva); | |
5797 | uint64_t offset = DVA_GET_OFFSET(dva); | |
5798 | uint64_t size = DVA_GET_ASIZE(dva); | |
5799 | vdev_t *vd; | |
5800 | ||
5801 | if ((vd = vdev_lookup_top(spa, vdev)) == NULL) { | |
5802 | return (SET_ERROR(ENXIO)); | |
5803 | } | |
5804 | ||
5805 | ASSERT(DVA_IS_VALID(dva)); | |
5806 | ||
5807 | if (DVA_GET_GANG(dva)) | |
2b56a634 | 5808 | size = vdev_gang_header_asize(vd); |
a1d477c2 MA |
5809 | |
5810 | return (metaslab_claim_impl(vd, offset, size, txg)); | |
5811 | } | |
5812 | ||
34dc7c2f BB |
5813 | int |
5814 | metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, | |
4e21fd06 | 5815 | int ndvas, uint64_t txg, blkptr_t *hintbp, int flags, |
492f64e9 | 5816 | zio_alloc_list_t *zal, zio_t *zio, int allocator) |
34dc7c2f BB |
5817 | { |
5818 | dva_t *dva = bp->blk_dva; | |
928e8ad4 | 5819 | dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL; |
1c27024e | 5820 | int error = 0; |
34dc7c2f | 5821 | |
b128c09f | 5822 | ASSERT(bp->blk_birth == 0); |
428870ff | 5823 | ASSERT(BP_PHYSICAL_BIRTH(bp) == 0); |
b128c09f BB |
5824 | |
5825 | spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); | |
5826 | ||
f8020c93 AM |
5827 | if (mc->mc_allocator[allocator].mca_rotor == NULL) { |
5828 | /* no vdevs in this class */ | |
b128c09f | 5829 | spa_config_exit(spa, SCL_ALLOC, FTAG); |
2e528b49 | 5830 | return (SET_ERROR(ENOSPC)); |
b128c09f | 5831 | } |
34dc7c2f BB |
5832 | |
5833 | ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa)); | |
5834 | ASSERT(BP_GET_NDVAS(bp) == 0); | |
5835 | ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); | |
4e21fd06 | 5836 | ASSERT3P(zal, !=, NULL); |
34dc7c2f | 5837 | |
1c27024e | 5838 | for (int d = 0; d < ndvas; d++) { |
34dc7c2f | 5839 | error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, |
492f64e9 | 5840 | txg, flags, zal, allocator); |
93cf2076 | 5841 | if (error != 0) { |
34dc7c2f | 5842 | for (d--; d >= 0; d--) { |
a1d477c2 | 5843 | metaslab_unalloc_dva(spa, &dva[d], txg); |
3dfb57a3 | 5844 | metaslab_group_alloc_decrement(spa, |
492f64e9 PD |
5845 | DVA_GET_VDEV(&dva[d]), zio, flags, |
5846 | allocator, B_FALSE); | |
861166b0 | 5847 | memset(&dva[d], 0, sizeof (dva_t)); |
34dc7c2f | 5848 | } |
b128c09f | 5849 | spa_config_exit(spa, SCL_ALLOC, FTAG); |
34dc7c2f | 5850 | return (error); |
3dfb57a3 DB |
5851 | } else { |
5852 | /* | |
5853 | * Update the metaslab group's queue depth | |
5854 | * based on the newly allocated dva. | |
5855 | */ | |
5856 | metaslab_group_alloc_increment(spa, | |
492f64e9 | 5857 | DVA_GET_VDEV(&dva[d]), zio, flags, allocator); |
34dc7c2f BB |
5858 | } |
5859 | } | |
5860 | ASSERT(error == 0); | |
5861 | ASSERT(BP_GET_NDVAS(bp) == ndvas); | |
5862 | ||
b128c09f BB |
5863 | spa_config_exit(spa, SCL_ALLOC, FTAG); |
5864 | ||
efe7978d | 5865 | BP_SET_BIRTH(bp, txg, 0); |
b128c09f | 5866 | |
34dc7c2f BB |
5867 | return (0); |
5868 | } | |
5869 | ||
5870 | void | |
5871 | metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) | |
5872 | { | |
5873 | const dva_t *dva = bp->blk_dva; | |
1c27024e | 5874 | int ndvas = BP_GET_NDVAS(bp); |
34dc7c2f BB |
5875 | |
5876 | ASSERT(!BP_IS_HOLE(bp)); | |
428870ff | 5877 | ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa)); |
b128c09f | 5878 | |
d2734cce SD |
5879 | /* |
5880 | * If we have a checkpoint for the pool we need to make sure that | |
5881 | * the blocks that we free that are part of the checkpoint won't be | |
5882 | * reused until the checkpoint is discarded or we revert to it. | |
5883 | * | |
5884 | * The checkpoint flag is passed down the metaslab_free code path | |
5885 | * and is set whenever we want to add a block to the checkpoint's | |
5886 | * accounting. That is, we "checkpoint" blocks that existed at the | |
5887 | * time the checkpoint was created and are therefore referenced by | |
5888 | * the checkpointed uberblock. | |
5889 | * | |
5890 | * Note that, we don't checkpoint any blocks if the current | |
5891 | * syncing txg <= spa_checkpoint_txg. We want these frees to sync | |
5892 | * normally as they will be referenced by the checkpointed uberblock. | |
5893 | */ | |
5894 | boolean_t checkpoint = B_FALSE; | |
5895 | if (bp->blk_birth <= spa->spa_checkpoint_txg && | |
5896 | spa_syncing_txg(spa) > spa->spa_checkpoint_txg) { | |
5897 | /* | |
5898 | * At this point, if the block is part of the checkpoint | |
5899 | * there is no way it was created in the current txg. | |
5900 | */ | |
5901 | ASSERT(!now); | |
5902 | ASSERT3U(spa_syncing_txg(spa), ==, txg); | |
5903 | checkpoint = B_TRUE; | |
5904 | } | |
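/*
 * Worked example (hypothetical txgs): with a checkpoint taken at txg 100,
 * a block born in txg 90 and freed while txg 120 is syncing is
 * "checkpointed" (added to ms_checkpointing) so it cannot be reallocated
 * while the checkpoint exists, whereas a block born in txg 110 is freed
 * normally since the checkpointed uberblock can never reference it.
 */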
5905 | ||
b128c09f | 5906 | spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); |
34dc7c2f | 5907 | |
a1d477c2 MA |
5908 | for (int d = 0; d < ndvas; d++) { |
5909 | if (now) { | |
5910 | metaslab_unalloc_dva(spa, &dva[d], txg); | |
5911 | } else { | |
d2734cce SD |
5912 | ASSERT3U(txg, ==, spa_syncing_txg(spa)); |
5913 | metaslab_free_dva(spa, &dva[d], checkpoint); | |
a1d477c2 MA |
5914 | } |
5915 | } | |
b128c09f BB |
5916 | |
5917 | spa_config_exit(spa, SCL_FREE, FTAG); | |
34dc7c2f BB |
5918 | } |
5919 | ||
5920 | int | |
5921 | metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) | |
5922 | { | |
5923 | const dva_t *dva = bp->blk_dva; | |
5924 | int ndvas = BP_GET_NDVAS(bp); | |
1c27024e | 5925 | int error = 0; |
34dc7c2f BB |
5926 | |
5927 | ASSERT(!BP_IS_HOLE(bp)); | |
5928 | ||
b128c09f BB |
5929 | if (txg != 0) { |
5930 | /* | |
5931 | * First do a dry run to make sure all DVAs are claimable, | |
5932 | * so we don't have to unwind from partial failures below. | |
5933 | */ | |
5934 | if ((error = metaslab_claim(spa, bp, 0)) != 0) | |
5935 | return (error); | |
5936 | } | |
5937 | ||
5938 | spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); | |
5939 | ||
cc99f275 DB |
5940 | for (int d = 0; d < ndvas; d++) { |
5941 | error = metaslab_claim_dva(spa, &dva[d], txg); | |
5942 | if (error != 0) | |
b128c09f | 5943 | break; |
cc99f275 | 5944 | } |
b128c09f BB |
5945 | |
5946 | spa_config_exit(spa, SCL_ALLOC, FTAG); | |
5947 | ||
5948 | ASSERT(error == 0 || txg == 0); | |
34dc7c2f | 5949 | |
b128c09f | 5950 | return (error); |
34dc7c2f | 5951 | } |
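/*
 * Note (restating the flow above): a call with txg != 0 first recurses
 * with txg == 0, which metaslab_claim_concrete() treats as a dry run, so
 * either every DVA of the block can be moved back to ms_allocating for
 * that txg or the claim fails before anything is modified.
 */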
920dd524 | 5952 | |
d1d7e268 MK |
5953 | void |
5954 | metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp) | |
920dd524 ED |
5955 | { |
5956 | const dva_t *dva = bp->blk_dva; | |
5957 | int ndvas = BP_GET_NDVAS(bp); | |
5958 | uint64_t psize = BP_GET_PSIZE(bp); | |
5959 | int d; | |
5960 | vdev_t *vd; | |
5961 | ||
5962 | ASSERT(!BP_IS_HOLE(bp)); | |
9b67f605 | 5963 | ASSERT(!BP_IS_EMBEDDED(bp)); |
920dd524 ED |
5964 | ASSERT(psize > 0); |
5965 | ||
5966 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
5967 | ||
5968 | for (d = 0; d < ndvas; d++) { | |
5969 | if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) | |
5970 | continue; | |
5971 | atomic_add_64(&vd->vdev_pending_fastwrite, psize); | |
5972 | } | |
5973 | ||
5974 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
5975 | } | |
5976 | ||
d1d7e268 MK |
5977 | void |
5978 | metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp) | |
920dd524 ED |
5979 | { |
5980 | const dva_t *dva = bp->blk_dva; | |
5981 | int ndvas = BP_GET_NDVAS(bp); | |
5982 | uint64_t psize = BP_GET_PSIZE(bp); | |
5983 | int d; | |
5984 | vdev_t *vd; | |
5985 | ||
5986 | ASSERT(!BP_IS_HOLE(bp)); | |
9b67f605 | 5987 | ASSERT(!BP_IS_EMBEDDED(bp)); |
920dd524 ED |
5988 | ASSERT(psize > 0); |
5989 | ||
5990 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
5991 | ||
5992 | for (d = 0; d < ndvas; d++) { | |
5993 | if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) | |
5994 | continue; | |
5995 | ASSERT3U(vd->vdev_pending_fastwrite, >=, psize); | |
5996 | atomic_sub_64(&vd->vdev_pending_fastwrite, psize); | |
5997 | } | |
5998 | ||
5999 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
6000 | } | |
30b92c1d | 6001 | |
a1d477c2 MA |
6002 | static void |
6003 | metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset, | |
6004 | uint64_t size, void *arg) | |
6005 | { | |
14e4e3cb AZ |
6006 | (void) inner, (void) arg; |
6007 | ||
a1d477c2 MA |
6008 | if (vd->vdev_ops == &vdev_indirect_ops) |
6009 | return; | |
6010 | ||
6011 | metaslab_check_free_impl(vd, offset, size); | |
6012 | } | |
6013 | ||
6014 | static void | |
6015 | metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size) | |
6016 | { | |
6017 | metaslab_t *msp; | |
2a8ba608 | 6018 | spa_t *spa __maybe_unused = vd->vdev_spa; |
a1d477c2 MA |
6019 | |
6020 | if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) | |
6021 | return; | |
6022 | ||
6023 | if (vd->vdev_ops->vdev_op_remap != NULL) { | |
6024 | vd->vdev_ops->vdev_op_remap(vd, offset, size, | |
6025 | metaslab_check_free_impl_cb, NULL); | |
6026 | return; | |
6027 | } | |
6028 | ||
6029 | ASSERT(vdev_is_concrete(vd)); | |
6030 | ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count); | |
6031 | ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); | |
6032 | ||
6033 | msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; | |
6034 | ||
6035 | mutex_enter(&msp->ms_lock); | |
df72b8be SD |
6036 | if (msp->ms_loaded) { |
6037 | range_tree_verify_not_present(msp->ms_allocatable, | |
6038 | offset, size); | |
6039 | } | |
a1d477c2 | 6040 | |
93e28d66 SD |
6041 | /* |
6042 | * Check all segments that currently exist in the freeing pipeline. | |
6043 | * | |
6044 | * It would intuitively make sense to also check the current allocating | |
6045 | * tree since metaslab_unalloc_dva() exists for extents that are | |
e1cfd73f | 6046 | * allocated and freed in the same sync pass within the same txg. |
93e28d66 SD |
6047 | * Unfortunately there are places (e.g. the ZIL) where we allocate a |
6048 | * segment but then we free part of it within the same txg | |
6049 | * [see zil_sync()]. Thus, we don't call range_tree_verify() in the | |
6050 | * current allocating tree. | |
6051 | */ | |
df72b8be SD |
6052 | range_tree_verify_not_present(msp->ms_freeing, offset, size); |
6053 | range_tree_verify_not_present(msp->ms_checkpointing, offset, size); | |
6054 | range_tree_verify_not_present(msp->ms_freed, offset, size); | |
a1d477c2 | 6055 | for (int j = 0; j < TXG_DEFER_SIZE; j++) |
df72b8be | 6056 | range_tree_verify_not_present(msp->ms_defer[j], offset, size); |
93e28d66 | 6057 | range_tree_verify_not_present(msp->ms_trim, offset, size); |
a1d477c2 MA |
6058 | mutex_exit(&msp->ms_lock); |
6059 | } | |
6060 | ||
13fe0198 MA |
6061 | void |
6062 | metaslab_check_free(spa_t *spa, const blkptr_t *bp) | |
6063 | { | |
13fe0198 MA |
6064 | if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) |
6065 | return; | |
6066 | ||
6067 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
1c27024e | 6068 | for (int i = 0; i < BP_GET_NDVAS(bp); i++) { |
93cf2076 GW |
6069 | uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]); |
6070 | vdev_t *vd = vdev_lookup_top(spa, vdev); | |
6071 | uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]); | |
13fe0198 | 6072 | uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]); |
13fe0198 | 6073 | |
a1d477c2 | 6074 | if (DVA_GET_GANG(&bp->blk_dva[i])) |
2b56a634 | 6075 | size = vdev_gang_header_asize(vd); |
a1d477c2 MA |
6076 | |
6077 | ASSERT3P(vd, !=, NULL); | |
13fe0198 | 6078 | |
a1d477c2 | 6079 | metaslab_check_free_impl(vd, offset, size); |
13fe0198 MA |
6080 | } |
6081 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
6082 | } | |
6083 | ||
1b939560 BB |
6084 | static void |
6085 | metaslab_group_disable_wait(metaslab_group_t *mg) | |
6086 | { | |
6087 | ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock)); | |
6088 | while (mg->mg_disabled_updating) { | |
6089 | cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock); | |
6090 | } | |
6091 | } | |
6092 | ||
6093 | static void | |
6094 | metaslab_group_disabled_increment(metaslab_group_t *mg) | |
6095 | { | |
6096 | ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock)); | |
6097 | ASSERT(mg->mg_disabled_updating); | |
6098 | ||
6099 | while (mg->mg_ms_disabled >= max_disabled_ms) { | |
6100 | cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock); | |
6101 | } | |
6102 | mg->mg_ms_disabled++; | |
6103 | ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms); | |
6104 | } | |
6105 | ||
6106 | /* | |
6107 | * Mark the metaslab as disabled to prevent any allocations on this metaslab. | |
6108 | * We must also track how many metaslabs are currently disabled within a | |
6109 | * metaslab group and limit them to prevent allocation failures from | |
6110 | * occurring because all metaslabs are disabled. | |
6111 | */ | |
6112 | void | |
6113 | metaslab_disable(metaslab_t *msp) | |
6114 | { | |
6115 | ASSERT(!MUTEX_HELD(&msp->ms_lock)); | |
6116 | metaslab_group_t *mg = msp->ms_group; | |
6117 | ||
6118 | mutex_enter(&mg->mg_ms_disabled_lock); | |
6119 | ||
6120 | /* | |
6121 | * To keep an accurate count of how many threads have disabled | |
6122 | * a specific metaslab group, we only allow one thread to mark | |
6123 | * the metaslab group at a time. This ensures that the value of | |
6124 | * ms_disabled will be accurate when we decide to mark a metaslab | |
6125 | * group as disabled. To do this we force all other threads | |
6126 | * to wait until the metaslab group's mg_disabled_updating flag is no |
6127 | * longer set. |
6128 | */ | |
6129 | metaslab_group_disable_wait(mg); | |
6130 | mg->mg_disabled_updating = B_TRUE; | |
6131 | if (msp->ms_disabled == 0) { | |
6132 | metaslab_group_disabled_increment(mg); | |
6133 | } | |
6134 | mutex_enter(&msp->ms_lock); | |
6135 | msp->ms_disabled++; | |
6136 | mutex_exit(&msp->ms_lock); | |
6137 | ||
6138 | mg->mg_disabled_updating = B_FALSE; | |
6139 | cv_broadcast(&mg->mg_ms_disabled_cv); | |
6140 | mutex_exit(&mg->mg_ms_disabled_lock); | |
6141 | } | |
6142 | ||
6143 | void | |
f09fda50 | 6144 | metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload) |
1b939560 BB |
6145 | { |
6146 | metaslab_group_t *mg = msp->ms_group; | |
6147 | spa_t *spa = mg->mg_vd->vdev_spa; | |
6148 | ||
6149 | /* | |
6150 | * Wait for the outstanding IO to be synced to prevent newly | |
6151 | * allocated blocks from being overwritten. This is used by |
6152 | * initialize and TRIM, which modify unallocated space. |
6153 | */ | |
6154 | if (sync) | |
6155 | txg_wait_synced(spa_get_dsl(spa), 0); | |
6156 | ||
6157 | mutex_enter(&mg->mg_ms_disabled_lock); | |
6158 | mutex_enter(&msp->ms_lock); | |
6159 | if (--msp->ms_disabled == 0) { | |
6160 | mg->mg_ms_disabled--; | |
6161 | cv_broadcast(&mg->mg_ms_disabled_cv); | |
f09fda50 PD |
6162 | if (unload) |
6163 | metaslab_unload(msp); | |
1b939560 BB |
6164 | } |
6165 | mutex_exit(&msp->ms_lock); | |
6166 | mutex_exit(&mg->mg_ms_disabled_lock); | |
6167 | } | |
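/*
 * Hedged usage sketch (hypothetical TRIM/initialize-style caller,
 * simplified): the pair fences a metaslab while its free space is being
 * operated on, and max_disabled_ms bounds how many metaslabs per group
 * may be fenced at once:
 *
 *	metaslab_disable(msp);
 *	... issue work against unallocated space in msp ...
 *	metaslab_enable(msp, B_TRUE, B_FALSE);	wait for sync, keep loaded
 */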
6168 | ||
600a02b8 AM |
6169 | void |
6170 | metaslab_set_unflushed_dirty(metaslab_t *ms, boolean_t dirty) | |
6171 | { | |
6172 | ms->ms_unflushed_dirty = dirty; | |
6173 | } | |
6174 | ||
93e28d66 SD |
6175 | static void |
6176 | metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx) | |
6177 | { | |
6178 | vdev_t *vd = ms->ms_group->mg_vd; | |
6179 | spa_t *spa = vd->vdev_spa; | |
6180 | objset_t *mos = spa_meta_objset(spa); | |
6181 | ||
6182 | ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); | |
6183 | ||
6184 | metaslab_unflushed_phys_t entry = { | |
6185 | .msp_unflushed_txg = metaslab_unflushed_txg(ms), | |
6186 | }; | |
6187 | uint64_t entry_size = sizeof (entry); | |
6188 | uint64_t entry_offset = ms->ms_id * entry_size; | |
6189 | ||
6190 | uint64_t object = 0; | |
6191 | int err = zap_lookup(mos, vd->vdev_top_zap, | |
6192 | VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, | |
6193 | &object); | |
6194 | if (err == ENOENT) { | |
6195 | object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA, | |
6196 | SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx); | |
6197 | VERIFY0(zap_add(mos, vd->vdev_top_zap, | |
6198 | VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, | |
6199 | &object, tx)); | |
6200 | } else { | |
6201 | VERIFY0(err); | |
6202 | } | |
6203 | ||
6204 | dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size, | |
6205 | &entry, tx); | |
6206 | } | |
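/*
 * Layout note (derived from the arithmetic above): the object holds one
 * fixed-size metaslab_unflushed_phys_t per metaslab, indexed by ms_id,
 * so the metaslab with ms_id 37 keeps its unflushed txg at byte offset
 * 37 * sizeof (metaslab_unflushed_phys_t) of the object referenced by
 * the vdev's VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS entry.
 */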
6207 | ||
6208 | void | |
6209 | metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx) | |
6210 | { | |
93e28d66 SD |
6211 | ms->ms_unflushed_txg = txg; |
6212 | metaslab_update_ondisk_flush_data(ms, tx); | |
6213 | } | |
6214 | ||
600a02b8 AM |
6215 | boolean_t |
6216 | metaslab_unflushed_dirty(metaslab_t *ms) | |
6217 | { | |
6218 | return (ms->ms_unflushed_dirty); | |
6219 | } | |
6220 | ||
93e28d66 SD |
6221 | uint64_t |
6222 | metaslab_unflushed_txg(metaslab_t *ms) | |
6223 | { | |
6224 | return (ms->ms_unflushed_txg); | |
6225 | } | |
6226 | ||
ab8d9c17 | 6227 | ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, U64, ZMOD_RW, |
03fdcb9a | 6228 | "Allocation granularity (a.k.a. stripe size)"); |
02730c33 | 6229 | |
03fdcb9a MM |
6230 | ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW, |
6231 | "Load all metaslabs when pool is first opened"); | |
02730c33 | 6232 | |
03fdcb9a MM |
6233 | ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW, |
6234 | "Prevent metaslabs from being unloaded"); | |
f4a4046b | 6235 | |
03fdcb9a MM |
6236 | ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW, |
6237 | "Preload potential metaslabs during reassessment"); | |
eef0f4d8 | 6238 | |
fdc2d303 | 6239 | ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW, |
03fdcb9a | 6240 | "Delay in txgs after metaslab was last used before unloading"); |
eef0f4d8 | 6241 | |
fdc2d303 | 6242 | ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW, |
03fdcb9a | 6243 | "Delay in milliseconds after metaslab was last used before unloading"); |
02730c33 | 6244 | |
03fdcb9a | 6245 | /* BEGIN CSTYLED */ |
fdc2d303 | 6246 | ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW, |
03fdcb9a MM |
6247 | "Percentage of metaslab group size that should be free to make it " |
6248 | "eligible for allocation"); | |
f3a7f661 | 6249 | |
fdc2d303 | 6250 | ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW, |
03fdcb9a MM |
6251 | "Percentage of metaslab group size that should be considered eligible " |
6252 | "for allocations unless all metaslab groups within the metaslab class " | |
6253 | "have also crossed this threshold"); | |
02730c33 | 6254 | |
7ada752a AZ |
6255 | ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT, |
6256 | ZMOD_RW, | |
03fdcb9a MM |
6257 | "Use the fragmentation metric to prefer less fragmented metaslabs"); |
6258 | /* END CSTYLED */ | |
02730c33 | 6259 | |
fdc2d303 | 6260 | ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT, |
7ada752a AZ |
6261 | ZMOD_RW, "Fragmentation for metaslab to allow allocation"); |
6262 | ||
03fdcb9a MM |
6263 | ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW, |
6264 | "Prefer metaslabs with lower LBAs"); | |
4e21fd06 | 6265 | |
03fdcb9a MM |
6266 | ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW, |
6267 | "Enable metaslab group biasing"); | |
4e21fd06 | 6268 | |
03fdcb9a MM |
6269 | ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT, |
6270 | ZMOD_RW, "Enable segment-based metaslab selection"); | |
a1d477c2 | 6271 | |
03fdcb9a MM |
6272 | ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW, |
6273 | "Segment-based metaslab selection maximum buckets before switching"); | |
d3230d76 | 6274 | |
ab8d9c17 | 6275 | ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, U64, ZMOD_RW, |
46adb282 RN |
6276 | "Blocks larger than this size are sometimes forced to be gang blocks"); |
6277 | ||
6278 | ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging_pct, UINT, ZMOD_RW, | |
6279 | "Percentage of large blocks that will be forced to be gang blocks"); | |
d3230d76 | 6280 | |
fdc2d303 | 6281 | ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW, |
03fdcb9a | 6282 | "Max distance (bytes) to search forward before using size tree"); |
c81f1790 | 6283 | |
03fdcb9a MM |
6284 | ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW, |
6285 | "When looking in size tree, use largest segment instead of exact fit"); | |
f09fda50 | 6286 | |
ab8d9c17 | 6287 | ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, U64, |
03fdcb9a | 6288 | ZMOD_RW, "How long to trust the cached max chunk size of a metaslab"); |
cc99f275 | 6289 | |
fdc2d303 | 6290 | ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW, |
03fdcb9a | 6291 | "Percentage of memory that can be used to store metaslab range trees"); |
be5c6d96 MA |
6292 | |
6293 | ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT, | |
6294 | ZMOD_RW, "Try hard to allocate before ganging"); | |
6295 | ||
fdc2d303 | 6296 | ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW, |
be5c6d96 | 6297 | "Normally only consider this many of the best metaslabs in each vdev"); |