/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zap.h>

#define	WITH_DF_BLOCK_ALLOCATOR

#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))

/*
 * Metaslab granularity, in bytes. This is roughly similar to what would be
 * referred to as the "stripe size" in traditional RAID arrays. In normal
 * operation, we will try to write this amount of data to a top-level vdev
 * before moving on to the next one.
 */
unsigned long metaslab_aliquot = 512 << 10;

/*
 * For testing, make some blocks above a certain size be gang blocks.
 */
unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;

/*
 * Since we can touch multiple metaslabs (and their respective space maps)
 * with each transaction group, we benefit from having a smaller space map
 * block size since it allows us to issue more I/O operations scattered
 * around the disk.
 */
int zfs_metaslab_sm_blksz = (1 << 12);

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;

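/*
 * As a rough illustration: with the default value of 200, the on-disk
 * space map is only considered for condensing once it is at least twice
 * the size of its optimal (fully condensed) in-core representation.
 */
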
/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
int zfs_metaslab_condense_block_threshold = 4;

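/*
 * For example, assuming the 4K space map block size set by
 * zfs_metaslab_sm_blksz above, a threshold of 4 blocks means a space map
 * occupying 16K or less on disk is not condensed by this heuristic.
 */
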
/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or equal to
 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
 * then it will be skipped unless all metaslab groups within the metaslab
 * class have also crossed this threshold.
 */
int zfs_mg_fragmentation_threshold = 85;

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;

/*
 * When set, we will load all metaslabs when the pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set, we will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;

/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;

/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;

/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;

/*
 * Enable/disable preloading of metaslabs.
 */
int metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
int metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
int metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
int metaslab_bias_enabled = B_TRUE;


/*
 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
 */
boolean_t zfs_remap_blkptr_enable = B_TRUE;

/*
 * Enable/disable segment-based metaslab selection.
 */
int zfs_metaslab_segment_weight_enabled = B_TRUE;

/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
int zfs_metaslab_switch_threshold = 2;

/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
#ifdef _METASLAB_TRACING
boolean_t metaslab_trace_enabled = B_TRUE;
#endif

/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached allowing for further investigation.
 */
#ifdef _METASLAB_TRACING
uint64_t metaslab_trace_max_entries = 5000;
#endif

static uint64_t metaslab_weight(metaslab_t *);
static void metaslab_set_fragmentation(metaslab_t *);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);

static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
#ifdef _METASLAB_TRACING
kmem_cache_t *metaslab_alloc_trace_cache;
#endif

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
	mc->mc_alloc_slots = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (zfs_refcount_t), KM_SLEEP);
	mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (uint64_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++)
		zfs_refcount_create_tracked(&mc->mc_alloc_slots[i]);

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
		zfs_refcount_destroy(&mc->mc_alloc_slots[i]);
	kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
	    sizeof (zfs_refcount_t));
	kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
	    sizeof (uint64_t));
	mutex_destroy(&mc->mc_lock);
	kmem_free(mc, sizeof (metaslab_class_t));
}

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}

static void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);

	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels,
		 * or vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}

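/*
 * For example, a class with one 1 TB group at 10% fragmentation and one
 * 3 TB group at 30% fragmentation yields (10 * 1 + 30 * 3) / 4 = 25%.
 */
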
/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * Calculate if we have enough space to add additional
		 * metaslabs. We report the expandable space in terms
		 * of the metaslab size since that's the unit of expansion.
		 */
		space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
		    1ULL << tvd->vdev_ms_shift);
	}
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (space);
}

static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = (const metaslab_t *)x1;
	const metaslab_t *m2 = (const metaslab_t *)x2;

	int sort1 = 0;
	int sort2 = 0;
	if (m1->ms_allocator != -1 && m1->ms_primary)
		sort1 = 1;
	else if (m1->ms_allocator != -1 && !m1->ms_primary)
		sort1 = 2;
	if (m2->ms_allocator != -1 && m2->ms_primary)
		sort2 = 1;
	else if (m2->ms_allocator != -1 && !m2->ms_primary)
		sort2 = 2;

	/*
	 * Sort inactive metaslabs first, then primaries, then secondaries. When
	 * selecting a metaslab to allocate from, an allocator first tries its
	 * primary, then secondary active metaslab. If it doesn't have active
	 * metaslabs, or can't allocate from them, it searches for an inactive
	 * metaslab to activate. If it can't find a suitable one, it will steal
	 * a primary or secondary metaslab from another allocator.
	 */
	if (sort1 < sort2)
		return (-1);
	if (sort1 > sort2)
		return (1);

	int cmp = AVL_CMP(m2->ms_weight, m1->ms_weight);
	if (likely(cmp))
		return (cmp);

	IMPLY(AVL_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);

	return (AVL_CMP(m1->ms_start, m2->ms_start));
}

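/*
 * As an illustration of the resulting order: an in-order walk of the tree
 * visits inactive metaslabs first, then active primaries, then active
 * secondaries; within each tier, heavier (higher-weight) metaslabs come
 * first and ties are broken by start offset.
 */
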
uint64_t
metaslab_allocated_space(metaslab_t *msp)
{
	return (msp->ms_allocated_space);
}

/*
 * Verify that the space accounting on disk matches the in-core range_trees.
 */
static void
metaslab_verify_space(metaslab_t *msp, uint64_t txg)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	uint64_t allocating = 0;
	uint64_t sm_free_space, msp_free_space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(!msp->ms_condensing);

	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
		return;

	/*
	 * We can only verify the metaslab space when we're called
	 * from syncing context with a loaded metaslab that has an
	 * allocated space map. Calling this in non-syncing context
	 * does not provide a consistent view of the metaslab since
	 * we're performing allocations in the future.
	 */
	if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
	    !msp->ms_loaded)
		return;

	/*
	 * Even though the smp_alloc field can get negative (e.g.
	 * see vdev_checkpoint_sm), that should never be the case
	 * when it comes to a metaslab's space map.
	 */
	ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);

	sm_free_space = msp->ms_size - metaslab_allocated_space(msp);

	/*
	 * Account for future allocations since we would have
	 * already deducted that space from the ms_allocatable.
	 */
	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
		allocating +=
		    range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
	}

	ASSERT3U(msp->ms_deferspace, ==,
	    range_tree_space(msp->ms_defer[0]) +
	    range_tree_space(msp->ms_defer[1]));

	msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
	    msp->ms_deferspace + range_tree_space(msp->ms_freed);

	VERIFY3U(sm_free_space, ==, msp_free_space);
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the capacity is above
 * the zfs_mg_noalloc_threshold and the fragmentation value is not
 * greater than zfs_mg_fragmentation_threshold. If a metaslab group
 * transitions from allocatable to non-allocatable or vice versa then the
 * metaslab group's class is updated to reflect the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;
	boolean_t was_initialized;

	ASSERT(vd == vd->vdev_top);
	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
	    SCL_ALLOC);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;
	was_initialized = mg->mg_initialized;

	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);

	mutex_enter(&mc->mc_lock);

	/*
	 * If the metaslab group was just added then it won't
	 * have any space until we finish syncing out this txg.
	 * At that point we will consider it initialized and available
	 * for allocations. We also don't consider non-activated
	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
	 * to be initialized, because they can't be used for allocation.
	 */
	mg->mg_initialized = metaslab_group_initialized(mg);
	if (!was_initialized && mg->mg_initialized) {
		mc->mc_groups++;
	} else if (was_initialized && !mg->mg_initialized) {
		ASSERT3U(mc->mc_groups, >, 0);
		mc->mc_groups--;
	}
	if (mg->mg_initialized)
		mg->mg_no_free_space = B_FALSE;

	/*
	 * A metaslab group is considered allocatable if it has plenty
	 * of free space or is not heavily fragmented. We only take
	 * fragmentation into account if the metaslab group has a valid
	 * fragmentation metric (i.e. a value between 0 and 100).
	 */
	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

	/*
	 * The mc_alloc_groups maintains a count of the number of
	 * groups in this metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;
	mutex_exit(&mc->mc_lock);

	mutex_exit(&mg->mg_lock);
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&mg->mg_ms_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mg->mg_ms_initialize_cv, NULL, CV_DEFAULT, NULL);
	mg->mg_primaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
	    KM_SLEEP);
	mg->mg_secondaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
	    KM_SLEEP);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;
	mg->mg_initialized = B_FALSE;
	mg->mg_no_free_space = B_TRUE;
	mg->mg_allocators = allocators;

	mg->mg_alloc_queue_depth = kmem_zalloc(allocators *
	    sizeof (zfs_refcount_t), KM_SLEEP);
	mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
	    sizeof (uint64_t), KM_SLEEP);
	for (int i = 0; i < allocators; i++) {
		zfs_refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
		mg->mg_cur_max_alloc_queue_depth[i] = 0;
	}

	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
	    maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	taskq_destroy(mg->mg_taskq);
	avl_destroy(&mg->mg_metaslab_tree);
	kmem_free(mg->mg_primaries, mg->mg_allocators * sizeof (metaslab_t *));
	kmem_free(mg->mg_secondaries, mg->mg_allocators *
	    sizeof (metaslab_t *));
	mutex_destroy(&mg->mg_lock);
	mutex_destroy(&mg->mg_ms_initialize_lock);
	cv_destroy(&mg->mg_ms_initialize_cv);

	for (int i = 0; i < mg->mg_allocators; i++) {
		zfs_refcount_destroy(&mg->mg_alloc_queue_depth[i]);
		mg->mg_cur_max_alloc_queue_depth[i] = 0;
	}
	kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
	    sizeof (zfs_refcount_t));
	kmem_free(mg->mg_cur_max_alloc_queue_depth, mg->mg_allocators *
	    sizeof (uint64_t));

	kmem_free(mg, sizeof (metaslab_group_t));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER), !=, 0);

	ASSERT(mc->mc_rotor != mg);
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
}

/*
 * Passivate a metaslab group and remove it from the allocation rotor.
 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
 * a metaslab group. This function will momentarily drop spa_config_locks
 * that are lower than the SCL_ALLOC lock (see comment below).
 */
void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;
	int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);

	ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
	    (SCL_ALLOC | SCL_ZIO));

	if (--mg->mg_activation_count != 0) {
		ASSERT(mc->mc_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	/*
	 * The spa_config_lock is an array of rwlocks, ordered as
	 * follows (from highest to lowest):
	 *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
	 *	SCL_ZIO > SCL_FREE > SCL_VDEV
	 * (For more information about the spa_config_lock see spa_misc.c)
	 * The higher the lock, the broader its coverage. When we passivate
	 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
	 * config locks. However, the metaslab group's taskq might be trying
	 * to preload metaslabs so we must drop the SCL_ZIO lock and any
	 * lower locks to allow the I/O to complete. At a minimum,
	 * we continue to hold the SCL_ALLOC lock, which prevents any future
	 * allocations from taking place and any changes to the vdev tree.
	 */
	spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
	taskq_wait_outstanding(mg->mg_taskq, 0);
	spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
	metaslab_group_alloc_update(mg);
	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_t *msp = mg->mg_primaries[i];
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
		msp = mg->mg_secondaries[i];
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
	}

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}

boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	vdev_stat_t *vs = &vd->vdev_stat;

	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}

uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
	return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
}

void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
	uint64_t *mg_hist;
	vdev_t *vd = mg->mg_vd;
	uint64_t ashift = vd->vdev_ashift;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
	    SPACE_MAP_HISTOGRAM_SIZE + ashift);

	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];
		ASSERT(msp != NULL);

		/* skip if not active or not a member */
		if (msp->ms_sm == NULL || msp->ms_group != mg)
			continue;

		for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
			mg_hist[i + ashift] +=
			    msp->ms_sm->sm_phys->smp_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);

	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		mg->mg_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}

void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		ASSERT3U(mg->mg_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		ASSERT3U(mc->mc_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);

		mg->mg_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	ASSERT(msp->ms_group == NULL);
	mutex_enter(&mg->mg_lock);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);

	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_add(mg, msp);
	mutex_exit(&msp->ms_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_remove(mg, msp);
	mutex_exit(&msp->ms_lock);

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	ASSERT(MUTEX_HELD(&mg->mg_lock));
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 511].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	metaslab_group_sort_impl(mg, msp, weight);
	mutex_exit(&mg->mg_lock);
}

/*
 * Calculate the fragmentation for a given metaslab group. We can use
 * a simple average here since all metaslabs within the group must have
 * the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
 * group have a fragmentation metric.
 */
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	uint64_t fragmentation = 0;
	uint64_t valid_ms = 0;

	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
			continue;
		if (msp->ms_group != mg)
			continue;

		valid_ms++;
		fragmentation += msp->ms_fragmentation;
	}

	if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
		return (ZFS_FRAG_INVALID);

	fragmentation /= valid_ms;
	ASSERT3U(fragmentation, <=, 100);
	return (fragmentation);
}

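/*
 * As a rough illustration: on a top-level vdev with 200 metaslabs, more
 * than 100 of them must report a valid fragmentation metric before the
 * average above is considered meaningful; otherwise this function
 * returns ZFS_FRAG_INVALID.
 */
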
/*
 * Determine if a given metaslab group should skip allocations. A metaslab
 * group should avoid allocations if its free capacity is less than the
 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
 * that can still handle allocations. If the allocation throttle is enabled
 * then we skip allocations to devices that have reached their maximum
 * allocation queue depth unless the selected metaslab group is the only
 * eligible group remaining.
 */
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
    uint64_t psize, int allocator, int d)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_class_t *mc = mg->mg_class;

	/*
	 * We can only consider skipping this metaslab group if it's
	 * in the normal, special, or dedup metaslab class and there are
	 * other metaslab groups to select from. Otherwise, we always
	 * consider it eligible for allocations.
	 */
	if ((mc != spa_normal_class(spa) &&
	    mc != spa_special_class(spa) &&
	    mc != spa_dedup_class(spa)) ||
	    mc->mc_groups <= 1)
		return (B_TRUE);

	/*
	 * If the metaslab group's mg_allocatable flag is set (see comments
	 * in metaslab_group_alloc_update() for more information) and
	 * the allocation throttle is disabled then allow allocations to this
	 * device. However, if the allocation throttle is enabled then
	 * check if we have reached our allocation limit (mg_alloc_queue_depth)
	 * to determine if we should allow allocations to this metaslab group.
	 * If all metaslab groups are no longer considered allocatable
	 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
	 * gang block size then we allow allocations on this metaslab group
	 * regardless of the mg_allocatable or throttle settings.
	 */
	if (mg->mg_allocatable) {
		metaslab_group_t *mgp;
		int64_t qdepth;
		uint64_t qmax = mg->mg_cur_max_alloc_queue_depth[allocator];

		if (!mc->mc_alloc_throttle_enabled)
			return (B_TRUE);

		/*
		 * If this metaslab group does not have any free space, then
		 * there is no point in looking further.
		 */
		if (mg->mg_no_free_space)
			return (B_FALSE);

		/*
		 * Relax allocation throttling for ditto blocks. Due to
		 * random imbalances in allocation it tends to push copies
		 * to one vdev that looks a bit better at the moment.
		 */
		qmax = qmax * (4 + d) / 4;

		qdepth = zfs_refcount_count(
		    &mg->mg_alloc_queue_depth[allocator]);

		/*
		 * If this metaslab group is below its qmax or it's
		 * the only allocatable metaslab group, then attempt
		 * to allocate from it.
		 */
		if (qdepth < qmax || mc->mc_alloc_groups == 1)
			return (B_TRUE);
		ASSERT3U(mc->mc_alloc_groups, >, 1);

		/*
		 * Since this metaslab group is at or over its qmax, we
		 * need to determine if there are metaslab groups after this
		 * one that might be able to handle this allocation. This is
		 * racy since we can't hold the locks for all metaslab
		 * groups at the same time when we make this check.
		 */
		for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
			qmax = mgp->mg_cur_max_alloc_queue_depth[allocator];
			qmax = qmax * (4 + d) / 4;
			qdepth = zfs_refcount_count(
			    &mgp->mg_alloc_queue_depth[allocator]);

			/*
			 * If there is another metaslab group that
			 * might be able to handle the allocation, then
			 * we return false so that we skip this group.
			 */
			if (qdepth < qmax && !mgp->mg_no_free_space)
				return (B_FALSE);
		}

		/*
		 * We didn't find another group to handle the allocation
		 * so we can't skip this metaslab group even though
		 * we are at or over our qmax.
		 */
		return (B_TRUE);

	} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * ==========================================================================
 * Range tree callbacks
 * ==========================================================================
 */

/*
 * Comparison function for the private size-ordered tree. Tree is sorted
 * by size, larger sizes at the end of the tree.
 */
static int
metaslab_rangesize_compare(const void *x1, const void *x2)
{
	const range_seg_t *r1 = x1;
	const range_seg_t *r2 = x2;
	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
	uint64_t rs_size2 = r2->rs_end - r2->rs_start;

	int cmp = AVL_CMP(rs_size1, rs_size2);
	if (likely(cmp))
		return (cmp);

	return (AVL_CMP(r1->rs_start, r2->rs_start));
}

/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */

/*
 * Return the maximum contiguous segment within the metaslab.
 */
uint64_t
metaslab_block_maxsize(metaslab_t *msp)
{
	avl_tree_t *t = &msp->ms_allocatable_by_size;
	range_seg_t *rs;

	if (t == NULL || (rs = avl_last(t)) == NULL)
		return (0ULL);

	return (rs->rs_end - rs->rs_start);
}

static range_seg_t *
metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
{
	range_seg_t *rs, rsearch;
	avl_index_t where;

	rsearch.rs_start = start;
	rsearch.rs_end = start + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL) {
		rs = avl_nearest(t, where, AVL_AFTER);
	}

	return (rs);
}

#if defined(WITH_FF_BLOCK_ALLOCATOR) || \
    defined(WITH_DF_BLOCK_ALLOCATOR) || \
    defined(WITH_CF_BLOCK_ALLOCATOR)
/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate. This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
	range_seg_t *rs = metaslab_block_find(t, *cursor, size);

	while (rs != NULL) {
		uint64_t offset = P2ROUNDUP(rs->rs_start, align);

		if (offset + size <= rs->rs_end) {
			*cursor = offset + size;
			return (offset);
		}
		rs = AVL_NEXT(t, rs);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_block_picker(t, cursor, size, align));
}
#endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */

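/*
 * Illustrative walk through metaslab_block_picker(): a request for 8K
 * with 8K alignment starts scanning at the caller's cursor, rounds each
 * candidate segment's start up to an 8K boundary, and if nothing fits it
 * wraps the cursor back to offset 0 for exactly one more pass before
 * returning -1ULL.
 */
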
#if defined(WITH_FF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static uint64_t
metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not guarantee that other allocation sizes
	 * will not exist in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	avl_tree_t *t = &msp->ms_allocatable->rt_root;

	return (metaslab_block_picker(t, cursor, size, align));
}

static metaslab_ops_t metaslab_ff_ops = {
	metaslab_ff_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
#endif /* WITH_FF_BLOCK_ALLOCATOR */

#if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first fit allocation scheme until space gets low and then
 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not guarantee that other allocation sizes
	 * will not exist in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	range_tree_t *rt = msp->ms_allocatable;
	avl_tree_t *t = &rt->rt_root;
	uint64_t max_size = metaslab_block_maxsize(msp);
	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==,
	    avl_numnodes(&msp->ms_allocatable_by_size));

	if (max_size < size)
		return (-1ULL);

	/*
	 * If we're running low on space switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		t = &msp->ms_allocatable_by_size;
		*cursor = 0;
	}

	return (metaslab_block_picker(t, cursor, size, 1ULL));
}

static metaslab_ops_t metaslab_df_ops = {
	metaslab_df_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
#endif /* WITH_DF_BLOCK_ALLOCATOR */
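
/*
 * With the default tunables defined earlier in this file, the switch to
 * best-fit happens once the largest contiguous free segment in a metaslab
 * drops below metaslab_df_alloc_threshold (SPA_OLD_MAXBLOCKSIZE, i.e. 128K)
 * or the metaslab's free space falls under metaslab_df_free_pct (4%).
 */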

#if defined(WITH_CF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Cursor fit block allocator -
 * Select the largest region in the metaslab, set the cursor to the beginning
 * of the range and the cursor_end to the end of the range. As allocations
 * are made, advance the cursor. Continue allocating from the cursor until
 * the range is exhausted and then find a new range.
 * ==========================================================================
 */
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
	range_tree_t *rt = msp->ms_allocatable;
	avl_tree_t *t = &msp->ms_allocatable_by_size;
	uint64_t *cursor = &msp->ms_lbas[0];
	uint64_t *cursor_end = &msp->ms_lbas[1];
	uint64_t offset = 0;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));

	ASSERT3U(*cursor_end, >=, *cursor);

	if ((*cursor + size) > *cursor_end) {
		range_seg_t *rs;

		rs = avl_last(&msp->ms_allocatable_by_size);
		if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
			return (-1ULL);

		*cursor = rs->rs_start;
		*cursor_end = rs->rs_end;
	}

	offset = *cursor;
	*cursor += size;

	return (offset);
}

static metaslab_ops_t metaslab_cf_ops = {
	metaslab_cf_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
#endif /* WITH_CF_BLOCK_ALLOCATOR */
22c81dd8 BB |
1349 | |
1350 | #if defined(WITH_NDF_BLOCK_ALLOCATOR) | |
93cf2076 GW |
1351 | /* |
1352 | * ========================================================================== | |
1353 | * New dynamic fit allocator - | |
1354 | * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift | |
1355 | * contiguous blocks. If no region is found then just use the largest segment | |
1356 | * that remains. | |
1357 | * ========================================================================== | |
1358 | */ | |
1359 | ||
1360 | /* | |
1361 | * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift) | |
1362 | * to request from the allocator. | |
1363 | */ | |
428870ff BB |
1364 | uint64_t metaslab_ndf_clump_shift = 4; |
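/*
 * A hypothetical walk-through of the clump sizing (the request size is
 * made up): for an 8K request, hbit = highbit64(8192) = 14, so when the
 * cursor lookup below misses, the fallback search asks for a segment of
 * up to MIN(max_size, 1ULL << (14 + 4)) = 256K, i.e. a clump able to
 * absorb many follow-up allocations of the same size from one region.
 */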
1365 | ||
1366 | static uint64_t | |
93cf2076 | 1367 | metaslab_ndf_alloc(metaslab_t *msp, uint64_t size) |
428870ff | 1368 | { |
d2734cce | 1369 | avl_tree_t *t = &msp->ms_allocatable->rt_root; |
428870ff | 1370 | avl_index_t where; |
93cf2076 | 1371 | range_seg_t *rs, rsearch; |
9bd274dd | 1372 | uint64_t hbit = highbit64(size); |
93cf2076 GW |
1373 | uint64_t *cursor = &msp->ms_lbas[hbit - 1]; |
1374 | uint64_t max_size = metaslab_block_maxsize(msp); | |
428870ff | 1375 | |
93cf2076 | 1376 | ASSERT(MUTEX_HELD(&msp->ms_lock)); |
d2734cce SD |
1377 | ASSERT3U(avl_numnodes(t), ==, |
1378 | avl_numnodes(&msp->ms_allocatable_by_size)); | |
428870ff BB |
1379 | |
1380 | if (max_size < size) | |
1381 | return (-1ULL); | |
1382 | ||
93cf2076 GW |
1383 | rsearch.rs_start = *cursor; |
1384 | rsearch.rs_end = *cursor + size; | |
428870ff | 1385 | |
93cf2076 GW |
1386 | rs = avl_find(t, &rsearch, &where); |
1387 | if (rs == NULL || (rs->rs_end - rs->rs_start) < size) { | |
d2734cce | 1388 | t = &msp->ms_allocatable_by_size; |
428870ff | 1389 | |
93cf2076 GW |
1390 | rsearch.rs_start = 0; |
1391 | rsearch.rs_end = MIN(max_size, | |
428870ff | 1392 | 1ULL << (hbit + metaslab_ndf_clump_shift)); |
93cf2076 GW |
1393 | rs = avl_find(t, &rsearch, &where); |
1394 | if (rs == NULL) | |
1395 | rs = avl_nearest(t, where, AVL_AFTER); | |
1396 | ASSERT(rs != NULL); | |
428870ff BB |
1397 | } |
1398 | ||
93cf2076 GW |
1399 | if ((rs->rs_end - rs->rs_start) >= size) { |
1400 | *cursor = rs->rs_start + size; | |
1401 | return (rs->rs_start); | |
428870ff BB |
1402 | } |
1403 | return (-1ULL); | |
1404 | } | |
1405 | ||
93cf2076 | 1406 | static metaslab_ops_t metaslab_ndf_ops = { |
f3a7f661 | 1407 | metaslab_ndf_alloc |
428870ff BB |
1408 | }; |
1409 | ||
93cf2076 | 1410 | metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops; |
22c81dd8 | 1411 | #endif /* WITH_NDF_BLOCK_ALLOCATOR */ |
9babb374 | 1412 | |
93cf2076 | 1413 | |
34dc7c2f BB |
1414 | /* |
1415 | * ========================================================================== | |
1416 | * Metaslabs | |
1417 | * ========================================================================== | |
1418 | */ | |
93cf2076 | 1419 | |
928e8ad4 SD |
1420 | static void |
1421 | metaslab_aux_histograms_clear(metaslab_t *msp) | |
1422 | { | |
1423 | /* | |
1424 | * Auxiliary histograms are only cleared when resetting them, | |
1425 | * which can only happen while the metaslab is loaded. | |
1426 | */ | |
1427 | ASSERT(msp->ms_loaded); | |
1428 | ||
1429 | bzero(msp->ms_synchist, sizeof (msp->ms_synchist)); | |
1430 | for (int t = 0; t < TXG_DEFER_SIZE; t++) | |
1431 | bzero(msp->ms_deferhist[t], sizeof (msp->ms_deferhist[t])); | |
1432 | } | |
1433 | ||
1434 | static void | |
1435 | metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift, | |
1436 | range_tree_t *rt) | |
1437 | { | |
1438 | /* | |
1439 | * This is modeled after space_map_histogram_add(), so refer to that | |
1440 | * function for implementation details. We want this to work like | |
1441 | * the space map histogram, and not the range tree histogram, as we | |
1442 | * are essentially constructing a delta that will be later subtracted | |
1443 | * from the space map histogram. | |
1444 | */ | |
1445 | int idx = 0; | |
1446 | for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) { | |
1447 | ASSERT3U(i, >=, idx + shift); | |
1448 | histogram[idx] += rt->rt_histogram[i] << (i - idx - shift); | |
1449 | ||
1450 | if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) { | |
1451 | ASSERT3U(idx + shift, ==, i); | |
1452 | idx++; | |
1453 | ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE); | |
1454 | } | |
1455 | } | |
1456 | } | |
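/*
 * A small worked example of the folding above (bucket contents are
 * hypothetical): with shift = 9, range-tree bucket i = 12 holds
 * segments of [4K, 8K) and is added to histogram[12 - 9] = histogram[3]
 * with no scaling. Once idx has been clamped at
 * SPACE_MAP_HISTOGRAM_SIZE - 1, any larger range-tree buckets are
 * folded into that last slot scaled up by 1 << (i - idx - shift), so
 * the amount of space the entries represent is roughly preserved.
 */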
1457 | ||
1458 | /* | |
1459 | * Called at every sync pass that the metaslab gets synced. | |
1460 | * | |
1461 | * The reason is that we want our auxiliary histograms to be updated | |
1462 | * whenever the metaslab's space map histogram is updated. This way | |
1463 | * we stay consistent on which parts of the metaslab space map's | |
1464 | * histogram are currently not available for allocations (e.g. because | |
1465 | * they are in the defer, freed, and freeing trees). | |
1466 | */ | |
1467 | static void | |
1468 | metaslab_aux_histograms_update(metaslab_t *msp) | |
1469 | { | |
1470 | space_map_t *sm = msp->ms_sm; | |
1471 | ASSERT(sm != NULL); | |
1472 | ||
1473 | /* | |
1474 | * This is similar to the metaslab's space map histogram updates | |
1475 | * that take place in metaslab_sync(). The only difference is that | |
1476 | * we only care about segments that haven't made it into the | |
1477 | * ms_allocatable tree yet. | |
1478 | */ | |
1479 | if (msp->ms_loaded) { | |
1480 | metaslab_aux_histograms_clear(msp); | |
1481 | ||
1482 | metaslab_aux_histogram_add(msp->ms_synchist, | |
1483 | sm->sm_shift, msp->ms_freed); | |
1484 | ||
1485 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { | |
1486 | metaslab_aux_histogram_add(msp->ms_deferhist[t], | |
1487 | sm->sm_shift, msp->ms_defer[t]); | |
1488 | } | |
1489 | } | |
1490 | ||
1491 | metaslab_aux_histogram_add(msp->ms_synchist, | |
1492 | sm->sm_shift, msp->ms_freeing); | |
1493 | } | |
1494 | ||
1495 | /* | |
1496 | * Called every time we are done syncing (writing to) the metaslab, | |
1497 | * i.e. at the end of each sync pass. | |
1498 | * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist] | |
1499 | */ | |
1500 | static void | |
1501 | metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed) | |
1502 | { | |
1503 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
1504 | space_map_t *sm = msp->ms_sm; | |
1505 | ||
1506 | if (sm == NULL) { | |
1507 | /* | |
1508 | * We came here from metaslab_init() when creating/opening a | |
1509 | * pool, looking at a metaslab that hasn't had any allocations | |
1510 | * yet. | |
1511 | */ | |
1512 | return; | |
1513 | } | |
1514 | ||
1515 | /* | |
1516 | * This is similar to the actions that we take for the ms_freed | |
1517 | * and ms_defer trees in metaslab_sync_done(). | |
1518 | */ | |
1519 | uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE; | |
1520 | if (defer_allowed) { | |
1521 | bcopy(msp->ms_synchist, msp->ms_deferhist[hist_index], | |
1522 | sizeof (msp->ms_synchist)); | |
1523 | } else { | |
1524 | bzero(msp->ms_deferhist[hist_index], | |
1525 | sizeof (msp->ms_deferhist[hist_index])); | |
1526 | } | |
1527 | bzero(msp->ms_synchist, sizeof (msp->ms_synchist)); | |
1528 | } | |
1529 | ||
1530 | /* | |
1531 | * Ensure that the metaslab's weight and fragmentation are consistent | |
1532 | * with the contents of the histogram (either the range tree's histogram | |
1533 | * or the space map's depending whether the metaslab is loaded). | |
1534 | */ | |
1535 | static void | |
1536 | metaslab_verify_weight_and_frag(metaslab_t *msp) | |
1537 | { | |
1538 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
1539 | ||
1540 | if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) | |
1541 | return; | |
1542 | ||
1543 | /* see comment in metaslab_verify_unflushed_changes() */ | |
1544 | if (msp->ms_group == NULL) | |
1545 | return; | |
1546 | ||
1547 | /* | |
1548 | * Devices being removed always return a weight of 0 and leave | |
1549 | * fragmentation and ms_max_size as is - there is nothing for | |
1550 | * us to verify here. | |
1551 | */ | |
1552 | vdev_t *vd = msp->ms_group->mg_vd; | |
1553 | if (vd->vdev_removing) | |
1554 | return; | |
1555 | ||
1556 | /* | |
1557 | * If the metaslab is dirty it probably means that we've done | |
1558 | * some allocations or frees that have changed our histograms | |
1559 | * and thus the weight. | |
1560 | */ | |
1561 | for (int t = 0; t < TXG_SIZE; t++) { | |
1562 | if (txg_list_member(&vd->vdev_ms_list, msp, t)) | |
1563 | return; | |
1564 | } | |
1565 | ||
1566 | /* | |
1567 | * This verification checks that our in-memory state is consistent | |
1568 | * with what's on disk. If the pool is read-only then there aren't | |
1569 | * any changes and we just have the initially-loaded state. | |
1570 | */ | |
1571 | if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa)) | |
1572 | return; | |
1573 | ||
1574 | /* Some extra verification of the in-core tree, when possible. */ | |
1575 | if (msp->ms_loaded) { | |
1576 | range_tree_stat_verify(msp->ms_allocatable); | |
1577 | VERIFY(space_map_histogram_verify(msp->ms_sm, | |
1578 | msp->ms_allocatable)); | |
1579 | } | |
1580 | ||
1581 | uint64_t weight = msp->ms_weight; | |
1582 | uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; | |
1583 | boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight); | |
1584 | uint64_t frag = msp->ms_fragmentation; | |
1585 | uint64_t max_segsize = msp->ms_max_size; | |
1586 | ||
1587 | msp->ms_weight = 0; | |
1588 | msp->ms_fragmentation = 0; | |
1589 | msp->ms_max_size = 0; | |
1590 | ||
1591 | /* | |
1592 | * This function is used for verification purposes. Regardless of | |
1593 | * whether metaslab_weight() thinks this metaslab should be active or | |
1594 | * not, we want to ensure that the actual weight (and therefore the | |
1595 | * value of ms_weight) would be the same if it was to be recalculated | |
1596 | * at this point. | |
1597 | */ | |
1598 | msp->ms_weight = metaslab_weight(msp) | was_active; | |
1599 | ||
1600 | VERIFY3U(max_segsize, ==, msp->ms_max_size); | |
1601 | ||
1602 | /* | |
1603 | * If the weight type changed then there is no point in doing | |
1604 | * verification. Revert fields to their original values. | |
1605 | */ | |
1606 | if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) || | |
1607 | (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) { | |
1608 | msp->ms_fragmentation = frag; | |
1609 | msp->ms_weight = weight; | |
1610 | return; | |
1611 | } | |
1612 | ||
1613 | VERIFY3U(msp->ms_fragmentation, ==, frag); | |
1614 | VERIFY3U(msp->ms_weight, ==, weight); | |
1615 | } | |
1616 | ||
93cf2076 GW |
1617 | /* |
1618 | * Wait for any in-progress metaslab loads to complete. | |
1619 | */ | |
b194fab0 | 1620 | static void |
93cf2076 GW |
1621 | metaslab_load_wait(metaslab_t *msp) |
1622 | { | |
1623 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
1624 | ||
1625 | while (msp->ms_loading) { | |
1626 | ASSERT(!msp->ms_loaded); | |
1627 | cv_wait(&msp->ms_load_cv, &msp->ms_lock); | |
1628 | } | |
1629 | } | |
1630 | ||
b194fab0 SD |
1631 | static int |
1632 | metaslab_load_impl(metaslab_t *msp) | |
93cf2076 GW |
1633 | { |
1634 | int error = 0; | |
93cf2076 GW |
1635 | |
1636 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
b194fab0 | 1637 | ASSERT(msp->ms_loading); |
425d3237 | 1638 | ASSERT(!msp->ms_condensing); |
93cf2076 | 1639 | |
a1d477c2 | 1640 | /* |
425d3237 SD |
1641 | * We temporarily drop the lock to unblock other operations while we |
1642 | * are reading the space map. Therefore, metaslab_sync() and | |
1643 | * metaslab_sync_done() can run at the same time as we do. | |
1644 | * | |
1645 | * metaslab_sync() can append to the space map while we are loading. | |
1646 | * Therefore we load only entries that existed when we started the | |
1647 | * load. Additionally, metaslab_sync_done() has to wait for the load | |
1648 | * to complete because there are potential races like metaslab_load() | |
1649 | * loading parts of the space map that are currently being appended | |
1650 | * by metaslab_sync(). If we didn't, the ms_allocatable would have | |
1651 | * entries that metaslab_sync_done() would try to re-add later. | |
1652 | * | |
1653 | * That's why before dropping the lock we remember the synced length | |
1654 | * of the metaslab and read up to that point of the space map, | |
1655 | * ignoring entries appended by metaslab_sync() that happen after we | |
1656 | * drop the lock. | |
a1d477c2 | 1657 | */ |
425d3237 | 1658 | uint64_t length = msp->ms_synced_length; |
a1d477c2 | 1659 | mutex_exit(&msp->ms_lock); |
93cf2076 | 1660 | |
d2734cce | 1661 | if (msp->ms_sm != NULL) { |
425d3237 SD |
1662 | error = space_map_load_length(msp->ms_sm, msp->ms_allocatable, |
1663 | SM_FREE, length); | |
d2734cce | 1664 | } else { |
425d3237 SD |
1665 | /* |
1666 | * The space map has not been allocated yet, so treat | |
1667 | * all the space in the metaslab as free and add it to the | |
1668 | * ms_allocatable tree. | |
1669 | */ | |
d2734cce SD |
1670 | range_tree_add(msp->ms_allocatable, |
1671 | msp->ms_start, msp->ms_size); | |
1672 | } | |
93cf2076 | 1673 | |
425d3237 SD |
1674 | /* |
1675 | * We need to grab the ms_sync_lock to prevent metaslab_sync() from | |
1676 | * changing the ms_sm and the metaslab's range trees while we are | |
1677 | * about to use them and populate the ms_allocatable. The ms_lock | |
1678 | * is insufficient for this because metaslab_sync() doesn't hold | |
1679 | * the ms_lock while writing the ms_checkpointing tree to disk. | |
1680 | */ | |
1681 | mutex_enter(&msp->ms_sync_lock); | |
a1d477c2 | 1682 | mutex_enter(&msp->ms_lock); |
425d3237 | 1683 | ASSERT(!msp->ms_condensing); |
93cf2076 | 1684 | |
b194fab0 SD |
1685 | if (error != 0) |
1686 | return (error); | |
4e21fd06 | 1687 | |
b194fab0 SD |
1688 | ASSERT3P(msp->ms_group, !=, NULL); |
1689 | msp->ms_loaded = B_TRUE; | |
1690 | ||
1691 | /* | |
425d3237 SD |
1692 | * The ms_allocatable contains the segments that exist in the |
1693 | * ms_defer trees [see ms_synced_length]. Thus we need to remove | |
1694 | * them from ms_allocatable as they will be added again in | |
1695 | * metaslab_sync_done(). | |
b194fab0 | 1696 | */ |
425d3237 SD |
1697 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { |
1698 | range_tree_walk(msp->ms_defer[t], | |
1699 | range_tree_remove, msp->ms_allocatable); | |
93cf2076 | 1700 | } |
425d3237 | 1701 | |
928e8ad4 SD |
1702 | /* |
1703 | * Call metaslab_recalculate_weight_and_sort() now that the | |
1704 | * metaslab is loaded so we get the metaslab's real weight. | |
1705 | * | |
1706 | * Unless this metaslab was created with older software and | |
1707 | * has not yet been converted to use segment-based weight, we | |
1708 | * expect the new weight to be better or equal to the weight | |
1709 | * that the metaslab had while it was not loaded. This is | |
1710 | * because the old weight does not take into account the | |
1711 | * consolidation of adjacent segments between TXGs. [see | |
1712 | * comment for ms_synchist and ms_deferhist[] for more info] | |
1713 | */ | |
1714 | uint64_t weight = msp->ms_weight; | |
1715 | metaslab_recalculate_weight_and_sort(msp); | |
1716 | if (!WEIGHT_IS_SPACEBASED(weight)) | |
1717 | ASSERT3U(weight, <=, msp->ms_weight); | |
b194fab0 SD |
1718 | msp->ms_max_size = metaslab_block_maxsize(msp); |
1719 | ||
425d3237 SD |
1720 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; |
1721 | metaslab_verify_space(msp, spa_syncing_txg(spa)); | |
1722 | mutex_exit(&msp->ms_sync_lock); | |
1723 | ||
b194fab0 SD |
1724 | return (0); |
1725 | } | |
1726 | ||
1727 | int | |
1728 | metaslab_load(metaslab_t *msp) | |
1729 | { | |
1730 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
1731 | ||
1732 | /* | |
1733 | * There may be another thread loading the same metaslab, if that's | |
1734 | * the case just wait until the other thread is done and return. | |
1735 | */ | |
1736 | metaslab_load_wait(msp); | |
1737 | if (msp->ms_loaded) | |
1738 | return (0); | |
1739 | VERIFY(!msp->ms_loading); | |
425d3237 | 1740 | ASSERT(!msp->ms_condensing); |
b194fab0 SD |
1741 | |
1742 | msp->ms_loading = B_TRUE; | |
1743 | int error = metaslab_load_impl(msp); | |
1744 | msp->ms_loading = B_FALSE; | |
93cf2076 | 1745 | cv_broadcast(&msp->ms_load_cv); |
b194fab0 | 1746 | |
93cf2076 GW |
1747 | return (error); |
1748 | } | |
1749 | ||
1750 | void | |
1751 | metaslab_unload(metaslab_t *msp) | |
1752 | { | |
1753 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
928e8ad4 SD |
1754 | |
1755 | metaslab_verify_weight_and_frag(msp); | |
1756 | ||
d2734cce | 1757 | range_tree_vacate(msp->ms_allocatable, NULL, NULL); |
93cf2076 | 1758 | msp->ms_loaded = B_FALSE; |
928e8ad4 | 1759 | |
93cf2076 | 1760 | msp->ms_weight &= ~METASLAB_ACTIVE_MASK; |
4e21fd06 | 1761 | msp->ms_max_size = 0; |
928e8ad4 SD |
1762 | |
1763 | /* | |
1764 | * We explicitly recalculate the metaslab's weight based on its space | |
1765 | * map (as it is now not loaded). We want unloaded metaslabs to always | |
1766 | * have their weights calculated from the space map histograms, while | |
1767 | * loaded ones have it calculated from their in-core range tree | |
1768 | * [see metaslab_load()]. This way, the weight reflects the information | |
1769 | * available in-core, whether it is loaded or not. | |
1770 | * | |
1771 | * If ms_group == NULL, it means that we came here from metaslab_fini(), | |
1772 | * at which point it doesn't make sense for us to do the recalculation | |
1773 | * and the sorting. | |
1774 | */ | |
1775 | if (msp->ms_group != NULL) | |
1776 | metaslab_recalculate_weight_and_sort(msp); | |
93cf2076 GW |
1777 | } |
1778 | ||
cc99f275 DB |
1779 | static void |
1780 | metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta, | |
1781 | int64_t defer_delta, int64_t space_delta) | |
1782 | { | |
1783 | vdev_space_update(vd, alloc_delta, defer_delta, space_delta); | |
1784 | ||
1785 | ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent); | |
1786 | ASSERT(vd->vdev_ms_count != 0); | |
1787 | ||
1788 | metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta, | |
1789 | vdev_deflated_space(vd, space_delta)); | |
1790 | } | |
1791 | ||
fb42a493 PS |
1792 | int |
1793 | metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg, | |
1794 | metaslab_t **msp) | |
34dc7c2f BB |
1795 | { |
1796 | vdev_t *vd = mg->mg_vd; | |
cc99f275 DB |
1797 | spa_t *spa = vd->vdev_spa; |
1798 | objset_t *mos = spa->spa_meta_objset; | |
fb42a493 PS |
1799 | metaslab_t *ms; |
1800 | int error; | |
34dc7c2f | 1801 | |
79c76d5b | 1802 | ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP); |
fb42a493 | 1803 | mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL); |
a1d477c2 | 1804 | mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL); |
fb42a493 | 1805 | cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL); |
619f0976 | 1806 | |
fb42a493 PS |
1807 | ms->ms_id = id; |
1808 | ms->ms_start = id << vd->vdev_ms_shift; | |
1809 | ms->ms_size = 1ULL << vd->vdev_ms_shift; | |
492f64e9 PD |
1810 | ms->ms_allocator = -1; |
1811 | ms->ms_new = B_TRUE; | |
34dc7c2f | 1812 | |
93cf2076 GW |
1813 | /* |
1814 | * We only open space map objects that already exist. All others | |
afe37326 | 1815 | * will be opened when we finally allocate an object for them. | |
425d3237 SD |
1816 | * |
1817 | * Note: | |
1818 | * When called from vdev_expand(), we can't call into the DMU as | |
1819 | * we are holding the spa_config_lock as a writer and we would | |
1820 | * deadlock [see relevant comment in vdev_metaslab_init()]. In | |
1821 | * that case, though, the object parameter is zero, so we won't | |
1822 | * call into the DMU. | |
93cf2076 | 1823 | */ |
afe37326 | 1824 | if (object != 0) { |
fb42a493 | 1825 | error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start, |
a1d477c2 | 1826 | ms->ms_size, vd->vdev_ashift); |
fb42a493 PS |
1827 | |
1828 | if (error != 0) { | |
1829 | kmem_free(ms, sizeof (metaslab_t)); | |
1830 | return (error); | |
1831 | } | |
1832 | ||
1833 | ASSERT(ms->ms_sm != NULL); | |
425d3237 | 1834 | ms->ms_allocated_space = space_map_allocated(ms->ms_sm); |
93cf2076 | 1835 | } |
34dc7c2f BB |
1836 | |
1837 | /* | |
425d3237 | 1838 | * We create the ms_allocatable here, but we don't create the |
258553d3 | 1839 | * other range trees until metaslab_sync_done(). This serves |
34dc7c2f | 1840 | * two purposes: it allows metaslab_sync_done() to detect the |
425d3237 SD |
1841 | * addition of new space; and for debugging, it ensures that |
1842 | * we'd data fault on any attempt to use this metaslab before | |
1843 | * it's ready. | |
34dc7c2f | 1844 | */ |
d2734cce SD |
1845 | ms->ms_allocatable = range_tree_create_impl(&rt_avl_ops, |
1846 | &ms->ms_allocatable_by_size, metaslab_rangesize_compare, 0); | |
fb42a493 | 1847 | metaslab_group_add(mg, ms); |
34dc7c2f | 1848 | |
4e21fd06 | 1849 | metaslab_set_fragmentation(ms); |
428870ff | 1850 | |
34dc7c2f BB |
1851 | /* |
1852 | * If we're opening an existing pool (txg == 0) or creating | |
1853 | * a new one (txg == TXG_INITIAL), all space is available now. | |
1854 | * If we're adding space to an existing pool, the new space | |
1855 | * does not become available until after this txg has synced. | |
4e21fd06 DB |
1856 | * The metaslab's weight will also be initialized when we sync |
1857 | * out this txg. This ensures that we don't attempt to allocate | |
1858 | * from it before we have initialized it completely. | |
34dc7c2f | 1859 | */ |
425d3237 | 1860 | if (txg <= TXG_INITIAL) { |
fb42a493 | 1861 | metaslab_sync_done(ms, 0); |
425d3237 SD |
1862 | metaslab_space_update(vd, mg->mg_class, |
1863 | metaslab_allocated_space(ms), 0, 0); | |
1864 | } | |
34dc7c2f | 1865 | |
93cf2076 GW |
1866 | /* |
1867 | * If metaslab_debug_load is set and we're initializing a metaslab | |
cc99f275 DB |
1868 | * that has an allocated space map object then load the space map |
1869 | * so that we can verify frees. | |
93cf2076 | 1870 | */ |
fb42a493 PS |
1871 | if (metaslab_debug_load && ms->ms_sm != NULL) { |
1872 | mutex_enter(&ms->ms_lock); | |
1873 | VERIFY0(metaslab_load(ms)); | |
1874 | mutex_exit(&ms->ms_lock); | |
93cf2076 GW |
1875 | } |
1876 | ||
34dc7c2f | 1877 | if (txg != 0) { |
34dc7c2f | 1878 | vdev_dirty(vd, 0, NULL, txg); |
fb42a493 | 1879 | vdev_dirty(vd, VDD_METASLAB, ms, txg); |
34dc7c2f BB |
1880 | } |
1881 | ||
fb42a493 PS |
1882 | *msp = ms; |
1883 | ||
1884 | return (0); | |
34dc7c2f BB |
1885 | } |
1886 | ||
1887 | void | |
1888 | metaslab_fini(metaslab_t *msp) | |
1889 | { | |
93cf2076 | 1890 | metaslab_group_t *mg = msp->ms_group; |
cc99f275 | 1891 | vdev_t *vd = mg->mg_vd; |
34dc7c2f BB |
1892 | |
1893 | metaslab_group_remove(mg, msp); | |
1894 | ||
1895 | mutex_enter(&msp->ms_lock); | |
93cf2076 | 1896 | VERIFY(msp->ms_group == NULL); |
cc99f275 | 1897 | metaslab_space_update(vd, mg->mg_class, |
425d3237 | 1898 | -metaslab_allocated_space(msp), 0, -msp->ms_size); |
cc99f275 | 1899 | |
93cf2076 GW |
1900 | space_map_close(msp->ms_sm); |
1901 | ||
1902 | metaslab_unload(msp); | |
cc99f275 | 1903 | |
d2734cce SD |
1904 | range_tree_destroy(msp->ms_allocatable); |
1905 | range_tree_destroy(msp->ms_freeing); | |
1906 | range_tree_destroy(msp->ms_freed); | |
34dc7c2f | 1907 | |
1c27024e | 1908 | for (int t = 0; t < TXG_SIZE; t++) { |
d2734cce | 1909 | range_tree_destroy(msp->ms_allocating[t]); |
34dc7c2f BB |
1910 | } |
1911 | ||
1c27024e | 1912 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { |
d2734cce | 1913 | range_tree_destroy(msp->ms_defer[t]); |
e51be066 | 1914 | } |
c99c9001 | 1915 | ASSERT0(msp->ms_deferspace); |
428870ff | 1916 | |
d2734cce SD |
1917 | range_tree_destroy(msp->ms_checkpointing); |
1918 | ||
928e8ad4 SD |
1919 | for (int t = 0; t < TXG_SIZE; t++) |
1920 | ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t)); | |
1921 | ||
34dc7c2f | 1922 | mutex_exit(&msp->ms_lock); |
93cf2076 | 1923 | cv_destroy(&msp->ms_load_cv); |
34dc7c2f | 1924 | mutex_destroy(&msp->ms_lock); |
a1d477c2 | 1925 | mutex_destroy(&msp->ms_sync_lock); |
492f64e9 | 1926 | ASSERT3U(msp->ms_allocator, ==, -1); |
34dc7c2f BB |
1927 | |
1928 | kmem_free(msp, sizeof (metaslab_t)); | |
1929 | } | |
1930 | ||
f3a7f661 GW |
1931 | #define FRAGMENTATION_TABLE_SIZE 17 |
1932 | ||
93cf2076 | 1933 | /* |
f3a7f661 GW |
1934 | * This table defines a segment size based fragmentation metric that will |
1935 | * allow each metaslab to derive its own fragmentation value. This is done | |
1936 | * by calculating the space in each bucket of the spacemap histogram and | |
928e8ad4 | 1937 | * multiplying that by the fragmentation metric in this table. Doing |
f3a7f661 GW |
1938 | * this for all buckets and dividing it by the total amount of free |
1939 | * space in this metaslab (i.e. the total free space in all buckets) gives | |
1940 | * us the fragmentation metric. This means that a high fragmentation metric | |
1941 | * equates to most of the free space being comprised of small segments. | |
1942 | * Conversely, if the metric is low, then most of the free space is in | |
1943 | * large segments. A 10% change in fragmentation equates to approximately | |
1944 | * double the number of segments. | |
93cf2076 | 1945 | * |
f3a7f661 GW |
1946 | * This table defines 0% fragmented space using 16MB segments. Testing has |
1947 | * shown that segments that are greater than or equal to 16MB do not suffer | |
1948 | * from drastic performance problems. Using this value, we derive the rest | |
1949 | * of the table. Since the fragmentation value is never stored on disk, it | |
1950 | * is possible to change these calculations in the future. | |
1951 | */ | |
1952 | int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = { | |
1953 | 100, /* 512B */ | |
1954 | 100, /* 1K */ | |
1955 | 98, /* 2K */ | |
1956 | 95, /* 4K */ | |
1957 | 90, /* 8K */ | |
1958 | 80, /* 16K */ | |
1959 | 70, /* 32K */ | |
1960 | 60, /* 64K */ | |
1961 | 50, /* 128K */ | |
1962 | 40, /* 256K */ | |
1963 | 30, /* 512K */ | |
1964 | 20, /* 1M */ | |
1965 | 15, /* 2M */ | |
1966 | 10, /* 4M */ | |
1967 | 5, /* 8M */ | |
1968 | 0 /* 16M */ | |
1969 | }; | |
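/*
 * A worked example of how this table is applied in
 * metaslab_set_fragmentation() (histogram contents are made up, and
 * sm_shift is assumed to equal SPA_MINBLOCKSHIFT, i.e. 9):
 *
 *	bucket 3  (~4K segments):  100 entries -> space = 100 << 12 = 400K,
 *	    weighted by zfs_frag_table[3] = 95
 *	bucket 12 (~2M segments):    1 entry   -> space = 1 << 21 = 2M,
 *	    weighted by zfs_frag_table[12] = 15
 *
 *	fragmentation = (400K * 95 + 2M * 15) / (400K + 2M) ~= 28
 *
 * so a single large segment pulls the metric down sharply even when
 * many small segments are present.
 */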
1970 | ||
1971 | /* | |
425d3237 SD |
1972 | * Calculate the metaslab's fragmentation metric and set ms_fragmentation. |
1973 | * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not | |
1974 | * been upgraded and does not support this metric. Otherwise, the return | |
1975 | * value should be in the range [0, 100]. | |
93cf2076 | 1976 | */ |
4e21fd06 DB |
1977 | static void |
1978 | metaslab_set_fragmentation(metaslab_t *msp) | |
93cf2076 | 1979 | { |
f3a7f661 GW |
1980 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; |
1981 | uint64_t fragmentation = 0; | |
1982 | uint64_t total = 0; | |
1983 | boolean_t feature_enabled = spa_feature_is_enabled(spa, | |
1984 | SPA_FEATURE_SPACEMAP_HISTOGRAM); | |
93cf2076 | 1985 | |
4e21fd06 DB |
1986 | if (!feature_enabled) { |
1987 | msp->ms_fragmentation = ZFS_FRAG_INVALID; | |
1988 | return; | |
1989 | } | |
f3a7f661 | 1990 | |
93cf2076 | 1991 | /* |
f3a7f661 GW |
1992 | * A null space map means that the entire metaslab is free |
1993 | * and thus is not fragmented. | |
93cf2076 | 1994 | */ |
4e21fd06 DB |
1995 | if (msp->ms_sm == NULL) { |
1996 | msp->ms_fragmentation = 0; | |
1997 | return; | |
1998 | } | |
f3a7f661 GW |
1999 | |
2000 | /* | |
4e21fd06 | 2001 | * If this metaslab's space map has not been upgraded, flag it |
f3a7f661 GW |
2002 | * so that we upgrade next time we encounter it. |
2003 | */ | |
2004 | if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) { | |
3b7f360c | 2005 | uint64_t txg = spa_syncing_txg(spa); |
93cf2076 GW |
2006 | vdev_t *vd = msp->ms_group->mg_vd; |
2007 | ||
3b7f360c GW |
2008 | /* |
2009 | * If we've reached the final dirty txg, then we must | |
2010 | * be shutting down the pool. We don't want to dirty | |
2011 | * any data past this point so skip setting the condense | |
2012 | * flag. We can retry this action the next time the pool | |
2013 | * is imported. | |
2014 | */ | |
2015 | if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) { | |
8b0a0840 TC |
2016 | msp->ms_condense_wanted = B_TRUE; |
2017 | vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); | |
964c2d69 | 2018 | zfs_dbgmsg("txg %llu, requesting force condense: " |
3b7f360c GW |
2019 | "ms_id %llu, vdev_id %llu", txg, msp->ms_id, |
2020 | vd->vdev_id); | |
8b0a0840 | 2021 | } |
4e21fd06 DB |
2022 | msp->ms_fragmentation = ZFS_FRAG_INVALID; |
2023 | return; | |
93cf2076 GW |
2024 | } |
2025 | ||
1c27024e | 2026 | for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { |
f3a7f661 GW |
2027 | uint64_t space = 0; |
2028 | uint8_t shift = msp->ms_sm->sm_shift; | |
4e21fd06 | 2029 | |
f3a7f661 GW |
2030 | int idx = MIN(shift - SPA_MINBLOCKSHIFT + i, |
2031 | FRAGMENTATION_TABLE_SIZE - 1); | |
93cf2076 | 2032 | |
93cf2076 GW |
2033 | if (msp->ms_sm->sm_phys->smp_histogram[i] == 0) |
2034 | continue; | |
2035 | ||
f3a7f661 GW |
2036 | space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift); |
2037 | total += space; | |
2038 | ||
2039 | ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE); | |
2040 | fragmentation += space * zfs_frag_table[idx]; | |
93cf2076 | 2041 | } |
f3a7f661 GW |
2042 | |
2043 | if (total > 0) | |
2044 | fragmentation /= total; | |
2045 | ASSERT3U(fragmentation, <=, 100); | |
4e21fd06 DB |
2046 | |
2047 | msp->ms_fragmentation = fragmentation; | |
93cf2076 | 2048 | } |
34dc7c2f | 2049 | |
f3a7f661 GW |
2050 | /* |
2051 | * Compute a weight -- a selection preference value -- for the given metaslab. | |
2052 | * This is based on the amount of free space, the level of fragmentation, | |
2053 | * the LBA range, and whether the metaslab is loaded. | |
2054 | */ | |
34dc7c2f | 2055 | static uint64_t |
4e21fd06 | 2056 | metaslab_space_weight(metaslab_t *msp) |
34dc7c2f BB |
2057 | { |
2058 | metaslab_group_t *mg = msp->ms_group; | |
34dc7c2f BB |
2059 | vdev_t *vd = mg->mg_vd; |
2060 | uint64_t weight, space; | |
2061 | ||
2062 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
4e21fd06 | 2063 | ASSERT(!vd->vdev_removing); |
c2e42f9d | 2064 | |
34dc7c2f BB |
2065 | /* |
2066 | * The baseline weight is the metaslab's free space. | |
2067 | */ | |
425d3237 | 2068 | space = msp->ms_size - metaslab_allocated_space(msp); |
f3a7f661 | 2069 | |
f3a7f661 GW |
2070 | if (metaslab_fragmentation_factor_enabled && |
2071 | msp->ms_fragmentation != ZFS_FRAG_INVALID) { | |
2072 | /* | |
2073 | * Use the fragmentation information to inversely scale | |
2074 | * down the baseline weight. We need to ensure that we | |
2075 | * don't exclude this metaslab completely when it's 100% | |
2076 | * fragmented. To avoid this we reduce the fragmented value | |
2077 | * by 1. | |
2078 | */ | |
2079 | space = (space * (100 - (msp->ms_fragmentation - 1))) / 100; | |
2080 | ||
2081 | /* | |
2082 | * If space < SPA_MINBLOCKSIZE, then we will not allocate from | |
2083 | * this metaslab again. The fragmentation metric may have | |
2084 | * decreased the space to something smaller than | |
2085 | * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE | |
2086 | * so that we can consume any remaining space. | |
2087 | */ | |
2088 | if (space > 0 && space < SPA_MINBLOCKSIZE) | |
2089 | space = SPA_MINBLOCKSIZE; | |
2090 | } | |
34dc7c2f BB |
2091 | weight = space; |
2092 | ||
2093 | /* | |
2094 | * Modern disks have uniform bit density and constant angular velocity. | |
2095 | * Therefore, the outer recording zones are faster (higher bandwidth) | |
2096 | * than the inner zones by the ratio of outer to inner track diameter, | |
2097 | * which is typically around 2:1. We account for this by assigning | |
2098 | * higher weight to lower metaslabs (multiplier ranging from 2x to 1x). | |
2099 | * In effect, this means that we'll select the metaslab with the most | |
2100 | * free bandwidth rather than simply the one with the most free space. | |
2101 | */ | |
fb40095f | 2102 | if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) { |
f3a7f661 GW |
2103 | weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count; |
2104 | ASSERT(weight >= space && weight <= 2 * space); | |
2105 | } | |
428870ff | 2106 | |
f3a7f661 GW |
2107 | /* |
2108 | * If this metaslab is one we're actively using, adjust its | |
2109 | * weight to make it preferable to any inactive metaslab so | |
2110 | * we'll polish it off. If the fragmentation on this metaslab | |
2111 | * has exceeded our threshold, then don't mark it active. | |
2112 | */ | |
2113 | if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID && | |
2114 | msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) { | |
428870ff BB |
2115 | weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK); |
2116 | } | |
34dc7c2f | 2117 | |
4e21fd06 DB |
2118 | WEIGHT_SET_SPACEBASED(weight); |
2119 | return (weight); | |
2120 | } | |
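/*
 * A hypothetical illustration of the two adjustments above (all numbers
 * are examples): a metaslab with 10G free and a fragmentation of 51
 * first has its baseline scaled to 10G * (100 - 50) / 100 = 5G. With
 * LBA weighting enabled on a rotational vdev holding 200 metaslabs,
 * metaslab id 50 is then scaled by 2 - 50/200 = 1.75x, while the
 * outermost metaslab (id 0) gets the full 2x and the innermost
 * (id 199) roughly 1x.
 */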
2121 | ||
2122 | /* | |
2123 | * Return the weight of the specified metaslab, according to the segment-based | |
2124 | * weighting algorithm. The metaslab must be loaded. This function can | |
2125 | * be called within a sync pass since it relies only on the metaslab's | |
2126 | * range tree which is always accurate when the metaslab is loaded. | |
2127 | */ | |
2128 | static uint64_t | |
2129 | metaslab_weight_from_range_tree(metaslab_t *msp) | |
2130 | { | |
2131 | uint64_t weight = 0; | |
2132 | uint32_t segments = 0; | |
4e21fd06 DB |
2133 | |
2134 | ASSERT(msp->ms_loaded); | |
2135 | ||
1c27024e DB |
2136 | for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT; |
2137 | i--) { | |
4e21fd06 DB |
2138 | uint8_t shift = msp->ms_group->mg_vd->vdev_ashift; |
2139 | int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; | |
2140 | ||
2141 | segments <<= 1; | |
d2734cce | 2142 | segments += msp->ms_allocatable->rt_histogram[i]; |
4e21fd06 DB |
2143 | |
2144 | /* | |
2145 | * The range tree provides more precision than the space map | |
2146 | * and must be downgraded so that all values fit within the | |
2147 | * space map's histogram. This allows us to compare loaded | |
2148 | * vs. unloaded metaslabs to determine which metaslab is | |
2149 | * considered "best". | |
2150 | */ | |
2151 | if (i > max_idx) | |
2152 | continue; | |
2153 | ||
2154 | if (segments != 0) { | |
2155 | WEIGHT_SET_COUNT(weight, segments); | |
2156 | WEIGHT_SET_INDEX(weight, i); | |
2157 | WEIGHT_SET_ACTIVE(weight, 0); | |
2158 | break; | |
2159 | } | |
2160 | } | |
2161 | return (weight); | |
2162 | } | |
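/*
 * For instance (purely illustrative numbers), if all buckets above
 * i = 20 are empty and that bucket (segments of [1M, 2M)) holds 6
 * segments, the loop above breaks there and the weight encodes
 * INDEX = 20 and COUNT = 6. Because the index is encoded in more
 * significant bits than the count, a higher index outweighs any count
 * at a lower index.
 */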
2163 | ||
2164 | /* | |
2165 | * Calculate the weight based on the on-disk histogram. This should only | |
2166 | * be called after a sync pass has completely finished since the on-disk | |
2167 | * information is updated in metaslab_sync(). | |
2168 | */ | |
2169 | static uint64_t | |
2170 | metaslab_weight_from_spacemap(metaslab_t *msp) | |
2171 | { | |
928e8ad4 SD |
2172 | space_map_t *sm = msp->ms_sm; |
2173 | ASSERT(!msp->ms_loaded); | |
2174 | ASSERT(sm != NULL); | |
2175 | ASSERT3U(space_map_object(sm), !=, 0); | |
2176 | ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); | |
4e21fd06 | 2177 | |
928e8ad4 SD |
2178 | /* |
2179 | * Create a joint histogram from all the segments that have made | |
2180 | * it to the metaslab's space map histogram, that are not yet | |
2181 | * available for allocation because they are still in the freeing | |
2182 | * pipeline (e.g. freeing, freed, and defer trees). Then subtract | |
2183 | * these segments from the space map's histogram to get a more | |
2184 | * accurate weight. | |
2185 | */ | |
2186 | uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0}; | |
2187 | for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) | |
2188 | deferspace_histogram[i] += msp->ms_synchist[i]; | |
2189 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { | |
2190 | for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { | |
2191 | deferspace_histogram[i] += msp->ms_deferhist[t][i]; | |
2192 | } | |
2193 | } | |
2194 | ||
2195 | uint64_t weight = 0; | |
1c27024e | 2196 | for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) { |
928e8ad4 SD |
2197 | ASSERT3U(sm->sm_phys->smp_histogram[i], >=, |
2198 | deferspace_histogram[i]); | |
2199 | uint64_t count = | |
2200 | sm->sm_phys->smp_histogram[i] - deferspace_histogram[i]; | |
2201 | if (count != 0) { | |
2202 | WEIGHT_SET_COUNT(weight, count); | |
2203 | WEIGHT_SET_INDEX(weight, i + sm->sm_shift); | |
4e21fd06 DB |
2204 | WEIGHT_SET_ACTIVE(weight, 0); |
2205 | break; | |
2206 | } | |
2207 | } | |
2208 | return (weight); | |
2209 | } | |
2210 | ||
2211 | /* | |
2212 | * Compute a segment-based weight for the specified metaslab. The weight | |
2213 | * is determined by the highest bucket in the histogram. The information | |
2214 | * for the highest bucket is encoded into the weight value. | |
2215 | */ | |
2216 | static uint64_t | |
2217 | metaslab_segment_weight(metaslab_t *msp) | |
2218 | { | |
2219 | metaslab_group_t *mg = msp->ms_group; | |
2220 | uint64_t weight = 0; | |
2221 | uint8_t shift = mg->mg_vd->vdev_ashift; | |
2222 | ||
2223 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
2224 | ||
2225 | /* | |
2226 | * The metaslab is completely free. | |
2227 | */ | |
425d3237 | 2228 | if (metaslab_allocated_space(msp) == 0) { |
4e21fd06 DB |
2229 | int idx = highbit64(msp->ms_size) - 1; |
2230 | int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; | |
2231 | ||
2232 | if (idx < max_idx) { | |
2233 | WEIGHT_SET_COUNT(weight, 1ULL); | |
2234 | WEIGHT_SET_INDEX(weight, idx); | |
2235 | } else { | |
2236 | WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx)); | |
2237 | WEIGHT_SET_INDEX(weight, max_idx); | |
2238 | } | |
2239 | WEIGHT_SET_ACTIVE(weight, 0); | |
2240 | ASSERT(!WEIGHT_IS_SPACEBASED(weight)); | |
2241 | ||
2242 | return (weight); | |
2243 | } | |
2244 | ||
2245 | ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); | |
2246 | ||
2247 | /* | |
2248 | * If the metaslab is fully allocated then just make the weight 0. | |
2249 | */ | |
425d3237 | 2250 | if (metaslab_allocated_space(msp) == msp->ms_size) |
4e21fd06 DB |
2251 | return (0); |
2252 | /* | |
2253 | * If the metaslab is already loaded, then use the range tree to | |
2254 | * determine the weight. Otherwise, we rely on the space map information | |
2255 | * to generate the weight. | |
2256 | */ | |
2257 | if (msp->ms_loaded) { | |
2258 | weight = metaslab_weight_from_range_tree(msp); | |
2259 | } else { | |
2260 | weight = metaslab_weight_from_spacemap(msp); | |
2261 | } | |
2262 | ||
2263 | /* | |
2264 | * If the metaslab was active the last time we calculated its weight | |
2265 | * then keep it active. We want to consume the entire region that | |
2266 | * is associated with this weight. | |
2267 | */ | |
2268 | if (msp->ms_activation_weight != 0 && weight != 0) | |
2269 | WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight)); | |
2270 | return (weight); | |
2271 | } | |
2272 | ||
2273 | /* | |
2274 | * Determine if we should attempt to allocate from this metaslab. If the | |
2275 | * metaslab has a maximum size then we can quickly determine if the desired | |
2276 | * allocation size can be satisfied. Otherwise, if we're using segment-based | |
2277 | * weighting then we can determine the maximum allocation that this metaslab | |
2278 | * can accommodate based on the index encoded in the weight. If we're using | |
2279 | * space-based weights then rely on the entire weight (excluding the weight | |
2280 | * type bit). | |
2281 | */ | |
2282 | boolean_t | |
2283 | metaslab_should_allocate(metaslab_t *msp, uint64_t asize) | |
2284 | { | |
2285 | boolean_t should_allocate; | |
2286 | ||
2287 | if (msp->ms_max_size != 0) | |
2288 | return (msp->ms_max_size >= asize); | |
2289 | ||
2290 | if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) { | |
2291 | /* | |
2292 | * The metaslab segment weight indicates segments in the | |
2293 | * range [2^i, 2^(i+1)), where i is the index in the weight. | |
2294 | * Since the asize might be in the middle of the range, we | |
2295 | * should attempt the allocation if asize < 2^(i+1). | |
2296 | */ | |
2297 | should_allocate = (asize < | |
2298 | 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1)); | |
2299 | } else { | |
2300 | should_allocate = (asize <= | |
2301 | (msp->ms_weight & ~METASLAB_WEIGHT_TYPE)); | |
2302 | } | |
2303 | return (should_allocate); | |
2304 | } | |
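/*
 * A short example of the decision above (the weight is hypothetical):
 * a segment-based weight with index 17 advertises free segments in
 * [128K, 256K), so an asize of 192K is attempted (192K < 1 << 18)
 * while an asize of 256K is not. For a space-based weight the test is
 * simply whether asize fits in the weight with the type bit masked off.
 */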
2305 | static uint64_t | |
2306 | metaslab_weight(metaslab_t *msp) | |
2307 | { | |
2308 | vdev_t *vd = msp->ms_group->mg_vd; | |
2309 | spa_t *spa = vd->vdev_spa; | |
2310 | uint64_t weight; | |
2311 | ||
2312 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
2313 | ||
2314 | /* | |
a1d477c2 | 2315 | * If this vdev is in the process of being removed, there is nothing |
4e21fd06 DB |
2316 | * for us to do here. |
2317 | */ | |
a1d477c2 | 2318 | if (vd->vdev_removing) |
4e21fd06 | 2319 | return (0); |
4e21fd06 DB |
2320 | |
2321 | metaslab_set_fragmentation(msp); | |
2322 | ||
2323 | /* | |
2324 | * Update the maximum size if the metaslab is loaded. This will | |
2325 | * ensure that we get an accurate maximum size if newly freed space | |
2326 | * has been added back into the free tree. | |
2327 | */ | |
2328 | if (msp->ms_loaded) | |
2329 | msp->ms_max_size = metaslab_block_maxsize(msp); | |
425d3237 SD |
2330 | else |
2331 | ASSERT0(msp->ms_max_size); | |
4e21fd06 DB |
2332 | |
2333 | /* | |
2334 | * Segment-based weighting requires space map histogram support. | |
2335 | */ | |
2336 | if (zfs_metaslab_segment_weight_enabled && | |
2337 | spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) && | |
2338 | (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size == | |
2339 | sizeof (space_map_phys_t))) { | |
2340 | weight = metaslab_segment_weight(msp); | |
2341 | } else { | |
2342 | weight = metaslab_space_weight(msp); | |
2343 | } | |
93cf2076 | 2344 | return (weight); |
34dc7c2f BB |
2345 | } |
2346 | ||
928e8ad4 SD |
2347 | void |
2348 | metaslab_recalculate_weight_and_sort(metaslab_t *msp) | |
2349 | { | |
2350 | /* note: we preserve the mask (e.g. indication of primary, etc..) */ | |
2351 | uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; | |
2352 | metaslab_group_sort(msp->ms_group, msp, | |
2353 | metaslab_weight(msp) | was_active); | |
2354 | } | |
2355 | ||
34dc7c2f | 2356 | static int |
492f64e9 PD |
2357 | metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp, |
2358 | int allocator, uint64_t activation_weight) | |
2359 | { | |
2360 | /* | |
2361 | * If we're activating for the claim code, we don't want to actually | |
2362 | * set the metaslab up for a specific allocator. | |
2363 | */ | |
2364 | if (activation_weight == METASLAB_WEIGHT_CLAIM) | |
2365 | return (0); | |
2366 | metaslab_t **arr = (activation_weight == METASLAB_WEIGHT_PRIMARY ? | |
2367 | mg->mg_primaries : mg->mg_secondaries); | |
2368 | ||
2369 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
2370 | mutex_enter(&mg->mg_lock); | |
2371 | if (arr[allocator] != NULL) { | |
2372 | mutex_exit(&mg->mg_lock); | |
2373 | return (EEXIST); | |
2374 | } | |
2375 | ||
2376 | arr[allocator] = msp; | |
2377 | ASSERT3S(msp->ms_allocator, ==, -1); | |
2378 | msp->ms_allocator = allocator; | |
2379 | msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY); | |
2380 | mutex_exit(&mg->mg_lock); | |
2381 | ||
2382 | return (0); | |
2383 | } | |
2384 | ||
2385 | static int | |
2386 | metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight) | |
34dc7c2f | 2387 | { |
34dc7c2f BB |
2388 | ASSERT(MUTEX_HELD(&msp->ms_lock)); |
2389 | ||
2390 | if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) { | |
b194fab0 SD |
2391 | int error = metaslab_load(msp); |
2392 | if (error != 0) { | |
2393 | metaslab_group_sort(msp->ms_group, msp, 0); | |
2394 | return (error); | |
34dc7c2f | 2395 | } |
492f64e9 PD |
2396 | if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) { |
2397 | /* | |
2398 | * The metaslab was activated for another allocator | |
2399 | * while we were waiting, so we should reselect. | |
2400 | */ | |
7ab96299 | 2401 | return (SET_ERROR(EBUSY)); |
492f64e9 PD |
2402 | } |
2403 | if ((error = metaslab_activate_allocator(msp->ms_group, msp, | |
2404 | allocator, activation_weight)) != 0) { | |
2405 | return (error); | |
2406 | } | |
9babb374 | 2407 | |
4e21fd06 | 2408 | msp->ms_activation_weight = msp->ms_weight; |
34dc7c2f BB |
2409 | metaslab_group_sort(msp->ms_group, msp, |
2410 | msp->ms_weight | activation_weight); | |
2411 | } | |
93cf2076 | 2412 | ASSERT(msp->ms_loaded); |
34dc7c2f BB |
2413 | ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); |
2414 | ||
2415 | return (0); | |
2416 | } | |
2417 | ||
492f64e9 PD |
2418 | static void |
2419 | metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp, | |
2420 | uint64_t weight) | |
2421 | { | |
2422 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
2423 | if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) { | |
2424 | metaslab_group_sort(mg, msp, weight); | |
2425 | return; | |
2426 | } | |
2427 | ||
2428 | mutex_enter(&mg->mg_lock); | |
2429 | ASSERT3P(msp->ms_group, ==, mg); | |
2430 | if (msp->ms_primary) { | |
2431 | ASSERT3U(0, <=, msp->ms_allocator); | |
2432 | ASSERT3U(msp->ms_allocator, <, mg->mg_allocators); | |
2433 | ASSERT3P(mg->mg_primaries[msp->ms_allocator], ==, msp); | |
2434 | ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); | |
2435 | mg->mg_primaries[msp->ms_allocator] = NULL; | |
2436 | } else { | |
2437 | ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); | |
2438 | ASSERT3P(mg->mg_secondaries[msp->ms_allocator], ==, msp); | |
2439 | mg->mg_secondaries[msp->ms_allocator] = NULL; | |
2440 | } | |
2441 | msp->ms_allocator = -1; | |
2442 | metaslab_group_sort_impl(mg, msp, weight); | |
2443 | mutex_exit(&mg->mg_lock); | |
2444 | } | |
2445 | ||
34dc7c2f | 2446 | static void |
4e21fd06 | 2447 | metaslab_passivate(metaslab_t *msp, uint64_t weight) |
34dc7c2f | 2448 | { |
4e21fd06 DB |
2449 | ASSERTV(uint64_t size = weight & ~METASLAB_WEIGHT_TYPE); |
2450 | ||
34dc7c2f BB |
2451 | /* |
2452 | * If size < SPA_MINBLOCKSIZE, then we will not allocate from | |
2453 | * this metaslab again. In that case, it had better be empty, | |
2454 | * or we would be leaving space on the table. | |
2455 | */ | |
94d49e8f TC |
2456 | ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) || |
2457 | size >= SPA_MINBLOCKSIZE || | |
d2734cce | 2458 | range_tree_space(msp->ms_allocatable) == 0); |
4e21fd06 DB |
2459 | ASSERT0(weight & METASLAB_ACTIVE_MASK); |
2460 | ||
2461 | msp->ms_activation_weight = 0; | |
492f64e9 | 2462 | metaslab_passivate_allocator(msp->ms_group, msp, weight); |
34dc7c2f BB |
2463 | ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0); |
2464 | } | |
2465 | ||
4e21fd06 DB |
2466 | /* |
2467 | * Segment-based metaslabs are activated once and remain active until | |
2468 | * we either fail an allocation attempt (similar to space-based metaslabs) | |
2469 | * or have exhausted the free space in zfs_metaslab_switch_threshold | |
2470 | * buckets since the metaslab was activated. This function checks to see | |
2471 | * if we've exhausted the zfs_metaslab_switch_threshold buckets in the | |
2472 | * metaslab and passivates it proactively. This will allow us to select a | |
2473 | * metaslab with a larger contiguous region, if any, remaining within this | |
2474 | * metaslab group. If we're in sync pass > 1, then we continue using this | |
2475 | * metaslab so that we don't dirty more blocks and cause more sync passes. | |
2476 | */ | |
2477 | void | |
2478 | metaslab_segment_may_passivate(metaslab_t *msp) | |
2479 | { | |
2480 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
4e21fd06 DB |
2481 | |
2482 | if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1) | |
2483 | return; | |
2484 | ||
2485 | /* | |
2486 | * Since we are in the middle of a sync pass, the most accurate | |
2487 | * information that is accessible to us is the in-core range tree | |
2488 | * histogram; calculate the new weight based on that information. | |
2489 | */ | |
1c27024e DB |
2490 | uint64_t weight = metaslab_weight_from_range_tree(msp); |
2491 | int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight); | |
2492 | int current_idx = WEIGHT_GET_INDEX(weight); | |
4e21fd06 DB |
2493 | |
2494 | if (current_idx <= activation_idx - zfs_metaslab_switch_threshold) | |
2495 | metaslab_passivate(msp, weight); | |
2496 | } | |
2497 | ||
93cf2076 GW |
2498 | static void |
2499 | metaslab_preload(void *arg) | |
2500 | { | |
2501 | metaslab_t *msp = arg; | |
2502 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
1cd77734 | 2503 | fstrans_cookie_t cookie = spl_fstrans_mark(); |
93cf2076 | 2504 | |
080b3100 GW |
2505 | ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock)); |
2506 | ||
93cf2076 | 2507 | mutex_enter(&msp->ms_lock); |
b194fab0 | 2508 | (void) metaslab_load(msp); |
4e21fd06 | 2509 | msp->ms_selected_txg = spa_syncing_txg(spa); |
93cf2076 | 2510 | mutex_exit(&msp->ms_lock); |
1cd77734 | 2511 | spl_fstrans_unmark(cookie); |
93cf2076 GW |
2512 | } |
2513 | ||
2514 | static void | |
2515 | metaslab_group_preload(metaslab_group_t *mg) | |
2516 | { | |
2517 | spa_t *spa = mg->mg_vd->vdev_spa; | |
2518 | metaslab_t *msp; | |
2519 | avl_tree_t *t = &mg->mg_metaslab_tree; | |
2520 | int m = 0; | |
2521 | ||
2522 | if (spa_shutting_down(spa) || !metaslab_preload_enabled) { | |
c5528b9b | 2523 | taskq_wait_outstanding(mg->mg_taskq, 0); |
93cf2076 GW |
2524 | return; |
2525 | } | |
93cf2076 | 2526 | |
080b3100 | 2527 | mutex_enter(&mg->mg_lock); |
a1d477c2 | 2528 | |
93cf2076 | 2529 | /* |
080b3100 | 2530 | * Load the next potential metaslabs |
93cf2076 | 2531 | */ |
4e21fd06 | 2532 | for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) { |
a1d477c2 MA |
2533 | ASSERT3P(msp->ms_group, ==, mg); |
2534 | ||
f3a7f661 GW |
2535 | /* |
2536 | * We preload only the maximum number of metaslabs specified | |
2537 | * by metaslab_preload_limit. If a metaslab is being forced | |
2538 | * to condense then we preload it too. This will ensure | |
2539 | * that force condensing happens in the next txg. | |
2540 | */ | |
2541 | if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) { | |
f3a7f661 GW |
2542 | continue; |
2543 | } | |
93cf2076 GW |
2544 | |
2545 | VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload, | |
48d3eb40 | 2546 | msp, TQ_SLEEP) != TASKQID_INVALID); |
93cf2076 GW |
2547 | } |
2548 | mutex_exit(&mg->mg_lock); | |
2549 | } | |
2550 | ||
e51be066 | 2551 | /* |
93cf2076 GW |
2552 | * Determine if the space map's on-disk footprint is past our tolerance |
2553 | * for inefficiency. We would like to use the following criteria to make | |
2554 | * our decision: | |
e51be066 GW |
2555 | * |
2556 | * 1. The size of the space map object should not dramatically increase as a | |
93cf2076 | 2557 | * result of writing out the free space range tree. |
e51be066 GW |
2558 | * |
2559 | * 2. The minimal on-disk space map representation is zfs_condense_pct/100 | |
93cf2076 | 2560 | * times the size of the free space range tree representation | |
a1d477c2 | 2561 | * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB). |
e51be066 | 2562 | * |
b02fe35d AR |
2563 | * 3. The on-disk size of the space map should actually decrease. |
2564 | * | |
b02fe35d AR |
2565 | * Unfortunately, we cannot compute the on-disk size of the space map in this |
2566 | * context because we cannot accurately compute the effects of compression, etc. | |
2567 | * Instead, we apply the heuristic described in the block comment for | |
2568 | * zfs_metaslab_condense_block_threshold - we only condense if the space used | |
2569 | * is greater than a threshold number of blocks. | |
e51be066 GW |
2570 | */ |
2571 | static boolean_t | |
2572 | metaslab_should_condense(metaslab_t *msp) | |
2573 | { | |
93cf2076 | 2574 | space_map_t *sm = msp->ms_sm; |
d2734cce SD |
2575 | vdev_t *vd = msp->ms_group->mg_vd; |
2576 | uint64_t vdev_blocksize = 1 << vd->vdev_ashift; | |
2577 | uint64_t current_txg = spa_syncing_txg(vd->vdev_spa); | |
e51be066 GW |
2578 | |
2579 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
93cf2076 | 2580 | ASSERT(msp->ms_loaded); |
e51be066 GW |
2581 | |
2582 | /* | |
d2734cce SD |
2583 | * Allocations and frees in early passes are generally more space |
2584 | * efficient (in terms of blocks described in space map entries) | |
2585 | * than the ones in later passes (e.g. we don't compress after | |
2586 | * sync pass 5) and condensing a metaslab multiple times in a txg | |
2587 | * could degrade performance. | |
2588 | * | |
2589 | * Thus we prefer condensing each metaslab at most once every txg at | |
2590 | * the earliest sync pass possible. If a metaslab is eligible for | |
2591 | * condensing again after being considered for condensing within the | |
2592 | * same txg, it will hopefully be dirty in the next txg where it will | |
2593 | * be condensed at an earlier pass. | |
2594 | */ | |
2595 | if (msp->ms_condense_checked_txg == current_txg) | |
2596 | return (B_FALSE); | |
2597 | msp->ms_condense_checked_txg = current_txg; | |
2598 | ||
2599 | /* | |
4d044c4c SD |
2600 | * We always condense metaslabs that are empty and metaslabs for |
2601 | * which a condense request has been made. | |
e51be066 | 2602 | */ |
4d044c4c SD |
2603 | if (avl_is_empty(&msp->ms_allocatable_by_size) || |
2604 | msp->ms_condense_wanted) | |
e51be066 GW |
2605 | return (B_TRUE); |
2606 | ||
4d044c4c SD |
2607 | uint64_t object_size = space_map_length(msp->ms_sm); |
2608 | uint64_t optimal_size = space_map_estimate_optimal_size(sm, | |
2609 | msp->ms_allocatable, SM_NO_VDEVID); | |
b02fe35d | 2610 | |
4d044c4c | 2611 | dmu_object_info_t doi; |
b02fe35d | 2612 | dmu_object_info_from_db(sm->sm_dbuf, &doi); |
4d044c4c | 2613 | uint64_t record_size = MAX(doi.doi_data_block_size, vdev_blocksize); |
b02fe35d | 2614 | |
4d044c4c | 2615 | return (object_size >= (optimal_size * zfs_condense_pct / 100) && |
b02fe35d | 2616 | object_size > zfs_metaslab_condense_block_threshold * record_size); |
e51be066 GW |
2617 | } |
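/*
 * A numeric sketch of the final test above (sizes are hypothetical and
 * zfs_condense_pct is assumed to be 200): if the space map object has
 * grown to 3M while its estimated optimal condensed size is 1M, then
 * 3M >= 1M * 200 / 100 holds, and provided 3M is also larger than
 * zfs_metaslab_condense_block_threshold blocks of record_size bytes,
 * the metaslab is condensed during this sync.
 */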
2618 | ||
2619 | /* | |
2620 | * Condense the on-disk space map representation to its minimized form. | |
2621 | * The minimized form consists of a small number of allocations followed by | |
93cf2076 | 2622 | * the entries of the free range tree. |
e51be066 GW |
2623 | */ |
2624 | static void | |
2625 | metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx) | |
2626 | { | |
93cf2076 GW |
2627 | range_tree_t *condense_tree; |
2628 | space_map_t *sm = msp->ms_sm; | |
e51be066 GW |
2629 | |
2630 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
93cf2076 | 2631 | ASSERT(msp->ms_loaded); |
e51be066 | 2632 | |
f3a7f661 | 2633 | |
964c2d69 | 2634 | zfs_dbgmsg("condensing: txg %llu, msp[%llu] %p, vdev id %llu, " |
5f3d9c69 JS |
2635 | "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg, |
2636 | msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id, | |
2637 | msp->ms_group->mg_vd->vdev_spa->spa_name, | |
d2734cce SD |
2638 | space_map_length(msp->ms_sm), |
2639 | avl_numnodes(&msp->ms_allocatable->rt_root), | |
f3a7f661 GW |
2640 | msp->ms_condense_wanted ? "TRUE" : "FALSE"); |
2641 | ||
2642 | msp->ms_condense_wanted = B_FALSE; | |
e51be066 GW |
2643 | |
2644 | /* | |
93cf2076 | 2645 | * Create a range tree that is 100% allocated. We remove segments | |
e51be066 GW |
2646 | * that have been freed in this txg, any deferred frees that exist, |
2647 | * and any allocation in the future. Removing segments should be | |
93cf2076 GW |
2648 | * a relatively inexpensive operation since we expect these trees to |
2649 | * have a small number of nodes. | |
e51be066 | 2650 | */ |
a1d477c2 | 2651 | condense_tree = range_tree_create(NULL, NULL); |
93cf2076 | 2652 | range_tree_add(condense_tree, msp->ms_start, msp->ms_size); |
e51be066 | 2653 | |
d2734cce SD |
2654 | range_tree_walk(msp->ms_freeing, range_tree_remove, condense_tree); |
2655 | range_tree_walk(msp->ms_freed, range_tree_remove, condense_tree); | |
e51be066 | 2656 | |
1c27024e | 2657 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { |
d2734cce | 2658 | range_tree_walk(msp->ms_defer[t], |
93cf2076 GW |
2659 | range_tree_remove, condense_tree); |
2660 | } | |
e51be066 | 2661 | |
1c27024e | 2662 | for (int t = 1; t < TXG_CONCURRENT_STATES; t++) { |
d2734cce | 2663 | range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK], |
93cf2076 GW |
2664 | range_tree_remove, condense_tree); |
2665 | } | |
e51be066 GW |
2666 | |
2667 | /* | |
2668 | * We're about to drop the metaslab's lock thus allowing | |
2669 | * other consumers to change its contents. Set the | |
93cf2076 | 2670 | * metaslab's ms_condensing flag to ensure that |
e51be066 GW |
2671 | * allocations on this metaslab do not occur while we're |
2672 | * in the middle of committing it to disk. This is only critical | |
d2734cce | 2673 | * for ms_allocatable as all other range trees use per txg |
e51be066 GW |
2674 | * views of their content. |
2675 | */ | |
93cf2076 | 2676 | msp->ms_condensing = B_TRUE; |
e51be066 GW |
2677 | |
2678 | mutex_exit(&msp->ms_lock); | |
d2734cce | 2679 | space_map_truncate(sm, zfs_metaslab_sm_blksz, tx); |
e51be066 GW |
2680 | |
2681 | /* | |
4e21fd06 | 2682 | * While we would ideally like to create a space map representation |
e51be066 | 2683 | * that consists only of allocation records, doing so can be |
93cf2076 | 2684 | * prohibitively expensive because the in-core free tree can be |
e51be066 | 2685 | * large, and therefore computationally expensive to subtract |
93cf2076 GW |
2686 | * from the condense_tree. Instead we sync out two trees, a cheap |
2687 | * allocation only tree followed by the in-core free tree. While not | |
e51be066 GW |
2688 | * optimal, this is typically close to optimal, and much cheaper to |
2689 | * compute. | |
2690 | */ | |
4d044c4c | 2691 | space_map_write(sm, condense_tree, SM_ALLOC, SM_NO_VDEVID, tx); |
93cf2076 GW |
2692 | range_tree_vacate(condense_tree, NULL, NULL); |
2693 | range_tree_destroy(condense_tree); | |
e51be066 | 2694 | |
4d044c4c | 2695 | space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx); |
a1d477c2 | 2696 | mutex_enter(&msp->ms_lock); |
93cf2076 | 2697 | msp->ms_condensing = B_FALSE; |
e51be066 GW |
2698 | } |
2699 | ||
34dc7c2f BB |
2700 | /* |
2701 | * Write a metaslab to disk in the context of the specified transaction group. | |
2702 | */ | |
2703 | void | |
2704 | metaslab_sync(metaslab_t *msp, uint64_t txg) | |
2705 | { | |
93cf2076 GW |
2706 | metaslab_group_t *mg = msp->ms_group; |
2707 | vdev_t *vd = mg->mg_vd; | |
34dc7c2f | 2708 | spa_t *spa = vd->vdev_spa; |
428870ff | 2709 | objset_t *mos = spa_meta_objset(spa); |
d2734cce | 2710 | range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK]; |
34dc7c2f | 2711 | dmu_tx_t *tx; |
93cf2076 | 2712 | uint64_t object = space_map_object(msp->ms_sm); |
34dc7c2f | 2713 | |
428870ff BB |
2714 | ASSERT(!vd->vdev_ishole); |
2715 | ||
e51be066 GW |
2716 | /* |
2717 | * This metaslab has just been added so there's no work to do now. | |
2718 | */ | |
d2734cce | 2719 | if (msp->ms_freeing == NULL) { |
93cf2076 | 2720 | ASSERT3P(alloctree, ==, NULL); |
e51be066 GW |
2721 | return; |
2722 | } | |
2723 | ||
93cf2076 | 2724 | ASSERT3P(alloctree, !=, NULL); |
d2734cce SD |
2725 | ASSERT3P(msp->ms_freeing, !=, NULL); |
2726 | ASSERT3P(msp->ms_freed, !=, NULL); | |
2727 | ASSERT3P(msp->ms_checkpointing, !=, NULL); | |
e51be066 | 2728 | |
f3a7f661 | 2729 | /* |
d2734cce SD |
2730 | * Normally, we don't want to process a metaslab if there are no |
2731 | * allocations or frees to perform. However, if the metaslab is being | |
2732 | * forced to condense and it's loaded, we need to let it through. | |
f3a7f661 | 2733 | */ |
d2734cce SD |
2734 | if (range_tree_is_empty(alloctree) && |
2735 | range_tree_is_empty(msp->ms_freeing) && | |
2736 | range_tree_is_empty(msp->ms_checkpointing) && | |
3b7f360c | 2737 | !(msp->ms_loaded && msp->ms_condense_wanted)) |
428870ff | 2738 | return; |
34dc7c2f | 2739 | |
3b7f360c GW |
2740 | |
2741 | VERIFY(txg <= spa_final_dirty_txg(spa)); | |
2742 | ||
34dc7c2f | 2743 | /* |
425d3237 SD |
2744 | * The only state that can actually be changing concurrently |
2745 | * with metaslab_sync() is the metaslab's ms_allocatable. No | |
2746 | * other thread can be modifying this txg's alloc, freeing, | |
d2734cce | 2747 | * freed, or space_map_phys_t. We drop ms_lock whenever we |
425d3237 SD |
2748 | * could call into the DMU, because the DMU can call down to |
2749 | * us (e.g. via zio_free()) at any time. | |
a1d477c2 MA |
2750 | * |
2751 | * The spa_vdev_remove_thread() can be reading metaslab state | |
425d3237 SD |
2752 | * concurrently, and it is locked out by the ms_sync_lock. |
2753 | * Note that the ms_lock is insufficient for this, because it | |
2754 | * is dropped by space_map_write(). | |
34dc7c2f | 2755 | */ |
428870ff | 2756 | tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); |
34dc7c2f | 2757 | |
93cf2076 GW |
2758 | if (msp->ms_sm == NULL) { |
2759 | uint64_t new_object; | |
2760 | ||
d2734cce | 2761 | new_object = space_map_alloc(mos, zfs_metaslab_sm_blksz, tx); |
93cf2076 GW |
2762 | VERIFY3U(new_object, !=, 0); |
2763 | ||
2764 | VERIFY0(space_map_open(&msp->ms_sm, mos, new_object, | |
a1d477c2 | 2765 | msp->ms_start, msp->ms_size, vd->vdev_ashift)); |
425d3237 | 2766 | |
93cf2076 | 2767 | ASSERT(msp->ms_sm != NULL); |
425d3237 | 2768 | ASSERT0(metaslab_allocated_space(msp)); |
34dc7c2f BB |
2769 | } |
2770 | ||
d2734cce SD |
2771 | if (!range_tree_is_empty(msp->ms_checkpointing) && |
2772 | vd->vdev_checkpoint_sm == NULL) { | |
2773 | ASSERT(spa_has_checkpoint(spa)); | |
2774 | ||
2775 | uint64_t new_object = space_map_alloc(mos, | |
2776 | vdev_standard_sm_blksz, tx); | |
2777 | VERIFY3U(new_object, !=, 0); | |
2778 | ||
2779 | VERIFY0(space_map_open(&vd->vdev_checkpoint_sm, | |
2780 | mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift)); | |
2781 | ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); | |
2782 | ||
2783 | /* | |
2784 | * We save the space map object as an entry in vdev_top_zap | |
2785 | * so it can be retrieved when the pool is reopened after an | |
2786 | * export or through zdb. | |
2787 | */ | |
2788 | VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, | |
2789 | vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, | |
2790 | sizeof (new_object), 1, &new_object, tx)); | |
2791 | } | |
2792 | ||
a1d477c2 | 2793 | mutex_enter(&msp->ms_sync_lock); |
428870ff BB |
2794 | mutex_enter(&msp->ms_lock); |
2795 | ||
96358617 | 2796 | /* |
4e21fd06 DB |
2797 | * Note: metaslab_condense() clears the space map's histogram. |
2798 | * Therefore we must verify and remove this histogram before | |
96358617 MA |
2799 | * condensing. |
2800 | */ | |
2801 | metaslab_group_histogram_verify(mg); | |
2802 | metaslab_class_histogram_verify(mg->mg_class); | |
2803 | metaslab_group_histogram_remove(mg, msp); | |
2804 | ||
d2734cce | 2805 | if (msp->ms_loaded && metaslab_should_condense(msp)) { |
e51be066 GW |
2806 | metaslab_condense(msp, txg, tx); |
2807 | } else { | |
a1d477c2 | 2808 | mutex_exit(&msp->ms_lock); |
4d044c4c SD |
2809 | space_map_write(msp->ms_sm, alloctree, SM_ALLOC, |
2810 | SM_NO_VDEVID, tx); | |
2811 | space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE, | |
2812 | SM_NO_VDEVID, tx); | |
a1d477c2 | 2813 | mutex_enter(&msp->ms_lock); |
e51be066 | 2814 | } |
428870ff | 2815 | |
425d3237 SD |
2816 | msp->ms_allocated_space += range_tree_space(alloctree); |
2817 | ASSERT3U(msp->ms_allocated_space, >=, | |
2818 | range_tree_space(msp->ms_freeing)); | |
2819 | msp->ms_allocated_space -= range_tree_space(msp->ms_freeing); | |
2820 | ||
d2734cce SD |
2821 | if (!range_tree_is_empty(msp->ms_checkpointing)) { |
2822 | ASSERT(spa_has_checkpoint(spa)); | |
2823 | ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); | |
2824 | ||
2825 | /* | |
2826 | * Since we are doing writes to disk and the ms_checkpointing | |
2827 | * tree won't be changing during that time, we drop the | |
2828 | * ms_lock while writing to the checkpoint space map. | |
2829 | */ | |
2830 | mutex_exit(&msp->ms_lock); | |
2831 | space_map_write(vd->vdev_checkpoint_sm, | |
4d044c4c | 2832 | msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx); |
d2734cce | 2833 | mutex_enter(&msp->ms_lock); |
d2734cce SD |
2834 | |
2835 | spa->spa_checkpoint_info.sci_dspace += | |
2836 | range_tree_space(msp->ms_checkpointing); | |
2837 | vd->vdev_stat.vs_checkpoint_space += | |
2838 | range_tree_space(msp->ms_checkpointing); | |
2839 | ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==, | |
425d3237 | 2840 | -space_map_allocated(vd->vdev_checkpoint_sm)); |
d2734cce SD |
2841 | |
2842 | range_tree_vacate(msp->ms_checkpointing, NULL, NULL); | |
2843 | } | |
2844 | ||
93cf2076 GW |
2845 | if (msp->ms_loaded) { |
2846 | /* | |
a1d477c2 | 2847 | * When the space map is loaded, we have an accurate |
93cf2076 GW |
2848 | * histogram in the range tree. This gives us an opportunity |
2849 | * to bring the space map's histogram up-to-date so we clear | |
2850 | * it before updating it. |
2851 | */ | |
2852 | space_map_histogram_clear(msp->ms_sm); | |
d2734cce | 2853 | space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx); |
4e21fd06 DB |
2854 | |
2855 | /* | |
2856 | * Since we've cleared the histogram we need to add back | |
2857 | * any free space that has already been processed, plus | |
2858 | * any deferred space. This allows the on-disk histogram | |
2859 | * to accurately reflect all free space even if some space | |
2860 | * is not yet available for allocation (i.e. deferred). | |
2861 | */ | |
d2734cce | 2862 | space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx); |
4e21fd06 | 2863 | |
93cf2076 | 2864 | /* |
4e21fd06 DB |
2865 | * Add back any deferred free space that has not been |
2866 | * added back into the in-core free tree yet. This will | |
2867 | * ensure that we don't end up with a space map histogram | |
2868 | * that is completely empty unless the metaslab is fully | |
2869 | * allocated. | |
93cf2076 | 2870 | */ |
1c27024e | 2871 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { |
4e21fd06 | 2872 | space_map_histogram_add(msp->ms_sm, |
d2734cce | 2873 | msp->ms_defer[t], tx); |
4e21fd06 | 2874 | } |
93cf2076 | 2875 | } |
4e21fd06 DB |
2876 | |
2877 | /* | |
2878 | * Always add the free space from this sync pass to the space | |
2879 | * map histogram. We want to make sure that the on-disk histogram | |
2880 | * accounts for all free space. If the space map is not loaded, | |
2881 | * then we will lose some accuracy but will correct it the next | |
2882 | * time we load the space map. | |
2883 | */ | |
d2734cce | 2884 | space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx); |
928e8ad4 | 2885 | metaslab_aux_histograms_update(msp); |
4e21fd06 | 2886 | |
f3a7f661 GW |
2887 | metaslab_group_histogram_add(mg, msp); |
2888 | metaslab_group_histogram_verify(mg); | |
2889 | metaslab_class_histogram_verify(mg->mg_class); | |
34dc7c2f | 2890 | |
e51be066 | 2891 | /* |
93cf2076 | 2892 | * For sync pass 1, we avoid traversing this txg's free range tree |
425d3237 SD |
2893 | * and instead will just swap the pointers for freeing and freed. |
2894 | * We can safely do this since the freed_tree is guaranteed to be | |
2895 | * empty on the initial pass. | |
e51be066 GW |
2896 | */ |
2897 | if (spa_sync_pass(spa) == 1) { | |
d2734cce | 2898 | range_tree_swap(&msp->ms_freeing, &msp->ms_freed); |
425d3237 | 2899 | ASSERT0(msp->ms_allocated_this_txg); |
e51be066 | 2900 | } else { |
d2734cce SD |
2901 | range_tree_vacate(msp->ms_freeing, |
2902 | range_tree_add, msp->ms_freed); | |
34dc7c2f | 2903 | } |
425d3237 | 2904 | msp->ms_allocated_this_txg += range_tree_space(alloctree); |
f3a7f661 | 2905 | range_tree_vacate(alloctree, NULL, NULL); |
34dc7c2f | 2906 | |
d2734cce SD |
2907 | ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK])); |
2908 | ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg) | |
2909 | & TXG_MASK])); | |
2910 | ASSERT0(range_tree_space(msp->ms_freeing)); | |
2911 | ASSERT0(range_tree_space(msp->ms_checkpointing)); | |
34dc7c2f BB |
2912 | |
2913 | mutex_exit(&msp->ms_lock); | |
2914 | ||
93cf2076 GW |
2915 | if (object != space_map_object(msp->ms_sm)) { |
2916 | object = space_map_object(msp->ms_sm); | |
2917 | dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) * | |
2918 | msp->ms_id, sizeof (uint64_t), &object, tx); | |
2919 | } | |
a1d477c2 | 2920 | mutex_exit(&msp->ms_sync_lock); |
34dc7c2f BB |
2921 | dmu_tx_commit(tx); |
2922 | } | |
2923 | ||
2924 | /* | |
2925 | * Called after a transaction group has completely synced to mark | |
2926 | * all of the metaslab's free space as usable. | |
2927 | */ | |
2928 | void | |
2929 | metaslab_sync_done(metaslab_t *msp, uint64_t txg) | |
2930 | { | |
34dc7c2f BB |
2931 | metaslab_group_t *mg = msp->ms_group; |
2932 | vdev_t *vd = mg->mg_vd; | |
4e21fd06 | 2933 | spa_t *spa = vd->vdev_spa; |
93cf2076 | 2934 | range_tree_t **defer_tree; |
428870ff | 2935 | int64_t alloc_delta, defer_delta; |
4e21fd06 | 2936 | boolean_t defer_allowed = B_TRUE; |
428870ff BB |
2937 | |
2938 | ASSERT(!vd->vdev_ishole); | |
34dc7c2f BB |
2939 | |
2940 | mutex_enter(&msp->ms_lock); | |
2941 | ||
2942 | /* | |
2943 | * If this metaslab is just becoming available, initialize its | |
258553d3 | 2944 | * range trees and add its capacity to the vdev. |
34dc7c2f | 2945 | */ |
d2734cce | 2946 | if (msp->ms_freed == NULL) { |
1c27024e | 2947 | for (int t = 0; t < TXG_SIZE; t++) { |
d2734cce | 2948 | ASSERT(msp->ms_allocating[t] == NULL); |
93cf2076 | 2949 | |
d2734cce | 2950 | msp->ms_allocating[t] = range_tree_create(NULL, NULL); |
34dc7c2f | 2951 | } |
428870ff | 2952 | |
d2734cce SD |
2953 | ASSERT3P(msp->ms_freeing, ==, NULL); |
2954 | msp->ms_freeing = range_tree_create(NULL, NULL); | |
258553d3 | 2955 | |
d2734cce SD |
2956 | ASSERT3P(msp->ms_freed, ==, NULL); |
2957 | msp->ms_freed = range_tree_create(NULL, NULL); | |
258553d3 | 2958 | |
1c27024e | 2959 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { |
d2734cce | 2960 | ASSERT(msp->ms_defer[t] == NULL); |
e51be066 | 2961 | |
d2734cce | 2962 | msp->ms_defer[t] = range_tree_create(NULL, NULL); |
93cf2076 | 2963 | } |
428870ff | 2964 | |
d2734cce SD |
2965 | ASSERT3P(msp->ms_checkpointing, ==, NULL); |
2966 | msp->ms_checkpointing = range_tree_create(NULL, NULL); | |
2967 | ||
cc99f275 | 2968 | metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size); |
34dc7c2f | 2969 | } |
d2734cce SD |
2970 | ASSERT0(range_tree_space(msp->ms_freeing)); |
2971 | ASSERT0(range_tree_space(msp->ms_checkpointing)); | |
34dc7c2f | 2972 | |
d2734cce | 2973 | defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE]; |
93cf2076 | 2974 | |
1c27024e | 2975 | uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) - |
4e21fd06 | 2976 | metaslab_class_get_alloc(spa_normal_class(spa)); |
a1d477c2 | 2977 | if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) { |
4e21fd06 DB |
2978 | defer_allowed = B_FALSE; |
2979 | } | |
2980 | ||
2981 | defer_delta = 0; | |
425d3237 SD |
2982 | alloc_delta = msp->ms_allocated_this_txg - |
2983 | range_tree_space(msp->ms_freed); | |
4e21fd06 | 2984 | if (defer_allowed) { |
d2734cce | 2985 | defer_delta = range_tree_space(msp->ms_freed) - |
4e21fd06 DB |
2986 | range_tree_space(*defer_tree); |
2987 | } else { | |
2988 | defer_delta -= range_tree_space(*defer_tree); | |
2989 | } | |
428870ff | 2990 | |
cc99f275 DB |
2991 | metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta, |
2992 | defer_delta, 0); | |
34dc7c2f | 2993 | |
34dc7c2f | 2994 | /* |
93cf2076 | 2995 | * If there's a metaslab_load() in progress, wait for it to complete |
34dc7c2f | 2996 | * so that we have a consistent view of the in-core space map. |
34dc7c2f | 2997 | */ |
93cf2076 | 2998 | metaslab_load_wait(msp); |
c2e42f9d GW |
2999 | |
3000 | /* | |
93cf2076 | 3001 | * Move the frees from the defer_tree back to the free |
d2734cce SD |
3002 | * range tree (if it's loaded). Swap the freed_tree and |
3003 | * the defer_tree -- this is safe to do because we've | |
3004 | * just emptied out the defer_tree. | |
c2e42f9d | 3005 | */ |
93cf2076 | 3006 | range_tree_vacate(*defer_tree, |
d2734cce | 3007 | msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable); |
4e21fd06 | 3008 | if (defer_allowed) { |
d2734cce | 3009 | range_tree_swap(&msp->ms_freed, defer_tree); |
4e21fd06 | 3010 | } else { |
d2734cce SD |
3011 | range_tree_vacate(msp->ms_freed, |
3012 | msp->ms_loaded ? range_tree_add : NULL, | |
3013 | msp->ms_allocatable); | |
4e21fd06 | 3014 | } |
425d3237 SD |
3015 | |
3016 | msp->ms_synced_length = space_map_length(msp->ms_sm); | |
34dc7c2f | 3017 | |
428870ff BB |
3018 | msp->ms_deferspace += defer_delta; |
3019 | ASSERT3S(msp->ms_deferspace, >=, 0); | |
93cf2076 | 3020 | ASSERT3S(msp->ms_deferspace, <=, msp->ms_size); |
428870ff BB |
3021 | if (msp->ms_deferspace != 0) { |
3022 | /* | |
3023 | * Keep syncing this metaslab until all deferred frees | |
3024 | * are back in circulation. | |
3025 | */ | |
3026 | vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); | |
3027 | } | |
928e8ad4 | 3028 | metaslab_aux_histograms_update_done(msp, defer_allowed); |
428870ff | 3029 | |
492f64e9 PD |
3030 | if (msp->ms_new) { |
3031 | msp->ms_new = B_FALSE; | |
3032 | mutex_enter(&mg->mg_lock); | |
3033 | mg->mg_ms_ready++; | |
3034 | mutex_exit(&mg->mg_lock); | |
3035 | } | |
928e8ad4 | 3036 | |
4e21fd06 | 3037 | /* |
928e8ad4 SD |
3038 | * Re-sort metaslab within its group now that we've adjusted |
3039 | * its allocatable space. | |
4e21fd06 | 3040 | */ |
928e8ad4 | 3041 | metaslab_recalculate_weight_and_sort(msp); |
4e21fd06 DB |
3042 | |
3043 | /* | |
3044 | * If the metaslab is loaded and we've not tried to load or allocate | |
3045 | * from it in 'metaslab_unload_delay' txgs, then unload it. | |
3046 | */ | |
3047 | if (msp->ms_loaded && | |
619f0976 | 3048 | msp->ms_initializing == 0 && |
4e21fd06 DB |
3049 | msp->ms_selected_txg + metaslab_unload_delay < txg) { |
3050 | ||
1c27024e | 3051 | for (int t = 1; t < TXG_CONCURRENT_STATES; t++) { |
93cf2076 | 3052 | VERIFY0(range_tree_space( |
d2734cce | 3053 | msp->ms_allocating[(txg + t) & TXG_MASK])); |
93cf2076 | 3054 | } |
492f64e9 PD |
3055 | if (msp->ms_allocator != -1) { |
3056 | metaslab_passivate(msp, msp->ms_weight & | |
3057 | ~METASLAB_ACTIVE_MASK); | |
3058 | } | |
34dc7c2f | 3059 | |
93cf2076 GW |
3060 | if (!metaslab_debug_unload) |
3061 | metaslab_unload(msp); | |
34dc7c2f BB |
3062 | } |
3063 | ||
d2734cce SD |
3064 | ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK])); |
3065 | ASSERT0(range_tree_space(msp->ms_freeing)); | |
3066 | ASSERT0(range_tree_space(msp->ms_freed)); | |
3067 | ASSERT0(range_tree_space(msp->ms_checkpointing)); | |
a1d477c2 | 3068 | |
425d3237 | 3069 | msp->ms_allocated_this_txg = 0; |
34dc7c2f BB |
3070 | mutex_exit(&msp->ms_lock); |
3071 | } | |
3072 | ||
428870ff BB |
3073 | void |
3074 | metaslab_sync_reassess(metaslab_group_t *mg) | |
3075 | { | |
a1d477c2 MA |
3076 | spa_t *spa = mg->mg_class->mc_spa; |
3077 | ||
3078 | spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); | |
1be627f5 | 3079 | metaslab_group_alloc_update(mg); |
f3a7f661 | 3080 | mg->mg_fragmentation = metaslab_group_fragmentation(mg); |
6d974228 | 3081 | |
428870ff | 3082 | /* |
a1d477c2 MA |
3083 | * Preload the next potential metaslabs but only on active |
3084 | * metaslab groups. We can get into a state where the metaslab | |
3085 | * is no longer active since we dirty metaslabs as we remove a | |
3086 | * device, thus potentially making the metaslab group eligible |
3087 | * for preloading. | |
428870ff | 3088 | */ |
a1d477c2 MA |
3089 | if (mg->mg_activation_count > 0) { |
3090 | metaslab_group_preload(mg); | |
3091 | } | |
3092 | spa_config_exit(spa, SCL_ALLOC, FTAG); | |
428870ff BB |
3093 | } |
3094 | ||
cc99f275 DB |
3095 | /* |
3096 | * When writing a ditto block (i.e. more than one DVA for a given BP) on | |
3097 | * the same vdev as an existing DVA of this BP, try to allocate it |
3098 | * on a different metaslab than existing DVAs (i.e. a unique metaslab). | |
3099 | */ | |
3100 | static boolean_t | |
3101 | metaslab_is_unique(metaslab_t *msp, dva_t *dva) | |
34dc7c2f | 3102 | { |
cc99f275 DB |
3103 | uint64_t dva_ms_id; |
3104 | ||
3105 | if (DVA_GET_ASIZE(dva) == 0) | |
3106 | return (B_TRUE); | |
34dc7c2f BB |
3107 | |
3108 | if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva)) | |
cc99f275 | 3109 | return (B_TRUE); |
34dc7c2f | 3110 | |
cc99f275 DB |
3111 | dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift; |
3112 | ||
3113 | return (msp->ms_id != dva_ms_id); | |
34dc7c2f BB |
3114 | } |
3115 | ||
4e21fd06 DB |
3116 | /* |
3117 | * ========================================================================== | |
3118 | * Metaslab allocation tracing facility | |
3119 | * ========================================================================== | |
3120 | */ | |
3121 | #ifdef _METASLAB_TRACING | |
3122 | kstat_t *metaslab_trace_ksp; | |
3123 | kstat_named_t metaslab_trace_over_limit; | |
3124 | ||
3125 | void | |
3126 | metaslab_alloc_trace_init(void) | |
3127 | { | |
3128 | ASSERT(metaslab_alloc_trace_cache == NULL); | |
3129 | metaslab_alloc_trace_cache = kmem_cache_create( | |
3130 | "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t), | |
3131 | 0, NULL, NULL, NULL, NULL, NULL, 0); | |
3132 | metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats", | |
3133 | "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL); | |
3134 | if (metaslab_trace_ksp != NULL) { | |
3135 | metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit; | |
3136 | kstat_named_init(&metaslab_trace_over_limit, | |
3137 | "metaslab_trace_over_limit", KSTAT_DATA_UINT64); | |
3138 | kstat_install(metaslab_trace_ksp); | |
3139 | } | |
3140 | } | |
3141 | ||
3142 | void | |
3143 | metaslab_alloc_trace_fini(void) | |
3144 | { | |
3145 | if (metaslab_trace_ksp != NULL) { | |
3146 | kstat_delete(metaslab_trace_ksp); | |
3147 | metaslab_trace_ksp = NULL; | |
3148 | } | |
3149 | kmem_cache_destroy(metaslab_alloc_trace_cache); | |
3150 | metaslab_alloc_trace_cache = NULL; | |
3151 | } | |
3152 | ||
3153 | /* | |
3154 | * Add an allocation trace element to the allocation tracing list. | |
3155 | */ | |
3156 | static void | |
3157 | metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg, | |
492f64e9 PD |
3158 | metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset, |
3159 | int allocator) | |
4e21fd06 DB |
3160 | { |
3161 | metaslab_alloc_trace_t *mat; | |
3162 | ||
3163 | if (!metaslab_trace_enabled) | |
3164 | return; | |
3165 | ||
3166 | /* | |
3167 | * When the tracing list reaches its maximum we remove | |
3168 | * the second element in the list before adding a new one. | |
3169 | * By removing the second element we preserve the original | |
3170 | * entry as a clue to what allocation steps have already been |
3171 | * performed. | |
3172 | */ | |
3173 | if (zal->zal_size == metaslab_trace_max_entries) { | |
3174 | metaslab_alloc_trace_t *mat_next; | |
3175 | #ifdef DEBUG | |
3176 | panic("too many entries in allocation list"); | |
3177 | #endif | |
3178 | atomic_inc_64(&metaslab_trace_over_limit.value.ui64); | |
3179 | zal->zal_size--; | |
3180 | mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list)); | |
3181 | list_remove(&zal->zal_list, mat_next); | |
3182 | kmem_cache_free(metaslab_alloc_trace_cache, mat_next); | |
3183 | } | |
3184 | ||
3185 | mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP); | |
3186 | list_link_init(&mat->mat_list_node); | |
3187 | mat->mat_mg = mg; | |
3188 | mat->mat_msp = msp; | |
3189 | mat->mat_size = psize; | |
3190 | mat->mat_dva_id = dva_id; | |
3191 | mat->mat_offset = offset; | |
3192 | mat->mat_weight = 0; | |
492f64e9 | 3193 | mat->mat_allocator = allocator; |
4e21fd06 DB |
3194 | |
3195 | if (msp != NULL) | |
3196 | mat->mat_weight = msp->ms_weight; | |
3197 | ||
3198 | /* | |
3199 | * The list is part of the zio so locking is not required. Only | |
3200 | * a single thread will perform allocations for a given zio. | |
3201 | */ | |
3202 | list_insert_tail(&zal->zal_list, mat); | |
3203 | zal->zal_size++; | |
3204 | ||
3205 | ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries); | |
3206 | } | |
3207 | ||
3208 | void | |
3209 | metaslab_trace_init(zio_alloc_list_t *zal) | |
3210 | { | |
3211 | list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t), | |
3212 | offsetof(metaslab_alloc_trace_t, mat_list_node)); | |
3213 | zal->zal_size = 0; | |
3214 | } | |
3215 | ||
3216 | void | |
3217 | metaslab_trace_fini(zio_alloc_list_t *zal) | |
3218 | { | |
3219 | metaslab_alloc_trace_t *mat; | |
3220 | ||
3221 | while ((mat = list_remove_head(&zal->zal_list)) != NULL) | |
3222 | kmem_cache_free(metaslab_alloc_trace_cache, mat); | |
3223 | list_destroy(&zal->zal_list); | |
3224 | zal->zal_size = 0; | |
3225 | } | |
3226 | #else | |
3227 | ||
492f64e9 | 3228 | #define metaslab_trace_add(zal, mg, msp, psize, id, off, alloc) |
4e21fd06 DB |
3229 | |
3230 | void | |
3231 | metaslab_alloc_trace_init(void) | |
3232 | { | |
3233 | } | |
3234 | ||
3235 | void | |
3236 | metaslab_alloc_trace_fini(void) | |
3237 | { | |
3238 | } | |
3239 | ||
3240 | void | |
3241 | metaslab_trace_init(zio_alloc_list_t *zal) | |
3242 | { | |
3243 | } | |
3244 | ||
3245 | void | |
3246 | metaslab_trace_fini(zio_alloc_list_t *zal) | |
3247 | { | |
3248 | } | |
3249 | ||
3250 | #endif /* _METASLAB_TRACING */ | |
3251 | ||
3dfb57a3 DB |
3252 | /* |
3253 | * ========================================================================== | |
3254 | * Metaslab block operations | |
3255 | * ========================================================================== | |
3256 | */ | |
3257 | ||
3258 | static void | |
492f64e9 PD |
3259 | metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags, |
3260 | int allocator) | |
3dfb57a3 | 3261 | { |
3dfb57a3 | 3262 | if (!(flags & METASLAB_ASYNC_ALLOC) || |
492f64e9 | 3263 | (flags & METASLAB_DONT_THROTTLE)) |
3dfb57a3 DB |
3264 | return; |
3265 | ||
1c27024e | 3266 | metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; |
3dfb57a3 DB |
3267 | if (!mg->mg_class->mc_alloc_throttle_enabled) |
3268 | return; | |
3269 | ||
c13060e4 | 3270 | (void) zfs_refcount_add(&mg->mg_alloc_queue_depth[allocator], tag); |
492f64e9 PD |
3271 | } |
3272 | ||
3273 | static void | |
3274 | metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator) | |
3275 | { | |
3276 | uint64_t max = mg->mg_max_alloc_queue_depth; | |
3277 | uint64_t cur = mg->mg_cur_max_alloc_queue_depth[allocator]; | |
3278 | while (cur < max) { | |
3279 | if (atomic_cas_64(&mg->mg_cur_max_alloc_queue_depth[allocator], | |
3280 | cur, cur + 1) == cur) { | |
3281 | atomic_inc_64( | |
3282 | &mg->mg_class->mc_alloc_max_slots[allocator]); | |
3283 | return; | |
3284 | } | |
3285 | cur = mg->mg_cur_max_alloc_queue_depth[allocator]; | |
3286 | } | |
3dfb57a3 DB |
3287 | } |
3288 | ||
3289 | void | |
492f64e9 PD |
3290 | metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags, |
3291 | int allocator, boolean_t io_complete) | |
3dfb57a3 | 3292 | { |
3dfb57a3 | 3293 | if (!(flags & METASLAB_ASYNC_ALLOC) || |
492f64e9 | 3294 | (flags & METASLAB_DONT_THROTTLE)) |
3dfb57a3 DB |
3295 | return; |
3296 | ||
1c27024e | 3297 | metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; |
3dfb57a3 DB |
3298 | if (!mg->mg_class->mc_alloc_throttle_enabled) |
3299 | return; | |
3300 | ||
424fd7c3 | 3301 | (void) zfs_refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag); |
492f64e9 PD |
3302 | if (io_complete) |
3303 | metaslab_group_increment_qdepth(mg, allocator); | |
3dfb57a3 DB |
3304 | } |
3305 | ||
3306 | void | |
492f64e9 PD |
3307 | metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag, |
3308 | int allocator) | |
3dfb57a3 DB |
3309 | { |
3310 | #ifdef ZFS_DEBUG | |
3311 | const dva_t *dva = bp->blk_dva; | |
3312 | int ndvas = BP_GET_NDVAS(bp); | |
3dfb57a3 | 3313 | |
1c27024e | 3314 | for (int d = 0; d < ndvas; d++) { |
3dfb57a3 DB |
3315 | uint64_t vdev = DVA_GET_VDEV(&dva[d]); |
3316 | metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; | |
424fd7c3 TS |
3317 | VERIFY(zfs_refcount_not_held( |
3318 | &mg->mg_alloc_queue_depth[allocator], tag)); | |
3dfb57a3 DB |
3319 | } |
3320 | #endif | |
3321 | } | |
3322 | ||
34dc7c2f | 3323 | static uint64_t |
4e21fd06 DB |
3324 | metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg) |
3325 | { | |
3326 | uint64_t start; | |
d2734cce | 3327 | range_tree_t *rt = msp->ms_allocatable; |
4e21fd06 DB |
3328 | metaslab_class_t *mc = msp->ms_group->mg_class; |
3329 | ||
3330 | VERIFY(!msp->ms_condensing); | |
619f0976 | 3331 | VERIFY0(msp->ms_initializing); |
4e21fd06 DB |
3332 | |
3333 | start = mc->mc_ops->msop_alloc(msp, size); | |
3334 | if (start != -1ULL) { | |
3335 | metaslab_group_t *mg = msp->ms_group; | |
3336 | vdev_t *vd = mg->mg_vd; | |
3337 | ||
3338 | VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift)); | |
3339 | VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); | |
3340 | VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size); | |
3341 | range_tree_remove(rt, start, size); | |
3342 | ||
d2734cce | 3343 | if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) |
4e21fd06 DB |
3344 | vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg); |
3345 | ||
d2734cce | 3346 | range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size); |
4e21fd06 DB |
3347 | |
3348 | /* Track the last successful allocation */ | |
3349 | msp->ms_alloc_txg = txg; | |
3350 | metaslab_verify_space(msp, txg); | |
3351 | } | |
3352 | ||
3353 | /* | |
3354 | * Now that we've attempted the allocation we need to update the | |
3355 | * metaslab's maximum block size since it may have changed. | |
3356 | */ | |
3357 | msp->ms_max_size = metaslab_block_maxsize(msp); | |
3358 | return (start); | |
3359 | } | |
3360 | ||
492f64e9 PD |
3361 | /* |
3362 | * Find the metaslab with the highest weight that is less than what we've | |
3363 | * already tried. In the common case, this means that we will examine each | |
3364 | * metaslab at most once. Note that concurrent callers could reorder metaslabs | |
3365 | * by activation/passivation once we have dropped the mg_lock. If a metaslab is | |
3366 | * activated by another thread, and we fail to allocate from the metaslab we | |
3367 | * have selected, we may not try the newly-activated metaslab, and instead | |
3368 | * activate another metaslab. This is not optimal, but generally does not cause | |
3369 | * any problems (a possible exception being if every metaslab is completely full | |
3370 | * except for the newly-activated metaslab which we fail to examine). |
3371 | */ | |
3372 | static metaslab_t * | |
3373 | find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight, | |
cc99f275 | 3374 | dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator, |
492f64e9 PD |
3375 | zio_alloc_list_t *zal, metaslab_t *search, boolean_t *was_active) |
3376 | { | |
3377 | avl_index_t idx; | |
3378 | avl_tree_t *t = &mg->mg_metaslab_tree; | |
3379 | metaslab_t *msp = avl_find(t, search, &idx); | |
3380 | if (msp == NULL) | |
3381 | msp = avl_nearest(t, idx, AVL_AFTER); | |
3382 | ||
3383 | for (; msp != NULL; msp = AVL_NEXT(t, msp)) { | |
3384 | int i; | |
3385 | if (!metaslab_should_allocate(msp, asize)) { | |
3386 | metaslab_trace_add(zal, mg, msp, asize, d, | |
3387 | TRACE_TOO_SMALL, allocator); | |
3388 | continue; | |
3389 | } | |
3390 | ||
3391 | /* | |
619f0976 GW |
3392 | * If the selected metaslab is condensing or being |
3393 | * initialized, skip it. | |
492f64e9 | 3394 | */ |
619f0976 | 3395 | if (msp->ms_condensing || msp->ms_initializing > 0) |
492f64e9 PD |
3396 | continue; |
3397 | ||
3398 | *was_active = msp->ms_allocator != -1; | |
3399 | /* | |
3400 | * If we're activating as primary, this is our first allocation | |
3401 | * from this disk, so we don't need to check how close we are. | |
3402 | * If the metaslab under consideration was already active, | |
3403 | * we're getting desperate enough to steal another allocator's | |
3404 | * metaslab, so we still don't care about distances. | |
3405 | */ | |
3406 | if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active) | |
3407 | break; | |
3408 | ||
492f64e9 | 3409 | for (i = 0; i < d; i++) { |
cc99f275 DB |
3410 | if (want_unique && |
3411 | !metaslab_is_unique(msp, &dva[i])) | |
3412 | break; /* try another metaslab */ | |
492f64e9 PD |
3413 | } |
3414 | if (i == d) | |
3415 | break; | |
3416 | } | |
3417 | ||
3418 | if (msp != NULL) { | |
3419 | search->ms_weight = msp->ms_weight; | |
3420 | search->ms_start = msp->ms_start + 1; | |
3421 | search->ms_allocator = msp->ms_allocator; | |
3422 | search->ms_primary = msp->ms_primary; | |
3423 | } | |
3424 | return (msp); | |
3425 | } | |
3426 | ||
3427 | /* ARGSUSED */ | |
4e21fd06 DB |
3428 | static uint64_t |
3429 | metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, | |
cc99f275 DB |
3430 | uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, |
3431 | int d, int allocator) | |
34dc7c2f BB |
3432 | { |
3433 | metaslab_t *msp = NULL; | |
3434 | uint64_t offset = -1ULL; | |
34dc7c2f | 3435 | uint64_t activation_weight; |
34dc7c2f BB |
3436 | |
3437 | activation_weight = METASLAB_WEIGHT_PRIMARY; | |
492f64e9 PD |
3438 | for (int i = 0; i < d; i++) { |
3439 | if (activation_weight == METASLAB_WEIGHT_PRIMARY && | |
3440 | DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { | |
34dc7c2f | 3441 | activation_weight = METASLAB_WEIGHT_SECONDARY; |
492f64e9 PD |
3442 | } else if (activation_weight == METASLAB_WEIGHT_SECONDARY && |
3443 | DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { | |
e38afd34 | 3444 | activation_weight = METASLAB_WEIGHT_CLAIM; |
9babb374 BB |
3445 | break; |
3446 | } | |
3447 | } | |
34dc7c2f | 3448 | |
492f64e9 PD |
3449 | /* |
3450 | * If we don't have enough metaslabs active to fill the entire array, we | |
3451 | * just use the 0th slot. | |
3452 | */ | |
e38afd34 | 3453 | if (mg->mg_ms_ready < mg->mg_allocators * 3) |
492f64e9 | 3454 | allocator = 0; |
492f64e9 PD |
3455 | |
3456 | ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2); | |
3457 | ||
1c27024e | 3458 | metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP); |
4e21fd06 DB |
3459 | search->ms_weight = UINT64_MAX; |
3460 | search->ms_start = 0; | |
492f64e9 PD |
3461 | /* |
3462 | * At the end of the metaslab tree are the already-active metaslabs, | |
3463 | * first the primaries, then the secondaries. When we resume searching | |
3464 | * through the tree, we need to consider ms_allocator and ms_primary so | |
3465 | * we start in the location right after where we left off, and don't | |
3466 | * accidentally loop forever considering the same metaslabs. | |
3467 | */ | |
3468 | search->ms_allocator = -1; | |
3469 | search->ms_primary = B_TRUE; | |
34dc7c2f | 3470 | for (;;) { |
492f64e9 | 3471 | boolean_t was_active = B_FALSE; |
9babb374 | 3472 | |
34dc7c2f | 3473 | mutex_enter(&mg->mg_lock); |
4e21fd06 | 3474 | |
492f64e9 PD |
3475 | if (activation_weight == METASLAB_WEIGHT_PRIMARY && |
3476 | mg->mg_primaries[allocator] != NULL) { | |
3477 | msp = mg->mg_primaries[allocator]; | |
3478 | was_active = B_TRUE; | |
3479 | } else if (activation_weight == METASLAB_WEIGHT_SECONDARY && | |
e38afd34 | 3480 | mg->mg_secondaries[allocator] != NULL) { |
492f64e9 PD |
3481 | msp = mg->mg_secondaries[allocator]; |
3482 | was_active = B_TRUE; | |
3483 | } else { | |
3484 | msp = find_valid_metaslab(mg, activation_weight, dva, d, | |
cc99f275 | 3485 | want_unique, asize, allocator, zal, search, |
492f64e9 | 3486 | &was_active); |
34dc7c2f | 3487 | } |
492f64e9 | 3488 | |
34dc7c2f | 3489 | mutex_exit(&mg->mg_lock); |
4e21fd06 DB |
3490 | if (msp == NULL) { |
3491 | kmem_free(search, sizeof (*search)); | |
34dc7c2f | 3492 | return (-1ULL); |
4e21fd06 | 3493 | } |
34dc7c2f | 3494 | |
ac72fac3 | 3495 | mutex_enter(&msp->ms_lock); |
34dc7c2f BB |
3496 | /* |
3497 | * Ensure that the metaslab we have selected is still | |
3498 | * capable of handling our request. It's possible that | |
3499 | * another thread may have changed the weight while we | |
4e21fd06 DB |
3500 | * were blocked on the metaslab lock. We check the |
3501 | * active status first to see if we need to reselect | |
3502 | * a new metaslab. | |
34dc7c2f | 3503 | */ |
4e21fd06 | 3504 | if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) { |
34dc7c2f BB |
3505 | mutex_exit(&msp->ms_lock); |
3506 | continue; | |
3507 | } | |
3508 | ||
492f64e9 PD |
3509 | /* |
3510 | * If the metaslab is freshly activated for an allocator that | |
3511 | * isn't the one we're allocating from, or if it's a primary and | |
3512 | * we're seeking a secondary (or vice versa), we go back and | |
3513 | * select a new metaslab. | |
3514 | */ | |
3515 | if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) && | |
3516 | (msp->ms_allocator != -1) && | |
3517 | (msp->ms_allocator != allocator || ((activation_weight == | |
3518 | METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) { | |
3519 | mutex_exit(&msp->ms_lock); | |
3520 | continue; | |
3521 | } | |
3522 | ||
e38afd34 | 3523 | if (msp->ms_weight & METASLAB_WEIGHT_CLAIM && |
3524 | activation_weight != METASLAB_WEIGHT_CLAIM) { | |
492f64e9 PD |
3525 | metaslab_passivate(msp, msp->ms_weight & |
3526 | ~METASLAB_WEIGHT_CLAIM); | |
34dc7c2f BB |
3527 | mutex_exit(&msp->ms_lock); |
3528 | continue; | |
3529 | } | |
3530 | ||
492f64e9 | 3531 | if (metaslab_activate(msp, allocator, activation_weight) != 0) { |
34dc7c2f BB |
3532 | mutex_exit(&msp->ms_lock); |
3533 | continue; | |
3534 | } | |
492f64e9 | 3535 | |
4e21fd06 DB |
3536 | msp->ms_selected_txg = txg; |
3537 | ||
3538 | /* | |
3539 | * Now that we have the lock, recheck to see if we should | |
3540 | * continue to use this metaslab for this allocation. The | |
3541 | * metaslab is now loaded so metaslab_should_allocate() can |
3542 | * accurately determine if the allocation attempt should | |
3543 | * proceed. | |
3544 | */ | |
3545 | if (!metaslab_should_allocate(msp, asize)) { | |
3546 | /* Passivate this metaslab and select a new one. */ | |
3547 | metaslab_trace_add(zal, mg, msp, asize, d, | |
492f64e9 | 3548 | TRACE_TOO_SMALL, allocator); |
4e21fd06 DB |
3549 | goto next; |
3550 | } | |
3551 | ||
34dc7c2f | 3552 | |
7a614407 GW |
3553 | /* |
3554 | * If this metaslab is currently condensing then pick again as | |
3555 | * we can't manipulate this metaslab until it's committed | |
619f0976 GW |
3556 | * to disk. If this metaslab is being initialized, we shouldn't |
3557 | * allocate from it since the allocated region might be | |
3558 | * overwritten after allocation. | |
7a614407 | 3559 | */ |
93cf2076 | 3560 | if (msp->ms_condensing) { |
4e21fd06 | 3561 | metaslab_trace_add(zal, mg, msp, asize, d, |
492f64e9 PD |
3562 | TRACE_CONDENSING, allocator); |
3563 | metaslab_passivate(msp, msp->ms_weight & | |
3564 | ~METASLAB_ACTIVE_MASK); | |
7a614407 GW |
3565 | mutex_exit(&msp->ms_lock); |
3566 | continue; | |
619f0976 GW |
3567 | } else if (msp->ms_initializing > 0) { |
3568 | metaslab_trace_add(zal, mg, msp, asize, d, | |
3569 | TRACE_INITIALIZING, allocator); | |
3570 | metaslab_passivate(msp, msp->ms_weight & | |
3571 | ~METASLAB_ACTIVE_MASK); | |
3572 | mutex_exit(&msp->ms_lock); | |
3573 | continue; | |
7a614407 GW |
3574 | } |
3575 | ||
4e21fd06 | 3576 | offset = metaslab_block_alloc(msp, asize, txg); |
492f64e9 | 3577 | metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator); |
4e21fd06 DB |
3578 | |
3579 | if (offset != -1ULL) { | |
3580 | /* Proactively passivate the metaslab, if needed */ | |
3581 | metaslab_segment_may_passivate(msp); | |
34dc7c2f | 3582 | break; |
4e21fd06 DB |
3583 | } |
3584 | next: | |
3585 | ASSERT(msp->ms_loaded); | |
3586 | ||
3587 | /* | |
3588 | * We were unable to allocate from this metaslab so determine | |
3589 | * a new weight for this metaslab. Now that we have loaded | |
3590 | * the metaslab we can provide a better hint to the metaslab | |
3591 | * selector. | |
3592 | * | |
3593 | * For space-based metaslabs, we use the maximum block size. | |
3594 | * This information is only available when the metaslab | |
3595 | * is loaded and is more accurate than the generic free | |
3596 | * space weight that was calculated by metaslab_weight(). | |
3597 | * This information allows us to quickly compare the maximum | |
3598 | * available allocation in the metaslab to the allocation | |
3599 | * size being requested. | |
3600 | * | |
3601 | * For segment-based metaslabs, determine the new weight | |
3602 | * based on the highest bucket in the range tree. We | |
3603 | * explicitly use the loaded segment weight (i.e. the range | |
3604 | * tree histogram) since it contains the space that is | |
3605 | * currently available for allocation and is accurate | |
3606 | * even within a sync pass. | |
3607 | */ | |
3608 | if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) { | |
3609 | uint64_t weight = metaslab_block_maxsize(msp); | |
3610 | WEIGHT_SET_SPACEBASED(weight); | |
3611 | metaslab_passivate(msp, weight); | |
3612 | } else { | |
3613 | metaslab_passivate(msp, | |
3614 | metaslab_weight_from_range_tree(msp)); | |
3615 | } | |
34dc7c2f | 3616 | |
4e21fd06 DB |
3617 | /* |
3618 | * We have just failed an allocation attempt; check |
3619 | * that metaslab_should_allocate() agrees. Otherwise, | |
3620 | * we may end up in an infinite loop retrying the same | |
3621 | * metaslab. | |
3622 | */ | |
3623 | ASSERT(!metaslab_should_allocate(msp, asize)); | |
cc99f275 | 3624 | |
34dc7c2f BB |
3625 | mutex_exit(&msp->ms_lock); |
3626 | } | |
4e21fd06 DB |
3627 | mutex_exit(&msp->ms_lock); |
3628 | kmem_free(search, sizeof (*search)); | |
3629 | return (offset); | |
3630 | } | |
34dc7c2f | 3631 | |
4e21fd06 DB |
3632 | static uint64_t |
3633 | metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal, | |
cc99f275 DB |
3634 | uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, |
3635 | int d, int allocator) | |
4e21fd06 DB |
3636 | { |
3637 | uint64_t offset; | |
3638 | ASSERT(mg->mg_initialized); | |
34dc7c2f | 3639 | |
cc99f275 DB |
3640 | offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique, |
3641 | dva, d, allocator); | |
34dc7c2f | 3642 | |
4e21fd06 DB |
3643 | mutex_enter(&mg->mg_lock); |
3644 | if (offset == -1ULL) { | |
3645 | mg->mg_failed_allocations++; | |
3646 | metaslab_trace_add(zal, mg, NULL, asize, d, | |
492f64e9 | 3647 | TRACE_GROUP_FAILURE, allocator); |
4e21fd06 DB |
3648 | if (asize == SPA_GANGBLOCKSIZE) { |
3649 | /* | |
3650 | * This metaslab group was unable to allocate | |
3651 | * the minimum gang block size so it must be out of | |
3652 | * space. We must notify the allocation throttle | |
3653 | * to start skipping allocation attempts to this | |
3654 | * metaslab group until more space becomes available. | |
3655 | * Note: this failure cannot be caused by the | |
3656 | * allocation throttle since the allocation throttle | |
3657 | * is only responsible for skipping devices and | |
3658 | * not failing block allocations. | |
3659 | */ | |
3660 | mg->mg_no_free_space = B_TRUE; | |
3661 | } | |
3662 | } | |
3663 | mg->mg_allocations++; | |
3664 | mutex_exit(&mg->mg_lock); | |
34dc7c2f BB |
3665 | return (offset); |
3666 | } | |
3667 | ||
3668 | /* | |
3669 | * Allocate a block for the specified i/o. | |
3670 | */ | |
a1d477c2 | 3671 | int |
34dc7c2f | 3672 | metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, |
4e21fd06 | 3673 | dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags, |
492f64e9 | 3674 | zio_alloc_list_t *zal, int allocator) |
34dc7c2f | 3675 | { |
920dd524 | 3676 | metaslab_group_t *mg, *fast_mg, *rotor; |
34dc7c2f | 3677 | vdev_t *vd; |
4e21fd06 | 3678 | boolean_t try_hard = B_FALSE; |
34dc7c2f BB |
3679 | |
3680 | ASSERT(!DVA_IS_VALID(&dva[d])); | |
3681 | ||
3682 | /* | |
3683 | * For testing, make some blocks above a certain size be gang blocks. | |
09b85f2d BB |
3684 | * This will result in more split blocks when using device removal, |
3685 | * and a large number of split blocks coupled with ztest-induced | |
3686 | * damage can result in extremely long reconstruction times. This | |
3687 | * will also test spilling from special to normal. | |
34dc7c2f | 3688 | */ |
09b85f2d | 3689 | if (psize >= metaslab_force_ganging && (spa_get_random(100) < 3)) { |
492f64e9 PD |
3690 | metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG, |
3691 | allocator); | |
2e528b49 | 3692 | return (SET_ERROR(ENOSPC)); |
4e21fd06 | 3693 | } |
34dc7c2f BB |
3694 | |
3695 | /* | |
3696 | * Start at the rotor and loop through all mgs until we find something. | |
428870ff | 3697 | * Note that there's no locking on mc_rotor or mc_aliquot because |
34dc7c2f BB |
3698 | * nothing actually breaks if we miss a few updates -- we just won't |
3699 | * allocate quite as evenly. It all balances out over time. | |
3700 | * | |
3701 | * If we are doing ditto or log blocks, try to spread them across | |
3702 | * consecutive vdevs. If we're forced to reuse a vdev before we've | |
3703 | * allocated all of our ditto blocks, then try to spread them out on |
3704 | * that vdev as much as possible. If it turns out to not be possible, | |
3705 | * gradually lower our standards until anything becomes acceptable. | |
3706 | * Also, allocating on consecutive vdevs (as opposed to random vdevs) | |
3707 | * gives us hope of containing our fault domains to something we're | |
3708 | * able to reason about. Otherwise, any two top-level vdev failures | |
3709 | * will guarantee the loss of data. With consecutive allocation, | |
3710 | * only two adjacent top-level vdev failures will result in data loss. | |
3711 | * | |
3712 | * If we are doing gang blocks (hintdva is non-NULL), try to keep | |
3713 | * ourselves on the same vdev as our gang block header. That | |
3714 | * way, we can hope for locality in vdev_cache, plus it makes our | |
3715 | * fault domains something tractable. | |
3716 | */ | |
3717 | if (hintdva) { | |
3718 | vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d])); | |
428870ff BB |
3719 | |
3720 | /* | |
3721 | * It's possible the vdev we're using as the hint no | |
a1d477c2 MA |
3722 | * longer exists or its mg has been closed (e.g. by |
3723 | * device removal). Consult the rotor when | |
428870ff BB |
3724 | * all else fails. |
3725 | */ | |
a1d477c2 | 3726 | if (vd != NULL && vd->vdev_mg != NULL) { |
34dc7c2f | 3727 | mg = vd->vdev_mg; |
428870ff BB |
3728 | |
3729 | if (flags & METASLAB_HINTBP_AVOID && | |
3730 | mg->mg_next != NULL) | |
3731 | mg = mg->mg_next; | |
3732 | } else { | |
3733 | mg = mc->mc_rotor; | |
3734 | } | |
34dc7c2f BB |
3735 | } else if (d != 0) { |
3736 | vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1])); | |
3737 | mg = vd->vdev_mg->mg_next; | |
920dd524 ED |
3738 | } else if (flags & METASLAB_FASTWRITE) { |
3739 | mg = fast_mg = mc->mc_rotor; | |
3740 | ||
3741 | do { | |
3742 | if (fast_mg->mg_vd->vdev_pending_fastwrite < | |
3743 | mg->mg_vd->vdev_pending_fastwrite) | |
3744 | mg = fast_mg; | |
3745 | } while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor); | |
3746 | ||
34dc7c2f | 3747 | } else { |
cc99f275 | 3748 | ASSERT(mc->mc_rotor != NULL); |
34dc7c2f BB |
3749 | mg = mc->mc_rotor; |
3750 | } | |
3751 | ||
3752 | /* | |
428870ff BB |
3753 | * If the hint put us into the wrong metaslab class, or into a |
3754 | * metaslab group that has been passivated, just follow the rotor. | |
34dc7c2f | 3755 | */ |
428870ff | 3756 | if (mg->mg_class != mc || mg->mg_activation_count <= 0) |
34dc7c2f BB |
3757 | mg = mc->mc_rotor; |
3758 | ||
3759 | rotor = mg; | |
3760 | top: | |
34dc7c2f | 3761 | do { |
4e21fd06 | 3762 | boolean_t allocatable; |
428870ff | 3763 | |
3dfb57a3 | 3764 | ASSERT(mg->mg_activation_count == 1); |
34dc7c2f | 3765 | vd = mg->mg_vd; |
fb5f0bc8 | 3766 | |
34dc7c2f | 3767 | /* |
b128c09f | 3768 | * Don't allocate from faulted devices. |
34dc7c2f | 3769 | */ |
4e21fd06 | 3770 | if (try_hard) { |
fb5f0bc8 BB |
3771 | spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER); |
3772 | allocatable = vdev_allocatable(vd); | |
3773 | spa_config_exit(spa, SCL_ZIO, FTAG); | |
3774 | } else { | |
3775 | allocatable = vdev_allocatable(vd); | |
3776 | } | |
ac72fac3 GW |
3777 | |
3778 | /* | |
3779 | * Determine if the selected metaslab group is eligible | |
3dfb57a3 DB |
3780 | * for allocations. If we're ganging then don't allow |
3781 | * this metaslab group to skip allocations since that would | |
3782 | * inadvertently return ENOSPC and suspend the pool | |
ac72fac3 GW |
3783 | * even though space is still available. |
3784 | */ | |
4e21fd06 | 3785 | if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) { |
3dfb57a3 | 3786 | allocatable = metaslab_group_allocatable(mg, rotor, |
c197a77c | 3787 | psize, allocator, d); |
3dfb57a3 | 3788 | } |
ac72fac3 | 3789 | |
4e21fd06 DB |
3790 | if (!allocatable) { |
3791 | metaslab_trace_add(zal, mg, NULL, psize, d, | |
492f64e9 | 3792 | TRACE_NOT_ALLOCATABLE, allocator); |
34dc7c2f | 3793 | goto next; |
4e21fd06 | 3794 | } |
fb5f0bc8 | 3795 | |
3dfb57a3 DB |
3796 | ASSERT(mg->mg_initialized); |
3797 | ||
34dc7c2f | 3798 | /* |
4e21fd06 DB |
3799 | * Avoid writing single-copy data to a failing, |
3800 | * non-redundant vdev, unless we've already tried all | |
3801 | * other vdevs. | |
34dc7c2f BB |
3802 | */ |
3803 | if ((vd->vdev_stat.vs_write_errors > 0 || | |
3804 | vd->vdev_state < VDEV_STATE_HEALTHY) && | |
4e21fd06 DB |
3805 | d == 0 && !try_hard && vd->vdev_children == 0) { |
3806 | metaslab_trace_add(zal, mg, NULL, psize, d, | |
492f64e9 | 3807 | TRACE_VDEV_ERROR, allocator); |
34dc7c2f BB |
3808 | goto next; |
3809 | } | |
3810 | ||
3811 | ASSERT(mg->mg_class == mc); | |
3812 | ||
1c27024e | 3813 | uint64_t asize = vdev_psize_to_asize(vd, psize); |
34dc7c2f BB |
3814 | ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0); |
3815 | ||
cc99f275 DB |
3816 | /* |
3817 | * If we don't need to try hard, then require that the | |
3818 | * block be on a different metaslab from any other DVAs |
3819 | * in this BP (unique=true). If we are trying hard, then | |
3820 | * allow any metaslab to be used (unique=false). | |
3821 | */ | |
1c27024e | 3822 | uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg, |
cc99f275 | 3823 | !try_hard, dva, d, allocator); |
3dfb57a3 | 3824 | |
34dc7c2f BB |
3825 | if (offset != -1ULL) { |
3826 | /* | |
3827 | * If we've just selected this metaslab group, | |
3828 | * figure out whether the corresponding vdev is | |
3829 | * over- or under-used relative to the pool, | |
3830 | * and set an allocation bias to even it out. | |
bb3250d0 ED |
3831 | * |
3832 | * Bias is also used to compensate for unequally | |
3833 | * sized vdevs so that space is allocated fairly. | |
34dc7c2f | 3834 | */ |
f3a7f661 | 3835 | if (mc->mc_aliquot == 0 && metaslab_bias_enabled) { |
34dc7c2f | 3836 | vdev_stat_t *vs = &vd->vdev_stat; |
bb3250d0 ED |
3837 | int64_t vs_free = vs->vs_space - vs->vs_alloc; |
3838 | int64_t mc_free = mc->mc_space - mc->mc_alloc; | |
3839 | int64_t ratio; | |
34dc7c2f BB |
3840 | |
3841 | /* | |
6d974228 GW |
3842 | * Calculate how much more or less we should |
3843 | * try to allocate from this device during | |
3844 | * this iteration around the rotor. | |
6d974228 | 3845 | * |
bb3250d0 ED |
3846 | * This basically introduces a zero-centered |
3847 | * bias towards the devices with the most | |
3848 | * free space, while compensating for vdev | |
3849 | * size differences. | |
3850 | * | |
3851 | * Examples: | |
3852 | * vdev V1 = 16M/128M | |
3853 | * vdev V2 = 16M/128M | |
3854 | * ratio(V1) = 100% ratio(V2) = 100% | |
3855 | * | |
3856 | * vdev V1 = 16M/128M | |
3857 | * vdev V2 = 64M/128M | |
3858 | * ratio(V1) = 127% ratio(V2) = 72% | |
6d974228 | 3859 | * |
bb3250d0 ED |
3860 | * vdev V1 = 16M/128M |
3861 | * vdev V2 = 64M/512M | |
3862 | * ratio(V1) = 40% ratio(V2) = 160% | |
34dc7c2f | 3863 | */ |
bb3250d0 ED |
3864 | ratio = (vs_free * mc->mc_alloc_groups * 100) / |
3865 | (mc_free + 1); | |
3866 | mg->mg_bias = ((ratio - 100) * | |
6d974228 | 3867 | (int64_t)mg->mg_aliquot) / 100; |
f3a7f661 GW |
3868 | } else if (!metaslab_bias_enabled) { |
3869 | mg->mg_bias = 0; | |
34dc7c2f BB |
3870 | } |
3871 | ||
920dd524 ED |
3872 | if ((flags & METASLAB_FASTWRITE) || |
3873 | atomic_add_64_nv(&mc->mc_aliquot, asize) >= | |
34dc7c2f BB |
3874 | mg->mg_aliquot + mg->mg_bias) { |
3875 | mc->mc_rotor = mg->mg_next; | |
428870ff | 3876 | mc->mc_aliquot = 0; |
34dc7c2f BB |
3877 | } |
3878 | ||
3879 | DVA_SET_VDEV(&dva[d], vd->vdev_id); | |
3880 | DVA_SET_OFFSET(&dva[d], offset); | |
e3e7cf60 D |
3881 | DVA_SET_GANG(&dva[d], |
3882 | ((flags & METASLAB_GANG_HEADER) ? 1 : 0)); | |
34dc7c2f BB |
3883 | DVA_SET_ASIZE(&dva[d], asize); |
3884 | ||
920dd524 ED |
3885 | if (flags & METASLAB_FASTWRITE) { |
3886 | atomic_add_64(&vd->vdev_pending_fastwrite, | |
3887 | psize); | |
920dd524 ED |
3888 | } |
3889 | ||
34dc7c2f BB |
3890 | return (0); |
3891 | } | |
3892 | next: | |
3893 | mc->mc_rotor = mg->mg_next; | |
428870ff | 3894 | mc->mc_aliquot = 0; |
34dc7c2f BB |
3895 | } while ((mg = mg->mg_next) != rotor); |
3896 | ||
4e21fd06 DB |
3897 | /* |
3898 | * If we haven't tried hard, do so now. | |
3899 | */ | |
3900 | if (!try_hard) { | |
3901 | try_hard = B_TRUE; | |
fb5f0bc8 BB |
3902 | goto top; |
3903 | } | |
3904 | ||
34dc7c2f BB |
3905 | bzero(&dva[d], sizeof (dva_t)); |
3906 | ||
492f64e9 | 3907 | metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator); |
2e528b49 | 3908 | return (SET_ERROR(ENOSPC)); |
34dc7c2f BB |
3909 | } |
3910 | ||
a1d477c2 MA |
3911 | void |
3912 | metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize, | |
d2734cce | 3913 | boolean_t checkpoint) |
a1d477c2 MA |
3914 | { |
3915 | metaslab_t *msp; | |
d2734cce | 3916 | spa_t *spa = vd->vdev_spa; |
a1d477c2 | 3917 | |
a1d477c2 MA |
3918 | ASSERT(vdev_is_concrete(vd)); |
3919 | ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); | |
3920 | ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count); | |
3921 | ||
3922 | msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; | |
3923 | ||
3924 | VERIFY(!msp->ms_condensing); | |
3925 | VERIFY3U(offset, >=, msp->ms_start); | |
3926 | VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size); | |
3927 | VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); | |
3928 | VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift)); | |
3929 | ||
3930 | metaslab_check_free_impl(vd, offset, asize); | |
d2734cce | 3931 | |
a1d477c2 | 3932 | mutex_enter(&msp->ms_lock); |
d2734cce SD |
3933 | if (range_tree_is_empty(msp->ms_freeing) && |
3934 | range_tree_is_empty(msp->ms_checkpointing)) { | |
3935 | vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa)); | |
3936 | } | |
3937 | ||
3938 | if (checkpoint) { | |
3939 | ASSERT(spa_has_checkpoint(spa)); | |
3940 | range_tree_add(msp->ms_checkpointing, offset, asize); | |
3941 | } else { | |
3942 | range_tree_add(msp->ms_freeing, offset, asize); | |
a1d477c2 | 3943 | } |
a1d477c2 MA |
3944 | mutex_exit(&msp->ms_lock); |
3945 | } | |
3946 | ||
3947 | /* ARGSUSED */ | |
3948 | void | |
3949 | metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, | |
3950 | uint64_t size, void *arg) | |
3951 | { | |
d2734cce SD |
3952 | boolean_t *checkpoint = arg; |
3953 | ||
3954 | ASSERT3P(checkpoint, !=, NULL); | |
a1d477c2 MA |
3955 | |
3956 | if (vd->vdev_ops->vdev_op_remap != NULL) | |
d2734cce | 3957 | vdev_indirect_mark_obsolete(vd, offset, size); |
a1d477c2 | 3958 | else |
d2734cce | 3959 | metaslab_free_impl(vd, offset, size, *checkpoint); |
a1d477c2 MA |
3960 | } |
3961 | ||
3962 | static void | |
3963 | metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size, | |
d2734cce | 3964 | boolean_t checkpoint) |
a1d477c2 MA |
3965 | { |
3966 | spa_t *spa = vd->vdev_spa; | |
3967 | ||
3968 | ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); | |
3969 | ||
d2734cce | 3970 | if (spa_syncing_txg(spa) > spa_freeze_txg(spa)) |
a1d477c2 MA |
3971 | return; |
3972 | ||
3973 | if (spa->spa_vdev_removal != NULL && | |
9e052db4 | 3974 | spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id && |
a1d477c2 MA |
3975 | vdev_is_concrete(vd)) { |
3976 | /* | |
3977 | * Note: we check if the vdev is concrete because when | |
3978 | * we complete the removal, we first change the vdev to be | |
3979 | * an indirect vdev (in open context), and then (in syncing | |
3980 | * context) clear spa_vdev_removal. | |
3981 | */ | |
d2734cce | 3982 | free_from_removing_vdev(vd, offset, size); |
a1d477c2 | 3983 | } else if (vd->vdev_ops->vdev_op_remap != NULL) { |
d2734cce | 3984 | vdev_indirect_mark_obsolete(vd, offset, size); |
a1d477c2 | 3985 | vd->vdev_ops->vdev_op_remap(vd, offset, size, |
d2734cce | 3986 | metaslab_free_impl_cb, &checkpoint); |
a1d477c2 | 3987 | } else { |
d2734cce | 3988 | metaslab_free_concrete(vd, offset, size, checkpoint); |
a1d477c2 MA |
3989 | } |
3990 | } | |
3991 | ||
3992 | typedef struct remap_blkptr_cb_arg { | |
3993 | blkptr_t *rbca_bp; | |
3994 | spa_remap_cb_t rbca_cb; | |
3995 | vdev_t *rbca_remap_vd; | |
3996 | uint64_t rbca_remap_offset; | |
3997 | void *rbca_cb_arg; | |
3998 | } remap_blkptr_cb_arg_t; | |
3999 | ||
4000 | void | |
4001 | remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, | |
4002 | uint64_t size, void *arg) | |
4003 | { | |
4004 | remap_blkptr_cb_arg_t *rbca = arg; | |
4005 | blkptr_t *bp = rbca->rbca_bp; | |
4006 | ||
4007 | /* We cannot remap split blocks. */ |
4008 | if (size != DVA_GET_ASIZE(&bp->blk_dva[0])) | |
4009 | return; | |
4010 | ASSERT0(inner_offset); | |
4011 | ||
4012 | if (rbca->rbca_cb != NULL) { | |
4013 | /* | |
4014 | * At this point we know that we are not handling split | |
4015 | * blocks and we invoke the callback on the previous | |
4016 | * vdev which must be indirect. | |
4017 | */ | |
4018 | ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops); | |
4019 | ||
4020 | rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id, | |
4021 | rbca->rbca_remap_offset, size, rbca->rbca_cb_arg); | |
4022 | ||
4023 | /* set up remap_blkptr_cb_arg for the next call */ | |
4024 | rbca->rbca_remap_vd = vd; | |
4025 | rbca->rbca_remap_offset = offset; | |
4026 | } | |
4027 | ||
4028 | /* | |
4029 | * The phys birth time is that of dva[0]. This ensures that we know | |
4030 | * when each dva was written, so that resilver can determine which | |
4031 | * blocks need to be scrubbed (i.e. those written during the time | |
4032 | * the vdev was offline). It also ensures that the key used in | |
4033 | * the ARC hash table is unique (i.e. dva[0] + phys_birth). If | |
4034 | * we didn't change the phys_birth, a lookup in the ARC for a | |
4035 | * remapped BP could find the data that was previously stored at | |
4036 | * this vdev + offset. | |
4037 | */ | |
4038 | vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa, | |
4039 | DVA_GET_VDEV(&bp->blk_dva[0])); | |
4040 | vdev_indirect_births_t *vib = oldvd->vdev_indirect_births; | |
4041 | bp->blk_phys_birth = vdev_indirect_births_physbirth(vib, | |
4042 | DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0])); | |
4043 | ||
4044 | DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id); | |
4045 | DVA_SET_OFFSET(&bp->blk_dva[0], offset); | |
4046 | } | |
4047 | ||
34dc7c2f | 4048 | /* |
a1d477c2 MA |
4049 | * If the block pointer contains any indirect DVAs, modify them to refer to |
4050 | * concrete DVAs. Note that this will sometimes not be possible, leaving | |
4051 | * the indirect DVA in place. This happens if the indirect DVA spans multiple | |
4052 | * segments in the mapping (i.e. it is a "split block"). | |
4053 | * | |
4054 | * If the BP was remapped, calls the callback on the original dva (note the | |
4055 | * callback can be called multiple times if the original indirect DVA refers | |
4056 | * to another indirect DVA, etc). | |
4057 | * | |
4058 | * Returns TRUE if the BP was remapped. | |
34dc7c2f | 4059 | */ |
a1d477c2 MA |
4060 | boolean_t |
4061 | spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg) | |
34dc7c2f | 4062 | { |
a1d477c2 MA |
4063 | remap_blkptr_cb_arg_t rbca; |
4064 | ||
4065 | if (!zfs_remap_blkptr_enable) | |
4066 | return (B_FALSE); | |
4067 | ||
4068 | if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) | |
4069 | return (B_FALSE); | |
4070 | ||
4071 | /* | |
4072 | * Dedup BPs cannot be remapped, because ddt_phys_select() depends |
4073 | * on DVA[0] being the same in the BP as in the DDT (dedup table). | |
4074 | */ | |
4075 | if (BP_GET_DEDUP(bp)) | |
4076 | return (B_FALSE); | |
4077 | ||
4078 | /* | |
4079 | * Gang blocks cannot be remapped, because |
4080 | * zio_checksum_gang_verifier() depends on the DVA[0] that's in | |
4081 | * the BP used to read the gang block header (GBH) being the same | |
4082 | * as the DVA[0] that we allocated for the GBH. | |
4083 | */ | |
4084 | if (BP_IS_GANG(bp)) | |
4085 | return (B_FALSE); | |
4086 | ||
4087 | /* | |
4088 | * Embedded BPs have no DVA to remap. |
4089 | */ | |
4090 | if (BP_GET_NDVAS(bp) < 1) | |
4091 | return (B_FALSE); | |
4092 | ||
4093 | /* | |
4094 | * Note: we only remap dva[0]. If we remapped other dvas, we | |
4095 | * would no longer know what their phys birth txg is. | |
4096 | */ | |
4097 | dva_t *dva = &bp->blk_dva[0]; | |
4098 | ||
34dc7c2f BB |
4099 | uint64_t offset = DVA_GET_OFFSET(dva); |
4100 | uint64_t size = DVA_GET_ASIZE(dva); | |
a1d477c2 MA |
4101 | vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); |
4102 | ||
4103 | if (vd->vdev_ops->vdev_op_remap == NULL) | |
4104 | return (B_FALSE); | |
4105 | ||
4106 | rbca.rbca_bp = bp; | |
4107 | rbca.rbca_cb = callback; | |
4108 | rbca.rbca_remap_vd = vd; | |
4109 | rbca.rbca_remap_offset = offset; | |
4110 | rbca.rbca_cb_arg = arg; | |
4111 | ||
4112 | /* | |
4113 | * remap_blkptr_cb() will be called in order for each level of | |
4114 | * indirection, until a concrete vdev is reached or a split block is | |
4115 | * encountered. rbca_remap_vd and rbca_remap_offset are updated within |
4116 | * the callback as we go from one indirect vdev to the next (either |
4117 | * concrete or indirect again) in that order. |
4118 | */ | |
4119 | vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca); | |
4120 | ||
4121 | /* Check if the DVA wasn't remapped because it is a split block */ | |
4122 | if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id) | |
4123 | return (B_FALSE); | |
4124 | ||
4125 | return (B_TRUE); | |
4126 | } | |
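
The block comment before spa_remap_blkptr() explains that remap_blkptr_cb() fires once per level of indirection until a concrete vdev (or a split block) stops the walk. The following is a rough, self-contained illustration of that walk (editor's sketch, not the ZFS API); the toy_vdev type and the fixed per-level target offsets are hypothetical stand-ins for the real indirect-mapping structures.

/* Hypothetical stand-in for the indirect-mapping walk; not the ZFS API. */
#include <stdint.h>
#include <stdio.h>

struct toy_vdev {
	int		is_concrete;	/* 1 = concrete, 0 = indirect */
	int		next_id;	/* vdev the mapping points at */
	uint64_t	next_offset;	/* remapped offset on next_id */
};

/*
 * Follow single-segment mappings from (vdev_id, offset) until a concrete
 * vdev is reached, analogous to repeated remap_blkptr_cb() invocations
 * rewriting DVA[0] at each level of indirection.
 */
static void
toy_remap(struct toy_vdev *vdevs, int vdev_id, uint64_t offset)
{
	while (!vdevs[vdev_id].is_concrete) {
		printf("indirect vdev %d, offset %llu -> vdev %d, offset %llu\n",
		    vdev_id, (unsigned long long)offset,
		    vdevs[vdev_id].next_id,
		    (unsigned long long)vdevs[vdev_id].next_offset);
		offset = vdevs[vdev_id].next_offset;
		vdev_id = vdevs[vdev_id].next_id;
	}
	printf("concrete vdev %d, offset %llu\n",
	    vdev_id, (unsigned long long)offset);
}

int
main(void)
{
	/* vdevs 0 and 1 are indirect (removed devices), vdev 2 is concrete. */
	struct toy_vdev vdevs[3] = {
		{ 0, 1, 4096 },		/* vdev 0 maps into vdev 1 */
		{ 0, 2, 8192 },		/* vdev 1 maps into vdev 2 */
		{ 1, 0, 0 },		/* vdev 2 is concrete */
	};

	toy_remap(vdevs, 0, 1024);
	return (0);
}
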
4127 | ||
4128 | /* | |
4129 | * Undo the allocation of a DVA which happened in the given transaction group. | |
4130 | */ | |
4131 | void | |
4132 | metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg) | |
4133 | { | |
34dc7c2f | 4134 | metaslab_t *msp; |
a1d477c2 MA |
4135 | vdev_t *vd; |
4136 | uint64_t vdev = DVA_GET_VDEV(dva); | |
4137 | uint64_t offset = DVA_GET_OFFSET(dva); | |
4138 | uint64_t size = DVA_GET_ASIZE(dva); | |
4139 | ||
4140 | ASSERT(DVA_IS_VALID(dva)); | |
4141 | ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); | |
34dc7c2f | 4142 | |
34dc7c2f BB |
4143 | if (txg > spa_freeze_txg(spa)) |
4144 | return; | |
4145 | ||
7d2868d5 | 4146 | if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) || |
34dc7c2f | 4147 | (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) { |
7d2868d5 BB |
4148 | zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu", |
4149 | (u_longlong_t)vdev, (u_longlong_t)offset, | |
4150 | (u_longlong_t)size); | |
34dc7c2f BB |
4151 | return; |
4152 | } | |
4153 | ||
a1d477c2 MA |
4154 | ASSERT(!vd->vdev_removing); |
4155 | ASSERT(vdev_is_concrete(vd)); | |
4156 | ASSERT0(vd->vdev_indirect_config.vic_mapping_object); | |
4157 | ASSERT3P(vd->vdev_indirect_mapping, ==, NULL); | |
34dc7c2f BB |
4158 | |
4159 | if (DVA_GET_GANG(dva)) | |
4160 | size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); | |
4161 | ||
a1d477c2 | 4162 | msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; |
93cf2076 | 4163 | |
a1d477c2 | 4164 | mutex_enter(&msp->ms_lock); |
d2734cce | 4165 | range_tree_remove(msp->ms_allocating[txg & TXG_MASK], |
a1d477c2 | 4166 | offset, size); |
34dc7c2f | 4167 | |
a1d477c2 MA |
4168 | VERIFY(!msp->ms_condensing); |
4169 | VERIFY3U(offset, >=, msp->ms_start); | |
4170 | VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size); | |
d2734cce | 4171 | VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=, |
a1d477c2 MA |
4172 | msp->ms_size); |
4173 | VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); | |
4174 | VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); | |
d2734cce | 4175 | range_tree_add(msp->ms_allocatable, offset, size); |
34dc7c2f BB |
4176 | mutex_exit(&msp->ms_lock); |
4177 | } | |
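
metaslab_unalloc_dva() (like metaslab_free_concrete() and metaslab_claim_concrete() below) verifies that the offset and size have zero "phase" with respect to 1 << vdev_ashift, i.e. that both are multiples of the device's minimum allocation size. Here is a small worked example, assuming the usual mask-based definition of P2PHASE() from the illumos/SPL headers (an assumption on this editor's part).

/* Worked example of the power-of-two phase check used in the VERIFYs above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* P2PHASE as commonly defined in the illumos/SPL headers (assumption). */
#define	P2PHASE(x, align)	((x) & ((align) - 1))

int
main(void)
{
	uint64_t ashift = 12;			/* 4 KiB minimum block size */
	uint64_t align = 1ULL << ashift;

	/* A properly aligned offset has phase 0 ... */
	assert(P2PHASE((uint64_t)0x40000, align) == 0);

	/* ... while a misaligned one does not. */
	assert(P2PHASE((uint64_t)0x40200, align) == 0x200);

	printf("phase of 0x40200 within %llu-byte blocks: 0x%llx\n",
	    (unsigned long long)align,
	    (unsigned long long)P2PHASE((uint64_t)0x40200, align));
	return (0);
}
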
4178 | ||
4179 | /* | |
d2734cce | 4180 | * Free the block represented by the given DVA. |
34dc7c2f | 4181 | */ |
a1d477c2 | 4182 | void |
d2734cce | 4183 | metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint) |
34dc7c2f BB |
4184 | { |
4185 | uint64_t vdev = DVA_GET_VDEV(dva); | |
4186 | uint64_t offset = DVA_GET_OFFSET(dva); | |
4187 | uint64_t size = DVA_GET_ASIZE(dva); | |
a1d477c2 | 4188 | vdev_t *vd = vdev_lookup_top(spa, vdev); |
34dc7c2f BB |
4189 | |
4190 | ASSERT(DVA_IS_VALID(dva)); | |
a1d477c2 | 4191 | ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); |
34dc7c2f | 4192 | |
a1d477c2 | 4193 | if (DVA_GET_GANG(dva)) { |
34dc7c2f | 4194 | size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); |
34dc7c2f BB |
4195 | } |
4196 | ||
d2734cce | 4197 | metaslab_free_impl(vd, offset, size, checkpoint); |
34dc7c2f BB |
4198 | } |
4199 | ||
3dfb57a3 DB |
4200 | /* |
4201 | * Reserve some allocation slots. A reservation must be made before we |
4202 | * call into the allocator. If there aren't any available slots |
4203 | * then the I/O will be throttled until an I/O completes and its slots are | |
4204 | * freed up. The function returns true if it was successful in placing | |
4205 | * the reservation. | |
4206 | */ | |
4207 | boolean_t | |
492f64e9 PD |
4208 | metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator, |
4209 | zio_t *zio, int flags) | |
3dfb57a3 DB |
4210 | { |
4211 | uint64_t available_slots = 0; | |
3dfb57a3 | 4212 | boolean_t slot_reserved = B_FALSE; |
492f64e9 | 4213 | uint64_t max = mc->mc_alloc_max_slots[allocator]; |
3dfb57a3 DB |
4214 | |
4215 | ASSERT(mc->mc_alloc_throttle_enabled); | |
4216 | mutex_enter(&mc->mc_lock); | |
4217 | ||
492f64e9 | 4218 | uint64_t reserved_slots = |
424fd7c3 | 4219 | zfs_refcount_count(&mc->mc_alloc_slots[allocator]); |
492f64e9 PD |
4220 | if (reserved_slots < max) |
4221 | available_slots = max - reserved_slots; | |
3dfb57a3 | 4222 | |
cc99f275 DB |
4223 | if (slots <= available_slots || GANG_ALLOCATION(flags) || |
4224 | flags & METASLAB_MUST_RESERVE) { | |
3dfb57a3 DB |
4225 | /* |
4226 | * We reserve the slots individually so that we can unreserve | |
4227 | * them individually when an I/O completes. | |
4228 | */ | |
1c27024e | 4229 | for (int d = 0; d < slots; d++) { |
492f64e9 | 4230 | reserved_slots = |
c13060e4 | 4231 | zfs_refcount_add(&mc->mc_alloc_slots[allocator], |
492f64e9 | 4232 | zio); |
3dfb57a3 DB |
4233 | } |
4234 | zio->io_flags |= ZIO_FLAG_IO_ALLOCATING; | |
4235 | slot_reserved = B_TRUE; | |
4236 | } | |
4237 | ||
4238 | mutex_exit(&mc->mc_lock); | |
4239 | return (slot_reserved); | |
4240 | } | |
4241 | ||
4242 | void | |
492f64e9 PD |
4243 | metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, |
4244 | int allocator, zio_t *zio) | |
3dfb57a3 | 4245 | { |
3dfb57a3 DB |
4246 | ASSERT(mc->mc_alloc_throttle_enabled); |
4247 | mutex_enter(&mc->mc_lock); | |
1c27024e | 4248 | for (int d = 0; d < slots; d++) { |
424fd7c3 | 4249 | (void) zfs_refcount_remove(&mc->mc_alloc_slots[allocator], |
492f64e9 | 4250 | zio); |
3dfb57a3 DB |
4251 | } |
4252 | mutex_exit(&mc->mc_lock); | |
4253 | } | |
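
The reserve/unreserve pair above implements the allocation throttle: each zio holds slots against mc_alloc_max_slots for its allocator, and gang or must-reserve allocations are allowed to overshoot the limit. Below is a minimal userspace sketch of the same slot-counting idea (editor's illustration, all names hypothetical), with a plain counter and a pthread mutex standing in for the refcount and mc_lock.

/* Minimal sketch of slot-based allocation throttling (hypothetical names). */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct throttle {
	pthread_mutex_t	lock;		/* stands in for mc_lock */
	uint64_t	max_slots;	/* stands in for mc_alloc_max_slots */
	uint64_t	reserved;	/* stands in for the slot refcount */
};

/* Try to reserve 'slots'; callers that fail are expected to wait and retry. */
static bool
throttle_reserve(struct throttle *t, uint64_t slots, bool must_reserve)
{
	bool ok = false;

	pthread_mutex_lock(&t->lock);
	if (must_reserve || t->reserved + slots <= t->max_slots) {
		t->reserved += slots;
		ok = true;
	}
	pthread_mutex_unlock(&t->lock);
	return (ok);
}

/* Release slots when the corresponding I/O completes. */
static void
throttle_unreserve(struct throttle *t, uint64_t slots)
{
	pthread_mutex_lock(&t->lock);
	t->reserved -= slots;
	pthread_mutex_unlock(&t->lock);
}

int
main(void)
{
	struct throttle t = { PTHREAD_MUTEX_INITIALIZER, 4, 0 };

	printf("reserve 3: %d\n", throttle_reserve(&t, 3, false));	/* 1 */
	printf("reserve 2: %d\n", throttle_reserve(&t, 2, false));	/* 0 */
	printf("forced reserve 2: %d\n", throttle_reserve(&t, 2, true)); /* 1 */
	throttle_unreserve(&t, 5);
	return (0);
}

The "must_reserve" path models the GANG_ALLOCATION()/METASLAB_MUST_RESERVE case above, where refusing the reservation could deadlock an allocation that is already in progress.
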
4254 | ||
a1d477c2 MA |
4255 | static int |
4256 | metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size, | |
4257 | uint64_t txg) | |
4258 | { | |
4259 | metaslab_t *msp; | |
4260 | spa_t *spa = vd->vdev_spa; | |
4261 | int error = 0; | |
4262 | ||
4263 | if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count) | |
7ab96299 | 4264 | return (SET_ERROR(ENXIO)); |
a1d477c2 MA |
4265 | |
4266 | ASSERT3P(vd->vdev_ms, !=, NULL); | |
4267 | msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; | |
4268 | ||
4269 | mutex_enter(&msp->ms_lock); | |
4270 | ||
7ab96299 | 4271 | if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) { |
492f64e9 | 4272 | error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM); |
7ab96299 TC |
4273 | if (error == EBUSY) { |
4274 | ASSERT(msp->ms_loaded); | |
4275 | ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); | |
4276 | error = 0; | |
4277 | } | |
4278 | } | |
a1d477c2 | 4279 | |
d2734cce SD |
4280 | if (error == 0 && |
4281 | !range_tree_contains(msp->ms_allocatable, offset, size)) | |
a1d477c2 MA |
4282 | error = SET_ERROR(ENOENT); |
4283 | ||
4284 | if (error || txg == 0) { /* txg == 0 indicates dry run */ | |
4285 | mutex_exit(&msp->ms_lock); | |
4286 | return (error); | |
4287 | } | |
4288 | ||
4289 | VERIFY(!msp->ms_condensing); | |
4290 | VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); | |
4291 | VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); | |
d2734cce SD |
4292 | VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=, |
4293 | msp->ms_size); | |
4294 | range_tree_remove(msp->ms_allocatable, offset, size); | |
a1d477c2 MA |
4295 | |
4296 | if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */ | |
d2734cce | 4297 | if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) |
a1d477c2 | 4298 | vdev_dirty(vd, VDD_METASLAB, msp, txg); |
d2734cce SD |
4299 | range_tree_add(msp->ms_allocating[txg & TXG_MASK], |
4300 | offset, size); | |
a1d477c2 MA |
4301 | } |
4302 | ||
4303 | mutex_exit(&msp->ms_lock); | |
4304 | ||
4305 | return (0); | |
4306 | } | |
4307 | ||
4308 | typedef struct metaslab_claim_cb_arg_t { | |
4309 | uint64_t mcca_txg; | |
4310 | int mcca_error; | |
4311 | } metaslab_claim_cb_arg_t; | |
4312 | ||
4313 | /* ARGSUSED */ | |
4314 | static void | |
4315 | metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, | |
4316 | uint64_t size, void *arg) | |
4317 | { | |
4318 | metaslab_claim_cb_arg_t *mcca_arg = arg; | |
4319 | ||
4320 | if (mcca_arg->mcca_error == 0) { | |
4321 | mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset, | |
4322 | size, mcca_arg->mcca_txg); | |
4323 | } | |
4324 | } | |
4325 | ||
4326 | int | |
4327 | metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg) | |
4328 | { | |
4329 | if (vd->vdev_ops->vdev_op_remap != NULL) { | |
4330 | metaslab_claim_cb_arg_t arg; | |
4331 | ||
4332 | /* | |
4333 | * Only zdb(1M) can claim on indirect vdevs. This is used | |
4334 | * to detect leaks of mapped space (that are not accounted | |
4335 | * for in the obsolete counts, spacemap, or bpobj). | |
4336 | */ | |
4337 | ASSERT(!spa_writeable(vd->vdev_spa)); | |
4338 | arg.mcca_error = 0; | |
4339 | arg.mcca_txg = txg; | |
4340 | ||
4341 | vd->vdev_ops->vdev_op_remap(vd, offset, size, | |
4342 | metaslab_claim_impl_cb, &arg); | |
4343 | ||
4344 | if (arg.mcca_error == 0) { | |
4345 | arg.mcca_error = metaslab_claim_concrete(vd, | |
4346 | offset, size, txg); | |
4347 | } | |
4348 | return (arg.mcca_error); | |
4349 | } else { | |
4350 | return (metaslab_claim_concrete(vd, offset, size, txg)); | |
4351 | } | |
4352 | } | |
4353 | ||
4354 | /* | |
4355 | * Intent log support: upon opening the pool after a crash, notify the SPA | |
4356 | * of blocks that the intent log has allocated for immediate write, but | |
4357 | * which are still considered free by the SPA because the last transaction | |
4358 | * group didn't commit yet. | |
4359 | */ | |
4360 | static int | |
4361 | metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) | |
4362 | { | |
4363 | uint64_t vdev = DVA_GET_VDEV(dva); | |
4364 | uint64_t offset = DVA_GET_OFFSET(dva); | |
4365 | uint64_t size = DVA_GET_ASIZE(dva); | |
4366 | vdev_t *vd; | |
4367 | ||
4368 | if ((vd = vdev_lookup_top(spa, vdev)) == NULL) { | |
4369 | return (SET_ERROR(ENXIO)); | |
4370 | } | |
4371 | ||
4372 | ASSERT(DVA_IS_VALID(dva)); | |
4373 | ||
4374 | if (DVA_GET_GANG(dva)) | |
4375 | size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); | |
4376 | ||
4377 | return (metaslab_claim_impl(vd, offset, size, txg)); | |
4378 | } | |
4379 | ||
34dc7c2f BB |
4380 | int |
4381 | metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, | |
4e21fd06 | 4382 | int ndvas, uint64_t txg, blkptr_t *hintbp, int flags, |
492f64e9 | 4383 | zio_alloc_list_t *zal, zio_t *zio, int allocator) |
34dc7c2f BB |
4384 | { |
4385 | dva_t *dva = bp->blk_dva; | |
928e8ad4 | 4386 | dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL; |
1c27024e | 4387 | int error = 0; |
34dc7c2f | 4388 | |
b128c09f | 4389 | ASSERT(bp->blk_birth == 0); |
428870ff | 4390 | ASSERT(BP_PHYSICAL_BIRTH(bp) == 0); |
b128c09f BB |
4391 | |
4392 | spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); | |
4393 | ||
4394 | if (mc->mc_rotor == NULL) { /* no vdevs in this class */ | |
4395 | spa_config_exit(spa, SCL_ALLOC, FTAG); | |
2e528b49 | 4396 | return (SET_ERROR(ENOSPC)); |
b128c09f | 4397 | } |
34dc7c2f BB |
4398 | |
4399 | ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa)); | |
4400 | ASSERT(BP_GET_NDVAS(bp) == 0); | |
4401 | ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); | |
4e21fd06 | 4402 | ASSERT3P(zal, !=, NULL); |
34dc7c2f | 4403 | |
1c27024e | 4404 | for (int d = 0; d < ndvas; d++) { |
34dc7c2f | 4405 | error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, |
492f64e9 | 4406 | txg, flags, zal, allocator); |
93cf2076 | 4407 | if (error != 0) { |
34dc7c2f | 4408 | for (d--; d >= 0; d--) { |
a1d477c2 | 4409 | metaslab_unalloc_dva(spa, &dva[d], txg); |
3dfb57a3 | 4410 | metaslab_group_alloc_decrement(spa, |
492f64e9 PD |
4411 | DVA_GET_VDEV(&dva[d]), zio, flags, |
4412 | allocator, B_FALSE); | |
34dc7c2f BB |
4413 | bzero(&dva[d], sizeof (dva_t)); |
4414 | } | |
b128c09f | 4415 | spa_config_exit(spa, SCL_ALLOC, FTAG); |
34dc7c2f | 4416 | return (error); |
3dfb57a3 DB |
4417 | } else { |
4418 | /* | |
4419 | * Update the metaslab group's queue depth | |
4420 | * based on the newly allocated dva. | |
4421 | */ | |
4422 | metaslab_group_alloc_increment(spa, | |
492f64e9 | 4423 | DVA_GET_VDEV(&dva[d]), zio, flags, allocator); |
34dc7c2f | 4424 | } |
3dfb57a3 | 4425 | |
34dc7c2f BB |
4426 | } |
4427 | ASSERT(error == 0); | |
4428 | ASSERT(BP_GET_NDVAS(bp) == ndvas); | |
4429 | ||
b128c09f BB |
4430 | spa_config_exit(spa, SCL_ALLOC, FTAG); |
4431 | ||
efe7978d | 4432 | BP_SET_BIRTH(bp, txg, 0); |
b128c09f | 4433 | |
34dc7c2f BB |
4434 | return (0); |
4435 | } | |
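
metaslab_alloc() places the requested DVAs one at a time and, on any failure, unwinds the DVAs it already allocated before returning the error, so a block pointer is never left partially filled. A compact standalone sketch of that allocate-or-roll-back loop follows (editor's illustration, with hypothetical toy_* helpers in place of metaslab_alloc_dva()/metaslab_unalloc_dva()).

/* Sketch of the allocate-all-or-unwind pattern used for multi-DVA writes. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a per-DVA allocation. */
static int
toy_alloc_dva(int d, uint64_t *out)
{
	if (d == 2)			/* pretend the third copy fails */
		return (ENOSPC);
	*out = 0x1000ULL * (d + 1);
	return (0);
}

/* Hypothetical stand-in for undoing a single allocation. */
static void
toy_unalloc_dva(uint64_t dva)
{
	printf("rolled back allocation at 0x%llx\n", (unsigned long long)dva);
}

static int
toy_alloc(uint64_t *dvas, int ndvas)
{
	for (int d = 0; d < ndvas; d++) {
		int error = toy_alloc_dva(d, &dvas[d]);
		if (error != 0) {
			/* Undo everything allocated so far, then fail. */
			for (d--; d >= 0; d--)
				toy_unalloc_dva(dvas[d]);
			return (error);
		}
	}
	return (0);
}

int
main(void)
{
	uint64_t dvas[3];

	printf("toy_alloc returned %d\n", toy_alloc(dvas, 3));
	return (0);
}
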
4436 | ||
4437 | void | |
4438 | metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) | |
4439 | { | |
4440 | const dva_t *dva = bp->blk_dva; | |
1c27024e | 4441 | int ndvas = BP_GET_NDVAS(bp); |
34dc7c2f BB |
4442 | |
4443 | ASSERT(!BP_IS_HOLE(bp)); | |
428870ff | 4444 | ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa)); |
b128c09f | 4445 | |
d2734cce SD |
4446 | /* |
4447 | * If we have a checkpoint for the pool we need to make sure that | |
4448 | * the blocks that we free that are part of the checkpoint won't be | |
4449 | * reused until the checkpoint is discarded or we revert to it. | |
4450 | * | |
4451 | * The checkpoint flag is passed down the metaslab_free code path | |
4452 | * and is set whenever we want to add a block to the checkpoint's | |
4453 | * accounting. That is, we "checkpoint" blocks that existed at the | |
4454 | * time the checkpoint was created and are therefore referenced by | |
4455 | * the checkpointed uberblock. | |
4456 | * | |
4457 | * Note that we don't checkpoint any blocks if the current |
4458 | * syncing txg <= spa_checkpoint_txg. We want these frees to sync | |
4459 | * normally as they will be referenced by the checkpointed uberblock. | |
4460 | */ | |
4461 | boolean_t checkpoint = B_FALSE; | |
4462 | if (bp->blk_birth <= spa->spa_checkpoint_txg && | |
4463 | spa_syncing_txg(spa) > spa->spa_checkpoint_txg) { | |
4464 | /* | |
4465 | * At this point, if the block is part of the checkpoint | |
4466 | * there is no way it was created in the current txg. | |
4467 | */ | |
4468 | ASSERT(!now); | |
4469 | ASSERT3U(spa_syncing_txg(spa), ==, txg); | |
4470 | checkpoint = B_TRUE; | |
4471 | } | |
4472 | ||
b128c09f | 4473 | spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); |
34dc7c2f | 4474 | |
a1d477c2 MA |
4475 | for (int d = 0; d < ndvas; d++) { |
4476 | if (now) { | |
4477 | metaslab_unalloc_dva(spa, &dva[d], txg); | |
4478 | } else { | |
d2734cce SD |
4479 | ASSERT3U(txg, ==, spa_syncing_txg(spa)); |
4480 | metaslab_free_dva(spa, &dva[d], checkpoint); | |
a1d477c2 MA |
4481 | } |
4482 | } | |
b128c09f BB |
4483 | |
4484 | spa_config_exit(spa, SCL_FREE, FTAG); | |
34dc7c2f BB |
4485 | } |
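
The checkpoint logic in metaslab_free() reduces to a small predicate: a freed block is added to the checkpoint's accounting only if it was born at or before spa_checkpoint_txg and the currently syncing txg is already past that checkpoint txg. Here is a standalone sketch of just that decision (editor's illustration), assuming the three txg values are available as plain integers.

/* Sketch of the "should this free be checkpointed?" predicate. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
free_goes_to_checkpoint(uint64_t blk_birth, uint64_t checkpoint_txg,
    uint64_t syncing_txg)
{
	/*
	 * Blocks born at or before the checkpoint, and freed after the
	 * checkpointed txg has synced, must be kept for the checkpoint.
	 */
	return (blk_birth <= checkpoint_txg && syncing_txg > checkpoint_txg);
}

int
main(void)
{
	/* Checkpoint taken at txg 100; we are now syncing txg 120. */
	printf("block born at txg 90:  %d\n",
	    free_goes_to_checkpoint(90, 100, 120));	/* 1: checkpointed */
	printf("block born at txg 110: %d\n",
	    free_goes_to_checkpoint(110, 100, 120));	/* 0: freed normally */
	return (0);
}
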
4486 | ||
4487 | int | |
4488 | metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) | |
4489 | { | |
4490 | const dva_t *dva = bp->blk_dva; | |
4491 | int ndvas = BP_GET_NDVAS(bp); | |
1c27024e | 4492 | int error = 0; |
34dc7c2f BB |
4493 | |
4494 | ASSERT(!BP_IS_HOLE(bp)); | |
4495 | ||
b128c09f BB |
4496 | if (txg != 0) { |
4497 | /* | |
4498 | * First do a dry run to make sure all DVAs are claimable, | |
4499 | * so we don't have to unwind from partial failures below. | |
4500 | */ | |
4501 | if ((error = metaslab_claim(spa, bp, 0)) != 0) | |
4502 | return (error); | |
4503 | } | |
4504 | ||
4505 | spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); | |
4506 | ||
cc99f275 DB |
4507 | for (int d = 0; d < ndvas; d++) { |
4508 | error = metaslab_claim_dva(spa, &dva[d], txg); | |
4509 | if (error != 0) | |
b128c09f | 4510 | break; |
cc99f275 | 4511 | } |
b128c09f BB |
4512 | |
4513 | spa_config_exit(spa, SCL_ALLOC, FTAG); | |
4514 | ||
4515 | ASSERT(error == 0 || txg == 0); | |
34dc7c2f | 4516 | |
b128c09f | 4517 | return (error); |
34dc7c2f | 4518 | } |
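
metaslab_claim() avoids having to unwind partial failures by first calling itself with txg == 0, which exercises every DVA as a dry run, and only then claiming for real. A minimal sketch of that validate-then-commit pattern follows (editor's illustration, with hypothetical toy_* helpers).

/* Sketch of the dry-run-then-commit pattern used by metaslab_claim(). */
#include <errno.h>
#include <stdio.h>

/* Hypothetical per-item claim: txg == 0 means "check only, change nothing". */
static int
toy_claim_one(int item, unsigned long long txg)
{
	if (item < 0)
		return (ENOENT);
	if (txg != 0)
		printf("claimed item %d in txg %llu\n", item, txg);
	return (0);
}

static int
toy_claim_all(const int *items, int n, unsigned long long txg)
{
	/* Dry run first so the real pass cannot fail partway through. */
	if (txg != 0) {
		int error = toy_claim_all(items, n, 0);
		if (error != 0)
			return (error);
	}
	for (int i = 0; i < n; i++) {
		int error = toy_claim_one(items[i], txg);
		if (error != 0)
			return (error);
	}
	return (0);
}

int
main(void)
{
	int good[] = { 1, 2, 3 };
	int bad[] = { 1, -1, 3 };

	printf("good set: %d\n", toy_claim_all(good, 3, 42));	/* 0 */
	printf("bad set:  %d\n", toy_claim_all(bad, 3, 42));	/* ENOENT, nothing claimed */
	return (0);
}
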
920dd524 | 4519 | |
d1d7e268 MK |
4520 | void |
4521 | metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp) | |
920dd524 ED |
4522 | { |
4523 | const dva_t *dva = bp->blk_dva; | |
4524 | int ndvas = BP_GET_NDVAS(bp); | |
4525 | uint64_t psize = BP_GET_PSIZE(bp); | |
4526 | int d; | |
4527 | vdev_t *vd; | |
4528 | ||
4529 | ASSERT(!BP_IS_HOLE(bp)); | |
9b67f605 | 4530 | ASSERT(!BP_IS_EMBEDDED(bp)); |
920dd524 ED |
4531 | ASSERT(psize > 0); |
4532 | ||
4533 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
4534 | ||
4535 | for (d = 0; d < ndvas; d++) { | |
4536 | if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) | |
4537 | continue; | |
4538 | atomic_add_64(&vd->vdev_pending_fastwrite, psize); | |
4539 | } | |
4540 | ||
4541 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
4542 | } | |
4543 | ||
d1d7e268 MK |
4544 | void |
4545 | metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp) | |
920dd524 ED |
4546 | { |
4547 | const dva_t *dva = bp->blk_dva; | |
4548 | int ndvas = BP_GET_NDVAS(bp); | |
4549 | uint64_t psize = BP_GET_PSIZE(bp); | |
4550 | int d; | |
4551 | vdev_t *vd; | |
4552 | ||
4553 | ASSERT(!BP_IS_HOLE(bp)); | |
9b67f605 | 4554 | ASSERT(!BP_IS_EMBEDDED(bp)); |
920dd524 ED |
4555 | ASSERT(psize > 0); |
4556 | ||
4557 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
4558 | ||
4559 | for (d = 0; d < ndvas; d++) { | |
4560 | if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) | |
4561 | continue; | |
4562 | ASSERT3U(vd->vdev_pending_fastwrite, >=, psize); | |
4563 | atomic_sub_64(&vd->vdev_pending_fastwrite, psize); | |
4564 | } | |
4565 | ||
4566 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
4567 | } | |
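
metaslab_fastwrite_mark()/_unmark() keep a per-vdev count of bytes with outstanding fastwrite (ZIL) allocations using atomic add/subtract while holding only the SCL_VDEV reader lock. Below is a standalone sketch of that counter pattern using C11 atomics (editor's illustration, names hypothetical).

/* Standalone sketch (not ZFS code) of atomic pending-write accounting. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct toy_vdev {
	_Atomic uint64_t pending_fastwrite;	/* bytes marked, not yet unmarked */
};

static void
toy_fastwrite_mark(struct toy_vdev *vd, uint64_t psize)
{
	atomic_fetch_add(&vd->pending_fastwrite, psize);
}

static void
toy_fastwrite_unmark(struct toy_vdev *vd, uint64_t psize)
{
	/* Callers must never unmark more bytes than they marked. */
	atomic_fetch_sub(&vd->pending_fastwrite, psize);
}

int
main(void)
{
	struct toy_vdev vd;

	atomic_init(&vd.pending_fastwrite, 0);
	toy_fastwrite_mark(&vd, 128 * 1024);
	toy_fastwrite_mark(&vd, 64 * 1024);
	toy_fastwrite_unmark(&vd, 128 * 1024);
	printf("pending fastwrite bytes: %llu\n",
	    (unsigned long long)atomic_load(&vd.pending_fastwrite));
	return (0);
}
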
30b92c1d | 4568 | |
a1d477c2 MA |
4569 | /* ARGSUSED */ |
4570 | static void | |
4571 | metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset, | |
4572 | uint64_t size, void *arg) | |
4573 | { | |
4574 | if (vd->vdev_ops == &vdev_indirect_ops) | |
4575 | return; | |
4576 | ||
4577 | metaslab_check_free_impl(vd, offset, size); | |
4578 | } | |
4579 | ||
4580 | static void | |
4581 | metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size) | |
4582 | { | |
4583 | metaslab_t *msp; | |
4584 | ASSERTV(spa_t *spa = vd->vdev_spa); | |
4585 | ||
4586 | if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) | |
4587 | return; | |
4588 | ||
4589 | if (vd->vdev_ops->vdev_op_remap != NULL) { | |
4590 | vd->vdev_ops->vdev_op_remap(vd, offset, size, | |
4591 | metaslab_check_free_impl_cb, NULL); | |
4592 | return; | |
4593 | } | |
4594 | ||
4595 | ASSERT(vdev_is_concrete(vd)); | |
4596 | ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count); | |
4597 | ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); | |
4598 | ||
4599 | msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; | |
4600 | ||
4601 | mutex_enter(&msp->ms_lock); | |
df72b8be SD |
4602 | if (msp->ms_loaded) { |
4603 | range_tree_verify_not_present(msp->ms_allocatable, | |
4604 | offset, size); | |
4605 | } | |
a1d477c2 | 4606 | |
df72b8be SD |
4607 | range_tree_verify_not_present(msp->ms_freeing, offset, size); |
4608 | range_tree_verify_not_present(msp->ms_checkpointing, offset, size); | |
4609 | range_tree_verify_not_present(msp->ms_freed, offset, size); | |
a1d477c2 | 4610 | for (int j = 0; j < TXG_DEFER_SIZE; j++) |
df72b8be | 4611 | range_tree_verify_not_present(msp->ms_defer[j], offset, size); |
a1d477c2 MA |
4612 | mutex_exit(&msp->ms_lock); |
4613 | } | |
4614 | ||
13fe0198 MA |
4615 | void |
4616 | metaslab_check_free(spa_t *spa, const blkptr_t *bp) | |
4617 | { | |
13fe0198 MA |
4618 | if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) |
4619 | return; | |
4620 | ||
4621 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
1c27024e | 4622 | for (int i = 0; i < BP_GET_NDVAS(bp); i++) { |
93cf2076 GW |
4623 | uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]); |
4624 | vdev_t *vd = vdev_lookup_top(spa, vdev); | |
4625 | uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]); | |
13fe0198 | 4626 | uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]); |
13fe0198 | 4627 | |
a1d477c2 MA |
4628 | if (DVA_GET_GANG(&bp->blk_dva[i])) |
4629 | size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); | |
4630 | ||
4631 | ASSERT3P(vd, !=, NULL); | |
13fe0198 | 4632 | |
a1d477c2 | 4633 | metaslab_check_free_impl(vd, offset, size); |
13fe0198 MA |
4634 | } |
4635 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
4636 | } | |
4637 | ||
93ce2b4c | 4638 | #if defined(_KERNEL) |
cc99f275 | 4639 | /* BEGIN CSTYLED */ |
99b14de4 | 4640 | module_param(metaslab_aliquot, ulong, 0644); |
99b14de4 ED |
4641 | MODULE_PARM_DESC(metaslab_aliquot, |
4642 | "allocation granularity (a.k.a. stripe size)"); | |
02730c33 BB |
4643 | |
4644 | module_param(metaslab_debug_load, int, 0644); | |
93cf2076 GW |
4645 | MODULE_PARM_DESC(metaslab_debug_load, |
4646 | "load all metaslabs when pool is first opened"); | |
02730c33 BB |
4647 | |
4648 | module_param(metaslab_debug_unload, int, 0644); | |
1ce04573 BB |
4649 | MODULE_PARM_DESC(metaslab_debug_unload, |
4650 | "prevent metaslabs from being unloaded"); | |
02730c33 BB |
4651 | |
4652 | module_param(metaslab_preload_enabled, int, 0644); | |
f3a7f661 GW |
4653 | MODULE_PARM_DESC(metaslab_preload_enabled, |
4654 | "preload potential metaslabs during reassessment"); | |
f4a4046b | 4655 | |
02730c33 | 4656 | module_param(zfs_mg_noalloc_threshold, int, 0644); |
f4a4046b TC |
4657 | MODULE_PARM_DESC(zfs_mg_noalloc_threshold, |
4658 | "percentage of free space for metaslab group to allow allocation"); | |
02730c33 BB |
4659 | |
4660 | module_param(zfs_mg_fragmentation_threshold, int, 0644); | |
f3a7f661 GW |
4661 | MODULE_PARM_DESC(zfs_mg_fragmentation_threshold, |
4662 | "fragmentation for metaslab group to allow allocation"); | |
4663 | ||
02730c33 | 4664 | module_param(zfs_metaslab_fragmentation_threshold, int, 0644); |
f3a7f661 GW |
4665 | MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold, |
4666 | "fragmentation for metaslab to allow allocation"); | |
02730c33 BB |
4667 | |
4668 | module_param(metaslab_fragmentation_factor_enabled, int, 0644); | |
f3a7f661 GW |
4669 | MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled, |
4670 | "use the fragmentation metric to prefer less fragmented metaslabs"); | |
02730c33 BB |
4671 | |
4672 | module_param(metaslab_lba_weighting_enabled, int, 0644); | |
f3a7f661 GW |
4673 | MODULE_PARM_DESC(metaslab_lba_weighting_enabled, |
4674 | "prefer metaslabs with lower LBAs"); | |
02730c33 BB |
4675 | |
4676 | module_param(metaslab_bias_enabled, int, 0644); | |
f3a7f661 GW |
4677 | MODULE_PARM_DESC(metaslab_bias_enabled, |
4678 | "enable metaslab group biasing"); | |
4e21fd06 DB |
4679 | |
4680 | module_param(zfs_metaslab_segment_weight_enabled, int, 0644); | |
4681 | MODULE_PARM_DESC(zfs_metaslab_segment_weight_enabled, | |
4682 | "enable segment-based metaslab selection"); | |
4683 | ||
4684 | module_param(zfs_metaslab_switch_threshold, int, 0644); | |
4685 | MODULE_PARM_DESC(zfs_metaslab_switch_threshold, | |
4686 | "segment-based metaslab selection maximum buckets before switching"); | |
a1d477c2 | 4687 | |
d830d479 MA |
4688 | module_param(metaslab_force_ganging, ulong, 0644); |
4689 | MODULE_PARM_DESC(metaslab_force_ganging, | |
a1d477c2 | 4690 | "blocks larger than this size are forced to be gang blocks"); |
cc99f275 DB |
4691 | /* END CSTYLED */ |
4692 | ||
93ce2b4c | 4693 | #endif |