/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>

#define	WITH_DF_BLOCK_ALLOCATOR

#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))

/*
 * Metaslab granularity, in bytes. This is roughly similar to what would be
 * referred to as the "stripe size" in traditional RAID arrays. In normal
 * operation, we will try to write this amount of data to a top-level vdev
 * before moving on to the next one.
 */
unsigned long metaslab_aliquot = 512 << 10;

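/*
 * For example (illustrative): with the default 512K aliquot and three
 * top-level vdevs, the allocator rotor directs roughly 512K of writes
 * to vdev 0, then 512K to vdev 1, then 512K to vdev 2 before wrapping
 * around, approximating coarse-grained RAID-0 style striping.
 */
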
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;

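/*
 * For example (illustrative): with the default of 200, a space map is
 * only condensed once its on-disk representation is at least twice the
 * size of its in-core representation, e.g. ~80K on disk vs. ~40K in
 * core.
 */
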
/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
int zfs_metaslab_condense_block_threshold = 4;

/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or equal to
 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
 * then it will be skipped unless all metaslab groups within the metaslab
 * class have also crossed this threshold.
 */
int zfs_mg_fragmentation_threshold = 85;

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;

/*
 * When set, we will load all metaslabs when the pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set, we will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;

/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;

/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;

/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;

/*
 * Enable/disable preloading of metaslabs.
 */
int metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
int metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
int metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
int metaslab_bias_enabled = B_TRUE;

/*
 * Enable/disable segment-based metaslab selection.
 */
int zfs_metaslab_segment_weight_enabled = B_TRUE;

/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
int zfs_metaslab_switch_threshold = 2;

/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
#ifdef _METASLAB_TRACING
boolean_t metaslab_trace_enabled = B_TRUE;
#endif

/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached allowing for further investigation.
 */
#ifdef _METASLAB_TRACING
uint64_t metaslab_trace_max_entries = 5000;
#endif

static uint64_t metaslab_weight(metaslab_t *);
static void metaslab_set_fragmentation(metaslab_t *);

#ifdef _METASLAB_TRACING
kmem_cache_t *metaslab_alloc_trace_cache;
#endif

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
	refcount_create_tracked(&mc->mc_alloc_slots);

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	refcount_destroy(&mc->mc_alloc_slots);
	mutex_destroy(&mc->mc_lock);
	kmem_free(mc, sizeof (metaslab_class_t));
}

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}

void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);

	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}

/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * Calculate if we have enough space to add additional
		 * metaslabs. We report the expandable space in terms
		 * of the metaslab size since that's the unit of expansion.
		 */
		space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
		    1ULL << tvd->vdev_ms_shift);
	}
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (space);
}

static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = (const metaslab_t *)x1;
	const metaslab_t *m2 = (const metaslab_t *)x2;

	int cmp = AVL_CMP(m2->ms_weight, m1->ms_weight);
	if (likely(cmp))
		return (cmp);

	IMPLY(AVL_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);

	return (AVL_CMP(m1->ms_start, m2->ms_start));
}

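/*
 * Illustrative note: the comparator above keeps each group's AVL tree
 * sorted by weight in descending order, breaking ties by ms_start in
 * ascending order, so avl_first() always yields the most desirable
 * metaslab. E.g. weights {7 @ 1G, 7 @ 2G, 3 @ 0} sort as
 * (7, 1G), (7, 2G), (3, 0).
 */
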
/*
 * Verify that the space accounting on disk matches the in-core range_trees.
 */
void
metaslab_verify_space(metaslab_t *msp, uint64_t txg)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	uint64_t allocated = 0;
	uint64_t sm_free_space, msp_free_space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
		return;

	/*
	 * We can only verify the metaslab space when we're called
	 * from syncing context with a loaded metaslab that has an allocated
	 * space map. Calling this in non-syncing context does not
	 * provide a consistent view of the metaslab since we're performing
	 * allocations in the future.
	 */
	if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
	    !msp->ms_loaded)
		return;

	sm_free_space = msp->ms_size - space_map_allocated(msp->ms_sm) -
	    space_map_alloc_delta(msp->ms_sm);

	/*
	 * Account for future allocations since we would have already
	 * deducted that space from the ms_freetree.
	 */
	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
		allocated +=
		    range_tree_space(msp->ms_alloctree[(txg + t) & TXG_MASK]);
	}

	msp_free_space = range_tree_space(msp->ms_tree) + allocated +
	    msp->ms_deferspace + range_tree_space(msp->ms_freedtree);

	VERIFY3U(sm_free_space, ==, msp_free_space);
}

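/*
 * Illustrative restatement of the identity verified above, in the
 * terms used by this file:
 *
 *	ms_size - sm_allocated - sm_alloc_delta ==
 *	    in-core free (ms_tree) + pending allocs (ms_alloctree[]) +
 *	    deferred frees (ms_deferspace) + this txg's frees (ms_freedtree)
 */
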
/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the free capacity is above
 * the zfs_mg_noalloc_threshold and the fragmentation value is less
 * than or equal to zfs_mg_fragmentation_threshold. If a metaslab group
 * transitions from allocatable to non-allocatable or vice versa then the
 * metaslab group's class is updated to reflect the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;
	boolean_t was_initialized;

	ASSERT(vd == vd->vdev_top);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;
	was_initialized = mg->mg_initialized;

	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);

	mutex_enter(&mc->mc_lock);

	/*
	 * If the metaslab group was just added then it won't
	 * have any space until we finish syncing out this txg.
	 * At that point we will consider it initialized and available
	 * for allocations. We also don't consider non-activated
	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
	 * to be initialized, because they can't be used for allocation.
	 */
	mg->mg_initialized = metaslab_group_initialized(mg);
	if (!was_initialized && mg->mg_initialized) {
		mc->mc_groups++;
	} else if (was_initialized && !mg->mg_initialized) {
		ASSERT3U(mc->mc_groups, >, 0);
		mc->mc_groups--;
	}
	if (mg->mg_initialized)
		mg->mg_no_free_space = B_FALSE;

	/*
	 * A metaslab group is considered allocatable if it has plenty
	 * of free space or is not heavily fragmented. We only take
	 * fragmentation into account if the metaslab group has a valid
	 * fragmentation metric (i.e. a value between 0 and 100).
	 */
	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

	/*
	 * The mc_alloc_groups maintains a count of the number of
	 * groups in this metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;
	mutex_exit(&mc->mc_lock);

	mutex_exit(&mg->mg_lock);
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;
	mg->mg_initialized = B_FALSE;
	mg->mg_no_free_space = B_TRUE;
	refcount_create_tracked(&mg->mg_alloc_queue_depth);

	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
	    maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	taskq_destroy(mg->mg_taskq);
	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	refcount_destroy(&mg->mg_alloc_queue_depth);
	kmem_free(mg, sizeof (metaslab_group_t));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	ASSERT(mc->mc_rotor != mg);
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
}

void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	if (--mg->mg_activation_count != 0) {
		ASSERT(mc->mc_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	taskq_wait_outstanding(mg->mg_taskq, 0);
	metaslab_group_alloc_update(mg);

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}

boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	vdev_stat_t *vs = &vd->vdev_stat;

	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}

uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
	return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
}

void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
	uint64_t *mg_hist;
	vdev_t *vd = mg->mg_vd;
	uint64_t ashift = vd->vdev_ashift;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
	    SPACE_MAP_HISTOGRAM_SIZE + ashift);

	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_sm == NULL)
			continue;

		for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
			mg_hist[i + ashift] +=
			    msp->ms_sm->sm_phys->smp_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);

	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		mg->mg_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}

void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		ASSERT3U(mg->mg_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		ASSERT3U(mc->mc_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);

		mg->mg_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	ASSERT(msp->ms_group == NULL);
	mutex_enter(&mg->mg_lock);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);

	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_add(mg, msp);
	mutex_exit(&msp->ms_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_remove(mg, msp);
	mutex_exit(&msp->ms_lock);

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 511].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

/*
 * Calculate the fragmentation for a given metaslab group. We can use
 * a simple average here since all metaslabs within the group must have
 * the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
 * group have a fragmentation metric.
 */
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	uint64_t fragmentation = 0;
	uint64_t valid_ms = 0;

	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
			continue;

		valid_ms++;
		fragmentation += msp->ms_fragmentation;
	}

	if (valid_ms <= vd->vdev_ms_count / 2)
		return (ZFS_FRAG_INVALID);

	fragmentation /= valid_ms;
	ASSERT3U(fragmentation, <=, 100);
	return (fragmentation);
}

/*
 * Determine if a given metaslab group should skip allocations. A metaslab
 * group should avoid allocations if its free capacity is less than the
 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
 * that can still handle allocations. If the allocation throttle is enabled
 * then we skip allocations to devices that have reached their maximum
 * allocation queue depth unless the selected metaslab group is the only
 * eligible group remaining.
 */
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
    uint64_t psize)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_class_t *mc = mg->mg_class;

	/*
	 * We can only consider skipping this metaslab group if it's
	 * in the normal metaslab class and there are other metaslab
	 * groups to select from. Otherwise, we always consider it eligible
	 * for allocations.
	 */
	if (mc != spa_normal_class(spa) || mc->mc_groups <= 1)
		return (B_TRUE);

	/*
	 * If the metaslab group's mg_allocatable flag is set (see comments
	 * in metaslab_group_alloc_update() for more information) and
	 * the allocation throttle is disabled then allow allocations to this
	 * device. However, if the allocation throttle is enabled then
	 * check if we have reached our allocation limit (mg_alloc_queue_depth)
	 * to determine if we should allow allocations to this metaslab group.
	 * If all metaslab groups are no longer considered allocatable
	 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
	 * gang block size then we allow allocations on this metaslab group
	 * regardless of the mg_allocatable or throttle settings.
	 */
	if (mg->mg_allocatable) {
		metaslab_group_t *mgp;
		int64_t qdepth;
		uint64_t qmax = mg->mg_max_alloc_queue_depth;

		if (!mc->mc_alloc_throttle_enabled)
			return (B_TRUE);

		/*
		 * If this metaslab group does not have any free space, then
		 * there is no point in looking further.
		 */
		if (mg->mg_no_free_space)
			return (B_FALSE);

		qdepth = refcount_count(&mg->mg_alloc_queue_depth);

		/*
		 * If this metaslab group is below its qmax or it's
		 * the only allocatable metaslab group, then attempt
		 * to allocate from it.
		 */
		if (qdepth < qmax || mc->mc_alloc_groups == 1)
			return (B_TRUE);
		ASSERT3U(mc->mc_alloc_groups, >, 1);

		/*
		 * Since this metaslab group is at or over its qmax, we
		 * need to determine if there are metaslab groups after this
		 * one that might be able to handle this allocation. This is
		 * racy since we can't hold the locks for all metaslab
		 * groups at the same time when we make this check.
		 */
		for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
			qmax = mgp->mg_max_alloc_queue_depth;

			qdepth = refcount_count(&mgp->mg_alloc_queue_depth);

			/*
			 * If there is another metaslab group that
			 * might be able to handle the allocation, then
			 * we return false so that we skip this group.
			 */
			if (qdepth < qmax && !mgp->mg_no_free_space)
				return (B_FALSE);
		}

		/*
		 * We didn't find another group to handle the allocation
		 * so we can't skip this metaslab group even though
		 * we are at or over our qmax.
		 */
		return (B_TRUE);

	} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * ==========================================================================
 * Range tree callbacks
 * ==========================================================================
 */

/*
 * Comparison function for the private size-ordered tree. Tree is sorted
 * by size, larger sizes at the end of the tree.
 */
static int
metaslab_rangesize_compare(const void *x1, const void *x2)
{
	const range_seg_t *r1 = x1;
	const range_seg_t *r2 = x2;
	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
	uint64_t rs_size2 = r2->rs_end - r2->rs_start;

	int cmp = AVL_CMP(rs_size1, rs_size2);
	if (likely(cmp))
		return (cmp);

	return (AVL_CMP(r1->rs_start, r2->rs_start));
}

/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */

/*
 * Return the maximum contiguous segment within the metaslab.
 */
uint64_t
metaslab_block_maxsize(metaslab_t *msp)
{
	avl_tree_t *t = &msp->ms_size_tree;
	range_seg_t *rs;

	if (t == NULL || (rs = avl_last(t)) == NULL)
		return (0ULL);

	return (rs->rs_end - rs->rs_start);
}

static range_seg_t *
metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
{
	range_seg_t *rs, rsearch;
	avl_index_t where;

	rsearch.rs_start = start;
	rsearch.rs_end = start + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL) {
		rs = avl_nearest(t, where, AVL_AFTER);
	}

	return (rs);
}

#if defined(WITH_FF_BLOCK_ALLOCATOR) || \
    defined(WITH_DF_BLOCK_ALLOCATOR) || \
    defined(WITH_CF_BLOCK_ALLOCATOR)
/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate. This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
	range_seg_t *rs = metaslab_block_find(t, *cursor, size);

	while (rs != NULL) {
		uint64_t offset = P2ROUNDUP(rs->rs_start, align);

		if (offset + size <= rs->rs_end) {
			*cursor = offset + size;
			return (offset);
		}
		rs = AVL_NEXT(t, rs);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_block_picker(t, cursor, size, align));
}
#endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */

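/*
 * Hypothetical usage sketch (for illustration only): an allocator
 * typically calls metaslab_block_picker() with a per-alignment cursor,
 * e.g.:
 *
 *	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
 *	uint64_t offset = metaslab_block_picker(&msp->ms_tree->rt_root,
 *	    cursor, size, align);
 *	if (offset == -1ULL)
 *		... no segment large enough; fail or gang ...
 *
 * Because the cursor persists across calls, successive allocations of
 * the same alignment walk forward through the metaslab.
 */
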
#if defined(WITH_FF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static uint64_t
metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not prevent other allocation sizes from
	 * existing in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	avl_tree_t *t = &msp->ms_tree->rt_root;

	return (metaslab_block_picker(t, cursor, size, align));
}

static metaslab_ops_t metaslab_ff_ops = {
	metaslab_ff_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
#endif /* WITH_FF_BLOCK_ALLOCATOR */

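/*
 * Worked example (illustrative): size & -size isolates the lowest set
 * bit, so a 24K (0x6000) request yields align = 0x2000 (8K). The
 * allocation therefore uses the 8K cursor bucket and is rounded up to
 * an 8K boundary within the segment found.
 */
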
#if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first fit allocation scheme until space gets low and then
 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not prevent other allocation sizes from
	 * existing in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	range_tree_t *rt = msp->ms_tree;
	avl_tree_t *t = &rt->rt_root;
	uint64_t max_size = metaslab_block_maxsize(msp);
	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));

	if (max_size < size)
		return (-1ULL);

	/*
	 * If we're running low on space switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		t = &msp->ms_size_tree;
		*cursor = 0;
	}

	return (metaslab_block_picker(t, cursor, size, 1ULL));
}

static metaslab_ops_t metaslab_df_ops = {
	metaslab_df_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
#endif /* WITH_DF_BLOCK_ALLOCATOR */

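/*
 * Worked example (illustrative): with the defaults above, a metaslab
 * whose largest free segment has dropped below 128K
 * (SPA_OLD_MAXBLOCKSIZE), or whose free space is under 4% of its size,
 * stops the offset-ordered first-fit walk and instead takes a best fit
 * from the size-sorted tree, trading allocation speed for tighter
 * packing.
 */
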
#if defined(WITH_CF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Cursor fit block allocator -
 * Select the largest region in the metaslab, set the cursor to the beginning
 * of the range and the cursor_end to the end of the range. As allocations
 * are made advance the cursor. Continue allocating from the cursor until
 * the range is exhausted and then find a new range.
 * ==========================================================================
 */
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
	range_tree_t *rt = msp->ms_tree;
	avl_tree_t *t = &msp->ms_size_tree;
	uint64_t *cursor = &msp->ms_lbas[0];
	uint64_t *cursor_end = &msp->ms_lbas[1];
	uint64_t offset = 0;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));

	ASSERT3U(*cursor_end, >=, *cursor);

	if ((*cursor + size) > *cursor_end) {
		range_seg_t *rs;

		rs = avl_last(&msp->ms_size_tree);
		if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
			return (-1ULL);

		*cursor = rs->rs_start;
		*cursor_end = rs->rs_end;
	}

	offset = *cursor;
	*cursor += size;

	return (offset);
}

static metaslab_ops_t metaslab_cf_ops = {
	metaslab_cf_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
#endif /* WITH_CF_BLOCK_ALLOCATOR */

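/*
 * Worked example (illustrative): if the largest free segment spans
 * [1M, 1M + 256K), the first 64K allocation sets *cursor = 1M,
 * returns 1M, and advances the cursor; the next two return 1M + 64K
 * and 1M + 128K. The largest segment is looked up again only once the
 * remainder cannot satisfy a request.
 */
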
#if defined(WITH_NDF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * New dynamic fit allocator -
 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
 * contiguous blocks. If no region is found then just use the largest segment
 * that remains.
 * ==========================================================================
 */

/*
 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
 * to request from the allocator.
 */
uint64_t metaslab_ndf_clump_shift = 4;

static uint64_t
metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
{
	avl_tree_t *t = &msp->ms_tree->rt_root;
	avl_index_t where;
	range_seg_t *rs, rsearch;
	uint64_t hbit = highbit64(size);
	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
	uint64_t max_size = metaslab_block_maxsize(msp);

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));

	if (max_size < size)
		return (-1ULL);

	rsearch.rs_start = *cursor;
	rsearch.rs_end = *cursor + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
		t = &msp->ms_size_tree;

		rsearch.rs_start = 0;
		rsearch.rs_end = MIN(max_size,
		    1ULL << (hbit + metaslab_ndf_clump_shift));
		rs = avl_find(t, &rsearch, &where);
		if (rs == NULL)
			rs = avl_nearest(t, where, AVL_AFTER);
		ASSERT(rs != NULL);
	}

	if ((rs->rs_end - rs->rs_start) >= size) {
		*cursor = rs->rs_start + size;
		return (rs->rs_start);
	}
	return (-1ULL);
}

static metaslab_ops_t metaslab_ndf_ops = {
	metaslab_ndf_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
#endif /* WITH_NDF_BLOCK_ALLOCATOR */

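/*
 * Worked example (illustrative): for an 8K request, highbit64(8K) = 14,
 * so on a cursor miss the fallback searches the size-sorted tree for a
 * segment of up to MIN(max_size, 1ULL << (14 + 4)) = 256K, i.e. a
 * clump of 2^metaslab_ndf_clump_shift = 16 blocks of 2^14 = 16K each.
 */
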
/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */

/*
 * Wait for any in-progress metaslab loads to complete.
 */
void
metaslab_load_wait(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	while (msp->ms_loading) {
		ASSERT(!msp->ms_loaded);
		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
	}
}

int
metaslab_load(metaslab_t *msp)
{
	int error = 0;
	boolean_t success = B_FALSE;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(!msp->ms_loaded);
	ASSERT(!msp->ms_loading);

	msp->ms_loading = B_TRUE;

	/*
	 * If the space map has not been allocated yet, then treat
	 * all the space in the metaslab as free and add it to the
	 * ms_tree.
	 */
	if (msp->ms_sm != NULL)
		error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
	else
		range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);

	success = (error == 0);
	msp->ms_loading = B_FALSE;

	if (success) {
		ASSERT3P(msp->ms_group, !=, NULL);
		msp->ms_loaded = B_TRUE;

		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			range_tree_walk(msp->ms_defertree[t],
			    range_tree_remove, msp->ms_tree);
		}
		msp->ms_max_size = metaslab_block_maxsize(msp);
	}
	cv_broadcast(&msp->ms_load_cv);
	return (error);
}

void
metaslab_unload(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	range_tree_vacate(msp->ms_tree, NULL, NULL);
	msp->ms_loaded = B_FALSE;
	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
	msp->ms_max_size = 0;
}

int
metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
    metaslab_t **msp)
{
	vdev_t *vd = mg->mg_vd;
	objset_t *mos = vd->vdev_spa->spa_meta_objset;
	metaslab_t *ms;
	int error;

	ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
	mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
	ms->ms_id = id;
	ms->ms_start = id << vd->vdev_ms_shift;
	ms->ms_size = 1ULL << vd->vdev_ms_shift;

	/*
	 * We only open space map objects that already exist. All others
	 * will be opened when we finally allocate an object for them.
	 */
	if (object != 0) {
		error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
		    ms->ms_size, vd->vdev_ashift, &ms->ms_lock);

		if (error != 0) {
			kmem_free(ms, sizeof (metaslab_t));
			return (error);
		}

		ASSERT(ms->ms_sm != NULL);
	}

	/*
	 * We create the main range tree here, but we don't create the
	 * other range trees until metaslab_sync_done(). This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	ms->ms_tree = range_tree_create_impl(&rt_avl_ops, &ms->ms_size_tree,
	    metaslab_rangesize_compare, &ms->ms_lock, 0);
	metaslab_group_add(mg, ms);

	metaslab_set_fragmentation(ms);

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 * The metaslab's weight will also be initialized when we sync
	 * out this txg. This ensures that we don't attempt to allocate
	 * from it before we have initialized it completely.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(ms, 0);

	/*
	 * If metaslab_debug_load is set and we're initializing a metaslab
	 * that has an allocated space map object then load its space
	 * map so that we can verify frees.
	 */
	if (metaslab_debug_load && ms->ms_sm != NULL) {
		mutex_enter(&ms->ms_lock);
		VERIFY0(metaslab_load(ms));
		mutex_exit(&ms->ms_lock);
	}

	if (txg != 0) {
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, ms, txg);
	}

	*msp = ms;

	return (0);
}

void
metaslab_fini(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);
	VERIFY(msp->ms_group == NULL);
	vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
	    0, -msp->ms_size);
	space_map_close(msp->ms_sm);

	metaslab_unload(msp);
	range_tree_destroy(msp->ms_tree);
	range_tree_destroy(msp->ms_freeingtree);
	range_tree_destroy(msp->ms_freedtree);

	for (int t = 0; t < TXG_SIZE; t++) {
		range_tree_destroy(msp->ms_alloctree[t]);
	}

	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
		range_tree_destroy(msp->ms_defertree[t]);
	}

	ASSERT0(msp->ms_deferspace);

	mutex_exit(&msp->ms_lock);
	cv_destroy(&msp->ms_load_cv);
	mutex_destroy(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
}

f3a7f661 GW |
1423 | #define FRAGMENTATION_TABLE_SIZE 17 |
1424 | ||
93cf2076 | 1425 | /* |
f3a7f661 GW |
1426 | * This table defines a segment size based fragmentation metric that will |
1427 | * allow each metaslab to derive its own fragmentation value. This is done | |
1428 | * by calculating the space in each bucket of the spacemap histogram and | |
1429 | * multiplying that by the fragmentation metric in this table. Doing | |
1430 | * this for all buckets and dividing it by the total amount of free | |
1431 | * space in this metaslab (i.e. the total free space in all buckets) gives | |
1432 | * us the fragmentation metric. This means that a high fragmentation metric | |
1433 | * equates to most of the free space being comprised of small segments. | |
1434 | * Conversely, if the metric is low, then most of the free space is in | |
1435 | * large segments. A 10% change in fragmentation equates to approximately | |
1436 | * double the number of segments. | |
93cf2076 | 1437 | * |
f3a7f661 GW |
1438 | * This table defines 0% fragmented space using 16MB segments. Testing has |
1439 | * shown that segments that are greater than or equal to 16MB do not suffer | |
1440 | * from drastic performance problems. Using this value, we derive the rest | |
1441 | * of the table. Since the fragmentation value is never stored on disk, it | |
1442 | * is possible to change these calculations in the future. | |
1443 | */ | |
1444 | int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = { | |
1445 | 100, /* 512B */ | |
1446 | 100, /* 1K */ | |
1447 | 98, /* 2K */ | |
1448 | 95, /* 4K */ | |
1449 | 90, /* 8K */ | |
1450 | 80, /* 16K */ | |
1451 | 70, /* 32K */ | |
1452 | 60, /* 64K */ | |
1453 | 50, /* 128K */ | |
1454 | 40, /* 256K */ | |
1455 | 30, /* 512K */ | |
1456 | 20, /* 1M */ | |
1457 | 15, /* 2M */ | |
1458 | 10, /* 4M */ | |
1459 | 5, /* 8M */ | |
1460 | 0 /* 16M */ | |
1461 | }; | |
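/*
 * Standalone sketch (not part of metaslab.c): how the buckets above
 * combine into a single fragmentation value. The histogram contents are
 * invented for illustration, and the 512B minimum bucket assumes
 * sm_shift == SPA_MINBLOCKSHIFT; the real calculation, including the
 * ashift-based index adjustment, lives in metaslab_set_fragmentation().
 */
#include <stdint.h>
#include <stdio.h>

static const int frag_table[16] = {
	100, 100, 98, 95, 90, 80, 70, 60, 50, 40, 30, 20, 15, 10, 5, 0
};

int
main(void)
{
	uint64_t histogram[16] = { 0 };
	uint64_t fragmentation = 0, total = 0;

	histogram[4] = 1024;	/* 1024 free segments of ~8K each */
	histogram[11] = 16;	/* 16 free segments of ~1M each */

	for (int i = 0; i < 16; i++) {
		uint64_t space = histogram[i] << (i + 9);  /* 2^9 = 512B */

		total += space;
		fragmentation += space * frag_table[i];
	}
	if (total > 0)
		fragmentation /= total;

	/* 8M of 8K runs at 90% vs 16M of 1M runs at 20% => 43% */
	printf("fragmentation = %llu%%\n", (unsigned long long)fragmentation);
	return (0);
}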
1462 | ||
1463 | /* | |
1464 | * Calculate the metaslab's fragmentation metric and store it in | |
1465 | * ms_fragmentation. A value of ZFS_FRAG_INVALID means that the metaslab | |
1466 | * has not been upgraded and does not support this metric. Otherwise, | |
1467 | * the value should be in the range [0, 100]. | |
93cf2076 | 1468 | */ |
4e21fd06 DB |
1469 | static void |
1470 | metaslab_set_fragmentation(metaslab_t *msp) | |
93cf2076 | 1471 | { |
f3a7f661 GW |
1472 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; |
1473 | uint64_t fragmentation = 0; | |
1474 | uint64_t total = 0; | |
1475 | boolean_t feature_enabled = spa_feature_is_enabled(spa, | |
1476 | SPA_FEATURE_SPACEMAP_HISTOGRAM); | |
93cf2076 | 1477 | |
4e21fd06 DB |
1478 | if (!feature_enabled) { |
1479 | msp->ms_fragmentation = ZFS_FRAG_INVALID; | |
1480 | return; | |
1481 | } | |
f3a7f661 | 1482 | |
93cf2076 | 1483 | /* |
f3a7f661 GW |
1484 | * A null space map means that the entire metaslab is free |
1485 | * and thus is not fragmented. | |
93cf2076 | 1486 | */ |
4e21fd06 DB |
1487 | if (msp->ms_sm == NULL) { |
1488 | msp->ms_fragmentation = 0; | |
1489 | return; | |
1490 | } | |
f3a7f661 GW |
1491 | |
1492 | /* | |
4e21fd06 | 1493 | * If this metaslab's space map has not been upgraded, flag it |
f3a7f661 GW |
1494 | * so that we upgrade next time we encounter it. |
1495 | */ | |
1496 | if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) { | |
3b7f360c | 1497 | uint64_t txg = spa_syncing_txg(spa); |
93cf2076 GW |
1498 | vdev_t *vd = msp->ms_group->mg_vd; |
1499 | ||
3b7f360c GW |
1500 | /* |
1501 | * If we've reached the final dirty txg, then we must | |
1502 | * be shutting down the pool. We don't want to dirty | |
1503 | * any data past this point so skip setting the condense | |
1504 | * flag. We can retry this action the next time the pool | |
1505 | * is imported. | |
1506 | */ | |
1507 | if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) { | |
8b0a0840 TC |
1508 | msp->ms_condense_wanted = B_TRUE; |
1509 | vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); | |
1510 | spa_dbgmsg(spa, "txg %llu, requesting force condense: " | |
3b7f360c GW |
1511 | "ms_id %llu, vdev_id %llu", txg, msp->ms_id, |
1512 | vd->vdev_id); | |
8b0a0840 | 1513 | } |
4e21fd06 DB |
1514 | msp->ms_fragmentation = ZFS_FRAG_INVALID; |
1515 | return; | |
93cf2076 GW |
1516 | } |
1517 | ||
1c27024e | 1518 | for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { |
f3a7f661 GW |
1519 | uint64_t space = 0; |
1520 | uint8_t shift = msp->ms_sm->sm_shift; | |
4e21fd06 | 1521 | |
f3a7f661 GW |
1522 | int idx = MIN(shift - SPA_MINBLOCKSHIFT + i, |
1523 | FRAGMENTATION_TABLE_SIZE - 1); | |
93cf2076 | 1524 | |
93cf2076 GW |
1525 | if (msp->ms_sm->sm_phys->smp_histogram[i] == 0) |
1526 | continue; | |
1527 | ||
f3a7f661 GW |
1528 | space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift); |
1529 | total += space; | |
1530 | ||
1531 | ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE); | |
1532 | fragmentation += space * zfs_frag_table[idx]; | |
93cf2076 | 1533 | } |
f3a7f661 GW |
1534 | |
1535 | if (total > 0) | |
1536 | fragmentation /= total; | |
1537 | ASSERT3U(fragmentation, <=, 100); | |
4e21fd06 DB |
1538 | |
1539 | msp->ms_fragmentation = fragmentation; | |
93cf2076 | 1540 | } |
34dc7c2f | 1541 | |
f3a7f661 GW |
1542 | /* |
1543 | * Compute a weight -- a selection preference value -- for the given metaslab. | |
1544 | * This is based on the amount of free space, the level of fragmentation, | |
1545 | * the LBA range, and whether the metaslab is loaded. | |
1546 | */ | |
34dc7c2f | 1547 | static uint64_t |
4e21fd06 | 1548 | metaslab_space_weight(metaslab_t *msp) |
34dc7c2f BB |
1549 | { |
1550 | metaslab_group_t *mg = msp->ms_group; | |
34dc7c2f BB |
1551 | vdev_t *vd = mg->mg_vd; |
1552 | uint64_t weight, space; | |
1553 | ||
1554 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
4e21fd06 | 1555 | ASSERT(!vd->vdev_removing); |
c2e42f9d | 1556 | |
34dc7c2f BB |
1557 | /* |
1558 | * The baseline weight is the metaslab's free space. | |
1559 | */ | |
93cf2076 | 1560 | space = msp->ms_size - space_map_allocated(msp->ms_sm); |
f3a7f661 | 1561 | |
f3a7f661 GW |
1562 | if (metaslab_fragmentation_factor_enabled && |
1563 | msp->ms_fragmentation != ZFS_FRAG_INVALID) { | |
1564 | /* | |
1565 | * Use the fragmentation information to inversely scale | |
1566 | * down the baseline weight. We need to ensure that we | |
1567 | * don't exclude this metaslab completely when it's 100% | |
1568 | * fragmented. To avoid this we reduce the fragmented value | |
1569 | * by 1. | |
1570 | */ | |
1571 | space = (space * (100 - (msp->ms_fragmentation - 1))) / 100; | |
1572 | ||
1573 | /* | |
1574 | * If space < SPA_MINBLOCKSIZE, then we will not allocate from | |
1575 | * this metaslab again. The fragmentation metric may have | |
1576 | * decreased the space to something smaller than | |
1577 | * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE | |
1578 | * so that we can consume any remaining space. | |
1579 | */ | |
1580 | if (space > 0 && space < SPA_MINBLOCKSIZE) | |
1581 | space = SPA_MINBLOCKSIZE; | |
1582 | } | |
34dc7c2f BB |
1583 | weight = space; |
1584 | ||
1585 | /* | |
1586 | * Modern disks have uniform bit density and constant angular velocity. | |
1587 | * Therefore, the outer recording zones are faster (higher bandwidth) | |
1588 | * than the inner zones by the ratio of outer to inner track diameter, | |
1589 | * which is typically around 2:1. We account for this by assigning | |
1590 | * higher weight to lower metaslabs (multiplier ranging from 2x to 1x). | |
1591 | * In effect, this means that we'll select the metaslab with the most | |
1592 | * free bandwidth rather than simply the one with the most free space. | |
1593 | */ | |
fb40095f | 1594 | if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) { |
f3a7f661 GW |
1595 | weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count; |
1596 | ASSERT(weight >= space && weight <= 2 * space); | |
1597 | } | |
428870ff | 1598 | |
f3a7f661 GW |
1599 | /* |
1600 | * If this metaslab is one we're actively using, adjust its | |
1601 | * weight to make it preferable to any inactive metaslab so | |
1602 | * we'll polish it off. If the fragmentation on this metaslab | |
1603 | * has exceeded our threshold, then don't mark it active. | |
1604 | */ | |
1605 | if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID && | |
1606 | msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) { | |
428870ff BB |
1607 | weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK); |
1608 | } | |
34dc7c2f | 1609 | |
4e21fd06 DB |
1610 | WEIGHT_SET_SPACEBASED(weight); |
1611 | return (weight); | |
1612 | } | |
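/*
 * Standalone sketch of the arithmetic above with assumed inputs: a 128G
 * metaslab that is 30% fragmented, sitting halfway down a 200-metaslab
 * rotational vdev. Not part of metaslab.c.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t space = 128ULL << 30;		/* free space: 128G */
	uint64_t fragmentation = 30;		/* from zfs_frag_table */
	uint64_t ms_id = 100, ms_count = 200;	/* middle of the vdev */

	/* Inverse fragmentation scaling: keep (100 - (30 - 1)) = 71%. */
	space = (space * (100 - (fragmentation - 1))) / 100;

	/* LBA weighting: outer (low-id) metaslabs earn up to 2x. */
	uint64_t weight = 2 * space - (ms_id * space) / ms_count;

	/* 128G * 0.71 * 1.5 = ~136.3G */
	printf("weight = %.1fG\n", (double)weight / (1ULL << 30));
	return (0);
}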
1613 | ||
1614 | /* | |
1615 | * Return the weight of the specified metaslab, according to the segment-based | |
1616 | * weighting algorithm. The metaslab must be loaded. This function can | |
1617 | * be called within a sync pass since it relies only on the metaslab's | |
1618 | * range tree which is always accurate when the metaslab is loaded. | |
1619 | */ | |
1620 | static uint64_t | |
1621 | metaslab_weight_from_range_tree(metaslab_t *msp) | |
1622 | { | |
1623 | uint64_t weight = 0; | |
1624 | uint32_t segments = 0; | |
4e21fd06 DB |
1625 | |
1626 | ASSERT(msp->ms_loaded); | |
1627 | ||
1c27024e DB |
1628 | for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT; |
1629 | i--) { | |
4e21fd06 DB |
1630 | uint8_t shift = msp->ms_group->mg_vd->vdev_ashift; |
1631 | int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; | |
1632 | ||
1633 | segments <<= 1; | |
1634 | segments += msp->ms_tree->rt_histogram[i]; | |
1635 | ||
1636 | /* | |
1637 | * The range tree provides more precision than the space map | |
1638 | * and must be downgraded so that all values fit within the | |
1639 | * space map's histogram. This allows us to compare loaded | |
1640 | * vs. unloaded metaslabs to determine which metaslab is | |
1641 | * considered "best". | |
1642 | */ | |
1643 | if (i > max_idx) | |
1644 | continue; | |
1645 | ||
1646 | if (segments != 0) { | |
1647 | WEIGHT_SET_COUNT(weight, segments); | |
1648 | WEIGHT_SET_INDEX(weight, i); | |
1649 | WEIGHT_SET_ACTIVE(weight, 0); | |
1650 | break; | |
1651 | } | |
1652 | } | |
1653 | return (weight); | |
1654 | } | |
1655 | ||
1656 | /* | |
1657 | * Calculate the weight based on the on-disk histogram. This should only | |
1658 | * be called after a sync pass has completely finished since the on-disk | |
1659 | * information is updated in metaslab_sync(). | |
1660 | */ | |
1661 | static uint64_t | |
1662 | metaslab_weight_from_spacemap(metaslab_t *msp) | |
1663 | { | |
1664 | uint64_t weight = 0; | |
4e21fd06 | 1665 | |
1c27024e | 1666 | for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) { |
4e21fd06 DB |
1667 | if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) { |
1668 | WEIGHT_SET_COUNT(weight, | |
1669 | msp->ms_sm->sm_phys->smp_histogram[i]); | |
1670 | WEIGHT_SET_INDEX(weight, i + | |
1671 | msp->ms_sm->sm_shift); | |
1672 | WEIGHT_SET_ACTIVE(weight, 0); | |
1673 | break; | |
1674 | } | |
1675 | } | |
1676 | return (weight); | |
1677 | } | |
1678 | ||
1679 | /* | |
1680 | * Compute a segment-based weight for the specified metaslab. The weight | |
1681 | * is determined by the highest bucket in the histogram. The information | |
1682 | * for the highest bucket is encoded into the weight value. | |
1683 | */ | |
1684 | static uint64_t | |
1685 | metaslab_segment_weight(metaslab_t *msp) | |
1686 | { | |
1687 | metaslab_group_t *mg = msp->ms_group; | |
1688 | uint64_t weight = 0; | |
1689 | uint8_t shift = mg->mg_vd->vdev_ashift; | |
1690 | ||
1691 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
1692 | ||
1693 | /* | |
1694 | * The metaslab is completely free. | |
1695 | */ | |
1696 | if (space_map_allocated(msp->ms_sm) == 0) { | |
1697 | int idx = highbit64(msp->ms_size) - 1; | |
1698 | int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; | |
1699 | ||
1700 | if (idx < max_idx) { | |
1701 | WEIGHT_SET_COUNT(weight, 1ULL); | |
1702 | WEIGHT_SET_INDEX(weight, idx); | |
1703 | } else { | |
1704 | WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx)); | |
1705 | WEIGHT_SET_INDEX(weight, max_idx); | |
1706 | } | |
1707 | WEIGHT_SET_ACTIVE(weight, 0); | |
1708 | ASSERT(!WEIGHT_IS_SPACEBASED(weight)); | |
1709 | ||
1710 | return (weight); | |
1711 | } | |
1712 | ||
1713 | ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); | |
1714 | ||
1715 | /* | |
1716 | * If the metaslab is fully allocated then just make the weight 0. | |
1717 | */ | |
1718 | if (space_map_allocated(msp->ms_sm) == msp->ms_size) | |
1719 | return (0); | |
1720 | /* | |
1721 | * If the metaslab is already loaded, then use the range tree to | |
1722 | * determine the weight. Otherwise, we rely on the space map information | |
1723 | * to generate the weight. | |
1724 | */ | |
1725 | if (msp->ms_loaded) { | |
1726 | weight = metaslab_weight_from_range_tree(msp); | |
1727 | } else { | |
1728 | weight = metaslab_weight_from_spacemap(msp); | |
1729 | } | |
1730 | ||
1731 | /* | |
1732 | * If the metaslab was active the last time we calculated its weight | |
1733 | * then keep it active. We want to consume the entire region that | |
1734 | * is associated with this weight. | |
1735 | */ | |
1736 | if (msp->ms_activation_weight != 0 && weight != 0) | |
1737 | WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight)); | |
1738 | return (weight); | |
1739 | } | |
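/*
 * Standalone sketch of the encoding above. The bit layout (6 index bits
 * at position 55, a 55-bit count, the type bit at 61, and two active
 * bits on top) follows the WEIGHT_* description in metaslab_impl.h at
 * this revision; treat the constants here as illustrative rather than
 * authoritative.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t count = 1000;	/* segments in the highest bucket */
	uint64_t index = 20;	/* bucket spans [2^20, 2^21) = [1M, 2M) */

	/* Encode: type bit 61 stays 0 for segment-based weights. */
	uint64_t weight = (index << 55) | count;

	/* Decode, as metaslab_should_allocate() would. */
	uint64_t idx = (weight >> 55) & 0x3f;
	uint64_t cnt = weight & ((1ULL << 55) - 1);

	printf("index=%llu count=%llu => allocations < %llu bytes\n",
	    (unsigned long long)idx, (unsigned long long)cnt,
	    (unsigned long long)(1ULL << (idx + 1)));
	return (0);
}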
1740 | ||
1741 | /* | |
1742 | * Determine if we should attempt to allocate from this metaslab. If the | |
1743 | * metaslab has a maximum size then we can quickly determine if the desired | |
1744 | * allocation size can be satisfied. Otherwise, if we're using segment-based | |
1745 | * weighting then we can determine the maximum allocation that this metaslab | |
1746 | * can accommodate based on the index encoded in the weight. If we're using | |
1747 | * space-based weights then we rely on the entire weight (excluding the weight | |
1748 | * type bit). | |
1749 | */ | |
1750 | boolean_t | |
1751 | metaslab_should_allocate(metaslab_t *msp, uint64_t asize) | |
1752 | { | |
1753 | boolean_t should_allocate; | |
1754 | ||
1755 | if (msp->ms_max_size != 0) | |
1756 | return (msp->ms_max_size >= asize); | |
1757 | ||
1758 | if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) { | |
1759 | /* | |
1760 | * The metaslab segment weight indicates segments in the | |
1761 | * range [2^i, 2^(i+1)), where i is the index in the weight. | |
1762 | * Since the asize might be in the middle of the range, we | |
1763 | * should attempt the allocation if asize < 2^(i+1). | |
1764 | */ | |
1765 | should_allocate = (asize < | |
1766 | 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1)); | |
1767 | } else { | |
1768 | should_allocate = (asize <= | |
1769 | (msp->ms_weight & ~METASLAB_WEIGHT_TYPE)); | |
1770 | } | |
1771 | return (should_allocate); | |
1772 | } | |
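/*
 * Quick standalone illustration of both branches above, with assumed
 * weights: a segment-based weight indexed at bucket 20 accepts a 1.5M
 * request but not a 3M one; a space-based weight accepts anything up to
 * the weighted free space.
 */
#include <stdint.h>
#include <assert.h>

int
main(void)
{
	uint64_t idx = 20;			/* segment-based index */
	uint64_t space_weight = 64ULL << 30;	/* space-based: ~64G */

	assert((1536ULL << 10) < (1ULL << (idx + 1)));	/* 1.5M: yes */
	assert(!((3ULL << 20) < (1ULL << (idx + 1))));	/* 3M: no */
	assert((1ULL << 20) <= space_weight);		/* 1M: yes */
	return (0);
}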
1773 | static uint64_t | |
1774 | metaslab_weight(metaslab_t *msp) | |
1775 | { | |
1776 | vdev_t *vd = msp->ms_group->mg_vd; | |
1777 | spa_t *spa = vd->vdev_spa; | |
1778 | uint64_t weight; | |
1779 | ||
1780 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
1781 | ||
1782 | /* | |
1783 | * This vdev is in the process of being removed so there is nothing | |
1784 | * for us to do here. | |
1785 | */ | |
1786 | if (vd->vdev_removing) { | |
1787 | ASSERT0(space_map_allocated(msp->ms_sm)); | |
1788 | ASSERT0(vd->vdev_ms_shift); | |
1789 | return (0); | |
1790 | } | |
1791 | ||
1792 | metaslab_set_fragmentation(msp); | |
1793 | ||
1794 | /* | |
1795 | * Update the maximum size if the metaslab is loaded. This will | |
1796 | * ensure that we get an accurate maximum size if newly freed space | |
1797 | * has been added back into the free tree. | |
1798 | */ | |
1799 | if (msp->ms_loaded) | |
1800 | msp->ms_max_size = metaslab_block_maxsize(msp); | |
1801 | ||
1802 | /* | |
1803 | * Segment-based weighting requires space map histogram support. | |
1804 | */ | |
1805 | if (zfs_metaslab_segment_weight_enabled && | |
1806 | spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) && | |
1807 | (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size == | |
1808 | sizeof (space_map_phys_t))) { | |
1809 | weight = metaslab_segment_weight(msp); | |
1810 | } else { | |
1811 | weight = metaslab_space_weight(msp); | |
1812 | } | |
93cf2076 | 1813 | return (weight); |
34dc7c2f BB |
1814 | } |
1815 | ||
1816 | static int | |
6d974228 | 1817 | metaslab_activate(metaslab_t *msp, uint64_t activation_weight) |
34dc7c2f | 1818 | { |
34dc7c2f BB |
1819 | ASSERT(MUTEX_HELD(&msp->ms_lock)); |
1820 | ||
1821 | if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) { | |
93cf2076 GW |
1822 | metaslab_load_wait(msp); |
1823 | if (!msp->ms_loaded) { | |
1824 | int error = metaslab_load(msp); | |
1825 | if (error) { | |
428870ff BB |
1826 | metaslab_group_sort(msp->ms_group, msp, 0); |
1827 | return (error); | |
1828 | } | |
34dc7c2f | 1829 | } |
9babb374 | 1830 | |
4e21fd06 | 1831 | msp->ms_activation_weight = msp->ms_weight; |
34dc7c2f BB |
1832 | metaslab_group_sort(msp->ms_group, msp, |
1833 | msp->ms_weight | activation_weight); | |
1834 | } | |
93cf2076 | 1835 | ASSERT(msp->ms_loaded); |
34dc7c2f BB |
1836 | ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); |
1837 | ||
1838 | return (0); | |
1839 | } | |
1840 | ||
1841 | static void | |
4e21fd06 | 1842 | metaslab_passivate(metaslab_t *msp, uint64_t weight) |
34dc7c2f | 1843 | { |
4e21fd06 DB |
1844 | ASSERTV(uint64_t size = weight & ~METASLAB_WEIGHT_TYPE); |
1845 | ||
34dc7c2f BB |
1846 | /* |
1847 | * If size < SPA_MINBLOCKSIZE, then we will not allocate from | |
1848 | * this metaslab again. In that case, it had better be empty, | |
1849 | * or we would be leaving space on the table. | |
1850 | */ | |
94d49e8f TC |
1851 | ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) || |
1852 | size >= SPA_MINBLOCKSIZE || | |
4e21fd06 DB |
1853 | range_tree_space(msp->ms_tree) == 0); |
1854 | ASSERT0(weight & METASLAB_ACTIVE_MASK); | |
1855 | ||
1856 | msp->ms_activation_weight = 0; | |
1857 | metaslab_group_sort(msp->ms_group, msp, weight); | |
34dc7c2f BB |
1858 | ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0); |
1859 | } | |
1860 | ||
4e21fd06 DB |
1861 | /* |
1862 | * Segment-based metaslabs are activated once and remain active until | |
1863 | * we either fail an allocation attempt (similar to space-based metaslabs) | |
1864 | * or have exhausted the free space in zfs_metaslab_switch_threshold | |
1865 | * buckets since the metaslab was activated. This function checks to see | |
1866 | * if we've exhausted the zfs_metaslab_switch_threshold buckets in the | |
1867 | * metaslab and passivates it proactively. This will allow us to select a | |
1868 | * metaslab with a larger contiguous region, if any, remaining within this | |
1869 | * metaslab group. If we're in sync pass > 1, then we continue using this | |
1870 | * metaslab so that we don't dirty more blocks and cause more sync passes. | |
1871 | */ | |
1872 | void | |
1873 | metaslab_segment_may_passivate(metaslab_t *msp) | |
1874 | { | |
1875 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
4e21fd06 DB |
1876 | |
1877 | if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1) | |
1878 | return; | |
1879 | ||
1880 | /* | |
1881 | * Since we are in the middle of a sync pass, the most accurate | |
1882 | * information that is accessible to us is the in-core range tree | |
1883 | * histogram; calculate the new weight based on that information. | |
1884 | */ | |
1c27024e DB |
1885 | uint64_t weight = metaslab_weight_from_range_tree(msp); |
1886 | int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight); | |
1887 | int current_idx = WEIGHT_GET_INDEX(weight); | |
4e21fd06 DB |
1888 | |
1889 | if (current_idx <= activation_idx - zfs_metaslab_switch_threshold) | |
1890 | metaslab_passivate(msp, weight); | |
1891 | } | |
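/*
 * Standalone sketch of the check above. The zfs_metaslab_switch_threshold
 * value of 2 is an assumption taken from this revision's tunables; the
 * bucket indexes are invented.
 */
#include <stdio.h>

int
main(void)
{
	int switch_threshold = 2;	/* zfs_metaslab_switch_threshold */
	int activation_idx = 22;	/* largest bucket when activated: 4M */
	int current_idx = 20;		/* largest bucket now: 1M */

	if (current_idx <= activation_idx - switch_threshold)
		printf("passivate: free space shrank by two buckets\n");
	return (0);
}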
1892 | ||
93cf2076 GW |
1893 | static void |
1894 | metaslab_preload(void *arg) | |
1895 | { | |
1896 | metaslab_t *msp = arg; | |
1897 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
1cd77734 | 1898 | fstrans_cookie_t cookie = spl_fstrans_mark(); |
93cf2076 | 1899 | |
080b3100 GW |
1900 | ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock)); |
1901 | ||
93cf2076 GW |
1902 | mutex_enter(&msp->ms_lock); |
1903 | metaslab_load_wait(msp); | |
1904 | if (!msp->ms_loaded) | |
1905 | (void) metaslab_load(msp); | |
4e21fd06 | 1906 | msp->ms_selected_txg = spa_syncing_txg(spa); |
93cf2076 | 1907 | mutex_exit(&msp->ms_lock); |
1cd77734 | 1908 | spl_fstrans_unmark(cookie); |
93cf2076 GW |
1909 | } |
1910 | ||
1911 | static void | |
1912 | metaslab_group_preload(metaslab_group_t *mg) | |
1913 | { | |
1914 | spa_t *spa = mg->mg_vd->vdev_spa; | |
1915 | metaslab_t *msp; | |
1916 | avl_tree_t *t = &mg->mg_metaslab_tree; | |
1917 | int m = 0; | |
1918 | ||
1919 | if (spa_shutting_down(spa) || !metaslab_preload_enabled) { | |
c5528b9b | 1920 | taskq_wait_outstanding(mg->mg_taskq, 0); |
93cf2076 GW |
1921 | return; |
1922 | } | |
93cf2076 | 1923 | |
080b3100 | 1924 | mutex_enter(&mg->mg_lock); |
93cf2076 | 1925 | /* |
080b3100 | 1926 | * Load the next potential metaslabs |
93cf2076 | 1927 | */ |
4e21fd06 | 1928 | for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) { |
f3a7f661 GW |
1929 | /* |
1930 | * We preload only the maximum number of metaslabs specified | |
1931 | * by metaslab_preload_limit. If a metaslab is being forced | |
1932 | * to condense then we preload it too. This will ensure | |
1933 | * that force condensing happens in the next txg. | |
1934 | */ | |
1935 | if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) { | |
f3a7f661 GW |
1936 | continue; |
1937 | } | |
93cf2076 GW |
1938 | |
1939 | VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload, | |
48d3eb40 | 1940 | msp, TQ_SLEEP) != TASKQID_INVALID); |
93cf2076 GW |
1941 | } |
1942 | mutex_exit(&mg->mg_lock); | |
1943 | } | |
1944 | ||
e51be066 | 1945 | /* |
93cf2076 GW |
1946 | * Determine if the space map's on-disk footprint is past our tolerance |
1947 | * for inefficiency. We would like to use the following criteria to make | |
1948 | * our decision: | |
e51be066 GW |
1949 | * |
1950 | * 1. The size of the space map object should not dramatically increase as a | |
93cf2076 | 1951 | * result of writing out the free space range tree. |
e51be066 GW |
1952 | * |
1953 | * 2. The minimal on-disk space map representation is zfs_condense_pct/100 | |
93cf2076 GW |
1954 | * times the size of the free space range tree representation | |
1955 | * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB). | |
e51be066 | 1956 | * |
b02fe35d AR |
1957 | * 3. The on-disk size of the space map should actually decrease. |
1958 | * | |
e51be066 GW |
1959 | * Checking the first condition is tricky since we don't want to walk |
1960 | * the entire AVL tree calculating the estimated on-disk size. Instead we | |
93cf2076 GW |
1961 | * use the size-ordered range tree in the metaslab and calculate the |
1962 | * size required to write out the largest segment in our free tree. If the | |
e51be066 GW |
1963 | * size required to represent that segment on disk is larger than the space |
1964 | * map object then we avoid condensing this map. | |
1965 | * | |
1966 | * To determine the second criterion we use a best-case estimate and assume | |
1967 | * each segment can be represented on-disk as a single 64-bit entry. We refer | |
1968 | * to this best-case estimate as the space map's minimal form. | |
b02fe35d AR |
1969 | * |
1970 | * Unfortunately, we cannot compute the on-disk size of the space map in this | |
1971 | * context because we cannot accurately compute the effects of compression, etc. | |
1972 | * Instead, we apply the heuristic described in the block comment for | |
1973 | * zfs_metaslab_condense_block_threshold - we only condense if the space used | |
1974 | * is greater than a threshold number of blocks. | |
e51be066 GW |
1975 | */ |
1976 | static boolean_t | |
1977 | metaslab_should_condense(metaslab_t *msp) | |
1978 | { | |
93cf2076 GW |
1979 | space_map_t *sm = msp->ms_sm; |
1980 | range_seg_t *rs; | |
b02fe35d AR |
1981 | uint64_t size, entries, segsz, object_size, optimal_size, record_size; |
1982 | dmu_object_info_t doi; | |
f4bae2ed | 1983 | uint64_t vdev_blocksize = 1ULL << msp->ms_group->mg_vd->vdev_ashift; |
e51be066 GW |
1984 | |
1985 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
93cf2076 | 1986 | ASSERT(msp->ms_loaded); |
e51be066 GW |
1987 | |
1988 | /* | |
93cf2076 | 1989 | * Use the ms_size_tree range tree, which is ordered by size, to |
f3a7f661 GW |
1990 | * obtain the largest segment in the free tree. We always condense |
1991 | * metaslabs that are empty and metaslabs for which a condense | |
1992 | * request has been made. | |
e51be066 | 1993 | */ |
93cf2076 | 1994 | rs = avl_last(&msp->ms_size_tree); |
f3a7f661 | 1995 | if (rs == NULL || msp->ms_condense_wanted) |
e51be066 GW |
1996 | return (B_TRUE); |
1997 | ||
1998 | /* | |
1999 | * Calculate the number of 64-bit entries this segment would | |
2000 | * require when written to disk. If this single segment would be | |
2001 | * larger on-disk than the entire current on-disk structure, then | |
2002 | * clearly condensing will increase the on-disk structure size. | |
2003 | */ | |
93cf2076 | 2004 | size = (rs->rs_end - rs->rs_start) >> sm->sm_shift; |
e51be066 GW |
2005 | entries = size / (MIN(size, SM_RUN_MAX)); |
2006 | segsz = entries * sizeof (uint64_t); | |
2007 | ||
b02fe35d AR |
2008 | optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root); |
2009 | object_size = space_map_length(msp->ms_sm); | |
2010 | ||
2011 | dmu_object_info_from_db(sm->sm_dbuf, &doi); | |
2012 | record_size = MAX(doi.doi_data_block_size, vdev_blocksize); | |
2013 | ||
2014 | return (segsz <= object_size && | |
2015 | object_size >= (optimal_size * zfs_condense_pct / 100) && | |
2016 | object_size > zfs_metaslab_condense_block_threshold * record_size); | |
e51be066 GW |
2017 | } |
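/*
 * Standalone sketch of the three-way check above, with invented on-disk
 * numbers: 10,000 free segments behind a 1M space map object, a largest
 * free run that fits in one 8-byte entry, 4K records, the default
 * zfs_condense_pct of 200, and an assumed block threshold of 4.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t segsz = sizeof (uint64_t);	/* largest run: 1 entry */
	uint64_t optimal_size = 10000 * sizeof (uint64_t);	/* 80,000 */
	uint64_t object_size = 1ULL << 20;	/* current on-disk: 1M */
	uint64_t record_size = 4096;
	uint64_t condense_pct = 200, block_threshold = 4;

	int condense = (segsz <= object_size &&
	    object_size >= (optimal_size * condense_pct / 100) &&
	    object_size > block_threshold * record_size);

	printf("condense = %d\n", condense);	/* 1: worth rewriting */
	return (0);
}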
2018 | ||
2019 | /* | |
2020 | * Condense the on-disk space map representation to its minimized form. | |
2021 | * The minimized form consists of a small number of allocations followed by | |
93cf2076 | 2022 | * the entries of the free range tree. |
e51be066 GW |
2023 | */ |
2024 | static void | |
2025 | metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx) | |
2026 | { | |
2027 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
93cf2076 GW |
2028 | range_tree_t *condense_tree; |
2029 | space_map_t *sm = msp->ms_sm; | |
e51be066 GW |
2030 | |
2031 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
2032 | ASSERT3U(spa_sync_pass(spa), ==, 1); | |
93cf2076 | 2033 | ASSERT(msp->ms_loaded); |
e51be066 | 2034 | |
f3a7f661 | 2035 | |
5f3d9c69 JS |
2036 | spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, vdev id %llu, " |
2037 | "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg, | |
2038 | msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id, | |
2039 | msp->ms_group->mg_vd->vdev_spa->spa_name, | |
2040 | space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root), | |
f3a7f661 GW |
2041 | msp->ms_condense_wanted ? "TRUE" : "FALSE"); |
2042 | ||
2043 | msp->ms_condense_wanted = B_FALSE; | |
e51be066 GW |
2044 | |
2045 | /* | |
93cf2076 | 2046 | * Create a range tree that is 100% allocated. We remove segments |
e51be066 GW |
2047 | * that have been freed in this txg, any deferred frees that exist, |
2048 | * and any allocation in the future. Removing segments should be | |
93cf2076 GW |
2049 | * a relatively inexpensive operation since we expect these trees to |
2050 | * have a small number of nodes. | |
e51be066 | 2051 | */ |
93cf2076 GW |
2052 | condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock); |
2053 | range_tree_add(condense_tree, msp->ms_start, msp->ms_size); | |
e51be066 GW |
2054 | |
2055 | /* | |
93cf2076 | 2056 | * Remove what's been freed in this txg from the condense_tree. |
e51be066 | 2057 | * Since we're in sync_pass 1, we know that all the frees from |
258553d3 | 2058 | * this txg are in the freeingtree. |
e51be066 | 2059 | */ |
258553d3 | 2060 | range_tree_walk(msp->ms_freeingtree, range_tree_remove, condense_tree); |
e51be066 | 2061 | |
1c27024e | 2062 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { |
93cf2076 GW |
2063 | range_tree_walk(msp->ms_defertree[t], |
2064 | range_tree_remove, condense_tree); | |
2065 | } | |
e51be066 | 2066 | |
1c27024e | 2067 | for (int t = 1; t < TXG_CONCURRENT_STATES; t++) { |
93cf2076 GW |
2068 | range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK], |
2069 | range_tree_remove, condense_tree); | |
2070 | } | |
e51be066 GW |
2071 | |
2072 | /* | |
2073 | * We're about to drop the metaslab's lock thus allowing | |
2074 | * other consumers to change its content. Set the | |
93cf2076 | 2075 | * metaslab's ms_condensing flag to ensure that |
e51be066 GW |
2076 | * allocations on this metaslab do not occur while we're |
2077 | * in the middle of committing it to disk. This is only critical | |
93cf2076 | 2078 | * for the ms_tree as all other range trees use per txg |
e51be066 GW |
2079 | * views of their content. |
2080 | */ | |
93cf2076 | 2081 | msp->ms_condensing = B_TRUE; |
e51be066 GW |
2082 | |
2083 | mutex_exit(&msp->ms_lock); | |
93cf2076 | 2084 | space_map_truncate(sm, tx); |
e51be066 GW |
2085 | mutex_enter(&msp->ms_lock); |
2086 | ||
2087 | /* | |
4e21fd06 | 2088 | * While we would ideally like to create a space map representation |
e51be066 | 2089 | * that consists only of allocation records, doing so can be |
93cf2076 | 2090 | * prohibitively expensive because the in-core free tree can be |
e51be066 | 2091 | * large, and therefore computationally expensive to subtract |
93cf2076 GW |
2092 | * from the condense_tree. Instead we sync out two trees, a cheap |
2093 | * allocation only tree followed by the in-core free tree. While not | |
e51be066 GW |
2094 | * optimal, this is typically close to optimal, and much cheaper to |
2095 | * compute. | |
2096 | */ | |
93cf2076 GW |
2097 | space_map_write(sm, condense_tree, SM_ALLOC, tx); |
2098 | range_tree_vacate(condense_tree, NULL, NULL); | |
2099 | range_tree_destroy(condense_tree); | |
e51be066 | 2100 | |
93cf2076 GW |
2101 | space_map_write(sm, msp->ms_tree, SM_FREE, tx); |
2102 | msp->ms_condensing = B_FALSE; | |
e51be066 GW |
2103 | } |
2104 | ||
34dc7c2f BB |
2105 | /* |
2106 | * Write a metaslab to disk in the context of the specified transaction group. | |
2107 | */ | |
2108 | void | |
2109 | metaslab_sync(metaslab_t *msp, uint64_t txg) | |
2110 | { | |
93cf2076 GW |
2111 | metaslab_group_t *mg = msp->ms_group; |
2112 | vdev_t *vd = mg->mg_vd; | |
34dc7c2f | 2113 | spa_t *spa = vd->vdev_spa; |
428870ff | 2114 | objset_t *mos = spa_meta_objset(spa); |
93cf2076 | 2115 | range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK]; |
34dc7c2f | 2116 | dmu_tx_t *tx; |
93cf2076 | 2117 | uint64_t object = space_map_object(msp->ms_sm); |
34dc7c2f | 2118 | |
428870ff BB |
2119 | ASSERT(!vd->vdev_ishole); |
2120 | ||
e51be066 GW |
2121 | /* |
2122 | * This metaslab has just been added so there's no work to do now. | |
2123 | */ | |
258553d3 | 2124 | if (msp->ms_freeingtree == NULL) { |
93cf2076 | 2125 | ASSERT3P(alloctree, ==, NULL); |
e51be066 GW |
2126 | return; |
2127 | } | |
2128 | ||
93cf2076 | 2129 | ASSERT3P(alloctree, !=, NULL); |
258553d3 TC |
2130 | ASSERT3P(msp->ms_freeingtree, !=, NULL); |
2131 | ASSERT3P(msp->ms_freedtree, !=, NULL); | |
e51be066 | 2132 | |
f3a7f661 GW |
2133 | /* |
2134 | * Normally, we don't want to process a metaslab if there | |
2135 | * are no allocations or frees to perform. However, if the metaslab | |
3b7f360c GW |
2136 | * is being forced to condense and it's loaded, we need to let it |
2137 | * through. | |
f3a7f661 | 2138 | */ |
93cf2076 | 2139 | if (range_tree_space(alloctree) == 0 && |
258553d3 | 2140 | range_tree_space(msp->ms_freeingtree) == 0 && |
3b7f360c | 2141 | !(msp->ms_loaded && msp->ms_condense_wanted)) |
428870ff | 2142 | return; |
34dc7c2f | 2143 | |
3b7f360c GW |
2144 | |
2145 | VERIFY(txg <= spa_final_dirty_txg(spa)); | |
2146 | ||
34dc7c2f BB |
2147 | /* |
2148 | * The only state that can actually be changing concurrently with | |
93cf2076 | 2149 | * metaslab_sync() is the metaslab's ms_tree. No other thread can |
258553d3 | 2150 | * be modifying this txg's alloctree, freeingtree, freedtree, or |
93cf2076 | 2151 | * space_map_phys_t. Therefore, we only hold ms_lock to satisfy |
4e21fd06 | 2152 | * space map ASSERTs. We drop it whenever we call into the DMU, |
93cf2076 GW |
2153 | * because the DMU can call down to us (e.g. via zio_free()) at |
2154 | * any time. | |
34dc7c2f | 2155 | */ |
428870ff BB |
2156 | |
2157 | tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); | |
34dc7c2f | 2158 | |
93cf2076 GW |
2159 | if (msp->ms_sm == NULL) { |
2160 | uint64_t new_object; | |
2161 | ||
2162 | new_object = space_map_alloc(mos, tx); | |
2163 | VERIFY3U(new_object, !=, 0); | |
2164 | ||
2165 | VERIFY0(space_map_open(&msp->ms_sm, mos, new_object, | |
2166 | msp->ms_start, msp->ms_size, vd->vdev_ashift, | |
2167 | &msp->ms_lock)); | |
2168 | ASSERT(msp->ms_sm != NULL); | |
34dc7c2f BB |
2169 | } |
2170 | ||
428870ff BB |
2171 | mutex_enter(&msp->ms_lock); |
2172 | ||
96358617 | 2173 | /* |
4e21fd06 DB |
2174 | * Note: metaslab_condense() clears the space map's histogram. |
2175 | * Therefore we must verify and remove this histogram before | |
96358617 MA |
2176 | * condensing. |
2177 | */ | |
2178 | metaslab_group_histogram_verify(mg); | |
2179 | metaslab_class_histogram_verify(mg->mg_class); | |
2180 | metaslab_group_histogram_remove(mg, msp); | |
2181 | ||
93cf2076 | 2182 | if (msp->ms_loaded && spa_sync_pass(spa) == 1 && |
e51be066 GW |
2183 | metaslab_should_condense(msp)) { |
2184 | metaslab_condense(msp, txg, tx); | |
2185 | } else { | |
93cf2076 | 2186 | space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx); |
258553d3 | 2187 | space_map_write(msp->ms_sm, msp->ms_freeingtree, SM_FREE, tx); |
e51be066 | 2188 | } |
428870ff | 2189 | |
93cf2076 GW |
2190 | if (msp->ms_loaded) { |
2191 | /* | |
2192 | * When the space map is loaded, we have an accurate | |
2193 | * histogram in the range tree. This gives us an opportunity | |
2194 | * to bring the space map's histogram up-to-date so we clear | |
2195 | * it first before updating it. | |
2196 | */ | |
2197 | space_map_histogram_clear(msp->ms_sm); | |
2198 | space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx); | |
4e21fd06 DB |
2199 | |
2200 | /* | |
2201 | * Since we've cleared the histogram we need to add back | |
2202 | * any free space that has already been processed, plus | |
2203 | * any deferred space. This allows the on-disk histogram | |
2204 | * to accurately reflect all free space even if some space | |
2205 | * is not yet available for allocation (i.e. deferred). | |
2206 | */ | |
258553d3 | 2207 | space_map_histogram_add(msp->ms_sm, msp->ms_freedtree, tx); |
4e21fd06 | 2208 | |
93cf2076 | 2209 | /* |
4e21fd06 DB |
2210 | * Add back any deferred free space that has not been |
2211 | * added back into the in-core free tree yet. This will | |
2212 | * ensure that we don't end up with a space map histogram | |
2213 | * that is completely empty unless the metaslab is fully | |
2214 | * allocated. | |
93cf2076 | 2215 | */ |
1c27024e | 2216 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { |
4e21fd06 DB |
2217 | space_map_histogram_add(msp->ms_sm, |
2218 | msp->ms_defertree[t], tx); | |
2219 | } | |
93cf2076 | 2220 | } |
4e21fd06 DB |
2221 | |
2222 | /* | |
2223 | * Always add the free space from this sync pass to the space | |
2224 | * map histogram. We want to make sure that the on-disk histogram | |
2225 | * accounts for all free space. If the space map is not loaded, | |
2226 | * then we will lose some accuracy but will correct it the next | |
2227 | * time we load the space map. | |
2228 | */ | |
258553d3 | 2229 | space_map_histogram_add(msp->ms_sm, msp->ms_freeingtree, tx); |
4e21fd06 | 2230 | |
f3a7f661 GW |
2231 | metaslab_group_histogram_add(mg, msp); |
2232 | metaslab_group_histogram_verify(mg); | |
2233 | metaslab_class_histogram_verify(mg->mg_class); | |
34dc7c2f | 2234 | |
e51be066 | 2235 | /* |
93cf2076 | 2236 | * For sync pass 1, we avoid traversing this txg's free range tree |
258553d3 TC |
2237 | * and instead will just swap the pointers for freeingtree and |
2238 | * freedtree. We can safely do this since the freedtree is | |
e51be066 GW |
2239 | * guaranteed to be empty on the initial pass. |
2240 | */ | |
2241 | if (spa_sync_pass(spa) == 1) { | |
258553d3 | 2242 | range_tree_swap(&msp->ms_freeingtree, &msp->ms_freedtree); |
e51be066 | 2243 | } else { |
258553d3 TC |
2244 | range_tree_vacate(msp->ms_freeingtree, |
2245 | range_tree_add, msp->ms_freedtree); | |
34dc7c2f | 2246 | } |
f3a7f661 | 2247 | range_tree_vacate(alloctree, NULL, NULL); |
34dc7c2f | 2248 | |
93cf2076 | 2249 | ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK])); |
4e21fd06 | 2250 | ASSERT0(range_tree_space(msp->ms_alloctree[TXG_CLEAN(txg) & TXG_MASK])); |
258553d3 | 2251 | ASSERT0(range_tree_space(msp->ms_freeingtree)); |
34dc7c2f BB |
2252 | |
2253 | mutex_exit(&msp->ms_lock); | |
2254 | ||
93cf2076 GW |
2255 | if (object != space_map_object(msp->ms_sm)) { |
2256 | object = space_map_object(msp->ms_sm); | |
2257 | dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) * | |
2258 | msp->ms_id, sizeof (uint64_t), &object, tx); | |
2259 | } | |
34dc7c2f BB |
2260 | dmu_tx_commit(tx); |
2261 | } | |
2262 | ||
2263 | /* | |
2264 | * Called after a transaction group has completely synced to mark | |
2265 | * all of the metaslab's free space as usable. | |
2266 | */ | |
2267 | void | |
2268 | metaslab_sync_done(metaslab_t *msp, uint64_t txg) | |
2269 | { | |
34dc7c2f BB |
2270 | metaslab_group_t *mg = msp->ms_group; |
2271 | vdev_t *vd = mg->mg_vd; | |
4e21fd06 | 2272 | spa_t *spa = vd->vdev_spa; |
93cf2076 | 2273 | range_tree_t **defer_tree; |
428870ff | 2274 | int64_t alloc_delta, defer_delta; |
4e21fd06 | 2275 | boolean_t defer_allowed = B_TRUE; |
428870ff BB |
2276 | |
2277 | ASSERT(!vd->vdev_ishole); | |
34dc7c2f BB |
2278 | |
2279 | mutex_enter(&msp->ms_lock); | |
2280 | ||
2281 | /* | |
2282 | * If this metaslab is just becoming available, initialize its | |
258553d3 | 2283 | * range trees and add its capacity to the vdev. |
34dc7c2f | 2284 | */ |
258553d3 | 2285 | if (msp->ms_freedtree == NULL) { |
1c27024e | 2286 | for (int t = 0; t < TXG_SIZE; t++) { |
93cf2076 | 2287 | ASSERT(msp->ms_alloctree[t] == NULL); |
93cf2076 GW |
2288 | |
2289 | msp->ms_alloctree[t] = range_tree_create(NULL, msp, | |
2290 | &msp->ms_lock); | |
34dc7c2f | 2291 | } |
428870ff | 2292 | |
258553d3 TC |
2293 | ASSERT3P(msp->ms_freeingtree, ==, NULL); |
2294 | msp->ms_freeingtree = range_tree_create(NULL, msp, | |
2295 | &msp->ms_lock); | |
2296 | ||
2297 | ASSERT3P(msp->ms_freedtree, ==, NULL); | |
2298 | msp->ms_freedtree = range_tree_create(NULL, msp, | |
2299 | &msp->ms_lock); | |
2300 | ||
1c27024e | 2301 | for (int t = 0; t < TXG_DEFER_SIZE; t++) { |
93cf2076 | 2302 | ASSERT(msp->ms_defertree[t] == NULL); |
e51be066 | 2303 | |
93cf2076 GW |
2304 | msp->ms_defertree[t] = range_tree_create(NULL, msp, |
2305 | &msp->ms_lock); | |
2306 | } | |
428870ff | 2307 | |
93cf2076 | 2308 | vdev_space_update(vd, 0, 0, msp->ms_size); |
34dc7c2f BB |
2309 | } |
2310 | ||
93cf2076 GW |
2311 | defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE]; |
2312 | ||
1c27024e | 2313 | uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) - |
4e21fd06 DB |
2314 | metaslab_class_get_alloc(spa_normal_class(spa)); |
2315 | if (free_space <= spa_get_slop_space(spa)) { | |
2316 | defer_allowed = B_FALSE; | |
2317 | } | |
2318 | ||
2319 | defer_delta = 0; | |
93cf2076 | 2320 | alloc_delta = space_map_alloc_delta(msp->ms_sm); |
4e21fd06 | 2321 | if (defer_allowed) { |
258553d3 | 2322 | defer_delta = range_tree_space(msp->ms_freedtree) - |
4e21fd06 DB |
2323 | range_tree_space(*defer_tree); |
2324 | } else { | |
2325 | defer_delta -= range_tree_space(*defer_tree); | |
2326 | } | |
428870ff BB |
2327 | |
2328 | vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0); | |
34dc7c2f | 2329 | |
34dc7c2f | 2330 | /* |
93cf2076 | 2331 | * If there's a metaslab_load() in progress, wait for it to complete |
34dc7c2f | 2332 | * so that we have a consistent view of the in-core space map. |
34dc7c2f | 2333 | */ |
93cf2076 | 2334 | metaslab_load_wait(msp); |
c2e42f9d GW |
2335 | |
2336 | /* | |
93cf2076 GW |
2337 | * Move the frees from the defer_tree back to the free |
2338 | * range tree (if it's loaded). Swap the freedtree and the | |
2339 | * defer_tree -- this is safe to do because we've just emptied out | |
2340 | * the defer_tree. | |
c2e42f9d | 2341 | */ |
93cf2076 GW |
2342 | range_tree_vacate(*defer_tree, |
2343 | msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree); | |
4e21fd06 | 2344 | if (defer_allowed) { |
258553d3 | 2345 | range_tree_swap(&msp->ms_freedtree, defer_tree); |
4e21fd06 | 2346 | } else { |
258553d3 | 2347 | range_tree_vacate(msp->ms_freedtree, |
4e21fd06 DB |
2348 | msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree); |
2349 | } | |
34dc7c2f | 2350 | |
93cf2076 | 2351 | space_map_update(msp->ms_sm); |
34dc7c2f | 2352 | |
428870ff BB |
2353 | msp->ms_deferspace += defer_delta; |
2354 | ASSERT3S(msp->ms_deferspace, >=, 0); | |
93cf2076 | 2355 | ASSERT3S(msp->ms_deferspace, <=, msp->ms_size); |
428870ff BB |
2356 | if (msp->ms_deferspace != 0) { |
2357 | /* | |
2358 | * Keep syncing this metaslab until all deferred frees | |
2359 | * are back in circulation. | |
2360 | */ | |
2361 | vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); | |
2362 | } | |
2363 | ||
4e21fd06 DB |
2364 | /* |
2365 | * Calculate the new weights before unloading any metaslabs. | |
2366 | * This will give us the most accurate weighting. | |
2367 | */ | |
2368 | metaslab_group_sort(mg, msp, metaslab_weight(msp)); | |
2369 | ||
2370 | /* | |
2371 | * If the metaslab is loaded and we've not tried to load or allocate | |
2372 | * from it in 'metaslab_unload_delay' txgs, then unload it. | |
2373 | */ | |
2374 | if (msp->ms_loaded && | |
2375 | msp->ms_selected_txg + metaslab_unload_delay < txg) { | |
2376 | ||
1c27024e | 2377 | for (int t = 1; t < TXG_CONCURRENT_STATES; t++) { |
93cf2076 GW |
2378 | VERIFY0(range_tree_space( |
2379 | msp->ms_alloctree[(txg + t) & TXG_MASK])); | |
2380 | } | |
34dc7c2f | 2381 | |
93cf2076 GW |
2382 | if (!metaslab_debug_unload) |
2383 | metaslab_unload(msp); | |
34dc7c2f BB |
2384 | } |
2385 | ||
34dc7c2f BB |
2386 | mutex_exit(&msp->ms_lock); |
2387 | } | |
2388 | ||
428870ff BB |
2389 | void |
2390 | metaslab_sync_reassess(metaslab_group_t *mg) | |
2391 | { | |
1be627f5 | 2392 | metaslab_group_alloc_update(mg); |
f3a7f661 | 2393 | mg->mg_fragmentation = metaslab_group_fragmentation(mg); |
6d974228 | 2394 | |
428870ff | 2395 | /* |
93cf2076 | 2396 | * Preload the next potential metaslabs |
428870ff | 2397 | */ |
93cf2076 | 2398 | metaslab_group_preload(mg); |
428870ff BB |
2399 | } |
2400 | ||
34dc7c2f BB |
2401 | static uint64_t |
2402 | metaslab_distance(metaslab_t *msp, dva_t *dva) | |
2403 | { | |
2404 | uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift; | |
2405 | uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift; | |
93cf2076 | 2406 | uint64_t start = msp->ms_id; |
34dc7c2f BB |
2407 | |
2408 | if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva)) | |
2409 | return (1ULL << 63); | |
2410 | ||
2411 | if (offset < start) | |
2412 | return ((start - offset) << ms_shift); | |
2413 | if (offset > start) | |
2414 | return ((offset - start) << ms_shift); | |
2415 | return (0); | |
2416 | } | |
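/*
 * Standalone sketch of the distance computation above, assuming 128G
 * metaslabs (ms_shift == 37) and a DVA on the same vdev; offset and
 * start are in metaslab-sized units, as in metaslab_distance().
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t ms_shift = 37;			/* 128G metaslabs */
	uint64_t offset = 12, start = 16;	/* metaslab-sized units */
	uint64_t distance;

	if (offset < start)
		distance = (start - offset) << ms_shift;
	else
		distance = (offset - start) << ms_shift;

	printf("distance = %lluG\n", (unsigned long long)(distance >> 30));
	return (0);	/* prints: distance = 512G */
}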
2417 | ||
4e21fd06 DB |
2418 | /* |
2419 | * ========================================================================== | |
2420 | * Metaslab allocation tracing facility | |
2421 | * ========================================================================== | |
2422 | */ | |
2423 | #ifdef _METASLAB_TRACING | |
2424 | kstat_t *metaslab_trace_ksp; | |
2425 | kstat_named_t metaslab_trace_over_limit; | |
2426 | ||
2427 | void | |
2428 | metaslab_alloc_trace_init(void) | |
2429 | { | |
2430 | ASSERT(metaslab_alloc_trace_cache == NULL); | |
2431 | metaslab_alloc_trace_cache = kmem_cache_create( | |
2432 | "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t), | |
2433 | 0, NULL, NULL, NULL, NULL, NULL, 0); | |
2434 | metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats", | |
2435 | "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL); | |
2436 | if (metaslab_trace_ksp != NULL) { | |
2437 | metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit; | |
2438 | kstat_named_init(&metaslab_trace_over_limit, | |
2439 | "metaslab_trace_over_limit", KSTAT_DATA_UINT64); | |
2440 | kstat_install(metaslab_trace_ksp); | |
2441 | } | |
2442 | } | |
2443 | ||
2444 | void | |
2445 | metaslab_alloc_trace_fini(void) | |
2446 | { | |
2447 | if (metaslab_trace_ksp != NULL) { | |
2448 | kstat_delete(metaslab_trace_ksp); | |
2449 | metaslab_trace_ksp = NULL; | |
2450 | } | |
2451 | kmem_cache_destroy(metaslab_alloc_trace_cache); | |
2452 | metaslab_alloc_trace_cache = NULL; | |
2453 | } | |
2454 | ||
2455 | /* | |
2456 | * Add an allocation trace element to the allocation tracing list. | |
2457 | */ | |
2458 | static void | |
2459 | metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg, | |
2460 | metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset) | |
2461 | { | |
2462 | metaslab_alloc_trace_t *mat; | |
2463 | ||
2464 | if (!metaslab_trace_enabled) | |
2465 | return; | |
2466 | ||
2467 | /* | |
2468 | * When the tracing list reaches its maximum we remove | |
2469 | * the second element in the list before adding a new one. | |
2470 | * By removing the second element we preserve the original | |
2471 | * entry as a clue to what allocation steps have already been | |
2472 | * performed. | |
2473 | */ | |
2474 | if (zal->zal_size == metaslab_trace_max_entries) { | |
2475 | metaslab_alloc_trace_t *mat_next; | |
2476 | #ifdef DEBUG | |
2477 | panic("too many entries in allocation list"); | |
2478 | #endif | |
2479 | atomic_inc_64(&metaslab_trace_over_limit.value.ui64); | |
2480 | zal->zal_size--; | |
2481 | mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list)); | |
2482 | list_remove(&zal->zal_list, mat_next); | |
2483 | kmem_cache_free(metaslab_alloc_trace_cache, mat_next); | |
2484 | } | |
2485 | ||
2486 | mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP); | |
2487 | list_link_init(&mat->mat_list_node); | |
2488 | mat->mat_mg = mg; | |
2489 | mat->mat_msp = msp; | |
2490 | mat->mat_size = psize; | |
2491 | mat->mat_dva_id = dva_id; | |
2492 | mat->mat_offset = offset; | |
2493 | mat->mat_weight = 0; | |
2494 | ||
2495 | if (msp != NULL) | |
2496 | mat->mat_weight = msp->ms_weight; | |
2497 | ||
2498 | /* | |
2499 | * The list is part of the zio so locking is not required. Only | |
2500 | * a single thread will perform allocations for a given zio. | |
2501 | */ | |
2502 | list_insert_tail(&zal->zal_list, mat); | |
2503 | zal->zal_size++; | |
2504 | ||
2505 | ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries); | |
2506 | } | |
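/*
 * Standalone sketch of the trimming policy above: when the list is
 * full, the second-oldest entry is dropped so the very first step of
 * the allocation stays visible. A plain array stands in for the kernel
 * list and kmem cache machinery, and the four-entry cap is invented
 * for brevity.
 */
#include <stdio.h>

#define	MAX_ENTRIES	4

int
main(void)
{
	int trace[MAX_ENTRIES];
	int size = 0;

	for (int step = 1; step <= 6; step++) {
		if (size == MAX_ENTRIES) {
			/* Drop trace[1]; trace[0] remains as the clue. */
			for (int i = 1; i < size - 1; i++)
				trace[i] = trace[i + 1];
			size--;
		}
		trace[size++] = step;
	}

	for (int i = 0; i < size; i++)
		printf("%d ", trace[i]);	/* prints: 1 4 5 6 */
	printf("\n");
	return (0);
}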
2507 | ||
2508 | void | |
2509 | metaslab_trace_init(zio_alloc_list_t *zal) | |
2510 | { | |
2511 | list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t), | |
2512 | offsetof(metaslab_alloc_trace_t, mat_list_node)); | |
2513 | zal->zal_size = 0; | |
2514 | } | |
2515 | ||
2516 | void | |
2517 | metaslab_trace_fini(zio_alloc_list_t *zal) | |
2518 | { | |
2519 | metaslab_alloc_trace_t *mat; | |
2520 | ||
2521 | while ((mat = list_remove_head(&zal->zal_list)) != NULL) | |
2522 | kmem_cache_free(metaslab_alloc_trace_cache, mat); | |
2523 | list_destroy(&zal->zal_list); | |
2524 | zal->zal_size = 0; | |
2525 | } | |
2526 | #else | |
2527 | ||
2528 | #define metaslab_trace_add(zal, mg, msp, psize, id, off) | |
2529 | ||
2530 | void | |
2531 | metaslab_alloc_trace_init(void) | |
2532 | { | |
2533 | } | |
2534 | ||
2535 | void | |
2536 | metaslab_alloc_trace_fini(void) | |
2537 | { | |
2538 | } | |
2539 | ||
2540 | void | |
2541 | metaslab_trace_init(zio_alloc_list_t *zal) | |
2542 | { | |
2543 | } | |
2544 | ||
2545 | void | |
2546 | metaslab_trace_fini(zio_alloc_list_t *zal) | |
2547 | { | |
2548 | } | |
2549 | ||
2550 | #endif /* _METASLAB_TRACING */ | |
2551 | ||
3dfb57a3 DB |
2552 | /* |
2553 | * ========================================================================== | |
2554 | * Metaslab block operations | |
2555 | * ========================================================================== | |
2556 | */ | |
2557 | ||
2558 | static void | |
2559 | metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags) | |
2560 | { | |
3dfb57a3 DB |
2561 | if (!(flags & METASLAB_ASYNC_ALLOC) || |
2562 | flags & METASLAB_DONT_THROTTLE) | |
2563 | return; | |
2564 | ||
1c27024e | 2565 | metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; |
3dfb57a3 DB |
2566 | if (!mg->mg_class->mc_alloc_throttle_enabled) |
2567 | return; | |
2568 | ||
2569 | (void) refcount_add(&mg->mg_alloc_queue_depth, tag); | |
2570 | } | |
2571 | ||
2572 | void | |
2573 | metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags) | |
2574 | { | |
3dfb57a3 DB |
2575 | if (!(flags & METASLAB_ASYNC_ALLOC) || |
2576 | flags & METASLAB_DONT_THROTTLE) | |
2577 | return; | |
2578 | ||
1c27024e | 2579 | metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; |
3dfb57a3 DB |
2580 | if (!mg->mg_class->mc_alloc_throttle_enabled) |
2581 | return; | |
2582 | ||
2583 | (void) refcount_remove(&mg->mg_alloc_queue_depth, tag); | |
2584 | } | |
2585 | ||
2586 | void | |
2587 | metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag) | |
2588 | { | |
2589 | #ifdef ZFS_DEBUG | |
2590 | const dva_t *dva = bp->blk_dva; | |
2591 | int ndvas = BP_GET_NDVAS(bp); | |
3dfb57a3 | 2592 | |
1c27024e | 2593 | for (int d = 0; d < ndvas; d++) { |
3dfb57a3 DB |
2594 | uint64_t vdev = DVA_GET_VDEV(&dva[d]); |
2595 | metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; | |
2596 | VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag)); | |
2597 | } | |
2598 | #endif | |
2599 | } | |
2600 | ||
34dc7c2f | 2601 | static uint64_t |
4e21fd06 DB |
2602 | metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg) |
2603 | { | |
2604 | uint64_t start; | |
2605 | range_tree_t *rt = msp->ms_tree; | |
2606 | metaslab_class_t *mc = msp->ms_group->mg_class; | |
2607 | ||
2608 | VERIFY(!msp->ms_condensing); | |
2609 | ||
2610 | start = mc->mc_ops->msop_alloc(msp, size); | |
2611 | if (start != -1ULL) { | |
2612 | metaslab_group_t *mg = msp->ms_group; | |
2613 | vdev_t *vd = mg->mg_vd; | |
2614 | ||
2615 | VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift)); | |
2616 | VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); | |
2617 | VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size); | |
2618 | range_tree_remove(rt, start, size); | |
2619 | ||
2620 | if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0) | |
2621 | vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg); | |
2622 | ||
2623 | range_tree_add(msp->ms_alloctree[txg & TXG_MASK], start, size); | |
2624 | ||
2625 | /* Track the last successful allocation */ | |
2626 | msp->ms_alloc_txg = txg; | |
2627 | metaslab_verify_space(msp, txg); | |
2628 | } | |
2629 | ||
2630 | /* | |
2631 | * Now that we've attempted the allocation we need to update the | |
2632 | * metaslab's maximum block size since it may have changed. | |
2633 | */ | |
2634 | msp->ms_max_size = metaslab_block_maxsize(msp); | |
2635 | return (start); | |
2636 | } | |
2637 | ||
2638 | static uint64_t | |
2639 | metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, | |
2640 | uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d) | |
34dc7c2f BB |
2641 | { |
2642 | metaslab_t *msp = NULL; | |
2643 | uint64_t offset = -1ULL; | |
34dc7c2f BB |
2644 | uint64_t activation_weight; |
2645 | uint64_t target_distance; | |
2646 | int i; | |
2647 | ||
2648 | activation_weight = METASLAB_WEIGHT_PRIMARY; | |
9babb374 BB |
2649 | for (i = 0; i < d; i++) { |
2650 | if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { | |
34dc7c2f | 2651 | activation_weight = METASLAB_WEIGHT_SECONDARY; |
9babb374 BB |
2652 | break; |
2653 | } | |
2654 | } | |
34dc7c2f | 2655 | |
1c27024e | 2656 | metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP); |
4e21fd06 DB |
2657 | search->ms_weight = UINT64_MAX; |
2658 | search->ms_start = 0; | |
34dc7c2f | 2659 | for (;;) { |
9babb374 | 2660 | boolean_t was_active; |
4e21fd06 DB |
2661 | avl_tree_t *t = &mg->mg_metaslab_tree; |
2662 | avl_index_t idx; | |
9babb374 | 2663 | |
34dc7c2f | 2664 | mutex_enter(&mg->mg_lock); |
4e21fd06 DB |
2665 | |
2666 | /* | |
2667 | * Find the metaslab with the highest weight that is less | |
2668 | * than what we've already tried. In the common case, this | |
2669 | * means that we will examine each metaslab at most once. | |
2670 | * Note that concurrent callers could reorder metaslabs | |
2671 | * by activation/passivation once we have dropped the mg_lock. | |
2672 | * If a metaslab is activated by another thread, and we fail | |
2673 | * to allocate from the metaslab we have selected, we may | |
2674 | * not try the newly-activated metaslab, and instead activate | |
2675 | * another metaslab. This is not optimal, but generally | |
2676 | * does not cause any problems (a possible exception being | |
2677 | * if every metaslab is completely full except for the | |
2678 | * newly-activated metaslab which we fail to examine). | |
2679 | */ | |
2680 | msp = avl_find(t, search, &idx); | |
2681 | if (msp == NULL) | |
2682 | msp = avl_nearest(t, idx, AVL_AFTER); | |
2683 | for (; msp != NULL; msp = AVL_NEXT(t, msp)) { | |
2684 | ||
2685 | if (!metaslab_should_allocate(msp, asize)) { | |
2686 | metaslab_trace_add(zal, mg, msp, asize, d, | |
2687 | TRACE_TOO_SMALL); | |
2688 | continue; | |
34dc7c2f | 2689 | } |
7a614407 GW |
2690 | |
2691 | /* | |
2692 | * If the selected metaslab is condensing, skip it. | |
2693 | */ | |
93cf2076 | 2694 | if (msp->ms_condensing) |
7a614407 GW |
2695 | continue; |
2696 | ||
9babb374 | 2697 | was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; |
34dc7c2f BB |
2698 | if (activation_weight == METASLAB_WEIGHT_PRIMARY) |
2699 | break; | |
2700 | ||
2701 | target_distance = min_distance + | |
93cf2076 GW |
2702 | (space_map_allocated(msp->ms_sm) != 0 ? 0 : |
2703 | min_distance >> 1); | |
34dc7c2f | 2704 | |
4e21fd06 | 2705 | for (i = 0; i < d; i++) { |
34dc7c2f BB |
2706 | if (metaslab_distance(msp, &dva[i]) < |
2707 | target_distance) | |
2708 | break; | |
4e21fd06 | 2709 | } |
34dc7c2f BB |
2710 | if (i == d) |
2711 | break; | |
2712 | } | |
2713 | mutex_exit(&mg->mg_lock); | |
4e21fd06 DB |
2714 | if (msp == NULL) { |
2715 | kmem_free(search, sizeof (*search)); | |
34dc7c2f | 2716 | return (-1ULL); |
4e21fd06 DB |
2717 | } |
2718 | search->ms_weight = msp->ms_weight; | |
2719 | search->ms_start = msp->ms_start + 1; | |
34dc7c2f | 2720 | |
ac72fac3 GW |
2721 | mutex_enter(&msp->ms_lock); |
2722 | ||
34dc7c2f BB |
2723 | /* |
2724 | * Ensure that the metaslab we have selected is still | |
2725 | * capable of handling our request. It's possible that | |
2726 | * another thread may have changed the weight while we | |
4e21fd06 DB |
2727 | * were blocked on the metaslab lock. We check the |
2728 | * active status first to see if we need to reselect | |
2729 | * a new metaslab. | |
34dc7c2f | 2730 | */ |
4e21fd06 | 2731 | if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) { |
34dc7c2f BB |
2732 | mutex_exit(&msp->ms_lock); |
2733 | continue; | |
2734 | } | |
2735 | ||
2736 | if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) && | |
2737 | activation_weight == METASLAB_WEIGHT_PRIMARY) { | |
2738 | metaslab_passivate(msp, | |
2739 | msp->ms_weight & ~METASLAB_ACTIVE_MASK); | |
2740 | mutex_exit(&msp->ms_lock); | |
2741 | continue; | |
2742 | } | |
2743 | ||
6d974228 | 2744 | if (metaslab_activate(msp, activation_weight) != 0) { |
34dc7c2f BB |
2745 | mutex_exit(&msp->ms_lock); |
2746 | continue; | |
2747 | } | |
4e21fd06 DB |
2748 | msp->ms_selected_txg = txg; |
2749 | ||
2750 | /* | |
2751 | * Now that we have the lock, recheck to see if we should | |
2752 | * continue to use this metaslab for this allocation. The | |
2753 | * metaslab is now loaded so metaslab_should_allocate() can | |
2754 | * accurately determine if the allocation attempt should | |
2755 | * proceed. | |
2756 | */ | |
2757 | if (!metaslab_should_allocate(msp, asize)) { | |
2758 | /* Passivate this metaslab and select a new one. */ | |
2759 | metaslab_trace_add(zal, mg, msp, asize, d, | |
2760 | TRACE_TOO_SMALL); | |
2761 | goto next; | |
2762 | } | |
2763 | ||
34dc7c2f | 2764 | |
7a614407 GW |
2765 | /* |
2766 | * If this metaslab is currently condensing then pick again as | |
2767 | * we can't manipulate this metaslab until it's committed | |
2768 | * to disk. | |
2769 | */ | |
93cf2076 | 2770 | if (msp->ms_condensing) { |
4e21fd06 DB |
2771 | metaslab_trace_add(zal, mg, msp, asize, d, |
2772 | TRACE_CONDENSING); | |
7a614407 GW |
2773 | mutex_exit(&msp->ms_lock); |
2774 | continue; | |
2775 | } | |
2776 | ||
4e21fd06 DB |
2777 | offset = metaslab_block_alloc(msp, asize, txg); |
2778 | metaslab_trace_add(zal, mg, msp, asize, d, offset); | |
2779 | ||
2780 | if (offset != -1ULL) { | |
2781 | /* Proactively passivate the metaslab, if needed */ | |
2782 | metaslab_segment_may_passivate(msp); | |
34dc7c2f | 2783 | break; |
4e21fd06 DB |
2784 | } |
2785 | next: | |
2786 | ASSERT(msp->ms_loaded); | |
2787 | ||
2788 | /* | |
2789 | * We were unable to allocate from this metaslab so determine | |
2790 | * a new weight for this metaslab. Now that we have loaded | |
2791 | * the metaslab we can provide a better hint to the metaslab | |
2792 | * selector. | |
2793 | * | |
2794 | * For space-based metaslabs, we use the maximum block size. | |
2795 | * This information is only available when the metaslab | |
2796 | * is loaded and is more accurate than the generic free | |
2797 | * space weight that was calculated by metaslab_weight(). | |
2798 | * This information allows us to quickly compare the maximum | |
2799 | * available allocation in the metaslab to the allocation | |
2800 | * size being requested. | |
2801 | * | |
2802 | * For segment-based metaslabs, determine the new weight | |
2803 | * based on the highest bucket in the range tree. We | |
2804 | * explicitly use the loaded segment weight (i.e. the range | |
2805 | * tree histogram) since it contains the space that is | |
2806 | * currently available for allocation and is accurate | |
2807 | * even within a sync pass. | |
2808 | */ | |
2809 | if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) { | |
2810 | uint64_t weight = metaslab_block_maxsize(msp); | |
2811 | WEIGHT_SET_SPACEBASED(weight); | |
2812 | metaslab_passivate(msp, weight); | |
2813 | } else { | |
2814 | metaslab_passivate(msp, | |
2815 | metaslab_weight_from_range_tree(msp)); | |
2816 | } | |
34dc7c2f | 2817 | |
4e21fd06 DB |
2818 | /* |
2819 | * We have just failed an allocation attempt, check | |
2820 | * that metaslab_should_allocate() agrees. Otherwise, | |
2821 | * we may end up in an infinite loop retrying the same | |
2822 | * metaslab. | |
2823 | */ | |
2824 | ASSERT(!metaslab_should_allocate(msp, asize)); | |
34dc7c2f BB |
2825 | mutex_exit(&msp->ms_lock); |
2826 | } | |
4e21fd06 DB |
2827 | mutex_exit(&msp->ms_lock); |
2828 | kmem_free(search, sizeof (*search)); | |
2829 | return (offset); | |
2830 | } | |
34dc7c2f | 2831 | |
4e21fd06 DB |
2832 | static uint64_t |
2833 | metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal, | |
2834 | uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d) | |
2835 | { | |
2836 | uint64_t offset; | |
2837 | ASSERT(mg->mg_initialized); | |
34dc7c2f | 2838 | |
4e21fd06 DB |
2839 | offset = metaslab_group_alloc_normal(mg, zal, asize, txg, |
2840 | min_distance, dva, d); | |
34dc7c2f | 2841 | |
4e21fd06 DB |
2842 | mutex_enter(&mg->mg_lock); |
2843 | if (offset == -1ULL) { | |
2844 | mg->mg_failed_allocations++; | |
2845 | metaslab_trace_add(zal, mg, NULL, asize, d, | |
2846 | TRACE_GROUP_FAILURE); | |
2847 | if (asize == SPA_GANGBLOCKSIZE) { | |
2848 | /* | |
2849 | * This metaslab group was unable to allocate | |
2850 | * the minimum gang block size so it must be out of | |
2851 | * space. We must notify the allocation throttle | |
2852 | * to start skipping allocation attempts to this | |
2853 | * metaslab group until more space becomes available. | |
2854 | * Note: this failure cannot be caused by the | |
2855 | * allocation throttle since the allocation throttle | |
2856 | * is only responsible for skipping devices and | |
2857 | * not failing block allocations. | |
2858 | */ | |
2859 | mg->mg_no_free_space = B_TRUE; | |
2860 | } | |
2861 | } | |
2862 | mg->mg_allocations++; | |
2863 | mutex_exit(&mg->mg_lock); | |
34dc7c2f BB |
2864 | return (offset); |
2865 | } | |
2866 | ||
4e21fd06 DB |
2867 | /* |
2868 | * If we have to write a ditto block (i.e. more than one DVA for a given BP) | |
2869 | * on the same vdev as an existing DVA of this BP, then try to allocate it | |
2870 | * at least (vdev_asize / (2 ^ ditto_same_vdev_distance_shift)) away from the | |
2871 | * existing DVAs. | |
2872 | */ | |
2873 | int ditto_same_vdev_distance_shift = 3; | |
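/*
 * Illustrative numbers, assuming the default shift of 3: a 1 TB
 * top-level vdev yields a target separation of 128 GB between DVAs of
 * the same BP. metaslab_alloc_dva() below drops the requirement to 0
 * whenever it would not exceed a single metaslab.
 */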
2874 | ||
34dc7c2f BB |
2875 | /* |
2876 | * Allocate a block for the specified i/o. | |
2877 | */ | |
2878 | static int | |
2879 | metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, | |
4e21fd06 DB |
2880 | dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags, |
2881 | zio_alloc_list_t *zal) | |
34dc7c2f | 2882 | { |
920dd524 | 2883 | metaslab_group_t *mg, *fast_mg, *rotor; |
34dc7c2f | 2884 | vdev_t *vd; |
4e21fd06 | 2885 | boolean_t try_hard = B_FALSE; |
34dc7c2f BB |
2886 | |
2887 | ASSERT(!DVA_IS_VALID(&dva[d])); | |
2888 | ||
2889 | /* | |
2890 | * For testing, make some blocks above a certain size be gang blocks. | |
2891 | */ | |
4e21fd06 DB |
2892 | if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0) { |
2893 | metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG); | |
2e528b49 | 2894 | return (SET_ERROR(ENOSPC)); |
4e21fd06 | 2895 | } |
34dc7c2f BB |
2896 | |
2897 | /* | |
2898 | * Start at the rotor and loop through all mgs until we find something. | |
428870ff | 2899 | * Note that there's no locking on mc_rotor or mc_aliquot because |
34dc7c2f BB |
2900 | * nothing actually breaks if we miss a few updates -- we just won't |
2901 | * allocate quite as evenly. It all balances out over time. | |
2902 | * | |
2903 | * If we are doing ditto or log blocks, try to spread them across | |
2904 | * consecutive vdevs. If we're forced to reuse a vdev before we've | |
2905 | * allocated all of our ditto blocks, then try and spread them out on | |
2906 | * that vdev as much as possible. If it turns out to not be possible, | |
2907 | * gradually lower our standards until anything becomes acceptable. | |
2908 | * Also, allocating on consecutive vdevs (as opposed to random vdevs) | |
2909 | * gives us hope of containing our fault domains to something we're | |
2910 | * able to reason about. Otherwise, any two top-level vdev failures | |
2911 | * will guarantee the loss of data. With consecutive allocation, | |
2912 | * only two adjacent top-level vdev failures will result in data loss. | |
2913 | * | |
2914 | * If we are doing gang blocks (hintdva is non-NULL), try to keep | |
2915 | * ourselves on the same vdev as our gang block header. That | |
2916 | * way, we can hope for locality in vdev_cache, plus it makes our | |
2917 | * fault domains something tractable. | |
2918 | */ | |
2919 | if (hintdva) { | |
2920 | vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d])); | |
428870ff BB |
2921 | |
2922 | /* | |
2923 | * It's possible the vdev we're using as the hint no | |
2924 | * longer exists (i.e. removed). Consult the rotor when | |
2925 | * all else fails. | |
2926 | */ | |
2927 | if (vd != NULL) { | |
34dc7c2f | 2928 | mg = vd->vdev_mg; |
428870ff BB |
2929 | |
2930 | if (flags & METASLAB_HINTBP_AVOID && | |
2931 | mg->mg_next != NULL) | |
2932 | mg = mg->mg_next; | |
2933 | } else { | |
2934 | mg = mc->mc_rotor; | |
2935 | } | |
34dc7c2f BB |
2936 | } else if (d != 0) { |
2937 | vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1])); | |
2938 | mg = vd->vdev_mg->mg_next; | |
920dd524 ED |
2939 | } else if (flags & METASLAB_FASTWRITE) { |
2940 | mg = fast_mg = mc->mc_rotor; | |
2941 | ||
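		/*
		 * Walk every metaslab group once and start from the one
		 * whose vdev has the fewest fastwrite bytes pending.
		 */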
2942 | do { | |
2943 | if (fast_mg->mg_vd->vdev_pending_fastwrite < | |
2944 | mg->mg_vd->vdev_pending_fastwrite) | |
2945 | mg = fast_mg; | |
2946 | } while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor); | |
2947 | ||
34dc7c2f BB |
2948 | } else { |
2949 | mg = mc->mc_rotor; | |
2950 | } | |
2951 | ||
2952 | /* | |
428870ff BB |
2953 | * If the hint put us into the wrong metaslab class, or into a |
2954 | * metaslab group that has been passivated, just follow the rotor. | |
34dc7c2f | 2955 | */ |
428870ff | 2956 | if (mg->mg_class != mc || mg->mg_activation_count <= 0) |
34dc7c2f BB |
2957 | mg = mc->mc_rotor; |
2958 | ||
2959 | rotor = mg; | |
2960 | top: | |
34dc7c2f | 2961 | do { |
4e21fd06 | 2962 | boolean_t allocatable; |
428870ff | 2963 | |
3dfb57a3 | 2964 | ASSERT(mg->mg_activation_count == 1); |
34dc7c2f | 2965 | vd = mg->mg_vd; |
fb5f0bc8 | 2966 | |
34dc7c2f | 2967 | /* |
b128c09f | 2968 | * Don't allocate from faulted devices. |
34dc7c2f | 2969 | */ |
4e21fd06 | 2970 | if (try_hard) { |
fb5f0bc8 BB |
2971 | spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER); |
2972 | allocatable = vdev_allocatable(vd); | |
2973 | spa_config_exit(spa, SCL_ZIO, FTAG); | |
2974 | } else { | |
2975 | allocatable = vdev_allocatable(vd); | |
2976 | } | |
ac72fac3 GW |
2977 | |
2978 | /* | |
2979 | * Determine if the selected metaslab group is eligible | |
3dfb57a3 DB |
2980 | * for allocations. If we're ganging then don't allow |
2981 | * this metaslab group to skip allocations since that would | |
2982 | * inadvertently return ENOSPC and suspend the pool | |
ac72fac3 GW |
2983 | * even though space is still available. |
2984 | */ | |
4e21fd06 | 2985 | if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) { |
3dfb57a3 DB |
2986 | allocatable = metaslab_group_allocatable(mg, rotor, |
2987 | psize); | |
2988 | } | |
ac72fac3 | 2989 | |
4e21fd06 DB |
2990 | if (!allocatable) { |
2991 | metaslab_trace_add(zal, mg, NULL, psize, d, | |
2992 | TRACE_NOT_ALLOCATABLE); | |
34dc7c2f | 2993 | goto next; |
4e21fd06 | 2994 | } |
fb5f0bc8 | 2995 | |
3dfb57a3 DB |
2996 | ASSERT(mg->mg_initialized); |
2997 | ||
34dc7c2f | 2998 | /* |
4e21fd06 DB |
2999 | * Avoid writing single-copy data to a failing, |
3000 | * non-redundant vdev, unless we've already tried all | |
3001 | * other vdevs. | |
34dc7c2f BB |
3002 | */ |
3003 | if ((vd->vdev_stat.vs_write_errors > 0 || | |
3004 | vd->vdev_state < VDEV_STATE_HEALTHY) && | |
4e21fd06 DB |
3005 | d == 0 && !try_hard && vd->vdev_children == 0) { |
3006 | metaslab_trace_add(zal, mg, NULL, psize, d, | |
3007 | TRACE_VDEV_ERROR); | |
34dc7c2f BB |
3008 | goto next; |
3009 | } | |
3010 | ||
3011 | ASSERT(mg->mg_class == mc); | |
3012 | ||
4e21fd06 DB |
3013 | /* |
3014 | * If we don't need to try hard, then require that the | |
3015 | * block be 1/8th of the device away from any other DVAs | |
3016 | * in this BP. If we are trying hard, allow any offset | |
3017 | * to be used (distance=0). | |
3018 | */ | |
1c27024e | 3019 | uint64_t distance = 0; |
4e21fd06 DB |
3020 | if (!try_hard) { |
3021 | distance = vd->vdev_asize >> | |
3022 | ditto_same_vdev_distance_shift; | |
3023 | if (distance <= (1ULL << vd->vdev_ms_shift)) | |
3024 | distance = 0; | |
3025 | } | |
34dc7c2f | 3026 | |
1c27024e | 3027 | uint64_t asize = vdev_psize_to_asize(vd, psize); |
34dc7c2f BB |
3028 | ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0); |
3029 | ||
1c27024e DB |
3030 | uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg, |
3031 | distance, dva, d); | |
3dfb57a3 | 3032 | |
34dc7c2f BB |
3033 | if (offset != -1ULL) { |
3034 | /* | |
3035 | * If we've just selected this metaslab group, | |
3036 | * figure out whether the corresponding vdev is | |
3037 | * over- or under-used relative to the pool, | |
3038 | * and set an allocation bias to even it out. | |
bb3250d0 ED |
3039 | * |
3040 | * Bias is also used to compensate for unequally | |
3041 | * sized vdevs so that space is allocated fairly. | |
34dc7c2f | 3042 | */ |
f3a7f661 | 3043 | if (mc->mc_aliquot == 0 && metaslab_bias_enabled) { |
34dc7c2f | 3044 | vdev_stat_t *vs = &vd->vdev_stat; |
bb3250d0 ED |
3045 | int64_t vs_free = vs->vs_space - vs->vs_alloc; |
3046 | int64_t mc_free = mc->mc_space - mc->mc_alloc; | |
3047 | int64_t ratio; | |
34dc7c2f BB |
3048 | |
3049 | /* | |
6d974228 GW |
3050 | * Calculate how much more or less we should |
3051 | * try to allocate from this device during | |
3052 | * this iteration around the rotor. | |
6d974228 | 3053 | * |
bb3250d0 ED |
3054 | * This basically introduces a zero-centered |
3055 | * bias towards the devices with the most | |
3056 | * free space, while compensating for vdev | |
3057 | * size differences. | |
3058 | * | |
3059 | * Examples: | |
3060 | * vdev V1 = 16M/128M | |
3061 | * vdev V2 = 16M/128M | |
3062 | * ratio(V1) = 100% ratio(V2) = 100% | |
3063 | * | |
3064 | * vdev V1 = 16M/128M | |
3065 | * vdev V2 = 64M/128M | |
3066 | * ratio(V1) = 127% ratio(V2) = 72% | |
6d974228 | 3067 | * |
bb3250d0 ED |
3068 | * vdev V1 = 16M/128M |
3069 | * vdev V2 = 64M/512M | |
3070 | * ratio(V1) = 40% ratio(V2) = 160% | |
34dc7c2f | 3071 | */ |
bb3250d0 ED |
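				/*
				 * Working the middle example through
				 * the formula below: vs_free(V1) = 112M,
				 * mc_free = 176M, mc_alloc_groups = 2,
				 * so ratio(V1) =
				 * 112M * 2 * 100 / 176M ~= 127%.
				 */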
3072 | ratio = (vs_free * mc->mc_alloc_groups * 100) / |
3073 | (mc_free + 1); | |
3074 | mg->mg_bias = ((ratio - 100) * | |
6d974228 | 3075 | (int64_t)mg->mg_aliquot) / 100; |
f3a7f661 GW |
3076 | } else if (!metaslab_bias_enabled) { |
3077 | mg->mg_bias = 0; | |
34dc7c2f BB |
3078 | } |
3079 | ||
920dd524 ED |
3080 | if ((flags & METASLAB_FASTWRITE) || |
3081 | atomic_add_64_nv(&mc->mc_aliquot, asize) >= | |
34dc7c2f BB |
3082 | mg->mg_aliquot + mg->mg_bias) { |
3083 | mc->mc_rotor = mg->mg_next; | |
428870ff | 3084 | mc->mc_aliquot = 0; |
34dc7c2f BB |
3085 | } |
3086 | ||
3087 | DVA_SET_VDEV(&dva[d], vd->vdev_id); | |
3088 | DVA_SET_OFFSET(&dva[d], offset); | |
e3e7cf60 D |
3089 | DVA_SET_GANG(&dva[d], |
3090 | ((flags & METASLAB_GANG_HEADER) ? 1 : 0)); | |
34dc7c2f BB |
3091 | DVA_SET_ASIZE(&dva[d], asize); |
3092 | ||
920dd524 ED |
3093 | if (flags & METASLAB_FASTWRITE) { |
3094 | atomic_add_64(&vd->vdev_pending_fastwrite, | |
3095 | psize); | |
920dd524 ED |
3096 | } |
3097 | ||
34dc7c2f BB |
3098 | return (0); |
3099 | } | |
3100 | next: | |
3101 | mc->mc_rotor = mg->mg_next; | |
428870ff | 3102 | mc->mc_aliquot = 0; |
34dc7c2f BB |
3103 | } while ((mg = mg->mg_next) != rotor); |
3104 | ||
4e21fd06 DB |
3105 | /* |
3106 | * If we haven't tried hard, do so now. | |
3107 | */ | |
3108 | if (!try_hard) { | |
3109 | try_hard = B_TRUE; | |
fb5f0bc8 BB |
3110 | goto top; |
3111 | } | |
3112 | ||
34dc7c2f BB |
3113 | bzero(&dva[d], sizeof (dva_t)); |
3114 | ||
4e21fd06 | 3115 | metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC); |
2e528b49 | 3116 | return (SET_ERROR(ENOSPC)); |
34dc7c2f BB |
3117 | } |
3118 | ||
3119 | /* | |
3120 | * Free the block represented by DVA in the context of the specified | |
3121 | * transaction group. | |
3122 | */ | |
3123 | static void | |
3124 | metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now) | |
3125 | { | |
3126 | uint64_t vdev = DVA_GET_VDEV(dva); | |
3127 | uint64_t offset = DVA_GET_OFFSET(dva); | |
3128 | uint64_t size = DVA_GET_ASIZE(dva); | |
3129 | vdev_t *vd; | |
3130 | metaslab_t *msp; | |
3131 | ||
34dc7c2f BB |
3132 | if (txg > spa_freeze_txg(spa)) |
3133 | return; | |
3134 | ||
7d2868d5 | 3135 | if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) || |
34dc7c2f | 3136 | (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) { |
7d2868d5 BB |
3137 | zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu", |
3138 | (u_longlong_t)vdev, (u_longlong_t)offset, | |
3139 | (u_longlong_t)size); | |
34dc7c2f BB |
3140 | return; |
3141 | } | |
3142 | ||
3143 | msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; | |
3144 | ||
3145 | if (DVA_GET_GANG(dva)) | |
3146 | size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); | |
3147 | ||
3148 | mutex_enter(&msp->ms_lock); | |
3149 | ||
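	/*
	 * A "now" free returns the space straight to the in-core free
	 * tree; this is used when undoing an allocation made in the
	 * current txg. Otherwise the range is queued in ms_freeingtree
	 * and handled when the metaslab syncs.
	 */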
3150 | if (now) { | |
93cf2076 | 3151 | range_tree_remove(msp->ms_alloctree[txg & TXG_MASK], |
34dc7c2f | 3152 | offset, size); |
93cf2076 GW |
3153 | |
3154 | VERIFY(!msp->ms_condensing); | |
3155 | VERIFY3U(offset, >=, msp->ms_start); | |
3156 | VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size); | |
3157 | VERIFY3U(range_tree_space(msp->ms_tree) + size, <=, | |
3158 | msp->ms_size); | |
3159 | VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); | |
3160 | VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); | |
3161 | range_tree_add(msp->ms_tree, offset, size); | |
4e21fd06 | 3162 | msp->ms_max_size = metaslab_block_maxsize(msp); |
34dc7c2f | 3163 | } else { |
258553d3 TC |
3164 | VERIFY3U(txg, ==, spa->spa_syncing_txg); |
3165 | if (range_tree_space(msp->ms_freeingtree) == 0) | |
34dc7c2f | 3166 | vdev_dirty(vd, VDD_METASLAB, msp, txg); |
258553d3 | 3167 | range_tree_add(msp->ms_freeingtree, offset, size); |
34dc7c2f BB |
3168 | } |
3169 | ||
3170 | mutex_exit(&msp->ms_lock); | |
3171 | } | |
3172 | ||
3173 | /* | |
3174 | * Intent log support: upon opening the pool after a crash, notify the SPA | |
3175 | * of blocks that the intent log has allocated for immediate write, but | |
3176 | * which are still considered free by the SPA because the last transaction | |
3177 | * group didn't commit yet. | |
3178 | */ | |
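/*
 * (Reached via metaslab_claim() from the ZIL claim path at pool open;
 * the claim simply re-removes the range from the metaslab's free tree
 * so the block cannot be reallocated out from under the intent log.)
 */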
3179 | static int | |
3180 | metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) | |
3181 | { | |
3182 | uint64_t vdev = DVA_GET_VDEV(dva); | |
3183 | uint64_t offset = DVA_GET_OFFSET(dva); | |
3184 | uint64_t size = DVA_GET_ASIZE(dva); | |
3185 | vdev_t *vd; | |
3186 | metaslab_t *msp; | |
428870ff | 3187 | int error = 0; |
34dc7c2f BB |
3188 | |
3189 | ASSERT(DVA_IS_VALID(dva)); | |
3190 | ||
3191 | if ((vd = vdev_lookup_top(spa, vdev)) == NULL || | |
3192 | (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) | |
2e528b49 | 3193 | return (SET_ERROR(ENXIO)); |
34dc7c2f BB |
3194 | |
3195 | msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; | |
3196 | ||
3197 | if (DVA_GET_GANG(dva)) | |
3198 | size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); | |
3199 | ||
3200 | mutex_enter(&msp->ms_lock); | |
3201 | ||
93cf2076 | 3202 | if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) |
6d974228 | 3203 | error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY); |
428870ff | 3204 | |
93cf2076 | 3205 | if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size)) |
2e528b49 | 3206 | error = SET_ERROR(ENOENT); |
428870ff | 3207 | |
b128c09f | 3208 | if (error || txg == 0) { /* txg == 0 indicates dry run */ |
34dc7c2f BB |
3209 | mutex_exit(&msp->ms_lock); |
3210 | return (error); | |
3211 | } | |
3212 | ||
93cf2076 GW |
3213 | VERIFY(!msp->ms_condensing); |
3214 | VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); | |
3215 | VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); | |
3216 | VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size); | |
3217 | range_tree_remove(msp->ms_tree, offset, size); | |
b128c09f | 3218 | |
fb5f0bc8 | 3219 | if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */ |
93cf2076 | 3220 | if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0) |
b128c09f | 3221 | vdev_dirty(vd, VDD_METASLAB, msp, txg); |
93cf2076 | 3222 | range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size); |
b128c09f | 3223 | } |
34dc7c2f BB |
3224 | |
3225 | mutex_exit(&msp->ms_lock); | |
3226 | ||
3227 | return (0); | |
3228 | } | |
3229 | ||
3dfb57a3 DB |
3230 | /* |
3231 | * Reserve some allocation slots. The reservation system must be called | |
3232 | * before we call into the allocator. If there aren't any available slots | |
3233 | * then the I/O will be throttled until an I/O completes and its slots are | |
3234 | * freed up. The function returns true if it was successful in placing | |
3235 | * the reservation. | |
3236 | */ | |
3237 | boolean_t | |
3238 | metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio, | |
3239 | int flags) | |
3240 | { | |
3241 | uint64_t available_slots = 0; | |
3dfb57a3 DB |
3242 | boolean_t slot_reserved = B_FALSE; |
3243 | ||
3244 | ASSERT(mc->mc_alloc_throttle_enabled); | |
3245 | mutex_enter(&mc->mc_lock); | |
3246 | ||
1c27024e | 3247 | uint64_t reserved_slots = refcount_count(&mc->mc_alloc_slots); |
3dfb57a3 DB |
3248 | if (reserved_slots < mc->mc_alloc_max_slots) |
3249 | available_slots = mc->mc_alloc_max_slots - reserved_slots; | |
3250 | ||
3251 | if (slots <= available_slots || GANG_ALLOCATION(flags)) { | |
3dfb57a3 DB |
3252 | /* |
3253 | * We reserve the slots individually so that we can unreserve | |
3254 | * them individually when an I/O completes. | |
3255 | */ | |
1c27024e | 3256 | for (int d = 0; d < slots; d++) { |
3dfb57a3 DB |
3257 | reserved_slots = refcount_add(&mc->mc_alloc_slots, zio); |
3258 | } | |
3259 | zio->io_flags |= ZIO_FLAG_IO_ALLOCATING; | |
3260 | slot_reserved = B_TRUE; | |
3261 | } | |
3262 | ||
3263 | mutex_exit(&mc->mc_lock); | |
3264 | return (slot_reserved); | |
3265 | } | |
3266 | ||
3267 | void | |
3268 | metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio) | |
3269 | { | |
3dfb57a3 DB |
3270 | ASSERT(mc->mc_alloc_throttle_enabled); |
3271 | mutex_enter(&mc->mc_lock); | |
1c27024e | 3272 | for (int d = 0; d < slots; d++) { |
3dfb57a3 DB |
3273 | (void) refcount_remove(&mc->mc_alloc_slots, zio); |
3274 | } | |
3275 | mutex_exit(&mc->mc_lock); | |
3276 | } | |
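/*
 * A rough sketch of the expected call pattern (the exact call sites
 * live in the zio pipeline and are assumed here): reserve slots before
 * dispatching an allocating write, release them when the I/O completes:
 *
 *	if (metaslab_class_throttle_reserve(mc, ndvas, zio, flags)) {
 *		error = metaslab_alloc(spa, mc, psize, bp, ndvas,
 *		    txg, hintbp, flags, &zio->io_alloc_list, zio);
 *		...
 *		metaslab_class_throttle_unreserve(mc, ndvas, zio);
 *	}
 */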
3277 | ||
34dc7c2f BB |
3278 | int |
3279 | metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, | |
4e21fd06 DB |
3280 | int ndvas, uint64_t txg, blkptr_t *hintbp, int flags, |
3281 | zio_alloc_list_t *zal, zio_t *zio) | |
34dc7c2f BB |
3282 | { |
3283 | dva_t *dva = bp->blk_dva; | |
3284 | dva_t *hintdva = hintbp->blk_dva; | |
1c27024e | 3285 | int error = 0; |
34dc7c2f | 3286 | |
b128c09f | 3287 | ASSERT(bp->blk_birth == 0); |
428870ff | 3288 | ASSERT(BP_PHYSICAL_BIRTH(bp) == 0); |
b128c09f BB |
3289 | |
3290 | spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); | |
3291 | ||
3292 | if (mc->mc_rotor == NULL) { /* no vdevs in this class */ | |
3293 | spa_config_exit(spa, SCL_ALLOC, FTAG); | |
2e528b49 | 3294 | return (SET_ERROR(ENOSPC)); |
b128c09f | 3295 | } |
34dc7c2f BB |
3296 | |
3297 | ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa)); | |
3298 | ASSERT(BP_GET_NDVAS(bp) == 0); | |
3299 | ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); | |
4e21fd06 | 3300 | ASSERT3P(zal, !=, NULL); |
34dc7c2f | 3301 | |
1c27024e | 3302 | for (int d = 0; d < ndvas; d++) { |
34dc7c2f | 3303 | error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, |
4e21fd06 | 3304 | txg, flags, zal); |
93cf2076 | 3305 | if (error != 0) { |
34dc7c2f BB |
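			/*
			 * Unwind: free every DVA allocated so far and
			 * return its queue-depth credit before failing
			 * the allocation as a whole.
			 */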
3306 | for (d--; d >= 0; d--) { |
3307 | metaslab_free_dva(spa, &dva[d], txg, B_TRUE); | |
3dfb57a3 DB |
3308 | metaslab_group_alloc_decrement(spa, |
3309 | DVA_GET_VDEV(&dva[d]), zio, flags); | |
34dc7c2f BB |
3310 | bzero(&dva[d], sizeof (dva_t)); |
3311 | } | |
b128c09f | 3312 | spa_config_exit(spa, SCL_ALLOC, FTAG); |
34dc7c2f | 3313 | return (error); |
3dfb57a3 DB |
3314 | } else { |
3315 | /* | |
3316 | * Update the metaslab group's queue depth | |
3317 | * based on the newly allocated dva. | |
3318 | */ | |
3319 | metaslab_group_alloc_increment(spa, | |
3320 | DVA_GET_VDEV(&dva[d]), zio, flags); | |
34dc7c2f | 3321 | } |
3dfb57a3 | 3322 | |
34dc7c2f BB |
3323 | } |
3324 | ASSERT(error == 0); | |
3325 | ASSERT(BP_GET_NDVAS(bp) == ndvas); | |
3326 | ||
b128c09f BB |
3327 | spa_config_exit(spa, SCL_ALLOC, FTAG); |
3328 | ||
efe7978d | 3329 | BP_SET_BIRTH(bp, txg, 0); |
b128c09f | 3330 | |
34dc7c2f BB |
3331 | return (0); |
3332 | } | |
3333 | ||
3334 | void | |
3335 | metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) | |
3336 | { | |
3337 | const dva_t *dva = bp->blk_dva; | |
1c27024e | 3338 | int ndvas = BP_GET_NDVAS(bp); |
34dc7c2f BB |
3339 | |
3340 | ASSERT(!BP_IS_HOLE(bp)); | |
428870ff | 3341 | ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa)); |
b128c09f BB |
3342 | |
3343 | spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); | |
34dc7c2f | 3344 | |
1c27024e | 3345 | for (int d = 0; d < ndvas; d++) |
34dc7c2f | 3346 | metaslab_free_dva(spa, &dva[d], txg, now); |
b128c09f BB |
3347 | |
3348 | spa_config_exit(spa, SCL_FREE, FTAG); | |
34dc7c2f BB |
3349 | } |
3350 | ||
3351 | int | |
3352 | metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) | |
3353 | { | |
3354 | const dva_t *dva = bp->blk_dva; | |
3355 | int ndvas = BP_GET_NDVAS(bp); | |
1c27024e | 3356 | int error = 0; |
34dc7c2f BB |
3357 | |
3358 | ASSERT(!BP_IS_HOLE(bp)); | |
3359 | ||
b128c09f BB |
3360 | if (txg != 0) { |
3361 | /* | |
3362 | * First do a dry run to make sure all DVAs are claimable, | |
3363 | * so we don't have to unwind from partial failures below. | |
3364 | */ | |
3365 | if ((error = metaslab_claim(spa, bp, 0)) != 0) | |
3366 | return (error); | |
3367 | } | |
3368 | ||
3369 | spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); | |
3370 | ||
1c27024e | 3371 | for (int d = 0; d < ndvas; d++) |
34dc7c2f | 3372 | if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0) |
b128c09f BB |
3373 | break; |
3374 | ||
3375 | spa_config_exit(spa, SCL_ALLOC, FTAG); | |
3376 | ||
3377 | ASSERT(error == 0 || txg == 0); | |
34dc7c2f | 3378 | |
b128c09f | 3379 | return (error); |
34dc7c2f | 3380 | } |
920dd524 | 3381 | |
d1d7e268 MK |
3382 | void |
3383 | metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp) | |
920dd524 ED |
3384 | { |
3385 | const dva_t *dva = bp->blk_dva; | |
3386 | int ndvas = BP_GET_NDVAS(bp); | |
3387 | uint64_t psize = BP_GET_PSIZE(bp); | |
3388 | int d; | |
3389 | vdev_t *vd; | |
3390 | ||
3391 | ASSERT(!BP_IS_HOLE(bp)); | |
9b67f605 | 3392 | ASSERT(!BP_IS_EMBEDDED(bp)); |
920dd524 ED |
3393 | ASSERT(psize > 0); |
3394 | ||
3395 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
3396 | ||
3397 | for (d = 0; d < ndvas; d++) { | |
3398 | if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) | |
3399 | continue; | |
3400 | atomic_add_64(&vd->vdev_pending_fastwrite, psize); | |
3401 | } | |
3402 | ||
3403 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
3404 | } | |
3405 | ||
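/*
 * Presumably paired with metaslab_fastwrite_mark() above: once the log
 * write is no longer outstanding, the ZIL drops the pending-fastwrite
 * accounting it added for each DVA.
 */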
d1d7e268 MK |
3406 | void |
3407 | metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp) | |
920dd524 ED |
3408 | { |
3409 | const dva_t *dva = bp->blk_dva; | |
3410 | int ndvas = BP_GET_NDVAS(bp); | |
3411 | uint64_t psize = BP_GET_PSIZE(bp); | |
3412 | int d; | |
3413 | vdev_t *vd; | |
3414 | ||
3415 | ASSERT(!BP_IS_HOLE(bp)); | |
9b67f605 | 3416 | ASSERT(!BP_IS_EMBEDDED(bp)); |
920dd524 ED |
3417 | ASSERT(psize > 0); |
3418 | ||
3419 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
3420 | ||
3421 | for (d = 0; d < ndvas; d++) { | |
3422 | if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) | |
3423 | continue; | |
3424 | ASSERT3U(vd->vdev_pending_fastwrite, >=, psize); | |
3425 | atomic_sub_64(&vd->vdev_pending_fastwrite, psize); | |
3426 | } | |
3427 | ||
3428 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
3429 | } | |
30b92c1d | 3430 | |
13fe0198 MA |
3431 | void |
3432 | metaslab_check_free(spa_t *spa, const blkptr_t *bp) | |
3433 | { | |
13fe0198 MA |
3434 | if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) |
3435 | return; | |
3436 | ||
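	/*
	 * For each DVA, verify that the range being freed does not
	 * appear in any tree that already tracks free or freeing
	 * space; range_tree_verify() is expected to panic on overlap.
	 */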
3437 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
1c27024e | 3438 | for (int i = 0; i < BP_GET_NDVAS(bp); i++) { |
93cf2076 GW |
3439 | uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]); |
3440 | vdev_t *vd = vdev_lookup_top(spa, vdev); | |
3441 | uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]); | |
13fe0198 | 3442 | uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]); |
93cf2076 | 3443 | metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; |
13fe0198 | 3444 | |
93cf2076 GW |
3445 | if (msp->ms_loaded) |
3446 | range_tree_verify(msp->ms_tree, offset, size); | |
13fe0198 | 3447 | |
258553d3 TC |
3448 | range_tree_verify(msp->ms_freeingtree, offset, size); |
3449 | range_tree_verify(msp->ms_freedtree, offset, size); | |
1c27024e | 3450 | for (int j = 0; j < TXG_DEFER_SIZE; j++) |
93cf2076 | 3451 | range_tree_verify(msp->ms_defertree[j], offset, size); |
13fe0198 MA |
3452 | } |
3453 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
3454 | } | |
3455 | ||
30b92c1d | 3456 | #if defined(_KERNEL) && defined(HAVE_SPL) |
02730c33 | 3457 | /* CSTYLED */ |
99b14de4 | 3458 | module_param(metaslab_aliquot, ulong, 0644); |
99b14de4 ED |
3459 | MODULE_PARM_DESC(metaslab_aliquot, |
3460 | "allocation granularity (a.k.a. stripe size)"); | |
02730c33 BB |
3461 | |
3462 | module_param(metaslab_debug_load, int, 0644); | |
93cf2076 GW |
3463 | MODULE_PARM_DESC(metaslab_debug_load, |
3464 | "load all metaslabs when pool is first opened"); | |
02730c33 BB |
3465 | |
3466 | module_param(metaslab_debug_unload, int, 0644); | |
1ce04573 BB |
3467 | MODULE_PARM_DESC(metaslab_debug_unload, |
3468 | "prevent metaslabs from being unloaded"); | |
02730c33 BB |
3469 | |
3470 | module_param(metaslab_preload_enabled, int, 0644); | |
f3a7f661 GW |
3471 | MODULE_PARM_DESC(metaslab_preload_enabled, |
3472 | "preload potential metaslabs during reassessment"); | |
f4a4046b | 3473 | |
02730c33 | 3474 | module_param(zfs_mg_noalloc_threshold, int, 0644); |
f4a4046b TC |
3475 | MODULE_PARM_DESC(zfs_mg_noalloc_threshold, |
3476 | "percentage of free space for metaslab group to allow allocation"); | |
02730c33 BB |
3477 | |
3478 | module_param(zfs_mg_fragmentation_threshold, int, 0644); | |
f3a7f661 GW |
3479 | MODULE_PARM_DESC(zfs_mg_fragmentation_threshold, |
3480 | "fragmentation for metaslab group to allow allocation"); | |
3481 | ||
02730c33 | 3482 | module_param(zfs_metaslab_fragmentation_threshold, int, 0644); |
f3a7f661 GW |
3483 | MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold, |
3484 | "fragmentation for metaslab to allow allocation"); | |
02730c33 BB |
3485 | |
3486 | module_param(metaslab_fragmentation_factor_enabled, int, 0644); | |
f3a7f661 GW |
3487 | MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled, |
3488 | "use the fragmentation metric to prefer less fragmented metaslabs"); | |
02730c33 BB |
3489 | |
3490 | module_param(metaslab_lba_weighting_enabled, int, 0644); | |
f3a7f661 GW |
3491 | MODULE_PARM_DESC(metaslab_lba_weighting_enabled, |
3492 | "prefer metaslabs with lower LBAs"); | |
02730c33 BB |
3493 | |
3494 | module_param(metaslab_bias_enabled, int, 0644); | |
f3a7f661 GW |
3495 | MODULE_PARM_DESC(metaslab_bias_enabled, |
3496 | "enable metaslab group biasing"); | |
4e21fd06 DB |
3497 | |
3498 | module_param(zfs_metaslab_segment_weight_enabled, int, 0644); | |
3499 | MODULE_PARM_DESC(zfs_metaslab_segment_weight_enabled, | |
3500 | "enable segment-based metaslab selection"); | |
3501 | ||
3502 | module_param(zfs_metaslab_switch_threshold, int, 0644); | |
3503 | MODULE_PARM_DESC(zfs_metaslab_switch_threshold, | |
3504 | "segment-based metaslab selection maximum buckets before switching"); | |
30b92c1d | 3505 | #endif /* _KERNEL && HAVE_SPL */ |