/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>

#define	WITH_DF_BLOCK_ALLOCATOR

/*
 * Allow allocations to switch to gang blocks quickly. We do this to
 * avoid having to load lots of space_maps in a given txg. There are,
 * however, some cases where we want to avoid "fast" ganging and instead
 * we want to do an exhaustive search of all metaslabs on this device.
 * Currently we don't allow any gang, slog, or dump device related allocations
 * to "fast" gang.
 */
#define	CAN_FASTGANG(flags) \
	(!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
	METASLAB_GANG_AVOID)))

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)

uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space_map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;
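/*
 * As a rough worked example: with the default zfs_condense_pct of 200,
 * a space map is only condensed once its on-disk representation grows
 * to at least twice the size of its optimal (fully condensed) form.
 */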

/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
int zfs_metaslab_condense_block_threshold = 4;
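/*
 * For example, with the default threshold of 4 and (hypothetically) 4K
 * space map blocks, a space map of 16K or less would never be condensed,
 * since rewriting it could not save enough blocks to justify the I/O.
 */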

/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;
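/*
 * For example, if zfs_mg_noalloc_threshold were set to 10, a metaslab
 * group whose free capacity drops to 10% or less would be skipped for
 * allocations as long as some other group in the pool still has more
 * than 10% of its space free.
 */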

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or equal to
 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
 * then it will be skipped unless all metaslab groups within the metaslab
 * class have also crossed this threshold.
 */
int zfs_mg_fragmentation_threshold = 85;

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;

/*
 * When set, we will load all metaslabs when the pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set, we will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;

/*
 * Percentage of all CPUs that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;

/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;

/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;

/*
 * Enable/disable preloading of metaslabs.
 */
int metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
int metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
int metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
int metaslab_bias_enabled = B_TRUE;

static uint64_t metaslab_fragmentation(metaslab_t *);

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_fastwrite_lock, NULL, MUTEX_DEFAULT, NULL);

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	mutex_destroy(&mc->mc_fastwrite_lock);
	kmem_free(mc, sizeof (metaslab_class_t));
}

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}

void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i, c;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	for (c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);

	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;
	int c;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}

/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;
	int c;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		space += tvd->vdev_max_asize - tvd->vdev_asize;
	}
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (space);
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = x1;
	const metaslab_t *m2 = x2;

	if (m1->ms_weight < m2->ms_weight)
		return (1);
	if (m1->ms_weight > m2->ms_weight)
		return (-1);

	/*
	 * If the weights are identical, use the offset to force uniqueness.
	 */
	if (m1->ms_start < m2->ms_start)
		return (-1);
	if (m1->ms_start > m2->ms_start)
		return (1);

	ASSERT3P(m1, ==, m2);

	return (0);
}
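
/*
 * Note that this comparator orders the group's AVL tree by descending
 * weight, so the most desirable metaslab sorts first; the start offset
 * only breaks ties to keep entries unique.
 */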

/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the capacity is greater than
 * the zfs_mg_noalloc_threshold and the group is not heavily fragmented.
 * If a metaslab group transitions from allocatable to non-allocatable
 * or vice versa then the metaslab group's class is updated to reflect
 * the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;

	ASSERT(vd == vd->vdev_top);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;

	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);

	/*
	 * A metaslab group is considered allocatable if it has plenty
	 * of free space or is not heavily fragmented. We only take
	 * fragmentation into account if the metaslab group has a valid
	 * fragmentation metric (i.e. a value between 0 and 100).
	 */
	mg->mg_allocatable = (mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

	/*
	 * The mc_alloc_groups maintains a count of the number of
	 * groups in this metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;

	mutex_exit(&mg->mg_lock);
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;

	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
	    minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	taskq_destroy(mg->mg_taskq);
	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	kmem_free(mg, sizeof (metaslab_group_t));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	ASSERT(mc->mc_rotor != mg);
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
}

void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	if (--mg->mg_activation_count != 0) {
		ASSERT(mc->mc_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	taskq_wait_outstanding(mg->mg_taskq, 0);
	metaslab_group_alloc_update(mg);

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}

uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
	return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
}

void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
	uint64_t *mg_hist;
	vdev_t *vd = mg->mg_vd;
	uint64_t ashift = vd->vdev_ashift;
	int i, m;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
	    SPACE_MAP_HISTOGRAM_SIZE + ashift);

	for (m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_sm == NULL)
			continue;

		for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
			mg_hist[i + ashift] +=
			    msp->ms_sm->sm_phys->smp_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);

	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;
	int i;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		mg->mg_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}

void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;
	int i;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		ASSERT3U(mg->mg_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		ASSERT3U(mc->mc_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);

		mg->mg_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	ASSERT(msp->ms_group == NULL);
	mutex_enter(&mg->mg_lock);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);

	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_add(mg, msp);
	mutex_exit(&msp->ms_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_remove(mg, msp);
	mutex_exit(&msp->ms_lock);

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 511].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

/*
 * Calculate the fragmentation for a given metaslab group. We can use
 * a simple average here since all metaslabs within the group must have
 * the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in
 * this group have a fragmentation metric.
 */
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	uint64_t fragmentation = 0;
	uint64_t valid_ms = 0;
	int m;

	for (m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
			continue;

		valid_ms++;
		fragmentation += msp->ms_fragmentation;
	}

	if (valid_ms <= vd->vdev_ms_count / 2)
		return (ZFS_FRAG_INVALID);

	fragmentation /= valid_ms;
	ASSERT3U(fragmentation, <=, 100);
	return (fragmentation);
}

/*
 * Determine if a given metaslab group should skip allocations. A metaslab
 * group should avoid allocations if its free capacity is less than the
 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
 * that can still handle allocations.
 */
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	spa_t *spa = vd->vdev_spa;
	metaslab_class_t *mc = mg->mg_class;

	/*
	 * We use two key metrics to determine if a metaslab group is
	 * considered allocatable -- free space and fragmentation. If
	 * the free space is greater than the free space threshold and
	 * the fragmentation is less than the fragmentation threshold then
	 * consider the group allocatable. There are two cases when we will
	 * not consider these key metrics. The first is if the group is
	 * associated with a slog device and the second is if all groups
	 * in this metaslab class have already been considered ineligible
	 * for allocations.
	 */
	return ((mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold)) ||
	    mc != spa_normal_class(spa) || mc->mc_alloc_groups == 0);
}

/*
 * ==========================================================================
 * Range tree callbacks
 * ==========================================================================
 */

/*
 * Comparison function for the private size-ordered tree. Tree is sorted
 * by size, larger sizes at the end of the tree.
 */
static int
metaslab_rangesize_compare(const void *x1, const void *x2)
{
	const range_seg_t *r1 = x1;
	const range_seg_t *r2 = x2;
	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
	uint64_t rs_size2 = r2->rs_end - r2->rs_start;

	if (rs_size1 < rs_size2)
		return (-1);
	if (rs_size1 > rs_size2)
		return (1);

	if (r1->rs_start < r2->rs_start)
		return (-1);

	if (r1->rs_start > r2->rs_start)
		return (1);

	return (0);
}

/*
 * Create any block allocator specific components. The current allocators
 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
 */
static void
metaslab_rt_create(range_tree_t *rt, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT(msp->ms_tree == NULL);

	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
}

/*
 * Destroy the block allocator specific components.
 */
static void
metaslab_rt_destroy(range_tree_t *rt, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);
	ASSERT0(avl_numnodes(&msp->ms_size_tree));

	avl_destroy(&msp->ms_size_tree);
}

static void
metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);
	VERIFY(!msp->ms_condensing);
	avl_add(&msp->ms_size_tree, rs);
}

static void
metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);
	VERIFY(!msp->ms_condensing);
	avl_remove(&msp->ms_size_tree, rs);
}

static void
metaslab_rt_vacate(range_tree_t *rt, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);

	/*
	 * Normally one would walk the tree freeing nodes along the way.
	 * Since the nodes are shared with the range trees we can avoid
	 * walking all nodes and just reinitialize the avl tree. The nodes
	 * will be freed by the range tree, so we don't want to free them here.
	 */
	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
}

static range_tree_ops_t metaslab_rt_ops = {
	metaslab_rt_create,
	metaslab_rt_destroy,
	metaslab_rt_add,
	metaslab_rt_remove,
	metaslab_rt_vacate
};

/*
 * ==========================================================================
 * Metaslab block operations
 * ==========================================================================
 */

/*
 * Return the maximum contiguous segment within the metaslab.
 */
uint64_t
metaslab_block_maxsize(metaslab_t *msp)
{
	avl_tree_t *t = &msp->ms_size_tree;
	range_seg_t *rs;

	if (t == NULL || (rs = avl_last(t)) == NULL)
		return (0ULL);

	return (rs->rs_end - rs->rs_start);
}

uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size)
{
	uint64_t start;
	range_tree_t *rt = msp->ms_tree;

	VERIFY(!msp->ms_condensing);

	start = msp->ms_ops->msop_alloc(msp, size);
	if (start != -1ULL) {
		vdev_t *vd = msp->ms_group->mg_vd;

		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
		range_tree_remove(rt, start, size);
	}
	return (start);
}

/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */

#if defined(WITH_FF_BLOCK_ALLOCATOR) || \
    defined(WITH_DF_BLOCK_ALLOCATOR) || \
    defined(WITH_CF_BLOCK_ALLOCATOR)
/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate. This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
	range_seg_t *rs, rsearch;
	avl_index_t where;

	rsearch.rs_start = *cursor;
	rsearch.rs_end = *cursor + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL)
		rs = avl_nearest(t, where, AVL_AFTER);

	while (rs != NULL) {
		uint64_t offset = P2ROUNDUP(rs->rs_start, align);

		if (offset + size <= rs->rs_end) {
			*cursor = offset + size;
			return (offset);
		}
		rs = AVL_NEXT(t, rs);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_block_picker(t, cursor, size, align));
}
#endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */
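
/*
 * To sketch the search above: with *cursor = 0x10000, size = 0x2000 and
 * align = 0x2000, the picker starts at the first free segment at or
 * after 0x10000 and walks forward until it finds a segment containing
 * an aligned hole of at least 0x2000 bytes, wrapping back to offset 0
 * at most once before giving up.
 */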

#if defined(WITH_FF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static uint64_t
metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not guarantee that other allocation sizes
	 * will not end up in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	avl_tree_t *t = &msp->ms_tree->rt_root;

	return (metaslab_block_picker(t, cursor, size, align));
}

static metaslab_ops_t metaslab_ff_ops = {
	metaslab_ff_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
#endif /* WITH_FF_BLOCK_ALLOCATOR */
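
/*
 * As a worked example of the alignment trick above: for a 24K (0x6000)
 * request, size & -size isolates the lowest set bit, yielding 8K
 * (0x2000) -- the largest power of two that evenly divides 24K -- so
 * the request is served from the 8K cursor bucket.
 */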

#if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first fit allocation scheme until space gets low and then
 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not guarantee that other allocation sizes
	 * will not end up in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	range_tree_t *rt = msp->ms_tree;
	avl_tree_t *t = &rt->rt_root;
	uint64_t max_size = metaslab_block_maxsize(msp);
	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));

	if (max_size < size)
		return (-1ULL);

	/*
	 * If we're running low on space switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		t = &msp->ms_size_tree;
		*cursor = 0;
	}

	return (metaslab_block_picker(t, cursor, size, 1ULL));
}

static metaslab_ops_t metaslab_df_ops = {
	metaslab_df_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
#endif /* WITH_DF_BLOCK_ALLOCATOR */
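
/*
 * With the defaults above, the switch to best-fit happens once the
 * largest free segment drops below SPA_MAXBLOCKSIZE or less than 4% of
 * the metaslab is free; until then the offset-ordered tree preserves
 * first-fit locality.
 */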

#if defined(WITH_CF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Cursor fit block allocator -
 * Select the largest region in the metaslab, set the cursor to the beginning
 * of the range and the cursor_end to the end of the range. As allocations
 * are made advance the cursor. Continue allocating from the cursor until
 * the range is exhausted and then find a new range.
 * ==========================================================================
 */
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
	range_tree_t *rt = msp->ms_tree;
	avl_tree_t *t = &msp->ms_size_tree;
	uint64_t *cursor = &msp->ms_lbas[0];
	uint64_t *cursor_end = &msp->ms_lbas[1];
	uint64_t offset = 0;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));

	ASSERT3U(*cursor_end, >=, *cursor);

	if ((*cursor + size) > *cursor_end) {
		range_seg_t *rs;

		rs = avl_last(&msp->ms_size_tree);
		if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
			return (-1ULL);

		*cursor = rs->rs_start;
		*cursor_end = rs->rs_end;
	}

	offset = *cursor;
	*cursor += size;

	return (offset);
}

static metaslab_ops_t metaslab_cf_ops = {
	metaslab_cf_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
#endif /* WITH_CF_BLOCK_ALLOCATOR */
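
/*
 * For example, after the cursor pair latches onto a 1M region, a series
 * of 128K allocations is carved sequentially from it; once fewer than
 * 128K remain, the largest remaining region is selected and the cursors
 * are reset to cover it.
 */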

#if defined(WITH_NDF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * New dynamic fit allocator -
 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
 * contiguous blocks. If no region is found then just use the largest segment
 * that remains.
 * ==========================================================================
 */

/*
 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
 * to request from the allocator.
 */
uint64_t metaslab_ndf_clump_shift = 4;

static uint64_t
metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
{
	avl_tree_t *t = &msp->ms_tree->rt_root;
	avl_index_t where;
	range_seg_t *rs, rsearch;
	uint64_t hbit = highbit64(size);
	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
	uint64_t max_size = metaslab_block_maxsize(msp);

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));

	if (max_size < size)
		return (-1ULL);

	rsearch.rs_start = *cursor;
	rsearch.rs_end = *cursor + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
		t = &msp->ms_size_tree;

		rsearch.rs_start = 0;
		rsearch.rs_end = MIN(max_size,
		    1ULL << (hbit + metaslab_ndf_clump_shift));
		rs = avl_find(t, &rsearch, &where);
		if (rs == NULL)
			rs = avl_nearest(t, where, AVL_AFTER);
		ASSERT(rs != NULL);
	}

	if ((rs->rs_end - rs->rs_start) >= size) {
		*cursor = rs->rs_start + size;
		return (rs->rs_start);
	}
	return (-1ULL);
}

static metaslab_ops_t metaslab_ndf_ops = {
	metaslab_ndf_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
#endif /* WITH_NDF_BLOCK_ALLOCATOR */
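
/*
 * As a sketch of the clumping above: with the default
 * metaslab_ndf_clump_shift of 4, a request that misses at its cursor
 * falls back to searching the size tree for a region large enough to
 * hold on the order of 2^4 = 16 contiguous blocks of the requested
 * size, so that subsequent allocations can cluster together.
 */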

/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */

/*
 * Wait for any in-progress metaslab loads to complete.
 */
void
metaslab_load_wait(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	while (msp->ms_loading) {
		ASSERT(!msp->ms_loaded);
		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
	}
}

int
metaslab_load(metaslab_t *msp)
{
	int error = 0;
	int t;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(!msp->ms_loaded);
	ASSERT(!msp->ms_loading);

	msp->ms_loading = B_TRUE;

	/*
	 * If the space map has not been allocated yet, then treat
	 * all the space in the metaslab as free and add it to the
	 * ms_tree.
	 */
	if (msp->ms_sm != NULL)
		error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
	else
		range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);

	msp->ms_loaded = (error == 0);
	msp->ms_loading = B_FALSE;

	if (msp->ms_loaded) {
		for (t = 0; t < TXG_DEFER_SIZE; t++) {
			range_tree_walk(msp->ms_defertree[t],
			    range_tree_remove, msp->ms_tree);
		}
	}
	cv_broadcast(&msp->ms_load_cv);
	return (error);
}

void
metaslab_unload(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	range_tree_vacate(msp->ms_tree, NULL, NULL);
	msp->ms_loaded = B_FALSE;
	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
}

int
metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
    metaslab_t **msp)
{
	vdev_t *vd = mg->mg_vd;
	objset_t *mos = vd->vdev_spa->spa_meta_objset;
	metaslab_t *ms;
	int error;

	ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
	mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
	ms->ms_id = id;
	ms->ms_start = id << vd->vdev_ms_shift;
	ms->ms_size = 1ULL << vd->vdev_ms_shift;

	/*
	 * We only open space map objects that already exist. All others
	 * will be opened when we finally allocate an object for them.
	 */
	if (object != 0) {
		error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
		    ms->ms_size, vd->vdev_ashift, &ms->ms_lock);

		if (error != 0) {
			kmem_free(ms, sizeof (metaslab_t));
			return (error);
		}

		ASSERT(ms->ms_sm != NULL);
	}

	/*
	 * We create the main range tree here, but we don't create the
	 * alloctree and freetree until metaslab_sync_done(). This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock);
	metaslab_group_add(mg, ms);

	ms->ms_fragmentation = metaslab_fragmentation(ms);
	ms->ms_ops = mg->mg_class->mc_ops;

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(ms, 0);

	/*
	 * If metaslab_debug_load is set and we're initializing a metaslab
	 * that has an allocated space_map object then load its space map
	 * so that we can verify frees.
	 */
	if (metaslab_debug_load && ms->ms_sm != NULL) {
		mutex_enter(&ms->ms_lock);
		VERIFY0(metaslab_load(ms));
		mutex_exit(&ms->ms_lock);
	}

	if (txg != 0) {
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, ms, txg);
	}

	*msp = ms;

	return (0);
}
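
/*
 * Note that a metaslab's start offset and size follow directly from the
 * vdev's ms_shift above: e.g. with vdev_ms_shift = 34 every metaslab
 * covers 16GB, and metaslab id 3 starts at offset 3 << 34 = 48GB.
 */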

void
metaslab_fini(metaslab_t *msp)
{
	int t;

	metaslab_group_t *mg = msp->ms_group;

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);

	VERIFY(msp->ms_group == NULL);
	vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
	    0, -msp->ms_size);
	space_map_close(msp->ms_sm);

	metaslab_unload(msp);
	range_tree_destroy(msp->ms_tree);

	for (t = 0; t < TXG_SIZE; t++) {
		range_tree_destroy(msp->ms_alloctree[t]);
		range_tree_destroy(msp->ms_freetree[t]);
	}

	for (t = 0; t < TXG_DEFER_SIZE; t++) {
		range_tree_destroy(msp->ms_defertree[t]);
	}

	ASSERT0(msp->ms_deferspace);

	mutex_exit(&msp->ms_lock);
	cv_destroy(&msp->ms_load_cv);
	mutex_destroy(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
}

#define	FRAGMENTATION_TABLE_SIZE	17

/*
 * This table defines a segment size based fragmentation metric that will
 * allow each metaslab to derive its own fragmentation value. This is done
 * by calculating the space in each bucket of the spacemap histogram and
 * multiplying that by the fragmentation metric in this table. Doing
 * this for all buckets and dividing it by the total amount of free
 * space in this metaslab (i.e. the total free space in all buckets) gives
 * us the fragmentation metric. This means that a high fragmentation metric
 * equates to most of the free space being comprised of small segments.
 * Conversely, if the metric is low, then most of the free space is in
 * large segments. A 10% change in fragmentation equates to approximately
 * double the number of segments.
 *
 * This table defines 0% fragmented space using 16MB segments. Testing has
 * shown that segments that are greater than or equal to 16MB do not suffer
 * from drastic performance problems. Using this value, we derive the rest
 * of the table. Since the fragmentation value is never stored on disk, it
 * is possible to change these calculations in the future.
 */
int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
	100,	/* 512B	*/
	100,	/* 1K	*/
	98,	/* 2K	*/
	95,	/* 4K	*/
	90,	/* 8K	*/
	80,	/* 16K	*/
	70,	/* 32K	*/
	60,	/* 64K	*/
	50,	/* 128K	*/
	40,	/* 256K	*/
	30,	/* 512K	*/
	20,	/* 1M	*/
	15,	/* 2M	*/
	10,	/* 4M	*/
	5,	/* 8M	*/
	0	/* 16M	*/
};
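
/*
 * As a worked example (with made-up numbers): if half of a metaslab's
 * free space sits in 1M segments (weight 20) and the other half in 16M
 * segments (weight 0), its fragmentation metric is
 * (50% * 20) + (50% * 0) = 10.
 */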

/*
 * Calculate the metaslab's fragmentation metric. A return value
 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
 * not support this metric. Otherwise, the return value should be in the
 * range [0, 100].
 */
static uint64_t
metaslab_fragmentation(metaslab_t *msp)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	uint64_t fragmentation = 0;
	uint64_t total = 0;
	boolean_t feature_enabled = spa_feature_is_enabled(spa,
	    SPA_FEATURE_SPACEMAP_HISTOGRAM);
	int i;

	if (!feature_enabled)
		return (ZFS_FRAG_INVALID);

	/*
	 * A null space map means that the entire metaslab is free
	 * and thus is not fragmented.
	 */
	if (msp->ms_sm == NULL)
		return (0);

	/*
	 * If this metaslab's space_map has not been upgraded, flag it
	 * so that we upgrade next time we encounter it.
	 */
	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
		vdev_t *vd = msp->ms_group->mg_vd;

		if (spa_writeable(vd->vdev_spa)) {
			uint64_t txg = spa_syncing_txg(spa);

			msp->ms_condense_wanted = B_TRUE;
			vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
			spa_dbgmsg(spa, "txg %llu, requesting force condense: "
			    "msp %p, vd %p", txg, msp, vd);
		}
		return (ZFS_FRAG_INVALID);
	}

	for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		uint64_t space = 0;
		uint8_t shift = msp->ms_sm->sm_shift;
		int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
		    FRAGMENTATION_TABLE_SIZE - 1);

		if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
			continue;

		space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
		total += space;

		ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
		fragmentation += space * zfs_frag_table[idx];
	}

	if (total > 0)
		fragmentation /= total;
	ASSERT3U(fragmentation, <=, 100);
	return (fragmentation);
}
34dc7c2f | 1451 | |
f3a7f661 GW |
1452 | /* |
1453 | * Compute a weight -- a selection preference value -- for the given metaslab. | |
1454 | * This is based on the amount of free space, the level of fragmentation, | |
1455 | * the LBA range, and whether the metaslab is loaded. | |
1456 | */ | |
34dc7c2f BB |
1457 | static uint64_t |
1458 | metaslab_weight(metaslab_t *msp) | |
1459 | { | |
1460 | metaslab_group_t *mg = msp->ms_group; | |
34dc7c2f BB |
1461 | vdev_t *vd = mg->mg_vd; |
1462 | uint64_t weight, space; | |
1463 | ||
1464 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
1465 | ||
c2e42f9d GW |
1466 | /* |
1467 | * This vdev is in the process of being removed so there is nothing | |
1468 | * for us to do here. | |
1469 | */ | |
1470 | if (vd->vdev_removing) { | |
93cf2076 | 1471 | ASSERT0(space_map_allocated(msp->ms_sm)); |
c2e42f9d GW |
1472 | ASSERT0(vd->vdev_ms_shift); |
1473 | return (0); | |
1474 | } | |
1475 | ||
34dc7c2f BB |
1476 | /* |
1477 | * The baseline weight is the metaslab's free space. | |
1478 | */ | |
93cf2076 | 1479 | space = msp->ms_size - space_map_allocated(msp->ms_sm); |
f3a7f661 GW |
1480 | |
1481 | msp->ms_fragmentation = metaslab_fragmentation(msp); | |
1482 | if (metaslab_fragmentation_factor_enabled && | |
1483 | msp->ms_fragmentation != ZFS_FRAG_INVALID) { | |
1484 | /* | |
1485 | * Use the fragmentation information to inversely scale | |
1486 | * down the baseline weight. We need to ensure that we | |
1487 | * don't exclude this metaslab completely when it's 100% | |
1488 | * fragmented. To avoid this we reduce the fragmented value | |
1489 | * by 1. | |
1490 | */ | |
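		/*
		 * E.g. a fully fragmented metaslab (ms_fragmentation ==
		 * 100) is scaled to 1% of its baseline weight rather
		 * than to zero.
		 */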
1491 | space = (space * (100 - (msp->ms_fragmentation - 1))) / 100; | |
1492 | ||
1493 | /* | |
1494 | * If space < SPA_MINBLOCKSIZE, then we will not allocate from | |
1495 | * this metaslab again. The fragmentation metric may have | |
1496 | * decreased the space to something smaller than | |
1497 | * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE | |
1498 | * so that we can consume any remaining space. | |
1499 | */ | |
1500 | if (space > 0 && space < SPA_MINBLOCKSIZE) | |
1501 | space = SPA_MINBLOCKSIZE; | |
1502 | } | |
34dc7c2f BB |
1503 | weight = space; |
1504 | ||
1505 | /* | |
1506 | * Modern disks have uniform bit density and constant angular velocity. | |
1507 | * Therefore, the outer recording zones are faster (higher bandwidth) | |
1508 | * than the inner zones by the ratio of outer to inner track diameter, | |
1509 | * which is typically around 2:1. We account for this by assigning | |
1510 | * higher weight to lower metaslabs (multiplier ranging from 2x to 1x). | |
1511 | * In effect, this means that we'll select the metaslab with the most | |
1512 | * free bandwidth rather than simply the one with the most free space. | |
1513 | */ | |
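	/*
	 * E.g. the first metaslab (ms_id == 0) keeps the full 2x
	 * multiplier, while the last one drops to roughly 1x.
	 */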
f3a7f661 GW |
1514 | if (metaslab_lba_weighting_enabled) { |
1515 | weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count; | |
1516 | ASSERT(weight >= space && weight <= 2 * space); | |
1517 | } | |
428870ff | 1518 | |
f3a7f661 GW |
1519 | /* |
1520 | * If this metaslab is one we're actively using, adjust its | |
1521 | * weight to make it preferable to any inactive metaslab so | |
1522 | * we'll polish it off. If the fragmentation on this metaslab | |
1523 | * has exceeded our threshold, then don't mark it active. | |
1524 | */ | |
1525 | if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID && | |
1526 | msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) { | |
428870ff BB |
1527 | weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK); |
1528 | } | |
34dc7c2f | 1529 | |
93cf2076 | 1530 | return (weight); |
34dc7c2f BB |
1531 | } |
1532 | ||
1533 | static int | |
6d974228 | 1534 | metaslab_activate(metaslab_t *msp, uint64_t activation_weight) |
34dc7c2f | 1535 | { |
34dc7c2f BB |
1536 | ASSERT(MUTEX_HELD(&msp->ms_lock)); |
1537 | ||
1538 | if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) { | |
93cf2076 GW |
1539 | metaslab_load_wait(msp); |
1540 | if (!msp->ms_loaded) { | |
1541 | int error = metaslab_load(msp); | |
1542 | if (error) { | |
428870ff BB |
1543 | metaslab_group_sort(msp->ms_group, msp, 0); |
1544 | return (error); | |
1545 | } | |
34dc7c2f | 1546 | } |
9babb374 | 1547 | |
34dc7c2f BB |
1548 | metaslab_group_sort(msp->ms_group, msp, |
1549 | msp->ms_weight | activation_weight); | |
1550 | } | |
93cf2076 | 1551 | ASSERT(msp->ms_loaded); |
34dc7c2f BB |
1552 | ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); |
1553 | ||
1554 | return (0); | |
1555 | } | |
1556 | ||
1557 | static void | |
1558 | metaslab_passivate(metaslab_t *msp, uint64_t size) | |
1559 | { | |
1560 | /* | |
1561 | * If size < SPA_MINBLOCKSIZE, then we will not allocate from | |
1562 | * this metaslab again. In that case, it had better be empty, | |
1563 | * or we would be leaving space on the table. | |
1564 | */ | |
93cf2076 | 1565 | ASSERT(size >= SPA_MINBLOCKSIZE || range_tree_space(msp->ms_tree) == 0); |
34dc7c2f BB |
1566 | metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size)); |
1567 | ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0); | |
1568 | } | |
1569 | ||
93cf2076 GW |
1570 | static void |
1571 | metaslab_preload(void *arg) | |
1572 | { | |
1573 | metaslab_t *msp = arg; | |
1574 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
1575 | ||
080b3100 GW |
1576 | ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock)); |
1577 | ||
93cf2076 GW |
1578 | mutex_enter(&msp->ms_lock); |
1579 | metaslab_load_wait(msp); | |
1580 | if (!msp->ms_loaded) | |
1581 | (void) metaslab_load(msp); | |
1582 | ||
1583 | /* | |
1584 | * Set the ms_access_txg value so that we don't unload it right away. | |
1585 | */ | |
1586 | msp->ms_access_txg = spa_syncing_txg(spa) + metaslab_unload_delay + 1; | |
1587 | mutex_exit(&msp->ms_lock); | |
1588 | } | |
1589 | ||
1590 | static void | |
1591 | metaslab_group_preload(metaslab_group_t *mg) | |
1592 | { | |
1593 | spa_t *spa = mg->mg_vd->vdev_spa; | |
1594 | metaslab_t *msp; | |
1595 | avl_tree_t *t = &mg->mg_metaslab_tree; | |
1596 | int m = 0; | |
1597 | ||
1598 | if (spa_shutting_down(spa) || !metaslab_preload_enabled) { | |
c5528b9b | 1599 | taskq_wait_outstanding(mg->mg_taskq, 0); |
93cf2076 GW |
1600 | return; |
1601 | } | |
93cf2076 | 1602 | |
080b3100 | 1603 | mutex_enter(&mg->mg_lock); |
93cf2076 | 1604 | /* |
080b3100 | 1605 | * Load the next potential metaslabs |
93cf2076 | 1606 | */ |
080b3100 GW |
1607 | msp = avl_first(t); |
1608 | while (msp != NULL) { | |
1609 | metaslab_t *msp_next = AVL_NEXT(t, msp); | |
93cf2076 | 1610 | |
f3a7f661 GW |
1611 | /* |
1612 | * We preload only the maximum number of metaslabs specified | |
1613 | * by metaslab_preload_limit. If a metaslab is being forced | |
1614 | * to condense then we preload it too. This will ensure | |
1615 | * that force condensing happens in the next txg. | |
1616 | */ | |
1617 | if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) { | |
1618 | msp = msp_next; | |
1619 | continue; | |
1620 | } | |
93cf2076 | 1621 | |
080b3100 GW |
1622 | /* |
1623 | * We must drop the metaslab group lock here to preserve | |
1624 | * lock ordering with the ms_lock (when grabbing both | |
1625 | * the mg_lock and the ms_lock, the ms_lock must be taken | |
1626 | * first). As a result, it is possible that the ordering | |
1627 | * of the metaslabs within the avl tree may change before | |
1628 | * we reacquire the lock. The metaslab cannot be removed from | |
1629 | * the tree while we're in syncing context so it is safe to | |
1630 | * drop the mg_lock here. If the metaslabs are reordered | |
1631 | * nothing will break -- we just may end up loading a | |
1632 | * less than optimal one. | |
1633 | */ | |
1634 | mutex_exit(&mg->mg_lock); | |
93cf2076 | 1635 | VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload, |
79c76d5b | 1636 | msp, TQ_SLEEP) != 0); |
080b3100 GW |
1637 | mutex_enter(&mg->mg_lock); |
1638 | msp = msp_next; | |
93cf2076 GW |
1639 | } |
1640 | mutex_exit(&mg->mg_lock); | |
1641 | } | |
1642 | ||
e51be066 | 1643 | /* |
93cf2076 GW |
1644 | * Determine if the space map's on-disk footprint is past our tolerance |
1645 | * for inefficiency. We would like to use the following criteria to make | |
1646 | * our decision: | |
e51be066 GW |
1647 | * |
1648 | * 1. The size of the space map object should not dramatically increase as a | |
93cf2076 | 1649 | * result of writing out the free space range tree. |
e51be066 GW |
1650 | * |
1651 | * 2. The minimal on-disk space map representation is zfs_condense_pct/100 | |
93cf2076 GW |
1652 | * times the size of the free space range tree representation | |
1653 | * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB). | |
e51be066 | 1654 | * |
b02fe35d AR |
1655 | * 3. The on-disk size of the space map should actually decrease. |
1656 | * | |
e51be066 GW |
1657 | * Checking the first condition is tricky since we don't want to walk |
1658 | * the entire AVL tree calculating the estimated on-disk size. Instead we | |
93cf2076 GW |
1659 | * use the size-ordered range tree in the metaslab and calculate the |
1660 | * size required to write out the largest segment in our free tree. If the | |
e51be066 GW |
1661 | * size required to represent that segment on disk is larger than the space |
1662 | * map object then we avoid condensing this map. | |
1663 | * | |
1664 | * To determine the second criterion we use a best-case estimate and assume | |
1665 | * each segment can be represented on-disk as a single 64-bit entry. We refer | |
1666 | * to this best-case estimate as the space map's minimal form. | |
b02fe35d AR |
1667 | * |
1668 | * Unfortunately, we cannot compute the on-disk size of the space map in this | |
1669 | * context because we cannot accurately compute the effects of compression, etc. | |
1670 | * Instead, we apply the heuristic described in the block comment for | |
1671 | * zfs_metaslab_condense_block_threshold - we only condense if the space used | |
1672 | * is greater than a threshold number of blocks. | |
e51be066 GW |
1673 | */ |
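/*
 * Illustrative example (values assumed, not from the original source):
 * for a loaded metaslab whose free range tree holds 1000 segments, the
 * minimal form is 1000 * 8 = 8000 bytes. With zfs_condense_pct = 200 we
 * condense only if the existing space map object is at least 16000
 * bytes, writing out the largest single free segment would not exceed
 * the object's current size, and the object spans more than
 * zfs_metaslab_condense_block_threshold blocks.
 */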
1674 | static boolean_t | |
1675 | metaslab_should_condense(metaslab_t *msp) | |
1676 | { | |
93cf2076 GW |
1677 | space_map_t *sm = msp->ms_sm; |
1678 | range_seg_t *rs; | |
b02fe35d AR |
1679 | uint64_t size, entries, segsz, object_size, optimal_size, record_size; |
1680 | dmu_object_info_t doi; | |
1681 | uint64_t vdev_blocksize = 1 << msp->ms_group->mg_vd->vdev_ashift; | |
e51be066 GW |
1682 | |
1683 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
93cf2076 | 1684 | ASSERT(msp->ms_loaded); |
e51be066 GW |
1685 | |
1686 | /* | |
93cf2076 | 1687 | * Use the ms_size_tree range tree, which is ordered by size, to |
f3a7f661 GW |
1688 | * obtain the largest segment in the free tree. We always condense |
1689 | * metaslabs that are empty and metaslabs for which a condense | |
1690 | * request has been made. | |
e51be066 | 1691 | */ |
93cf2076 | 1692 | rs = avl_last(&msp->ms_size_tree); |
f3a7f661 | 1693 | if (rs == NULL || msp->ms_condense_wanted) |
e51be066 GW |
1694 | return (B_TRUE); |
1695 | ||
1696 | /* | |
1697 | * Calculate the number of 64-bit entries this segment would | |
1698 | * require when written to disk. If this single segment would be | |
1699 | * larger on-disk than the entire current on-disk structure, then | |
1700 | * clearly condensing will increase the on-disk structure size. | |
1701 | */ | |
93cf2076 | 1702 | size = (rs->rs_end - rs->rs_start) >> sm->sm_shift; |
e51be066 GW |
1703 | entries = size / (MIN(size, SM_RUN_MAX)); |
1704 | segsz = entries * sizeof (uint64_t); | |
1705 | ||
b02fe35d AR |
1706 | optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root); |
1707 | object_size = space_map_length(msp->ms_sm); | |
1708 | ||
1709 | dmu_object_info_from_db(sm->sm_dbuf, &doi); | |
1710 | record_size = MAX(doi.doi_data_block_size, vdev_blocksize); | |
1711 | ||
1712 | return (segsz <= object_size && | |
1713 | object_size >= (optimal_size * zfs_condense_pct / 100) && | |
1714 | object_size > zfs_metaslab_condense_block_threshold * record_size); | |
e51be066 GW |
1715 | } |
1716 | ||
1717 | /* | |
1718 | * Condense the on-disk space map representation to its minimized form. | |
1719 | * The minimized form consists of a small number of allocations followed by | |
93cf2076 | 1720 | * the entries of the free range tree. |
e51be066 GW |
1721 | */ |
1722 | static void | |
1723 | metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx) | |
1724 | { | |
1725 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
93cf2076 GW |
1726 | range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK]; |
1727 | range_tree_t *condense_tree; | |
1728 | space_map_t *sm = msp->ms_sm; | |
e51be066 GW |
1729 | int t; |
1730 | ||
1731 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
1732 | ASSERT3U(spa_sync_pass(spa), ==, 1); | |
93cf2076 | 1733 | ASSERT(msp->ms_loaded); |
e51be066 | 1734 | |
f3a7f661 | 1735 | |
e51be066 | 1736 | spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, " |
f3a7f661 GW |
1737 | "smp size %llu, segments %lu, forcing condense=%s", txg, |
1738 | msp->ms_id, msp, space_map_length(msp->ms_sm), | |
1739 | avl_numnodes(&msp->ms_tree->rt_root), | |
1740 | msp->ms_condense_wanted ? "TRUE" : "FALSE"); | |
1741 | ||
1742 | msp->ms_condense_wanted = B_FALSE; | |
e51be066 GW |
1743 | |
1744 | /* | |
93cf2076 | 1745 | * Create an range tree that is 100% allocated. We remove segments |
e51be066 GW |
1746 | * that have been freed in this txg, any deferred frees that exist, |
1747 | * and any allocation in the future. Removing segments should be | |
93cf2076 GW |
1748 | * a relatively inexpensive operation since we expect these trees to |
1749 | * have a small number of nodes. | |
e51be066 | 1750 | */ |
93cf2076 GW |
1751 | condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock); |
1752 | range_tree_add(condense_tree, msp->ms_start, msp->ms_size); | |
e51be066 GW |
1753 | |
1754 | /* | |
93cf2076 | 1755 | * Remove what's been freed in this txg from the condense_tree. |
e51be066 | 1756 | * Since we're in sync_pass 1, we know that all the frees from |
93cf2076 | 1757 | * this txg are in the freetree. |
e51be066 | 1758 | */ |
93cf2076 | 1759 | range_tree_walk(freetree, range_tree_remove, condense_tree); |
e51be066 | 1760 | |
93cf2076 GW |
1761 | for (t = 0; t < TXG_DEFER_SIZE; t++) { |
1762 | range_tree_walk(msp->ms_defertree[t], | |
1763 | range_tree_remove, condense_tree); | |
1764 | } | |
e51be066 | 1765 | |
93cf2076 GW |
1766 | for (t = 1; t < TXG_CONCURRENT_STATES; t++) { |
1767 | range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK], | |
1768 | range_tree_remove, condense_tree); | |
1769 | } | |
e51be066 GW |
1770 | |
1771 | /* | |
1772 | * We're about to drop the metaslab's lock thus allowing | |
1773 | * other consumers to change its content. Set the | |
93cf2076 | 1774 | * metaslab's ms_condensing flag to ensure that |
e51be066 GW |
1775 | * allocations on this metaslab do not occur while we're |
1776 | * in the middle of committing it to disk. This is only critical | |
93cf2076 | 1777 | * for the ms_tree as all other range trees use per txg |
e51be066 GW |
1778 | * views of their content. |
1779 | */ | |
93cf2076 | 1780 | msp->ms_condensing = B_TRUE; |
e51be066 GW |
1781 | |
1782 | mutex_exit(&msp->ms_lock); | |
93cf2076 | 1783 | space_map_truncate(sm, tx); |
e51be066 GW |
1784 | mutex_enter(&msp->ms_lock); |
1785 | ||
1786 | /* | |
1787 | * While we would ideally like to create a space_map representation | |
1788 | * that consists only of allocation records, doing so can be | |
93cf2076 | 1789 | * prohibitively expensive because the in-core free tree can be |
e51be066 | 1790 | * large, and therefore computationally expensive to subtract |
93cf2076 GW |
1791 | * from the condense_tree. Instead we sync out two trees, a cheap |
1792 | * allocation only tree followed by the in-core free tree. While not | |
e51be066 GW |
1793 | * optimal, this is typically close to optimal, and much cheaper to |
1794 | * compute. | |
1795 | */ | |
93cf2076 GW |
1796 | space_map_write(sm, condense_tree, SM_ALLOC, tx); |
1797 | range_tree_vacate(condense_tree, NULL, NULL); | |
1798 | range_tree_destroy(condense_tree); | |
e51be066 | 1799 | |
93cf2076 GW |
1800 | space_map_write(sm, msp->ms_tree, SM_FREE, tx); |
1801 | msp->ms_condensing = B_FALSE; | |
e51be066 GW |
1802 | } |
1803 | ||
34dc7c2f BB |
1804 | /* |
1805 | * Write a metaslab to disk in the context of the specified transaction group. | |
1806 | */ | |
1807 | void | |
1808 | metaslab_sync(metaslab_t *msp, uint64_t txg) | |
1809 | { | |
93cf2076 GW |
1810 | metaslab_group_t *mg = msp->ms_group; |
1811 | vdev_t *vd = mg->mg_vd; | |
34dc7c2f | 1812 | spa_t *spa = vd->vdev_spa; |
428870ff | 1813 | objset_t *mos = spa_meta_objset(spa); |
93cf2076 GW |
1814 | range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK]; |
1815 | range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK]; | |
1816 | range_tree_t **freed_tree = | |
1817 | &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK]; | |
34dc7c2f | 1818 | dmu_tx_t *tx; |
93cf2076 | 1819 | uint64_t object = space_map_object(msp->ms_sm); |
34dc7c2f | 1820 | |
428870ff BB |
1821 | ASSERT(!vd->vdev_ishole); |
1822 | ||
e51be066 GW |
1823 | /* |
1824 | * This metaslab has just been added so there's no work to do now. | |
1825 | */ | |
93cf2076 GW |
1826 | if (*freetree == NULL) { |
1827 | ASSERT3P(alloctree, ==, NULL); | |
e51be066 GW |
1828 | return; |
1829 | } | |
1830 | ||
93cf2076 GW |
1831 | ASSERT3P(alloctree, !=, NULL); |
1832 | ASSERT3P(*freetree, !=, NULL); | |
1833 | ASSERT3P(*freed_tree, !=, NULL); | |
e51be066 | 1834 | |
f3a7f661 GW |
1835 | /* |
1836 | * Normally, we don't want to process a metaslab if there | |
1837 | * are no allocations or frees to perform. However, if the metaslab | |
1838 | * is being forced to condense we need to let it through. | |
1839 | */ | |
93cf2076 | 1840 | if (range_tree_space(alloctree) == 0 && |
f3a7f661 GW |
1841 | range_tree_space(*freetree) == 0 && |
1842 | !msp->ms_condense_wanted) | |
428870ff | 1843 | return; |
34dc7c2f BB |
1844 | |
1845 | /* | |
1846 | * The only state that can actually be changing concurrently with | |
93cf2076 GW |
1847 | * metaslab_sync() is the metaslab's ms_tree. No other thread can |
1848 | * be modifying this txg's alloctree, freetree, freed_tree, or | |
1849 | * space_map_phys_t. Therefore, we only hold ms_lock to satisfy | |
1850 | * space_map ASSERTs. We drop it whenever we call into the DMU, | |
1851 | * because the DMU can call down to us (e.g. via zio_free()) at | |
1852 | * any time. | |
34dc7c2f | 1853 | */ |
428870ff BB |
1854 | |
1855 | tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); | |
34dc7c2f | 1856 | |
93cf2076 GW |
1857 | if (msp->ms_sm == NULL) { |
1858 | uint64_t new_object; | |
1859 | ||
1860 | new_object = space_map_alloc(mos, tx); | |
1861 | VERIFY3U(new_object, !=, 0); | |
1862 | ||
1863 | VERIFY0(space_map_open(&msp->ms_sm, mos, new_object, | |
1864 | msp->ms_start, msp->ms_size, vd->vdev_ashift, | |
1865 | &msp->ms_lock)); | |
1866 | ASSERT(msp->ms_sm != NULL); | |
34dc7c2f BB |
1867 | } |
1868 | ||
428870ff BB |
1869 | mutex_enter(&msp->ms_lock); |
1870 | ||
96358617 MA |
1871 | /* |
1872 | * Note: metaslab_condense() clears the space_map's histogram. | |
1873 | * Therefore we must verify and remove this histogram before | |
1874 | * condensing. | |
1875 | */ | |
1876 | metaslab_group_histogram_verify(mg); | |
1877 | metaslab_class_histogram_verify(mg->mg_class); | |
1878 | metaslab_group_histogram_remove(mg, msp); | |
1879 | ||
93cf2076 | 1880 | if (msp->ms_loaded && spa_sync_pass(spa) == 1 && |
e51be066 GW |
1881 | metaslab_should_condense(msp)) { |
1882 | metaslab_condense(msp, txg, tx); | |
1883 | } else { | |
93cf2076 GW |
1884 | space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx); |
1885 | space_map_write(msp->ms_sm, *freetree, SM_FREE, tx); | |
e51be066 | 1886 | } |
428870ff | 1887 | |
93cf2076 GW |
1888 | if (msp->ms_loaded) { |
1889 | /* | |
1890 | * When the space map is loaded, we have an accurate | |
1891 | * histogram in the range tree. This gives us an opportunity | |
1892 | * to bring the space map's histogram up-to-date so we clear | |
1893 | * it first before updating it. | |
1894 | */ | |
1895 | space_map_histogram_clear(msp->ms_sm); | |
1896 | space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx); | |
1897 | } else { | |
1898 | /* | |
1899 | * Since the space map is not loaded we simply update the | |
1900 | * existing histogram with what was freed in this txg. This | |
1901 | * means that the on-disk histogram may not have an accurate | |
1902 | * view of the free space but it's close enough to allow | |
1903 | * us to make allocation decisions. | |
1904 | */ | |
1905 | space_map_histogram_add(msp->ms_sm, *freetree, tx); | |
1906 | } | |
f3a7f661 GW |
1907 | metaslab_group_histogram_add(mg, msp); |
1908 | metaslab_group_histogram_verify(mg); | |
1909 | metaslab_class_histogram_verify(mg->mg_class); | |
34dc7c2f | 1910 | |
e51be066 | 1911 | /* |
93cf2076 GW |
1912 | * For sync pass 1, we avoid traversing this txg's free range tree |
1913 | * and instead will just swap the pointers for freetree and | |
1914 | * freed_tree. We can safely do this since the freed_tree is | |
e51be066 GW |
1915 | * guaranteed to be empty on the initial pass. |
1916 | */ | |
1917 | if (spa_sync_pass(spa) == 1) { | |
93cf2076 | 1918 | range_tree_swap(freetree, freed_tree); |
e51be066 | 1919 | } else { |
93cf2076 | 1920 | range_tree_vacate(*freetree, range_tree_add, *freed_tree); |
34dc7c2f | 1921 | } |
f3a7f661 | 1922 | range_tree_vacate(alloctree, NULL, NULL); |
34dc7c2f | 1923 | |
93cf2076 GW |
1924 | ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK])); |
1925 | ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK])); | |
34dc7c2f BB |
1926 | |
1927 | mutex_exit(&msp->ms_lock); | |
1928 | ||
93cf2076 GW |
1929 | if (object != space_map_object(msp->ms_sm)) { |
1930 | object = space_map_object(msp->ms_sm); | |
1931 | dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) * | |
1932 | msp->ms_id, sizeof (uint64_t), &object, tx); | |
1933 | } | |
34dc7c2f BB |
1934 | dmu_tx_commit(tx); |
1935 | } | |
1936 | ||
1937 | /* | |
1938 | * Called after a transaction group has completely synced to mark | |
1939 | * all of the metaslab's free space as usable. | |
1940 | */ | |
1941 | void | |
1942 | metaslab_sync_done(metaslab_t *msp, uint64_t txg) | |
1943 | { | |
34dc7c2f BB |
1944 | metaslab_group_t *mg = msp->ms_group; |
1945 | vdev_t *vd = mg->mg_vd; | |
93cf2076 GW |
1946 | range_tree_t **freed_tree; |
1947 | range_tree_t **defer_tree; | |
428870ff | 1948 | int64_t alloc_delta, defer_delta; |
d6320ddb | 1949 | int t; |
428870ff BB |
1950 | |
1951 | ASSERT(!vd->vdev_ishole); | |
34dc7c2f BB |
1952 | |
1953 | mutex_enter(&msp->ms_lock); | |
1954 | ||
1955 | /* | |
1956 | * If this metaslab is just becoming available, initialize its | |
93cf2076 GW |
1957 | * alloctrees, freetrees, and defertree and add its capacity to |
1958 | * the vdev. | |
34dc7c2f | 1959 | */ |
93cf2076 | 1960 | if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) { |
d6320ddb | 1961 | for (t = 0; t < TXG_SIZE; t++) { |
93cf2076 GW |
1962 | ASSERT(msp->ms_alloctree[t] == NULL); |
1963 | ASSERT(msp->ms_freetree[t] == NULL); | |
1964 | ||
1965 | msp->ms_alloctree[t] = range_tree_create(NULL, msp, | |
1966 | &msp->ms_lock); | |
1967 | msp->ms_freetree[t] = range_tree_create(NULL, msp, | |
1968 | &msp->ms_lock); | |
34dc7c2f | 1969 | } |
428870ff | 1970 | |
e51be066 | 1971 | for (t = 0; t < TXG_DEFER_SIZE; t++) { |
93cf2076 | 1972 | ASSERT(msp->ms_defertree[t] == NULL); |
e51be066 | 1973 | |
93cf2076 GW |
1974 | msp->ms_defertree[t] = range_tree_create(NULL, msp, |
1975 | &msp->ms_lock); | |
1976 | } | |
428870ff | 1977 | |
93cf2076 | 1978 | vdev_space_update(vd, 0, 0, msp->ms_size); |
34dc7c2f BB |
1979 | } |
1980 | ||
93cf2076 GW |
1981 | freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK]; |
1982 | defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE]; | |
1983 | ||
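	/*
	 * defer_delta is the net change in deferred space: the frees
	 * from the txg that just synced enter the defer_tree below,
	 * while those deferred TXG_DEFER_SIZE txgs ago leave it and
	 * return to circulation.
	 */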
1984 | alloc_delta = space_map_alloc_delta(msp->ms_sm); | |
1985 | defer_delta = range_tree_space(*freed_tree) - | |
1986 | range_tree_space(*defer_tree); | |
428870ff BB |
1987 | |
1988 | vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0); | |
34dc7c2f | 1989 | |
93cf2076 GW |
1990 | ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK])); |
1991 | ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK])); | |
34dc7c2f BB |
1992 | |
1993 | /* | |
93cf2076 | 1994 | * If there's a metaslab_load() in progress, wait for it to complete |
34dc7c2f | 1995 | * so that we have a consistent view of the in-core space map. |
34dc7c2f | 1996 | */ |
93cf2076 | 1997 | metaslab_load_wait(msp); |
c2e42f9d GW |
1998 | |
1999 | /* | |
93cf2076 GW |
2000 | * Move the frees from the defer_tree back to the free |
2001 | * range tree (if it's loaded). Swap the freed_tree and the | |
2002 | * defer_tree -- this is safe to do because we've just emptied out | |
2003 | * the defer_tree. | |
c2e42f9d | 2004 | */ |
93cf2076 GW |
2005 | range_tree_vacate(*defer_tree, |
2006 | msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree); | |
2007 | range_tree_swap(freed_tree, defer_tree); | |
34dc7c2f | 2008 | |
93cf2076 | 2009 | space_map_update(msp->ms_sm); |
34dc7c2f | 2010 | |
428870ff BB |
2011 | msp->ms_deferspace += defer_delta; |
2012 | ASSERT3S(msp->ms_deferspace, >=, 0); | |
93cf2076 | 2013 | ASSERT3S(msp->ms_deferspace, <=, msp->ms_size); |
428870ff BB |
2014 | if (msp->ms_deferspace != 0) { |
2015 | /* | |
2016 | * Keep syncing this metaslab until all deferred frees | |
2017 | * are back in circulation. | |
2018 | */ | |
2019 | vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); | |
2020 | } | |
2021 | ||
93cf2076 GW |
2022 | if (msp->ms_loaded && msp->ms_access_txg < txg) { |
2023 | for (t = 1; t < TXG_CONCURRENT_STATES; t++) { | |
2024 | VERIFY0(range_tree_space( | |
2025 | msp->ms_alloctree[(txg + t) & TXG_MASK])); | |
2026 | } | |
34dc7c2f | 2027 | |
93cf2076 GW |
2028 | if (!metaslab_debug_unload) |
2029 | metaslab_unload(msp); | |
34dc7c2f BB |
2030 | } |
2031 | ||
2032 | metaslab_group_sort(mg, msp, metaslab_weight(msp)); | |
34dc7c2f BB |
2033 | mutex_exit(&msp->ms_lock); |
2034 | } | |
2035 | ||
428870ff BB |
2036 | void |
2037 | metaslab_sync_reassess(metaslab_group_t *mg) | |
2038 | { | |
1be627f5 | 2039 | metaslab_group_alloc_update(mg); |
f3a7f661 | 2040 | mg->mg_fragmentation = metaslab_group_fragmentation(mg); |
6d974228 | 2041 | |
428870ff | 2042 | /* |
93cf2076 | 2043 | * Preload the next potential metaslabs |
428870ff | 2044 | */ |
93cf2076 | 2045 | metaslab_group_preload(mg); |
428870ff BB |
2046 | } |
2047 | ||
34dc7c2f BB |
2048 | static uint64_t |
2049 | metaslab_distance(metaslab_t *msp, dva_t *dva) | |
2050 | { | |
2051 | uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift; | |
2052 | uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift; | |
93cf2076 | 2053 | uint64_t start = msp->ms_id; |
34dc7c2f BB |
2054 | |
2055 | if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva)) | |
2056 | return (1ULL << 63); | |
2057 | ||
2058 | if (offset < start) | |
2059 | return ((start - offset) << ms_shift); | |
2060 | if (offset > start) | |
2061 | return ((offset - start) << ms_shift); | |
2062 | return (0); | |
2063 | } | |
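/*
 * For instance (illustrative), with vdev_ms_shift == 30 (1GB metaslabs)
 * a DVA landing in metaslab 5 is (8 - 5) << 30 == 3GB away from
 * metaslab 8 on the same vdev, while a DVA on any other vdev reports
 * the maximal distance of 1ULL << 63.
 */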
2064 | ||
2065 | static uint64_t | |
6d974228 | 2066 | metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize, |
672692c7 | 2067 | uint64_t txg, uint64_t min_distance, dva_t *dva, int d) |
34dc7c2f | 2068 | { |
6d974228 | 2069 | spa_t *spa = mg->mg_vd->vdev_spa; |
34dc7c2f BB |
2070 | metaslab_t *msp = NULL; |
2071 | uint64_t offset = -1ULL; | |
2072 | avl_tree_t *t = &mg->mg_metaslab_tree; | |
2073 | uint64_t activation_weight; | |
2074 | uint64_t target_distance; | |
2075 | int i; | |
2076 | ||
2077 | activation_weight = METASLAB_WEIGHT_PRIMARY; | |
9babb374 BB |
2078 | for (i = 0; i < d; i++) { |
2079 | if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { | |
34dc7c2f | 2080 | activation_weight = METASLAB_WEIGHT_SECONDARY; |
9babb374 BB |
2081 | break; |
2082 | } | |
2083 | } | |
34dc7c2f BB |
2084 | |
2085 | for (;;) { | |
9babb374 BB |
2086 | boolean_t was_active; |
2087 | ||
34dc7c2f BB |
2088 | mutex_enter(&mg->mg_lock); |
2089 | for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) { | |
6d974228 GW |
2090 | if (msp->ms_weight < asize) { |
2091 | spa_dbgmsg(spa, "%s: failed to meet weight " | |
2092 | "requirement: vdev %llu, txg %llu, mg %p, " | |
2093 | "msp %p, psize %llu, asize %llu, " | |
672692c7 GW |
2094 | "weight %llu", spa_name(spa), |
2095 | mg->mg_vd->vdev_id, txg, | |
2096 | mg, msp, psize, asize, msp->ms_weight); | |
34dc7c2f BB |
2097 | mutex_exit(&mg->mg_lock); |
2098 | return (-1ULL); | |
2099 | } | |
7a614407 GW |
2100 | |
2101 | /* | |
2102 | * If the selected metaslab is condensing, skip it. | |
2103 | */ | |
93cf2076 | 2104 | if (msp->ms_condensing) |
7a614407 GW |
2105 | continue; |
2106 | ||
9babb374 | 2107 | was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; |
34dc7c2f BB |
2108 | if (activation_weight == METASLAB_WEIGHT_PRIMARY) |
2109 | break; | |
2110 | ||
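			/*
			 * Require an empty (never-allocated) metaslab to
			 * sit half again as far from the existing DVAs
			 * as an allocated one before selecting it.
			 */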
2111 | target_distance = min_distance + | |
93cf2076 GW |
2112 | (space_map_allocated(msp->ms_sm) != 0 ? 0 : |
2113 | min_distance >> 1); | |
34dc7c2f BB |
2114 | |
2115 | for (i = 0; i < d; i++) | |
2116 | if (metaslab_distance(msp, &dva[i]) < | |
2117 | target_distance) | |
2118 | break; | |
2119 | if (i == d) | |
2120 | break; | |
2121 | } | |
2122 | mutex_exit(&mg->mg_lock); | |
2123 | if (msp == NULL) | |
2124 | return (-1ULL); | |
2125 | ||
ac72fac3 GW |
2126 | mutex_enter(&msp->ms_lock); |
2127 | ||
34dc7c2f BB |
2128 | /* |
2129 | * Ensure that the metaslab we have selected is still | |
2130 | * capable of handling our request. It's possible that | |
2131 | * another thread may have changed the weight while we | |
2132 | * were blocked on the metaslab lock. | |
2133 | */ | |
6d974228 | 2134 | if (msp->ms_weight < asize || (was_active && |
9babb374 BB |
2135 | !(msp->ms_weight & METASLAB_ACTIVE_MASK) && |
2136 | activation_weight == METASLAB_WEIGHT_PRIMARY)) { | |
34dc7c2f BB |
2137 | mutex_exit(&msp->ms_lock); |
2138 | continue; | |
2139 | } | |
2140 | ||
2141 | if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) && | |
2142 | activation_weight == METASLAB_WEIGHT_PRIMARY) { | |
2143 | metaslab_passivate(msp, | |
2144 | msp->ms_weight & ~METASLAB_ACTIVE_MASK); | |
2145 | mutex_exit(&msp->ms_lock); | |
2146 | continue; | |
2147 | } | |
2148 | ||
6d974228 | 2149 | if (metaslab_activate(msp, activation_weight) != 0) { |
34dc7c2f BB |
2150 | mutex_exit(&msp->ms_lock); |
2151 | continue; | |
2152 | } | |
2153 | ||
7a614407 GW |
2154 | /* |
2155 | * If this metaslab is currently condensing then pick again as | |
2156 | * we can't manipulate this metaslab until it's committed | |
2157 | * to disk. | |
2158 | */ | |
93cf2076 | 2159 | if (msp->ms_condensing) { |
7a614407 GW |
2160 | mutex_exit(&msp->ms_lock); |
2161 | continue; | |
2162 | } | |
2163 | ||
93cf2076 | 2164 | if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL) |
34dc7c2f BB |
2165 | break; |
2166 | ||
93cf2076 | 2167 | metaslab_passivate(msp, metaslab_block_maxsize(msp)); |
34dc7c2f BB |
2168 | mutex_exit(&msp->ms_lock); |
2169 | } | |
2170 | ||
93cf2076 | 2171 | if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0) |
34dc7c2f BB |
2172 | vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg); |
2173 | ||
93cf2076 GW |
2174 | range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize); |
2175 | msp->ms_access_txg = txg + metaslab_unload_delay; | |
34dc7c2f BB |
2176 | |
2177 | mutex_exit(&msp->ms_lock); | |
2178 | ||
2179 | return (offset); | |
2180 | } | |
2181 | ||
2182 | /* | |
2183 | * Allocate a block for the specified i/o. | |
2184 | */ | |
2185 | static int | |
2186 | metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, | |
b128c09f | 2187 | dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags) |
34dc7c2f | 2188 | { |
920dd524 | 2189 | metaslab_group_t *mg, *fast_mg, *rotor; |
34dc7c2f BB |
2190 | vdev_t *vd; |
2191 | int dshift = 3; | |
2192 | int all_zero; | |
fb5f0bc8 BB |
2193 | int zio_lock = B_FALSE; |
2194 | boolean_t allocatable; | |
34dc7c2f BB |
2195 | uint64_t offset = -1ULL; |
2196 | uint64_t asize; | |
2197 | uint64_t distance; | |
2198 | ||
2199 | ASSERT(!DVA_IS_VALID(&dva[d])); | |
2200 | ||
2201 | /* | |
2202 | * For testing, make some blocks above a certain size be gang blocks. | |
2203 | */ | |
428870ff | 2204 | if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0) |
2e528b49 | 2205 | return (SET_ERROR(ENOSPC)); |
34dc7c2f | 2206 | |
920dd524 ED |
2207 | if (flags & METASLAB_FASTWRITE) |
2208 | mutex_enter(&mc->mc_fastwrite_lock); | |
2209 | ||
34dc7c2f BB |
2210 | /* |
2211 | * Start at the rotor and loop through all mgs until we find something. | |
428870ff | 2212 | * Note that there's no locking on mc_rotor or mc_aliquot because |
34dc7c2f BB |
2213 | * nothing actually breaks if we miss a few updates -- we just won't |
2214 | * allocate quite as evenly. It all balances out over time. | |
2215 | * | |
2216 | * If we are doing ditto or log blocks, try to spread them across | |
2217 | * consecutive vdevs. If we're forced to reuse a vdev before we've | |
2218 | * allocated all of our ditto blocks, then try and spread them out on | |
2219 | * that vdev as much as possible. If it turns out to not be possible, | |
2220 | * gradually lower our standards until anything becomes acceptable. | |
2221 | * Also, allocating on consecutive vdevs (as opposed to random vdevs) | |
2222 | * gives us hope of containing our fault domains to something we're | |
2223 | * able to reason about. Otherwise, any two top-level vdev failures | |
2224 | * will guarantee the loss of data. With consecutive allocation, | |
2225 | * only two adjacent top-level vdev failures will result in data loss. | |
2226 | * | |
2227 | * If we are doing gang blocks (hintdva is non-NULL), try to keep | |
2228 | * ourselves on the same vdev as our gang block header. That | |
2229 | * way, we can hope for locality in vdev_cache, plus it makes our | |
2230 | * fault domains something tractable. | |
2231 | */ | |
2232 | if (hintdva) { | |
2233 | vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d])); | |
428870ff BB |
2234 | |
2235 | /* | |
2236 | * It's possible the vdev we're using as the hint no | |
2237 | * longer exists (i.e. removed). Consult the rotor when | |
2238 | * all else fails. | |
2239 | */ | |
2240 | if (vd != NULL) { | |
34dc7c2f | 2241 | mg = vd->vdev_mg; |
428870ff BB |
2242 | |
2243 | if (flags & METASLAB_HINTBP_AVOID && | |
2244 | mg->mg_next != NULL) | |
2245 | mg = mg->mg_next; | |
2246 | } else { | |
2247 | mg = mc->mc_rotor; | |
2248 | } | |
34dc7c2f BB |
2249 | } else if (d != 0) { |
2250 | vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1])); | |
2251 | mg = vd->vdev_mg->mg_next; | |
920dd524 ED |
2252 | } else if (flags & METASLAB_FASTWRITE) { |
2253 | mg = fast_mg = mc->mc_rotor; | |
2254 | ||
2255 | do { | |
2256 | if (fast_mg->mg_vd->vdev_pending_fastwrite < | |
2257 | mg->mg_vd->vdev_pending_fastwrite) | |
2258 | mg = fast_mg; | |
2259 | } while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor); | |
2260 | ||
34dc7c2f BB |
2261 | } else { |
2262 | mg = mc->mc_rotor; | |
2263 | } | |
2264 | ||
2265 | /* | |
428870ff BB |
2266 | * If the hint put us into the wrong metaslab class, or into a |
2267 | * metaslab group that has been passivated, just follow the rotor. | |
34dc7c2f | 2268 | */ |
428870ff | 2269 | if (mg->mg_class != mc || mg->mg_activation_count <= 0) |
34dc7c2f BB |
2270 | mg = mc->mc_rotor; |
2271 | ||
2272 | rotor = mg; | |
2273 | top: | |
2274 | all_zero = B_TRUE; | |
2275 | do { | |
428870ff BB |
2276 | ASSERT(mg->mg_activation_count == 1); |
2277 | ||
34dc7c2f | 2278 | vd = mg->mg_vd; |
fb5f0bc8 | 2279 | |
34dc7c2f | 2280 | /* |
b128c09f | 2281 | * Don't allocate from faulted devices. |
34dc7c2f | 2282 | */ |
fb5f0bc8 BB |
2283 | if (zio_lock) { |
2284 | spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER); | |
2285 | allocatable = vdev_allocatable(vd); | |
2286 | spa_config_exit(spa, SCL_ZIO, FTAG); | |
2287 | } else { | |
2288 | allocatable = vdev_allocatable(vd); | |
2289 | } | |
ac72fac3 GW |
2290 | |
2291 | /* | |
2292 | * Determine if the selected metaslab group is eligible | |
2293 | * for allocations. If we're ganging or have requested | |
2294 | * an allocation for the smallest gang block size | |
2295 | * then we don't want to avoid allocating to this | |
2296 | * metaslab group. If we're in this condition we should | |
2297 | * try to allocate from any device possible so that we | |
2298 | * don't inadvertently return ENOSPC and suspend the pool | |
2299 | * even though space is still available. | |
2300 | */ | |
2301 | if (allocatable && CAN_FASTGANG(flags) && | |
2302 | psize > SPA_GANGBLOCKSIZE) | |
2303 | allocatable = metaslab_group_allocatable(mg); | |
2304 | ||
fb5f0bc8 | 2305 | if (!allocatable) |
34dc7c2f | 2306 | goto next; |
fb5f0bc8 | 2307 | |
34dc7c2f BB |
2308 | /* |
2309 | * Avoid writing single-copy data to a failing vdev | |
43a696ed | 2310 | * unless the user instructs us that it is okay. |
34dc7c2f BB |
2311 | */ |
2312 | if ((vd->vdev_stat.vs_write_errors > 0 || | |
2313 | vd->vdev_state < VDEV_STATE_HEALTHY) && | |
f3a7f661 | 2314 | d == 0 && dshift == 3 && vd->vdev_children == 0) { |
34dc7c2f BB |
2315 | all_zero = B_FALSE; |
2316 | goto next; | |
2317 | } | |
2318 | ||
2319 | ASSERT(mg->mg_class == mc); | |
2320 | ||
2321 | distance = vd->vdev_asize >> dshift; | |
2322 | if (distance <= (1ULL << vd->vdev_ms_shift)) | |
2323 | distance = 0; | |
2324 | else | |
2325 | all_zero = B_FALSE; | |
2326 | ||
2327 | asize = vdev_psize_to_asize(vd, psize); | |
2328 | ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0); | |
2329 | ||
6d974228 | 2330 | offset = metaslab_group_alloc(mg, psize, asize, txg, distance, |
672692c7 | 2331 | dva, d); |
34dc7c2f BB |
2332 | if (offset != -1ULL) { |
2333 | /* | |
2334 | * If we've just selected this metaslab group, | |
2335 | * figure out whether the corresponding vdev is | |
2336 | * over- or under-used relative to the pool, | |
2337 | * and set an allocation bias to even it out. | |
bb3250d0 ED |
2338 | * |
2339 | * Bias is also used to compensate for unequally | |
2340 | * sized vdevs so that space is allocated fairly. | |
34dc7c2f | 2341 | */ |
f3a7f661 | 2342 | if (mc->mc_aliquot == 0 && metaslab_bias_enabled) { |
34dc7c2f | 2343 | vdev_stat_t *vs = &vd->vdev_stat; |
bb3250d0 ED |
2344 | int64_t vs_free = vs->vs_space - vs->vs_alloc; |
2345 | int64_t mc_free = mc->mc_space - mc->mc_alloc; | |
2346 | int64_t ratio; | |
34dc7c2f BB |
2347 | |
2348 | /* | |
6d974228 GW |
2349 | * Calculate how much more or less we should |
2350 | * try to allocate from this device during | |
2351 | * this iteration around the rotor. | |
6d974228 | 2352 | * |
bb3250d0 ED |
2353 | * This basically introduces a zero-centered |
2354 | * bias towards the devices with the most | |
2355 | * free space, while compensating for vdev | |
2356 | * size differences. | |
2357 | * | |
2358 | * Examples: | |
2359 | * vdev V1 = 16M/128M | |
2360 | * vdev V2 = 16M/128M | |
2361 | * ratio(V1) = 100% ratio(V2) = 100% | |
2362 | * | |
2363 | * vdev V1 = 16M/128M | |
2364 | * vdev V2 = 64M/128M | |
2365 | * ratio(V1) = 127% ratio(V2) = 72% | |
6d974228 | 2366 | * |
bb3250d0 ED |
2367 | * vdev V1 = 16M/128M |
2368 | * vdev V2 = 64M/512M | |
2369 | * ratio(V1) = 40% ratio(V2) = 160% | |
34dc7c2f | 2370 | */ |
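				/*
				 * E.g. ratio == 127 gives mg_bias ==
				 * +27% of mg_aliquot; ratio == 72
				 * gives -28%.
				 */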
bb3250d0 ED |
2371 | ratio = (vs_free * mc->mc_alloc_groups * 100) / |
2372 | (mc_free + 1); | |
2373 | mg->mg_bias = ((ratio - 100) * | |
6d974228 | 2374 | (int64_t)mg->mg_aliquot) / 100; |
f3a7f661 GW |
2375 | } else if (!metaslab_bias_enabled) { |
2376 | mg->mg_bias = 0; | |
34dc7c2f BB |
2377 | } |
2378 | ||
920dd524 ED |
2379 | if ((flags & METASLAB_FASTWRITE) || |
2380 | atomic_add_64_nv(&mc->mc_aliquot, asize) >= | |
34dc7c2f BB |
2381 | mg->mg_aliquot + mg->mg_bias) { |
2382 | mc->mc_rotor = mg->mg_next; | |
428870ff | 2383 | mc->mc_aliquot = 0; |
34dc7c2f BB |
2384 | } |
2385 | ||
2386 | DVA_SET_VDEV(&dva[d], vd->vdev_id); | |
2387 | DVA_SET_OFFSET(&dva[d], offset); | |
b128c09f | 2388 | DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER)); |
34dc7c2f BB |
2389 | DVA_SET_ASIZE(&dva[d], asize); |
2390 | ||
920dd524 ED |
2391 | if (flags & METASLAB_FASTWRITE) { |
2392 | atomic_add_64(&vd->vdev_pending_fastwrite, | |
2393 | psize); | |
2394 | mutex_exit(&mc->mc_fastwrite_lock); | |
2395 | } | |
2396 | ||
34dc7c2f BB |
2397 | return (0); |
2398 | } | |
2399 | next: | |
2400 | mc->mc_rotor = mg->mg_next; | |
428870ff | 2401 | mc->mc_aliquot = 0; |
34dc7c2f BB |
2402 | } while ((mg = mg->mg_next) != rotor); |
2403 | ||
2404 | if (!all_zero) { | |
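		/*
		 * No metaslab met the separation requirement; each
		 * increment of dshift halves the target distance
		 * (vdev_asize >> dshift) before we retry.
		 */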
2405 | dshift++; | |
2406 | ASSERT(dshift < 64); | |
2407 | goto top; | |
2408 | } | |
2409 | ||
9babb374 | 2410 | if (!allocatable && !zio_lock) { |
fb5f0bc8 BB |
2411 | dshift = 3; |
2412 | zio_lock = B_TRUE; | |
2413 | goto top; | |
2414 | } | |
2415 | ||
34dc7c2f BB |
2416 | bzero(&dva[d], sizeof (dva_t)); |
2417 | ||
920dd524 ED |
2418 | if (flags & METASLAB_FASTWRITE) |
2419 | mutex_exit(&mc->mc_fastwrite_lock); | |
2e528b49 MA |
2420 | |
2421 | return (SET_ERROR(ENOSPC)); | |
34dc7c2f BB |
2422 | } |
2423 | ||
2424 | /* | |
2425 | * Free the block represented by DVA in the context of the specified | |
2426 | * transaction group. | |
2427 | */ | |
2428 | static void | |
2429 | metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now) | |
2430 | { | |
2431 | uint64_t vdev = DVA_GET_VDEV(dva); | |
2432 | uint64_t offset = DVA_GET_OFFSET(dva); | |
2433 | uint64_t size = DVA_GET_ASIZE(dva); | |
2434 | vdev_t *vd; | |
2435 | metaslab_t *msp; | |
2436 | ||
34dc7c2f BB |
2437 | if (txg > spa_freeze_txg(spa)) |
2438 | return; | |
2439 | ||
7d2868d5 | 2440 | if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) || |
34dc7c2f | 2441 | (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) { |
7d2868d5 BB |
2442 | zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu", |
2443 | (u_longlong_t)vdev, (u_longlong_t)offset, | |
2444 | (u_longlong_t)size); | |
34dc7c2f BB |
2445 | return; |
2446 | } | |
2447 | ||
2448 | msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; | |
2449 | ||
2450 | if (DVA_GET_GANG(dva)) | |
2451 | size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); | |
2452 | ||
2453 | mutex_enter(&msp->ms_lock); | |
2454 | ||
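	/*
	 * With now == B_TRUE we are undoing an allocation from this
	 * txg, so the space goes straight back into the in-core
	 * ms_tree; otherwise the free is queued in this txg's freetree
	 * and processed in metaslab_sync().
	 */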
2455 | if (now) { | |
93cf2076 | 2456 | range_tree_remove(msp->ms_alloctree[txg & TXG_MASK], |
34dc7c2f | 2457 | offset, size); |
93cf2076 GW |
2458 | |
2459 | VERIFY(!msp->ms_condensing); | |
2460 | VERIFY3U(offset, >=, msp->ms_start); | |
2461 | VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size); | |
2462 | VERIFY3U(range_tree_space(msp->ms_tree) + size, <=, | |
2463 | msp->ms_size); | |
2464 | VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); | |
2465 | VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); | |
2466 | range_tree_add(msp->ms_tree, offset, size); | |
34dc7c2f | 2467 | } else { |
93cf2076 | 2468 | if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0) |
34dc7c2f | 2469 | vdev_dirty(vd, VDD_METASLAB, msp, txg); |
93cf2076 GW |
2470 | range_tree_add(msp->ms_freetree[txg & TXG_MASK], |
2471 | offset, size); | |
34dc7c2f BB |
2472 | } |
2473 | ||
2474 | mutex_exit(&msp->ms_lock); | |
2475 | } | |
2476 | ||
2477 | /* | |
2478 | * Intent log support: upon opening the pool after a crash, notify the SPA | |
2479 | * of blocks that the intent log has allocated for immediate write, but | |
2480 | * which are still considered free by the SPA because the last transaction | |
2481 | * group didn't commit yet. | |
2482 | */ | |
2483 | static int | |
2484 | metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) | |
2485 | { | |
2486 | uint64_t vdev = DVA_GET_VDEV(dva); | |
2487 | uint64_t offset = DVA_GET_OFFSET(dva); | |
2488 | uint64_t size = DVA_GET_ASIZE(dva); | |
2489 | vdev_t *vd; | |
2490 | metaslab_t *msp; | |
428870ff | 2491 | int error = 0; |
34dc7c2f BB |
2492 | |
2493 | ASSERT(DVA_IS_VALID(dva)); | |
2494 | ||
2495 | if ((vd = vdev_lookup_top(spa, vdev)) == NULL || | |
2496 | (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) | |
2e528b49 | 2497 | return (SET_ERROR(ENXIO)); |
34dc7c2f BB |
2498 | |
2499 | msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; | |
2500 | ||
2501 | if (DVA_GET_GANG(dva)) | |
2502 | size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); | |
2503 | ||
2504 | mutex_enter(&msp->ms_lock); | |
2505 | ||
93cf2076 | 2506 | if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) |
6d974228 | 2507 | error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY); |
428870ff | 2508 | |
93cf2076 | 2509 | if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size)) |
2e528b49 | 2510 | error = SET_ERROR(ENOENT); |
428870ff | 2511 | |
b128c09f | 2512 | if (error || txg == 0) { /* txg == 0 indicates dry run */ |
34dc7c2f BB |
2513 | mutex_exit(&msp->ms_lock); |
2514 | return (error); | |
2515 | } | |
2516 | ||
93cf2076 GW |
2517 | VERIFY(!msp->ms_condensing); |
2518 | VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); | |
2519 | VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); | |
2520 | VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size); | |
2521 | range_tree_remove(msp->ms_tree, offset, size); | |
b128c09f | 2522 | |
fb5f0bc8 | 2523 | if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */ |
93cf2076 | 2524 | if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0) |
b128c09f | 2525 | vdev_dirty(vd, VDD_METASLAB, msp, txg); |
93cf2076 | 2526 | range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size); |
b128c09f | 2527 | } |
34dc7c2f BB |
2528 | |
2529 | mutex_exit(&msp->ms_lock); | |
2530 | ||
2531 | return (0); | |
2532 | } | |
2533 | ||
2534 | int | |
2535 | metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, | |
b128c09f | 2536 | int ndvas, uint64_t txg, blkptr_t *hintbp, int flags) |
34dc7c2f BB |
2537 | { |
2538 | dva_t *dva = bp->blk_dva; | |
2539 | dva_t *hintdva = hintbp->blk_dva; | |
d6320ddb | 2540 | int d, error = 0; |
34dc7c2f | 2541 | |
b128c09f | 2542 | ASSERT(bp->blk_birth == 0); |
428870ff | 2543 | ASSERT(BP_PHYSICAL_BIRTH(bp) == 0); |
b128c09f BB |
2544 | |
2545 | spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); | |
2546 | ||
2547 | if (mc->mc_rotor == NULL) { /* no vdevs in this class */ | |
2548 | spa_config_exit(spa, SCL_ALLOC, FTAG); | |
2e528b49 | 2549 | return (SET_ERROR(ENOSPC)); |
b128c09f | 2550 | } |
34dc7c2f BB |
2551 | |
2552 | ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa)); | |
2553 | ASSERT(BP_GET_NDVAS(bp) == 0); | |
2554 | ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); | |
2555 | ||
d6320ddb | 2556 | for (d = 0; d < ndvas; d++) { |
34dc7c2f | 2557 | error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, |
b128c09f | 2558 | txg, flags); |
93cf2076 | 2559 | if (error != 0) { |
34dc7c2f BB |
2560 | for (d--; d >= 0; d--) { |
2561 | metaslab_free_dva(spa, &dva[d], txg, B_TRUE); | |
2562 | bzero(&dva[d], sizeof (dva_t)); | |
2563 | } | |
b128c09f | 2564 | spa_config_exit(spa, SCL_ALLOC, FTAG); |
34dc7c2f BB |
2565 | return (error); |
2566 | } | |
2567 | } | |
2568 | ASSERT(error == 0); | |
2569 | ASSERT(BP_GET_NDVAS(bp) == ndvas); | |
2570 | ||
b128c09f BB |
2571 | spa_config_exit(spa, SCL_ALLOC, FTAG); |
2572 | ||
428870ff | 2573 | BP_SET_BIRTH(bp, txg, txg); |
b128c09f | 2574 | |
34dc7c2f BB |
2575 | return (0); |
2576 | } | |
2577 | ||
2578 | void | |
2579 | metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) | |
2580 | { | |
2581 | const dva_t *dva = bp->blk_dva; | |
d6320ddb | 2582 | int d, ndvas = BP_GET_NDVAS(bp); |
34dc7c2f BB |
2583 | |
2584 | ASSERT(!BP_IS_HOLE(bp)); | |
428870ff | 2585 | ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa)); |
b128c09f BB |
2586 | |
2587 | spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); | |
34dc7c2f | 2588 | |
d6320ddb | 2589 | for (d = 0; d < ndvas; d++) |
34dc7c2f | 2590 | metaslab_free_dva(spa, &dva[d], txg, now); |
b128c09f BB |
2591 | |
2592 | spa_config_exit(spa, SCL_FREE, FTAG); | |
34dc7c2f BB |
2593 | } |
2594 | ||
2595 | int | |
2596 | metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) | |
2597 | { | |
2598 | const dva_t *dva = bp->blk_dva; | |
2599 | int ndvas = BP_GET_NDVAS(bp); | |
d6320ddb | 2600 | int d, error = 0; |
34dc7c2f BB |
2601 | |
2602 | ASSERT(!BP_IS_HOLE(bp)); | |
2603 | ||
b128c09f BB |
2604 | if (txg != 0) { |
2605 | /* | |
2606 | * First do a dry run to make sure all DVAs are claimable, | |
2607 | * so we don't have to unwind from partial failures below. | |
2608 | */ | |
2609 | if ((error = metaslab_claim(spa, bp, 0)) != 0) | |
2610 | return (error); | |
2611 | } | |
2612 | ||
2613 | spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); | |
2614 | ||
d6320ddb | 2615 | for (d = 0; d < ndvas; d++) |
34dc7c2f | 2616 | if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0) |
b128c09f BB |
2617 | break; |
2618 | ||
2619 | spa_config_exit(spa, SCL_ALLOC, FTAG); | |
2620 | ||
2621 | ASSERT(error == 0 || txg == 0); | |
34dc7c2f | 2622 | |
b128c09f | 2623 | return (error); |
34dc7c2f | 2624 | } |
920dd524 | 2625 | |
d1d7e268 MK |
2626 | void |
2627 | metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp) | |
920dd524 ED |
2628 | { |
2629 | const dva_t *dva = bp->blk_dva; | |
2630 | int ndvas = BP_GET_NDVAS(bp); | |
2631 | uint64_t psize = BP_GET_PSIZE(bp); | |
2632 | int d; | |
2633 | vdev_t *vd; | |
2634 | ||
2635 | ASSERT(!BP_IS_HOLE(bp)); | |
9b67f605 | 2636 | ASSERT(!BP_IS_EMBEDDED(bp)); |
920dd524 ED |
2637 | ASSERT(psize > 0); |
2638 | ||
2639 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
2640 | ||
2641 | for (d = 0; d < ndvas; d++) { | |
2642 | if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) | |
2643 | continue; | |
2644 | atomic_add_64(&vd->vdev_pending_fastwrite, psize); | |
2645 | } | |
2646 | ||
2647 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
2648 | } | |
2649 | ||
d1d7e268 MK |
2650 | void |
2651 | metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp) | |
920dd524 ED |
2652 | { |
2653 | const dva_t *dva = bp->blk_dva; | |
2654 | int ndvas = BP_GET_NDVAS(bp); | |
2655 | uint64_t psize = BP_GET_PSIZE(bp); | |
2656 | int d; | |
2657 | vdev_t *vd; | |
2658 | ||
2659 | ASSERT(!BP_IS_HOLE(bp)); | |
9b67f605 | 2660 | ASSERT(!BP_IS_EMBEDDED(bp)); |
920dd524 ED |
2661 | ASSERT(psize > 0); |
2662 | ||
2663 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
2664 | ||
2665 | for (d = 0; d < ndvas; d++) { | |
2666 | if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) | |
2667 | continue; | |
2668 | ASSERT3U(vd->vdev_pending_fastwrite, >=, psize); | |
2669 | atomic_sub_64(&vd->vdev_pending_fastwrite, psize); | |
2670 | } | |
2671 | ||
2672 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
2673 | } | |
30b92c1d | 2674 | |
13fe0198 MA |
2675 | void |
2676 | metaslab_check_free(spa_t *spa, const blkptr_t *bp) | |
2677 | { | |
2678 | int i, j; | |
2679 | ||
2680 | if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) | |
2681 | return; | |
2682 | ||
2683 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
2684 | for (i = 0; i < BP_GET_NDVAS(bp); i++) { | |
93cf2076 GW |
2685 | uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]); |
2686 | vdev_t *vd = vdev_lookup_top(spa, vdev); | |
2687 | uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]); | |
13fe0198 | 2688 | uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]); |
93cf2076 | 2689 | metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; |
13fe0198 | 2690 | |
93cf2076 GW |
2691 | if (msp->ms_loaded) |
2692 | range_tree_verify(msp->ms_tree, offset, size); | |
13fe0198 MA |
2693 | |
2694 | for (j = 0; j < TXG_SIZE; j++) | |
93cf2076 | 2695 | range_tree_verify(msp->ms_freetree[j], offset, size); |
13fe0198 | 2696 | for (j = 0; j < TXG_DEFER_SIZE; j++) |
93cf2076 | 2697 | range_tree_verify(msp->ms_defertree[j], offset, size); |
13fe0198 MA |
2698 | } |
2699 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
2700 | } | |
2701 | ||
30b92c1d | 2702 | #if defined(_KERNEL) && defined(HAVE_SPL) |
aa7d06a9 | 2703 | module_param(metaslab_debug_load, int, 0644); |
aa7d06a9 | 2704 | module_param(metaslab_debug_unload, int, 0644); |
f3a7f661 GW |
2705 | module_param(metaslab_preload_enabled, int, 0644); |
2706 | module_param(zfs_mg_noalloc_threshold, int, 0644); | |
2707 | module_param(zfs_mg_fragmentation_threshold, int, 0644); | |
2708 | module_param(zfs_metaslab_fragmentation_threshold, int, 0644); | |
2709 | module_param(metaslab_fragmentation_factor_enabled, int, 0644); | |
2710 | module_param(metaslab_lba_weighting_enabled, int, 0644); | |
2711 | module_param(metaslab_bias_enabled, int, 0644); | |
2712 | ||
93cf2076 GW |
2713 | MODULE_PARM_DESC(metaslab_debug_load, |
2714 | "load all metaslabs when pool is first opened"); | |
1ce04573 BB |
2715 | MODULE_PARM_DESC(metaslab_debug_unload, |
2716 | "prevent metaslabs from being unloaded"); | |
f3a7f661 GW |
2717 | MODULE_PARM_DESC(metaslab_preload_enabled, |
2718 | "preload potential metaslabs during reassessment"); | |
f4a4046b | 2719 | |
f4a4046b TC |
2720 | MODULE_PARM_DESC(zfs_mg_noalloc_threshold, |
2721 | "percentage of free space for metaslab group to allow allocation"); | |
f3a7f661 GW |
2722 | MODULE_PARM_DESC(zfs_mg_fragmentation_threshold, |
2723 | "fragmentation for metaslab group to allow allocation"); | |
2724 | ||
2725 | MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold, | |
2726 | "fragmentation for metaslab to allow allocation"); | |
2727 | MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled, | |
2728 | "use the fragmentation metric to prefer less fragmented metaslabs"); | |
2729 | MODULE_PARM_DESC(metaslab_lba_weighting_enabled, | |
2730 | "prefer metaslabs with lower LBAs"); | |
2731 | MODULE_PARM_DESC(metaslab_bias_enabled, | |
2732 | "enable metaslab group biasing"); | |
30b92c1d | 2733 | #endif /* _KERNEL && HAVE_SPL */ |