/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>

#define	WITH_DF_BLOCK_ALLOCATOR

/*
 * Allow allocations to switch to gang blocks quickly. We do this to
 * avoid having to load lots of space_maps in a given txg. There are,
 * however, some cases where we want to avoid "fast" ganging and instead
 * we want to do an exhaustive search of all metaslabs on this device.
 * Currently we don't allow any gang, slog, or dump device related allocations
 * to "fast" gang.
 */
#define	CAN_FASTGANG(flags) \
	(!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
	METASLAB_GANG_AVOID)))
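/*
 * For example, an allocation that carries METASLAB_GANG_CHILD or
 * METASLAB_GANG_HEADER (i.e. one that is already part of a gang block)
 * fails this test and must perform the exhaustive metaslab search rather
 * than ganging again.
 */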

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)

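/*
 * The per-rotor allocation granularity: this many bytes (scaled by the
 * number of children of the top-level vdev in metaslab_group_activate())
 * are allocated from a metaslab group before the rotor advances to the
 * next group in the class.
 */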
uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space_map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;
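/*
 * For example, with the default value of 200 a space map is only rewritten
 * (condensed) once its on-disk representation is at least twice the size of
 * its minimal in-core representation.
 */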

/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
int zfs_metaslab_condense_block_threshold = 4;
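/*
 * For example, assuming a 4K space_map_blksz and an ashift of 12 or less,
 * a space map occupying four or fewer 4K blocks (at most 16K on disk) is
 * never condensed by this heuristic.
 */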

/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;
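/*
 * For example, if this were set to 10, a metaslab group with only 8% free
 * space would be skipped as long as at least one other group in the class
 * still has more than 10% free; once every group drops to 10% or below,
 * all of them become eligible again.
 */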

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or equal to
 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
 * then it will be skipped unless all metaslab groups within the metaslab
 * class have also crossed this threshold.
 */
int zfs_mg_fragmentation_threshold = 85;

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;

/*
 * When set, load all metaslabs when the pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set, prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;

/*
 * A metaslab is considered "free" if it contains a contiguous
 * segment which is greater than metaslab_min_alloc_size.
 */
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;

/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;

/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;
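/*
 * With TXG_SIZE of 4 this works out to a default of 8 txgs before an idle
 * metaslab becomes eligible for unloading.
 */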

/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;

/*
 * Enable/disable preloading of metaslabs.
 */
int metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
int metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
int metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
int metaslab_bias_enabled = B_TRUE;

static uint64_t metaslab_fragmentation(metaslab_t *);

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_fastwrite_lock, NULL, MUTEX_DEFAULT, NULL);

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	mutex_destroy(&mc->mc_fastwrite_lock);
	kmem_free(mc, sizeof (metaslab_class_t));
}

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}

void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i, c;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	for (c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);

	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;
	int c;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}

/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;
	int c;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		space += tvd->vdev_max_asize - tvd->vdev_asize;
	}
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (space);
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = x1;
	const metaslab_t *m2 = x2;

	if (m1->ms_weight < m2->ms_weight)
		return (1);
	if (m1->ms_weight > m2->ms_weight)
		return (-1);

	/*
	 * If the weights are identical, use the offset to force uniqueness.
	 */
	if (m1->ms_start < m2->ms_start)
		return (-1);
	if (m1->ms_start > m2->ms_start)
		return (1);

	ASSERT3P(m1, ==, m2);

	return (0);
}

/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the group's free capacity
 * is greater than the zfs_mg_noalloc_threshold. If a metaslab group
 * transitions from allocatable to non-allocatable or vice versa then
 * the metaslab group's class is updated to reflect the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;

	ASSERT(vd == vd->vdev_top);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;

	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);
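	/*
	 * The extra 1 in the divisor above guards against division by zero
	 * when vs_space has not been populated yet; e.g. a vdev with 1000
	 * units of space and 250 allocated yields a free capacity of
	 * roughly 75%.
	 */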

	/*
	 * A metaslab group is considered allocatable if it has plenty
	 * of free space or is not heavily fragmented. We only take
	 * fragmentation into account if the metaslab group has a valid
	 * fragmentation metric (i.e. a value between 0 and 100).
	 */
	mg->mg_allocatable = (mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

	/*
	 * The mc_alloc_groups maintains a count of the number of
	 * groups in this metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;

	mutex_exit(&mg->mg_lock);
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;

	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
	    minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	taskq_destroy(mg->mg_taskq);
	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	kmem_free(mg, sizeof (metaslab_group_t));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	ASSERT(mc->mc_rotor != mg);
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
}

void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	if (--mg->mg_activation_count != 0) {
		ASSERT(mc->mc_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	taskq_wait(mg->mg_taskq);
	metaslab_group_alloc_update(mg);

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}

uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
	return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
}

void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
	uint64_t *mg_hist;
	vdev_t *vd = mg->mg_vd;
	uint64_t ashift = vd->vdev_ashift;
	int i, m;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
	    SPACE_MAP_HISTOGRAM_SIZE + ashift);

	for (m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_sm == NULL)
			continue;

		for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
			mg_hist[i + ashift] +=
			    msp->ms_sm->sm_phys->smp_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);

	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;
	int i;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		mg->mg_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}

void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;
	int i;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		ASSERT3U(mg->mg_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		ASSERT3U(mc->mc_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);

		mg->mg_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	ASSERT(msp->ms_group == NULL);
	mutex_enter(&mg->mg_lock);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);

	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_add(mg, msp);
	mutex_exit(&msp->ms_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_remove(mg, msp);
	mutex_exit(&msp->ms_lock);

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 511].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

/*
 * Calculate the fragmentation for a given metaslab group. We can use
 * a simple average here since all metaslabs within the group must have
 * the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
 * group have a fragmentation metric.
 */
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	uint64_t fragmentation = 0;
	uint64_t valid_ms = 0;
	int m;

	for (m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
			continue;

		valid_ms++;
		fragmentation += msp->ms_fragmentation;
	}

	if (valid_ms <= vd->vdev_ms_count / 2)
		return (ZFS_FRAG_INVALID);

	fragmentation /= valid_ms;
	ASSERT3U(fragmentation, <=, 100);
	return (fragmentation);
}

/*
 * Determine if a given metaslab group should skip allocations. A metaslab
 * group should avoid allocations if its free capacity is less than the
 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
 * that can still handle allocations.
 */
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	spa_t *spa = vd->vdev_spa;
	metaslab_class_t *mc = mg->mg_class;

	/*
	 * We use two key metrics to determine if a metaslab group is
	 * considered allocatable -- free space and fragmentation. If
	 * the free space is greater than the free space threshold and
	 * the fragmentation is less than the fragmentation threshold then
	 * consider the group allocatable. There are two cases when we will
	 * not consider these key metrics. The first is if the group is
	 * associated with a slog device and the second is if all groups
	 * in this metaslab class have already been considered ineligible
	 * for allocations.
	 */
	return ((mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold)) ||
	    mc != spa_normal_class(spa) || mc->mc_alloc_groups == 0);
}

/*
 * ==========================================================================
 * Range tree callbacks
 * ==========================================================================
 */

/*
 * Comparison function for the private size-ordered tree. Tree is sorted
 * by size, larger sizes at the end of the tree.
 */
static int
metaslab_rangesize_compare(const void *x1, const void *x2)
{
	const range_seg_t *r1 = x1;
	const range_seg_t *r2 = x2;
	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
	uint64_t rs_size2 = r2->rs_end - r2->rs_start;

	if (rs_size1 < rs_size2)
		return (-1);
	if (rs_size1 > rs_size2)
		return (1);

	if (r1->rs_start < r2->rs_start)
		return (-1);

	if (r1->rs_start > r2->rs_start)
		return (1);

	return (0);
}

/*
 * Create any block allocator specific components. The current allocators
 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
 */
static void
metaslab_rt_create(range_tree_t *rt, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT(msp->ms_tree == NULL);

	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
}

/*
 * Destroy the block allocator specific components.
 */
static void
metaslab_rt_destroy(range_tree_t *rt, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);
	ASSERT0(avl_numnodes(&msp->ms_size_tree));

	avl_destroy(&msp->ms_size_tree);
}

static void
metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);
	VERIFY(!msp->ms_condensing);
	avl_add(&msp->ms_size_tree, rs);
}

static void
metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);
	VERIFY(!msp->ms_condensing);
	avl_remove(&msp->ms_size_tree, rs);
}

static void
metaslab_rt_vacate(range_tree_t *rt, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);

	/*
	 * Normally one would walk the tree freeing nodes along the way.
	 * Since the nodes are shared with the range trees we can avoid
	 * walking all nodes and just reinitialize the avl tree. The nodes
	 * will be freed by the range tree, so we don't want to free them here.
	 */
	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
}

static range_tree_ops_t metaslab_rt_ops = {
	metaslab_rt_create,
	metaslab_rt_destroy,
	metaslab_rt_add,
	metaslab_rt_remove,
	metaslab_rt_vacate
};

/*
 * ==========================================================================
 * Metaslab block operations
 * ==========================================================================
 */

/*
 * Return the maximum contiguous segment within the metaslab.
 */
uint64_t
metaslab_block_maxsize(metaslab_t *msp)
{
	avl_tree_t *t = &msp->ms_size_tree;
	range_seg_t *rs;

	if (t == NULL || (rs = avl_last(t)) == NULL)
		return (0ULL);

	return (rs->rs_end - rs->rs_start);
}

uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size)
{
	uint64_t start;
	range_tree_t *rt = msp->ms_tree;

	VERIFY(!msp->ms_condensing);

	start = msp->ms_ops->msop_alloc(msp, size);
	if (start != -1ULL) {
		vdev_t *vd = msp->ms_group->mg_vd;

		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
		range_tree_remove(rt, start, size);
	}
	return (start);
}

/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */

#if defined(WITH_FF_BLOCK_ALLOCATOR) || \
    defined(WITH_DF_BLOCK_ALLOCATOR) || \
    defined(WITH_CF_BLOCK_ALLOCATOR)
/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate. This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
	range_seg_t *rs, rsearch;
	avl_index_t where;

	rsearch.rs_start = *cursor;
	rsearch.rs_end = *cursor + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL)
		rs = avl_nearest(t, where, AVL_AFTER);

	while (rs != NULL) {
		uint64_t offset = P2ROUNDUP(rs->rs_start, align);

		if (offset + size <= rs->rs_end) {
			*cursor = offset + size;
			return (offset);
		}
		rs = AVL_NEXT(t, rs);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_block_picker(t, cursor, size, align));
}
#endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */

#if defined(WITH_FF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static uint64_t
metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not guarantee that other allocation sizes
	 * may exist in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	avl_tree_t *t = &msp->ms_tree->rt_root;

	return (metaslab_block_picker(t, cursor, size, align));
}

static metaslab_ops_t metaslab_ff_ops = {
	metaslab_ff_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
#endif /* WITH_FF_BLOCK_ALLOCATOR */

#if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first fit allocation scheme until space gets low and then
 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not guarantee that other allocation sizes
	 * may exist in the same region.
	 */
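	/*
	 * For example, a 24K (0x6000) request yields an alignment of
	 * 8K (0x2000), the largest power of two dividing it evenly, so the
	 * search starts from the 8K-alignment cursor; a 16K request would
	 * use the 16K cursor instead.
	 */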
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	range_tree_t *rt = msp->ms_tree;
	avl_tree_t *t = &rt->rt_root;
	uint64_t max_size = metaslab_block_maxsize(msp);
	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));

	if (max_size < size)
		return (-1ULL);

	/*
	 * If we're running low on space switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		t = &msp->ms_size_tree;
		*cursor = 0;
	}

	return (metaslab_block_picker(t, cursor, size, 1ULL));
}

static metaslab_ops_t metaslab_df_ops = {
	metaslab_df_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
#endif /* WITH_DF_BLOCK_ALLOCATOR */

#if defined(WITH_CF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Cursor fit block allocator -
 * Select the largest region in the metaslab, set the cursor to the beginning
 * of the range and the cursor_end to the end of the range. As allocations
 * are made advance the cursor. Continue allocating from the cursor until
 * the range is exhausted and then find a new range.
 * ==========================================================================
 */
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
	range_tree_t *rt = msp->ms_tree;
	avl_tree_t *t = &msp->ms_size_tree;
	uint64_t *cursor = &msp->ms_lbas[0];
	uint64_t *cursor_end = &msp->ms_lbas[1];
	uint64_t offset = 0;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));

	ASSERT3U(*cursor_end, >=, *cursor);

	if ((*cursor + size) > *cursor_end) {
		range_seg_t *rs;

		rs = avl_last(&msp->ms_size_tree);
		if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
			return (-1ULL);

		*cursor = rs->rs_start;
		*cursor_end = rs->rs_end;
	}

	offset = *cursor;
	*cursor += size;

	return (offset);
}

static metaslab_ops_t metaslab_cf_ops = {
	metaslab_cf_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
#endif /* WITH_CF_BLOCK_ALLOCATOR */

#if defined(WITH_NDF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * New dynamic fit allocator -
 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
 * contiguous blocks. If no region is found then just use the largest segment
 * that remains.
 * ==========================================================================
 */

/*
 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
 * to request from the allocator.
 */
uint64_t metaslab_ndf_clump_shift = 4;

static uint64_t
metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
{
	avl_tree_t *t = &msp->ms_tree->rt_root;
	avl_index_t where;
	range_seg_t *rs, rsearch;
	uint64_t hbit = highbit64(size);
	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
	uint64_t max_size = metaslab_block_maxsize(msp);

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));

	if (max_size < size)
		return (-1ULL);

	rsearch.rs_start = *cursor;
	rsearch.rs_end = *cursor + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
		t = &msp->ms_size_tree;

		rsearch.rs_start = 0;
		rsearch.rs_end = MIN(max_size,
		    1ULL << (hbit + metaslab_ndf_clump_shift));
		rs = avl_find(t, &rsearch, &where);
		if (rs == NULL)
			rs = avl_nearest(t, where, AVL_AFTER);
		ASSERT(rs != NULL);
	}

	if ((rs->rs_end - rs->rs_start) >= size) {
		*cursor = rs->rs_start + size;
		return (rs->rs_start);
	}
	return (-1ULL);
}

static metaslab_ops_t metaslab_ndf_ops = {
	metaslab_ndf_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
#endif /* WITH_NDF_BLOCK_ALLOCATOR */


/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */

/*
 * Wait for any in-progress metaslab loads to complete.
 */
void
metaslab_load_wait(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	while (msp->ms_loading) {
		ASSERT(!msp->ms_loaded);
		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
	}
}

int
metaslab_load(metaslab_t *msp)
{
	int error = 0;
	int t;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(!msp->ms_loaded);
	ASSERT(!msp->ms_loading);

	msp->ms_loading = B_TRUE;

	/*
	 * If the space map has not been allocated yet, then treat
	 * all the space in the metaslab as free and add it to the
	 * ms_tree.
	 */
	if (msp->ms_sm != NULL)
		error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
	else
		range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);

	msp->ms_loaded = (error == 0);
	msp->ms_loading = B_FALSE;

	if (msp->ms_loaded) {
		for (t = 0; t < TXG_DEFER_SIZE; t++) {
			range_tree_walk(msp->ms_defertree[t],
			    range_tree_remove, msp->ms_tree);
		}
	}
	cv_broadcast(&msp->ms_load_cv);
	return (error);
}

void
metaslab_unload(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	range_tree_vacate(msp->ms_tree, NULL, NULL);
	msp->ms_loaded = B_FALSE;
	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
}

int
metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
    metaslab_t **msp)
{
	vdev_t *vd = mg->mg_vd;
	objset_t *mos = vd->vdev_spa->spa_meta_objset;
	metaslab_t *ms;
	int error;

	ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
	mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
	ms->ms_id = id;
	ms->ms_start = id << vd->vdev_ms_shift;
	ms->ms_size = 1ULL << vd->vdev_ms_shift;

	/*
	 * We only open space map objects that already exist. All others
	 * will be opened when we finally allocate an object for it.
	 */
	if (object != 0) {
		error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
		    ms->ms_size, vd->vdev_ashift, &ms->ms_lock);

		if (error != 0) {
			kmem_free(ms, sizeof (metaslab_t));
			return (error);
		}

		ASSERT(ms->ms_sm != NULL);
	}

	/*
	 * We create the main range tree here, but we don't create the
	 * alloctree and freetree until metaslab_sync_done(). This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock);
	metaslab_group_add(mg, ms);

	ms->ms_fragmentation = metaslab_fragmentation(ms);
	ms->ms_ops = mg->mg_class->mc_ops;

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(ms, 0);

	/*
	 * If metaslab_debug_load is set and we're initializing a metaslab
	 * that has an allocated space_map object then load its space
	 * map so that we can verify frees.
	 */
	if (metaslab_debug_load && ms->ms_sm != NULL) {
		mutex_enter(&ms->ms_lock);
		VERIFY0(metaslab_load(ms));
		mutex_exit(&ms->ms_lock);
	}

	if (txg != 0) {
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, ms, txg);
	}

	*msp = ms;

	return (0);
}

void
metaslab_fini(metaslab_t *msp)
{
	int t;

	metaslab_group_t *mg = msp->ms_group;

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);

	VERIFY(msp->ms_group == NULL);
	vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
	    0, -msp->ms_size);
	space_map_close(msp->ms_sm);

	metaslab_unload(msp);
	range_tree_destroy(msp->ms_tree);

	for (t = 0; t < TXG_SIZE; t++) {
		range_tree_destroy(msp->ms_alloctree[t]);
		range_tree_destroy(msp->ms_freetree[t]);
	}

	for (t = 0; t < TXG_DEFER_SIZE; t++) {
		range_tree_destroy(msp->ms_defertree[t]);
	}

	ASSERT0(msp->ms_deferspace);

	mutex_exit(&msp->ms_lock);
	cv_destroy(&msp->ms_load_cv);
	mutex_destroy(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
}

#define	FRAGMENTATION_TABLE_SIZE	17

/*
 * This table defines a segment size based fragmentation metric that will
 * allow each metaslab to derive its own fragmentation value. This is done
 * by calculating the space in each bucket of the spacemap histogram and
 * multiplying that by the fragmentation metric in this table. Doing
 * this for all buckets and dividing it by the total amount of free
 * space in this metaslab (i.e. the total free space in all buckets) gives
 * us the fragmentation metric. This means that a high fragmentation metric
 * equates to most of the free space being comprised of small segments.
 * Conversely, if the metric is low, then most of the free space is in
 * large segments. A 10% change in fragmentation equates to approximately
 * double the number of segments.
 *
 * This table defines 0% fragmented space using 16MB segments. Testing has
 * shown that segments that are greater than or equal to 16MB do not suffer
 * from drastic performance problems. Using this value, we derive the rest
 * of the table. Since the fragmentation value is never stored on disk, it
 * is possible to change these calculations in the future.
 */
int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
	100,	/* 512B	*/
	100,	/* 1K	*/
	98,	/* 2K	*/
	95,	/* 4K	*/
	90,	/* 8K	*/
	80,	/* 16K	*/
	70,	/* 32K	*/
	60,	/* 64K	*/
	50,	/* 128K	*/
	40,	/* 256K	*/
	30,	/* 512K	*/
	20,	/* 1M	*/
	15,	/* 2M	*/
	10,	/* 4M	*/
	5,	/* 8M	*/
	0	/* 16M	*/
};
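/*
 * As a worked example of the metric: a metaslab whose free space consists of
 * 60 MB held in 4K segments and 40 MB held in 16M segments would score
 * roughly (60 * 95 + 40 * 0) / 100 = 57% fragmented, while the same 100 MB
 * held entirely in 16M or larger segments would score 0%.
 */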

/*
 * Calculate the metaslab's fragmentation metric. A return value
 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
 * not support this metric. Otherwise, the return value should be in the
 * range [0, 100].
 */
static uint64_t
metaslab_fragmentation(metaslab_t *msp)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	uint64_t fragmentation = 0;
	uint64_t total = 0;
	boolean_t feature_enabled = spa_feature_is_enabled(spa,
	    SPA_FEATURE_SPACEMAP_HISTOGRAM);
	int i;

	if (!feature_enabled)
		return (ZFS_FRAG_INVALID);

	/*
	 * A null space map means that the entire metaslab is free
	 * and thus is not fragmented.
	 */
	if (msp->ms_sm == NULL)
		return (0);

	/*
	 * If this metaslab's space_map has not been upgraded, flag it
	 * so that we upgrade next time we encounter it.
	 */
	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
		vdev_t *vd = msp->ms_group->mg_vd;

		if (spa_writeable(vd->vdev_spa)) {
			uint64_t txg = spa_syncing_txg(spa);

			msp->ms_condense_wanted = B_TRUE;
			vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
			spa_dbgmsg(spa, "txg %llu, requesting force condense: "
			    "msp %p, vd %p", txg, msp, vd);
		}
		return (ZFS_FRAG_INVALID);
	}

	for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		uint64_t space = 0;
		uint8_t shift = msp->ms_sm->sm_shift;
		int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
		    FRAGMENTATION_TABLE_SIZE - 1);

		if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
			continue;

		space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
		total += space;

		ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
		fragmentation += space * zfs_frag_table[idx];
	}

	if (total > 0)
		fragmentation /= total;
	ASSERT3U(fragmentation, <=, 100);
	return (fragmentation);
}

/*
 * Compute a weight -- a selection preference value -- for the given metaslab.
 * This is based on the amount of free space, the level of fragmentation,
 * the LBA range, and whether the metaslab is loaded.
 */
static uint64_t
metaslab_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * This vdev is in the process of being removed so there is nothing
	 * for us to do here.
	 */
	if (vd->vdev_removing) {
		ASSERT0(space_map_allocated(msp->ms_sm));
		ASSERT0(vd->vdev_ms_shift);
		return (0);
	}

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = msp->ms_size - space_map_allocated(msp->ms_sm);

	msp->ms_fragmentation = metaslab_fragmentation(msp);
	if (metaslab_fragmentation_factor_enabled &&
	    msp->ms_fragmentation != ZFS_FRAG_INVALID) {
		/*
		 * Use the fragmentation information to inversely scale
		 * down the baseline weight. We need to ensure that we
		 * don't exclude this metaslab completely when it's 100%
		 * fragmented. To avoid this we reduce the fragmented value
		 * by 1.
		 */
		space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;

		/*
		 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
		 * this metaslab again. The fragmentation metric may have
		 * decreased the space to something smaller than
		 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
		 * so that we can consume any remaining space.
		 */
		if (space > 0 && space < SPA_MINBLOCKSIZE)
			space = SPA_MINBLOCKSIZE;
	}
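	/*
	 * For example, a metaslab that is 50% fragmented keeps
	 * (100 - (50 - 1)) / 100 = 51% of its raw free space as its
	 * baseline weight, while one that is 100% fragmented keeps 1%
	 * rather than being excluded entirely.
	 */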
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1. We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	if (metaslab_lba_weighting_enabled) {
		weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
		ASSERT(weight >= space && weight <= 2 * space);
	}
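	/*
	 * For example, with 200 metaslabs on the vdev, metaslab 0 (the
	 * outermost LBAs) gets its weight doubled, metaslab 100 gets roughly
	 * a 1.5x boost, and the last metaslab is left close to its raw free
	 * space.
	 */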

	/*
	 * If this metaslab is one we're actively using, adjust its
	 * weight to make it preferable to any inactive metaslab so
	 * we'll polish it off. If the fragmentation on this metaslab
	 * has exceeded our threshold, then don't mark it active.
	 */
	if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
	    msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
	}

	return (weight);
}

static int
metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		metaslab_load_wait(msp);
		if (!msp->ms_loaded) {
			int error = metaslab_load(msp);
			if (error) {
				metaslab_group_sort(msp->ms_group, msp, 0);
				return (error);
			}
		}

		metaslab_group_sort(msp->ms_group, msp,
		    msp->ms_weight | activation_weight);
	}
	ASSERT(msp->ms_loaded);
	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (0);
}

static void
metaslab_passivate(metaslab_t *msp, uint64_t size)
{
	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again. In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(size >= SPA_MINBLOCKSIZE || range_tree_space(msp->ms_tree) == 0);
	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}
1575 | ||
1576 | static void | |
1577 | metaslab_preload(void *arg) | |
1578 | { | |
1579 | metaslab_t *msp = arg; | |
1580 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
1581 | ||
1582 | ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock)); | |
1583 | ||
1584 | mutex_enter(&msp->ms_lock); | |
1585 | metaslab_load_wait(msp); | |
1586 | if (!msp->ms_loaded) | |
1587 | (void) metaslab_load(msp); | |
1588 | ||
1589 | /* | |
1590 | * Set the ms_access_txg value so that we don't unload it right away. | |
1591 | */ | |
1592 | msp->ms_access_txg = spa_syncing_txg(spa) + metaslab_unload_delay + 1; | |
1593 | mutex_exit(&msp->ms_lock); | |
1594 | } | |
1595 | ||
1596 | static void | |
1597 | metaslab_group_preload(metaslab_group_t *mg) | |
1598 | { | |
1599 | spa_t *spa = mg->mg_vd->vdev_spa; | |
1600 | metaslab_t *msp; | |
1601 | avl_tree_t *t = &mg->mg_metaslab_tree; | |
1602 | int m = 0; | |
1603 | ||
1604 | if (spa_shutting_down(spa) || !metaslab_preload_enabled) { | |
1605 | taskq_wait(mg->mg_taskq); | |
1606 | return; | |
1607 | } | |
1608 | ||
1609 | mutex_enter(&mg->mg_lock); | |
1610 | /* | |
1611 | * Load the next potential metaslabs | |
1612 | */ | |
1613 | msp = avl_first(t); | |
1614 | while (msp != NULL) { | |
1615 | metaslab_t *msp_next = AVL_NEXT(t, msp); | |
1616 | ||
1617 | /* | |
1618 | * We preload only the maximum number of metaslabs specified | |
1619 | * by metaslab_preload_limit. If a metaslab is being forced | |
1620 | * to condense then we preload it too. This will ensure | |
1621 | * that the forced condense happens in the next txg. | |
1622 | */ | |
1623 | if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) { | |
1624 | msp = msp_next; | |
1625 | continue; | |
1626 | } | |
1627 | ||
1628 | /* | |
1629 | * We must drop the metaslab group lock here to preserve | |
1630 | * lock ordering with the ms_lock (when grabbing both | |
1631 | * the mg_lock and the ms_lock, the ms_lock must be taken | |
1632 | * first). As a result, it is possible that the ordering | |
1633 | * of the metaslabs within the avl tree may change before | |
1634 | * we reacquire the lock. The metaslab cannot be removed from | |
1635 | * the tree while we're in syncing context so it is safe to | |
1636 | * drop the mg_lock here. If the metaslabs are reordered | |
1637 | * nothing will break -- we just may end up loading a | |
1638 | * less than optimal one. | |
1639 | */ | |
1640 | mutex_exit(&mg->mg_lock); | |
1641 | VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload, | |
1642 | msp, TQ_SLEEP) != 0); | |
1643 | mutex_enter(&mg->mg_lock); | |
1644 | msp = msp_next; | |
1645 | } | |
1646 | mutex_exit(&mg->mg_lock); | |
1647 | } | |
1648 | ||
1649 | /* | |
1650 | * Determine if the space map's on-disk footprint is past our tolerance | |
1651 | * for inefficiency. We would like to use the following criteria to make | |
1652 | * our decision: | |
1653 | * | |
1654 | * 1. The size of the space map object should not dramatically increase as a | |
1655 | * result of writing out the free space range tree. | |
1656 | * | |
1657 | * 2. The on-disk size of the space map should be at least zfs_condense_pct/100 | |
1658 | * times the size of its minimal (free space range tree) representation | |
1659 | * (i.e. with zfs_condense_pct = 110 and minimal = 1MB, on-disk >= 1.1MB). | |
1660 | * | |
1661 | * 3. The on-disk size of the space map should actually decrease. | |
1662 | * | |
1663 | * Checking the first condition is tricky since we don't want to walk | |
1664 | * the entire AVL tree calculating the estimated on-disk size. Instead we | |
1665 | * use the size-ordered range tree in the metaslab and calculate the | |
1666 | * size required to write out the largest segment in our free tree. If the | |
1667 | * size required to represent that segment on disk is larger than the space | |
1668 | * map object then we avoid condensing this map. | |
1669 | * | |
1670 | * To determine the second criterion we use a best-case estimate and assume | |
1671 | * each segment can be represented on-disk as a single 64-bit entry. We refer | |
1672 | * to this best-case estimate as the space map's minimal form. | |
1673 | * | |
1674 | * Unfortunately, we cannot compute the on-disk size of the space map in this | |
1675 | * context because we cannot accurately compute the effects of compression, etc. | |
1676 | * Instead, we apply the heuristic described in the block comment for | |
1677 | * zfs_metaslab_condense_block_threshold - we only condense if the space used | |
1678 | * is greater than a threshold number of blocks. | |
1679 | */ | |
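| /* | |
| * Worked example (illustrative, with assumed numbers): suppose the | |
| * in-core free tree holds 10,000 segments, so the minimal form is | |
| * 10,000 * 8 = 80,000 bytes, the space map object is 1MB on disk, and | |
| * the largest free segment fits in a single 8-byte entry. With the | |
| * default zfs_condense_pct = 200 we condense: 8 bytes <= 1MB, | |
| * 1MB >= 2 * 80,000 bytes, and 1MB also exceeds the block-based | |
| * threshold checked below. | |
| */ | |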
1680 | static boolean_t | |
1681 | metaslab_should_condense(metaslab_t *msp) | |
1682 | { | |
1683 | space_map_t *sm = msp->ms_sm; | |
1684 | range_seg_t *rs; | |
1685 | uint64_t size, entries, segsz, object_size, optimal_size, record_size; | |
1686 | dmu_object_info_t doi; | |
1687 | uint64_t vdev_blocksize = 1 << msp->ms_group->mg_vd->vdev_ashift; | |
1688 | ||
1689 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
1690 | ASSERT(msp->ms_loaded); | |
1691 | ||
1692 | /* | |
1693 | * Use the ms_size_tree range tree, which is ordered by size, to | |
1694 | * obtain the largest segment in the free tree. We always condense | |
1695 | * metaslabs that are empty and metaslabs for which a condense | |
1696 | * request has been made. | |
1697 | */ | |
1698 | rs = avl_last(&msp->ms_size_tree); | |
1699 | if (rs == NULL || msp->ms_condense_wanted) | |
1700 | return (B_TRUE); | |
1701 | ||
1702 | /* | |
1703 | * Calculate the number of 64-bit entries this segment would | |
1704 | * require when written to disk. If this single segment would be | |
1705 | * larger on-disk than the entire current on-disk structure, then | |
1706 | * clearly condensing will increase the on-disk structure size. | |
1707 | */ | |
1708 | size = (rs->rs_end - rs->rs_start) >> sm->sm_shift; | |
1709 | entries = size / (MIN(size, SM_RUN_MAX)); | |
1710 | segsz = entries * sizeof (uint64_t); | |
1711 | ||
1712 | optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root); | |
1713 | object_size = space_map_length(msp->ms_sm); | |
1714 | ||
1715 | dmu_object_info_from_db(sm->sm_dbuf, &doi); | |
1716 | record_size = MAX(doi.doi_data_block_size, vdev_blocksize); | |
1717 | ||
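| /* | |
| * The three clauses below correspond to criteria 1-3 in the comment | |
| * above: the largest segment must not be bigger than the current | |
| * on-disk object, the on-disk object must be at least | |
| * zfs_condense_pct/100 times the minimal form, and the on-disk | |
| * object must span more than the threshold number of blocks. | |
| */ | |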
1718 | return (segsz <= object_size && | |
1719 | object_size >= (optimal_size * zfs_condense_pct / 100) && | |
1720 | object_size > zfs_metaslab_condense_block_threshold * record_size); | |
1721 | } | |
1722 | ||
1723 | /* | |
1724 | * Condense the on-disk space map representation to its minimized form. | |
1725 | * The minimized form consists of a small number of allocations followed by | |
1726 | * the entries of the free range tree. | |
1727 | */ | |
1728 | static void | |
1729 | metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx) | |
1730 | { | |
1731 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
1732 | range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK]; | |
1733 | range_tree_t *condense_tree; | |
1734 | space_map_t *sm = msp->ms_sm; | |
1735 | int t; | |
1736 | ||
1737 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
1738 | ASSERT3U(spa_sync_pass(spa), ==, 1); | |
1739 | ASSERT(msp->ms_loaded); | |
1740 | ||
1741 | ||
1742 | spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, " | |
1743 | "smp size %llu, segments %lu, forcing condense=%s", txg, | |
1744 | msp->ms_id, msp, space_map_length(msp->ms_sm), | |
1745 | avl_numnodes(&msp->ms_tree->rt_root), | |
1746 | msp->ms_condense_wanted ? "TRUE" : "FALSE"); | |
1747 | ||
1748 | msp->ms_condense_wanted = B_FALSE; | |
1749 | ||
1750 | /* | |
1751 | * Create an range tree that is 100% allocated. We remove segments | |
1752 | * that have been freed in this txg, any deferred frees that exist, | |
1753 | * and any allocation in the future. Removing segments should be | |
1754 | * a relatively inexpensive operation since we expect these trees to | |
1755 | * have a small number of nodes. | |
1756 | */ | |
1757 | condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock); | |
1758 | range_tree_add(condense_tree, msp->ms_start, msp->ms_size); | |
1759 | ||
1760 | /* | |
1761 | * Remove what's been freed in this txg from the condense_tree. | |
1762 | * Since we're in sync_pass 1, we know that all the frees from | |
1763 | * this txg are in the freetree. | |
1764 | */ | |
1765 | range_tree_walk(freetree, range_tree_remove, condense_tree); | |
1766 | ||
1767 | for (t = 0; t < TXG_DEFER_SIZE; t++) { | |
1768 | range_tree_walk(msp->ms_defertree[t], | |
1769 | range_tree_remove, condense_tree); | |
1770 | } | |
1771 | ||
1772 | for (t = 1; t < TXG_CONCURRENT_STATES; t++) { | |
1773 | range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK], | |
1774 | range_tree_remove, condense_tree); | |
1775 | } | |
1776 | ||
1777 | /* | |
1778 | * We're about to drop the metaslab's lock thus allowing | |
1779 | * other consumers to change its content. Set the | |
1780 | * metaslab's ms_condensing flag to ensure that | |
1781 | * allocations on this metaslab do not occur while we're | |
1782 | * in the middle of committing it to disk. This is only critical | |
1783 | * for the ms_tree as all other range trees use per txg | |
1784 | * views of their content. | |
1785 | */ | |
1786 | msp->ms_condensing = B_TRUE; | |
1787 | ||
1788 | mutex_exit(&msp->ms_lock); | |
1789 | space_map_truncate(sm, tx); | |
1790 | mutex_enter(&msp->ms_lock); | |
1791 | ||
1792 | /* | |
1793 | * While we would ideally like to create a space_map representation | |
1794 | * that consists only of allocation records, doing so can be | |
1795 | * prohibitively expensive because the in-core free tree can be | |
1796 | * large, and therefore computationally expensive to subtract | |
1797 | * from the condense_tree. Instead we sync out two trees, a cheap | |
1798 | * allocation only tree followed by the in-core free tree. While not | |
1799 | * optimal, this is typically close to optimal, and much cheaper to | |
1800 | * compute. | |
1801 | */ | |
1802 | space_map_write(sm, condense_tree, SM_ALLOC, tx); | |
1803 | range_tree_vacate(condense_tree, NULL, NULL); | |
1804 | range_tree_destroy(condense_tree); | |
1805 | ||
1806 | space_map_write(sm, msp->ms_tree, SM_FREE, tx); | |
1807 | msp->ms_condensing = B_FALSE; | |
1808 | } | |
1809 | ||
1810 | /* | |
1811 | * Write a metaslab to disk in the context of the specified transaction group. | |
1812 | */ | |
1813 | void | |
1814 | metaslab_sync(metaslab_t *msp, uint64_t txg) | |
1815 | { | |
1816 | metaslab_group_t *mg = msp->ms_group; | |
1817 | vdev_t *vd = mg->mg_vd; | |
1818 | spa_t *spa = vd->vdev_spa; | |
1819 | objset_t *mos = spa_meta_objset(spa); | |
1820 | range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK]; | |
1821 | range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK]; | |
1822 | range_tree_t **freed_tree = | |
1823 | &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK]; | |
1824 | dmu_tx_t *tx; | |
1825 | uint64_t object = space_map_object(msp->ms_sm); | |
1826 | ||
1827 | ASSERT(!vd->vdev_ishole); | |
1828 | ||
1829 | /* | |
1830 | * This metaslab has just been added so there's no work to do now. | |
1831 | */ | |
1832 | if (*freetree == NULL) { | |
1833 | ASSERT3P(alloctree, ==, NULL); | |
1834 | return; | |
1835 | } | |
1836 | ||
1837 | ASSERT3P(alloctree, !=, NULL); | |
1838 | ASSERT3P(*freetree, !=, NULL); | |
1839 | ASSERT3P(*freed_tree, !=, NULL); | |
1840 | ||
1841 | /* | |
1842 | * Normally, we don't want to process a metaslab if there | |
1843 | * are no allocations or frees to perform. However, if the metaslab | |
1844 | * is being forced to condense we need to let it through. | |
1845 | */ | |
1846 | if (range_tree_space(alloctree) == 0 && | |
1847 | range_tree_space(*freetree) == 0 && | |
1848 | !msp->ms_condense_wanted) | |
1849 | return; | |
1850 | ||
1851 | /* | |
1852 | * The only state that can actually be changing concurrently with | |
1853 | * metaslab_sync() is the metaslab's ms_tree. No other thread can | |
1854 | * be modifying this txg's alloctree, freetree, freed_tree, or | |
1855 | * space_map_phys_t. Therefore, we only hold ms_lock to satisfy | |
1856 | * space_map ASSERTs. We drop it whenever we call into the DMU, | |
1857 | * because the DMU can call down to us (e.g. via zio_free()) at | |
1858 | * any time. | |
1859 | */ | |
1860 | ||
1861 | tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); | |
1862 | ||
1863 | if (msp->ms_sm == NULL) { | |
1864 | uint64_t new_object; | |
1865 | ||
1866 | new_object = space_map_alloc(mos, tx); | |
1867 | VERIFY3U(new_object, !=, 0); | |
1868 | ||
1869 | VERIFY0(space_map_open(&msp->ms_sm, mos, new_object, | |
1870 | msp->ms_start, msp->ms_size, vd->vdev_ashift, | |
1871 | &msp->ms_lock)); | |
1872 | ASSERT(msp->ms_sm != NULL); | |
1873 | } | |
1874 | ||
1875 | mutex_enter(&msp->ms_lock); | |
1876 | ||
1877 | /* | |
1878 | * Note: metaslab_condense() clears the space_map's histogram. | |
1879 | * Therefore we must verify and remove this histogram before | |
1880 | * condensing. | |
1881 | */ | |
1882 | metaslab_group_histogram_verify(mg); | |
1883 | metaslab_class_histogram_verify(mg->mg_class); | |
1884 | metaslab_group_histogram_remove(mg, msp); | |
1885 | ||
1886 | if (msp->ms_loaded && spa_sync_pass(spa) == 1 && | |
1887 | metaslab_should_condense(msp)) { | |
1888 | metaslab_condense(msp, txg, tx); | |
1889 | } else { | |
1890 | space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx); | |
1891 | space_map_write(msp->ms_sm, *freetree, SM_FREE, tx); | |
1892 | } | |
1893 | ||
1894 | if (msp->ms_loaded) { | |
1895 | /* | |
1896 | * When the space map is loaded, we have an accurate | |
1897 | * histogram in the range tree. This gives us an opportunity | |
1898 | * to bring the space map's histogram up-to-date so we clear | |
1899 | * it first before updating it. | |
1900 | */ | |
1901 | space_map_histogram_clear(msp->ms_sm); | |
1902 | space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx); | |
1903 | } else { | |
1904 | /* | |
1905 | * Since the space map is not loaded we simply update the | |
1906 | * existing histogram with what was freed in this txg. This | |
1907 | * means that the on-disk histogram may not have an accurate | |
1908 | * view of the free space but it's close enough to allow | |
1909 | * us to make allocation decisions. | |
1910 | */ | |
1911 | space_map_histogram_add(msp->ms_sm, *freetree, tx); | |
1912 | } | |
1913 | metaslab_group_histogram_add(mg, msp); | |
1914 | metaslab_group_histogram_verify(mg); | |
1915 | metaslab_class_histogram_verify(mg->mg_class); | |
1916 | ||
1917 | /* | |
1918 | * For sync pass 1, we avoid traversing this txg's free range tree | |
1919 | * and instead will just swap the pointers for freetree and | |
1920 | * freed_tree. We can safely do this since the freed_tree is | |
1921 | * guaranteed to be empty on the initial pass. | |
1922 | */ | |
1923 | if (spa_sync_pass(spa) == 1) { | |
1924 | range_tree_swap(freetree, freed_tree); | |
1925 | } else { | |
1926 | range_tree_vacate(*freetree, range_tree_add, *freed_tree); | |
1927 | } | |
1928 | range_tree_vacate(alloctree, NULL, NULL); | |
1929 | ||
1930 | ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK])); | |
1931 | ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK])); | |
1932 | ||
1933 | mutex_exit(&msp->ms_lock); | |
1934 | ||
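| /* | |
| * If a new space map object was allocated during this sync (or the | |
| * object number otherwise changed), record it in the vdev's ms_array | |
| * so the metaslab can find its space map again at pool import. | |
| */ | |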
1935 | if (object != space_map_object(msp->ms_sm)) { | |
1936 | object = space_map_object(msp->ms_sm); | |
1937 | dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) * | |
1938 | msp->ms_id, sizeof (uint64_t), &object, tx); | |
1939 | } | |
1940 | dmu_tx_commit(tx); | |
1941 | } | |
1942 | ||
1943 | /* | |
1944 | * Called after a transaction group has completely synced to mark | |
1945 | * all of the metaslab's free space as usable. | |
1946 | */ | |
1947 | void | |
1948 | metaslab_sync_done(metaslab_t *msp, uint64_t txg) | |
1949 | { | |
1950 | metaslab_group_t *mg = msp->ms_group; | |
1951 | vdev_t *vd = mg->mg_vd; | |
1952 | range_tree_t **freed_tree; | |
1953 | range_tree_t **defer_tree; | |
1954 | int64_t alloc_delta, defer_delta; | |
1955 | int t; | |
1956 | ||
1957 | ASSERT(!vd->vdev_ishole); | |
1958 | ||
1959 | mutex_enter(&msp->ms_lock); | |
1960 | ||
1961 | /* | |
1962 | * If this metaslab is just becoming available, initialize its | |
1963 | * alloctrees, freetrees, and defertree and add its capacity to | |
1964 | * the vdev. | |
1965 | */ | |
1966 | if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) { | |
1967 | for (t = 0; t < TXG_SIZE; t++) { | |
1968 | ASSERT(msp->ms_alloctree[t] == NULL); | |
1969 | ASSERT(msp->ms_freetree[t] == NULL); | |
1970 | ||
1971 | msp->ms_alloctree[t] = range_tree_create(NULL, msp, | |
1972 | &msp->ms_lock); | |
1973 | msp->ms_freetree[t] = range_tree_create(NULL, msp, | |
1974 | &msp->ms_lock); | |
1975 | } | |
1976 | ||
1977 | for (t = 0; t < TXG_DEFER_SIZE; t++) { | |
1978 | ASSERT(msp->ms_defertree[t] == NULL); | |
1979 | ||
1980 | msp->ms_defertree[t] = range_tree_create(NULL, msp, | |
1981 | &msp->ms_lock); | |
1982 | } | |
1983 | ||
1984 | vdev_space_update(vd, 0, 0, msp->ms_size); | |
1985 | } | |
1986 | ||
1987 | freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK]; | |
1988 | defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE]; | |
1989 | ||
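| /* | |
| * alloc_delta is the net change in allocated space from this sync; | |
| * defer_delta is the net change in deferred free space: the frees | |
| * from this txg become deferred while the oldest deferred frees are | |
| * returned to circulation below. | |
| */ | |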
1990 | alloc_delta = space_map_alloc_delta(msp->ms_sm); | |
1991 | defer_delta = range_tree_space(*freed_tree) - | |
1992 | range_tree_space(*defer_tree); | |
1993 | ||
1994 | vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0); | |
1995 | ||
1996 | ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK])); | |
1997 | ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK])); | |
1998 | ||
1999 | /* | |
2000 | * If there's a metaslab_load() in progress, wait for it to complete | |
2001 | * so that we have a consistent view of the in-core space map. | |
2002 | */ | |
2003 | metaslab_load_wait(msp); | |
2004 | ||
2005 | /* | |
2006 | * Move the frees from the defer_tree back to the free | |
2007 | * range tree (if it's loaded). Swap the freed_tree and the | |
2008 | * defer_tree -- this is safe to do because we've just emptied out | |
2009 | * the defer_tree. | |
2010 | */ | |
2011 | range_tree_vacate(*defer_tree, | |
2012 | msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree); | |
2013 | range_tree_swap(freed_tree, defer_tree); | |
2014 | ||
2015 | space_map_update(msp->ms_sm); | |
2016 | ||
2017 | msp->ms_deferspace += defer_delta; | |
2018 | ASSERT3S(msp->ms_deferspace, >=, 0); | |
2019 | ASSERT3S(msp->ms_deferspace, <=, msp->ms_size); | |
2020 | if (msp->ms_deferspace != 0) { | |
2021 | /* | |
2022 | * Keep syncing this metaslab until all deferred frees | |
2023 | * are back in circulation. | |
2024 | */ | |
2025 | vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); | |
2026 | } | |
2027 | ||
2028 | if (msp->ms_loaded && msp->ms_access_txg < txg) { | |
2029 | for (t = 1; t < TXG_CONCURRENT_STATES; t++) { | |
2030 | VERIFY0(range_tree_space( | |
2031 | msp->ms_alloctree[(txg + t) & TXG_MASK])); | |
2032 | } | |
2033 | ||
2034 | if (!metaslab_debug_unload) | |
2035 | metaslab_unload(msp); | |
2036 | } | |
2037 | ||
2038 | metaslab_group_sort(mg, msp, metaslab_weight(msp)); | |
2039 | mutex_exit(&msp->ms_lock); | |
2040 | } | |
2041 | ||
2042 | void | |
2043 | metaslab_sync_reassess(metaslab_group_t *mg) | |
2044 | { | |
2045 | metaslab_group_alloc_update(mg); | |
2046 | mg->mg_fragmentation = metaslab_group_fragmentation(mg); | |
2047 | ||
2048 | /* | |
2049 | * Preload the next potential metaslabs | |
2050 | */ | |
2051 | metaslab_group_preload(mg); | |
2052 | } | |
2053 | ||
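| /* | |
| * Return the distance, in bytes and rounded to whole metaslabs, | |
| * between this metaslab and the given DVA. DVAs on other vdevs are | |
| * treated as infinitely far away (1ULL << 63). For example | |
| * (illustrative only), with 16GB metaslabs a DVA in metaslab 3 is | |
| * 32GB away from metaslab 5. | |
| */ | |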
2054 | static uint64_t | |
2055 | metaslab_distance(metaslab_t *msp, dva_t *dva) | |
2056 | { | |
2057 | uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift; | |
2058 | uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift; | |
2059 | uint64_t start = msp->ms_id; | |
2060 | ||
2061 | if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva)) | |
2062 | return (1ULL << 63); | |
2063 | ||
2064 | if (offset < start) | |
2065 | return ((start - offset) << ms_shift); | |
2066 | if (offset > start) | |
2067 | return ((offset - start) << ms_shift); | |
2068 | return (0); | |
2069 | } | |
2070 | ||
2071 | static uint64_t | |
2072 | metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize, | |
2073 | uint64_t txg, uint64_t min_distance, dva_t *dva, int d) | |
2074 | { | |
2075 | spa_t *spa = mg->mg_vd->vdev_spa; | |
2076 | metaslab_t *msp = NULL; | |
2077 | uint64_t offset = -1ULL; | |
2078 | avl_tree_t *t = &mg->mg_metaslab_tree; | |
2079 | uint64_t activation_weight; | |
2080 | uint64_t target_distance; | |
2081 | int i; | |
2082 | ||
2083 | activation_weight = METASLAB_WEIGHT_PRIMARY; | |
2084 | for (i = 0; i < d; i++) { | |
2085 | if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { | |
2086 | activation_weight = METASLAB_WEIGHT_SECONDARY; | |
2087 | break; | |
2088 | } | |
2089 | } | |
2090 | ||
2091 | for (;;) { | |
2092 | boolean_t was_active; | |
2093 | ||
2094 | mutex_enter(&mg->mg_lock); | |
2095 | for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) { | |
2096 | if (msp->ms_weight < asize) { | |
2097 | spa_dbgmsg(spa, "%s: failed to meet weight " | |
2098 | "requirement: vdev %llu, txg %llu, mg %p, " | |
2099 | "msp %p, psize %llu, asize %llu, " | |
2100 | "weight %llu", spa_name(spa), | |
2101 | mg->mg_vd->vdev_id, txg, | |
2102 | mg, msp, psize, asize, msp->ms_weight); | |
2103 | mutex_exit(&mg->mg_lock); | |
2104 | return (-1ULL); | |
2105 | } | |
2106 | ||
2107 | /* | |
2108 | * If the selected metaslab is condensing, skip it. | |
2109 | */ | |
2110 | if (msp->ms_condensing) | |
2111 | continue; | |
2112 | ||
2113 | was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; | |
2114 | if (activation_weight == METASLAB_WEIGHT_PRIMARY) | |
2115 | break; | |
2116 | ||
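| /* | |
| * For secondary (ditto) allocations, require this metaslab to be | |
| * at least min_distance away from every DVA already allocated for | |
| * this block; raise the requirement to 1.5x min_distance for | |
| * metaslabs that currently have no allocated space. | |
| */ | |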
2117 | target_distance = min_distance + | |
2118 | (space_map_allocated(msp->ms_sm) != 0 ? 0 : | |
2119 | min_distance >> 1); | |
2120 | ||
2121 | for (i = 0; i < d; i++) | |
2122 | if (metaslab_distance(msp, &dva[i]) < | |
2123 | target_distance) | |
2124 | break; | |
2125 | if (i == d) | |
2126 | break; | |
2127 | } | |
2128 | mutex_exit(&mg->mg_lock); | |
2129 | if (msp == NULL) | |
2130 | return (-1ULL); | |
2131 | ||
2132 | mutex_enter(&msp->ms_lock); | |
2133 | ||
2134 | /* | |
2135 | * Ensure that the metaslab we have selected is still | |
2136 | * capable of handling our request. It's possible that | |
2137 | * another thread may have changed the weight while we | |
2138 | * were blocked on the metaslab lock. | |
2139 | */ | |
2140 | if (msp->ms_weight < asize || (was_active && | |
2141 | !(msp->ms_weight & METASLAB_ACTIVE_MASK) && | |
2142 | activation_weight == METASLAB_WEIGHT_PRIMARY)) { | |
2143 | mutex_exit(&msp->ms_lock); | |
2144 | continue; | |
2145 | } | |
2146 | ||
2147 | if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) && | |
2148 | activation_weight == METASLAB_WEIGHT_PRIMARY) { | |
2149 | metaslab_passivate(msp, | |
2150 | msp->ms_weight & ~METASLAB_ACTIVE_MASK); | |
2151 | mutex_exit(&msp->ms_lock); | |
2152 | continue; | |
2153 | } | |
2154 | ||
2155 | if (metaslab_activate(msp, activation_weight) != 0) { | |
2156 | mutex_exit(&msp->ms_lock); | |
2157 | continue; | |
2158 | } | |
2159 | ||
2160 | /* | |
2161 | * If this metaslab is currently condensing then pick again as | |
2162 | * we can't manipulate this metaslab until it's committed | |
2163 | * to disk. | |
2164 | */ | |
2165 | if (msp->ms_condensing) { | |
2166 | mutex_exit(&msp->ms_lock); | |
2167 | continue; | |
2168 | } | |
2169 | ||
2170 | if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL) | |
2171 | break; | |
2172 | ||
2173 | metaslab_passivate(msp, metaslab_block_maxsize(msp)); | |
2174 | mutex_exit(&msp->ms_lock); | |
2175 | } | |
2176 | ||
2177 | if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0) | |
2178 | vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg); | |
2179 | ||
2180 | range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize); | |
2181 | msp->ms_access_txg = txg + metaslab_unload_delay; | |
2182 | ||
2183 | mutex_exit(&msp->ms_lock); | |
2184 | ||
2185 | return (offset); | |
2186 | } | |
2187 | ||
2188 | /* | |
2189 | * Allocate a block for the specified i/o. | |
2190 | */ | |
2191 | static int | |
2192 | metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, | |
2193 | dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags) | |
2194 | { | |
2195 | metaslab_group_t *mg, *fast_mg, *rotor; | |
2196 | vdev_t *vd; | |
2197 | int dshift = 3; | |
2198 | int all_zero; | |
2199 | int zio_lock = B_FALSE; | |
2200 | boolean_t allocatable; | |
2201 | uint64_t offset = -1ULL; | |
2202 | uint64_t asize; | |
2203 | uint64_t distance; | |
2204 | ||
2205 | ASSERT(!DVA_IS_VALID(&dva[d])); | |
2206 | ||
2207 | /* | |
2208 | * For testing, make some blocks above a certain size be gang blocks. | |
2209 | */ | |
2210 | if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0) | |
2211 | return (SET_ERROR(ENOSPC)); | |
2212 | ||
2213 | if (flags & METASLAB_FASTWRITE) | |
2214 | mutex_enter(&mc->mc_fastwrite_lock); | |
2215 | ||
2216 | /* | |
2217 | * Start at the rotor and loop through all mgs until we find something. | |
2218 | * Note that there's no locking on mc_rotor or mc_aliquot because | |
2219 | * nothing actually breaks if we miss a few updates -- we just won't | |
2220 | * allocate quite as evenly. It all balances out over time. | |
2221 | * | |
2222 | * If we are doing ditto or log blocks, try to spread them across | |
2223 | * consecutive vdevs. If we're forced to reuse a vdev before we've | |
2224 | * allocated all of our ditto blocks, then try and spread them out on | |
2225 | * that vdev as much as possible. If it turns out to not be possible, | |
2226 | * gradually lower our standards until anything becomes acceptable. | |
2227 | * Also, allocating on consecutive vdevs (as opposed to random vdevs) | |
2228 | * gives us hope of containing our fault domains to something we're | |
2229 | * able to reason about. Otherwise, any two top-level vdev failures | |
2230 | * will guarantee the loss of data. With consecutive allocation, | |
2231 | * only two adjacent top-level vdev failures will result in data loss. | |
2232 | * | |
2233 | * If we are doing gang blocks (hintdva is non-NULL), try to keep | |
2234 | * ourselves on the same vdev as our gang block header. That | |
2235 | * way, we can hope for locality in vdev_cache, plus it makes our | |
2236 | * fault domains something tractable. | |
2237 | */ | |
2238 | if (hintdva) { | |
2239 | vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d])); | |
2240 | ||
2241 | /* | |
2242 | * It's possible the vdev we're using as the hint no | |
2243 | * longer exists (i.e. removed). Consult the rotor when | |
2244 | * all else fails. | |
2245 | */ | |
2246 | if (vd != NULL) { | |
2247 | mg = vd->vdev_mg; | |
2248 | ||
2249 | if (flags & METASLAB_HINTBP_AVOID && | |
2250 | mg->mg_next != NULL) | |
2251 | mg = mg->mg_next; | |
2252 | } else { | |
2253 | mg = mc->mc_rotor; | |
2254 | } | |
2255 | } else if (d != 0) { | |
2256 | vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1])); | |
2257 | mg = vd->vdev_mg->mg_next; | |
2258 | } else if (flags & METASLAB_FASTWRITE) { | |
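| /* | |
| * For fast writes, walk the rotor ring and start from the | |
| * group whose vdev has the fewest pending fastwrite bytes. | |
| */ | |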
2259 | mg = fast_mg = mc->mc_rotor; | |
2260 | ||
2261 | do { | |
2262 | if (fast_mg->mg_vd->vdev_pending_fastwrite < | |
2263 | mg->mg_vd->vdev_pending_fastwrite) | |
2264 | mg = fast_mg; | |
2265 | } while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor); | |
2266 | ||
2267 | } else { | |
2268 | mg = mc->mc_rotor; | |
2269 | } | |
2270 | ||
2271 | /* | |
2272 | * If the hint put us into the wrong metaslab class, or into a | |
2273 | * metaslab group that has been passivated, just follow the rotor. | |
2274 | */ | |
2275 | if (mg->mg_class != mc || mg->mg_activation_count <= 0) | |
2276 | mg = mc->mc_rotor; | |
2277 | ||
2278 | rotor = mg; | |
2279 | top: | |
2280 | all_zero = B_TRUE; | |
2281 | do { | |
2282 | ASSERT(mg->mg_activation_count == 1); | |
2283 | ||
2284 | vd = mg->mg_vd; | |
2285 | ||
2286 | /* | |
2287 | * Don't allocate from faulted devices. | |
2288 | */ | |
2289 | if (zio_lock) { | |
2290 | spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER); | |
2291 | allocatable = vdev_allocatable(vd); | |
2292 | spa_config_exit(spa, SCL_ZIO, FTAG); | |
2293 | } else { | |
2294 | allocatable = vdev_allocatable(vd); | |
2295 | } | |
2296 | ||
2297 | /* | |
2298 | * Determine if the selected metaslab group is eligible | |
2299 | * for allocations. If we're ganging or have requested | |
2300 | * an allocation for the smallest gang block size | |
2301 | * then we don't want to avoid allocating to this | |
2302 | * metaslab group. If we're in this condition we should | |
2303 | * try to allocate from any device possible so that we | |
2304 | * don't inadvertently return ENOSPC and suspend the pool | |
2305 | * even though space is still available. | |
2306 | */ | |
2307 | if (allocatable && CAN_FASTGANG(flags) && | |
2308 | psize > SPA_GANGBLOCKSIZE) | |
2309 | allocatable = metaslab_group_allocatable(mg); | |
2310 | ||
2311 | if (!allocatable) | |
2312 | goto next; | |
2313 | ||
2314 | /* | |
2315 | * Avoid writing single-copy data to a failing vdev | |
2316 | * unless the user instructs us that it is okay. | |
2317 | */ | |
2318 | if ((vd->vdev_stat.vs_write_errors > 0 || | |
2319 | vd->vdev_state < VDEV_STATE_HEALTHY) && | |
2320 | d == 0 && dshift == 3 && vd->vdev_children == 0) { | |
2321 | all_zero = B_FALSE; | |
2322 | goto next; | |
2323 | } | |
2324 | ||
2325 | ASSERT(mg->mg_class == mc); | |
2326 | ||
2327 | distance = vd->vdev_asize >> dshift; | |
2328 | if (distance <= (1ULL << vd->vdev_ms_shift)) | |
2329 | distance = 0; | |
2330 | else | |
2331 | all_zero = B_FALSE; | |
2332 | ||
2333 | asize = vdev_psize_to_asize(vd, psize); | |
2334 | ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0); | |
2335 | ||
2336 | offset = metaslab_group_alloc(mg, psize, asize, txg, distance, | |
2337 | dva, d); | |
2338 | if (offset != -1ULL) { | |
2339 | /* | |
2340 | * If we've just selected this metaslab group, | |
2341 | * figure out whether the corresponding vdev is | |
2342 | * over- or under-used relative to the pool, | |
2343 | * and set an allocation bias to even it out. | |
2344 | */ | |
2345 | if (mc->mc_aliquot == 0 && metaslab_bias_enabled) { | |
2346 | vdev_stat_t *vs = &vd->vdev_stat; | |
2347 | int64_t vu, cu; | |
2348 | ||
2349 | vu = (vs->vs_alloc * 100) / (vs->vs_space + 1); | |
2350 | cu = (mc->mc_alloc * 100) / (mc->mc_space + 1); | |
2351 | ||
2352 | /* | |
2353 | * Calculate how much more or less we should | |
2354 | * try to allocate from this device during | |
2355 | * this iteration around the rotor. | |
2356 | * For example, if a device is 80% full | |
2357 | * and the pool is 20% full then we should | |
2358 | * reduce allocations by 60% on this device. | |
2359 | * | |
2360 | * mg_bias = (20 - 80) * 512K / 100 = -307K | |
2361 | * | |
2362 | * This reduces allocations by 307K for this | |
2363 | * iteration. | |
2364 | */ | |
2365 | mg->mg_bias = ((cu - vu) * | |
2366 | (int64_t)mg->mg_aliquot) / 100; | |
2367 | } else if (!metaslab_bias_enabled) { | |
2368 | mg->mg_bias = 0; | |
2369 | } | |
2370 | ||
2371 | if ((flags & METASLAB_FASTWRITE) || | |
2372 | atomic_add_64_nv(&mc->mc_aliquot, asize) >= | |
2373 | mg->mg_aliquot + mg->mg_bias) { | |
2374 | mc->mc_rotor = mg->mg_next; | |
2375 | mc->mc_aliquot = 0; | |
2376 | } | |
2377 | ||
2378 | DVA_SET_VDEV(&dva[d], vd->vdev_id); | |
2379 | DVA_SET_OFFSET(&dva[d], offset); | |
2380 | DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER)); | |
2381 | DVA_SET_ASIZE(&dva[d], asize); | |
2382 | ||
2383 | if (flags & METASLAB_FASTWRITE) { | |
2384 | atomic_add_64(&vd->vdev_pending_fastwrite, | |
2385 | psize); | |
2386 | mutex_exit(&mc->mc_fastwrite_lock); | |
2387 | } | |
2388 | ||
2389 | return (0); | |
2390 | } | |
2391 | next: | |
2392 | mc->mc_rotor = mg->mg_next; | |
2393 | mc->mc_aliquot = 0; | |
2394 | } while ((mg = mg->mg_next) != rotor); | |
2395 | ||
2396 | if (!all_zero) { | |
2397 | dshift++; | |
2398 | ASSERT(dshift < 64); | |
2399 | goto top; | |
2400 | } | |
2401 | ||
2402 | if (!allocatable && !zio_lock) { | |
2403 | dshift = 3; | |
2404 | zio_lock = B_TRUE; | |
2405 | goto top; | |
2406 | } | |
2407 | ||
2408 | bzero(&dva[d], sizeof (dva_t)); | |
2409 | ||
2410 | if (flags & METASLAB_FASTWRITE) | |
2411 | mutex_exit(&mc->mc_fastwrite_lock); | |
2412 | ||
2413 | return (SET_ERROR(ENOSPC)); | |
2414 | } | |
2415 | ||
2416 | /* | |
2417 | * Free the block represented by DVA in the context of the specified | |
2418 | * transaction group. | |
2419 | */ | |
2420 | static void | |
2421 | metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now) | |
2422 | { | |
2423 | uint64_t vdev = DVA_GET_VDEV(dva); | |
2424 | uint64_t offset = DVA_GET_OFFSET(dva); | |
2425 | uint64_t size = DVA_GET_ASIZE(dva); | |
2426 | vdev_t *vd; | |
2427 | metaslab_t *msp; | |
2428 | ||
2429 | if (txg > spa_freeze_txg(spa)) | |
2430 | return; | |
2431 | ||
2432 | if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) || | |
2433 | (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) { | |
2434 | zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu", | |
2435 | (u_longlong_t)vdev, (u_longlong_t)offset, | |
2436 | (u_longlong_t)size); | |
2437 | return; | |
2438 | } | |
2439 | ||
2440 | msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; | |
2441 | ||
2442 | if (DVA_GET_GANG(dva)) | |
2443 | size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); | |
2444 | ||
2445 | mutex_enter(&msp->ms_lock); | |
2446 | ||
2447 | if (now) { | |
2448 | range_tree_remove(msp->ms_alloctree[txg & TXG_MASK], | |
2449 | offset, size); | |
2450 | ||
2451 | VERIFY(!msp->ms_condensing); | |
2452 | VERIFY3U(offset, >=, msp->ms_start); | |
2453 | VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size); | |
2454 | VERIFY3U(range_tree_space(msp->ms_tree) + size, <=, | |
2455 | msp->ms_size); | |
2456 | VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); | |
2457 | VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); | |
2458 | range_tree_add(msp->ms_tree, offset, size); | |
2459 | } else { | |
2460 | if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0) | |
2461 | vdev_dirty(vd, VDD_METASLAB, msp, txg); | |
2462 | range_tree_add(msp->ms_freetree[txg & TXG_MASK], | |
2463 | offset, size); | |
2464 | } | |
2465 | ||
2466 | mutex_exit(&msp->ms_lock); | |
2467 | } | |
2468 | ||
2469 | /* | |
2470 | * Intent log support: upon opening the pool after a crash, notify the SPA | |
2471 | * of blocks that the intent log has allocated for immediate write, but | |
2472 | * which are still considered free by the SPA because the last transaction | |
2473 | * group didn't commit yet. | |
2474 | */ | |
2475 | static int | |
2476 | metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) | |
2477 | { | |
2478 | uint64_t vdev = DVA_GET_VDEV(dva); | |
2479 | uint64_t offset = DVA_GET_OFFSET(dva); | |
2480 | uint64_t size = DVA_GET_ASIZE(dva); | |
2481 | vdev_t *vd; | |
2482 | metaslab_t *msp; | |
2483 | int error = 0; | |
2484 | ||
2485 | ASSERT(DVA_IS_VALID(dva)); | |
2486 | ||
2487 | if ((vd = vdev_lookup_top(spa, vdev)) == NULL || | |
2488 | (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) | |
2489 | return (SET_ERROR(ENXIO)); | |
2490 | ||
2491 | msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; | |
2492 | ||
2493 | if (DVA_GET_GANG(dva)) | |
2494 | size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); | |
2495 | ||
2496 | mutex_enter(&msp->ms_lock); | |
2497 | ||
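| /* | |
| * Activate (and thereby load) the metaslab if we are really claiming | |
| * on a writeable pool, or if it is not yet loaded, so that the | |
| * containment check below sees the current free tree. | |
| */ | |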
2498 | if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) | |
2499 | error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY); | |
2500 | ||
2501 | if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size)) | |
2502 | error = SET_ERROR(ENOENT); | |
2503 | ||
2504 | if (error || txg == 0) { /* txg == 0 indicates dry run */ | |
2505 | mutex_exit(&msp->ms_lock); | |
2506 | return (error); | |
2507 | } | |
2508 | ||
2509 | VERIFY(!msp->ms_condensing); | |
2510 | VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); | |
2511 | VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); | |
2512 | VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size); | |
2513 | range_tree_remove(msp->ms_tree, offset, size); | |
2514 | ||
2515 | if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */ | |
2516 | if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0) | |
2517 | vdev_dirty(vd, VDD_METASLAB, msp, txg); | |
2518 | range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size); | |
2519 | } | |
2520 | ||
2521 | mutex_exit(&msp->ms_lock); | |
2522 | ||
2523 | return (0); | |
2524 | } | |
2525 | ||
2526 | int | |
2527 | metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, | |
2528 | int ndvas, uint64_t txg, blkptr_t *hintbp, int flags) | |
2529 | { | |
2530 | dva_t *dva = bp->blk_dva; | |
2531 | dva_t *hintdva = hintbp->blk_dva; | |
2532 | int d, error = 0; | |
2533 | ||
2534 | ASSERT(bp->blk_birth == 0); | |
2535 | ASSERT(BP_PHYSICAL_BIRTH(bp) == 0); | |
2536 | ||
2537 | spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); | |
2538 | ||
2539 | if (mc->mc_rotor == NULL) { /* no vdevs in this class */ | |
2540 | spa_config_exit(spa, SCL_ALLOC, FTAG); | |
2541 | return (SET_ERROR(ENOSPC)); | |
2542 | } | |
2543 | ||
2544 | ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa)); | |
2545 | ASSERT(BP_GET_NDVAS(bp) == 0); | |
2546 | ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); | |
2547 | ||
2548 | for (d = 0; d < ndvas; d++) { | |
2549 | error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, | |
2550 | txg, flags); | |
2551 | if (error != 0) { | |
2552 | for (d--; d >= 0; d--) { | |
2553 | metaslab_free_dva(spa, &dva[d], txg, B_TRUE); | |
2554 | bzero(&dva[d], sizeof (dva_t)); | |
2555 | } | |
2556 | spa_config_exit(spa, SCL_ALLOC, FTAG); | |
2557 | return (error); | |
2558 | } | |
2559 | } | |
2560 | ASSERT(error == 0); | |
2561 | ASSERT(BP_GET_NDVAS(bp) == ndvas); | |
2562 | ||
2563 | spa_config_exit(spa, SCL_ALLOC, FTAG); | |
2564 | ||
2565 | BP_SET_BIRTH(bp, txg, txg); | |
2566 | ||
2567 | return (0); | |
2568 | } | |
2569 | ||
2570 | void | |
2571 | metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) | |
2572 | { | |
2573 | const dva_t *dva = bp->blk_dva; | |
2574 | int d, ndvas = BP_GET_NDVAS(bp); | |
2575 | ||
2576 | ASSERT(!BP_IS_HOLE(bp)); | |
2577 | ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa)); | |
2578 | ||
2579 | spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); | |
2580 | ||
2581 | for (d = 0; d < ndvas; d++) | |
2582 | metaslab_free_dva(spa, &dva[d], txg, now); | |
2583 | ||
2584 | spa_config_exit(spa, SCL_FREE, FTAG); | |
2585 | } | |
2586 | ||
2587 | int | |
2588 | metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) | |
2589 | { | |
2590 | const dva_t *dva = bp->blk_dva; | |
2591 | int ndvas = BP_GET_NDVAS(bp); | |
2592 | int d, error = 0; | |
2593 | ||
2594 | ASSERT(!BP_IS_HOLE(bp)); | |
2595 | ||
2596 | if (txg != 0) { | |
2597 | /* | |
2598 | * First do a dry run to make sure all DVAs are claimable, | |
2599 | * so we don't have to unwind from partial failures below. | |
2600 | */ | |
2601 | if ((error = metaslab_claim(spa, bp, 0)) != 0) | |
2602 | return (error); | |
2603 | } | |
2604 | ||
2605 | spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); | |
2606 | ||
2607 | for (d = 0; d < ndvas; d++) | |
2608 | if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0) | |
2609 | break; | |
2610 | ||
2611 | spa_config_exit(spa, SCL_ALLOC, FTAG); | |
2612 | ||
2613 | ASSERT(error == 0 || txg == 0); | |
2614 | ||
2615 | return (error); | |
2616 | } | |
2617 | ||
2618 | void | |
2619 | metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp) | |
2620 | { | |
2621 | const dva_t *dva = bp->blk_dva; | |
2622 | int ndvas = BP_GET_NDVAS(bp); | |
2623 | uint64_t psize = BP_GET_PSIZE(bp); | |
2624 | int d; | |
2625 | vdev_t *vd; | |
2626 | ||
2627 | ASSERT(!BP_IS_HOLE(bp)); | |
2628 | ASSERT(!BP_IS_EMBEDDED(bp)); | |
2629 | ASSERT(psize > 0); | |
2630 | ||
2631 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
2632 | ||
2633 | for (d = 0; d < ndvas; d++) { | |
2634 | if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) | |
2635 | continue; | |
2636 | atomic_add_64(&vd->vdev_pending_fastwrite, psize); | |
2637 | } | |
2638 | ||
2639 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
2640 | } | |
2641 | ||
2642 | void | |
2643 | metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp) | |
2644 | { | |
2645 | const dva_t *dva = bp->blk_dva; | |
2646 | int ndvas = BP_GET_NDVAS(bp); | |
2647 | uint64_t psize = BP_GET_PSIZE(bp); | |
2648 | int d; | |
2649 | vdev_t *vd; | |
2650 | ||
2651 | ASSERT(!BP_IS_HOLE(bp)); | |
2652 | ASSERT(!BP_IS_EMBEDDED(bp)); | |
2653 | ASSERT(psize > 0); | |
2654 | ||
2655 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
2656 | ||
2657 | for (d = 0; d < ndvas; d++) { | |
2658 | if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) | |
2659 | continue; | |
2660 | ASSERT3U(vd->vdev_pending_fastwrite, >=, psize); | |
2661 | atomic_sub_64(&vd->vdev_pending_fastwrite, psize); | |
2662 | } | |
2663 | ||
2664 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
2665 | } | |
2666 | ||
2667 | void | |
2668 | metaslab_check_free(spa_t *spa, const blkptr_t *bp) | |
2669 | { | |
2670 | int i, j; | |
2671 | ||
2672 | if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) | |
2673 | return; | |
2674 | ||
2675 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
2676 | for (i = 0; i < BP_GET_NDVAS(bp); i++) { | |
2677 | uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]); | |
2678 | vdev_t *vd = vdev_lookup_top(spa, vdev); | |
2679 | uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]); | |
2680 | uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]); | |
2681 | metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; | |
2682 | ||
2683 | if (msp->ms_loaded) | |
2684 | range_tree_verify(msp->ms_tree, offset, size); | |
2685 | ||
2686 | for (j = 0; j < TXG_SIZE; j++) | |
2687 | range_tree_verify(msp->ms_freetree[j], offset, size); | |
2688 | for (j = 0; j < TXG_DEFER_SIZE; j++) | |
2689 | range_tree_verify(msp->ms_defertree[j], offset, size); | |
2690 | } | |
2691 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
2692 | } | |
2693 | ||
2694 | #if defined(_KERNEL) && defined(HAVE_SPL) | |
2695 | module_param(metaslab_debug_load, int, 0644); | |
2696 | module_param(metaslab_debug_unload, int, 0644); | |
2697 | module_param(metaslab_preload_enabled, int, 0644); | |
2698 | module_param(zfs_mg_noalloc_threshold, int, 0644); | |
2699 | module_param(zfs_mg_fragmentation_threshold, int, 0644); | |
2700 | module_param(zfs_metaslab_fragmentation_threshold, int, 0644); | |
2701 | module_param(metaslab_fragmentation_factor_enabled, int, 0644); | |
2702 | module_param(metaslab_lba_weighting_enabled, int, 0644); | |
2703 | module_param(metaslab_bias_enabled, int, 0644); | |
2704 | ||
2705 | MODULE_PARM_DESC(metaslab_debug_load, | |
2706 | "load all metaslabs when pool is first opened"); | |
2707 | MODULE_PARM_DESC(metaslab_debug_unload, | |
2708 | "prevent metaslabs from being unloaded"); | |
2709 | MODULE_PARM_DESC(metaslab_preload_enabled, | |
2710 | "preload potential metaslabs during reassessment"); | |
2711 | ||
2712 | MODULE_PARM_DESC(zfs_mg_noalloc_threshold, | |
2713 | "percentage of free space for metaslab group to allow allocation"); | |
2714 | MODULE_PARM_DESC(zfs_mg_fragmentation_threshold, | |
2715 | "fragmentation for metaslab group to allow allocation"); | |
2716 | ||
2717 | MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold, | |
2718 | "fragmentation for metaslab to allow allocation"); | |
2719 | MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled, | |
2720 | "use the fragmentation metric to prefer less fragmented metaslabs"); | |
2721 | MODULE_PARM_DESC(metaslab_lba_weighting_enabled, | |
2722 | "prefer metaslabs with lower LBAs"); | |
2723 | MODULE_PARM_DESC(metaslab_bias_enabled, | |
2724 | "enable metaslab group biasing"); | |
2725 | #endif /* _KERNEL && HAVE_SPL */ |