/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>

#define	WITH_DF_BLOCK_ALLOCATOR

/*
 * Allow allocations to switch to gang blocks quickly. We do this to
 * avoid having to load lots of space_maps in a given txg. There are,
 * however, some cases where we want to avoid "fast" ganging and instead
 * we want to do an exhaustive search of all metaslabs on this device.
 * Currently we don't allow any gang, slog, or dump device related allocations
 * to "fast" gang.
 */
#define	CAN_FASTGANG(flags) \
	(!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
	METASLAB_GANG_AVOID)))

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)

uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space_map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;

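/*
 * For example (illustrative): with the default value of 200 the on-disk
 * space map must be at least twice the size of its minimal form (one
 * 64-bit entry per in-core free segment) before it is rewritten; see
 * metaslab_should_condense() for the exact test.
 */
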
/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;

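/*
 * For example (hypothetical setting): with zfs_mg_noalloc_threshold = 10,
 * a metaslab group at or below 10% free is skipped while any other group
 * in the pool is still above 10% free; once every group has crossed the
 * threshold, all of them accept allocations again.
 */
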
/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;

/*
 * A metaslab is considered "free" if it contains a contiguous
 * segment which is greater than metaslab_min_alloc_size.
 */
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;

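/*
 * For example (illustrative): with the defaults above, a metaslab whose
 * largest free segment can no longer satisfy a SPA_MAXBLOCKSIZE request,
 * or whose free space falls below 4%, abandons first-fit (offset-ordered)
 * searching and switches to best-fit on the size-ordered tree; see
 * metaslab_df_alloc() below.
 */
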
/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;

/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;

/*
 * Should we be willing to write data to degraded vdevs?
 */
boolean_t zfs_write_to_degraded = B_FALSE;

/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;

/*
 * Enable/disable preloading of metaslabs.
 */
boolean_t metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable the additional weight factor for each metaslab.
 */
boolean_t metaslab_weight_factor_enable = B_FALSE;

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_PUSHPAGE);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_fastwrite_lock, NULL, MUTEX_DEFAULT, NULL);

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	mutex_destroy(&mc->mc_fastwrite_lock);
	kmem_free(mc, sizeof (metaslab_class_t));
}

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}

void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = x1;
	const metaslab_t *m2 = x2;

	if (m1->ms_weight < m2->ms_weight)
		return (1);
	if (m1->ms_weight > m2->ms_weight)
		return (-1);

	/*
	 * If the weights are identical, use the offset to force uniqueness.
	 */
	if (m1->ms_start < m2->ms_start)
		return (-1);
	if (m1->ms_start > m2->ms_start)
		return (1);

	ASSERT3P(m1, ==, m2);

	return (0);
}

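/*
 * For illustration: this comparator orders the group's AVL tree from
 * heaviest to lightest metaslab, so avl_first() returns the most
 * desirable candidate; equal weights fall back to ms_start, which keeps
 * the ordering total and the tree entries unique.
 */
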
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the group's free capacity
 * is greater than the zfs_mg_noalloc_threshold. If a metaslab group
 * transitions from allocatable to non-allocatable or vice versa then
 * the metaslab group's class is updated to reflect the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;

	ASSERT(vd == vd->vdev_top);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;

	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);

	mg->mg_allocatable = (mg->mg_free_capacity > zfs_mg_noalloc_threshold);

	/*
	 * The mc_alloc_groups maintains a count of the number of
	 * groups in this metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;
	mutex_exit(&mg->mg_lock);
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_PUSHPAGE);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;

	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
	    minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	taskq_destroy(mg->mg_taskq);
	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	kmem_free(mg, sizeof (metaslab_group_t));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	ASSERT(mc->mc_rotor != mg);
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
}

void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	if (--mg->mg_activation_count != 0) {
		ASSERT(mc->mc_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	taskq_wait(mg->mg_taskq);

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == NULL);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 510].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

/*
 * Determine if a given metaslab group should skip allocations. A metaslab
 * group should avoid allocations if its used capacity has crossed the
 * zfs_mg_noalloc_threshold and there is at least one metaslab group
 * that can still handle allocations.
 */
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	spa_t *spa = vd->vdev_spa;
	metaslab_class_t *mc = mg->mg_class;

	/*
	 * A metaslab group is considered allocatable if its free capacity
	 * is greater than the set value of zfs_mg_noalloc_threshold, it's
	 * associated with a slog, or there are no other metaslab groups
	 * with free capacity greater than zfs_mg_noalloc_threshold.
	 */
	return (mg->mg_free_capacity > zfs_mg_noalloc_threshold ||
	    mc != spa_normal_class(spa) || mc->mc_alloc_groups == 0);
}

/*
 * ==========================================================================
 * Range tree callbacks
 * ==========================================================================
 */

/*
 * Comparison function for the private size-ordered tree. Tree is sorted
 * by size, larger sizes at the end of the tree.
 */
static int
metaslab_rangesize_compare(const void *x1, const void *x2)
{
	const range_seg_t *r1 = x1;
	const range_seg_t *r2 = x2;
	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
	uint64_t rs_size2 = r2->rs_end - r2->rs_start;

	if (rs_size1 < rs_size2)
		return (-1);
	if (rs_size1 > rs_size2)
		return (1);

	if (r1->rs_start < r2->rs_start)
		return (-1);

	if (r1->rs_start > r2->rs_start)
		return (1);

	return (0);
}

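/*
 * For illustration: under this ordering a 4K segment at offset 0x9000
 * sorts before an 8K segment at offset 0, so avl_last() of the
 * size-ordered tree is always the largest free segment -- the property
 * metaslab_block_maxsize() depends on.
 */
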
/*
 * Create any block allocator specific components. The current allocators
 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
 */
static void
metaslab_rt_create(range_tree_t *rt, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT(msp->ms_tree == NULL);

	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
}

/*
 * Destroy the block allocator specific components.
 */
static void
metaslab_rt_destroy(range_tree_t *rt, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);
	ASSERT0(avl_numnodes(&msp->ms_size_tree));

	avl_destroy(&msp->ms_size_tree);
}

static void
metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);
	VERIFY(!msp->ms_condensing);
	avl_add(&msp->ms_size_tree, rs);
}

static void
metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);
	VERIFY(!msp->ms_condensing);
	avl_remove(&msp->ms_size_tree, rs);
}

static void
metaslab_rt_vacate(range_tree_t *rt, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);

	/*
	 * Normally one would walk the tree freeing nodes along the way.
	 * Since the nodes are shared with the range trees we can avoid
	 * walking all nodes and just reinitialize the avl tree. The nodes
	 * will be freed by the range tree, so we don't want to free them here.
	 */
	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
}

static range_tree_ops_t metaslab_rt_ops = {
	metaslab_rt_create,
	metaslab_rt_destroy,
	metaslab_rt_add,
	metaslab_rt_remove,
	metaslab_rt_vacate
};

/*
 * ==========================================================================
 * Metaslab block operations
 * ==========================================================================
 */

/*
 * Return the maximum contiguous segment within the metaslab.
 */
uint64_t
metaslab_block_maxsize(metaslab_t *msp)
{
	avl_tree_t *t = &msp->ms_size_tree;
	range_seg_t *rs;

	if (t == NULL || (rs = avl_last(t)) == NULL)
		return (0ULL);

	return (rs->rs_end - rs->rs_start);
}

uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size)
{
	uint64_t start;
	range_tree_t *rt = msp->ms_tree;

	VERIFY(!msp->ms_condensing);

	start = msp->ms_ops->msop_alloc(msp, size);
	if (start != -1ULL) {
		vdev_t *vd = msp->ms_group->mg_vd;

		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
		range_tree_remove(rt, start, size);
	}
	return (start);
}

/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */

#if defined(WITH_FF_BLOCK_ALLOCATOR) || \
    defined(WITH_DF_BLOCK_ALLOCATOR) || \
    defined(WITH_CF_BLOCK_ALLOCATOR)
/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate. This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
	range_seg_t *rs, rsearch;
	avl_index_t where;

	rsearch.rs_start = *cursor;
	rsearch.rs_end = *cursor + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL)
		rs = avl_nearest(t, where, AVL_AFTER);

	while (rs != NULL) {
		uint64_t offset = P2ROUNDUP(rs->rs_start, align);

		if (offset + size <= rs->rs_end) {
			*cursor = offset + size;
			return (offset);
		}
		rs = AVL_NEXT(t, rs);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_block_picker(t, cursor, size, align));
}
#endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */

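/*
 * For example (hypothetical values): a 24K (0x6000) request has
 * align = size & -size = 0x2000 (8K), so the caller passes the 8K
 * alignment cursor and metaslab_block_picker() rounds each candidate
 * offset up with P2ROUNDUP() to an 8K boundary before the fit test.
 * On wraparound, the *cursor == 0 check above bounds the retry to one
 * additional pass over the tree.
 */
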
#if defined(WITH_FF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static uint64_t
metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket), though it does not guarantee that other allocation sizes
	 * will not exist in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	avl_tree_t *t = &msp->ms_tree->rt_root;

	return (metaslab_block_picker(t, cursor, size, align));
}

/* ARGSUSED */
static boolean_t
metaslab_ff_fragmented(metaslab_t *msp)
{
	return (B_TRUE);
}

static metaslab_ops_t metaslab_ff_ops = {
	metaslab_ff_alloc,
	metaslab_ff_fragmented
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
#endif /* WITH_FF_BLOCK_ALLOCATOR */

#if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first fit allocation scheme until space gets low and then
 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket), though it does not guarantee that other allocation sizes
	 * will not exist in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	range_tree_t *rt = msp->ms_tree;
	avl_tree_t *t = &rt->rt_root;
	uint64_t max_size = metaslab_block_maxsize(msp);
	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));

	if (max_size < size)
		return (-1ULL);

	/*
	 * If we're running low on space switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		t = &msp->ms_size_tree;
		*cursor = 0;
	}

	return (metaslab_block_picker(t, cursor, size, 1ULL));
}

static boolean_t
metaslab_df_fragmented(metaslab_t *msp)
{
	range_tree_t *rt = msp->ms_tree;
	uint64_t max_size = metaslab_block_maxsize(msp);
	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;

	if (max_size >= metaslab_df_alloc_threshold &&
	    free_pct >= metaslab_df_free_pct)
		return (B_FALSE);

	return (B_TRUE);
}

static metaslab_ops_t metaslab_df_ops = {
	metaslab_df_alloc,
	metaslab_df_fragmented
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
#endif /* WITH_DF_BLOCK_ALLOCATOR */

#if defined(WITH_CF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Cursor fit block allocator -
 * Select the largest region in the metaslab, set the cursor to the beginning
 * of the range and the cursor_end to the end of the range. As allocations
 * are made advance the cursor. Continue allocating from the cursor until
 * the range is exhausted and then find a new range.
 * ==========================================================================
 */
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
	range_tree_t *rt = msp->ms_tree;
	avl_tree_t *t = &msp->ms_size_tree;
	uint64_t *cursor = &msp->ms_lbas[0];
	uint64_t *cursor_end = &msp->ms_lbas[1];
	uint64_t offset = 0;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));

	ASSERT3U(*cursor_end, >=, *cursor);

	if ((*cursor + size) > *cursor_end) {
		range_seg_t *rs;

		rs = avl_last(&msp->ms_size_tree);
		if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
			return (-1ULL);

		*cursor = rs->rs_start;
		*cursor_end = rs->rs_end;
	}

	offset = *cursor;
	*cursor += size;

	return (offset);
}

static boolean_t
metaslab_cf_fragmented(metaslab_t *msp)
{
	return (metaslab_block_maxsize(msp) < metaslab_min_alloc_size);
}

static metaslab_ops_t metaslab_cf_ops = {
	metaslab_cf_alloc,
	metaslab_cf_fragmented
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
#endif /* WITH_CF_BLOCK_ALLOCATOR */

#if defined(WITH_NDF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * New dynamic fit allocator -
 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
 * contiguous blocks. If no region is found then just use the largest segment
 * that remains.
 * ==========================================================================
 */

/*
 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
 * to request from the allocator.
 */
uint64_t metaslab_ndf_clump_shift = 4;

static uint64_t
metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
{
	avl_tree_t *t = &msp->ms_tree->rt_root;
	avl_index_t where;
	range_seg_t *rs, rsearch;
	uint64_t hbit = highbit64(size);
	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
	uint64_t max_size = metaslab_block_maxsize(msp);

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));

	if (max_size < size)
		return (-1ULL);

	rsearch.rs_start = *cursor;
	rsearch.rs_end = *cursor + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
		t = &msp->ms_size_tree;

		rsearch.rs_start = 0;
		rsearch.rs_end = MIN(max_size,
		    1ULL << (hbit + metaslab_ndf_clump_shift));
		rs = avl_find(t, &rsearch, &where);
		if (rs == NULL)
			rs = avl_nearest(t, where, AVL_AFTER);
		ASSERT(rs != NULL);
	}

	if ((rs->rs_end - rs->rs_start) >= size) {
		*cursor = rs->rs_start + size;
		return (rs->rs_start);
	}
	return (-1ULL);
}

static boolean_t
metaslab_ndf_fragmented(metaslab_t *msp)
{
	return (metaslab_block_maxsize(msp) <=
	    (metaslab_min_alloc_size << metaslab_ndf_clump_shift));
}

static metaslab_ops_t metaslab_ndf_ops = {
	metaslab_ndf_alloc,
	metaslab_ndf_fragmented
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
#endif /* WITH_NDF_BLOCK_ALLOCATOR */

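/*
 * Note: only one WITH_*_BLOCK_ALLOCATOR section can be compiled in at a
 * time -- this file selects WITH_DF_BLOCK_ALLOCATOR near the top --
 * since each guarded section provides the sole definition of
 * zfs_metaslab_ops, the op vector callers hand to metaslab_class_create().
 */
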
/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */

/*
 * Wait for any in-progress metaslab loads to complete.
 */
void
metaslab_load_wait(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	while (msp->ms_loading) {
		ASSERT(!msp->ms_loaded);
		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
	}
}

int
metaslab_load(metaslab_t *msp)
{
	int error = 0;
	int t;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(!msp->ms_loaded);
	ASSERT(!msp->ms_loading);

	msp->ms_loading = B_TRUE;

	/*
	 * If the space map has not been allocated yet, then treat
	 * all the space in the metaslab as free and add it to the
	 * ms_tree.
	 */
	if (msp->ms_sm != NULL)
		error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
	else
		range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);

	msp->ms_loaded = (error == 0);
	msp->ms_loading = B_FALSE;

	if (msp->ms_loaded) {
		for (t = 0; t < TXG_DEFER_SIZE; t++) {
			range_tree_walk(msp->ms_defertree[t],
			    range_tree_remove, msp->ms_tree);
		}
	}
	cv_broadcast(&msp->ms_load_cv);
	return (error);
}

void
metaslab_unload(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	range_tree_vacate(msp->ms_tree, NULL, NULL);
	msp->ms_loaded = B_FALSE;
	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
}

metaslab_t *
metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg)
{
	vdev_t *vd = mg->mg_vd;
	objset_t *mos = vd->vdev_spa->spa_meta_objset;
	metaslab_t *msp;

	msp = kmem_zalloc(sizeof (metaslab_t), KM_PUSHPAGE);
	mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&msp->ms_load_cv, NULL, CV_DEFAULT, NULL);
	msp->ms_id = id;
	msp->ms_start = id << vd->vdev_ms_shift;
	msp->ms_size = 1ULL << vd->vdev_ms_shift;

	/*
	 * We only open space map objects that already exist. All others
	 * will be opened when we finally allocate an object for them.
	 */
	if (object != 0) {
		VERIFY0(space_map_open(&msp->ms_sm, mos, object, msp->ms_start,
		    msp->ms_size, vd->vdev_ashift, &msp->ms_lock));
		ASSERT(msp->ms_sm != NULL);
	}

	/*
	 * We create the main range tree here, but we don't create the
	 * alloctree and freetree until metaslab_sync_done(). This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	msp->ms_tree = range_tree_create(&metaslab_rt_ops, msp, &msp->ms_lock);
	metaslab_group_add(mg, msp);

	msp->ms_ops = mg->mg_class->mc_ops;

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(msp, 0);

	/*
	 * If metaslab_debug_load is set and we're initializing a metaslab
	 * that has an allocated space map object then load its space map
	 * so that we can verify frees.
	 */
	if (metaslab_debug_load && msp->ms_sm != NULL) {
		mutex_enter(&msp->ms_lock);
		VERIFY0(metaslab_load(msp));
		mutex_exit(&msp->ms_lock);
	}

	if (txg != 0) {
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, msp, txg);
	}

	return (msp);
}

void
metaslab_fini(metaslab_t *msp)
{
	int t;

	metaslab_group_t *mg = msp->ms_group;

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);

	VERIFY(msp->ms_group == NULL);
	vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
	    0, -msp->ms_size);
	space_map_close(msp->ms_sm);

	metaslab_unload(msp);
	range_tree_destroy(msp->ms_tree);

	for (t = 0; t < TXG_SIZE; t++) {
		range_tree_destroy(msp->ms_alloctree[t]);
		range_tree_destroy(msp->ms_freetree[t]);
	}

	for (t = 0; t < TXG_DEFER_SIZE; t++) {
		range_tree_destroy(msp->ms_defertree[t]);
	}

	ASSERT0(msp->ms_deferspace);

	mutex_exit(&msp->ms_lock);
	cv_destroy(&msp->ms_load_cv);
	mutex_destroy(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
}

/*
 * Apply a weighting factor based on the histogram information for this
 * metaslab. The current weighting factor is somewhat arbitrary and requires
 * additional investigation. The implementation provides a measure of
 * "weighted" free space and gives a higher weighting for larger contiguous
 * regions. The weighting factor is determined by counting the number of
 * sm_shift sectors that exist in each region represented by the histogram.
 * That value is then multiplied by the power of 2 exponent and the sm_shift
 * value.
 *
 * For example, assume the 2^21 histogram bucket has 4 2MB regions and the
 * metaslab has an sm_shift value of 9 (512B):
 *
 * 1) calculate the number of sm_shift sectors in the region:
 *	2^21 / 2^9 = 2^12 = 4096 * 4 (number of regions) = 16384
 * 2) multiply by the power of 2 exponent and the sm_shift value:
 *	16384 * 21 * 9 = 3096576
 * This value will be added to the weighting of the metaslab.
 */
static uint64_t
metaslab_weight_factor(metaslab_t *msp)
{
	uint64_t factor = 0;
	uint64_t sectors;
	int i;

	/*
	 * A null space map means that the entire metaslab is free;
	 * calculate a weight factor that spans the entire size of the
	 * metaslab.
	 */
	if (msp->ms_sm == NULL) {
		vdev_t *vd = msp->ms_group->mg_vd;

		i = highbit64(msp->ms_size) - 1;
		sectors = msp->ms_size >> vd->vdev_ashift;
		return (sectors * i * vd->vdev_ashift);
	}

	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return (0);

	for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE(msp->ms_sm); i++) {
		if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
			continue;

		/*
		 * Determine the number of sm_shift sectors in the region
		 * indicated by the histogram. For example, given an
		 * sm_shift value of 9 (512 bytes) and i = 4 then we know
		 * that we're looking at an 8K region in the histogram
		 * (i.e. 9 + 4 = 13, 2^13 = 8192). To figure out the
		 * number of sm_shift sectors (512 bytes in this example),
		 * we would take 8192 / 512 = 16. Since the histogram
		 * is offset by sm_shift we can simply use the value
		 * of i to calculate this (i.e. 2^i = 16 where i = 4).
		 */
		sectors = msp->ms_sm->sm_phys->smp_histogram[i] << i;
		factor += (i + msp->ms_sm->sm_shift) * sectors;
	}
	return (factor * msp->ms_sm->sm_shift);
}

static uint64_t
metaslab_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * This vdev is in the process of being removed so there is nothing
	 * for us to do here.
	 */
	if (vd->vdev_removing) {
		ASSERT0(space_map_allocated(msp->ms_sm));
		ASSERT0(vd->vdev_ms_shift);
		return (0);
	}

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = msp->ms_size - space_map_allocated(msp->ms_sm);
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1. We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
	ASSERT(weight >= space && weight <= 2 * space);

	msp->ms_factor = metaslab_weight_factor(msp);
	if (metaslab_weight_factor_enable)
		weight += msp->ms_factor;

	if (msp->ms_loaded && !msp->ms_ops->msop_fragmented(msp)) {
		/*
		 * If this metaslab is one we're actively using, adjust its
		 * weight to make it preferable to any inactive metaslab so
		 * we'll polish it off.
		 */
		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
	}

	return (weight);
}

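/*
 * Illustrative arithmetic for the bandwidth multiplier above (assumed
 * vdev_ms_count of 200): metaslab 0 weighs 2.0x its free space,
 * metaslab 100 weighs 1.5x, and metaslab 199 just over 1.0x, tracking
 * the ~2:1 outer-to-inner track bandwidth ratio described above.
 */
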
static int
metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		metaslab_load_wait(msp);
		if (!msp->ms_loaded) {
			int error = metaslab_load(msp);
			if (error) {
				metaslab_group_sort(msp->ms_group, msp, 0);
				return (error);
			}
		}

		metaslab_group_sort(msp->ms_group, msp,
		    msp->ms_weight | activation_weight);
	}
	ASSERT(msp->ms_loaded);
	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (0);
}

static void
metaslab_passivate(metaslab_t *msp, uint64_t size)
{
	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again. In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(size >= SPA_MINBLOCKSIZE || range_tree_space(msp->ms_tree) == 0);
	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}

static void
metaslab_preload(void *arg)
{
	metaslab_t *msp = arg;
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;

	ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));

	mutex_enter(&msp->ms_lock);
	metaslab_load_wait(msp);
	if (!msp->ms_loaded)
		(void) metaslab_load(msp);

	/*
	 * Set the ms_access_txg value so that we don't unload it right away.
	 */
	msp->ms_access_txg = spa_syncing_txg(spa) + metaslab_unload_delay + 1;
	mutex_exit(&msp->ms_lock);
}

static void
metaslab_group_preload(metaslab_group_t *mg)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	int m = 0;

	if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
		taskq_wait(mg->mg_taskq);
		return;
	}

	mutex_enter(&mg->mg_lock);
	/*
	 * Load the next potential metaslabs
	 */
	msp = avl_first(t);
	while (msp != NULL) {
		metaslab_t *msp_next = AVL_NEXT(t, msp);

		/* If we have reached our preload limit then we're done */
		if (++m > metaslab_preload_limit)
			break;

		/*
		 * We must drop the metaslab group lock here to preserve
		 * lock ordering with the ms_lock (when grabbing both
		 * the mg_lock and the ms_lock, the ms_lock must be taken
		 * first). As a result, it is possible that the ordering
		 * of the metaslabs within the avl tree may change before
		 * we reacquire the lock. The metaslab cannot be removed from
		 * the tree while we're in syncing context so it is safe to
		 * drop the mg_lock here. If the metaslabs are reordered
		 * nothing will break -- we just may end up loading a
		 * less than optimal one.
		 */
		mutex_exit(&mg->mg_lock);
		VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
		    msp, TQ_PUSHPAGE) != 0);
		mutex_enter(&mg->mg_lock);
		msp = msp_next;
	}
	mutex_exit(&mg->mg_lock);
}

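/*
 * For example: with the default metaslab_preload_limit of
 * SPA_DVAS_PER_BP, the loop above dispatches loads for only the first
 * few metaslabs returned by the tree walk -- i.e. the heaviest ones per
 * metaslab_compare() -- so preloading never walks the entire group.
 */
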
/*
 * Determine if the space map's on-disk footprint is past our tolerance
 * for inefficiency. We would like to use the following criteria to make
 * our decision:
 *
 * 1. The size of the space map object should not dramatically increase as a
 * result of writing out the free space range tree.
 *
 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
 * times the size of the free space range tree representation
 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
 *
 * Checking the first condition is tricky since we don't want to walk
 * the entire AVL tree calculating the estimated on-disk size. Instead we
 * use the size-ordered range tree in the metaslab and calculate the
 * size required to write out the largest segment in our free tree. If the
 * size required to represent that segment on disk is larger than the space
 * map object then we avoid condensing this map.
 *
 * To determine the second criterion we use a best-case estimate and assume
 * each segment can be represented on-disk as a single 64-bit entry. We refer
 * to this best-case estimate as the space map's minimal form.
 */
static boolean_t
metaslab_should_condense(metaslab_t *msp)
{
	space_map_t *sm = msp->ms_sm;
	range_seg_t *rs;
	uint64_t size, entries, segsz;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(msp->ms_loaded);

	/*
	 * Use the ms_size_tree range tree, which is ordered by size, to
	 * obtain the largest segment in the free tree. If the tree is empty
	 * then we should condense the map.
	 */
	rs = avl_last(&msp->ms_size_tree);
	if (rs == NULL)
		return (B_TRUE);

	/*
	 * Calculate the number of 64-bit entries this segment would
	 * require when written to disk. If this single segment would be
	 * larger on-disk than the entire current on-disk structure, then
	 * clearly condensing will increase the on-disk structure size.
	 */
	size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
	entries = size / (MIN(size, SM_RUN_MAX));
	segsz = entries * sizeof (uint64_t);

	return (segsz <= space_map_length(msp->ms_sm) &&
	    space_map_length(msp->ms_sm) >= (zfs_condense_pct *
	    sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root)) / 100);
}

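/*
 * Worked example for the segsz estimate above (illustrative numbers):
 * a lone 64MB free segment with sm_shift = 9 covers 2^17 sectors and
 * therefore needs about 2^17 / SM_RUN_MAX 64-bit entries on disk. We
 * skip condensing only when even the largest segment's on-disk size
 * exceeds the current space map length, since rewriting the map could
 * not shrink it in that case.
 */
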
1354 | /* | |
1355 | * Condense the on-disk space map representation to its minimized form. | |
1356 | * The minimized form consists of a small number of allocations followed by | |
93cf2076 | 1357 | * the entries of the free range tree. |
e51be066 GW |
1358 | */ |
1359 | static void | |
1360 | metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx) | |
1361 | { | |
1362 | spa_t *spa = msp->ms_group->mg_vd->vdev_spa; | |
93cf2076 GW |
1363 | range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK]; |
1364 | range_tree_t *condense_tree; | |
1365 | space_map_t *sm = msp->ms_sm; | |
e51be066 GW |
1366 | int t; |
1367 | ||
1368 | ASSERT(MUTEX_HELD(&msp->ms_lock)); | |
1369 | ASSERT3U(spa_sync_pass(spa), ==, 1); | |
93cf2076 | 1370 | ASSERT(msp->ms_loaded); |
e51be066 GW |
1371 | |
1372 | spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, " | |
93cf2076 GW |
1373 | "smp size %llu, segments %lu", txg, msp->ms_id, msp, |
1374 | space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root)); | |
e51be066 GW |
1375 | |
1376 | /* | |
93cf2076 | 1377 | * Create an range tree that is 100% allocated. We remove segments |
e51be066 GW |
1378 | * that have been freed in this txg, any deferred frees that exist, |
1379 | * and any allocation in the future. Removing segments should be | |
93cf2076 GW |
1380 | * a relatively inexpensive operation since we expect these trees to |
1381 | * have a small number of nodes. | |
e51be066 | 1382 | */ |
93cf2076 GW |
1383 | condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock); |
1384 | range_tree_add(condense_tree, msp->ms_start, msp->ms_size); | |
e51be066 GW |
1385 | |
1386 | /* | |
93cf2076 | 1387 | * Remove what's been freed in this txg from the condense_tree. |
e51be066 | 1388 | * Since we're in sync_pass 1, we know that all the frees from |
93cf2076 | 1389 | * this txg are in the freetree. |
e51be066 | 1390 | */ |
93cf2076 | 1391 | range_tree_walk(freetree, range_tree_remove, condense_tree); |
e51be066 | 1392 | |
93cf2076 GW |
1393 | for (t = 0; t < TXG_DEFER_SIZE; t++) { |
1394 | range_tree_walk(msp->ms_defertree[t], | |
1395 | range_tree_remove, condense_tree); | |
1396 | } | |
e51be066 | 1397 | |
93cf2076 GW |
1398 | for (t = 1; t < TXG_CONCURRENT_STATES; t++) { |
1399 | range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK], | |
1400 | range_tree_remove, condense_tree); | |
1401 | } | |
e51be066 GW |
1402 | |
1403 | /* | |
1404 | * We're about to drop the metaslab's lock thus allowing | |
1405 | * other consumers to change it's content. Set the | |
93cf2076 | 1406 | * metaslab's ms_condensing flag to ensure that |
e51be066 GW |
1407 | * allocations on this metaslab do not occur while we're |
1408 | * in the middle of committing it to disk. This is only critical | |
93cf2076 | 1409 | * for the ms_tree as all other range trees use per txg |
e51be066 GW |
1410 | * views of their content. |
1411 | */ | |
93cf2076 | 1412 | msp->ms_condensing = B_TRUE; |
e51be066 GW |
1413 | |
1414 | mutex_exit(&msp->ms_lock); | |
93cf2076 | 1415 | space_map_truncate(sm, tx); |
e51be066 GW |
1416 | mutex_enter(&msp->ms_lock); |
1417 | ||
1418 | /* | |
1419 | * While we would ideally like to create a space_map representation | |
1420 | * that consists only of allocation records, doing so can be | |
93cf2076 | 1421 | * prohibitively expensive because the in-core free tree can be |
e51be066 | 1422 | * large, and therefore computationally expensive to subtract |
93cf2076 GW |
1423 | * from the condense_tree. Instead we sync out two trees, a cheap |
1424 | * allocation only tree followed by the in-core free tree. While not | |
e51be066 GW |
1425 | * optimal, this is typically close to optimal, and much cheaper to |
1426 | * compute. | |
1427 | */ | |
93cf2076 GW |
1428 | space_map_write(sm, condense_tree, SM_ALLOC, tx); |
1429 | range_tree_vacate(condense_tree, NULL, NULL); | |
1430 | range_tree_destroy(condense_tree); | |
e51be066 | 1431 | |
93cf2076 GW |
1432 | space_map_write(sm, msp->ms_tree, SM_FREE, tx); |
1433 | msp->ms_condensing = B_FALSE; | |
e51be066 GW |
1434 | } |
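
/*
 * A minimal sketch of the "start fully allocated, subtract pending state"
 * pattern used by metaslab_condense() above. This hypothetical helper is
 * not called anywhere; it only restates the flow using the range_tree
 * calls already used in this file. Locking, the space map truncate and
 * rewrite, and error handling are omitted.
 */
static range_tree_t *
metaslab_condense_tree_example(metaslab_t *msp, range_tree_t **pending,
    int npending)
{
	range_tree_t *ct;
	int i;

	/* Begin with a tree claiming the entire metaslab is allocated. */
	ct = range_tree_create(NULL, NULL, &msp->ms_lock);
	range_tree_add(ct, msp->ms_start, msp->ms_size);

	/* Punch out every segment that is pending free or allocation. */
	for (i = 0; i < npending; i++)
		range_tree_walk(pending[i], range_tree_remove, ct);

	return (ct);
}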

/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa_meta_objset(spa);
	range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
	range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK];
	range_tree_t **freed_tree =
	    &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
	dmu_tx_t *tx;
	uint64_t object = space_map_object(msp->ms_sm);

	ASSERT(!vd->vdev_ishole);

	/*
	 * This metaslab has just been added, so there's no work to do now.
	 */
	if (*freetree == NULL) {
		ASSERT3P(alloctree, ==, NULL);
		return;
	}

	ASSERT3P(alloctree, !=, NULL);
	ASSERT3P(*freetree, !=, NULL);
	ASSERT3P(*freed_tree, !=, NULL);

	if (range_tree_space(alloctree) == 0 &&
	    range_tree_space(*freetree) == 0)
		return;

	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_tree. No other thread can
	 * be modifying this txg's alloctree, freetree, freed_tree, or
	 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
	 * space_map ASSERTs. We drop it whenever we call into the DMU,
	 * because the DMU can call down to us (e.g. via zio_free()) at
	 * any time.
	 */

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	if (msp->ms_sm == NULL) {
		uint64_t new_object;

		new_object = space_map_alloc(mos, tx);
		VERIFY3U(new_object, !=, 0);

		VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
		    msp->ms_start, msp->ms_size, vd->vdev_ashift,
		    &msp->ms_lock));
		ASSERT(msp->ms_sm != NULL);
	}

	mutex_enter(&msp->ms_lock);

	if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
	    metaslab_should_condense(msp)) {
		metaslab_condense(msp, txg, tx);
	} else {
		space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
		space_map_write(msp->ms_sm, *freetree, SM_FREE, tx);
	}

	range_tree_vacate(alloctree, NULL, NULL);

	if (msp->ms_loaded) {
		/*
		 * When the space map is loaded, we have an accurate
		 * histogram in the range tree. This gives us an opportunity
		 * to bring the space map's histogram up-to-date, so we clear
		 * it first before updating it.
		 */
		space_map_histogram_clear(msp->ms_sm);
		space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
	} else {
		/*
		 * Since the space map is not loaded, we simply update the
		 * existing histogram with what was freed in this txg. This
		 * means that the on-disk histogram may not have an accurate
		 * view of the free space, but it's close enough to allow
		 * us to make allocation decisions.
		 */
		space_map_histogram_add(msp->ms_sm, *freetree, tx);
	}

	/*
	 * For sync pass 1, we avoid traversing this txg's free range tree
	 * and instead will just swap the pointers for freetree and
	 * freed_tree. We can safely do this since the freed_tree is
	 * guaranteed to be empty on the initial pass.
	 */
	if (spa_sync_pass(spa) == 1) {
		range_tree_swap(freetree, freed_tree);
	} else {
		range_tree_vacate(*freetree, range_tree_add, *freed_tree);
	}

	ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
	ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));

	mutex_exit(&msp->ms_lock);

	if (object != space_map_object(msp->ms_sm)) {
		object = space_map_object(msp->ms_sm);
		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
		    msp->ms_id, sizeof (uint64_t), &object, tx);
	}
	dmu_tx_commit(tx);
}
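
/*
 * A small sketch (hypothetical helper, not used by the code above) of the
 * per-txg indexing that metaslab_sync() relies on: each metaslab keeps
 * TXG_SIZE alloc/free trees and selects a txg's view by masking. Assuming
 * the usual TXG_SIZE of 4, txg 42 maps to slot 42 & TXG_MASK = 2, while
 * TXG_CLEAN(42) selects the just-cleaned txg's slot, 41 & TXG_MASK = 1.
 * This is why syncing one txg never races against another txg's views.
 */
static void
metaslab_txg_trees_example(metaslab_t *msp, uint64_t txg,
    range_tree_t **freetreep, range_tree_t **freed_treep)
{
	/* The tree accumulating frees for this txg... */
	*freetreep = msp->ms_freetree[txg & TXG_MASK];
	/* ...and the tree holding frees from the txg that just cleaned. */
	*freed_treep = msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
}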

/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	range_tree_t **freed_tree;
	range_tree_t **defer_tree;
	int64_t alloc_delta, defer_delta;
	int t;

	ASSERT(!vd->vdev_ishole);

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * alloctrees, freetrees, and defertrees and add its capacity to
	 * the vdev.
	 */
	if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
		for (t = 0; t < TXG_SIZE; t++) {
			ASSERT(msp->ms_alloctree[t] == NULL);
			ASSERT(msp->ms_freetree[t] == NULL);

			msp->ms_alloctree[t] = range_tree_create(NULL, msp,
			    &msp->ms_lock);
			msp->ms_freetree[t] = range_tree_create(NULL, msp,
			    &msp->ms_lock);
		}

		for (t = 0; t < TXG_DEFER_SIZE; t++) {
			ASSERT(msp->ms_defertree[t] == NULL);

			msp->ms_defertree[t] = range_tree_create(NULL, msp,
			    &msp->ms_lock);
		}

		vdev_space_update(vd, 0, 0, msp->ms_size);
	}

	freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
	defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];

	alloc_delta = space_map_alloc_delta(msp->ms_sm);
	defer_delta = range_tree_space(*freed_tree) -
	    range_tree_space(*defer_tree);

	vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);

	ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
	ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));

	/*
	 * If there's a metaslab_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 */
	metaslab_load_wait(msp);

	/*
	 * Move the frees from the defer_tree back to the free
	 * range tree (if it's loaded). Swap the freed_tree and the
	 * defer_tree -- this is safe to do because we've just emptied out
	 * the defer_tree.
	 */
	range_tree_vacate(*defer_tree,
	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
	range_tree_swap(freed_tree, defer_tree);

	space_map_update(msp->ms_sm);

	msp->ms_deferspace += defer_delta;
	ASSERT3S(msp->ms_deferspace, >=, 0);
	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
	if (msp->ms_deferspace != 0) {
		/*
		 * Keep syncing this metaslab until all deferred frees
		 * are back in circulation.
		 */
		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
	}

	if (msp->ms_loaded && msp->ms_access_txg < txg) {
		for (t = 1; t < TXG_CONCURRENT_STATES; t++) {
			VERIFY0(range_tree_space(
			    msp->ms_alloctree[(txg + t) & TXG_MASK]));
		}

		if (!metaslab_debug_unload)
			metaslab_unload(msp);
	}

	metaslab_group_sort(mg, msp, metaslab_weight(msp));
	mutex_exit(&msp->ms_lock);
}
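
/*
 * A worked sketch of the defer cycle driven above (hypothetical helper,
 * not wired into the build): frees that finish syncing in txg N are
 * parked in ms_defertree[N % TXG_DEFER_SIZE] and only re-enter the
 * allocatable ms_tree TXG_DEFER_SIZE txgs later. Assuming the usual
 * TXG_DEFER_SIZE of 2, blocks freed in txg 10 land in slot 0 and become
 * allocatable again only once txg 12 completes, which keeps recently
 * freed space out of circulation while older uberblocks may still
 * reference it.
 */
static uint64_t
metaslab_defer_slot_example(uint64_t txg)
{
	/* The slot whose deferred frees are released as this txg ends. */
	return (txg % TXG_DEFER_SIZE);
}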

void
metaslab_sync_reassess(metaslab_group_t *mg)
{
	metaslab_group_alloc_update(mg);

	/*
	 * Preload the next potential metaslabs.
	 */
	metaslab_group_preload(mg);
}

static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
	uint64_t start = msp->ms_id;

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (1ULL << 63);

	if (offset < start)
		return ((start - offset) << ms_shift);
	if (offset > start)
		return ((offset - start) << ms_shift);
	return (0);
}
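
/*
 * Illustrative arithmetic for metaslab_distance() above; the helper and
 * the concrete shift below are assumptions for the example only. With a
 * vdev_ms_shift of 30 (1 GB metaslabs), a DVA at offset 5 GB falls in
 * metaslab slot 5, so its distance to metaslab 12 on the same vdev is
 * (12 - 5) << 30 = 7 GB. A DVA on a different vdev reports 1ULL << 63,
 * far enough to fail any minimum-distance requirement.
 */
static uint64_t
metaslab_distance_example(uint64_t dva_offset, uint64_t ms_id,
    uint64_t ms_shift)
{
	uint64_t slot = dva_offset >> ms_shift;

	/* Absolute distance between the two metaslab slots, in bytes. */
	return (slot > ms_id ? (slot - ms_id) << ms_shift :
	    (ms_id - slot) << ms_shift);
}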

static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
    uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t activation_weight;
	uint64_t target_distance;
	int i;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (i = 0; i < d; i++) {
		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_SECONDARY;
			break;
		}
	}

	for (;;) {
		boolean_t was_active;

		mutex_enter(&mg->mg_lock);
		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
			if (msp->ms_weight < asize) {
				spa_dbgmsg(spa, "%s: failed to meet weight "
				    "requirement: vdev %llu, txg %llu, mg %p, "
				    "msp %p, psize %llu, asize %llu, "
				    "weight %llu", spa_name(spa),
				    mg->mg_vd->vdev_id, txg,
				    mg, msp, psize, asize, msp->ms_weight);
				mutex_exit(&mg->mg_lock);
				return (-1ULL);
			}

			/*
			 * If the selected metaslab is condensing, skip it.
			 */
			if (msp->ms_condensing)
				continue;

			was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
				break;

			target_distance = min_distance +
			    (space_map_allocated(msp->ms_sm) != 0 ? 0 :
			    min_distance >> 1);

			for (i = 0; i < d; i++)
				if (metaslab_distance(msp, &dva[i]) <
				    target_distance)
					break;
			if (i == d)
				break;
		}
		mutex_exit(&mg->mg_lock);
		if (msp == NULL)
			return (-1ULL);

		mutex_enter(&msp->ms_lock);

		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request. It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock.
		 */
		if (msp->ms_weight < asize || (was_active &&
		    !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY)) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			metaslab_passivate(msp,
			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (metaslab_activate(msp, activation_weight) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If this metaslab is currently condensing then pick again
		 * as we can't manipulate this metaslab until it's committed
		 * to disk.
		 */
		if (msp->ms_condensing) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL)
			break;

		metaslab_passivate(msp, metaslab_block_maxsize(msp));
		mutex_exit(&msp->ms_lock);
	}

	if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

	range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize);
	msp->ms_access_txg = txg + metaslab_unload_delay;

	mutex_exit(&msp->ms_lock);

	return (offset);
}
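
/*
 * A small sketch (hypothetical helper) of the distance bar applied while
 * scanning for a secondary metaslab above: a metaslab that has never been
 * written to must clear a 50% larger bar, nudging fresh allocations to
 * spread out. With min_distance = 10 MB, the target is 10 MB for a
 * metaslab with existing allocations and 15 MB for an empty one.
 */
static uint64_t
metaslab_target_distance_example(uint64_t min_distance, boolean_t has_alloc)
{
	/* Empty metaslabs pay an extra min_distance / 2. */
	return (min_distance + (has_alloc ? 0 : min_distance >> 1));
}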

/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
{
	metaslab_group_t *mg, *fast_mg, *rotor;
	vdev_t *vd;
	int dshift = 3;
	int all_zero;
	int zio_lock = B_FALSE;
	boolean_t allocatable;
	uint64_t offset = -1ULL;
	uint64_t asize;
	uint64_t distance;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 */
	if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
		return (SET_ERROR(ENOSPC));

	if (flags & METASLAB_FASTWRITE)
		mutex_enter(&mc->mc_fastwrite_lock);

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_aliquot because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly. It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs. If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try to spread them out on
	 * that vdev as much as possible. If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about. Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data. With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header. That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

		/*
		 * It's possible the vdev we're using as the hint no
		 * longer exists (i.e. removed). Consult the rotor when
		 * all else fails.
		 */
		if (vd != NULL) {
			mg = vd->vdev_mg;

			if (flags & METASLAB_HINTBP_AVOID &&
			    mg->mg_next != NULL)
				mg = mg->mg_next;
		} else {
			mg = mc->mc_rotor;
		}
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else if (flags & METASLAB_FASTWRITE) {
		mg = fast_mg = mc->mc_rotor;

		do {
			if (fast_mg->mg_vd->vdev_pending_fastwrite <
			    mg->mg_vd->vdev_pending_fastwrite)
				mg = fast_mg;
		} while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);

	} else {
		mg = mc->mc_rotor;
	}

	/*
	 * If the hint put us into the wrong metaslab class, or into a
	 * metaslab group that has been passivated, just follow the rotor.
	 */
	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
		mg = mc->mc_rotor;

	rotor = mg;
top:
	all_zero = B_TRUE;
	do {
		ASSERT(mg->mg_activation_count == 1);

		vd = mg->mg_vd;

		/*
		 * Don't allocate from faulted devices.
		 */
		if (zio_lock) {
			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
			allocatable = vdev_allocatable(vd);
			spa_config_exit(spa, SCL_ZIO, FTAG);
		} else {
			allocatable = vdev_allocatable(vd);
		}

		/*
		 * Determine if the selected metaslab group is eligible
		 * for allocations. If we're ganging or have requested
		 * an allocation for the smallest gang block size
		 * then we don't want to avoid allocating to this
		 * metaslab group. If we're in this condition we should
		 * try to allocate from any device possible so that we
		 * don't inadvertently return ENOSPC and suspend the pool
		 * even though space is still available.
		 */
		if (allocatable && CAN_FASTGANG(flags) &&
		    psize > SPA_GANGBLOCKSIZE)
			allocatable = metaslab_group_allocatable(mg);

		if (!allocatable)
			goto next;

		/*
		 * Avoid writing single-copy data to a failing vdev
		 * unless the user instructs us that it is okay.
		 */
		if ((vd->vdev_stat.vs_write_errors > 0 ||
		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
		    d == 0 && dshift == 3 &&
		    !(zfs_write_to_degraded && vd->vdev_state ==
		    VDEV_STATE_DEGRADED)) {
			all_zero = B_FALSE;
			goto next;
		}

		ASSERT(mg->mg_class == mc);

		distance = vd->vdev_asize >> dshift;
		if (distance <= (1ULL << vd->vdev_ms_shift))
			distance = 0;
		else
			all_zero = B_FALSE;

		asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
		    dva, d);
		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 */
			if (mc->mc_aliquot == 0) {
				vdev_stat_t *vs = &vd->vdev_stat;
				int64_t vu, cu;

				vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
				cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);

				/*
				 * Calculate how much more or less we should
				 * try to allocate from this device during
				 * this iteration around the rotor.
				 * For example, if a device is 80% full
				 * and the pool is 20% full then we should
				 * reduce allocations by 60% on this device.
				 *
				 * mg_bias = (20 - 80) * 512K / 100 = -307K
				 *
				 * This reduces allocations by 307K for this
				 * iteration.
				 */
				mg->mg_bias = ((cu - vu) *
				    (int64_t)mg->mg_aliquot) / 100;
			}

			if ((flags & METASLAB_FASTWRITE) ||
			    atomic_add_64_nv(&mc->mc_aliquot, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_aliquot = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
			DVA_SET_ASIZE(&dva[d], asize);

			if (flags & METASLAB_FASTWRITE) {
				atomic_add_64(&vd->vdev_pending_fastwrite,
				    psize);
				mutex_exit(&mc->mc_fastwrite_lock);
			}

			return (0);
		}
next:
		mc->mc_rotor = mg->mg_next;
		mc->mc_aliquot = 0;
	} while ((mg = mg->mg_next) != rotor);

	if (!all_zero) {
		dshift++;
		ASSERT(dshift < 64);
		goto top;
	}

	if (!allocatable && !zio_lock) {
		dshift = 3;
		zio_lock = B_TRUE;
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	if (flags & METASLAB_FASTWRITE)
		mutex_exit(&mc->mc_fastwrite_lock);

	return (SET_ERROR(ENOSPC));
}
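
/*
 * A minimal sketch (hypothetical helper) of the rotor-bias arithmetic in
 * metaslab_alloc_dva() above. Given vdev and pool utilization percentages
 * and the group's aliquot, it returns the signed adjustment to that
 * group's share of the rotor. Replaying the in-code example: a vdev 80%
 * full in a pool 20% full with a 512K aliquot yields
 * (20 - 80) * 512K / 100 = -307K, shrinking that vdev's share for this
 * pass around the rotor.
 */
static int64_t
metaslab_rotor_bias_example(int64_t vdev_used_pct, int64_t pool_used_pct,
    int64_t mg_aliquot)
{
	/* Negative when the vdev is fuller than the pool average. */
	return ((pool_used_pct - vdev_used_pct) * mg_aliquot / 100);
}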

/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;

	ASSERT(DVA_IS_VALID(dva));

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset);
		ASSERT(0);
		return;
	}

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if (now) {
		range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
		    offset, size);

		VERIFY(!msp->ms_condensing);
		VERIFY3U(offset, >=, msp->ms_start);
		VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
		VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
		    msp->ms_size);
		VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
		range_tree_add(msp->ms_tree, offset, size);
	} else {
		if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_freetree[txg & TXG_MASK],
		    offset, size);
	}

	mutex_exit(&msp->ms_lock);
}

/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;
	int error = 0;

	ASSERT(DVA_IS_VALID(dva));

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
		return (SET_ERROR(ENXIO));

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
		error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);

	if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
		error = SET_ERROR(ENOENT);

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	VERIFY(!msp->ms_condensing);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
	range_tree_remove(msp->ms_tree, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);

	return (0);
}

int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int d, error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

	for (d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags);
		if (error != 0) {
			for (d--; d >= 0; d--) {
				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, txg);

	return (0);
}

void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int d, ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++)
		metaslab_free_dva(spa, &dva[d], txg, now);

	spa_config_exit(spa, SCL_FREE, FTAG);
}

int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int d, error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			break;

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}

void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		atomic_add_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}

void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
		atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}

void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
	int i, j;

	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
		metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

		if (msp->ms_loaded)
			range_tree_verify(msp->ms_tree, offset, size);

		for (j = 0; j < TXG_SIZE; j++)
			range_tree_verify(msp->ms_freetree[j], offset, size);
		for (j = 0; j < TXG_DEFER_SIZE; j++)
			range_tree_verify(msp->ms_defertree[j], offset, size);
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(metaslab_debug_load, int, 0644);
module_param(metaslab_debug_unload, int, 0644);
MODULE_PARM_DESC(metaslab_debug_load,
	"load all metaslabs when pool is first opened");
MODULE_PARM_DESC(metaslab_debug_unload,
	"prevent metaslabs from being unloaded");

module_param(zfs_mg_noalloc_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
	"percentage of free space for metaslab group to allow allocation");
#endif /* _KERNEL && HAVE_SPL */