1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 */
26
27 #include <sys/zfs_context.h>
28 #include <sys/dmu.h>
29 #include <sys/dmu_tx.h>
30 #include <sys/space_map.h>
31 #include <sys/metaslab_impl.h>
32 #include <sys/vdev_impl.h>
33 #include <sys/zio.h>
34 #include <sys/spa_impl.h>
35
36 #define WITH_DF_BLOCK_ALLOCATOR
37
38 /*
39 * Allow allocations to switch to gang blocks quickly. We do this to
40 * avoid having to load lots of space_maps in a given txg. There are,
41 * however, some cases where we want to avoid "fast" ganging and instead
42 * we want to do an exhaustive search of all metaslabs on this device.
43 * Currently we don't allow any gang, zil, or dump device related allocations
44 * to "fast" gang.
45 */
46 #define CAN_FASTGANG(flags) \
47 (!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
48 METASLAB_GANG_AVOID)))
49
50 #define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
51 #define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
52 #define METASLAB_ACTIVE_MASK \
53 (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
54
55 uint64_t metaslab_aliquot = 512ULL << 10;
56 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
57
58 /*
59 * The in-core space map representation is more compact than its on-disk form.
60 * The zfs_condense_pct determines how much more compact the in-core
61 * space_map representation must be before we compact it on-disk.
62 * Values should be greater than or equal to 100.
63 */
64 int zfs_condense_pct = 200;
65
66 /*
67 * This value defines the number of allowed allocation failures per vdev.
68 * If a device reaches this threshold in a given txg then we consider skipping
69 * allocations on that device. The value of zfs_mg_alloc_failures is computed
70 * in zio_init() unless it has been overridden in /etc/system.
71 */
72 int zfs_mg_alloc_failures = 0;
73
74 /*
75 * The zfs_mg_noalloc_threshold defines which metaslab groups should
76 * be eligible for allocation. The value is defined as a percentage of
77  * free space. Metaslab groups that have more free space than
78 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
79 * a metaslab group's free space is less than or equal to the
80 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
81 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
82 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
83 * groups are allowed to accept allocations. Gang blocks are always
84 * eligible to allocate on any metaslab group. The default value of 0 means
85 * no metaslab group will be excluded based on this criterion.
86 */
87 int zfs_mg_noalloc_threshold = 0;
88
89 /*
90 * When set will load all metaslabs when pool is first opened.
91 */
92 int metaslab_debug_load = 0;
93
94 /*
95 * When set will prevent metaslabs from being unloaded.
96 */
97 int metaslab_debug_unload = 0;
98
99 /*
100 * Minimum size which forces the dynamic allocator to change
101  * its allocation strategy. Once the space map cannot satisfy
102  * an allocation of this size then it switches to using a more
103  * aggressive strategy (i.e. search by size rather than offset).
104 */
105 uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
106
107 /*
108 * The minimum free space, in percent, which must be available
109 * in a space map to continue allocations in a first-fit fashion.
110 * Once the space_map's free space drops below this level we dynamically
111 * switch to using best-fit allocations.
112 */
113 int metaslab_df_free_pct = 4;
114
115 /*
116 * A metaslab is considered "free" if it contains a contiguous
117 * segment which is greater than metaslab_min_alloc_size.
118 */
119 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
120
121 /*
122 * Percentage of all cpus that can be used by the metaslab taskq.
123 */
124 int metaslab_load_pct = 50;
125
126 /*
127 * Determines how many txgs a metaslab may remain loaded without having any
128 * allocations from it. As long as a metaslab continues to be used we will
129 * keep it loaded.
130 */
131 int metaslab_unload_delay = TXG_SIZE * 2;
132
133 /*
134 * Should we be willing to write data to degraded vdevs?
135 */
136 boolean_t zfs_write_to_degraded = B_FALSE;
137
138 /*
139 * Max number of metaslabs per group to preload.
140 */
141 int metaslab_preload_limit = SPA_DVAS_PER_BP;
142
143 /*
144 * Enable/disable preloading of metaslab.
145 */
146 boolean_t metaslab_preload_enabled = B_TRUE;
147
148 /*
149 * Enable/disable additional weight factor for each metaslab.
150 */
151 boolean_t metaslab_weight_factor_enable = B_FALSE;
152
153
154 /*
155 * ==========================================================================
156 * Metaslab classes
157 * ==========================================================================
158 */
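/*
 * Allocate and initialize a metaslab class for the given spa, using the
 * supplied allocator ops vector for its metaslabs.
 */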
159 metaslab_class_t *
160 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
161 {
162 metaslab_class_t *mc;
163
164 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_PUSHPAGE);
165
166 mc->mc_spa = spa;
167 mc->mc_rotor = NULL;
168 mc->mc_ops = ops;
169 mutex_init(&mc->mc_fastwrite_lock, NULL, MUTEX_DEFAULT, NULL);
170
171 return (mc);
172 }
173
174 void
175 metaslab_class_destroy(metaslab_class_t *mc)
176 {
177 ASSERT(mc->mc_rotor == NULL);
178 ASSERT(mc->mc_alloc == 0);
179 ASSERT(mc->mc_deferred == 0);
180 ASSERT(mc->mc_space == 0);
181 ASSERT(mc->mc_dspace == 0);
182
183 mutex_destroy(&mc->mc_fastwrite_lock);
184 kmem_free(mc, sizeof (metaslab_class_t));
185 }
186
187 int
188 metaslab_class_validate(metaslab_class_t *mc)
189 {
190 metaslab_group_t *mg;
191 vdev_t *vd;
192
193 /*
194 * Must hold one of the spa_config locks.
195 */
196 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
197 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
198
199 if ((mg = mc->mc_rotor) == NULL)
200 return (0);
201
202 do {
203 vd = mg->mg_vd;
204 ASSERT(vd->vdev_mg != NULL);
205 ASSERT3P(vd->vdev_top, ==, vd);
206 ASSERT3P(mg->mg_class, ==, mc);
207 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
208 } while ((mg = mg->mg_next) != mc->mc_rotor);
209
210 return (0);
211 }
212
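/*
 * Atomically apply allocated, deferred, space, and deflated-space deltas
 * to the class-wide accounting counters.
 */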
213 void
214 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
215 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
216 {
217 atomic_add_64(&mc->mc_alloc, alloc_delta);
218 atomic_add_64(&mc->mc_deferred, defer_delta);
219 atomic_add_64(&mc->mc_space, space_delta);
220 atomic_add_64(&mc->mc_dspace, dspace_delta);
221 }
222
223 uint64_t
224 metaslab_class_get_alloc(metaslab_class_t *mc)
225 {
226 return (mc->mc_alloc);
227 }
228
229 uint64_t
230 metaslab_class_get_deferred(metaslab_class_t *mc)
231 {
232 return (mc->mc_deferred);
233 }
234
235 uint64_t
236 metaslab_class_get_space(metaslab_class_t *mc)
237 {
238 return (mc->mc_space);
239 }
240
241 uint64_t
242 metaslab_class_get_dspace(metaslab_class_t *mc)
243 {
244 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
245 }
246
247 /*
248 * ==========================================================================
249 * Metaslab groups
250 * ==========================================================================
251 */
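/*
 * Comparison function for the metaslab group AVL tree. Metaslabs are
 * sorted by weight in descending order; ties are broken by the metaslab's
 * start offset so that entries remain unique.
 */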
252 static int
253 metaslab_compare(const void *x1, const void *x2)
254 {
255 const metaslab_t *m1 = x1;
256 const metaslab_t *m2 = x2;
257
258 if (m1->ms_weight < m2->ms_weight)
259 return (1);
260 if (m1->ms_weight > m2->ms_weight)
261 return (-1);
262
263 /*
264 * If the weights are identical, use the offset to force uniqueness.
265 */
266 if (m1->ms_start < m2->ms_start)
267 return (-1);
268 if (m1->ms_start > m2->ms_start)
269 return (1);
270
271 ASSERT3P(m1, ==, m2);
272
273 return (0);
274 }
275
276 /*
277 * Update the allocatable flag and the metaslab group's capacity.
278  * The allocatable flag is set to true if the group's free capacity
279  * is above the zfs_mg_noalloc_threshold. If a metaslab group transitions
280 * from allocatable to non-allocatable or vice versa then the metaslab
281 * group's class is updated to reflect the transition.
282 */
283 static void
284 metaslab_group_alloc_update(metaslab_group_t *mg)
285 {
286 vdev_t *vd = mg->mg_vd;
287 metaslab_class_t *mc = mg->mg_class;
288 vdev_stat_t *vs = &vd->vdev_stat;
289 boolean_t was_allocatable;
290
291 ASSERT(vd == vd->vdev_top);
292
293 mutex_enter(&mg->mg_lock);
294 was_allocatable = mg->mg_allocatable;
295
296 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
297 (vs->vs_space + 1);
298
299 mg->mg_allocatable = (mg->mg_free_capacity > zfs_mg_noalloc_threshold);
300
301 /*
302 * The mc_alloc_groups maintains a count of the number of
303 * groups in this metaslab class that are still above the
304 * zfs_mg_noalloc_threshold. This is used by the allocating
305 * threads to determine if they should avoid allocations to
306 * a given group. The allocator will avoid allocations to a group
307 * if that group has reached or is below the zfs_mg_noalloc_threshold
308 * and there are still other groups that are above the threshold.
309 * When a group transitions from allocatable to non-allocatable or
310 * vice versa we update the metaslab class to reflect that change.
311 * When the mc_alloc_groups value drops to 0 that means that all
312 * groups have reached the zfs_mg_noalloc_threshold making all groups
313 * eligible for allocations. This effectively means that all devices
314 * are balanced again.
315 */
316 if (was_allocatable && !mg->mg_allocatable)
317 mc->mc_alloc_groups--;
318 else if (!was_allocatable && mg->mg_allocatable)
319 mc->mc_alloc_groups++;
320 mutex_exit(&mg->mg_lock);
321 }
322
323 metaslab_group_t *
324 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
325 {
326 metaslab_group_t *mg;
327
328 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_PUSHPAGE);
329 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
330 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
331 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
332 mg->mg_vd = vd;
333 mg->mg_class = mc;
334 mg->mg_activation_count = 0;
335
336 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
337 minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);
338
339 return (mg);
340 }
341
342 void
343 metaslab_group_destroy(metaslab_group_t *mg)
344 {
345 ASSERT(mg->mg_prev == NULL);
346 ASSERT(mg->mg_next == NULL);
347 /*
348 * We may have gone below zero with the activation count
349 * either because we never activated in the first place or
350 * because we're done, and possibly removing the vdev.
351 */
352 ASSERT(mg->mg_activation_count <= 0);
353
354 taskq_destroy(mg->mg_taskq);
355 avl_destroy(&mg->mg_metaslab_tree);
356 mutex_destroy(&mg->mg_lock);
357 kmem_free(mg, sizeof (metaslab_group_t));
358 }
359
360 void
361 metaslab_group_activate(metaslab_group_t *mg)
362 {
363 metaslab_class_t *mc = mg->mg_class;
364 metaslab_group_t *mgprev, *mgnext;
365
366 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
367
368 ASSERT(mc->mc_rotor != mg);
369 ASSERT(mg->mg_prev == NULL);
370 ASSERT(mg->mg_next == NULL);
371 ASSERT(mg->mg_activation_count <= 0);
372
373 if (++mg->mg_activation_count <= 0)
374 return;
375
376 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
377 metaslab_group_alloc_update(mg);
378
379 if ((mgprev = mc->mc_rotor) == NULL) {
380 mg->mg_prev = mg;
381 mg->mg_next = mg;
382 } else {
383 mgnext = mgprev->mg_next;
384 mg->mg_prev = mgprev;
385 mg->mg_next = mgnext;
386 mgprev->mg_next = mg;
387 mgnext->mg_prev = mg;
388 }
389 mc->mc_rotor = mg;
390 }
391
392 void
393 metaslab_group_passivate(metaslab_group_t *mg)
394 {
395 metaslab_class_t *mc = mg->mg_class;
396 metaslab_group_t *mgprev, *mgnext;
397
398 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
399
400 if (--mg->mg_activation_count != 0) {
401 ASSERT(mc->mc_rotor != mg);
402 ASSERT(mg->mg_prev == NULL);
403 ASSERT(mg->mg_next == NULL);
404 ASSERT(mg->mg_activation_count < 0);
405 return;
406 }
407
408 taskq_wait(mg->mg_taskq);
409
410 mgprev = mg->mg_prev;
411 mgnext = mg->mg_next;
412
413 if (mg == mgnext) {
414 mc->mc_rotor = NULL;
415 } else {
416 mc->mc_rotor = mgnext;
417 mgprev->mg_next = mgnext;
418 mgnext->mg_prev = mgprev;
419 }
420
421 mg->mg_prev = NULL;
422 mg->mg_next = NULL;
423 }
424
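/*
 * Insert a metaslab into its group's weight-sorted AVL tree with an
 * initial weight of zero; the real weight is assigned later via
 * metaslab_group_sort().
 */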
425 static void
426 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
427 {
428 mutex_enter(&mg->mg_lock);
429 ASSERT(msp->ms_group == NULL);
430 msp->ms_group = mg;
431 msp->ms_weight = 0;
432 avl_add(&mg->mg_metaslab_tree, msp);
433 mutex_exit(&mg->mg_lock);
434 }
435
436 static void
437 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
438 {
439 mutex_enter(&mg->mg_lock);
440 ASSERT(msp->ms_group == mg);
441 avl_remove(&mg->mg_metaslab_tree, msp);
442 msp->ms_group = NULL;
443 mutex_exit(&mg->mg_lock);
444 }
445
446 static void
447 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
448 {
449 /*
450 * Although in principle the weight can be any value, in
451 * practice we do not use values in the range [1, 510].
452 */
453 ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
454 ASSERT(MUTEX_HELD(&msp->ms_lock));
455
456 mutex_enter(&mg->mg_lock);
457 ASSERT(msp->ms_group == mg);
458 avl_remove(&mg->mg_metaslab_tree, msp);
459 msp->ms_weight = weight;
460 avl_add(&mg->mg_metaslab_tree, msp);
461 mutex_exit(&mg->mg_lock);
462 }
463
464 /*
465 * Determine if a given metaslab group should skip allocations. A metaslab
466  * group should avoid allocations if its free capacity has dropped to or
467  * below the zfs_mg_noalloc_threshold and there is at least one metaslab group
468 * that can still handle allocations.
469 */
470 static boolean_t
471 metaslab_group_allocatable(metaslab_group_t *mg)
472 {
473 vdev_t *vd = mg->mg_vd;
474 spa_t *spa = vd->vdev_spa;
475 metaslab_class_t *mc = mg->mg_class;
476
477 /*
478 * A metaslab group is considered allocatable if its free capacity
479 * is greater than the set value of zfs_mg_noalloc_threshold, it's
480 * associated with a slog, or there are no other metaslab groups
481 * with free capacity greater than zfs_mg_noalloc_threshold.
482 */
483 return (mg->mg_free_capacity > zfs_mg_noalloc_threshold ||
484 mc != spa_normal_class(spa) || mc->mc_alloc_groups == 0);
485 }
486
487 /*
488 * ==========================================================================
489 * Range tree callbacks
490 * ==========================================================================
491 */
492
493 /*
494 * Comparison function for the private size-ordered tree. Tree is sorted
495 * by size, larger sizes at the end of the tree.
496 */
497 static int
498 metaslab_rangesize_compare(const void *x1, const void *x2)
499 {
500 const range_seg_t *r1 = x1;
501 const range_seg_t *r2 = x2;
502 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
503 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
504
505 if (rs_size1 < rs_size2)
506 return (-1);
507 if (rs_size1 > rs_size2)
508 return (1);
509
510 if (r1->rs_start < r2->rs_start)
511 return (-1);
512
513 if (r1->rs_start > r2->rs_start)
514 return (1);
515
516 return (0);
517 }
518
519 /*
520 * Create any block allocator specific components. The current allocators
521 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
522 */
523 static void
524 metaslab_rt_create(range_tree_t *rt, void *arg)
525 {
526 metaslab_t *msp = arg;
527
528 ASSERT3P(rt->rt_arg, ==, msp);
529 ASSERT(msp->ms_tree == NULL);
530
531 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
532 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
533 }
534
535 /*
536 * Destroy the block allocator specific components.
537 */
538 static void
539 metaslab_rt_destroy(range_tree_t *rt, void *arg)
540 {
541 metaslab_t *msp = arg;
542
543 ASSERT3P(rt->rt_arg, ==, msp);
544 ASSERT3P(msp->ms_tree, ==, rt);
545 ASSERT0(avl_numnodes(&msp->ms_size_tree));
546
547 avl_destroy(&msp->ms_size_tree);
548 }
549
550 static void
551 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
552 {
553 metaslab_t *msp = arg;
554
555 ASSERT3P(rt->rt_arg, ==, msp);
556 ASSERT3P(msp->ms_tree, ==, rt);
557 VERIFY(!msp->ms_condensing);
558 avl_add(&msp->ms_size_tree, rs);
559 }
560
561 static void
562 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
563 {
564 metaslab_t *msp = arg;
565
566 ASSERT3P(rt->rt_arg, ==, msp);
567 ASSERT3P(msp->ms_tree, ==, rt);
568 VERIFY(!msp->ms_condensing);
569 avl_remove(&msp->ms_size_tree, rs);
570 }
571
572 static void
573 metaslab_rt_vacate(range_tree_t *rt, void *arg)
574 {
575 metaslab_t *msp = arg;
576
577 ASSERT3P(rt->rt_arg, ==, msp);
578 ASSERT3P(msp->ms_tree, ==, rt);
579
580 /*
581 * Normally one would walk the tree freeing nodes along the way.
582 * Since the nodes are shared with the range trees we can avoid
583 * walking all nodes and just reinitialize the avl tree. The nodes
584 * will be freed by the range tree, so we don't want to free them here.
585 */
586 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
587 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
588 }
589
590 static range_tree_ops_t metaslab_rt_ops = {
591 metaslab_rt_create,
592 metaslab_rt_destroy,
593 metaslab_rt_add,
594 metaslab_rt_remove,
595 metaslab_rt_vacate
596 };
597
598 /*
599 * ==========================================================================
600 * Metaslab block operations
601 * ==========================================================================
602 */
603
604 /*
605 * Return the maximum contiguous segment within the metaslab.
606 */
607 uint64_t
608 metaslab_block_maxsize(metaslab_t *msp)
609 {
610 avl_tree_t *t = &msp->ms_size_tree;
611 range_seg_t *rs;
612
613 if (t == NULL || (rs = avl_last(t)) == NULL)
614 return (0ULL);
615
616 return (rs->rs_end - rs->rs_start);
617 }
618
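/*
 * Allocate 'size' bytes from the metaslab using its allocator ops and,
 * on success, remove the allocated range from the in-core free tree.
 * Returns the starting offset, or -1ULL on failure.
 */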
619 uint64_t
620 metaslab_block_alloc(metaslab_t *msp, uint64_t size)
621 {
622 uint64_t start;
623 range_tree_t *rt = msp->ms_tree;
624
625 VERIFY(!msp->ms_condensing);
626
627 start = msp->ms_ops->msop_alloc(msp, size);
628 if (start != -1ULL) {
629 vdev_t *vd = msp->ms_group->mg_vd;
630
631 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
632 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
633 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
634 range_tree_remove(rt, start, size);
635 }
636 return (start);
637 }
638
639 /*
640 * ==========================================================================
641 * Common allocator routines
642 * ==========================================================================
643 */
644
645 #if defined(WITH_FF_BLOCK_ALLOCATOR) || \
646 defined(WITH_DF_BLOCK_ALLOCATOR) || \
647 defined(WITH_CF_BLOCK_ALLOCATOR)
648 /*
649 * This is a helper function that can be used by the allocator to find
650 * a suitable block to allocate. This will search the specified AVL
651 * tree looking for a block that matches the specified criteria.
652 */
653 static uint64_t
654 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
655 uint64_t align)
656 {
657 range_seg_t *rs, rsearch;
658 avl_index_t where;
659
660 rsearch.rs_start = *cursor;
661 rsearch.rs_end = *cursor + size;
662
663 rs = avl_find(t, &rsearch, &where);
664 if (rs == NULL)
665 rs = avl_nearest(t, where, AVL_AFTER);
666
667 while (rs != NULL) {
668 uint64_t offset = P2ROUNDUP(rs->rs_start, align);
669
670 if (offset + size <= rs->rs_end) {
671 *cursor = offset + size;
672 return (offset);
673 }
674 rs = AVL_NEXT(t, rs);
675 }
676
677 /*
678 * If we know we've searched the whole map (*cursor == 0), give up.
679 * Otherwise, reset the cursor to the beginning and try again.
680 */
681 if (*cursor == 0)
682 return (-1ULL);
683
684 *cursor = 0;
685 return (metaslab_block_picker(t, cursor, size, align));
686 }
687 #endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */
688
689 #if defined(WITH_FF_BLOCK_ALLOCATOR)
690 /*
691 * ==========================================================================
692 * The first-fit block allocator
693 * ==========================================================================
694 */
695 static uint64_t
696 metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
697 {
698 /*
699 * Find the largest power of 2 block size that evenly divides the
700 * requested size. This is used to try to allocate blocks with similar
701 * alignment from the same area of the metaslab (i.e. same cursor
702  * bucket), but it does not guarantee that other allocation sizes
703  * will not exist in the same region.
704 */
705 uint64_t align = size & -size;
706 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
707 avl_tree_t *t = &msp->ms_tree->rt_root;
708
709 return (metaslab_block_picker(t, cursor, size, align));
710 }
711
712 /* ARGSUSED */
713 static boolean_t
714 metaslab_ff_fragmented(metaslab_t *msp)
715 {
716 return (B_TRUE);
717 }
718
719 static metaslab_ops_t metaslab_ff_ops = {
720 metaslab_ff_alloc,
721 metaslab_ff_fragmented
722 };
723
724 metaslab_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
725 #endif /* WITH_FF_BLOCK_ALLOCATOR */
726
727 #if defined(WITH_DF_BLOCK_ALLOCATOR)
728 /*
729 * ==========================================================================
730 * Dynamic block allocator -
730  * Uses the first fit allocation scheme until space gets low and then
732 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
733 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
734 * ==========================================================================
735 */
736 static uint64_t
737 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
738 {
739 /*
740 * Find the largest power of 2 block size that evenly divides the
741 * requested size. This is used to try to allocate blocks with similar
742 * alignment from the same area of the metaslab (i.e. same cursor
743  * bucket), but it does not guarantee that other allocation sizes
744  * will not exist in the same region.
745 */
746 uint64_t align = size & -size;
747 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
748 range_tree_t *rt = msp->ms_tree;
749 avl_tree_t *t = &rt->rt_root;
750 uint64_t max_size = metaslab_block_maxsize(msp);
751 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
752
753 ASSERT(MUTEX_HELD(&msp->ms_lock));
754 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
755
756 if (max_size < size)
757 return (-1ULL);
758
759 /*
760 * If we're running low on space switch to using the size
761 * sorted AVL tree (best-fit).
762 */
763 if (max_size < metaslab_df_alloc_threshold ||
764 free_pct < metaslab_df_free_pct) {
765 t = &msp->ms_size_tree;
766 *cursor = 0;
767 }
768
769 return (metaslab_block_picker(t, cursor, size, 1ULL));
770 }
771
772 static boolean_t
773 metaslab_df_fragmented(metaslab_t *msp)
774 {
775 range_tree_t *rt = msp->ms_tree;
776 uint64_t max_size = metaslab_block_maxsize(msp);
777 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
778
779 if (max_size >= metaslab_df_alloc_threshold &&
780 free_pct >= metaslab_df_free_pct)
781 return (B_FALSE);
782
783
784 return (B_TRUE);
785 }
786
787 static metaslab_ops_t metaslab_df_ops = {
788 metaslab_df_alloc,
789 metaslab_df_fragmented
790 };
791
792 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
793 #endif /* WITH_DF_BLOCK_ALLOCATOR */
794
795 #if defined(WITH_CF_BLOCK_ALLOCATOR)
796 /*
797 * ==========================================================================
798 * Cursor fit block allocator -
799 * Select the largest region in the metaslab, set the cursor to the beginning
800 * of the range and the cursor_end to the end of the range. As allocations
801 * are made advance the cursor. Continue allocating from the cursor until
802 * the range is exhausted and then find a new range.
803 * ==========================================================================
804 */
805 static uint64_t
806 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
807 {
808 range_tree_t *rt = msp->ms_tree;
809 avl_tree_t *t = &msp->ms_size_tree;
810 uint64_t *cursor = &msp->ms_lbas[0];
811 uint64_t *cursor_end = &msp->ms_lbas[1];
812 uint64_t offset = 0;
813
814 ASSERT(MUTEX_HELD(&msp->ms_lock));
815 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
816
817 ASSERT3U(*cursor_end, >=, *cursor);
818
819 if ((*cursor + size) > *cursor_end) {
820 range_seg_t *rs;
821
822 rs = avl_last(&msp->ms_size_tree);
823 if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
824 return (-1ULL);
825
826 *cursor = rs->rs_start;
827 *cursor_end = rs->rs_end;
828 }
829
830 offset = *cursor;
831 *cursor += size;
832
833 return (offset);
834 }
835
836 static boolean_t
837 metaslab_cf_fragmented(metaslab_t *msp)
838 {
839 return (metaslab_block_maxsize(msp) < metaslab_min_alloc_size);
840 }
841
842 static metaslab_ops_t metaslab_cf_ops = {
843 metaslab_cf_alloc,
844 metaslab_cf_fragmented
845 };
846
847 metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
848 #endif /* WITH_CF_BLOCK_ALLOCATOR */
849
850 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
851 /*
852 * ==========================================================================
853 * New dynamic fit allocator -
854 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
855 * contiguous blocks. If no region is found then just use the largest segment
856 * that remains.
857 * ==========================================================================
858 */
859
860 /*
861 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
862 * to request from the allocator.
863 */
864 uint64_t metaslab_ndf_clump_shift = 4;
865
866 static uint64_t
867 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
868 {
869 avl_tree_t *t = &msp->ms_tree->rt_root;
870 avl_index_t where;
871 range_seg_t *rs, rsearch;
872 uint64_t hbit = highbit64(size);
873 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
874 uint64_t max_size = metaslab_block_maxsize(msp);
875
876 ASSERT(MUTEX_HELD(&msp->ms_lock));
877 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
878
879 if (max_size < size)
880 return (-1ULL);
881
882 rsearch.rs_start = *cursor;
883 rsearch.rs_end = *cursor + size;
884
885 rs = avl_find(t, &rsearch, &where);
886 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
887 t = &msp->ms_size_tree;
888
889 rsearch.rs_start = 0;
890 rsearch.rs_end = MIN(max_size,
891 1ULL << (hbit + metaslab_ndf_clump_shift));
892 rs = avl_find(t, &rsearch, &where);
893 if (rs == NULL)
894 rs = avl_nearest(t, where, AVL_AFTER);
895 ASSERT(rs != NULL);
896 }
897
898 if ((rs->rs_end - rs->rs_start) >= size) {
899 *cursor = rs->rs_start + size;
900 return (rs->rs_start);
901 }
902 return (-1ULL);
903 }
904
905 static boolean_t
906 metaslab_ndf_fragmented(metaslab_t *msp)
907 {
908 return (metaslab_block_maxsize(msp) <=
909 (metaslab_min_alloc_size << metaslab_ndf_clump_shift));
910 }
911
912 static metaslab_ops_t metaslab_ndf_ops = {
913 metaslab_ndf_alloc,
914 metaslab_ndf_fragmented
915 };
916
917 metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
918 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
919
920
921 /*
922 * ==========================================================================
923 * Metaslabs
924 * ==========================================================================
925 */
926
927 /*
928 * Wait for any in-progress metaslab loads to complete.
929 */
930 void
931 metaslab_load_wait(metaslab_t *msp)
932 {
933 ASSERT(MUTEX_HELD(&msp->ms_lock));
934
935 while (msp->ms_loading) {
936 ASSERT(!msp->ms_loaded);
937 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
938 }
939 }
940
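/*
 * Load the metaslab's space map into the in-core ms_tree, treating the
 * entire metaslab as free if no space map object exists, and then remove
 * any deferred frees since that space is not yet available for allocation.
 */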
941 int
942 metaslab_load(metaslab_t *msp)
943 {
944 int error = 0;
945 int t;
946
947 ASSERT(MUTEX_HELD(&msp->ms_lock));
948 ASSERT(!msp->ms_loaded);
949 ASSERT(!msp->ms_loading);
950
951 msp->ms_loading = B_TRUE;
952
953 /*
954 * If the space map has not been allocated yet, then treat
955 * all the space in the metaslab as free and add it to the
956 * ms_tree.
957 */
958 if (msp->ms_sm != NULL)
959 error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
960 else
961 range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);
962
963 msp->ms_loaded = (error == 0);
964 msp->ms_loading = B_FALSE;
965
966 if (msp->ms_loaded) {
967 for (t = 0; t < TXG_DEFER_SIZE; t++) {
968 range_tree_walk(msp->ms_defertree[t],
969 range_tree_remove, msp->ms_tree);
970 }
971 }
972 cv_broadcast(&msp->ms_load_cv);
973 return (error);
974 }
975
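/*
 * Discard the in-core free tree and mark the metaslab unloaded and
 * inactive.
 */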
976 void
977 metaslab_unload(metaslab_t *msp)
978 {
979 ASSERT(MUTEX_HELD(&msp->ms_lock));
980 range_tree_vacate(msp->ms_tree, NULL, NULL);
981 msp->ms_loaded = B_FALSE;
982 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
983 }
984
985 metaslab_t *
986 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg)
987 {
988 vdev_t *vd = mg->mg_vd;
989 objset_t *mos = vd->vdev_spa->spa_meta_objset;
990 metaslab_t *msp;
991
992 msp = kmem_zalloc(sizeof (metaslab_t), KM_PUSHPAGE);
993 mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);
994 cv_init(&msp->ms_load_cv, NULL, CV_DEFAULT, NULL);
995 msp->ms_id = id;
996 msp->ms_start = id << vd->vdev_ms_shift;
997 msp->ms_size = 1ULL << vd->vdev_ms_shift;
998
999 /*
1000 * We only open space map objects that already exist. All others
1001 * will be opened when we finally allocate an object for it.
1002 */
1003 if (object != 0) {
1004 VERIFY0(space_map_open(&msp->ms_sm, mos, object, msp->ms_start,
1005 msp->ms_size, vd->vdev_ashift, &msp->ms_lock));
1006 ASSERT(msp->ms_sm != NULL);
1007 }
1008
1009 /*
1010 * We create the main range tree here, but we don't create the
1011 * alloctree and freetree until metaslab_sync_done(). This serves
1012 * two purposes: it allows metaslab_sync_done() to detect the
1013 * addition of new space; and for debugging, it ensures that we'd
1014 * data fault on any attempt to use this metaslab before it's ready.
1015 */
1016 msp->ms_tree = range_tree_create(&metaslab_rt_ops, msp, &msp->ms_lock);
1017 metaslab_group_add(mg, msp);
1018
1019 msp->ms_ops = mg->mg_class->mc_ops;
1020
1021 /*
1022 * If we're opening an existing pool (txg == 0) or creating
1023 * a new one (txg == TXG_INITIAL), all space is available now.
1024 * If we're adding space to an existing pool, the new space
1025 * does not become available until after this txg has synced.
1026 */
1027 if (txg <= TXG_INITIAL)
1028 metaslab_sync_done(msp, 0);
1029
1030 /*
1031 * If metaslab_debug_load is set and we're initializing a metaslab
1032  * that has an allocated space_map object then load its space
1033  * map so that we can verify frees.
1034 */
1035 if (metaslab_debug_load && msp->ms_sm != NULL) {
1036 mutex_enter(&msp->ms_lock);
1037 VERIFY0(metaslab_load(msp));
1038 mutex_exit(&msp->ms_lock);
1039 }
1040
1041 if (txg != 0) {
1042 vdev_dirty(vd, 0, NULL, txg);
1043 vdev_dirty(vd, VDD_METASLAB, msp, txg);
1044 }
1045
1046 return (msp);
1047 }
1048
1049 void
1050 metaslab_fini(metaslab_t *msp)
1051 {
1052 int t;
1053
1054 metaslab_group_t *mg = msp->ms_group;
1055
1056 metaslab_group_remove(mg, msp);
1057
1058 mutex_enter(&msp->ms_lock);
1059
1060 VERIFY(msp->ms_group == NULL);
1061 vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1062 0, -msp->ms_size);
1063 space_map_close(msp->ms_sm);
1064
1065 metaslab_unload(msp);
1066 range_tree_destroy(msp->ms_tree);
1067
1068 for (t = 0; t < TXG_SIZE; t++) {
1069 range_tree_destroy(msp->ms_alloctree[t]);
1070 range_tree_destroy(msp->ms_freetree[t]);
1071 }
1072
1073 for (t = 0; t < TXG_DEFER_SIZE; t++) {
1074 range_tree_destroy(msp->ms_defertree[t]);
1075 }
1076
1077 ASSERT0(msp->ms_deferspace);
1078
1079 mutex_exit(&msp->ms_lock);
1080 cv_destroy(&msp->ms_load_cv);
1081 mutex_destroy(&msp->ms_lock);
1082
1083 kmem_free(msp, sizeof (metaslab_t));
1084 }
1085
1086 /*
1087 * Apply a weighting factor based on the histogram information for this
1088 * metaslab. The current weighting factor is somewhat arbitrary and requires
1089 * additional investigation. The implementation provides a measure of
1090 * "weighted" free space and gives a higher weighting for larger contiguous
1091 * regions. The weighting factor is determined by counting the number of
1092 * sm_shift sectors that exist in each region represented by the histogram.
1093 * That value is then multiplied by the power of 2 exponent and the sm_shift
1094 * value.
1095 *
1096 * For example, assume the 2^21 histogram bucket has 4 2MB regions and the
1097 * metaslab has an sm_shift value of 9 (512B):
1098 *
1099 * 1) calculate the number of sm_shift sectors in the region:
1100  *     2^21 / 2^9 = 2^12 = 4096; 4096 * 4 (number of regions) = 16384
1101 * 2) multiply by the power of 2 exponent and the sm_shift value:
1102 * 16384 * 21 * 9 = 3096576
1103 * This value will be added to the weighting of the metaslab.
1104 */
1105 static uint64_t
1106 metaslab_weight_factor(metaslab_t *msp)
1107 {
1108 uint64_t factor = 0;
1109 uint64_t sectors;
1110 int i;
1111
1112 /*
1113 * A null space map means that the entire metaslab is free,
1114 * calculate a weight factor that spans the entire size of the
1115 * metaslab.
1116 */
1117 if (msp->ms_sm == NULL) {
1118 vdev_t *vd = msp->ms_group->mg_vd;
1119
1120 i = highbit64(msp->ms_size) - 1;
1121 sectors = msp->ms_size >> vd->vdev_ashift;
1122 return (sectors * i * vd->vdev_ashift);
1123 }
1124
1125 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
1126 return (0);
1127
1128 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE(msp->ms_sm); i++) {
1129 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1130 continue;
1131
1132 /*
1133 * Determine the number of sm_shift sectors in the region
1134 * indicated by the histogram. For example, given an
1135 * sm_shift value of 9 (512 bytes) and i = 4 then we know
1136 * that we're looking at an 8K region in the histogram
1137 * (i.e. 9 + 4 = 13, 2^13 = 8192). To figure out the
1138 * number of sm_shift sectors (512 bytes in this example),
1139 * we would take 8192 / 512 = 16. Since the histogram
1140 * is offset by sm_shift we can simply use the value of
1141  * i to calculate this (i.e. 2^i = 16 where i = 4).
1142 */
1143 sectors = msp->ms_sm->sm_phys->smp_histogram[i] << i;
1144 factor += (i + msp->ms_sm->sm_shift) * sectors;
1145 }
1146 return (factor * msp->ms_sm->sm_shift);
1147 }
1148
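/*
 * Compute a metaslab's weight: its free space, scaled up for lower
 * numbered (outer, faster) metaslabs, optionally increased by the
 * histogram-based factor, and tagged with the activation bits if the
 * metaslab is loaded and not fragmented.
 */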
1149 static uint64_t
1150 metaslab_weight(metaslab_t *msp)
1151 {
1152 metaslab_group_t *mg = msp->ms_group;
1153 vdev_t *vd = mg->mg_vd;
1154 uint64_t weight, space;
1155
1156 ASSERT(MUTEX_HELD(&msp->ms_lock));
1157
1158 /*
1159 * This vdev is in the process of being removed so there is nothing
1160 * for us to do here.
1161 */
1162 if (vd->vdev_removing) {
1163 ASSERT0(space_map_allocated(msp->ms_sm));
1164 ASSERT0(vd->vdev_ms_shift);
1165 return (0);
1166 }
1167
1168 /*
1169 * The baseline weight is the metaslab's free space.
1170 */
1171 space = msp->ms_size - space_map_allocated(msp->ms_sm);
1172 weight = space;
1173
1174 /*
1175 * Modern disks have uniform bit density and constant angular velocity.
1176 * Therefore, the outer recording zones are faster (higher bandwidth)
1177 * than the inner zones by the ratio of outer to inner track diameter,
1178 * which is typically around 2:1. We account for this by assigning
1179 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1180 * In effect, this means that we'll select the metaslab with the most
1181 * free bandwidth rather than simply the one with the most free space.
1182 */
1183 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1184 ASSERT(weight >= space && weight <= 2 * space);
1185
1186 msp->ms_factor = metaslab_weight_factor(msp);
1187 if (metaslab_weight_factor_enable)
1188 weight += msp->ms_factor;
1189
1190 if (msp->ms_loaded && !msp->ms_ops->msop_fragmented(msp)) {
1191 /*
1192 * If this metaslab is one we're actively using, adjust its
1193 * weight to make it preferable to any inactive metaslab so
1194 * we'll polish it off.
1195 */
1196 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1197 }
1198
1199 return (weight);
1200 }
1201
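/*
 * Activate a metaslab for allocation: load it if necessary and tag its
 * weight with the requested primary or secondary activation bit.
 */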
1202 static int
1203 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
1204 {
1205 ASSERT(MUTEX_HELD(&msp->ms_lock));
1206
1207 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1208 metaslab_load_wait(msp);
1209 if (!msp->ms_loaded) {
1210 int error = metaslab_load(msp);
1211 if (error) {
1212 metaslab_group_sort(msp->ms_group, msp, 0);
1213 return (error);
1214 }
1215 }
1216
1217 metaslab_group_sort(msp->ms_group, msp,
1218 msp->ms_weight | activation_weight);
1219 }
1220 ASSERT(msp->ms_loaded);
1221 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
1222
1223 return (0);
1224 }
1225
1226 static void
1227 metaslab_passivate(metaslab_t *msp, uint64_t size)
1228 {
1229 /*
1230 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1231 * this metaslab again. In that case, it had better be empty,
1232 * or we would be leaving space on the table.
1233 */
1234 ASSERT(size >= SPA_MINBLOCKSIZE || range_tree_space(msp->ms_tree) == 0);
1235 metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
1236 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
1237 }
1238
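/*
 * Taskq callback that loads a metaslab in the background and bumps its
 * ms_access_txg so that it is not unloaded immediately afterwards.
 */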
1239 static void
1240 metaslab_preload(void *arg)
1241 {
1242 metaslab_t *msp = arg;
1243 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1244
1245 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
1246
1247 mutex_enter(&msp->ms_lock);
1248 metaslab_load_wait(msp);
1249 if (!msp->ms_loaded)
1250 (void) metaslab_load(msp);
1251
1252 /*
1253 * Set the ms_access_txg value so that we don't unload it right away.
1254 */
1255 msp->ms_access_txg = spa_syncing_txg(spa) + metaslab_unload_delay + 1;
1256 mutex_exit(&msp->ms_lock);
1257 }
1258
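/*
 * Dispatch background loads for up to metaslab_preload_limit metaslabs
 * from the head of the group's weight-sorted tree.
 */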
1259 static void
1260 metaslab_group_preload(metaslab_group_t *mg)
1261 {
1262 spa_t *spa = mg->mg_vd->vdev_spa;
1263 metaslab_t *msp;
1264 avl_tree_t *t = &mg->mg_metaslab_tree;
1265 int m = 0;
1266
1267 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
1268 taskq_wait(mg->mg_taskq);
1269 return;
1270 }
1271
1272 mutex_enter(&mg->mg_lock);
1273 /*
1274 * Load the next potential metaslabs
1275 */
1276 msp = avl_first(t);
1277 while (msp != NULL) {
1278 metaslab_t *msp_next = AVL_NEXT(t, msp);
1279
1280 /* If we have reached our preload limit then we're done */
1281 if (++m > metaslab_preload_limit)
1282 break;
1283
1284 /*
1285 * We must drop the metaslab group lock here to preserve
1286 * lock ordering with the ms_lock (when grabbing both
1287 * the mg_lock and the ms_lock, the ms_lock must be taken
1288 * first). As a result, it is possible that the ordering
1289 * of the metaslabs within the avl tree may change before
1290 * we reacquire the lock. The metaslab cannot be removed from
1291 * the tree while we're in syncing context so it is safe to
1292 * drop the mg_lock here. If the metaslabs are reordered
1293 * nothing will break -- we just may end up loading a
1294 * less than optimal one.
1295 */
1296 mutex_exit(&mg->mg_lock);
1297 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
1298 msp, TQ_PUSHPAGE) != 0);
1299 mutex_enter(&mg->mg_lock);
1300 msp = msp_next;
1301 }
1302 mutex_exit(&mg->mg_lock);
1303 }
1304
1305 /*
1306 * Determine if the space map's on-disk footprint is past our tolerance
1307 * for inefficiency. We would like to use the following criteria to make
1308 * our decision:
1309 *
1310 * 1. The size of the space map object should not dramatically increase as a
1311 * result of writing out the free space range tree.
1312 *
1313 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
1314  *    times the size of the free space range tree representation
1315  *    (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
1316 *
1317 * Checking the first condition is tricky since we don't want to walk
1318 * the entire AVL tree calculating the estimated on-disk size. Instead we
1319 * use the size-ordered range tree in the metaslab and calculate the
1320 * size required to write out the largest segment in our free tree. If the
1321 * size required to represent that segment on disk is larger than the space
1322 * map object then we avoid condensing this map.
1323 *
1324 * To determine the second criterion we use a best-case estimate and assume
1325 * each segment can be represented on-disk as a single 64-bit entry. We refer
1326 * to this best-case estimate as the space map's minimal form.
1327 */
1328 static boolean_t
1329 metaslab_should_condense(metaslab_t *msp)
1330 {
1331 space_map_t *sm = msp->ms_sm;
1332 range_seg_t *rs;
1333 uint64_t size, entries, segsz;
1334
1335 ASSERT(MUTEX_HELD(&msp->ms_lock));
1336 ASSERT(msp->ms_loaded);
1337
1338 /*
1339 * Use the ms_size_tree range tree, which is ordered by size, to
1340 * obtain the largest segment in the free tree. If the tree is empty
1341 * then we should condense the map.
1342 */
1343 rs = avl_last(&msp->ms_size_tree);
1344 if (rs == NULL)
1345 return (B_TRUE);
1346
1347 /*
1348 * Calculate the number of 64-bit entries this segment would
1349 * require when written to disk. If this single segment would be
1350 * larger on-disk than the entire current on-disk structure, then
1351 * clearly condensing will increase the on-disk structure size.
1352 */
1353 size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
1354 entries = size / (MIN(size, SM_RUN_MAX));
1355 segsz = entries * sizeof (uint64_t);
1356
1357 return (segsz <= space_map_length(msp->ms_sm) &&
1358 space_map_length(msp->ms_sm) >= (zfs_condense_pct *
1359 sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root)) / 100);
1360 }
1361
1362 /*
1363 * Condense the on-disk space map representation to its minimized form.
1364 * The minimized form consists of a small number of allocations followed by
1365 * the entries of the free range tree.
1366 */
1367 static void
1368 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
1369 {
1370 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1371 range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK];
1372 range_tree_t *condense_tree;
1373 space_map_t *sm = msp->ms_sm;
1374 int t;
1375
1376 ASSERT(MUTEX_HELD(&msp->ms_lock));
1377 ASSERT3U(spa_sync_pass(spa), ==, 1);
1378 ASSERT(msp->ms_loaded);
1379
1380 spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, "
1381 "smp size %llu, segments %lu", txg, msp->ms_id, msp,
1382 space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root));
1383
1384 /*
1385  * Create a range tree that is 100% allocated. We remove segments
1386 * that have been freed in this txg, any deferred frees that exist,
1387 * and any allocation in the future. Removing segments should be
1388 * a relatively inexpensive operation since we expect these trees to
1389 * have a small number of nodes.
1390 */
1391 condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
1392 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
1393
1394 /*
1395 * Remove what's been freed in this txg from the condense_tree.
1396 * Since we're in sync_pass 1, we know that all the frees from
1397 * this txg are in the freetree.
1398 */
1399 range_tree_walk(freetree, range_tree_remove, condense_tree);
1400
1401 for (t = 0; t < TXG_DEFER_SIZE; t++) {
1402 range_tree_walk(msp->ms_defertree[t],
1403 range_tree_remove, condense_tree);
1404 }
1405
1406 for (t = 1; t < TXG_CONCURRENT_STATES; t++) {
1407 range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
1408 range_tree_remove, condense_tree);
1409 }
1410
1411 /*
1412 * We're about to drop the metaslab's lock thus allowing
1413  * other consumers to change its content. Set the
1414 * metaslab's ms_condensing flag to ensure that
1415 * allocations on this metaslab do not occur while we're
1416 * in the middle of committing it to disk. This is only critical
1417 * for the ms_tree as all other range trees use per txg
1418 * views of their content.
1419 */
1420 msp->ms_condensing = B_TRUE;
1421
1422 mutex_exit(&msp->ms_lock);
1423 space_map_truncate(sm, tx);
1424 mutex_enter(&msp->ms_lock);
1425
1426 /*
1427 * While we would ideally like to create a space_map representation
1428 * that consists only of allocation records, doing so can be
1429 * prohibitively expensive because the in-core free tree can be
1430 * large, and therefore computationally expensive to subtract
1431 * from the condense_tree. Instead we sync out two trees, a cheap
1432 * allocation only tree followed by the in-core free tree. While not
1433 * optimal, this is typically close to optimal, and much cheaper to
1434 * compute.
1435 */
1436 space_map_write(sm, condense_tree, SM_ALLOC, tx);
1437 range_tree_vacate(condense_tree, NULL, NULL);
1438 range_tree_destroy(condense_tree);
1439
1440 space_map_write(sm, msp->ms_tree, SM_FREE, tx);
1441 msp->ms_condensing = B_FALSE;
1442 }
1443
1444 /*
1445 * Write a metaslab to disk in the context of the specified transaction group.
1446 */
1447 void
1448 metaslab_sync(metaslab_t *msp, uint64_t txg)
1449 {
1450 metaslab_group_t *mg = msp->ms_group;
1451 vdev_t *vd = mg->mg_vd;
1452 spa_t *spa = vd->vdev_spa;
1453 objset_t *mos = spa_meta_objset(spa);
1454 range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
1455 range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK];
1456 range_tree_t **freed_tree =
1457 &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
1458 dmu_tx_t *tx;
1459 uint64_t object = space_map_object(msp->ms_sm);
1460
1461 ASSERT(!vd->vdev_ishole);
1462
1463 /*
1464 * This metaslab has just been added so there's no work to do now.
1465 */
1466 if (*freetree == NULL) {
1467 ASSERT3P(alloctree, ==, NULL);
1468 return;
1469 }
1470
1471 ASSERT3P(alloctree, !=, NULL);
1472 ASSERT3P(*freetree, !=, NULL);
1473 ASSERT3P(*freed_tree, !=, NULL);
1474
1475 if (range_tree_space(alloctree) == 0 &&
1476 range_tree_space(*freetree) == 0)
1477 return;
1478
1479 /*
1480 * The only state that can actually be changing concurrently with
1481 * metaslab_sync() is the metaslab's ms_tree. No other thread can
1482 * be modifying this txg's alloctree, freetree, freed_tree, or
1483  * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
1484 * space_map ASSERTs. We drop it whenever we call into the DMU,
1485 * because the DMU can call down to us (e.g. via zio_free()) at
1486 * any time.
1487 */
1488
1489 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
1490
1491 if (msp->ms_sm == NULL) {
1492 uint64_t new_object;
1493
1494 new_object = space_map_alloc(mos, tx);
1495 VERIFY3U(new_object, !=, 0);
1496
1497 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
1498 msp->ms_start, msp->ms_size, vd->vdev_ashift,
1499 &msp->ms_lock));
1500 ASSERT(msp->ms_sm != NULL);
1501 }
1502
1503 mutex_enter(&msp->ms_lock);
1504
1505 if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
1506 metaslab_should_condense(msp)) {
1507 metaslab_condense(msp, txg, tx);
1508 } else {
1509 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
1510 space_map_write(msp->ms_sm, *freetree, SM_FREE, tx);
1511 }
1512
1513 range_tree_vacate(alloctree, NULL, NULL);
1514
1515 if (msp->ms_loaded) {
1516 /*
1517  * When the space map is loaded, we have an accurate
1518 * histogram in the range tree. This gives us an opportunity
1519 * to bring the space map's histogram up-to-date so we clear
1520 * it first before updating it.
1521 */
1522 space_map_histogram_clear(msp->ms_sm);
1523 space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
1524 } else {
1525 /*
1526 * Since the space map is not loaded we simply update the
1527  * existing histogram with what was freed in this txg. This
1528 * means that the on-disk histogram may not have an accurate
1529 * view of the free space but it's close enough to allow
1530 * us to make allocation decisions.
1531 */
1532 space_map_histogram_add(msp->ms_sm, *freetree, tx);
1533 }
1534
1535 /*
1536 * For sync pass 1, we avoid traversing this txg's free range tree
1537 * and instead will just swap the pointers for freetree and
1538 * freed_tree. We can safely do this since the freed_tree is
1539 * guaranteed to be empty on the initial pass.
1540 */
1541 if (spa_sync_pass(spa) == 1) {
1542 range_tree_swap(freetree, freed_tree);
1543 } else {
1544 range_tree_vacate(*freetree, range_tree_add, *freed_tree);
1545 }
1546
1547 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
1548 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
1549
1550 mutex_exit(&msp->ms_lock);
1551
1552 if (object != space_map_object(msp->ms_sm)) {
1553 object = space_map_object(msp->ms_sm);
1554 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
1555 msp->ms_id, sizeof (uint64_t), &object, tx);
1556 }
1557 dmu_tx_commit(tx);
1558 }
1559
1560 /*
1561 * Called after a transaction group has completely synced to mark
1562 * all of the metaslab's free space as usable.
1563 */
1564 void
1565 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
1566 {
1567 metaslab_group_t *mg = msp->ms_group;
1568 vdev_t *vd = mg->mg_vd;
1569 range_tree_t **freed_tree;
1570 range_tree_t **defer_tree;
1571 int64_t alloc_delta, defer_delta;
1572 int t;
1573
1574 ASSERT(!vd->vdev_ishole);
1575
1576 mutex_enter(&msp->ms_lock);
1577
1578 /*
1579 * If this metaslab is just becoming available, initialize its
1580 * alloctrees, freetrees, and defertree and add its capacity to
1581 * the vdev.
1582 */
1583 if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
1584 for (t = 0; t < TXG_SIZE; t++) {
1585 ASSERT(msp->ms_alloctree[t] == NULL);
1586 ASSERT(msp->ms_freetree[t] == NULL);
1587
1588 msp->ms_alloctree[t] = range_tree_create(NULL, msp,
1589 &msp->ms_lock);
1590 msp->ms_freetree[t] = range_tree_create(NULL, msp,
1591 &msp->ms_lock);
1592 }
1593
1594 for (t = 0; t < TXG_DEFER_SIZE; t++) {
1595 ASSERT(msp->ms_defertree[t] == NULL);
1596
1597 msp->ms_defertree[t] = range_tree_create(NULL, msp,
1598 &msp->ms_lock);
1599 }
1600
1601 vdev_space_update(vd, 0, 0, msp->ms_size);
1602 }
1603
1604 freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
1605 defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
1606
1607 alloc_delta = space_map_alloc_delta(msp->ms_sm);
1608 defer_delta = range_tree_space(*freed_tree) -
1609 range_tree_space(*defer_tree);
1610
1611 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
1612
1613 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
1614 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
1615
1616 /*
1617 * If there's a metaslab_load() in progress, wait for it to complete
1618 * so that we have a consistent view of the in-core space map.
1619 */
1620 metaslab_load_wait(msp);
1621
1622 /*
1623 * Move the frees from the defer_tree back to the free
1624 * range tree (if it's loaded). Swap the freed_tree and the
1625 * defer_tree -- this is safe to do because we've just emptied out
1626 * the defer_tree.
1627 */
1628 range_tree_vacate(*defer_tree,
1629 msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
1630 range_tree_swap(freed_tree, defer_tree);
1631
1632 space_map_update(msp->ms_sm);
1633
1634 msp->ms_deferspace += defer_delta;
1635 ASSERT3S(msp->ms_deferspace, >=, 0);
1636 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
1637 if (msp->ms_deferspace != 0) {
1638 /*
1639 * Keep syncing this metaslab until all deferred frees
1640 * are back in circulation.
1641 */
1642 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1643 }
1644
1645 if (msp->ms_loaded && msp->ms_access_txg < txg) {
1646 for (t = 1; t < TXG_CONCURRENT_STATES; t++) {
1647 VERIFY0(range_tree_space(
1648 msp->ms_alloctree[(txg + t) & TXG_MASK]));
1649 }
1650
1651 if (!metaslab_debug_unload)
1652 metaslab_unload(msp);
1653 }
1654
1655 metaslab_group_sort(mg, msp, metaslab_weight(msp));
1656 mutex_exit(&msp->ms_lock);
1657
1658 }
1659
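/*
 * Re-evaluate the group after a sync: refresh its allocatable state,
 * reset its allocation failure count, and preload candidate metaslabs.
 */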
1660 void
1661 metaslab_sync_reassess(metaslab_group_t *mg)
1662 {
1663 int64_t failures = mg->mg_alloc_failures;
1664
1665 metaslab_group_alloc_update(mg);
1666 atomic_add_64(&mg->mg_alloc_failures, -failures);
1667
1668 /*
1669 * Preload the next potential metaslabs
1670 */
1671 metaslab_group_preload(mg);
1672 }
1673
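/*
 * Return the distance, in bytes, between the given DVA and the start of
 * this metaslab; DVAs on a different vdev are treated as maximally
 * distant (1ULL << 63).
 */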
1674 static uint64_t
1675 metaslab_distance(metaslab_t *msp, dva_t *dva)
1676 {
1677 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
1678 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
1679 uint64_t start = msp->ms_id;
1680
1681 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
1682 return (1ULL << 63);
1683
1684 if (offset < start)
1685 return ((start - offset) << ms_shift);
1686 if (offset > start)
1687 return ((offset - start) << ms_shift);
1688 return (0);
1689 }
1690
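/*
 * Attempt to allocate 'asize' bytes from this metaslab group: walk the
 * group's metaslabs in weight order, skip any that are condensing or too
 * close to previously allocated DVAs, activate the chosen metaslab, and
 * allocate from it. Returns the offset, or -1ULL if no suitable metaslab
 * is found.
 */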
1691 static uint64_t
1692 metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
1693 uint64_t txg, uint64_t min_distance, dva_t *dva, int d, int flags)
1694 {
1695 spa_t *spa = mg->mg_vd->vdev_spa;
1696 metaslab_t *msp = NULL;
1697 uint64_t offset = -1ULL;
1698 avl_tree_t *t = &mg->mg_metaslab_tree;
1699 uint64_t activation_weight;
1700 uint64_t target_distance;
1701 int i;
1702
1703 activation_weight = METASLAB_WEIGHT_PRIMARY;
1704 for (i = 0; i < d; i++) {
1705 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
1706 activation_weight = METASLAB_WEIGHT_SECONDARY;
1707 break;
1708 }
1709 }
1710
1711 for (;;) {
1712 boolean_t was_active;
1713
1714 mutex_enter(&mg->mg_lock);
1715 for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
1716 if (msp->ms_weight < asize) {
1717 spa_dbgmsg(spa, "%s: failed to meet weight "
1718 "requirement: vdev %llu, txg %llu, mg %p, "
1719 "msp %p, psize %llu, asize %llu, "
1720 "failures %llu, weight %llu",
1721 spa_name(spa), mg->mg_vd->vdev_id, txg,
1722 mg, msp, psize, asize,
1723 mg->mg_alloc_failures, msp->ms_weight);
1724 mutex_exit(&mg->mg_lock);
1725 return (-1ULL);
1726 }
1727
1728 /*
1729 * If the selected metaslab is condensing, skip it.
1730 */
1731 if (msp->ms_condensing)
1732 continue;
1733
1734 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
1735 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
1736 break;
1737
1738 target_distance = min_distance +
1739 (space_map_allocated(msp->ms_sm) != 0 ? 0 :
1740 min_distance >> 1);
1741
1742 for (i = 0; i < d; i++)
1743 if (metaslab_distance(msp, &dva[i]) <
1744 target_distance)
1745 break;
1746 if (i == d)
1747 break;
1748 }
1749 mutex_exit(&mg->mg_lock);
1750 if (msp == NULL)
1751 return (-1ULL);
1752
1753 mutex_enter(&msp->ms_lock);
1754
1755 /*
1756 * If we've already reached the allowable number of failed
1757 * allocation attempts on this metaslab group then we
1758 * consider skipping it. We skip it only if we're allowed
1759 * to "fast" gang, the physical size is larger than
1760 * a gang block, and we're attempting to allocate from
1761 * the primary metaslab.
1762 */
1763 if (mg->mg_alloc_failures > zfs_mg_alloc_failures &&
1764 CAN_FASTGANG(flags) && psize > SPA_GANGBLOCKSIZE &&
1765 activation_weight == METASLAB_WEIGHT_PRIMARY) {
1766 spa_dbgmsg(spa, "%s: skipping metaslab group: "
1767 "vdev %llu, txg %llu, mg %p, msp[%llu] %p, "
1768 "psize %llu, asize %llu, failures %llu",
1769 spa_name(spa), mg->mg_vd->vdev_id, txg, mg,
1770 msp->ms_id, msp, psize, asize,
1771 mg->mg_alloc_failures);
1772 mutex_exit(&msp->ms_lock);
1773 return (-1ULL);
1774 }
1775
1776 /*
1777 * Ensure that the metaslab we have selected is still
1778 * capable of handling our request. It's possible that
1779 * another thread may have changed the weight while we
1780 * were blocked on the metaslab lock.
1781 */
1782 if (msp->ms_weight < asize || (was_active &&
1783 !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
1784 activation_weight == METASLAB_WEIGHT_PRIMARY)) {
1785 mutex_exit(&msp->ms_lock);
1786 continue;
1787 }
1788
1789 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
1790 activation_weight == METASLAB_WEIGHT_PRIMARY) {
1791 metaslab_passivate(msp,
1792 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
1793 mutex_exit(&msp->ms_lock);
1794 continue;
1795 }
1796
1797 if (metaslab_activate(msp, activation_weight) != 0) {
1798 mutex_exit(&msp->ms_lock);
1799 continue;
1800 }
1801
1802 /*
1803 * If this metaslab is currently condensing then pick again as
1804 * we can't manipulate this metaslab until it's committed
1805 * to disk.
1806 */
1807 if (msp->ms_condensing) {
1808 mutex_exit(&msp->ms_lock);
1809 continue;
1810 }
1811
1812 if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL)
1813 break;
1814
1815 atomic_inc_64(&mg->mg_alloc_failures);
1816
1817 metaslab_passivate(msp, metaslab_block_maxsize(msp));
1818 mutex_exit(&msp->ms_lock);
1819 }
1820
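/*
 * If this is the metaslab's first allocation in this txg, dirty the
 * vdev so the metaslab is picked up by metaslab_sync().
 */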
1821 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
1822 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
1823
1824 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize);
1825 msp->ms_access_txg = txg + metaslab_unload_delay;
1826
1827 mutex_exit(&msp->ms_lock);
1828
1829 return (offset);
1830 }
1831
1832 /*
1833 * Allocate a block for the specified i/o.
1834 */
1835 static int
1836 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
1837 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
1838 {
1839 metaslab_group_t *mg, *fast_mg, *rotor;
1840 vdev_t *vd;
1841 int dshift = 3;
1842 int all_zero;
1843 int zio_lock = B_FALSE;
1844 boolean_t allocatable;
1845 uint64_t offset = -1ULL;
1846 uint64_t asize;
1847 uint64_t distance;
1848
1849 ASSERT(!DVA_IS_VALID(&dva[d]));
1850
1851 /*
1852 * For testing, make some blocks above a certain size be gang blocks.
1853 */
1854 if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
1855 return (SET_ERROR(ENOSPC));
1856
1857 if (flags & METASLAB_FASTWRITE)
1858 mutex_enter(&mc->mc_fastwrite_lock);
1859
1860 /*
1861 * Start at the rotor and loop through all mgs until we find something.
1862 * Note that there's no locking on mc_rotor or mc_aliquot because
1863 * nothing actually breaks if we miss a few updates -- we just won't
1864 * allocate quite as evenly. It all balances out over time.
1865 *
1866 * If we are doing ditto or log blocks, try to spread them across
1867 * consecutive vdevs. If we're forced to reuse a vdev before we've
1868 * allocated all of our ditto blocks, then try to spread them out on
1869 * that vdev as much as possible. If it turns out to not be possible,
1870 * gradually lower our standards until anything becomes acceptable.
1871 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
1872 * gives us hope of containing our fault domains to something we're
1873 * able to reason about. Otherwise, any two top-level vdev failures
1874 * will guarantee the loss of data. With consecutive allocation,
1875 * only two adjacent top-level vdev failures will result in data loss.
1876 *
1877 * If we are doing gang blocks (hintdva is non-NULL), try to keep
1878 * ourselves on the same vdev as our gang block header. That
1879 * way, we can hope for locality in vdev_cache, plus it makes our
1880 * fault domains something tractable.
1881 */
1882 if (hintdva) {
1883 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
1884
1885 /*
1886 * It's possible the vdev we're using as the hint no
1887 * longer exists (i.e. removed). Consult the rotor when
1888 * all else fails.
1889 */
1890 if (vd != NULL) {
1891 mg = vd->vdev_mg;
1892
1893 if (flags & METASLAB_HINTBP_AVOID &&
1894 mg->mg_next != NULL)
1895 mg = mg->mg_next;
1896 } else {
1897 mg = mc->mc_rotor;
1898 }
1899 } else if (d != 0) {
1900 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
1901 mg = vd->vdev_mg->mg_next;
1902 } else if (flags & METASLAB_FASTWRITE) {
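/*
 * For fastwrite (ZIL) allocations, start from the metaslab group
 * whose top-level vdev has the fewest fastwrite bytes still
 * pending, so in-flight log writes are spread across vdevs.
 */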
1903 mg = fast_mg = mc->mc_rotor;
1904
1905 do {
1906 if (fast_mg->mg_vd->vdev_pending_fastwrite <
1907 mg->mg_vd->vdev_pending_fastwrite)
1908 mg = fast_mg;
1909 } while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);
1910
1911 } else {
1912 mg = mc->mc_rotor;
1913 }
1914
1915 /*
1916 * If the hint put us into the wrong metaslab class, or into a
1917 * metaslab group that has been passivated, just follow the rotor.
1918 */
1919 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
1920 mg = mc->mc_rotor;
1921
1922 rotor = mg;
1923 top:
1924 all_zero = B_TRUE;
1925 do {
1926 ASSERT(mg->mg_activation_count == 1);
1927
1928 vd = mg->mg_vd;
1929
1930 /*
1931 * Don't allocate from faulted devices.
1932 */
1933 if (zio_lock) {
1934 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
1935 allocatable = vdev_allocatable(vd);
1936 spa_config_exit(spa, SCL_ZIO, FTAG);
1937 } else {
1938 allocatable = vdev_allocatable(vd);
1939 }
1940
1941 /*
1942 * Determine if the selected metaslab group is eligible
1943 * for allocations. If we're ganging or have requested
1944 * an allocation for the smallest gang block size,
1945 * then we don't want to avoid allocating to this
1946 * metaslab group. If we're in this condition we should
1947 * try to allocate from any device possible so that we
1948 * don't inadvertently return ENOSPC and suspend the pool
1949 * even though space is still available.
1950 */
1951 if (allocatable && CAN_FASTGANG(flags) &&
1952 psize > SPA_GANGBLOCKSIZE)
1953 allocatable = metaslab_group_allocatable(mg);
1954
1955 if (!allocatable)
1956 goto next;
1957
1958 /*
1959 * Avoid writing single-copy data to a failing vdev
1960 * unless the user instructs us that it is okay.
1961 */
1962 if ((vd->vdev_stat.vs_write_errors > 0 ||
1963 vd->vdev_state < VDEV_STATE_HEALTHY) &&
1964 d == 0 && dshift == 3 &&
1965 !(zfs_write_to_degraded && vd->vdev_state ==
1966 VDEV_STATE_DEGRADED)) {
1967 all_zero = B_FALSE;
1968 goto next;
1969 }
1970
1971 ASSERT(mg->mg_class == mc);
1972
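/*
 * On the first pass (dshift == 3) ditto copies on this vdev must be
 * at least 1/8 of the vdev apart; each additional pass around the
 * rotor halves that requirement.  Once the requirement is no larger
 * than a single metaslab we stop enforcing a distance at all.
 */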
1973 distance = vd->vdev_asize >> dshift;
1974 if (distance <= (1ULL << vd->vdev_ms_shift))
1975 distance = 0;
1976 else
1977 all_zero = B_FALSE;
1978
1979 asize = vdev_psize_to_asize(vd, psize);
1980 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
1981
1982 offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
1983 dva, d, flags);
1984 if (offset != -1ULL) {
1985 /*
1986 * If we've just selected this metaslab group,
1987 * figure out whether the corresponding vdev is
1988 * over- or under-used relative to the pool,
1989 * and set an allocation bias to even it out.
1990 */
1991 if (mc->mc_aliquot == 0) {
1992 vdev_stat_t *vs = &vd->vdev_stat;
1993 int64_t vu, cu;
1994
1995 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
1996 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
1997
1998 /*
1999 * Calculate how much more or less we should
2000 * try to allocate from this device during
2001 * this iteration around the rotor.
2002 * For example, if a device is 80% full
2003 * and the pool is 20% full then we should
2004 * reduce allocations by 60% on this device.
2005 *
2006 * mg_bias = (20 - 80) * 512K / 100 = -307K
2007 *
2008 * This reduces allocations by 307K for this
2009 * iteration.
2010 */
2011 mg->mg_bias = ((cu - vu) *
2012 (int64_t)mg->mg_aliquot) / 100;
2013 }
2014
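/*
 * Once roughly mg_aliquot bytes (adjusted by mg_bias) have been
 * allocated from this group, advance the rotor to the next group.
 * Fastwrite allocations always advance the rotor so that
 * consecutive log blocks land on different vdevs.
 */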
2015 if ((flags & METASLAB_FASTWRITE) ||
2016 atomic_add_64_nv(&mc->mc_aliquot, asize) >=
2017 mg->mg_aliquot + mg->mg_bias) {
2018 mc->mc_rotor = mg->mg_next;
2019 mc->mc_aliquot = 0;
2020 }
2021
2022 DVA_SET_VDEV(&dva[d], vd->vdev_id);
2023 DVA_SET_OFFSET(&dva[d], offset);
2024 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
2025 DVA_SET_ASIZE(&dva[d], asize);
2026
2027 if (flags & METASLAB_FASTWRITE) {
2028 atomic_add_64(&vd->vdev_pending_fastwrite,
2029 psize);
2030 mutex_exit(&mc->mc_fastwrite_lock);
2031 }
2032
2033 return (0);
2034 }
2035 next:
2036 mc->mc_rotor = mg->mg_next;
2037 mc->mc_aliquot = 0;
2038 } while ((mg = mg->mg_next) != rotor);
2039
2040 if (!all_zero) {
2041 dshift++;
2042 ASSERT(dshift < 64);
2043 goto top;
2044 }
2045
2046 if (!allocatable && !zio_lock) {
2047 dshift = 3;
2048 zio_lock = B_TRUE;
2049 goto top;
2050 }
2051
2052 bzero(&dva[d], sizeof (dva_t));
2053
2054 if (flags & METASLAB_FASTWRITE)
2055 mutex_exit(&mc->mc_fastwrite_lock);
2056
2057 return (SET_ERROR(ENOSPC));
2058 }
2059
2060 /*
2061 * Free the block represented by DVA in the context of the specified
2062 * transaction group.
2063 */
2064 static void
2065 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
2066 {
2067 uint64_t vdev = DVA_GET_VDEV(dva);
2068 uint64_t offset = DVA_GET_OFFSET(dva);
2069 uint64_t size = DVA_GET_ASIZE(dva);
2070 vdev_t *vd;
2071 metaslab_t *msp;
2072
2073 ASSERT(DVA_IS_VALID(dva));
2074
2075 if (txg > spa_freeze_txg(spa))
2076 return;
2077
2078 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2079 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
2080 cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
2081 (u_longlong_t)vdev, (u_longlong_t)offset);
2082 ASSERT(0);
2083 return;
2084 }
2085
2086 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2087
2088 if (DVA_GET_GANG(dva))
2089 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2090
2091 mutex_enter(&msp->ms_lock);
2092
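/*
 * With 'now' set we are undoing an allocation made in this txg:
 * remove the range from the txg's alloctree and return it directly
 * to the in-core free tree.  Otherwise queue the range on the
 * txg's freetree (dirtying the vdev if this is its first free of
 * the txg) so it is processed when the metaslab syncs.
 */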
2093 if (now) {
2094 range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
2095 offset, size);
2096
2097 VERIFY(!msp->ms_condensing);
2098 VERIFY3U(offset, >=, msp->ms_start);
2099 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
2100 VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
2101 msp->ms_size);
2102 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2103 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2104 range_tree_add(msp->ms_tree, offset, size);
2105 } else {
2106 if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
2107 vdev_dirty(vd, VDD_METASLAB, msp, txg);
2108 range_tree_add(msp->ms_freetree[txg & TXG_MASK],
2109 offset, size);
2110 }
2111
2112 mutex_exit(&msp->ms_lock);
2113 }
2114
2115 /*
2116 * Intent log support: upon opening the pool after a crash, notify the SPA
2117 * of blocks that the intent log has allocated for immediate write, but
2118 * which are still considered free by the SPA because the last transaction
2119 * group didn't commit yet.
2120 */
2121 static int
2122 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
2123 {
2124 uint64_t vdev = DVA_GET_VDEV(dva);
2125 uint64_t offset = DVA_GET_OFFSET(dva);
2126 uint64_t size = DVA_GET_ASIZE(dva);
2127 vdev_t *vd;
2128 metaslab_t *msp;
2129 int error = 0;
2130
2131 ASSERT(DVA_IS_VALID(dva));
2132
2133 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2134 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
2135 return (SET_ERROR(ENXIO));
2136
2137 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2138
2139 if (DVA_GET_GANG(dva))
2140 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2141
2142 mutex_enter(&msp->ms_lock);
2143
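/*
 * Activating the metaslab loads its space map, so ms_tree below
 * reflects the free space we need to check the claim against.  For
 * a read-only pool or a dry run we only do this if the metaslab
 * isn't already loaded.
 */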
2144 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
2145 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
2146
2147 if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
2148 error = SET_ERROR(ENOENT);
2149
2150 if (error || txg == 0) { /* txg == 0 indicates dry run */
2151 mutex_exit(&msp->ms_lock);
2152 return (error);
2153 }
2154
2155 VERIFY(!msp->ms_condensing);
2156 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2157 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2158 VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
2159 range_tree_remove(msp->ms_tree, offset, size);
2160
2161 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
2162 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2163 vdev_dirty(vd, VDD_METASLAB, msp, txg);
2164 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
2165 }
2166
2167 mutex_exit(&msp->ms_lock);
2168
2169 return (0);
2170 }
2171
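/*
 * Allocate ndvas DVAs of psize bytes each for bp, optionally guided by
 * hintbp.  On failure, any DVAs already allocated for this bp are freed
 * again and an error (typically ENOSPC) is returned.
 */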
2172 int
2173 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
2174 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
2175 {
2176 dva_t *dva = bp->blk_dva;
2177 dva_t *hintdva = hintbp->blk_dva;
2178 int d, error = 0;
2179
2180 ASSERT(bp->blk_birth == 0);
2181 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
2182
2183 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2184
2185 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
2186 spa_config_exit(spa, SCL_ALLOC, FTAG);
2187 return (SET_ERROR(ENOSPC));
2188 }
2189
2190 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
2191 ASSERT(BP_GET_NDVAS(bp) == 0);
2192 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
2193
2194 for (d = 0; d < ndvas; d++) {
2195 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
2196 txg, flags);
2197 if (error != 0) {
2198 for (d--; d >= 0; d--) {
2199 metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
2200 bzero(&dva[d], sizeof (dva_t));
2201 }
2202 spa_config_exit(spa, SCL_ALLOC, FTAG);
2203 return (error);
2204 }
2205 }
2206 ASSERT(error == 0);
2207 ASSERT(BP_GET_NDVAS(bp) == ndvas);
2208
2209 spa_config_exit(spa, SCL_ALLOC, FTAG);
2210
2211 BP_SET_BIRTH(bp, txg, txg);
2212
2213 return (0);
2214 }
2215
2216 void
2217 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
2218 {
2219 const dva_t *dva = bp->blk_dva;
2220 int d, ndvas = BP_GET_NDVAS(bp);
2221
2222 ASSERT(!BP_IS_HOLE(bp));
2223 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
2224
2225 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
2226
2227 for (d = 0; d < ndvas; d++)
2228 metaslab_free_dva(spa, &dva[d], txg, now);
2229
2230 spa_config_exit(spa, SCL_FREE, FTAG);
2231 }
2232
2233 int
2234 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
2235 {
2236 const dva_t *dva = bp->blk_dva;
2237 int ndvas = BP_GET_NDVAS(bp);
2238 int d, error = 0;
2239
2240 ASSERT(!BP_IS_HOLE(bp));
2241
2242 if (txg != 0) {
2243 /*
2244 * First do a dry run to make sure all DVAs are claimable,
2245 * so we don't have to unwind from partial failures below.
2246 */
2247 if ((error = metaslab_claim(spa, bp, 0)) != 0)
2248 return (error);
2249 }
2250
2251 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2252
2253 for (d = 0; d < ndvas; d++)
2254 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
2255 break;
2256
2257 spa_config_exit(spa, SCL_ALLOC, FTAG);
2258
2259 ASSERT(error == 0 || txg == 0);
2260
2261 return (error);
2262 }
2263
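/*
 * Account for a ZIL fastwrite block: metaslab_fastwrite_mark() adds the
 * block's psize to vdev_pending_fastwrite on each DVA's top-level vdev
 * and metaslab_fastwrite_unmark() subtracts it again.  The pending count
 * is what steers METASLAB_FASTWRITE allocations in metaslab_alloc_dva()
 * toward the least-loaded vdev.
 */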
2264 void
2265 metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
2266 {
2267 const dva_t *dva = bp->blk_dva;
2268 int ndvas = BP_GET_NDVAS(bp);
2269 uint64_t psize = BP_GET_PSIZE(bp);
2270 int d;
2271 vdev_t *vd;
2272
2273 ASSERT(!BP_IS_HOLE(bp));
2274 ASSERT(psize > 0);
2275
2276 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2277
2278 for (d = 0; d < ndvas; d++) {
2279 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
2280 continue;
2281 atomic_add_64(&vd->vdev_pending_fastwrite, psize);
2282 }
2283
2284 spa_config_exit(spa, SCL_VDEV, FTAG);
2285 }
2286
2287 void
2288 metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
2289 {
2290 const dva_t *dva = bp->blk_dva;
2291 int ndvas = BP_GET_NDVAS(bp);
2292 uint64_t psize = BP_GET_PSIZE(bp);
2293 int d;
2294 vdev_t *vd;
2295
2296 ASSERT(!BP_IS_HOLE(bp));
2297 ASSERT(psize > 0);
2298
2299 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2300
2301 for (d = 0; d < ndvas; d++) {
2302 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
2303 continue;
2304 ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
2305 atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
2306 }
2307
2308 spa_config_exit(spa, SCL_VDEV, FTAG);
2309 }
2310
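/*
 * Debug-only (ZFS_DEBUG_ZIO_FREE) sanity check: verify that no segment
 * of the block being freed is already in the metaslab's free tree, a
 * per-txg freetree, or a defer tree, any of which would indicate a
 * double free.
 */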
2311 void
2312 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
2313 {
2314 int i, j;
2315
2316 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
2317 return;
2318
2319 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2320 for (i = 0; i < BP_GET_NDVAS(bp); i++) {
2321 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
2322 vdev_t *vd = vdev_lookup_top(spa, vdev);
2323 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
2324 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
2325 metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2326
2327 if (msp->ms_loaded)
2328 range_tree_verify(msp->ms_tree, offset, size);
2329
2330 for (j = 0; j < TXG_SIZE; j++)
2331 range_tree_verify(msp->ms_freetree[j], offset, size);
2332 for (j = 0; j < TXG_DEFER_SIZE; j++)
2333 range_tree_verify(msp->ms_defertree[j], offset, size);
2334 }
2335 spa_config_exit(spa, SCL_VDEV, FTAG);
2336 }
2337
2338 #if defined(_KERNEL) && defined(HAVE_SPL)
2339 module_param(metaslab_debug_load, int, 0644);
2340 module_param(metaslab_debug_unload, int, 0644);
2341 MODULE_PARM_DESC(metaslab_debug_load,
2342 "load all metaslabs when pool is first opened");
2343 MODULE_PARM_DESC(metaslab_debug_unload,
2344 "prevent metaslabs from being unloaded");
2345
2346 module_param(zfs_mg_noalloc_threshold, int, 0644);
2347 MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
2348 "percentage of free space for metaslab group to allow allocation");
2349 #endif /* _KERNEL && HAVE_SPL */