/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>

#define	WITH_DF_BLOCK_ALLOCATOR
/*
 * Allow allocations to switch to gang blocks quickly. We do this to
 * avoid having to load lots of space_maps in a given txg. There are,
 * however, some cases where we want to avoid "fast" ganging and instead
 * we want to do an exhaustive search of all metaslabs on this device.
 * Currently we don't allow any gang, zil, or dump device related allocations
 * in this metaslab group to "fast" gang.
 */
#define	CAN_FASTGANG(flags) \
	(!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
	METASLAB_GANG_AVOID)))
uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
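
/*
 * metaslab_gang_bang is a test tunable: in metaslab_alloc_dva(), roughly one
 * in four allocations whose physical size is at or above this value is
 * deliberately failed so that the caller falls back to gang blocks.
 */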
/*
 * This value defines the number of allowed allocation failures per vdev.
 * If a device reaches this threshold in a given txg then we consider skipping
 * allocations on that device.
 */
int zfs_mg_alloc_failures;
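
/*
 * The failure count itself is tracked per metaslab group (mg_alloc_failures);
 * it is incremented in metaslab_group_alloc() and reset back to zero in
 * metaslab_sync_reassess().
 */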
/*
 * Metaslab debugging: when set, keeps all space maps in core to verify frees.
 */
int metaslab_debug = 0;
/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy.  Once the space map cannot satisfy
 * an allocation of this size then it switches to using more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;
/*
 * A metaslab is considered "free" if it contains a contiguous
 * segment which is greater than metaslab_min_alloc_size.
 */
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
/*
 * Max number of space_maps to prefetch.
 */
int metaslab_prefetch_limit = SPA_DVAS_PER_BP;
/*
 * Percentage bonus multiplier for metaslabs that are in the bonus area.
 */
int metaslab_smo_bonus_pct = 150;
/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_PUSHPAGE);

	mc->mc_spa = spa;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_fastwrite_lock, NULL, MUTEX_DEFAULT, NULL);

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	mutex_destroy(&mc->mc_fastwrite_lock);
	kmem_free(mc, sizeof (metaslab_class_t));
}
int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}
void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}
uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}
/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = x1;
	const metaslab_t *m2 = x2;

	if (m1->ms_weight < m2->ms_weight)
		return (1);
	if (m1->ms_weight > m2->ms_weight)
		return (-1);

	/*
	 * If the weights are identical, use the offset to force uniqueness.
	 */
	if (m1->ms_map.sm_start < m2->ms_map.sm_start)
		return (-1);
	if (m1->ms_map.sm_start > m2->ms_map.sm_start)
		return (1);

	ASSERT3P(m1, ==, m2);

	return (0);
}
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_PUSHPAGE);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;

	return (mg);
}
void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	kmem_free(mg, sizeof (metaslab_group_t));
}
void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	ASSERT(mc->mc_rotor != mg);
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
}
void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	if (--mg->mg_activation_count != 0) {
		ASSERT(mc->mc_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}
static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == NULL);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 510].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}
/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */
static int
metaslab_segsize_compare(const void *x1, const void *x2)
{
	const space_seg_t *s1 = x1;
	const space_seg_t *s2 = x2;
	uint64_t ss_size1 = s1->ss_end - s1->ss_start;
	uint64_t ss_size2 = s2->ss_end - s2->ss_start;

	if (ss_size1 < ss_size2)
		return (-1);
	if (ss_size1 > ss_size2)
		return (1);

	if (s1->ss_start < s2->ss_start)
		return (-1);
	if (s1->ss_start > s2->ss_start)
		return (1);

	return (0);
}
#if defined(WITH_FF_BLOCK_ALLOCATOR) || \
    defined(WITH_DF_BLOCK_ALLOCATOR) || \
    defined(WITH_CDF_BLOCK_ALLOCATOR)
/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate. This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
	space_seg_t *ss, ssearch;
	avl_index_t where;

	ssearch.ss_start = *cursor;
	ssearch.ss_end = *cursor + size;

	ss = avl_find(t, &ssearch, &where);
	if (ss == NULL)
		ss = avl_nearest(t, where, AVL_AFTER);

	while (ss != NULL) {
		uint64_t offset = P2ROUNDUP(ss->ss_start, align);

		if (offset + size <= ss->ss_end) {
			*cursor = offset + size;
			return (offset);
		}
		ss = AVL_NEXT(t, ss);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_block_picker(t, cursor, size, align));
}
#endif /* WITH_FF/DF/CDF_BLOCK_ALLOCATOR */
static void
metaslab_pp_load(space_map_t *sm)
{
	space_seg_t *ss;

	ASSERT(sm->sm_ppd == NULL);
	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_PUSHPAGE);

	sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_PUSHPAGE);
	avl_create(sm->sm_pp_root, metaslab_segsize_compare,
	    sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		avl_add(sm->sm_pp_root, ss);
}
static void
metaslab_pp_unload(space_map_t *sm)
{
	void *cookie = NULL;

	kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
	sm->sm_ppd = NULL;

	while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
		/* tear down the tree */
	}

	avl_destroy(sm->sm_pp_root);
	kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
	sm->sm_pp_root = NULL;
}
static void
metaslab_pp_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

static void
metaslab_pp_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}
/*
 * Return the maximum contiguous segment within the metaslab.
 */
static uint64_t
metaslab_pp_maxsize(space_map_t *sm)
{
	avl_tree_t *t = sm->sm_pp_root;
	space_seg_t *ss;

	if (t == NULL || (ss = avl_last(t)) == NULL)
		return (0ULL);

	return (ss->ss_end - ss->ss_start);
}
#if defined(WITH_FF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
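/*
 * First-fit keeps one cursor per power-of-two allocation size (the 64-entry
 * sm_ppd array allocated in metaslab_pp_load()).  The natural alignment of a
 * request is its lowest set bit: size & -size, which for a power-of-two
 * request size equals the size itself (4K for a 4K request, 128K for a 128K
 * request), so each size class resumes searching where it last left off.
 */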
static uint64_t
metaslab_ff_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;

	return (metaslab_block_picker(t, cursor, size, align));
}
static boolean_t
metaslab_ff_fragmented(space_map_t *sm)
{
	return (B_TRUE);
}

static space_map_ops_t metaslab_ff_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_ff_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_ff_fragmented
};

space_map_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
#endif /* WITH_FF_BLOCK_ALLOCATOR */
#if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first fit allocation scheme until space gets low and then
 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
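/*
 * For example, with the default tunables a space map whose largest free
 * segment has dropped below SPA_MAXBLOCKSIZE (metaslab_df_alloc_threshold),
 * or whose overall free space is under 4% (metaslab_df_free_pct), abandons
 * the offset-ordered first-fit search and allocates from the size-sorted
 * tree instead (best-fit).
 */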
static uint64_t
metaslab_df_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
	uint64_t max_size = metaslab_pp_maxsize(sm);
	int free_pct = sm->sm_space * 100 / sm->sm_size;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

	if (max_size < size)
		return (-1ULL);

	/*
	 * If we're running low on space switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		t = sm->sm_pp_root;
		*cursor = 0;
	}

	return (metaslab_block_picker(t, cursor, size, 1ULL));
}
static boolean_t
metaslab_df_fragmented(space_map_t *sm)
{
	uint64_t max_size = metaslab_pp_maxsize(sm);
	int free_pct = sm->sm_space * 100 / sm->sm_size;

	if (max_size >= metaslab_df_alloc_threshold &&
	    free_pct >= metaslab_df_free_pct)
		return (B_FALSE);

	return (B_TRUE);
}

static space_map_ops_t metaslab_df_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_df_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_df_fragmented
};

space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
#endif /* WITH_DF_BLOCK_ALLOCATOR */
/*
 * ==========================================================================
 * Other experimental allocators
 * ==========================================================================
 */
#if defined(WITH_CDF_BLOCK_ALLOCATOR)
static uint64_t
metaslab_cdf_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd;
	uint64_t *extent_end = (uint64_t *)sm->sm_ppd + 1;
	uint64_t max_size = metaslab_pp_maxsize(sm);
	uint64_t rsize = size;
	uint64_t offset = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

	if (max_size < size)
		return (-1ULL);

	ASSERT3U(*extent_end, >=, *cursor);

	/*
	 * If we're running low on space switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if ((*cursor + size) > *extent_end) {
		t = sm->sm_pp_root;
		*cursor = *extent_end = 0;

		if (max_size > 2 * SPA_MAXBLOCKSIZE)
			rsize = MIN(metaslab_min_alloc_size, max_size);
		offset = metaslab_block_picker(t, extent_end, rsize, 1ULL);
		if (offset != -1ULL)
			*cursor = offset + size;
	} else {
		offset = metaslab_block_picker(t, cursor, rsize, 1ULL);
	}
	ASSERT3U(*cursor, <=, *extent_end);

	return (offset);
}
static boolean_t
metaslab_cdf_fragmented(space_map_t *sm)
{
	uint64_t max_size = metaslab_pp_maxsize(sm);

	if (max_size > (metaslab_min_alloc_size * 10))
		return (B_FALSE);
	return (B_TRUE);
}

static space_map_ops_t metaslab_cdf_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_cdf_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_cdf_fragmented
};

space_map_ops_t *zfs_metaslab_ops = &metaslab_cdf_ops;
#endif /* WITH_CDF_BLOCK_ALLOCATOR */
#if defined(WITH_NDF_BLOCK_ALLOCATOR)
uint64_t metaslab_ndf_clump_shift = 4;
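
/*
 * The clump shift bounds how far beyond the cursor NDF will look: when the
 * cursor cannot satisfy a request, the size-sorted tree is searched for a
 * segment of at most 1 << (highbit(size) + metaslab_ndf_clump_shift) bytes,
 * i.e. roughly 16x the request size with the default shift of 4.
 */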
static uint64_t
metaslab_ndf_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	avl_index_t where;
	space_seg_t *ss, ssearch;
	uint64_t hbit = highbit(size);
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + hbit - 1;
	uint64_t max_size = metaslab_pp_maxsize(sm);

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

	if (max_size < size)
		return (-1ULL);

	ssearch.ss_start = *cursor;
	ssearch.ss_end = *cursor + size;

	ss = avl_find(t, &ssearch, &where);
	if (ss == NULL || (ss->ss_start + size > ss->ss_end)) {
		t = sm->sm_pp_root;

		ssearch.ss_start = 0;
		ssearch.ss_end = MIN(max_size,
		    1ULL << (hbit + metaslab_ndf_clump_shift));
		ss = avl_find(t, &ssearch, &where);
		if (ss == NULL)
			ss = avl_nearest(t, where, AVL_AFTER);
		ASSERT(ss != NULL);
	}

	if (ss->ss_start + size <= ss->ss_end) {
		*cursor = ss->ss_start + size;
		return (ss->ss_start);
	}
	return (-1ULL);
}
static boolean_t
metaslab_ndf_fragmented(space_map_t *sm)
{
	uint64_t max_size = metaslab_pp_maxsize(sm);

	if (max_size > (metaslab_min_alloc_size << metaslab_ndf_clump_shift))
		return (B_FALSE);
	return (B_TRUE);
}

static space_map_ops_t metaslab_ndf_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_ndf_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_ndf_fragmented
};

space_map_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
#endif /* WITH_NDF_BLOCK_ALLOCATOR */
/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
metaslab_t *
metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
    uint64_t start, uint64_t size, uint64_t txg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_t *msp;

	msp = kmem_zalloc(sizeof (metaslab_t), KM_PUSHPAGE);
	mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);

	msp->ms_smo_syncing = *smo;

	/*
	 * We create the main space map here, but we don't create the
	 * allocmaps and freemaps until metaslab_sync_done().  This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	space_map_create(&msp->ms_map, start, size,
	    vd->vdev_ashift, &msp->ms_lock);

	metaslab_group_add(mg, msp);

	if (metaslab_debug && smo->smo_object != 0) {
		mutex_enter(&msp->ms_lock);
		VERIFY(space_map_load(&msp->ms_map, mg->mg_class->mc_ops,
		    SM_FREE, smo, spa_meta_objset(vd->vdev_spa)) == 0);
		mutex_exit(&msp->ms_lock);
	}

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(msp, 0);

	if (txg != 0) {
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, msp, txg);
	}

	return (msp);
}
void
metaslab_fini(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	int t;

	vdev_space_update(mg->mg_vd,
	    -msp->ms_smo.smo_alloc, 0, -msp->ms_map.sm_size);

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);

	space_map_unload(&msp->ms_map);
	space_map_destroy(&msp->ms_map);

	for (t = 0; t < TXG_SIZE; t++) {
		space_map_destroy(&msp->ms_allocmap[t]);
		space_map_destroy(&msp->ms_freemap[t]);
	}

	for (t = 0; t < TXG_DEFER_SIZE; t++)
		space_map_destroy(&msp->ms_defermap[t]);

	ASSERT3S(msp->ms_deferspace, ==, 0);

	mutex_exit(&msp->ms_lock);
	mutex_destroy(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
}
#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
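
/*
 * The activation flags occupy the top two bits of a metaslab's weight, so an
 * active (primary or secondary) metaslab always sorts ahead of any inactive
 * one in the group's AVL tree, which metaslab_compare() orders by descending
 * weight.
 */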
static uint64_t
metaslab_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo;
	vdev_t *vd = mg->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = sm->sm_size - smo->smo_alloc;
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1. We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	weight = 2 * weight -
	    ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
	ASSERT(weight >= space && weight <= 2 * space);
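
	/*
	 * For example, on a vdev with 200 metaslabs, metaslab 0 gets weight
	 * 2 * space, metaslab 100 gets 1.5 * space, and metaslab 199 gets
	 * just over 1 * space.
	 */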
	/*
	 * For locality, assign higher weight to metaslabs which have
	 * a lower offset than what we've already activated.
	 */
	if (sm->sm_start <= mg->mg_bonus_area)
		weight *= (metaslab_smo_bonus_pct / 100);
	ASSERT(weight >= space &&
	    weight <= 2 * (metaslab_smo_bonus_pct / 100) * space);

	if (sm->sm_loaded && !sm->sm_ops->smop_fragmented(sm)) {
		/*
		 * If this metaslab is one we're actively using, adjust its
		 * weight to make it preferable to any inactive metaslab so
		 * we'll polish it off.
		 */
		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
	}
	return (weight);
}
static void
metaslab_prefetch(metaslab_group_t *mg)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	int m;

	mutex_enter(&mg->mg_lock);

	/*
	 * Prefetch the next potential metaslabs
	 */
	for (msp = avl_first(t), m = 0; msp; msp = AVL_NEXT(t, msp), m++) {
		space_map_t *sm = &msp->ms_map;
		space_map_obj_t *smo = &msp->ms_smo;

		/* If we have reached our prefetch limit then we're done */
		if (m >= metaslab_prefetch_limit)
			break;

		if (!sm->sm_loaded && smo->smo_object != 0) {
			mutex_exit(&mg->mg_lock);
			dmu_prefetch(spa_meta_objset(spa), smo->smo_object,
			    0ULL, smo->smo_objsize);
			mutex_enter(&mg->mg_lock);
		}
	}
	mutex_exit(&mg->mg_lock);
}
static int
metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
{
	metaslab_group_t *mg = msp->ms_group;
	space_map_t *sm = &msp->ms_map;
	space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops;
	int t;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		space_map_load_wait(sm);
		if (!sm->sm_loaded) {
			space_map_obj_t *smo = &msp->ms_smo;

			int error = space_map_load(sm, sm_ops, SM_FREE, smo,
			    spa_meta_objset(msp->ms_group->mg_vd->vdev_spa));
			if (error) {
				metaslab_group_sort(msp->ms_group, msp, 0);
				return (error);
			}
			for (t = 0; t < TXG_DEFER_SIZE; t++)
				space_map_walk(&msp->ms_defermap[t],
				    space_map_claim, sm);
		}

		/*
		 * Track the bonus area as we activate new metaslabs.
		 */
		if (sm->sm_start > mg->mg_bonus_area) {
			mutex_enter(&mg->mg_lock);
			mg->mg_bonus_area = sm->sm_start;
			mutex_exit(&mg->mg_lock);
		}

		metaslab_group_sort(msp->ms_group, msp,
		    msp->ms_weight | activation_weight);
	}
	ASSERT(sm->sm_loaded);
	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (0);
}
static void
metaslab_passivate(metaslab_t *msp, uint64_t size)
{
	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again.  In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map.sm_space == 0);
	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}
/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa_meta_objset(spa);
	space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
	space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo_syncing;
	dmu_buf_t *db;
	dmu_tx_t *tx;
	int t;

	ASSERT(!vd->vdev_ishole);

	if (allocmap->sm_space == 0 && freemap->sm_space == 0)
		return;

	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_map.  No other thread can
	 * be modifying this txg's allocmap, freemap, freed_map, or smo.
	 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
	 * We drop it whenever we call into the DMU, because the DMU
	 * can call down to us (e.g. via zio_free()) at any time.
	 */

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	if (smo->smo_object == 0) {
		ASSERT(smo->smo_objsize == 0);
		ASSERT(smo->smo_alloc == 0);
		smo->smo_object = dmu_object_alloc(mos,
		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
		ASSERT(smo->smo_object != 0);
		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
		    (sm->sm_start >> vd->vdev_ms_shift),
		    sizeof (uint64_t), &smo->smo_object, tx);
	}

	mutex_enter(&msp->ms_lock);

	space_map_walk(freemap, space_map_add, freed_map);

	if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
	    2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
		/*
		 * The in-core space map representation is twice as compact
		 * as the on-disk one, so it's time to condense the latter
		 * by generating a pure allocmap from first principles.
		 *
		 * This metaslab is 100% allocated,
		 * minus the content of the in-core map (sm),
		 * minus what's been freed this txg (freed_map),
		 * minus deferred frees (ms_defermap[]),
		 * minus allocations from txgs in the future
		 * (because they haven't been committed yet).
		 */
		space_map_vacate(allocmap, NULL, NULL);
		space_map_vacate(freemap, NULL, NULL);

		space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);

		space_map_walk(sm, space_map_remove, allocmap);
		space_map_walk(freed_map, space_map_remove, allocmap);

		for (t = 0; t < TXG_DEFER_SIZE; t++)
			space_map_walk(&msp->ms_defermap[t],
			    space_map_remove, allocmap);

		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
			space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
			    space_map_remove, allocmap);

		mutex_exit(&msp->ms_lock);
		space_map_truncate(smo, mos, tx);
		mutex_enter(&msp->ms_lock);
	}

	space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
	space_map_sync(freemap, SM_FREE, smo, mos, tx);

	mutex_exit(&msp->ms_lock);

	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	ASSERT3U(db->db_size, >=, sizeof (*smo));
	bcopy(smo, db->db_data, sizeof (*smo));
	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);
}
/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	space_map_obj_t *smo = &msp->ms_smo;
	space_map_obj_t *smosync = &msp->ms_smo_syncing;
	space_map_t *sm = &msp->ms_map;
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	space_map_t *defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	int64_t alloc_delta, defer_delta;
	int t;

	ASSERT(!vd->vdev_ishole);

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * allocmaps and freemaps and add its capacity to the vdev.
	 */
	if (freed_map->sm_size == 0) {
		for (t = 0; t < TXG_SIZE; t++) {
			space_map_create(&msp->ms_allocmap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
			space_map_create(&msp->ms_freemap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
		}

		for (t = 0; t < TXG_DEFER_SIZE; t++)
			space_map_create(&msp->ms_defermap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);

		vdev_space_update(vd, 0, 0, sm->sm_size);
	}

	alloc_delta = smosync->smo_alloc - smo->smo_alloc;
	defer_delta = freed_map->sm_space - defer_map->sm_space;

	vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);

	ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
	ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);

	/*
	 * If there's a space_map_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 * Then, add defer_map (oldest deferred frees) to this map and
	 * transfer freed_map (this txg's frees) to defer_map.
	 */
	space_map_load_wait(sm);
	space_map_vacate(defer_map, sm->sm_loaded ? space_map_free : NULL, sm);
	space_map_vacate(freed_map, space_map_add, defer_map);

	*smo = *smosync;

	msp->ms_deferspace += defer_delta;
	ASSERT3S(msp->ms_deferspace, >=, 0);
	ASSERT3S(msp->ms_deferspace, <=, sm->sm_size);
	if (msp->ms_deferspace != 0) {
		/*
		 * Keep syncing this metaslab until all deferred frees
		 * are back in circulation.
		 */
		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
	}

	/*
	 * If the map is loaded but no longer active, evict it as soon as all
	 * future allocations have synced.  (If we unloaded it now and then
	 * loaded a moment later, the map wouldn't reflect those allocations.)
	 */
	if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		int evictable = 1;

		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
			if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)
				evictable = 0;

		if (evictable && !metaslab_debug)
			space_map_unload(sm);
	}

	metaslab_group_sort(mg, msp, metaslab_weight(msp));

	mutex_exit(&msp->ms_lock);
}
void
metaslab_sync_reassess(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	int64_t failures = mg->mg_alloc_failures;
	int m;

	/*
	 * Re-evaluate all metaslabs which have lower offsets than the
	 * bonus area.
	 */
	for (m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_map.sm_start > mg->mg_bonus_area)
			break;

		mutex_enter(&msp->ms_lock);
		metaslab_group_sort(mg, msp, metaslab_weight(msp));
		mutex_exit(&msp->ms_lock);
	}

	atomic_add_64(&mg->mg_alloc_failures, -failures);

	/*
	 * Prefetch the next potential metaslabs
	 */
	metaslab_prefetch(mg);
}
static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
	uint64_t start = msp->ms_map.sm_start >> ms_shift;

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (1ULL << 63);

	if (offset < start)
		return ((start - offset) << ms_shift);
	if (start < offset)
		return ((offset - start) << ms_shift);
	return (0);
}
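
/*
 * For example, with 1 GB metaslabs (ms_shift == 30), a DVA that falls in
 * metaslab 3 is reported as 4 GB away from metaslab 7 on the same vdev;
 * DVAs on other vdevs are treated as maximally distant (1ULL << 63).
 * metaslab_group_alloc() compares this against min_distance to keep ditto
 * copies of a block spread apart.
 */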
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
    uint64_t txg, uint64_t min_distance, dva_t *dva, int d, int flags)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t activation_weight;
	uint64_t target_distance;
	int i;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (i = 0; i < d; i++) {
		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_SECONDARY;
			break;
		}
	}

	for (;;) {
		boolean_t was_active;

		mutex_enter(&mg->mg_lock);
		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
			if (msp->ms_weight < asize) {
				spa_dbgmsg(spa, "%s: failed to meet weight "
				    "requirement: vdev %llu, txg %llu, mg %p, "
				    "msp %p, psize %llu, asize %llu, "
				    "failures %llu, weight %llu",
				    spa_name(spa), mg->mg_vd->vdev_id, txg,
				    mg, msp, psize, asize,
				    mg->mg_alloc_failures, msp->ms_weight);
				mutex_exit(&mg->mg_lock);
				return (-1ULL);
			}
			was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
				break;

			target_distance = min_distance +
			    (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);

			for (i = 0; i < d; i++)
				if (metaslab_distance(msp, &dva[i]) <
				    target_distance)
					break;
			if (i == d)
				break;
		}
		mutex_exit(&mg->mg_lock);
		if (msp == NULL)
			return (-1ULL);

		/*
		 * If we've already reached the allowable number of failed
		 * allocation attempts on this metaslab group then we
		 * consider skipping it. We skip it only if we're allowed
		 * to "fast" gang, the physical size is larger than
		 * a gang block, and we're attempting to allocate from
		 * the primary metaslab.
		 */
		if (mg->mg_alloc_failures > zfs_mg_alloc_failures &&
		    CAN_FASTGANG(flags) && psize > SPA_GANGBLOCKSIZE &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			spa_dbgmsg(spa, "%s: skipping metaslab group: "
			    "vdev %llu, txg %llu, mg %p, psize %llu, "
			    "asize %llu, failures %llu", spa_name(spa),
			    mg->mg_vd->vdev_id, txg, mg, psize, asize,
			    mg->mg_alloc_failures);
			return (-1ULL);
		}

		mutex_enter(&msp->ms_lock);

		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request. It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock.
		 */
		if (msp->ms_weight < asize || (was_active &&
		    !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY)) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			metaslab_passivate(msp,
			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (metaslab_activate(msp, activation_weight) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((offset = space_map_alloc(&msp->ms_map, asize)) != -1ULL)
			break;

		atomic_inc_64(&mg->mg_alloc_failures);

		metaslab_passivate(msp, space_map_maxsize(&msp->ms_map));

		mutex_exit(&msp->ms_lock);
	}

	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, asize);

	mutex_exit(&msp->ms_lock);

	return (offset);
}
/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
{
	metaslab_group_t *mg, *fast_mg, *rotor;
	vdev_t *vd;
	int dshift = 3;
	int all_zero;
	int zio_lock = B_FALSE;
	boolean_t allocatable;
	uint64_t offset = -1ULL;
	uint64_t asize;
	uint64_t distance;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 */
	if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
		return (ENOSPC);

	if (flags & METASLAB_FASTWRITE)
		mutex_enter(&mc->mc_fastwrite_lock);

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_aliquot because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly.  It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible.  If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about.  Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data.  With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header.  That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

		/*
		 * It's possible the vdev we're using as the hint no
		 * longer exists (i.e. removed). Consult the rotor when
		 * all else fails.
		 */
		if (vd != NULL) {
			mg = vd->vdev_mg;

			if (flags & METASLAB_HINTBP_AVOID &&
			    mg->mg_next != NULL)
				mg = mg->mg_next;
		} else {
			mg = mc->mc_rotor;
		}
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else if (flags & METASLAB_FASTWRITE) {
		mg = fast_mg = mc->mc_rotor;

		do {
			if (fast_mg->mg_vd->vdev_pending_fastwrite <
			    mg->mg_vd->vdev_pending_fastwrite)
				mg = fast_mg;
		} while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);
	} else {
		mg = mc->mc_rotor;
	}

	/*
	 * If the hint put us into the wrong metaslab class, or into a
	 * metaslab group that has been passivated, just follow the rotor.
	 */
	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
		mg = mc->mc_rotor;

	rotor = mg;
top:
	all_zero = B_TRUE;
	do {
		ASSERT(mg->mg_activation_count == 1);

		vd = mg->mg_vd;

		/*
		 * Don't allocate from faulted devices.
		 */
		if (zio_lock) {
			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
			allocatable = vdev_allocatable(vd);
			spa_config_exit(spa, SCL_ZIO, FTAG);
		} else {
			allocatable = vdev_allocatable(vd);
		}
		if (!allocatable)
			goto next;

		/*
		 * Avoid writing single-copy data to a failing vdev
		 */
		if ((vd->vdev_stat.vs_write_errors > 0 ||
		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
		    d == 0 && dshift == 3) {
			all_zero = B_FALSE;
			goto next;
		}

		ASSERT(mg->mg_class == mc);

		distance = vd->vdev_asize >> dshift;
		if (distance <= (1ULL << vd->vdev_ms_shift))
			distance = 0;
		else
			all_zero = B_FALSE;

		asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
		    dva, d, flags);
		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 */
			if (mc->mc_aliquot == 0) {
				vdev_stat_t *vs = &vd->vdev_stat;
				int64_t vu, cu;

				vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
				cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);

				/*
				 * Calculate how much more or less we should
				 * try to allocate from this device during
				 * this iteration around the rotor.
				 * For example, if a device is 80% full
				 * and the pool is 20% full then we should
				 * reduce allocations by 60% on this device.
				 *
				 * mg_bias = (20 - 80) * 512K / 100 = -307K
				 *
				 * This reduces allocations by 307K for this
				 * iteration.
				 */
				mg->mg_bias = ((cu - vu) *
				    (int64_t)mg->mg_aliquot) / 100;
			}

			if ((flags & METASLAB_FASTWRITE) ||
			    atomic_add_64_nv(&mc->mc_aliquot, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_aliquot = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
			DVA_SET_ASIZE(&dva[d], asize);

			if (flags & METASLAB_FASTWRITE) {
				atomic_add_64(&vd->vdev_pending_fastwrite,
				    psize);
				mutex_exit(&mc->mc_fastwrite_lock);
			}

			return (0);
		}
next:
		mc->mc_rotor = mg->mg_next;
		mc->mc_aliquot = 0;
	} while ((mg = mg->mg_next) != rotor);

	if (!all_zero) {
		dshift++;
		ASSERT(dshift < 64);
		goto top;
	}

	if (!allocatable && !zio_lock) {
		dshift = 3;
		zio_lock = B_TRUE;
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	if (flags & METASLAB_FASTWRITE)
		mutex_exit(&mc->mc_fastwrite_lock);

	return (ENOSPC);
}
/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;

	ASSERT(DVA_IS_VALID(dva));

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset);
		ASSERT(0);
		return;
	}

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if (now) {
		space_map_remove(&msp->ms_allocmap[txg & TXG_MASK],
		    offset, size);
		space_map_free(&msp->ms_map, offset, size);
	} else {
		if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);
}
/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;
	int error = 0;

	ASSERT(DVA_IS_VALID(dva));

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
		return (ENXIO);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_map.sm_loaded)
		error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);

	if (error == 0 && !space_map_contains(&msp->ms_map, offset, size))
		error = ENOENT;

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	space_map_claim(&msp->ms_map, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);

	return (0);
}
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int d, error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (ENOSPC);
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

	for (d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags);
		if (error) {
			for (d--; d >= 0; d--) {
				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, txg);

	return (0);
}
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int d, ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++)
		metaslab_free_dva(spa, &dva[d], txg, now);

	spa_config_exit(spa, SCL_FREE, FTAG);
}
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int d, error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			break;

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}
void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	vdev_t *vd;
	int d;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		atomic_add_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}
void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	vdev_t *vd;
	int d;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
		atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(metaslab_debug, int, 0644);
MODULE_PARM_DESC(metaslab_debug, "keep space maps in core to verify frees");
#endif /* _KERNEL && HAVE_SPL */