/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 */
29 | ||
30 | #ifndef _SYS_METASLAB_IMPL_H | |
31 | #define _SYS_METASLAB_IMPL_H | |
32 | ||
34dc7c2f BB |
33 | #include <sys/metaslab.h> |
34 | #include <sys/space_map.h> | |
93cf2076 | 35 | #include <sys/range_tree.h> |
34dc7c2f BB |
36 | #include <sys/vdev.h> |
37 | #include <sys/txg.h> | |
38 | #include <sys/avl.h> | |
39 | ||
40 | #ifdef __cplusplus | |
41 | extern "C" { | |
42 | #endif | |
43 | ||
4e21fd06 DB |
44 | /* |
45 | * Metaslab allocation tracing record. | |
46 | */ | |
47 | typedef struct metaslab_alloc_trace { | |
48 | list_node_t mat_list_node; | |
49 | metaslab_group_t *mat_mg; | |
50 | metaslab_t *mat_msp; | |
51 | uint64_t mat_size; | |
52 | uint64_t mat_weight; | |
53 | uint32_t mat_dva_id; | |
54 | uint64_t mat_offset; | |
492f64e9 | 55 | int mat_allocator; |
4e21fd06 DB |
56 | } metaslab_alloc_trace_t; |
57 | ||
/*
 * Used by the metaslab allocation tracing facility to indicate
 * error conditions. These errors are stored to the offset member
 * of the metaslab_alloc_trace_t record and displayed by mdb.
 *
 * The values intentionally sit at the very top of the 64-bit offset
 * space so they can never collide with a valid allocation offset.
 */
typedef enum trace_alloc_type {
	TRACE_ALLOC_FAILURE	= -1ULL,
	TRACE_TOO_SMALL		= -2ULL,
	TRACE_FORCE_GANG	= -3ULL,
	TRACE_NOT_ALLOCATABLE	= -4ULL,
	TRACE_GROUP_FAILURE	= -5ULL,
	TRACE_ENOSPC		= -6ULL,
	TRACE_CONDENSING	= -7ULL,
	TRACE_VDEV_ERROR	= -8ULL,
	TRACE_INITIALIZING	= -9ULL
} trace_alloc_type_t;
74 | ||
/*
 * The top bits of a metaslab weight encode its activation state
 * (primary, secondary, or claim) and, in bit 60, which weight
 * encoding (space-based vs. segment-based) is in use; see the
 * weight layout comment below.
 */
#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_WEIGHT_CLAIM		(1ULL << 61)
#define	METASLAB_WEIGHT_TYPE		(1ULL << 60)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY | \
	METASLAB_WEIGHT_CLAIM)
82 | |
/*
 * The metaslab weight is used to encode the amount of free space in a
 * metaslab, such that the "best" metaslab appears first when sorting the
 * metaslabs by weight. The weight (and therefore the "best" metaslab) can
 * be determined in two different ways: by computing a weighted sum of all
 * the free space in the metaslab (a space based weight) or by counting only
 * the free segments of the largest size (a segment based weight). We prefer
 * the segment based weight because it reflects how the free space is
 * comprised, but we cannot always use it -- legacy pools do not have the
 * space map histogram information necessary to determine the largest
 * contiguous regions. Pools that have the space map histogram determine
 * the segment weight by looking at each bucket in the histogram and
 * determining the free space whose size in bytes is in the range:
 *	[2^i, 2^(i+1))
 * We then encode the largest index, i, that contains regions into the
 * segment-weighted value.
 *
 * Space-based weight:
 *
 *	 64      56      48      40      32      24      16      8       0
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *	|PSC1|                  weighted-free space                     |
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *
 *	PS - indicates primary and secondary activation
 *	C - indicates activation for claimed block zio
 *	space - the fragmentation-weighted space
 *
 * Segment-based weight:
 *
 *	 64      56      48      40      32      24      16      8       0
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *	|PSC0| idx|             count of segments in region              |
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *
 *	PS - indicates primary and secondary activation
 *	C - indicates activation for claimed block zio
 *	idx - index for the highest bucket in the histogram
 *	count - number of segments in the specified bucket
 */
#define	WEIGHT_GET_ACTIVE(weight)	BF64_GET((weight), 61, 3)
#define	WEIGHT_SET_ACTIVE(weight, x)	BF64_SET((weight), 61, 3, x)

#define	WEIGHT_IS_SPACEBASED(weight)	\
	((weight) == 0 || BF64_GET((weight), 60, 1))
#define	WEIGHT_SET_SPACEBASED(weight)	BF64_SET((weight), 60, 1, 1)

/*
 * These macros are only applicable to segment-based weighting.
 */
#define	WEIGHT_GET_INDEX(weight)	BF64_GET((weight), 54, 6)
#define	WEIGHT_SET_INDEX(weight, x)	BF64_SET((weight), 54, 6, x)
#define	WEIGHT_GET_COUNT(weight)	BF64_GET((weight), 0, 54)
#define	WEIGHT_SET_COUNT(weight, x)	BF64_SET((weight), 0, 54, x)
f3a7f661 GW |
138 | /* |
139 | * A metaslab class encompasses a category of allocatable top-level vdevs. | |
140 | * Each top-level vdev is associated with a metaslab group which defines | |
141 | * the allocatable region for that vdev. Examples of these categories include | |
142 | * "normal" for data block allocations (i.e. main pool allocations) or "log" | |
143 | * for allocations designated for intent log devices (i.e. slog devices). | |
144 | * When a block allocation is requested from the SPA it is associated with a | |
145 | * metaslab_class_t, and only top-level vdevs (i.e. metaslab groups) belonging | |
146 | * to the class can be used to satisfy that request. Allocations are done | |
147 | * by traversing the metaslab groups that are linked off of the mc_rotor field. | |
148 | * This rotor points to the next metaslab group where allocations will be | |
149 | * attempted. Allocating a block is a 3 step process -- select the metaslab | |
150 | * group, select the metaslab, and then allocate the block. The metaslab | |
151 | * class defines the low-level block allocator that will be used as the | |
152 | * final step in allocation. These allocators are pluggable allowing each class | |
153 | * to use a block allocator that best suits that class. | |
154 | */ | |
34dc7c2f | 155 | struct metaslab_class { |
3dfb57a3 | 156 | kmutex_t mc_lock; |
428870ff | 157 | spa_t *mc_spa; |
34dc7c2f | 158 | metaslab_group_t *mc_rotor; |
93cf2076 | 159 | metaslab_ops_t *mc_ops; |
428870ff | 160 | uint64_t mc_aliquot; |
3dfb57a3 DB |
161 | |
162 | /* | |
163 | * Track the number of metaslab groups that have been initialized | |
164 | * and can accept allocations. An initialized metaslab group is | |
165 | * one has been completely added to the config (i.e. we have | |
166 | * updated the MOS config and the space has been added to the pool). | |
167 | */ | |
168 | uint64_t mc_groups; | |
169 | ||
170 | /* | |
171 | * Toggle to enable/disable the allocation throttle. | |
172 | */ | |
173 | boolean_t mc_alloc_throttle_enabled; | |
174 | ||
175 | /* | |
176 | * The allocation throttle works on a reservation system. Whenever | |
177 | * an asynchronous zio wants to perform an allocation it must | |
178 | * first reserve the number of blocks that it wants to allocate. | |
179 | * If there aren't sufficient slots available for the pending zio | |
180 | * then that I/O is throttled until more slots free up. The current | |
181 | * number of reserved allocations is maintained by the mc_alloc_slots | |
182 | * refcount. The mc_alloc_max_slots value determines the maximum | |
183 | * number of allocations that the system allows. Gang blocks are | |
184 | * allowed to reserve slots even if we've reached the maximum | |
185 | * number of allocations allowed. | |
186 | */ | |
492f64e9 | 187 | uint64_t *mc_alloc_max_slots; |
c13060e4 | 188 | zfs_refcount_t *mc_alloc_slots; |
3dfb57a3 | 189 | |
ac72fac3 | 190 | uint64_t mc_alloc_groups; /* # of allocatable groups */ |
3dfb57a3 | 191 | |
428870ff BB |
192 | uint64_t mc_alloc; /* total allocated space */ |
193 | uint64_t mc_deferred; /* total deferred frees */ | |
194 | uint64_t mc_space; /* total space (alloc + free) */ | |
195 | uint64_t mc_dspace; /* total deflated space */ | |
f3a7f661 | 196 | uint64_t mc_histogram[RANGE_TREE_HISTOGRAM_SIZE]; |
34dc7c2f BB |
197 | }; |
198 | ||
f3a7f661 GW |
199 | /* |
200 | * Metaslab groups encapsulate all the allocatable regions (i.e. metaslabs) | |
4e33ba4c | 201 | * of a top-level vdev. They are linked together to form a circular linked |
f3a7f661 GW |
202 | * list and can belong to only one metaslab class. Metaslab groups may become |
203 | * ineligible for allocations for a number of reasons such as limited free | |
204 | * space, fragmentation, or going offline. When this happens the allocator will | |
205 | * simply find the next metaslab group in the linked list and attempt | |
206 | * to allocate from that group instead. | |
207 | */ | |
34dc7c2f BB |
208 | struct metaslab_group { |
209 | kmutex_t mg_lock; | |
492f64e9 PD |
210 | metaslab_t **mg_primaries; |
211 | metaslab_t **mg_secondaries; | |
34dc7c2f BB |
212 | avl_tree_t mg_metaslab_tree; |
213 | uint64_t mg_aliquot; | |
ac72fac3 | 214 | boolean_t mg_allocatable; /* can we allocate? */ |
492f64e9 | 215 | uint64_t mg_ms_ready; |
3dfb57a3 DB |
216 | |
217 | /* | |
218 | * A metaslab group is considered to be initialized only after | |
219 | * we have updated the MOS config and added the space to the pool. | |
220 | * We only allow allocation attempts to a metaslab group if it | |
221 | * has been initialized. | |
222 | */ | |
223 | boolean_t mg_initialized; | |
224 | ||
ac72fac3 | 225 | uint64_t mg_free_capacity; /* percentage free */ |
34dc7c2f | 226 | int64_t mg_bias; |
428870ff | 227 | int64_t mg_activation_count; |
34dc7c2f BB |
228 | metaslab_class_t *mg_class; |
229 | vdev_t *mg_vd; | |
93cf2076 | 230 | taskq_t *mg_taskq; |
34dc7c2f BB |
231 | metaslab_group_t *mg_prev; |
232 | metaslab_group_t *mg_next; | |
3dfb57a3 DB |
233 | |
234 | /* | |
492f64e9 PD |
235 | * In order for the allocation throttle to function properly, we cannot |
236 | * have too many IOs going to each disk by default; the throttle | |
237 | * operates by allocating more work to disks that finish quickly, so | |
238 | * allocating larger chunks to each disk reduces its effectiveness. | |
239 | * However, if the number of IOs going to each allocator is too small, | |
240 | * we will not perform proper aggregation at the vdev_queue layer, | |
241 | * also resulting in decreased performance. Therefore, we will use a | |
242 | * ramp-up strategy. | |
243 | * | |
244 | * Each allocator in each metaslab group has a current queue depth | |
245 | * (mg_alloc_queue_depth[allocator]) and a current max queue depth | |
246 | * (mg_cur_max_alloc_queue_depth[allocator]), and each metaslab group | |
247 | * has an absolute max queue depth (mg_max_alloc_queue_depth). We | |
248 | * add IOs to an allocator until the mg_alloc_queue_depth for that | |
249 | * allocator hits the cur_max. Every time an IO completes for a given | |
250 | * allocator on a given metaslab group, we increment its cur_max until | |
251 | * it reaches mg_max_alloc_queue_depth. The cur_max resets every txg to | |
252 | * help protect against disks that decrease in performance over time. | |
253 | * | |
254 | * It's possible for an allocator to handle more allocations than | |
255 | * its max. This can occur when gang blocks are required or when other | |
256 | * groups are unable to handle their share of allocations. | |
3dfb57a3 DB |
257 | */ |
258 | uint64_t mg_max_alloc_queue_depth; | |
492f64e9 | 259 | uint64_t *mg_cur_max_alloc_queue_depth; |
c13060e4 | 260 | zfs_refcount_t *mg_alloc_queue_depth; |
492f64e9 | 261 | int mg_allocators; |
3dfb57a3 DB |
262 | /* |
263 | * A metalab group that can no longer allocate the minimum block | |
264 | * size will set mg_no_free_space. Once a metaslab group is out | |
265 | * of space then its share of work must be distributed to other | |
266 | * groups. | |
267 | */ | |
268 | boolean_t mg_no_free_space; | |
269 | ||
270 | uint64_t mg_allocations; | |
271 | uint64_t mg_failed_allocations; | |
f3a7f661 GW |
272 | uint64_t mg_fragmentation; |
273 | uint64_t mg_histogram[RANGE_TREE_HISTOGRAM_SIZE]; | |
619f0976 GW |
274 | |
275 | int mg_ms_initializing; | |
276 | boolean_t mg_initialize_updating; | |
277 | kmutex_t mg_ms_initialize_lock; | |
278 | kcondvar_t mg_ms_initialize_cv; | |
34dc7c2f BB |
279 | }; |
280 | ||
/*
 * This value defines the number of elements in the ms_lbas array. The value
 * of 64 was chosen as it covers all power of 2 buckets up to UINT64_MAX.
 * This is the equivalent of highbit(UINT64_MAX).
 */
#define	MAX_LBAS	64
287 | ||
288 | /* | |
258553d3 | 289 | * Each metaslab maintains a set of in-core trees to track metaslab |
d2734cce | 290 | * operations. The in-core free tree (ms_allocatable) contains the list of |
258553d3 | 291 | * free segments which are eligible for allocation. As blocks are |
d2734cce SD |
292 | * allocated, the allocated segment are removed from the ms_allocatable and |
293 | * added to a per txg allocation tree (ms_allocating). As blocks are | |
294 | * freed, they are added to the free tree (ms_freeing). These trees | |
a1d477c2 MA |
295 | * allow us to process all allocations and frees in syncing context |
296 | * where it is safe to update the on-disk space maps. An additional set | |
297 | * of in-core trees is maintained to track deferred frees | |
d2734cce SD |
298 | * (ms_defer). Once a block is freed it will move from the |
299 | * ms_freed to the ms_defer tree. A deferred free means that a block | |
258553d3 TC |
300 | * has been freed but cannot be used by the pool until TXG_DEFER_SIZE |
301 | * transactions groups later. For example, a block that is freed in txg | |
302 | * 50 will not be available for reallocation until txg 52 (50 + | |
303 | * TXG_DEFER_SIZE). This provides a safety net for uberblock rollback. | |
304 | * A pool could be safely rolled back TXG_DEFERS_SIZE transactions | |
305 | * groups and ensure that no block has been reallocated. | |
93cf2076 GW |
306 | * |
307 | * The simplified transition diagram looks like this: | |
308 | * | |
309 | * | |
310 | * ALLOCATE | |
311 | * | | |
312 | * V | |
d2734cce | 313 | * free segment (ms_allocatable) -> ms_allocating[4] -> (write to space map) |
93cf2076 | 314 | * ^ |
d2734cce SD |
315 | * | ms_freeing <--- FREE |
316 | * | | | |
317 | * | v | |
318 | * | ms_freed | |
319 | * | | | |
320 | * +-------- ms_defer[2] <-------+-------> (write to space map) | |
e51be066 | 321 | * |
93cf2076 GW |
322 | * |
323 | * Each metaslab's space is tracked in a single space map in the MOS, | |
258553d3 TC |
324 | * which is only updated in syncing context. Each time we sync a txg, |
325 | * we append the allocs and frees from that txg to the space map. The | |
326 | * pool space is only updated once all metaslabs have finished syncing. | |
e51be066 | 327 | * |
258553d3 TC |
328 | * To load the in-core free tree we read the space map from disk. This |
329 | * object contains a series of alloc and free records that are combined | |
330 | * to make up the list of all free segments in this metaslab. These | |
d2734cce SD |
331 | * segments are represented in-core by the ms_allocatable and are stored |
332 | * in an AVL tree. | |
e51be066 | 333 | * |
93cf2076 | 334 | * As the space map grows (as a result of the appends) it will |
258553d3 TC |
335 | * eventually become space-inefficient. When the metaslab's in-core |
336 | * free tree is zfs_condense_pct/100 times the size of the minimal | |
337 | * on-disk representation, we rewrite it in its minimized form. If a | |
338 | * metaslab needs to condense then we must set the ms_condensing flag to | |
339 | * ensure that allocations are not performed on the metaslab that is | |
340 | * being written. | |
34dc7c2f BB |
341 | */ |
342 | struct metaslab { | |
425d3237 SD |
343 | /* |
344 | * This is the main lock of the metaslab and its purpose is to | |
345 | * coordinate our allocations and frees [e.g metaslab_block_alloc(), | |
346 | * metaslab_free_concrete(), ..etc] with our various syncing | |
347 | * procedures [e.g. metaslab_sync(), metaslab_sync_done(), ..etc]. | |
348 | * | |
349 | * The lock is also used during some miscellaneous operations like | |
350 | * using the metaslab's histogram for the metaslab group's histogram | |
351 | * aggregation, or marking the metaslab for initialization. | |
352 | */ | |
93cf2076 | 353 | kmutex_t ms_lock; |
425d3237 SD |
354 | |
355 | /* | |
356 | * Acquired together with the ms_lock whenever we expect to | |
357 | * write to metaslab data on-disk (i.e flushing entries to | |
358 | * the metaslab's space map). It helps coordinate readers of | |
359 | * the metaslab's space map [see spa_vdev_remove_thread()] | |
360 | * with writers [see metaslab_sync()]. | |
361 | * | |
362 | * Note that metaslab_load(), even though a reader, uses | |
363 | * a completely different mechanism to deal with the reading | |
364 | * of the metaslab's space map based on ms_synced_length. That | |
365 | * said, the function still uses the ms_sync_lock after it | |
366 | * has read the ms_sm [see relevant comment in metaslab_load() | |
367 | * as to why]. | |
368 | */ | |
a1d477c2 | 369 | kmutex_t ms_sync_lock; |
425d3237 | 370 | |
93cf2076 GW |
371 | kcondvar_t ms_load_cv; |
372 | space_map_t *ms_sm; | |
93cf2076 GW |
373 | uint64_t ms_id; |
374 | uint64_t ms_start; | |
375 | uint64_t ms_size; | |
f3a7f661 | 376 | uint64_t ms_fragmentation; |
93cf2076 | 377 | |
d2734cce SD |
378 | range_tree_t *ms_allocating[TXG_SIZE]; |
379 | range_tree_t *ms_allocatable; | |
425d3237 | 380 | uint64_t ms_allocated_this_txg; |
93cf2076 | 381 | |
258553d3 TC |
382 | /* |
383 | * The following range trees are accessed only from syncing context. | |
384 | * ms_free*tree only have entries while syncing, and are empty | |
385 | * between syncs. | |
386 | */ | |
d2734cce SD |
387 | range_tree_t *ms_freeing; /* to free this syncing txg */ |
388 | range_tree_t *ms_freed; /* already freed this syncing txg */ | |
389 | range_tree_t *ms_defer[TXG_DEFER_SIZE]; | |
390 | range_tree_t *ms_checkpointing; /* to add to the checkpoint */ | |
258553d3 | 391 | |
93cf2076 | 392 | boolean_t ms_condensing; /* condensing? */ |
f3a7f661 | 393 | boolean_t ms_condense_wanted; |
d2734cce | 394 | uint64_t ms_condense_checked_txg; |
4e21fd06 | 395 | |
619f0976 GW |
396 | uint64_t ms_initializing; /* leaves initializing this ms */ |
397 | ||
4e21fd06 | 398 | /* |
b194fab0 SD |
399 | * We must always hold the ms_lock when modifying ms_loaded |
400 | * and ms_loading. | |
4e21fd06 | 401 | */ |
93cf2076 GW |
402 | boolean_t ms_loaded; |
403 | boolean_t ms_loading; | |
404 | ||
425d3237 SD |
405 | /* |
406 | * Tracks the exact amount of allocated space of this metaslab | |
407 | * (and specifically the metaslab's space map) up to the most | |
408 | * recently completed sync pass [see usage in metaslab_sync()]. | |
409 | */ | |
410 | uint64_t ms_allocated_space; | |
428870ff | 411 | int64_t ms_deferspace; /* sum of ms_defermap[] space */ |
34dc7c2f | 412 | uint64_t ms_weight; /* weight vs. others in group */ |
4e21fd06 DB |
413 | uint64_t ms_activation_weight; /* activation weight */ |
414 | ||
415 | /* | |
416 | * Track of whenever a metaslab is selected for loading or allocation. | |
417 | * We use this value to determine how long the metaslab should | |
418 | * stay cached. | |
419 | */ | |
420 | uint64_t ms_selected_txg; | |
421 | ||
422 | uint64_t ms_alloc_txg; /* last successful alloc (debug only) */ | |
423 | uint64_t ms_max_size; /* maximum allocatable size */ | |
93cf2076 | 424 | |
492f64e9 PD |
425 | /* |
426 | * -1 if it's not active in an allocator, otherwise set to the allocator | |
427 | * this metaslab is active for. | |
428 | */ | |
429 | int ms_allocator; | |
430 | boolean_t ms_primary; /* Only valid if ms_allocator is not -1 */ | |
431 | ||
93cf2076 GW |
432 | /* |
433 | * The metaslab block allocators can optionally use a size-ordered | |
434 | * range tree and/or an array of LBAs. Not all allocators use | |
d2734cce SD |
435 | * this functionality. The ms_allocatable_by_size should always |
436 | * contain the same number of segments as the ms_allocatable. The | |
437 | * only difference is that the ms_allocatable_by_size is ordered by | |
438 | * segment sizes. | |
93cf2076 | 439 | */ |
d2734cce | 440 | avl_tree_t ms_allocatable_by_size; |
93cf2076 GW |
441 | uint64_t ms_lbas[MAX_LBAS]; |
442 | ||
34dc7c2f BB |
443 | metaslab_group_t *ms_group; /* metaslab group */ |
444 | avl_node_t ms_group_node; /* node in metaslab group tree */ | |
445 | txg_node_t ms_txg_node; /* per-txg dirty metaslab links */ | |
492f64e9 | 446 | |
425d3237 SD |
447 | /* updated every time we are done syncing the metaslab's space map */ |
448 | uint64_t ms_synced_length; | |
449 | ||
492f64e9 | 450 | boolean_t ms_new; |
34dc7c2f BB |
451 | }; |
452 | ||
453 | #ifdef __cplusplus | |
454 | } | |
455 | #endif | |
456 | ||
457 | #endif /* _SYS_METASLAB_IMPL_H */ |