/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 */

#ifndef	_SYS_METASLAB_IMPL_H
#define	_SYS_METASLAB_IMPL_H

#include <sys/metaslab.h>
#include <sys/space_map.h>
#include <sys/range_tree.h>
#include <sys/vdev.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/multilist.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Metaslab allocation tracing record.
 */
typedef struct metaslab_alloc_trace {
	list_node_t		mat_list_node;
	metaslab_group_t	*mat_mg;
	metaslab_t		*mat_msp;
	uint64_t		mat_size;
	uint64_t		mat_weight;
	uint32_t		mat_dva_id;
	uint64_t		mat_offset;
	int			mat_allocator;
} metaslab_alloc_trace_t;

/*
 * Used by the metaslab allocation tracing facility to indicate
 * error conditions. These errors are stored to the offset member
 * of the metaslab_alloc_trace_t record and displayed by mdb.
 */
typedef enum trace_alloc_type {
	TRACE_ALLOC_FAILURE	= -1ULL,
	TRACE_TOO_SMALL		= -2ULL,
	TRACE_FORCE_GANG	= -3ULL,
	TRACE_NOT_ALLOCATABLE	= -4ULL,
	TRACE_GROUP_FAILURE	= -5ULL,
	TRACE_ENOSPC		= -6ULL,
	TRACE_CONDENSING	= -7ULL,
	TRACE_VDEV_ERROR	= -8ULL,
	TRACE_DISABLED		= -9ULL,
} trace_alloc_type_t;
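
/*
 * Illustrative sketch (not part of the interface): since the error values
 * above sit at the very top of the 64-bit range, a consumer walking a zio's
 * allocation trace list can tell an error code apart from a real device
 * offset stored in mat_offset, e.g.:
 *
 *	if (mat->mat_offset >= (uint64_t)TRACE_DISABLED)
 *		// mat_offset encodes one of the trace_alloc_type_t errors
 *	else
 *		// mat_offset is the offset that was allocated from the vdev
 */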

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_WEIGHT_CLAIM		(1ULL << 61)
#define	METASLAB_WEIGHT_TYPE		(1ULL << 60)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY | \
	METASLAB_WEIGHT_CLAIM)

/*
 * The metaslab weight is used to encode the amount of free space in a
 * metaslab, such that the "best" metaslab appears first when sorting the
 * metaslabs by weight. The weight (and therefore the "best" metaslab) can
 * be determined in two different ways: by computing a weighted sum of all
 * the free space in the metaslab (a space-based weight) or by counting only
 * the free segments of the largest size (a segment-based weight). We prefer
 * the segment-based weight because it reflects how the free space is
 * composed, but we cannot always use it -- legacy pools do not have the
 * space map histogram information necessary to determine the largest
 * contiguous regions. Pools that have the space map histogram determine
 * the segment weight by looking at each bucket in the histogram and
 * determining the free space whose size in bytes is in the range:
 *	[2^i, 2^(i+1))
 * We then encode the largest index, i, that contains regions into the
 * segment-weighted value.
 *
 * Space-based weight:
 *
 *	64	56	48	40	32	24	16	8	0
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *	|PSC1|                  weighted-free space                     |
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *
 *	PS - indicates primary and secondary activation
 *	C - indicates activation for claimed block zio
 *	space - the fragmentation-weighted space
 *
 * Segment-based weight:
 *
 *	64	56	48	40	32	24	16	8	0
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *	|PSC0| idx|             count of segments in region             |
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *
 *	PS - indicates primary and secondary activation
 *	C - indicates activation for claimed block zio
 *	idx - index for the highest bucket in the histogram
 *	count - number of segments in the specified bucket
 */
#define	WEIGHT_GET_ACTIVE(weight)	BF64_GET((weight), 61, 3)
#define	WEIGHT_SET_ACTIVE(weight, x)	BF64_SET((weight), 61, 3, x)

#define	WEIGHT_IS_SPACEBASED(weight)		\
	((weight) == 0 || BF64_GET((weight), 60, 1))
#define	WEIGHT_SET_SPACEBASED(weight)	BF64_SET((weight), 60, 1, 1)

/*
 * These macros are only applicable to segment-based weighting.
 */
#define	WEIGHT_GET_INDEX(weight)	BF64_GET((weight), 54, 6)
#define	WEIGHT_SET_INDEX(weight, x)	BF64_SET((weight), 54, 6, x)
#define	WEIGHT_GET_COUNT(weight)	BF64_GET((weight), 0, 54)
#define	WEIGHT_SET_COUNT(weight, x)	BF64_SET((weight), 0, 54, x)
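
/*
 * Illustrative sketch (not part of the on-disk or in-core interface):
 * composing and decoding a segment-based weight with the macros above.
 * The index and count are made-up numbers.
 *
 *	uint64_t w = 0;
 *
 *	WEIGHT_SET_COUNT(w, 100);	// 100 free segments ...
 *	WEIGHT_SET_INDEX(w, 20);	// ... each in [2^20, 2^21) bytes
 *	WEIGHT_SET_ACTIVE(w, 0);	// not activated for any allocator
 *
 *	// WEIGHT_IS_SPACEBASED(w) is false: w is nonzero and bit 60
 *	// (METASLAB_WEIGHT_TYPE) is clear, so this is a segment-based
 *	// weight.  WEIGHT_GET_INDEX(w) == 20, WEIGHT_GET_COUNT(w) == 100.
 *	// Because idx occupies higher bits than count, a larger index
 *	// always sorts ahead of any count, making "largest contiguous
 *	// free region" the primary sort key.
 */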

/*
 * A metaslab class encompasses a category of allocatable top-level vdevs.
 * Each top-level vdev is associated with a metaslab group which defines
 * the allocatable region for that vdev. Examples of these categories include
 * "normal" for data block allocations (i.e. main pool allocations) or "log"
 * for allocations designated for intent log devices (i.e. slog devices).
 * When a block allocation is requested from the SPA it is associated with a
 * metaslab_class_t, and only top-level vdevs (i.e. metaslab groups) belonging
 * to the class can be used to satisfy that request. Allocations are done
 * by traversing the metaslab groups that are linked off of the mc_rotor field.
 * This rotor points to the next metaslab group where allocations will be
 * attempted. Allocating a block is a 3 step process -- select the metaslab
 * group, select the metaslab, and then allocate the block. The metaslab
 * class defines the low-level block allocator that will be used as the
 * final step in allocation. These allocators are pluggable allowing each class
 * to use a block allocator that best suits that class.
 */
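/*
 * A rough sketch of the three steps described above (pseudocode only; the
 * real entry points live in metaslab.c and take many more parameters):
 *
 *	// 1. pick a group: start at mc->mc_rotor and follow mg_next until
 *	//    a group that can accept allocations (initialized, not out of
 *	//    space) is found
 *	// 2. pick a metaslab: search that group's metaslab tree, preferring
 *	//    the highest-weighted metaslab that can be activated
 *	// 3. allocate the block: invoke the class's pluggable low-level
 *	//    allocator (mc->mc_ops) against that metaslab's range trees
 */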
struct metaslab_class {
	kmutex_t		mc_lock;
	spa_t			*mc_spa;
	metaslab_group_t	*mc_rotor;
	metaslab_ops_t		*mc_ops;
	uint64_t		mc_aliquot;

	/*
	 * Track the number of metaslab groups that have been initialized
	 * and can accept allocations. An initialized metaslab group is
	 * one that has been completely added to the config (i.e. we have
	 * updated the MOS config and the space has been added to the pool).
	 */
	uint64_t		mc_groups;

	/*
	 * Toggle to enable/disable the allocation throttle.
	 */
	boolean_t		mc_alloc_throttle_enabled;

	/*
	 * The allocation throttle works on a reservation system. Whenever
	 * an asynchronous zio wants to perform an allocation it must
	 * first reserve the number of blocks that it wants to allocate.
	 * If there aren't sufficient slots available for the pending zio
	 * then that I/O is throttled until more slots free up. The current
	 * number of reserved allocations is maintained by the mc_alloc_slots
	 * refcount. The mc_alloc_max_slots value determines the maximum
	 * number of allocations that the system allows. Gang blocks are
	 * allowed to reserve slots even if we've reached the maximum
	 * number of allocations allowed.
	 */
	uint64_t		*mc_alloc_max_slots;
	zfs_refcount_t		*mc_alloc_slots;
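
	/*
	 * Hedged sketch of the reservation flow described above (pseudocode,
	 * not the actual function names or signatures), for allocator `a`:
	 *
	 *	if (refcount(mc_alloc_slots[a]) + slots_needed <=
	 *	    mc_alloc_max_slots[a] || is_gang_block)
	 *		reserve(mc_alloc_slots[a], slots_needed); // proceed
	 *	else
	 *		throttle();	// re-queue until slots free up
	 *	// on zio completion: release(mc_alloc_slots[a], slots_needed)
	 */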

	uint64_t		mc_alloc_groups; /* # of allocatable groups */

	uint64_t		mc_alloc;	/* total allocated space */
	uint64_t		mc_deferred;	/* total deferred frees */
	uint64_t		mc_space;	/* total space (alloc + free) */
	uint64_t		mc_dspace;	/* total deflated space */
	uint64_t		mc_histogram[RANGE_TREE_HISTOGRAM_SIZE];

	/*
	 * List of all loaded metaslabs in the class, sorted in order of most
	 * recent use.
	 */
	multilist_t		*mc_metaslab_txg_list;
};

/*
 * Metaslab groups encapsulate all the allocatable regions (i.e. metaslabs)
 * of a top-level vdev. They are linked together to form a circular linked
 * list and can belong to only one metaslab class. Metaslab groups may become
 * ineligible for allocations for a number of reasons such as limited free
 * space, fragmentation, or going offline. When this happens the allocator will
 * simply find the next metaslab group in the linked list and attempt
 * to allocate from that group instead.
 */
struct metaslab_group {
	kmutex_t		mg_lock;
	metaslab_t		**mg_primaries;
	metaslab_t		**mg_secondaries;
	avl_tree_t		mg_metaslab_tree;
	uint64_t		mg_aliquot;
	boolean_t		mg_allocatable;		/* can we allocate? */
	uint64_t		mg_ms_ready;

	/*
	 * A metaslab group is considered to be initialized only after
	 * we have updated the MOS config and added the space to the pool.
	 * We only allow allocation attempts to a metaslab group if it
	 * has been initialized.
	 */
	boolean_t		mg_initialized;

	uint64_t		mg_free_capacity;	/* percentage free */
	int64_t			mg_bias;
	int64_t			mg_activation_count;
	metaslab_class_t	*mg_class;
	vdev_t			*mg_vd;
	taskq_t			*mg_taskq;
	metaslab_group_t	*mg_prev;
	metaslab_group_t	*mg_next;

	/*
	 * In order for the allocation throttle to function properly, we cannot
	 * have too many IOs going to each disk by default; the throttle
	 * operates by allocating more work to disks that finish quickly, so
	 * allocating larger chunks to each disk reduces its effectiveness.
	 * However, if the number of IOs going to each allocator is too small,
	 * we will not perform proper aggregation at the vdev_queue layer,
	 * also resulting in decreased performance. Therefore, we will use a
	 * ramp-up strategy.
	 *
	 * Each allocator in each metaslab group has a current queue depth
	 * (mg_alloc_queue_depth[allocator]) and a current max queue depth
	 * (mg_cur_max_alloc_queue_depth[allocator]), and each metaslab group
	 * has an absolute max queue depth (mg_max_alloc_queue_depth). We
	 * add IOs to an allocator until the mg_alloc_queue_depth for that
	 * allocator hits the cur_max. Every time an IO completes for a given
	 * allocator on a given metaslab group, we increment its cur_max until
	 * it reaches mg_max_alloc_queue_depth. The cur_max resets every txg to
	 * help protect against disks that decrease in performance over time.
	 *
	 * It's possible for an allocator to handle more allocations than
	 * its max. This can occur when gang blocks are required or when other
	 * groups are unable to handle their share of allocations.
	 */
	uint64_t		mg_max_alloc_queue_depth;
	uint64_t		*mg_cur_max_alloc_queue_depth;
	zfs_refcount_t		*mg_alloc_queue_depth;
	int			mg_allocators;
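
	/*
	 * Illustrative walk-through of the ramp-up above, with made-up
	 * numbers: suppose mg_max_alloc_queue_depth is 64 and allocator 0's
	 * mg_cur_max_alloc_queue_depth[0] is currently 8. New allocations
	 * are handed to allocator 0 of this group only while
	 * mg_alloc_queue_depth[0] < 8; each IO that completes for that
	 * allocator bumps the cur_max (8 -> 9 -> ...) toward 64, and the
	 * cur_max is reset at the start of the next txg.
	 */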
	/*
	 * A metaslab group that can no longer allocate the minimum block
	 * size will set mg_no_free_space. Once a metaslab group is out
	 * of space then its share of work must be distributed to other
	 * groups.
	 */
	boolean_t		mg_no_free_space;

	uint64_t		mg_allocations;
	uint64_t		mg_failed_allocations;
	uint64_t		mg_fragmentation;
	uint64_t		mg_histogram[RANGE_TREE_HISTOGRAM_SIZE];

	int			mg_ms_disabled;
	boolean_t		mg_disabled_updating;
	kmutex_t		mg_ms_disabled_lock;
	kcondvar_t		mg_ms_disabled_cv;
};

/*
 * This value defines the number of elements in the ms_lbas array. The value
 * of 64 was chosen as it covers all power of 2 buckets up to UINT64_MAX.
 * This is the equivalent of highbit(UINT64_MAX).
 */
#define	MAX_LBAS	64

/*
 * Each metaslab maintains a set of in-core trees to track metaslab
 * operations. The in-core free tree (ms_allocatable) contains the list of
 * free segments which are eligible for allocation. As blocks are
 * allocated, the allocated segments are removed from the ms_allocatable and
 * added to a per txg allocation tree (ms_allocating). As blocks are
 * freed, they are added to the free tree (ms_freeing). These trees
 * allow us to process all allocations and frees in syncing context
 * where it is safe to update the on-disk space maps. An additional set
 * of in-core trees is maintained to track deferred frees
 * (ms_defer). Once a block is freed it will move from the
 * ms_freed to the ms_defer tree. A deferred free means that a block
 * has been freed but cannot be used by the pool until TXG_DEFER_SIZE
 * transaction groups later. For example, a block that is freed in txg
 * 50 will not be available for reallocation until txg 52 (50 +
 * TXG_DEFER_SIZE). This provides a safety net for uberblock rollback.
 * A pool can be safely rolled back TXG_DEFER_SIZE transaction groups
 * with the assurance that no freed block has been reallocated.
 *
 * The simplified transition diagram looks like this:
 *
 *
 *      ALLOCATE
 *         |
 *         V
 *    free segment (ms_allocatable) -> ms_allocating[4] -> (write to space map)
 *         ^
 *         |                        ms_freeing <--- FREE
 *         |                             |
 *         |                             v
 *         |                         ms_freed
 *         |                             |
 *         +-------- ms_defer[2] <-------+-------> (write to space map)
 *
 *
 * Each metaslab's space is tracked in a single space map in the MOS,
 * which is only updated in syncing context. Each time we sync a txg,
 * we append the allocs and frees from that txg to the space map. The
 * pool space is only updated once all metaslabs have finished syncing.
 *
 * To load the in-core free tree we read the space map from disk. This
 * object contains a series of alloc and free records that are combined
 * to make up the list of all free segments in this metaslab. These
 * segments are represented in-core by the ms_allocatable and are stored
 * in an AVL tree.
 *
 * As the space map grows (as a result of the appends) it will
 * eventually become space-inefficient. When the metaslab's in-core
 * free tree is zfs_condense_pct/100 times the size of the minimal
 * on-disk representation, we rewrite it in its minimized form. If a
 * metaslab needs to condense then we must set the ms_condensing flag to
 * ensure that allocations are not performed on the metaslab that is
 * being written.
 */
struct metaslab {
	/*
	 * This is the main lock of the metaslab and its purpose is to
	 * coordinate our allocations and frees [e.g. metaslab_block_alloc(),
	 * metaslab_free_concrete(), etc.] with our various syncing
	 * procedures [e.g. metaslab_sync(), metaslab_sync_done(), etc.].
	 *
	 * The lock is also used during some miscellaneous operations like
	 * using the metaslab's histogram for the metaslab group's histogram
	 * aggregation, or marking the metaslab for initialization.
	 */
	kmutex_t		ms_lock;

	/*
	 * Acquired together with the ms_lock whenever we expect to
	 * write to metaslab data on-disk (i.e. flushing entries to
	 * the metaslab's space map). It helps coordinate readers of
	 * the metaslab's space map [see spa_vdev_remove_thread()]
	 * with writers [see metaslab_sync() or metaslab_flush()].
	 *
	 * Note that metaslab_load(), even though a reader, uses
	 * a completely different mechanism to deal with the reading
	 * of the metaslab's space map based on ms_synced_length. That
	 * said, the function still uses the ms_sync_lock after it
	 * has read the ms_sm [see relevant comment in metaslab_load()
	 * as to why].
	 */
	kmutex_t		ms_sync_lock;

	kcondvar_t		ms_load_cv;
	space_map_t		*ms_sm;
	uint64_t		ms_id;
	uint64_t		ms_start;
	uint64_t		ms_size;
	uint64_t		ms_fragmentation;

	range_tree_t		*ms_allocating[TXG_SIZE];
	range_tree_t		*ms_allocatable;
	uint64_t		ms_allocated_this_txg;
	uint64_t		ms_allocating_total;

	/*
	 * The following range trees are accessed only from syncing context.
	 * ms_free*tree only have entries while syncing, and are empty
	 * between syncs.
	 */
	range_tree_t		*ms_freeing;	/* to free this syncing txg */
	range_tree_t		*ms_freed;	/* already freed this syncing txg */
	range_tree_t		*ms_defer[TXG_DEFER_SIZE];
	range_tree_t		*ms_checkpointing; /* to add to the checkpoint */

	/*
	 * The ms_trim tree is the set of allocatable segments which are
	 * eligible for trimming. (When the metaslab is loaded, it's a
	 * subset of ms_allocatable.) It's kept in-core as long as the
	 * autotrim property is set and is not vacated when the metaslab
	 * is unloaded. Its purpose is to aggregate freed ranges to
	 * facilitate efficient trimming.
	 */
	range_tree_t		*ms_trim;

	boolean_t		ms_condensing;	/* condensing? */
	boolean_t		ms_condense_wanted;

	/*
	 * The number of consumers which have disabled the metaslab.
	 */
	uint64_t		ms_disabled;

	/*
	 * We must always hold the ms_lock when modifying ms_loaded
	 * and ms_loading.
	 */
	boolean_t		ms_loaded;
	boolean_t		ms_loading;
	kcondvar_t		ms_flush_cv;
	boolean_t		ms_flushing;

	/*
	 * The following histograms count entries that are in the
	 * metaslab's space map (and its histogram) but are not in
	 * ms_allocatable yet, because they are in ms_freed, ms_freeing,
	 * or ms_defer[].
	 *
	 * When the metaslab is not loaded, its ms_weight needs to
	 * reflect what is allocatable (i.e. what will be part of
	 * ms_allocatable if it is loaded). The weight is computed from
	 * the spacemap histogram, but that includes ranges that are
	 * not yet allocatable (because they are in ms_freed,
	 * ms_freeing, or ms_defer[]). Therefore, when calculating the
	 * weight, we need to remove those ranges.
	 *
	 * The ranges in the ms_freed and ms_defer[] range trees are all
	 * present in the spacemap. However, the spacemap may have
	 * multiple entries to represent a contiguous range, because it
	 * is written across multiple sync passes, but the changes of
	 * all sync passes are consolidated into the range trees.
	 * Adjacent ranges that are freed in different sync passes of
	 * one txg will be represented separately (as 2 or more entries)
	 * in the space map (and its histogram), but these adjacent
	 * ranges will be consolidated (represented as one entry) in the
	 * ms_freed/ms_defer[] range trees (and their histograms).
	 *
	 * When calculating the weight, we cannot simply subtract the
	 * range trees' histograms from the spacemap's histogram,
	 * because the range trees' histograms may have entries in
	 * higher buckets than the spacemap, due to consolidation.
	 * Instead we must subtract the exact entries that were added to
	 * the spacemap's histogram. ms_synchist and ms_deferhist[]
	 * represent these exact entries, so we can subtract them from
	 * the spacemap's histogram when calculating ms_weight.
	 *
	 * ms_synchist represents the same ranges as ms_freeing +
	 * ms_freed, but without consolidation across sync passes.
	 *
	 * ms_deferhist[i] represents the same ranges as ms_defer[i],
	 * but without consolidation across sync passes.
	 */
	uint64_t	ms_synchist[SPACE_MAP_HISTOGRAM_SIZE];
	uint64_t	ms_deferhist[TXG_DEFER_SIZE][SPACE_MAP_HISTOGRAM_SIZE];
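
	/*
	 * A concrete (hypothetical) example of the consolidation issue
	 * described above: two adjacent 64K ranges freed in sync passes 1
	 * and 2 of the same txg are written to the space map as two separate
	 * 64K entries (two counts in the 64K histogram bucket), but appear
	 * in ms_freed as one consolidated 128K segment (one count in the
	 * 128K bucket). Subtracting ms_freed's histogram from the space
	 * map's would therefore go negative in one bucket and leave a stale
	 * count in another; subtracting ms_synchist, which records the two
	 * 64K entries exactly as written, keeps the weight computation exact.
	 */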

	/*
	 * Tracks the exact amount of allocated space of this metaslab
	 * (and specifically the metaslab's space map) up to the most
	 * recently completed sync pass [see usage in metaslab_sync()].
	 */
	uint64_t	ms_allocated_space;
	int64_t		ms_deferspace;	/* sum of ms_defermap[] space */
	uint64_t	ms_weight;	/* weight vs. others in group */
	uint64_t	ms_activation_weight;	/* activation weight */

	/*
	 * Tracks the txg in which this metaslab was last selected for
	 * loading or allocation. We use this value to determine how long
	 * the metaslab should stay cached.
	 */
	uint64_t		ms_selected_txg;
	/*
	 * ms_load/unload_time can be used for performance monitoring
	 * (e.g. by dtrace or mdb).
	 */
	hrtime_t		ms_load_time;	/* time last loaded */
	hrtime_t		ms_unload_time;	/* time last unloaded */

	uint64_t		ms_alloc_txg;	/* last successful alloc (debug only) */
	uint64_t		ms_max_size;	/* maximum allocatable size */

	/*
	 * -1 if it's not active in an allocator, otherwise set to the allocator
	 * this metaslab is active for.
	 */
	int			ms_allocator;
	boolean_t		ms_primary; /* Only valid if ms_allocator is not -1 */

	/*
	 * The metaslab block allocators can optionally use a size-ordered
	 * range tree and/or an array of LBAs. Not all allocators use
	 * this functionality. The ms_allocatable_by_size should always
	 * contain the same number of segments as the ms_allocatable. The
	 * only difference is that the ms_allocatable_by_size is ordered by
	 * segment sizes.
	 */
	avl_tree_t		ms_allocatable_by_size;
	avl_tree_t		ms_unflushed_frees_by_size;
	uint64_t		ms_lbas[MAX_LBAS];

	metaslab_group_t	*ms_group;	/* metaslab group */
	avl_node_t		ms_group_node;	/* node in metaslab group tree */
	txg_node_t		ms_txg_node;	/* per-txg dirty metaslab links */
	avl_node_t		ms_spa_txg_node; /* node in spa_metaslabs_by_txg */
	/*
	 * Node in metaslab class's selected txg list
	 */
	multilist_node_t	ms_class_txg_node;

	/*
	 * Allocs and frees that are committed to the vdev log spacemap but
	 * not yet to this metaslab's spacemap.
	 */
	range_tree_t		*ms_unflushed_allocs;
	range_tree_t		*ms_unflushed_frees;

	/*
	 * We have flushed entries up to but not including this TXG. In
	 * other words, all changes from this TXG and onward should not
	 * be in this metaslab's space map and must be read from the
	 * log space maps.
	 */
	uint64_t		ms_unflushed_txg;
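
	/*
	 * For example (hypothetical numbers): if ms_unflushed_txg is 100,
	 * the space map on disk reflects only changes from txgs < 100;
	 * anything this metaslab did in txgs >= 100 still lives in the log
	 * space maps (and in ms_unflushed_allocs/ms_unflushed_frees) and is
	 * applied on top of the space map contents when the metaslab loads.
	 */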

	/* updated every time we are done syncing the metaslab's space map */
	uint64_t		ms_synced_length;

	boolean_t		ms_new;
};

typedef struct metaslab_unflushed_phys {
	/* on-disk counterpart of ms_unflushed_txg */
	uint64_t	msp_unflushed_txg;
} metaslab_unflushed_phys_t;

#ifdef __cplusplus
}
#endif

#endif /* _SYS_METASLAB_IMPL_H */